author     Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-10-15 17:10:06 +1100
committer  Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-10-15 17:10:06 +1100
commit     be1de4be954c80875ad4108e0a33e8e131b2f2c0
tree       1fbbecf276bf7c7bdcbb4dd446099d6d90eaa516
parent     c4626a62754862d20b41e8a46a3574264ea80e6d
parent     f1bd2e48c5324d3f7cda4090c87f8a5b6f463ce2
Merge branch 'master' of ssh://bitbucket.org/czan/honours
Diffstat (limited to 'clang/utils')
-rwxr-xr-x  clang/utils/ABITest/ABITestGen.py  672
-rw-r--r--  clang/utils/ABITest/Enumeration.py  276
-rw-r--r--  clang/utils/ABITest/Makefile.test.common  170
-rw-r--r--  clang/utils/ABITest/TypeGen.py  471
-rwxr-xr-x  clang/utils/ABITest/build-and-summarize-all.sh  15
-rwxr-xr-x  clang/utils/ABITest/build-and-summarize.sh  14
-rwxr-xr-x  clang/utils/ABITest/build.sh  12
-rw-r--r--  clang/utils/ABITest/layout/Makefile  68
-rw-r--r--  clang/utils/ABITest/return-types-32/Makefile  7
-rw-r--r--  clang/utils/ABITest/return-types-64/Makefile  7
-rw-r--r--  clang/utils/ABITest/single-args-32/Makefile  7
-rw-r--r--  clang/utils/ABITest/single-args-64/Makefile  13
-rwxr-xr-x  clang/utils/ABITest/summarize.sh  15
-rw-r--r--  clang/utils/C++Tests/Clang-Code-Compile/lit.local.cfg  26
-rw-r--r--  clang/utils/C++Tests/Clang-Code-Syntax/lit.local.cfg  25
-rw-r--r--  clang/utils/C++Tests/Clang-Syntax/lit.local.cfg  24
-rw-r--r--  clang/utils/C++Tests/LLVM-Code-Compile/lit.local.cfg  48
-rwxr-xr-x  clang/utils/C++Tests/LLVM-Code-Symbols/check-symbols  54
-rw-r--r--  clang/utils/C++Tests/LLVM-Code-Symbols/lit.local.cfg  48
-rw-r--r--  clang/utils/C++Tests/LLVM-Code-Syntax/lit.local.cfg  46
-rw-r--r--  clang/utils/C++Tests/LLVM-Syntax/lit.local.cfg  24
-rw-r--r--  clang/utils/C++Tests/lit.cfg  27
-rw-r--r--  clang/utils/C++Tests/stdc++-Syntax/lit.local.cfg  17
-rwxr-xr-x  clang/utils/CIndex/completion_logger_server.py  44
-rwxr-xr-x  clang/utils/CaptureCmd  73
-rwxr-xr-x  clang/utils/CmpDriver  210
-rwxr-xr-x  clang/utils/FindSpecRefs  910
-rwxr-xr-x  clang/utils/FuzzTest  350
-rw-r--r--  clang/utils/OptionalTests/Extra/README.txt  3
-rw-r--r--  clang/utils/OptionalTests/Extra/Runtime/darwin-clang_rt.c  338
-rw-r--r--  clang/utils/OptionalTests/README.txt  4
-rw-r--r--  clang/utils/OptionalTests/lit.cfg  26
-rwxr-xr-x  clang/utils/SummarizeErrors  117
-rw-r--r--  clang/utils/TableGen/CMakeLists.txt  13
-rw-r--r--  clang/utils/TableGen/ClangASTNodesEmitter.cpp  168
-rw-r--r--  clang/utils/TableGen/ClangASTNodesEmitter.h  84
-rw-r--r--  clang/utils/TableGen/ClangAttrEmitter.cpp  1092
-rw-r--r--  clang/utils/TableGen/ClangAttrEmitter.h  153
-rw-r--r--  clang/utils/TableGen/ClangDiagnosticsEmitter.cpp  385
-rw-r--r--  clang/utils/TableGen/ClangDiagnosticsEmitter.h  54
-rw-r--r--  clang/utils/TableGen/ClangSACheckersEmitter.cpp  319
-rw-r--r--  clang/utils/TableGen/ClangSACheckersEmitter.h  31
-rw-r--r--  clang/utils/TableGen/Makefile  19
-rw-r--r--  clang/utils/TableGen/NeonEmitter.cpp  1574
-rw-r--r--  clang/utils/TableGen/NeonEmitter.h  210
-rw-r--r--  clang/utils/TableGen/OptParserEmitter.cpp  194
-rw-r--r--  clang/utils/TableGen/OptParserEmitter.h  34
-rw-r--r--  clang/utils/TableGen/TableGen.cpp  194
-rwxr-xr-x  clang/utils/TestUtils/deep-stack.py  24
-rwxr-xr-x  clang/utils/TestUtils/pch-test.pl  61
-rw-r--r--  clang/utils/VtableTest/Makefile  24
-rwxr-xr-x  clang/utils/VtableTest/check-zti  20
-rwxr-xr-x  clang/utils/VtableTest/check-ztt  20
-rwxr-xr-x  clang/utils/VtableTest/check-zvt  18
-rw-r--r--  clang/utils/VtableTest/gen.cc  350
-rwxr-xr-x  clang/utils/analyzer/CmpRuns.py  253
-rwxr-xr-x  clang/utils/analyzer/SATestAdd.py  80
-rwxr-xr-x  clang/utils/analyzer/SATestBuild.py  475
-rwxr-xr-x  clang/utils/analyzer/ubiviz  74
-rw-r--r--  clang/utils/builtin-defines.c  85
-rw-r--r--  clang/utils/clang-completion-mode.el  257
-rw-r--r--  clang/utils/clangVisualizers.txt  134
-rw-r--r--  clang/utils/find-unused-diagnostics.sh  19
-rwxr-xr-x  clang/utils/token-delta.py  251
-rw-r--r--  clang/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp  23
65 files changed, 10853 insertions, 0 deletions
diff --git a/clang/utils/ABITest/ABITestGen.py b/clang/utils/ABITest/ABITestGen.py
new file mode 100755
index 0000000..62925e7
--- /dev/null
+++ b/clang/utils/ABITest/ABITestGen.py
@@ -0,0 +1,672 @@
+#!/usr/bin/env python
+
+from pprint import pprint
+import random, atexit, time
+from random import randrange
+import re
+
+from Enumeration import *
+from TypeGen import *
+
+####
+
+class TypePrinter:
+ def __init__(self, output, outputHeader=None,
+ outputTests=None, outputDriver=None,
+ headerName=None, info=None):
+ self.output = output
+ self.outputHeader = outputHeader
+ self.outputTests = outputTests
+ self.outputDriver = outputDriver
+ self.writeBody = outputHeader or outputTests or outputDriver
+ self.types = {}
+ self.testValues = {}
+ self.testReturnValues = {}
+ self.layoutTests = []
+ self.declarations = set()
+
+ if info:
+ for f in (self.output,self.outputHeader,self.outputTests,self.outputDriver):
+ if f:
+ print >>f,info
+
+ if self.writeBody:
+ print >>self.output, '#include <stdio.h>\n'
+ if self.outputTests:
+ print >>self.outputTests, '#include <stdio.h>'
+ print >>self.outputTests, '#include <string.h>'
+ print >>self.outputTests, '#include <assert.h>\n'
+
+ if headerName:
+ for f in (self.output,self.outputTests,self.outputDriver):
+ if f is not None:
+ print >>f, '#include "%s"\n'%(headerName,)
+
+ if self.outputDriver:
+ print >>self.outputDriver, '#include <stdio.h>'
+ print >>self.outputDriver, '#include <stdlib.h>\n'
+ print >>self.outputDriver, 'int main(int argc, char **argv) {'
+ print >>self.outputDriver, ' int index = -1;'
+ print >>self.outputDriver, ' if (argc > 1) index = atoi(argv[1]);'
+
+ def finish(self):
+ if self.layoutTests:
+ print >>self.output, 'int main(int argc, char **argv) {'
+ print >>self.output, ' int index = -1;'
+ print >>self.output, ' if (argc > 1) index = atoi(argv[1]);'
+ for i,f in self.layoutTests:
+ print >>self.output, ' if (index == -1 || index == %d)' % i
+ print >>self.output, ' %s();' % f
+ print >>self.output, ' return 0;'
+ print >>self.output, '}'
+
+ if self.outputDriver:
+ print >>self.outputDriver, ' printf("DONE\\n");'
+ print >>self.outputDriver, ' return 0;'
+ print >>self.outputDriver, '}'
+
+ def addDeclaration(self, decl):
+ if decl in self.declarations:
+ return False
+
+ self.declarations.add(decl)
+ if self.outputHeader:
+ print >>self.outputHeader, decl
+ else:
+ print >>self.output, decl
+ if self.outputTests:
+ print >>self.outputTests, decl
+ return True
+
+ def getTypeName(self, T):
+ name = self.types.get(T)
+ if name is None:
+ # Reserve slot
+ self.types[T] = None
+ self.types[T] = name = T.getTypeName(self)
+ return name
+
+ def writeLayoutTest(self, i, ty):
+ tyName = self.getTypeName(ty)
+ tyNameClean = tyName.replace(' ','_').replace('*','star')
+ fnName = 'test_%s' % tyNameClean
+
+ print >>self.output,'void %s(void) {' % fnName
+ self.printSizeOfType(' %s'%fnName, tyName, ty, self.output)
+ self.printAlignOfType(' %s'%fnName, tyName, ty, self.output)
+ self.printOffsetsOfType(' %s'%fnName, tyName, ty, self.output)
+ print >>self.output,'}'
+ print >>self.output
+
+ self.layoutTests.append((i,fnName))
+
+ def writeFunction(self, i, FT):
+ args = ', '.join(['%s arg%d'%(self.getTypeName(t),i) for i,t in enumerate(FT.argTypes)])
+ if not args:
+ args = 'void'
+
+ if FT.returnType is None:
+ retvalName = None
+ retvalTypeName = 'void'
+ else:
+ retvalTypeName = self.getTypeName(FT.returnType)
+ if self.writeBody or self.outputTests:
+ retvalName = self.getTestReturnValue(FT.returnType)
+
+ fnName = 'fn%d'%(FT.index,)
+ if self.outputHeader:
+ print >>self.outputHeader,'%s %s(%s);'%(retvalTypeName, fnName, args)
+ elif self.outputTests:
+ print >>self.outputTests,'%s %s(%s);'%(retvalTypeName, fnName, args)
+
+ print >>self.output,'%s %s(%s)'%(retvalTypeName, fnName, args),
+ if self.writeBody:
+ print >>self.output, '{'
+
+ for i,t in enumerate(FT.argTypes):
+ self.printValueOfType(' %s'%fnName, 'arg%d'%i, t)
+
+ if retvalName is not None:
+ print >>self.output, ' return %s;'%(retvalName,)
+ print >>self.output, '}'
+ else:
+ print >>self.output, '{}'
+ print >>self.output
+
+ if self.outputDriver:
+ print >>self.outputDriver, ' if (index == -1 || index == %d) {' % i
+ print >>self.outputDriver, ' extern void test_%s(void);' % fnName
+ print >>self.outputDriver, ' test_%s();' % fnName
+ print >>self.outputDriver, ' }'
+
+ if self.outputTests:
+ if self.outputHeader:
+ print >>self.outputHeader, 'void test_%s(void);'%(fnName,)
+
+ if retvalName is None:
+ retvalTests = None
+ else:
+ retvalTests = self.getTestValuesArray(FT.returnType)
+ tests = map(self.getTestValuesArray, FT.argTypes)
+ print >>self.outputTests, 'void test_%s(void) {'%(fnName,)
+
+ if retvalTests is not None:
+ print >>self.outputTests, ' printf("%s: testing return.\\n");'%(fnName,)
+ print >>self.outputTests, ' for (int i=0; i<%d; ++i) {'%(retvalTests[1],)
+ args = ', '.join(['%s[%d]'%(t,randrange(l)) for t,l in tests])
+ print >>self.outputTests, ' %s RV;'%(retvalTypeName,)
+ print >>self.outputTests, ' %s = %s[i];'%(retvalName, retvalTests[0])
+ print >>self.outputTests, ' RV = %s(%s);'%(fnName, args)
+ self.printValueOfType(' %s_RV'%fnName, 'RV', FT.returnType, output=self.outputTests, indent=4)
+ self.checkTypeValues('RV', '%s[i]' % retvalTests[0], FT.returnType, output=self.outputTests, indent=4)
+ print >>self.outputTests, ' }'
+
+ if tests:
+ print >>self.outputTests, ' printf("%s: testing arguments.\\n");'%(fnName,)
+ for i,(array,length) in enumerate(tests):
+ for j in range(length):
+ args = ['%s[%d]'%(t,randrange(l)) for t,l in tests]
+ args[i] = '%s[%d]'%(array,j)
+ print >>self.outputTests, ' %s(%s);'%(fnName, ', '.join(args),)
+ print >>self.outputTests, '}'
+
+ def getTestReturnValue(self, type):
+ typeName = self.getTypeName(type)
+ info = self.testReturnValues.get(typeName)
+ if info is None:
+ name = '%s_retval'%(typeName.replace(' ','_').replace('*','star'),)
+ print >>self.output, '%s %s;'%(typeName,name)
+ if self.outputHeader:
+ print >>self.outputHeader, 'extern %s %s;'%(typeName,name)
+ elif self.outputTests:
+ print >>self.outputTests, 'extern %s %s;'%(typeName,name)
+ info = self.testReturnValues[typeName] = name
+ return info
+
+ def getTestValuesArray(self, type):
+ typeName = self.getTypeName(type)
+ info = self.testValues.get(typeName)
+ if info is None:
+ name = '%s_values'%(typeName.replace(' ','_').replace('*','star'),)
+ print >>self.outputTests, 'static %s %s[] = {'%(typeName,name)
+ length = 0
+ for item in self.getTestValues(type):
+ print >>self.outputTests, '\t%s,'%(item,)
+ length += 1
+ print >>self.outputTests,'};'
+ info = self.testValues[typeName] = (name,length)
+ return info
+
+ def getTestValues(self, t):
+ if isinstance(t, BuiltinType):
+ if t.name=='float':
+ for i in ['0.0','-1.0','1.0']:
+ yield i+'f'
+ elif t.name=='double':
+ for i in ['0.0','-1.0','1.0']:
+ yield i
+ elif t.name in ('void *'):
+ yield '(void*) 0'
+ yield '(void*) -1'
+ else:
+ yield '(%s) 0'%(t.name,)
+ yield '(%s) -1'%(t.name,)
+ yield '(%s) 1'%(t.name,)
+ elif isinstance(t, EnumType):
+ for i in range(0, len(t.enumerators)):
+ yield 'enum%dval%d' % (t.index, i)
+ elif isinstance(t, RecordType):
+ nonPadding = [f for f in t.fields
+ if not f.isPaddingBitField()]
+
+ if not nonPadding:
+ yield '{ }'
+ return
+
+ # FIXME: Use designated initializers to access non-first
+ # fields of unions.
+ if t.isUnion:
+ for v in self.getTestValues(nonPadding[0]):
+ yield '{ %s }' % v
+ return
+
+ fieldValues = map(list, map(self.getTestValues, nonPadding))
+ for i,values in enumerate(fieldValues):
+ for v in values:
+ elements = map(random.choice,fieldValues)
+ elements[i] = v
+ yield '{ %s }'%(', '.join(elements))
+
+ elif isinstance(t, ComplexType):
+ for t in self.getTestValues(t.elementType):
+ yield '%s + %s * 1i'%(t,t)
+ elif isinstance(t, ArrayType):
+ values = list(self.getTestValues(t.elementType))
+ if not values:
+ yield '{ }'
+ for i in range(t.numElements):
+ for v in values:
+ elements = [random.choice(values) for i in range(t.numElements)]
+ elements[i] = v
+ yield '{ %s }'%(', '.join(elements))
+ else:
+ raise NotImplementedError,'Cannot make tests values of type: "%s"'%(t,)
+
+ def printSizeOfType(self, prefix, name, t, output=None, indent=2):
+ print >>output, '%*sprintf("%s: sizeof(%s) = %%ld\\n", (long)sizeof(%s));'%(indent, '', prefix, name, name)
+ def printAlignOfType(self, prefix, name, t, output=None, indent=2):
+ print >>output, '%*sprintf("%s: __alignof__(%s) = %%ld\\n", (long)__alignof__(%s));'%(indent, '', prefix, name, name)
+ def printOffsetsOfType(self, prefix, name, t, output=None, indent=2):
+ if isinstance(t, RecordType):
+ for i,f in enumerate(t.fields):
+ if f.isBitField():
+ continue
+ fname = 'field%d' % i
+ print >>output, '%*sprintf("%s: __builtin_offsetof(%s, %s) = %%ld\\n", (long)__builtin_offsetof(%s, %s));'%(indent, '', prefix, name, fname, name, fname)
+
+ def printValueOfType(self, prefix, name, t, output=None, indent=2):
+ if output is None:
+ output = self.output
+ if isinstance(t, BuiltinType):
+ value_expr = name
+ if t.name.split(' ')[-1] == '_Bool':
+ # Hack to work around PR5579.
+ value_expr = "%s ? 2 : 0" % name
+
+ if t.name.endswith('long long'):
+ code = 'lld'
+ elif t.name.endswith('long'):
+ code = 'ld'
+ elif t.name.split(' ')[-1] in ('_Bool','char','short',
+ 'int','unsigned'):
+ code = 'd'
+ elif t.name in ('float','double'):
+ code = 'f'
+ elif t.name == 'long double':
+ code = 'Lf'
+ else:
+ code = 'p'
+ print >>output, '%*sprintf("%s: %s = %%%s\\n", %s);'%(
+ indent, '', prefix, name, code, value_expr)
+ elif isinstance(t, EnumType):
+ print >>output, '%*sprintf("%s: %s = %%d\\n", %s);'%(indent, '', prefix, name, name)
+ elif isinstance(t, RecordType):
+ if not t.fields:
+ print >>output, '%*sprintf("%s: %s (empty)\\n");'%(indent, '', prefix, name)
+ for i,f in enumerate(t.fields):
+ if f.isPaddingBitField():
+ continue
+ fname = '%s.field%d'%(name,i)
+ self.printValueOfType(prefix, fname, f, output=output, indent=indent)
+ elif isinstance(t, ComplexType):
+ self.printValueOfType(prefix, '(__real %s)'%name, t.elementType, output=output,indent=indent)
+ self.printValueOfType(prefix, '(__imag %s)'%name, t.elementType, output=output,indent=indent)
+ elif isinstance(t, ArrayType):
+ for i in range(t.numElements):
+ # Access in this fashion as a hackish way to portably
+ # access vectors.
+ if t.isVector:
+ self.printValueOfType(prefix, '((%s*) &%s)[%d]'%(t.elementType,name,i), t.elementType, output=output,indent=indent)
+ else:
+ self.printValueOfType(prefix, '%s[%d]'%(name,i), t.elementType, output=output,indent=indent)
+ else:
+ raise NotImplementedError,'Cannot print value of type: "%s"'%(t,)
+
+ def checkTypeValues(self, nameLHS, nameRHS, t, output=None, indent=2):
+ prefix = 'foo'
+ if output is None:
+ output = self.output
+ if isinstance(t, BuiltinType):
+ print >>output, '%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS)
+ elif isinstance(t, EnumType):
+ print >>output, '%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS)
+ elif isinstance(t, RecordType):
+ for i,f in enumerate(t.fields):
+ if f.isPaddingBitField():
+ continue
+ self.checkTypeValues('%s.field%d'%(nameLHS,i), '%s.field%d'%(nameRHS,i),
+ f, output=output, indent=indent)
+ if t.isUnion:
+ break
+ elif isinstance(t, ComplexType):
+ self.checkTypeValues('(__real %s)'%nameLHS, '(__real %s)'%nameRHS, t.elementType, output=output,indent=indent)
+ self.checkTypeValues('(__imag %s)'%nameLHS, '(__imag %s)'%nameRHS, t.elementType, output=output,indent=indent)
+ elif isinstance(t, ArrayType):
+ for i in range(t.numElements):
+ # Access in this fashion as a hackish way to portably
+ # access vectors.
+ if t.isVector:
+ self.checkTypeValues('((%s*) &%s)[%d]'%(t.elementType,nameLHS,i),
+ '((%s*) &%s)[%d]'%(t.elementType,nameRHS,i),
+ t.elementType, output=output,indent=indent)
+ else:
+ self.checkTypeValues('%s[%d]'%(nameLHS,i), '%s[%d]'%(nameRHS,i),
+ t.elementType, output=output,indent=indent)
+ else:
+ raise NotImplementedError,'Cannot print value of type: "%s"'%(t,)
+
+import sys
+
+def main():
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("%prog [options] {indices}")
+ parser.add_option("", "--mode", dest="mode",
+ help="autogeneration mode (random or linear) [default %default]",
+ type='choice', choices=('random','linear'), default='linear')
+ parser.add_option("", "--count", dest="count",
+ help="autogenerate COUNT functions according to MODE",
+ type=int, default=0)
+ parser.add_option("", "--min", dest="minIndex", metavar="N",
+ help="start autogeneration with the Nth function type [default %default]",
+ type=int, default=0)
+ parser.add_option("", "--max", dest="maxIndex", metavar="N",
+ help="maximum index for random autogeneration [default %default]",
+ type=int, default=10000000)
+ parser.add_option("", "--seed", dest="seed",
+ help="random number generator seed [default %default]",
+ type=int, default=1)
+ parser.add_option("", "--use-random-seed", dest="useRandomSeed",
+ help="use random value for initial random number generator seed",
+ action='store_true', default=False)
+ parser.add_option("", "--skip", dest="skipTests",
+ help="add a test index to skip",
+ type=int, action='append', default=[])
+ parser.add_option("-o", "--output", dest="output", metavar="FILE",
+ help="write output to FILE [default %default]",
+ type=str, default='-')
+ parser.add_option("-O", "--output-header", dest="outputHeader", metavar="FILE",
+ help="write header file for output to FILE [default %default]",
+ type=str, default=None)
+ parser.add_option("-T", "--output-tests", dest="outputTests", metavar="FILE",
+ help="write function tests to FILE [default %default]",
+ type=str, default=None)
+ parser.add_option("-D", "--output-driver", dest="outputDriver", metavar="FILE",
+ help="write test driver to FILE [default %default]",
+ type=str, default=None)
+ parser.add_option("", "--test-layout", dest="testLayout", metavar="FILE",
+ help="test structure layout",
+ action='store_true', default=False)
+
+ group = OptionGroup(parser, "Type Enumeration Options")
+ # Builtins - Ints
+ group.add_option("", "--no-char", dest="useChar",
+ help="do not generate char types",
+ action="store_false", default=True)
+ group.add_option("", "--no-short", dest="useShort",
+ help="do not generate short types",
+ action="store_false", default=True)
+ group.add_option("", "--no-int", dest="useInt",
+ help="do not generate int types",
+ action="store_false", default=True)
+ group.add_option("", "--no-long", dest="useLong",
+ help="do not generate long types",
+ action="store_false", default=True)
+ group.add_option("", "--no-long-long", dest="useLongLong",
+ help="do not generate long long types",
+ action="store_false", default=True)
+ group.add_option("", "--no-unsigned", dest="useUnsigned",
+ help="do not generate unsigned integer types",
+ action="store_false", default=True)
+
+ # Other builtins
+ group.add_option("", "--no-bool", dest="useBool",
+ help="do not generate bool types",
+ action="store_false", default=True)
+ group.add_option("", "--no-float", dest="useFloat",
+ help="do not generate float types",
+ action="store_false", default=True)
+ group.add_option("", "--no-double", dest="useDouble",
+ help="do not generate double types",
+ action="store_false", default=True)
+ group.add_option("", "--no-long-double", dest="useLongDouble",
+ help="do not generate long double types",
+ action="store_false", default=True)
+ group.add_option("", "--no-void-pointer", dest="useVoidPointer",
+ help="do not generate void* types",
+ action="store_false", default=True)
+
+ # Enumerations
+ group.add_option("", "--no-enums", dest="useEnum",
+ help="do not generate enum types",
+ action="store_false", default=True)
+
+ # Derived types
+ group.add_option("", "--no-array", dest="useArray",
+ help="do not generate record types",
+ action="store_false", default=True)
+ group.add_option("", "--no-complex", dest="useComplex",
+ help="do not generate complex types",
+ action="store_false", default=True)
+ group.add_option("", "--no-record", dest="useRecord",
+ help="do not generate record types",
+ action="store_false", default=True)
+ group.add_option("", "--no-union", dest="recordUseUnion",
+ help="do not generate union types",
+ action="store_false", default=True)
+ group.add_option("", "--no-vector", dest="useVector",
+ help="do not generate vector types",
+ action="store_false", default=True)
+ group.add_option("", "--no-bit-field", dest="useBitField",
+ help="do not generate bit-field record members",
+ action="store_false", default=True)
+ group.add_option("", "--no-builtins", dest="useBuiltins",
+ help="do not use any types",
+ action="store_false", default=True)
+
+ # Tuning
+ group.add_option("", "--no-function-return", dest="functionUseReturn",
+ help="do not generate return types for functions",
+ action="store_false", default=True)
+ group.add_option("", "--vector-types", dest="vectorTypes",
+ help="comma separated list of vector types (e.g., v2i32) [default %default]",
+ action="store", type=str, default='v2i16, v1i64, v2i32, v4i16, v8i8, v2f32, v2i64, v4i32, v8i16, v16i8, v2f64, v4f32, v16f32', metavar="N")
+ group.add_option("", "--bit-fields", dest="bitFields",
+ help="comma separated list 'type:width' bit-field specifiers [default %default]",
+ action="store", type=str, default=(
+ "char:0,char:4,int:0,unsigned:1,int:1,int:4,int:13,int:24"))
+ group.add_option("", "--max-args", dest="functionMaxArgs",
+ help="maximum number of arguments per function [default %default]",
+ action="store", type=int, default=4, metavar="N")
+ group.add_option("", "--max-array", dest="arrayMaxSize",
+ help="maximum array size [default %default]",
+ action="store", type=int, default=4, metavar="N")
+ group.add_option("", "--max-record", dest="recordMaxSize",
+ help="maximum number of fields per record [default %default]",
+ action="store", type=int, default=4, metavar="N")
+ group.add_option("", "--max-record-depth", dest="recordMaxDepth",
+ help="maximum nested structure depth [default %default]",
+ action="store", type=int, default=None, metavar="N")
+ parser.add_option_group(group)
+ (opts, args) = parser.parse_args()
+
+ if not opts.useRandomSeed:
+ random.seed(opts.seed)
+
+ # Construct type generator
+ builtins = []
+ if opts.useBuiltins:
+ ints = []
+ if opts.useChar: ints.append(('char',1))
+ if opts.useShort: ints.append(('short',2))
+ if opts.useInt: ints.append(('int',4))
+ # FIXME: Wrong size.
+ if opts.useLong: ints.append(('long',4))
+ if opts.useLongLong: ints.append(('long long',8))
+ if opts.useUnsigned:
+ ints = ([('unsigned %s'%i,s) for i,s in ints] +
+ [('signed %s'%i,s) for i,s in ints])
+ builtins.extend(ints)
+
+ if opts.useBool: builtins.append(('_Bool',1))
+ if opts.useFloat: builtins.append(('float',4))
+ if opts.useDouble: builtins.append(('double',8))
+ if opts.useLongDouble: builtins.append(('long double',16))
+ # FIXME: Wrong size.
+ if opts.useVoidPointer: builtins.append(('void*',4))
+
+ btg = FixedTypeGenerator([BuiltinType(n,s) for n,s in builtins])
+
+ bitfields = []
+ for specifier in opts.bitFields.split(','):
+ if not specifier.strip():
+ continue
+ name,width = specifier.strip().split(':', 1)
+ bitfields.append(BuiltinType(name,None,int(width)))
+ bftg = FixedTypeGenerator(bitfields)
+
+ charType = BuiltinType('char',1)
+ shortType = BuiltinType('short',2)
+ intType = BuiltinType('int',4)
+ longlongType = BuiltinType('long long',8)
+ floatType = BuiltinType('float',4)
+ doubleType = BuiltinType('double',8)
+ sbtg = FixedTypeGenerator([charType, intType, floatType, doubleType])
+
+ atg = AnyTypeGenerator()
+ artg = AnyTypeGenerator()
+ def makeGenerator(atg, subgen, subfieldgen, useRecord, useArray, useBitField):
+ atg.addGenerator(btg)
+ if useBitField and opts.useBitField:
+ atg.addGenerator(bftg)
+ if useRecord and opts.useRecord:
+ assert subgen
+ atg.addGenerator(RecordTypeGenerator(subfieldgen, opts.recordUseUnion,
+ opts.recordMaxSize))
+ if opts.useComplex:
+ # FIXME: Allow overriding builtins here
+ atg.addGenerator(ComplexTypeGenerator(sbtg))
+ if useArray and opts.useArray:
+ assert subgen
+ atg.addGenerator(ArrayTypeGenerator(subgen, opts.arrayMaxSize))
+ if opts.useVector:
+ vTypes = []
+ for i,t in enumerate(opts.vectorTypes.split(',')):
+ m = re.match('v([1-9][0-9]*)([if][1-9][0-9]*)', t.strip())
+ if not m:
+ parser.error('Invalid vector type: %r' % t)
+ count,kind = m.groups()
+ count = int(count)
+ type = { 'i8' : charType,
+ 'i16' : shortType,
+ 'i32' : intType,
+ 'i64' : longlongType,
+ 'f32' : floatType,
+ 'f64' : doubleType,
+ }.get(kind)
+ if not type:
+ parser.error('Invalid vector type: %r' % t)
+ vTypes.append(ArrayType(i, True, type, count * type.size))
+
+ atg.addGenerator(FixedTypeGenerator(vTypes))
+ if opts.useEnum:
+ atg.addGenerator(EnumTypeGenerator([None, '-1', '1', '1u'], 1, 4))
+
+ if opts.recordMaxDepth is None:
+ # Fully recursive, just avoid top-level arrays.
+ subFTG = AnyTypeGenerator()
+ subTG = AnyTypeGenerator()
+ atg = AnyTypeGenerator()
+ makeGenerator(subFTG, atg, atg, True, True, True)
+ makeGenerator(subTG, atg, subFTG, True, True, False)
+ makeGenerator(atg, subTG, subFTG, True, False, False)
+ else:
+ # Make a chain of type generators, each builds smaller
+ # structures.
+ base = AnyTypeGenerator()
+ fbase = AnyTypeGenerator()
+ makeGenerator(base, None, None, False, False, False)
+ makeGenerator(fbase, None, None, False, False, True)
+ for i in range(opts.recordMaxDepth):
+ n = AnyTypeGenerator()
+ fn = AnyTypeGenerator()
+ makeGenerator(n, base, fbase, True, True, False)
+ makeGenerator(fn, base, fbase, True, True, True)
+ base = n
+ fbase = fn
+ atg = AnyTypeGenerator()
+ makeGenerator(atg, base, fbase, True, False, False)
+
+ if opts.testLayout:
+ ftg = atg
+ else:
+ ftg = FunctionTypeGenerator(atg, opts.functionUseReturn, opts.functionMaxArgs)
+
+ # Override max,min,count if finite
+ if opts.maxIndex is None:
+ if ftg.cardinality is aleph0:
+ opts.maxIndex = 10000000
+ else:
+ opts.maxIndex = ftg.cardinality
+ opts.maxIndex = min(opts.maxIndex, ftg.cardinality)
+ opts.minIndex = max(0,min(opts.maxIndex-1, opts.minIndex))
+ if not opts.mode=='random':
+ opts.count = min(opts.count, opts.maxIndex-opts.minIndex)
+
+ if opts.output=='-':
+ output = sys.stdout
+ else:
+ output = open(opts.output,'w')
+ atexit.register(lambda: output.close())
+
+ outputHeader = None
+ if opts.outputHeader:
+ outputHeader = open(opts.outputHeader,'w')
+ atexit.register(lambda: outputHeader.close())
+
+ outputTests = None
+ if opts.outputTests:
+ outputTests = open(opts.outputTests,'w')
+ atexit.register(lambda: outputTests.close())
+
+ outputDriver = None
+ if opts.outputDriver:
+ outputDriver = open(opts.outputDriver,'w')
+ atexit.register(lambda: outputDriver.close())
+
+ info = ''
+ info += '// %s\n'%(' '.join(sys.argv),)
+ info += '// Generated: %s\n'%(time.strftime('%Y-%m-%d %H:%M'),)
+ info += '// Cardinality of function generator: %s\n'%(ftg.cardinality,)
+ info += '// Cardinality of type generator: %s\n'%(atg.cardinality,)
+
+ if opts.testLayout:
+ info += '\n#include <stdio.h>'
+
+ P = TypePrinter(output,
+ outputHeader=outputHeader,
+ outputTests=outputTests,
+ outputDriver=outputDriver,
+ headerName=opts.outputHeader,
+ info=info)
+
+ def write(N):
+ try:
+ FT = ftg.get(N)
+ except RuntimeError,e:
+ if e.args[0]=='maximum recursion depth exceeded':
+ print >>sys.stderr,'WARNING: Skipped %d, recursion limit exceeded (bad arguments?)'%(N,)
+ return
+ raise
+ if opts.testLayout:
+ P.writeLayoutTest(N, FT)
+ else:
+ P.writeFunction(N, FT)
+
+ if args:
+ [write(int(a)) for a in args]
+
+ skipTests = set(opts.skipTests)
+ for i in range(opts.count):
+ if opts.mode=='linear':
+ index = opts.minIndex + i
+ else:
+ index = opts.minIndex + int((opts.maxIndex-opts.minIndex) * random.random())
+ if index in skipTests:
+ continue
+ write(index)
+
+ P.finish()
+
+if __name__=='__main__':
+ main()
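+
+# Illustrative invocation (mirrors the test.%.generate rule in
+# ABITest/Makefile.test.common from this same commit; paths and indices are
+# only examples):
+#
+#   ./ABITestGen.py --no-unsigned --no-vector --no-complex --no-bool \
+#       -o inputs/test.0.a.c -T inputs/test.0.b.c -D inputs/test.0.driver.c \
+#       --min=0 --count=1
+#
+# This writes the generated functions to test.0.a.c, the calling tests to
+# test.0.b.c, and a main() driver to test.0.driver.c.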
+
diff --git a/clang/utils/ABITest/Enumeration.py b/clang/utils/ABITest/Enumeration.py
new file mode 100644
index 0000000..47e4702
--- /dev/null
+++ b/clang/utils/ABITest/Enumeration.py
@@ -0,0 +1,276 @@
+"""Utilities for enumeration of finite and countably infinite sets.
+"""
+###
+# Countable iteration
+
+# Simplifies some calculations
+class Aleph0(int):
+ _singleton = None
+ def __new__(type):
+ if type._singleton is None:
+ type._singleton = int.__new__(type)
+ return type._singleton
+ def __repr__(self): return '<aleph0>'
+ def __str__(self): return 'inf'
+
+ def __cmp__(self, b):
+ return 1
+
+ def __sub__(self, b):
+ raise ValueError,"Cannot subtract aleph0"
+ __rsub__ = __sub__
+
+ def __add__(self, b):
+ return self
+ __radd__ = __add__
+
+ def __mul__(self, b):
+ if b == 0: return b
+ return self
+ __rmul__ = __mul__
+
+ def __floordiv__(self, b):
+ if b == 0: raise ZeroDivisionError
+ return self
+ __rfloordiv__ = __floordiv__
+ __truediv__ = __floordiv__
+ __rtruediv__ = __floordiv__
+ __div__ = __floordiv__
+ __rdiv__ = __floordiv__
+
+ def __pow__(self, b):
+ if b == 0: return 1
+ return self
+aleph0 = Aleph0()
+
+def base(line):
+ return line*(line+1)//2
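+# base(line) is the triangular number 1 + 2 + ... + line, i.e. the number of
+# pairs lying on diagonals 0 .. line-1; e.g. base(3) == 6, so pair number 6 is
+# the first pair on the diagonal x + y == 3.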
+
+def pairToN((x,y)):
+ line,index = x+y,y
+ return base(line)+index
+
+def getNthPairInfo(N):
+ # Avoid various singularities
+ if N==0:
+ return (0,0)
+
+ # Gallop to find bounds for line
+ line = 1
+ next = 2
+ while base(next)<=N:
+ line = next
+ next = line << 1
+
+ # Binary search for starting line
+ lo = line
+ hi = line<<1
+ while lo + 1 != hi:
+ #assert base(lo) <= N < base(hi)
+ mid = (lo + hi)>>1
+ if base(mid)<=N:
+ lo = mid
+ else:
+ hi = mid
+
+ line = lo
+ return line, N - base(line)
+
+def getNthPair(N):
+ line,index = getNthPairInfo(N)
+ return (line - index, index)
+
+def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
+ """getNthPairBounded(N, W, H) -> (x, y)
+
+ Return the N-th pair such that 0 <= x < W and 0 <= y < H."""
+
+ if W <= 0 or H <= 0:
+ raise ValueError,"Invalid bounds"
+ elif N >= W*H:
+ raise ValueError,"Invalid input (out of bounds)"
+
+ # Simple case...
+ if W is aleph0 and H is aleph0:
+ return getNthPair(N)
+
+ # Otherwise simplify by assuming W < H
+ if H < W:
+ x,y = getNthPairBounded(N,H,W,useDivmod=useDivmod)
+ return y,x
+
+ if useDivmod:
+ return N%W,N//W
+ else:
+ # Conceptually we want to slide a diagonal line across a
+ # rectangle. This gives more interesting results for large
+ # bounds than using divmod.
+
+ # If in lower left, just return as usual
+ cornerSize = base(W)
+ if N < cornerSize:
+ return getNthPair(N)
+
+ # Otherwise if in upper right, subtract from corner
+ if H is not aleph0:
+ M = W*H - N - 1
+ if M < cornerSize:
+ x,y = getNthPair(M)
+ return (W-1-x,H-1-y)
+
+ # Otherwise, compile line and index from number of times we
+ # wrap.
+ N = N - cornerSize
+ index,offset = N%W,N//W
+ # p = (W-1, 1+offset) + (-1,1)*index
+ return (W-1-index, 1+offset+index)
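+# For illustration: with W == 3 and H unbounded, the sweep above enumerates
+# the lower-left corner first, (0,0), (1,0), (0,1), (2,0), (1,1), (0,2) for
+# N = 0..5, and then wraps within the strip 0 <= x < 3, giving (2,1), (1,2),
+# (0,3), (2,2), ... for N = 6, 7, 8, 9, ...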
+def getNthPairBoundedChecked(N,W=aleph0,H=aleph0,useDivmod=False,GNP=getNthPairBounded):
+ x,y = GNP(N,W,H,useDivmod)
+ assert 0 <= x < W and 0 <= y < H
+ return x,y
+
+def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
+ """getNthNTuple(N, W, H) -> (x_0, x_1, ..., x_W)
+
+ Return the N-th W-tuple, where for 0 <= x_i < H."""
+
+ if useLeftToRight:
+ elts = [None]*W
+ for i in range(W):
+ elts[i],N = getNthPairBounded(N, H)
+ return tuple(elts)
+ else:
+ if W==0:
+ return ()
+ elif W==1:
+ return (N,)
+ elif W==2:
+ return getNthPairBounded(N, H, H)
+ else:
+ LW,RW = W//2, W - (W//2)
+ L,R = getNthPairBounded(N, H**LW, H**RW)
+ return (getNthNTuple(L,LW,H=H,useLeftToRight=useLeftToRight) +
+ getNthNTuple(R,RW,H=H,useLeftToRight=useLeftToRight))
+def getNthNTupleChecked(N, W, H=aleph0, useLeftToRight=False, GNT=getNthNTuple):
+ t = GNT(N,W,H,useLeftToRight)
+ assert len(t) == W
+ for i in t:
+ assert i < H
+ return t
+
+def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False):
+ """getNthTuple(N, maxSize, maxElement) -> x
+
+ Return the N-th tuple where len(x) < maxSize and for y in x, 0 <=
+ y < maxElement."""
+
+ # All zero sized tuples are isomorphic, don't ya know.
+ if N == 0:
+ return ()
+ N -= 1
+ if maxElement is not aleph0:
+ if maxSize is aleph0:
+ raise NotImplementedError,'Max element size without max size unhandled'
+ bounds = [maxElement**i for i in range(1, maxSize+1)]
+ S,M = getNthPairVariableBounds(N, bounds)
+ else:
+ S,M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
+ return getNthNTuple(M, S+1, maxElement, useLeftToRight=useLeftToRight)
+def getNthTupleChecked(N, maxSize=aleph0, maxElement=aleph0,
+ useDivmod=False, useLeftToRight=False, GNT=getNthTuple):
+ # FIXME: maxsize is inclusive
+ t = GNT(N,maxSize,maxElement,useDivmod,useLeftToRight)
+ assert len(t) <= maxSize
+ for i in t:
+ assert i < maxElement
+ return t
+
+def getNthPairVariableBounds(N, bounds):
+ """getNthPairVariableBounds(N, bounds) -> (x, y)
+
+ Given a finite list of bounds (which may be finite or aleph0),
+ return the N-th pair such that 0 <= x < len(bounds) and 0 <= y <
+ bounds[x]."""
+
+ if not bounds:
+ raise ValueError,"Invalid bounds"
+ if not (0 <= N < sum(bounds)):
+ raise ValueError,"Invalid input (out of bounds)"
+
+ level = 0
+ active = range(len(bounds))
+ active.sort(key=lambda i: bounds[i])
+ prevLevel = 0
+ for i,index in enumerate(active):
+ level = bounds[index]
+ W = len(active) - i
+ if level is aleph0:
+ H = aleph0
+ else:
+ H = level - prevLevel
+ levelSize = W*H
+ if N<levelSize: # Found the level
+ idelta,delta = getNthPairBounded(N, W, H)
+ return active[i+idelta],prevLevel+delta
+ else:
+ N -= levelSize
+ prevLevel = level
+ else:
+ raise RuntimeError,"Unexpected loop completion"
+
+def getNthPairVariableBoundsChecked(N, bounds, GNVP=getNthPairVariableBounds):
+ x,y = GNVP(N,bounds)
+ assert 0 <= x < len(bounds) and 0 <= y < bounds[x]
+ return (x,y)
+
+###
+
+def testPairs():
+ W = 3
+ H = 6
+ a = [[' ' for x in range(10)] for y in range(10)]
+ b = [[' ' for x in range(10)] for y in range(10)]
+ for i in range(min(W*H,40)):
+ x,y = getNthPairBounded(i,W,H)
+ x2,y2 = getNthPairBounded(i,W,H,useDivmod=True)
+ print i,(x,y),(x2,y2)
+ a[y][x] = '%2d'%i
+ b[y2][x2] = '%2d'%i
+
+ print '-- a --'
+ for ln in a[::-1]:
+ if ''.join(ln).strip():
+ print ' '.join(ln)
+ print '-- b --'
+ for ln in b[::-1]:
+ if ''.join(ln).strip():
+ print ' '.join(ln)
+
+def testPairsVB():
+ bounds = [2,2,4,aleph0,5,aleph0]
+ a = [[' ' for x in range(15)] for y in range(15)]
+ b = [[' ' for x in range(15)] for y in range(15)]
+ for i in range(min(sum(bounds),40)):
+ x,y = getNthPairVariableBounds(i, bounds)
+ print i,(x,y)
+ a[y][x] = '%2d'%i
+
+ print '-- a --'
+ for ln in a[::-1]:
+ if ''.join(ln).strip():
+ print ' '.join(ln)
+
+###
+
+# Toggle to use checked versions of enumeration routines.
+if False:
+ getNthPairVariableBounds = getNthPairVariableBoundsChecked
+ getNthPairBounded = getNthPairBoundedChecked
+ getNthNTuple = getNthNTupleChecked
+ getNthTuple = getNthTupleChecked
+
+if __name__ == '__main__':
+ testPairs()
+
+ testPairsVB()
+
diff --git a/clang/utils/ABITest/Makefile.test.common b/clang/utils/ABITest/Makefile.test.common
new file mode 100644
index 0000000..3c208ad
--- /dev/null
+++ b/clang/utils/ABITest/Makefile.test.common
@@ -0,0 +1,170 @@
+# -*- Makefile -*-
+
+# Usage: make test.N.report
+#
+# COUNT can be over-ridden to change the number of tests generated per
+# file, and TESTARGS is used to change the type generation. Make sure
+# to 'make clean' after changing either of these parameters.
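+#
+# For example (illustrative values only; remember to 'make clean' first):
+#   make COUNT=25 test.0.report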
+
+TESTARGS := --no-unsigned --no-vector --no-complex --no-bool
+
+COUNT := 1
+TIMEOUT := 5
+
+CFLAGS := -std=gnu99
+
+X_COMPILER := gcc
+X_LL_CFLAGS := -emit-llvm -S
+Y_COMPILER := clang
+Y_LL_CFLAGS := -emit-llvm -S
+CC := gcc
+
+###
+
+ABITESTGEN := ../ABITestGen.py
+
+ifndef VERBOSE
+ Verb := @
+endif
+
+.PHONY: test.%.report
+test.%.report: temps/test.%.xx.diff temps/test.%.xy.diff temps/test.%.yx.diff temps/test.%.yy.diff
+ @ok=1;\
+ for t in $^; do \
+ if [ -s $$t ]; then \
+ echo "TEST $*: $$t failed"; \
+ ok=0;\
+ fi; \
+ done; \
+ if [ $$ok -eq 1 ]; then \
+ true; \
+ else \
+ false; \
+ fi
+
+
+.PHONY: test.%.defs-report
+test.%.defs-report: temps/test.%.defs.diff
+ @for t in $^; do \
+ if [ -s $$t ]; then \
+ echo "TEST $*: $$t failed"; \
+ cat $$t; \
+ fi; \
+ done
+
+.PHONY: test.%.build
+test.%.build: temps/test.%.ref temps/test.%.xx temps/test.%.xy temps/test.%.yx temps/test.%.yy temps/test.%.x.defs temps/test.%.y.defs
+ @true
+
+###
+
+# Diffs and output
+
+.PRECIOUS: temps/.dir
+
+.PRECIOUS: temps/test.%.xx.diff
+temps/test.%.xx.diff: temps/test.%.ref.out temps/test.%.xx.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.xy.diff
+temps/test.%.xy.diff: temps/test.%.ref.out temps/test.%.xy.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.yx.diff
+temps/test.%.yx.diff: temps/test.%.ref.out temps/test.%.yx.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.yy.diff
+temps/test.%.yy.diff: temps/test.%.ref.out temps/test.%.yy.out
+ $(Verb) diff $^ > $@ || true
+.PRECIOUS: temps/test.%.defs.diff
+temps/test.%.defs.diff: temps/test.%.x.defs temps/test.%.y.defs
+ $(Verb) zipdifflines \
+ --replace "%struct.T[0-9]+" "%struct.s" \
+ --replace "%union.T[0-9]+" "%struct.s" \
+ --replace "byval align [0-9]+" "byval" \
+ $^ > $@
+
+.PRECIOUS: temps/test.%.out
+temps/test.%.out: temps/test.%
+ -$(Verb) ./$< > $@
+
+# Executables
+
+.PRECIOUS: temps/test.%.ref
+temps/test.%.ref: temps/test.%.driver.ref.o temps/test.%.a.ref.o temps/test.%.b.ref.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.xx
+temps/test.%.xx: temps/test.%.driver.ref.o temps/test.%.a.x.o temps/test.%.b.x.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.xy
+temps/test.%.xy: temps/test.%.driver.ref.o temps/test.%.a.x.o temps/test.%.b.y.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.yx
+temps/test.%.yx: temps/test.%.driver.ref.o temps/test.%.a.y.o temps/test.%.b.x.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+.PRECIOUS: temps/test.%.yy
+temps/test.%.yy: temps/test.%.driver.ref.o temps/test.%.a.y.o temps/test.%.b.y.o
+ $(Verb) $(CC) $(CFLAGS) $(CC_CFLAGS) -O3 -o $@ $^
+
+# Object files
+
+.PRECIOUS: temps/test.%.ref.o
+temps/test.%.ref.o: inputs/test.%.c temps/.dir
+ $(Verb) $(CC) -c $(CFLAGS) $(CC_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.x.o
+temps/test.%.x.o: inputs/test.%.c temps/.dir
+ $(Verb) $(X_COMPILER) -c $(CFLAGS) $(X_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.y.o
+temps/test.%.y.o: inputs/test.%.c temps/.dir
+ $(Verb) $(Y_COMPILER) -c $(CFLAGS) $(Y_CFLAGS) -o $@ $<
+
+.PRECIOUS: temps/test.%.x.defs
+temps/test.%.x.defs: temps/test.%.a.x.ll temps/.dir
+ -$(Verb) grep '^define ' $< > $@
+.PRECIOUS: temps/test.%.y.defs
+temps/test.%.y.defs: temps/test.%.a.y.ll temps/.dir
+ -$(Verb) grep '^define ' $< > $@
+
+.PRECIOUS: temps/test.%.a.x.ll
+temps/test.%.a.x.ll: inputs/test.%.a.c temps/.dir
+ $(Verb) $(X_COMPILER) $(CFLAGS) $(X_LL_CFLAGS) $(X_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.b.x.ll
+temps/test.%.b.x.ll: inputs/test.%.b.c temps/.dir
+ $(Verb) $(X_COMPILER) $(CFLAGS) $(X_LL_CFLAGS) $(X_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.a.y.ll
+temps/test.%.a.y.ll: inputs/test.%.a.c temps/.dir
+ $(Verb) $(Y_COMPILER) $(CFLAGS) $(Y_LL_CFLAGS) $(Y_CFLAGS) -o $@ $<
+.PRECIOUS: temps/test.%.b.y.ll
+temps/test.%.b.y.ll: inputs/test.%.b.c temps/.dir
+ $(Verb) $(Y_COMPILER) $(CFLAGS) $(Y_LL_CFLAGS) $(Y_CFLAGS) -o $@ $<
+
+# Input generation
+
+.PHONY: test.%.top
+test.%.top: inputs/test.%.a.c inputs/test.%.b.c inputs/test.%.driver.c
+ @true
+
+.PRECIOUS: inputs/test.%.a.c inputs/test.%.b.c inputs/test.%.driver.c
+inputs/test.%.a.c: test.%.generate
+ @true
+inputs/test.%.b.c: test.%.generate
+ @true
+inputs/test.%.driver.c: test.%.generate
+ @true
+
+.PHONY: test.%.generate
+.PRECIOUS: inputs/.dir
+test.%.generate: $(ABITESTGEN) inputs/.dir
+ $(Verb) $(ABITESTGEN) $(TESTARGS) -o inputs/test.$*.a.c -T inputs/test.$*.b.c -D inputs/test.$*.driver.c --min=$(shell expr $* '*' $(COUNT)) --count=$(COUNT)
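+# With the default COUNT of 1, 'make test.3.generate' expands (roughly) to:
+#   ../ABITestGen.py --no-unsigned --no-vector --no-complex --no-bool \
+#       -o inputs/test.3.a.c -T inputs/test.3.b.c -D inputs/test.3.driver.c \
+#       --min=3 --count=1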
+
+# Cleaning
+
+clean-temps:
+ $(Verb) rm -rf temps
+
+clean:
+ $(Verb) rm -rf temps inputs
+
+# Etc.
+
+%/.dir:
+ $(Verb) mkdir -p $* > /dev/null
+ $(Verb) $(DATE) > $@
diff --git a/clang/utils/ABITest/TypeGen.py b/clang/utils/ABITest/TypeGen.py
new file mode 100644
index 0000000..7a99d62
--- /dev/null
+++ b/clang/utils/ABITest/TypeGen.py
@@ -0,0 +1,471 @@
+"""Flexible enumeration of C types."""
+
+from Enumeration import *
+
+# TODO:
+
+# - struct improvements (flexible arrays, packed &
+# unpacked, alignment)
+# - objective-c qualified id
+# - anonymous / transparent unions
+# - VLAs
+# - block types
+# - K&R functions
+# - pass arguments of different types (test extension, transparent union)
+# - varargs
+
+###
+# Actual type types
+
+class Type:
+ def isBitField(self):
+ return False
+
+ def isPaddingBitField(self):
+ return False
+
+ def getTypeName(self, printer):
+ name = 'T%d' % len(printer.types)
+ typedef = self.getTypedefDef(name, printer)
+ printer.addDeclaration(typedef)
+ return name
+
+class BuiltinType(Type):
+ def __init__(self, name, size, bitFieldSize=None):
+ self.name = name
+ self.size = size
+ self.bitFieldSize = bitFieldSize
+
+ def isBitField(self):
+ return self.bitFieldSize is not None
+
+ def isPaddingBitField(self):
+ return self.bitFieldSize is 0
+
+ def getBitFieldSize(self):
+ assert self.isBitField()
+ return self.bitFieldSize
+
+ def getTypeName(self, printer):
+ return self.name
+
+ def sizeof(self):
+ return self.size
+
+ def __str__(self):
+ return self.name
+
+class EnumType(Type):
+ def __init__(self, index, enumerators):
+ self.index = index
+ self.enumerators = enumerators
+
+ def getEnumerators(self):
+ result = ''
+ for i, init in enumerate(self.enumerators):
+ if i > 0:
+ result = result + ', '
+ result = result + 'enum%dval%d' % (self.index, i)
+ if init:
+ result = result + ' = %s' % (init)
+
+ return result
+
+ def __str__(self):
+ return 'enum { %s }' % (self.getEnumerators())
+
+ def getTypedefDef(self, name, printer):
+ return 'typedef enum %s { %s } %s;'%(name, self.getEnumerators(), name)
+
+class RecordType(Type):
+ def __init__(self, index, isUnion, fields):
+ self.index = index
+ self.isUnion = isUnion
+ self.fields = fields
+ self.name = None
+
+ def __str__(self):
+ def getField(t):
+ if t.isBitField():
+ return "%s : %d;" % (t, t.getBitFieldSize())
+ else:
+ return "%s;" % t
+
+ return '%s { %s }'%(('struct','union')[self.isUnion],
+ ' '.join(map(getField, self.fields)))
+
+ def getTypedefDef(self, name, printer):
+ def getField((i, t)):
+ if t.isBitField():
+ if t.isPaddingBitField():
+ return '%s : 0;'%(printer.getTypeName(t),)
+ else:
+ return '%s field%d : %d;'%(printer.getTypeName(t),i,
+ t.getBitFieldSize())
+ else:
+ return '%s field%d;'%(printer.getTypeName(t),i)
+ fields = map(getField, enumerate(self.fields))
+ # Name the struct for more readable LLVM IR.
+ return 'typedef %s %s { %s } %s;'%(('struct','union')[self.isUnion],
+ name, ' '.join(fields), name)
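+ # For example, a record with an int field and a 4-bit char bit-field would
+ # be emitted (for a printer-assigned name T3) as:
+ #   typedef struct T3 { int field0; char field1 : 4; } T3;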
+
+class ArrayType(Type):
+ def __init__(self, index, isVector, elementType, size):
+ if isVector:
+ # Note that for vectors, this is the size in bytes.
+ assert size > 0
+ else:
+ assert size is None or size >= 0
+ self.index = index
+ self.isVector = isVector
+ self.elementType = elementType
+ self.size = size
+ if isVector:
+ eltSize = self.elementType.sizeof()
+ assert not (self.size % eltSize)
+ self.numElements = self.size // eltSize
+ else:
+ self.numElements = self.size
+
+ def __str__(self):
+ if self.isVector:
+ return 'vector (%s)[%d]'%(self.elementType,self.size)
+ elif self.size is not None:
+ return '(%s)[%d]'%(self.elementType,self.size)
+ else:
+ return '(%s)[]'%(self.elementType,)
+
+ def getTypedefDef(self, name, printer):
+ elementName = printer.getTypeName(self.elementType)
+ if self.isVector:
+ return 'typedef %s %s __attribute__ ((vector_size (%d)));'%(elementName,
+ name,
+ self.size)
+ else:
+ if self.size is None:
+ sizeStr = ''
+ else:
+ sizeStr = str(self.size)
+ return 'typedef %s %s[%s];'%(elementName, name, sizeStr)
+
+class ComplexType(Type):
+ def __init__(self, index, elementType):
+ self.index = index
+ self.elementType = elementType
+
+ def __str__(self):
+ return '_Complex (%s)'%(self.elementType)
+
+ def getTypedefDef(self, name, printer):
+ return 'typedef _Complex %s %s;'%(printer.getTypeName(self.elementType), name)
+
+class FunctionType(Type):
+ def __init__(self, index, returnType, argTypes):
+ self.index = index
+ self.returnType = returnType
+ self.argTypes = argTypes
+
+ def __str__(self):
+ if self.returnType is None:
+ rt = 'void'
+ else:
+ rt = str(self.returnType)
+ if not self.argTypes:
+ at = 'void'
+ else:
+ at = ', '.join(map(str, self.argTypes))
+ return '%s (*)(%s)'%(rt, at)
+
+ def getTypedefDef(self, name, printer):
+ if self.returnType is None:
+ rt = 'void'
+ else:
+ rt = str(self.returnType)
+ if not self.argTypes:
+ at = 'void'
+ else:
+ at = ', '.join(map(str, self.argTypes))
+ return 'typedef %s (*%s)(%s);'%(rt, name, at)
+
+###
+# Type enumerators
+
+class TypeGenerator(object):
+ def __init__(self):
+ self.cache = {}
+
+ def setCardinality(self):
+ abstract
+
+ def get(self, N):
+ T = self.cache.get(N)
+ if T is None:
+ assert 0 <= N < self.cardinality
+ T = self.cache[N] = self.generateType(N)
+ return T
+
+ def generateType(self, N):
+ abstract
+
+class FixedTypeGenerator(TypeGenerator):
+ def __init__(self, types):
+ TypeGenerator.__init__(self)
+ self.types = types
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = len(self.types)
+
+ def generateType(self, N):
+ return self.types[N]
+
+# Factorial
+def fact(n):
+ result = 1
+ while n > 0:
+ result = result * n
+ n = n - 1
+ return result
+
+# Compute the number of combinations (n choose k)
+def num_combinations(n, k):
+ return fact(n) / (fact(k) * fact(n - k))
+
+# Enumerate the combinations choosing k elements from the list of values
+def combinations(values, k):
+ # From ActiveState Recipe 190465: Generator for permutations,
+ # combinations, selections of a sequence
+ if k==0: yield []
+ else:
+ for i in xrange(len(values)-k+1):
+ for cc in combinations(values[i+1:],k-1):
+ yield [values[i]]+cc
+
+class EnumTypeGenerator(TypeGenerator):
+ def __init__(self, values, minEnumerators, maxEnumerators):
+ TypeGenerator.__init__(self)
+ self.values = values
+ self.minEnumerators = minEnumerators
+ self.maxEnumerators = maxEnumerators
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = 0
+ for num in range(self.minEnumerators, self.maxEnumerators + 1):
+ self.cardinality += num_combinations(len(self.values), num)
+
+ def generateType(self, n):
+ # Figure out the number of enumerators in this type
+ numEnumerators = self.minEnumerators
+ valuesCovered = 0
+ while numEnumerators < self.maxEnumerators:
+ comb = num_combinations(len(self.values), numEnumerators)
+ if valuesCovered + comb > n:
+ break
+ numEnumerators = numEnumerators + 1
+ valuesCovered += comb
+
+ # Find the requested combination of enumerators and build a
+ # type from it.
+ i = 0
+ for enumerators in combinations(self.values, numEnumerators):
+ if i == n - valuesCovered:
+ return EnumType(n, enumerators)
+
+ i = i + 1
+
+ assert False
+
+class ComplexTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = self.typeGen.cardinality
+
+ def generateType(self, N):
+ return ComplexType(N, self.typeGen.get(N))
+
+class VectorTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, sizes):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.sizes = tuple(map(int,sizes))
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = len(self.sizes)*self.typeGen.cardinality
+
+ def generateType(self, N):
+ S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+ return ArrayType(N, True, self.typeGen.get(T), self.sizes[S])
+
+class FixedArrayTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, sizes):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.sizes = tuple(sizes)
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = len(self.sizes)*self.typeGen.cardinality
+
+ def generateType(self, N):
+ S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+ return ArrayType(N, False, self.typeGen.get(T), self.sizes[S])
+
+class ArrayTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, maxSize, useIncomplete=False, useZero=False):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.useIncomplete = useIncomplete
+ self.useZero = useZero
+ self.maxSize = int(maxSize)
+ self.W = useIncomplete + useZero + self.maxSize
+ self.setCardinality()
+
+ def setCardinality(self):
+ self.cardinality = self.W * self.typeGen.cardinality
+
+ def generateType(self, N):
+ S,T = getNthPairBounded(N, self.W, self.typeGen.cardinality)
+ if self.useIncomplete:
+ if S==0:
+ size = None
+ S = None
+ else:
+ S = S - 1
+ if S is not None:
+ if self.useZero:
+ size = S
+ else:
+ size = S + 1
+ return ArrayType(N, False, self.typeGen.get(T), size)
+
+class RecordTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, useUnion, maxSize):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.useUnion = bool(useUnion)
+ self.maxSize = int(maxSize)
+ self.setCardinality()
+
+ def setCardinality(self):
+ M = 1 + self.useUnion
+ if self.maxSize is aleph0:
+ S = aleph0 * self.typeGen.cardinality
+ else:
+ S = 0
+ for i in range(self.maxSize+1):
+ S += M * (self.typeGen.cardinality ** i)
+ self.cardinality = S
+
+ def generateType(self, N):
+ isUnion,I = False,N
+ if self.useUnion:
+ isUnion,I = (I&1),I>>1
+ fields = map(self.typeGen.get,getNthTuple(I,self.maxSize,self.typeGen.cardinality))
+ return RecordType(N, isUnion, fields)
+
+class FunctionTypeGenerator(TypeGenerator):
+ def __init__(self, typeGen, useReturn, maxSize):
+ TypeGenerator.__init__(self)
+ self.typeGen = typeGen
+ self.useReturn = useReturn
+ self.maxSize = maxSize
+ self.setCardinality()
+
+ def setCardinality(self):
+ if self.maxSize is aleph0:
+ S = aleph0 * self.typeGen.cardinality
+ elif self.useReturn:
+ S = 0
+ for i in range(1,self.maxSize+1+1):
+ S += self.typeGen.cardinality ** i
+ else:
+ S = 0
+ for i in range(self.maxSize+1):
+ S += self.typeGen.cardinality ** i
+ self.cardinality = S
+
+ def generateType(self, N):
+ if self.useReturn:
+ # Skip the empty tuple
+ argIndices = getNthTuple(N+1, self.maxSize+1, self.typeGen.cardinality)
+ retIndex,argIndices = argIndices[0],argIndices[1:]
+ retTy = self.typeGen.get(retIndex)
+ else:
+ retTy = None
+ argIndices = getNthTuple(N, self.maxSize, self.typeGen.cardinality)
+ args = map(self.typeGen.get, argIndices)
+ return FunctionType(N, retTy, args)
+
+class AnyTypeGenerator(TypeGenerator):
+ def __init__(self):
+ TypeGenerator.__init__(self)
+ self.generators = []
+ self.bounds = []
+ self.setCardinality()
+ self._cardinality = None
+
+ def getCardinality(self):
+ if self._cardinality is None:
+ return aleph0
+ else:
+ return self._cardinality
+ def setCardinality(self):
+ self.bounds = [g.cardinality for g in self.generators]
+ self._cardinality = sum(self.bounds)
+ cardinality = property(getCardinality, None)
+
+ def addGenerator(self, g):
+ self.generators.append(g)
+ for i in range(100):
+ prev = self._cardinality
+ self._cardinality = None
+ for g in self.generators:
+ g.setCardinality()
+ self.setCardinality()
+ if (self._cardinality is aleph0) or prev==self._cardinality:
+ break
+ else:
+ raise RuntimeError,"Infinite loop in setting cardinality"
+
+ def generateType(self, N):
+ index,M = getNthPairVariableBounds(N, self.bounds)
+ return self.generators[index].get(M)
+
+def test():
+ fbtg = FixedTypeGenerator([BuiltinType('char', 4),
+ BuiltinType('char', 4, 0),
+ BuiltinType('int', 4, 5)])
+
+ fields1 = AnyTypeGenerator()
+ fields1.addGenerator( fbtg )
+
+ fields0 = AnyTypeGenerator()
+ fields0.addGenerator( fbtg )
+# fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) )
+
+ btg = FixedTypeGenerator([BuiltinType('char', 4),
+ BuiltinType('int', 4)])
+ etg = EnumTypeGenerator([None, '-1', '1', '1u'], 0, 3)
+
+ atg = AnyTypeGenerator()
+ atg.addGenerator( btg )
+ atg.addGenerator( RecordTypeGenerator(fields0, False, 4) )
+ atg.addGenerator( etg )
+ print 'Cardinality:',atg.cardinality
+ for i in range(100):
+ if i == atg.cardinality:
+ try:
+ atg.get(i)
+ raise RuntimeError,"Cardinality was wrong"
+ except AssertionError:
+ break
+ print '%4d: %s'%(i, atg.get(i))
+
+if __name__ == '__main__':
+ test()
diff --git a/clang/utils/ABITest/build-and-summarize-all.sh b/clang/utils/ABITest/build-and-summarize-all.sh
new file mode 100755
index 0000000..23e34a4
--- /dev/null
+++ b/clang/utils/ABITest/build-and-summarize-all.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+for bits in 32 64; do
+ for kind in return-types single-args; do
+ echo "-- $kind-$bits --"
+ (cd $kind-$bits && ../build-and-summarize.sh $1)
+ done
+done
diff --git a/clang/utils/ABITest/build-and-summarize.sh b/clang/utils/ABITest/build-and-summarize.sh
new file mode 100755
index 0000000..602728b
--- /dev/null
+++ b/clang/utils/ABITest/build-and-summarize.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+dir=$(dirname $0)
+$dir/build.sh $1 &> /dev/null || true
+../summarize.sh $1 &> fails-x.txt
+cat fails-x.txt
+wc -l fails-x.txt
diff --git a/clang/utils/ABITest/build.sh b/clang/utils/ABITest/build.sh
new file mode 100755
index 0000000..a50d14a
--- /dev/null
+++ b/clang/utils/ABITest/build.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+CPUS=2
+make -j $CPUS \
+ $(for i in $(seq 0 $1); do echo test.$i.report; done) -k
diff --git a/clang/utils/ABITest/layout/Makefile b/clang/utils/ABITest/layout/Makefile
new file mode 100644
index 0000000..0520625
--- /dev/null
+++ b/clang/utils/ABITest/layout/Makefile
@@ -0,0 +1,68 @@
+# Usage: make test.N.report
+#
+# COUNT can be over-ridden to change the number of tests generated per
+# file, and TESTARGS is used to change the type generation. Make sure
+# to 'make clean' after changing either of these parameters.
+
+ABITESTGEN := ../ABITestGen.py
+TESTARGS := --max-args 0 --test-layout
+COUNT := 1000
+TIMEOUT := 5
+
+CFLAGS := -std=gnu99
+
+X_COMPILER := llvm-gcc
+Y_COMPILER := clang
+CC := gcc
+
+ifeq (0, 0)
+X_CFLAGS := -m32
+Y_CFLAGS := -m32
+CC_CFLAGS := -m32
+else
+X_CFLAGS := -m64
+Y_CFLAGS := -m64
+CC_CFLAGS := -m64
+endif
+
+.PHONY: test.%.report
+test.%.report: test.%.x.diff test.%.y.diff
+ @for t in $^; do \
+ if [ -s $$t ]; then \
+ echo "TEST $*: $$t failed"; \
+ fi; \
+ done
+
+.PHONY: test.%.build
+test.%.build: test.%.ref test.%.x test.%.y
+ @true
+
+###
+
+.PRECIOUS: test.%.x.diff
+test.%.x.diff: test.%.ref.out test.%.x.out
+ -diff $^ > $@
+.PRECIOUS: test.%.y.diff
+test.%.y.diff: test.%.ref.out test.%.y.out
+ -diff $^ > $@
+
+.PRECIOUS: test.%.out
+test.%.out: test.%
+ -./$< > $@
+
+.PRECIOUS: test.%.ref
+test.%.ref: test.%.c
+ $(CC) $(CFLAGS) $(CC_CFLAGS) -o $@ $^
+.PRECIOUS: test.%.x
+test.%.x: test.%.c
+ $(X_COMPILER) $(CFLAGS) $(X_CFLAGS) -o $@ $^
+.PRECIOUS: test.%.y
+test.%.y: test.%.c
+ $(Y_COMPILER) $(CFLAGS) $(Y_CFLAGS) -o $@ $^
+
+.PRECIOUS: test.%.c
+test.%.c: $(ABITESTGEN)
+ $(ABITESTGEN) $(TESTARGS) -o $@ --min=$(shell expr $* '*' $(COUNT)) --count=$(COUNT)
+
+clean:
+ rm -f test.* *~
diff --git a/clang/utils/ABITest/return-types-32/Makefile b/clang/utils/ABITest/return-types-32/Makefile
new file mode 100644
index 0000000..df1c53f
--- /dev/null
+++ b/clang/utils/ABITest/return-types-32/Makefile
@@ -0,0 +1,7 @@
+X_CFLAGS := -m32
+Y_CFLAGS := -m32
+CC_CFLAGS := -m32
+
+include ../Makefile.test.common
+
+TESTARGS += --max-args 0
diff --git a/clang/utils/ABITest/return-types-64/Makefile b/clang/utils/ABITest/return-types-64/Makefile
new file mode 100644
index 0000000..9616e45
--- /dev/null
+++ b/clang/utils/ABITest/return-types-64/Makefile
@@ -0,0 +1,7 @@
+X_CFLAGS := -m64
+Y_CFLAGS := -m64
+CC_CFLAGS := -m64
+
+include ../Makefile.test.common
+
+TESTARGS += --max-args 0
diff --git a/clang/utils/ABITest/single-args-32/Makefile b/clang/utils/ABITest/single-args-32/Makefile
new file mode 100644
index 0000000..9ff417f
--- /dev/null
+++ b/clang/utils/ABITest/single-args-32/Makefile
@@ -0,0 +1,7 @@
+X_CFLAGS := -m32
+Y_CFLAGS := -m32
+CC_CFLAGS := -m32
+
+include ../Makefile.test.common
+
+TESTARGS += --no-function-return --max-args 1
diff --git a/clang/utils/ABITest/single-args-64/Makefile b/clang/utils/ABITest/single-args-64/Makefile
new file mode 100644
index 0000000..b8acb70
--- /dev/null
+++ b/clang/utils/ABITest/single-args-64/Makefile
@@ -0,0 +1,13 @@
+# Usage: make test.N.report
+#
+# COUNT can be overridden to change the number of tests generated per
+# file, and TESTARGS is used to change the type generation. Make sure
+# to 'make clean' after changing either of these parameters.
+
+X_CFLAGS := -m64
+Y_CFLAGS := -m64
+CC_CFLAGS := -m64
+
+include ../Makefile.test.common
+
+TESTARGS += --no-function-return --max-args 1
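Each variant directory only overrides the word size and the generator arguments; the shared test.%.c rule (shown in the layout Makefile above, and presumably mirrored in Makefile.test.common) slices the generated test space into COUNT-sized chunks. As an illustration, a hypothetical helper that reproduces the command that rule expands to for a given index N, using the defaults from the layout Makefile and this directory's TESTARGS:

    # Hypothetical helper mirroring the test.%.c rule: test.N.c covers the
    # generated indices [N*COUNT, (N+1)*COUNT).
    COUNT = 1000
    TESTARGS = ["--no-function-return", "--max-args", "1"]

    def gen_command(n, count=COUNT, testargs=TESTARGS):
        return (["../ABITestGen.py"] + list(testargs) +
                ["-o", "test.%d.c" % n,
                 "--min=%d" % (n * count), "--count=%d" % count])

    print(gen_command(3))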
diff --git a/clang/utils/ABITest/summarize.sh b/clang/utils/ABITest/summarize.sh
new file mode 100755
index 0000000..3efb52b
--- /dev/null
+++ b/clang/utils/ABITest/summarize.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -eu
+
+if [ $# != 1 ]; then
+ echo "usage: $0 <num-tests>"
+ exit 1
+fi
+
+for i in $(seq 0 $1); do
+ if (! make test.$i.report &> /dev/null); then
+ echo "FAIL: $i";
+ fi;
+done
+
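summarize.sh simply replays make target by target and reports the indices whose report step fails. The loop below is an equivalent sketch in Python, assuming the same test.N.report targets; it is a hypothetical helper, not part of the patch:

    # Illustrative equivalent of summarize.sh's loop: rerun each report
    # target quietly and collect the indices that fail.
    import os
    import subprocess

    def summarize(num_tests):
        failures = []
        with open(os.devnull, "w") as devnull:
            for i in range(num_tests + 1):
                rc = subprocess.call(["make", "test.%d.report" % i],
                                     stdout=devnull, stderr=devnull)
                if rc != 0:
                    print("FAIL: %d" % i)
                    failures.append(i)
        return failures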
diff --git a/clang/utils/C++Tests/Clang-Code-Compile/lit.local.cfg b/clang/utils/C++Tests/Clang-Code-Compile/lit.local.cfg
new file mode 100644
index 0000000..59d3466
--- /dev/null
+++ b/clang/utils/C++Tests/Clang-Code-Compile/lit.local.cfg
@@ -0,0 +1,26 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+cxxflags = ['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-Wno-sign-compare',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root,
+ '-I%s/tools/clang/include' % root.llvm_src_root,
+ '-I%s/tools/clang/include' % root.llvm_obj_root]
+config.test_format = \
+ lit.formats.OneCommandPerFileTest(command=[root.clang, '-emit-llvm', '-c',
+ '-o', '/dev/null'] + cxxflags,
+ dir='%s/tools/clang/lib' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.cpp)$')
+
diff --git a/clang/utils/C++Tests/Clang-Code-Syntax/lit.local.cfg b/clang/utils/C++Tests/Clang-Code-Syntax/lit.local.cfg
new file mode 100644
index 0000000..8f00c8d
--- /dev/null
+++ b/clang/utils/C++Tests/Clang-Code-Syntax/lit.local.cfg
@@ -0,0 +1,25 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+cxxflags = ['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-Wno-sign-compare',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root,
+ '-I%s/tools/clang/include' % root.llvm_src_root,
+ '-I%s/tools/clang/include' % root.llvm_obj_root]
+config.test_format = \
+ lit.formats.OneCommandPerFileTest(command=[root.clang,
+ '-fsyntax-only'] + cxxflags,
+ dir='%s/tools/clang/lib' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.cpp)$')
diff --git a/clang/utils/C++Tests/Clang-Syntax/lit.local.cfg b/clang/utils/C++Tests/Clang-Syntax/lit.local.cfg
new file mode 100644
index 0000000..89fdd8e
--- /dev/null
+++ b/clang/utils/C++Tests/Clang-Syntax/lit.local.cfg
@@ -0,0 +1,24 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+config.test_format = lit.formats.SyntaxCheckTest(compiler=root.clang,
+ dir='%s/tools/clang/include/clang' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.h)$',
+ extra_cxx_args=['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-Wno-sign-compare',
+ '-Werror',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root,
+ '-I%s/tools/clang/include' % root.llvm_src_root,
+ '-I%s/tools/clang/include' % root.llvm_obj_root])
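The SyntaxCheckTest format treats every header matching the pattern as its own test and checks that it parses standalone with the configured compiler. The sketch below is only a guess at that behaviour for illustration (lit's real implementation lives in its formats module and differs in detail): wrap each header in a one-line translation unit and run -fsyntax-only on it.

    # Rough illustration of a standalone-header syntax check; this is an
    # assumption about what SyntaxCheckTest does, not lit's actual code.
    import os
    import re
    import subprocess
    import tempfile

    def check_headers(compiler, directory, pattern, extra_args=()):
        failed = []
        rx = re.compile(pattern)
        for dirpath, _, filenames in os.walk(directory):
            for name in filenames:
                if not rx.match(name):
                    continue
                header = os.path.join(dirpath, name)
                with tempfile.NamedTemporaryFile(mode="w", suffix=".cpp") as tu:
                    tu.write('#include "%s"\n' % header)
                    tu.flush()
                    cmd = ([compiler, "-fsyntax-only"] + list(extra_args) +
                           ["-x", "c++", tu.name])
                    if subprocess.call(cmd) != 0:
                        failed.append(header)
        return failed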
diff --git a/clang/utils/C++Tests/LLVM-Code-Compile/lit.local.cfg b/clang/utils/C++Tests/LLVM-Code-Compile/lit.local.cfg
new file mode 100644
index 0000000..c1ac6a9
--- /dev/null
+++ b/clang/utils/C++Tests/LLVM-Code-Compile/lit.local.cfg
@@ -0,0 +1,48 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+target_obj_root = root.llvm_obj_root
+cxxflags = ['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-Wno-sign-compare',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root,
+ '-I%s/lib/Target/ARM' % root.llvm_src_root,
+ '-I%s/lib/Target/CellSPU' % root.llvm_src_root,
+ '-I%s/lib/Target/CppBackend' % root.llvm_src_root,
+ '-I%s/lib/Target/Mips' % root.llvm_src_root,
+ '-I%s/lib/Target/MSIL' % root.llvm_src_root,
+ '-I%s/lib/Target/MSP430' % root.llvm_src_root,
+ '-I%s/lib/Target/PIC16' % root.llvm_src_root,
+ '-I%s/lib/Target/PowerPC' % root.llvm_src_root,
+ '-I%s/lib/Target/Sparc' % root.llvm_src_root,
+ '-I%s/lib/Target/X86' % root.llvm_src_root,
+ '-I%s/lib/Target/XCore' % root.llvm_src_root,
+ '-I%s/lib/Target/ARM' % target_obj_root,
+ '-I%s/lib/Target/CellSPU' % target_obj_root,
+ '-I%s/lib/Target/CppBackend' % target_obj_root,
+ '-I%s/lib/Target/Mips' % target_obj_root,
+ '-I%s/lib/Target/MSIL' % target_obj_root,
+ '-I%s/lib/Target/MSP430' % target_obj_root,
+ '-I%s/lib/Target/PIC16' % target_obj_root,
+ '-I%s/lib/Target/PowerPC' % target_obj_root,
+ '-I%s/lib/Target/Sparc' % target_obj_root,
+ '-I%s/lib/Target/X86' % target_obj_root,
+ '-I%s/lib/Target/XCore' % target_obj_root];
+
+config.test_format = \
+ lit.formats.OneCommandPerFileTest(command=[root.clang, '-emit-llvm', '-c',
+ '-o', '/dev/null'] + cxxflags,
+ dir='%s/lib' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.cpp)$')
+
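Both the -Compile and -Syntax configurations rely on OneCommandPerFileTest: every file under dir whose name matches pattern becomes one test that runs the configured command on that file. A simplified picture of that behaviour, for illustration only (the real format is implemented inside lit and additionally handles reporting, the recursive flag, and so on):

    # Simplified illustration of a one-command-per-file runner; not lit's
    # actual OneCommandPerFileTest implementation.
    import os
    import re
    import subprocess

    def run_command_per_file(command, directory, pattern):
        rx = re.compile(pattern)
        results = {}
        for dirpath, _, filenames in os.walk(directory):
            for name in filenames:
                if rx.match(name):
                    path = os.path.join(dirpath, name)
                    results[path] = subprocess.call(list(command) + [path])
        return results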
diff --git a/clang/utils/C++Tests/LLVM-Code-Symbols/check-symbols b/clang/utils/C++Tests/LLVM-Code-Symbols/check-symbols
new file mode 100755
index 0000000..cd54eed
--- /dev/null
+++ b/clang/utils/C++Tests/LLVM-Code-Symbols/check-symbols
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+import subprocess
+import difflib
+
+def capture_2(args0, args1):
+ import subprocess
+ p0 = subprocess.Popen(args0, stdin=None, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ p1 = subprocess.Popen(args1, stdin=p0.stdout, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,_ = p1.communicate()
+ return out
+
+def normalize_nm(data):
+ lines = data.split('\n')
+ lines.sort()
+
+ # FIXME: Ignore common symbols for now.
+ lines = [ln for ln in lines
+ if not ln.startswith(' C')]
+
+ return lines
+
+def main():
+ import sys
+ clang = sys.argv[1]
+ flags = sys.argv[2:]
+
+ # FIXME: Relax to include undefined symbols.
+ nm_args = ["llvm-nm", "-extern-only", "-defined-only"]
+
+ llvmgcc_args = ["llvm-gcc"] + flags + ["-emit-llvm","-c","-o","-"]
+ clang_args = [clang] + flags + ["-emit-llvm","-c","-o","-"]
+
+ llvmgcc_nm = capture_2(llvmgcc_args, nm_args)
+ clang_nm = capture_2(clang_args, nm_args)
+
+ llvmgcc_nm = normalize_nm(llvmgcc_nm)
+ clang_nm = normalize_nm(clang_nm)
+
+ if llvmgcc_nm == clang_nm:
+ sys.exit(0)
+
+ print ' '.join(llvmgcc_args), '|', ' '.join(nm_args)
+ print ' '.join(clang_args), '|', ' '.join(nm_args)
+ for line in difflib.unified_diff(llvmgcc_nm, clang_nm,
+ fromfile="llvm-gcc symbols",
+ tofile="clang symbols"):
+ print line
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
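check-symbols pipes each compiler's bitcode through llvm-nm, normalizes and sorts the symbol lists, and prints a unified diff when they disagree. The snippet below shows just the reporting step on two tiny, invented symbol lists, so the failure output format is easy to picture:

    # Reporting step of check-symbols on made-up data; the symbol names
    # here are invented purely for illustration.
    import difflib

    llvmgcc_nm = ["T _Z3barv", "T _Z3foov"]
    clang_nm   = ["T _Z3bazv", "T _Z3foov"]

    for line in difflib.unified_diff(llvmgcc_nm, clang_nm,
                                     fromfile="llvm-gcc symbols",
                                     tofile="clang symbols", lineterm=""):
        print(line)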
diff --git a/clang/utils/C++Tests/LLVM-Code-Symbols/lit.local.cfg b/clang/utils/C++Tests/LLVM-Code-Symbols/lit.local.cfg
new file mode 100644
index 0000000..7882813
--- /dev/null
+++ b/clang/utils/C++Tests/LLVM-Code-Symbols/lit.local.cfg
@@ -0,0 +1,48 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+target_obj_root = root.llvm_obj_root
+cxxflags = ['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-Wno-sign-compare',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root,
+ '-I%s/lib/Target/ARM' % root.llvm_src_root,
+ '-I%s/lib/Target/CellSPU' % root.llvm_src_root,
+ '-I%s/lib/Target/CppBackend' % root.llvm_src_root,
+ '-I%s/lib/Target/Mips' % root.llvm_src_root,
+ '-I%s/lib/Target/MSIL' % root.llvm_src_root,
+ '-I%s/lib/Target/MSP430' % root.llvm_src_root,
+ '-I%s/lib/Target/PIC16' % root.llvm_src_root,
+ '-I%s/lib/Target/PowerPC' % root.llvm_src_root,
+ '-I%s/lib/Target/Sparc' % root.llvm_src_root,
+ '-I%s/lib/Target/X86' % root.llvm_src_root,
+ '-I%s/lib/Target/XCore' % root.llvm_src_root,
+ '-I%s/lib/Target/ARM' % target_obj_root,
+ '-I%s/lib/Target/CellSPU' % target_obj_root,
+ '-I%s/lib/Target/CppBackend' % target_obj_root,
+ '-I%s/lib/Target/Mips' % target_obj_root,
+ '-I%s/lib/Target/MSIL' % target_obj_root,
+ '-I%s/lib/Target/MSP430' % target_obj_root,
+ '-I%s/lib/Target/PIC16' % target_obj_root,
+ '-I%s/lib/Target/PowerPC' % target_obj_root,
+ '-I%s/lib/Target/Sparc' % target_obj_root,
+ '-I%s/lib/Target/X86' % target_obj_root,
+ '-I%s/lib/Target/XCore' % target_obj_root];
+
+kScript = os.path.join(os.path.dirname(__file__), "check-symbols")
+config.test_format = \
+ lit.formats.OneCommandPerFileTest(command=[kScript, root.clang] + cxxflags,
+ dir='%s/lib' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.cpp)$')
+
diff --git a/clang/utils/C++Tests/LLVM-Code-Syntax/lit.local.cfg b/clang/utils/C++Tests/LLVM-Code-Syntax/lit.local.cfg
new file mode 100644
index 0000000..42bec2d
--- /dev/null
+++ b/clang/utils/C++Tests/LLVM-Code-Syntax/lit.local.cfg
@@ -0,0 +1,46 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+target_obj_root = root.llvm_obj_root
+cxxflags = ['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root,
+ '-I%s/lib/Target/ARM' % root.llvm_src_root,
+ '-I%s/lib/Target/CellSPU' % root.llvm_src_root,
+ '-I%s/lib/Target/CppBackend' % root.llvm_src_root,
+ '-I%s/lib/Target/Mips' % root.llvm_src_root,
+ '-I%s/lib/Target/MSIL' % root.llvm_src_root,
+ '-I%s/lib/Target/MSP430' % root.llvm_src_root,
+ '-I%s/lib/Target/PIC16' % root.llvm_src_root,
+ '-I%s/lib/Target/PowerPC' % root.llvm_src_root,
+ '-I%s/lib/Target/Sparc' % root.llvm_src_root,
+ '-I%s/lib/Target/X86' % root.llvm_src_root,
+ '-I%s/lib/Target/XCore' % root.llvm_src_root,
+ '-I%s/lib/Target/ARM' % target_obj_root,
+ '-I%s/lib/Target/CellSPU' % target_obj_root,
+ '-I%s/lib/Target/CppBackend' % target_obj_root,
+ '-I%s/lib/Target/Mips' % target_obj_root,
+ '-I%s/lib/Target/MSIL' % target_obj_root,
+ '-I%s/lib/Target/MSP430' % target_obj_root,
+ '-I%s/lib/Target/PIC16' % target_obj_root,
+ '-I%s/lib/Target/PowerPC' % target_obj_root,
+ '-I%s/lib/Target/Sparc' % target_obj_root,
+ '-I%s/lib/Target/X86' % target_obj_root,
+ '-I%s/lib/Target/XCore' % target_obj_root];
+
+config.test_format = \
+ lit.formats.OneCommandPerFileTest(command=[root.clang,
+ '-fsyntax-only'] + cxxflags,
+ dir='%s/lib' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.cpp)$')
diff --git a/clang/utils/C++Tests/LLVM-Syntax/lit.local.cfg b/clang/utils/C++Tests/LLVM-Syntax/lit.local.cfg
new file mode 100644
index 0000000..cb0e566
--- /dev/null
+++ b/clang/utils/C++Tests/LLVM-Syntax/lit.local.cfg
@@ -0,0 +1,24 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+config.test_format = lit.formats.SyntaxCheckTest(compiler=root.clang,
+ dir='%s/include/llvm' % root.llvm_src_root,
+ recursive=True,
+ pattern='^(.*\\.h|[^.]*)$',
+ extra_cxx_args=['-D__STDC_LIMIT_MACROS',
+ '-D__STDC_CONSTANT_MACROS',
+ '-Werror',
+ '-I%s/include' % root.llvm_src_root,
+ '-I%s/include' % root.llvm_obj_root])
+
+config.excludes = ['AbstractTypeUser.h', 'DAGISelHeader.h',
+ 'AIXDataTypesFix.h', 'Solaris.h']
diff --git a/clang/utils/C++Tests/lit.cfg b/clang/utils/C++Tests/lit.cfg
new file mode 100644
index 0000000..274ca10
--- /dev/null
+++ b/clang/utils/C++Tests/lit.cfg
@@ -0,0 +1,27 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+# Load the main clang test config so we can leech its clang finding logic.
+lit.load_config(config, os.path.join(os.path.dirname(__file__),
+ '..', '..', 'test', 'lit.cfg'))
+assert config.clang, "Failed to set clang!?"
+
+# name: The name of this test suite.
+config.name = 'Clang++'
+
+# suffixes: A list of file extensions to treat as test files; this is actually
+# set by on_clone().
+config.suffixes = []
+
+# Reset these from the Clang config.
+config.test_source_root = config.test_exec_root = None
+
+# Don't run Clang and LLVM code checks by default.
+config.excludes = []
+if not lit.params.get('run_clang_all'):
+ config.excludes.append('Clang-Code-Syntax')
+ config.excludes.append('Clang-Code-Compile')
+ config.excludes.append('LLVM-Code-Syntax')
+ config.excludes.append('LLVM-Code-Compile')
+ config.excludes.append('LLVM-Code-Symbols')
diff --git a/clang/utils/C++Tests/stdc++-Syntax/lit.local.cfg b/clang/utils/C++Tests/stdc++-Syntax/lit.local.cfg
new file mode 100644
index 0000000..eb04866
--- /dev/null
+++ b/clang/utils/C++Tests/stdc++-Syntax/lit.local.cfg
@@ -0,0 +1,17 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+def getRoot(config):
+ if not config.parent:
+ return config
+ return getRoot(config.parent)
+
+root = getRoot(config)
+
+# testFormat: The test format to use to interpret tests.
+config.test_format = lit.formats.SyntaxCheckTest(compiler=root.clang,
+ dir='/usr/include/c++/4.2.1',
+ recursive=False,
+ pattern='^(.*\\.h|[^.]*)$')
+
diff --git a/clang/utils/CIndex/completion_logger_server.py b/clang/utils/CIndex/completion_logger_server.py
new file mode 100755
index 0000000..0652b1f
--- /dev/null
+++ b/clang/utils/CIndex/completion_logger_server.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+import sys
+from socket import *
+from time import strftime
+import datetime
+
+def main():
+ if len(sys.argv) < 4:
+ print "completion_logger_server.py <listen address> <listen port> <log file>"
+ exit(1)
+
+ host = sys.argv[1]
+ port = int(sys.argv[2])
+ buf = 1024 * 8
+ addr = (host,port)
+
+ # Create socket and bind to address
+ UDPSock = socket(AF_INET,SOCK_DGRAM)
+ UDPSock.bind(addr)
+
+ print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
+
+ # Open the logging file.
+ f = open(sys.argv[3], "a")
+
+ # Receive messages
+ while 1:
+ data,addr = UDPSock.recvfrom(buf)
+ if not data:
+ break
+ else:
+ f.write("{ ");
+ f.write("\"time\": \"{0}\"".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')))
+ f.write(", \"sender\": \"{0}\" ".format(addr[0]))
+ f.write(", \"data\": ")
+ f.write(data)
+ f.write(" }\n")
+ f.flush()
+
+ # Close socket
+ UDPSock.close()
+
+if __name__ == '__main__':
+ main()
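The server appends one record per UDP datagram, treating the payload as a pre-formatted JSON fragment. A matching client could look like the following; this is a hypothetical example, not part of the patch, and the host, port, and payload are placeholders:

    # Hypothetical client for the logger server above; sends one JSON
    # payload as a single UDP datagram.
    import json
    import socket

    def send_completion_record(host="127.0.0.1", port=4444, payload=None):
        data = json.dumps(payload or {"completions": 0})
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            sock.sendto(data.encode("utf-8"), (host, port))
        finally:
            sock.close()

    send_completion_record(payload={"completions": 12, "kind": "member"})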
diff --git a/clang/utils/CaptureCmd b/clang/utils/CaptureCmd
new file mode 100755
index 0000000..705585c
--- /dev/null
+++ b/clang/utils/CaptureCmd
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+"""CaptureCmd - A generic tool for capturing information about the
+invocations of another program.
+
+Usage
+--
+1. Move the original tool to a safe known location.
+
+2. Link CaptureCmd to the original tool's location.
+
+3. Define CAPTURE_CMD_PROGRAM to the known location of the original
+tool; this must be an absolute path.
+
+4. Define CAPTURE_CMD_DIR to a directory to write invocation
+information to.
+"""
+
+import hashlib
+import os
+import sys
+import time
+
+def saveCaptureData(prefix, dir, object):
+ string = repr(object) + '\n'
+ key = hashlib.sha1(string).hexdigest()
+ path = os.path.join(dir,
+ prefix + key)
+ if not os.path.exists(path):
+ f = open(path, 'wb')
+ f.write(string)
+ f.close()
+ return prefix + key
+
+def main():
+ program = os.getenv('CAPTURE_CMD_PROGRAM')
+ dir = os.getenv('CAPTURE_CMD_DIR')
+ fallback = os.getenv('CAPTURE_CMD_FALLBACK')
+ if not program:
+ raise ValueError('CAPTURE_CMD_PROGRAM is not defined!')
+ if not dir:
+ raise ValueError('CAPTURE_CMD_DIR is not defined!')
+
+ # Make the output directory if it doesn't already exist.
+ if not os.path.exists(dir):
+ os.mkdir(dir, 0700)
+
+ # Get keys for various data.
+ env = os.environ.items()
+ env.sort()
+ envKey = saveCaptureData('env-', dir, env)
+ cwdKey = saveCaptureData('cwd-', dir, os.getcwd())
+ argvKey = saveCaptureData('argv-', dir, sys.argv)
+ entry = (time.time(), envKey, cwdKey, argvKey)
+ saveCaptureData('cmd-', dir, entry)
+
+ if fallback:
+ pid = os.fork()
+ if not pid:
+ os.execv(program, sys.argv)
+ os._exit(1)
+ else:
+ res = os.waitpid(pid, 0)
+ if not res:
+ os.execv(fallback, sys.argv)
+ os._exit(1)
+ os._exit(res)
+ else:
+ os.execv(program, sys.argv)
+ os._exit(1)
+
+if __name__ == '__main__':
+ main()
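Each capture is a repr()'d Python value written under a content-hashed filename, so the records can be read back with ast.literal_eval. A small reader along these lines, shown as a hypothetical helper that assumes the cmd-/cwd-/argv- prefixes used by saveCaptureData above, reconstructs the recorded invocations:

    # Hypothetical reader for CaptureCmd output; each cmd- record is a
    # (timestamp, env_key, cwd_key, argv_key) tuple written with repr().
    import ast
    import os

    def load_capture(capture_dir, key):
        with open(os.path.join(capture_dir, key)) as f:
            return ast.literal_eval(f.read())

    def iter_invocations(capture_dir):
        for name in sorted(os.listdir(capture_dir)):
            if name.startswith("cmd-"):
                stamp, _env, cwd_key, argv_key = load_capture(capture_dir, name)
                yield (stamp,
                       load_capture(capture_dir, cwd_key),
                       load_capture(capture_dir, argv_key))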
diff --git a/clang/utils/CmpDriver b/clang/utils/CmpDriver
new file mode 100755
index 0000000..2533f54
--- /dev/null
+++ b/clang/utils/CmpDriver
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+
+import subprocess
+
+def splitArgs(s):
+ it = iter(s)
+ current = ''
+ inQuote = False
+ for c in it:
+ if c == '"':
+ if inQuote:
+ inQuote = False
+ yield current + '"'
+ else:
+ inQuote = True
+ current = '"'
+ elif inQuote:
+ if c == '\\':
+ current += c
+ current += it.next()
+ else:
+ current += c
+ elif not c.isspace():
+ yield c
+
+def insertMinimumPadding(a, b, dist):
+ """insertMinimumPadding(a,b) -> (a',b')
+
+ Return two lists of equal length, where some number of Nones have
+ been inserted into the shorter list such that sum(map(dist, a',
+ b')) is minimized.
+
+ Assumes dist(X, Y) -> int and non-negative.
+ """
+
+ def cost(a, b):
+ return sum(map(dist, a + [None] * (len(b) - len(a)), b))
+
+ # Normalize so a is shortest.
+ if len(b) < len(a):
+ b, a = insertMinimumPadding(b, a, dist)
+ return a,b
+
+ # For each None we have to insert...
+ for i in range(len(b) - len(a)):
+ # For each position we could insert it...
+ current = cost(a, b)
+ best = None
+ for j in range(len(a) + 1):
+ a_0 = a[:j] + [None] + a[j:]
+ candidate = cost(a_0, b)
+ if best is None or candidate < best[0]:
+ best = (candidate, a_0, j)
+ a = best[1]
+ return a,b
+
+class ZipperDiff(object):
+ """ZipperDiff - Simple (slow) diff only accommodating inserts."""
+
+ def __init__(self, a, b):
+ self.a = a
+ self.b = b
+
+ def dist(self, a, b):
+ return a != b
+
+ def getDiffs(self):
+ a,b = insertMinimumPadding(self.a, self.b, self.dist)
+ for aElt,bElt in zip(a,b):
+ if self.dist(aElt, bElt):
+ yield aElt,bElt
+
+class DriverZipperDiff(ZipperDiff):
+ def isTempFile(self, filename):
+ if filename[0] != '"' or filename[-1] != '"':
+ return False
+ return (filename.startswith('/tmp/', 1) or
+ filename.startswith('/var/', 1))
+
+ def dist(self, a, b):
+ if a and b and self.isTempFile(a) and self.isTempFile(b):
+ return 0
+ return super(DriverZipperDiff, self).dist(a,b)
+
+class CompileInfo:
+ def __init__(self, out, err, res):
+ self.commands = []
+
+ # Standard out isn't used for much.
+ self.stdout = out
+ self.stderr = ''
+
+ # FIXME: Compare error messages as well.
+ for ln in err.split('\n'):
+ if (ln == 'Using built-in specs.' or
+ ln.startswith('Target: ') or
+ ln.startswith('Configured with: ') or
+ ln.startswith('Thread model: ') or
+ ln.startswith('gcc version') or
+ ln.startswith('clang version')):
+ pass
+ elif ln.strip().startswith('"'):
+ self.commands.append(list(splitArgs(ln)))
+ else:
+ self.stderr += ln + '\n'
+
+ self.stderr = self.stderr.strip()
+ self.exitCode = res
+
+def captureDriverInfo(cmd, args):
+ p = subprocess.Popen([cmd,'-###'] + args,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,err = p.communicate()
+ res = p.wait()
+ return CompileInfo(out,err,res)
+
+def main():
+ import os, sys
+
+ args = sys.argv[1:]
+ driverA = os.getenv('DRIVER_A') or 'gcc'
+ driverB = os.getenv('DRIVER_B') or 'clang'
+
+ infoA = captureDriverInfo(driverA, args)
+ infoB = captureDriverInfo(driverB, args)
+
+ differ = False
+
+ # Compare stdout.
+ if infoA.stdout != infoB.stdout:
+ print '-- STDOUT DIFFERS -'
+ print 'A OUTPUT: ',infoA.stdout
+ print 'B OUTPUT: ',infoB.stdout
+ print
+
+ diff = ZipperDiff(infoA.stdout.split('\n'),
+ infoB.stdout.split('\n'))
+ for i,(aElt,bElt) in enumerate(diff.getDiffs()):
+ if aElt is None:
+ print 'A missing: %s' % bElt
+ elif bElt is None:
+ print 'B missing: %s' % aElt
+ else:
+ print 'mismatch: A: %s' % aElt
+ print ' B: %s' % bElt
+
+ differ = True
+
+ # Compare stderr.
+ if infoA.stderr != infoB.stderr:
+ print '-- STDERR DIFFERS -'
+ print 'A STDERR: ',infoA.stderr
+ print 'B STDERR: ',infoB.stderr
+ print
+
+ diff = ZipperDiff(infoA.stderr.split('\n'),
+ infoB.stderr.split('\n'))
+ for i,(aElt,bElt) in enumerate(diff.getDiffs()):
+ if aElt is None:
+ print 'A missing: %s' % bElt
+ elif bElt is None:
+ print 'B missing: %s' % aElt
+ else:
+ print 'mismatch: A: %s' % aElt
+ print ' B: %s' % bElt
+
+ differ = True
+
+ # Compare commands.
+ for i,(a,b) in enumerate(map(None, infoA.commands, infoB.commands)):
+ if a is None:
+ print 'A MISSING:',' '.join(b)
+ differ = True
+ continue
+ elif b is None:
+ print 'B MISSING:',' '.join(a)
+ differ = True
+ continue
+
+ diff = DriverZipperDiff(a,b)
+ diffs = list(diff.getDiffs())
+ if diffs:
+ print '-- COMMAND %d DIFFERS -' % i
+ print 'A COMMAND:',' '.join(a)
+ print 'B COMMAND:',' '.join(b)
+ print
+ for i,(aElt,bElt) in enumerate(diffs):
+ if aElt is None:
+ print 'A missing: %s' % bElt
+ elif bElt is None:
+ print 'B missing: %s' % aElt
+ else:
+ print 'mismatch: A: %s' % aElt
+ print ' B: %s' % bElt
+ differ = True
+
+ # Compare result codes.
+ if infoA.exitCode != infoB.exitCode:
+ print '-- EXIT CODES DIFFER -'
+ print 'A: ',infoA.exitCode
+ print 'B: ',infoB.exitCode
+ differ = True
+
+ if differ:
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
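The interesting piece of CmpDriver is insertMinimumPadding, which aligns two argument lists of different lengths by inserting None placeholders wherever they minimize the number of mismatches, so only genuine differences get reported. A toy run follows; the argument lists are invented, and the example assumes the definitions above are available alongside it:

    # Toy example of CmpDriver's alignment: the shorter list gets a None
    # inserted where it minimizes mismatches, so the only reported
    # difference is the extra "-g" argument.
    a = ['"clang"', '"-c"', '"foo.c"']
    b = ['"clang"', '"-g"', '"-c"', '"foo.c"']

    padded_a, padded_b = insertMinimumPadding(a, b, lambda x, y: x != y)
    # padded_a == ['"clang"', None, '"-c"', '"foo.c"']

    for old, new in ZipperDiff(a, b).getDiffs():
        print("%s -> %s" % (old, new))   # None -> "-g"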
diff --git a/clang/utils/FindSpecRefs b/clang/utils/FindSpecRefs
new file mode 100755
index 0000000..9097f93
--- /dev/null
+++ b/clang/utils/FindSpecRefs
@@ -0,0 +1,910 @@
+#!/usr/bin/env python
+
+import os
+import re
+import time
+from pprint import pprint
+
+###
+
+c99URL = 'http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1124.pdf'
+c99TOC = [('Foreword', 'xi'),
+('Introduction', 'xiv'),
+('1. Scope', '1'),
+('2. Normative references', '2'),
+('3. Terms, definitions, and symbols', '3'),
+('4. Conformance', '7'),
+('5. Environment', '9'),
+('5.1 Conceptual models', '9'),
+('5.1.1 Translation environment', '9'),
+('5.1.2 Execution environments', '11'),
+('5.2 Environmental considerations', '17'),
+('5.2.1 Character sets', '17'),
+('5.2.2 Character display semantics', '19'),
+('5.2.3 Signals and interrupts', '20'),
+('5.2.4 Environmental limits', '20'),
+('6. Language', '29'),
+('6.1 Notation', '29'),
+('6.2 Concepts', '29'),
+('6.2.1 Scopes of identifiers', '29'),
+('6.2.2 Linkages of identifiers', '30'),
+('6.2.3 Name spaces of identifiers', '31'),
+('6.2.4 Storage durations of objects', '32'),
+('6.2.5 Types', '33'),
+('6.2.6 Representations of types', '37'),
+('6.2.7 Compatible type and composite type', '40'),
+('6.3 Conversions', '42'),
+('6.3.1 Arithmetic operands', '42'),
+('6.3.2 Other operands', '46'),
+('6.4 Lexical elements', '49'),
+('6.4.1 Keywords', '50'),
+('6.4.2 Identifiers', '51'),
+('6.4.3 Universal character names', '53'),
+('6.4.4 Constants', '54'),
+('6.4.5 String literals', '62'),
+('6.4.6 Punctuators', '63'),
+('6.4.7 Header names', '64'),
+('6.4.8 Preprocessing numbers', '65'),
+('6.4.9 Comments', '66'),
+('6.5 Expressions', '67'),
+('6.5.1 Primary expressions', '69'),
+('6.5.2 Postfix operators', '69'),
+('6.5.3 Unary operators', '78'),
+('6.5.4 Cast operators', '81'),
+('6.5.5 Multiplicative operators', '82'),
+('6.5.6 Additive operators', '82'),
+('6.5.7 Bitwise shift operators', '84'),
+('6.5.8 Relational operators', '85'),
+('6.5.9 Equality operators', '86'),
+('6.5.10 Bitwise AND operator', '87'),
+('6.5.11 Bitwise exclusive OR operator', '88'),
+('6.5.12 Bitwise inclusive OR operator', '88'),
+('6.5.13 Logical AND operator', '89'),
+('6.5.14 Logical OR operator', '89'),
+('6.5.15 Conditional operator', '90'),
+('6.5.16 Assignment operators', '91'),
+('6.5.17 Comma operator', '94'),
+('6.6 Constant expressions', '95'),
+('6.7 Declarations', '97'),
+('6.7.1 Storage-class specifiers', '98'),
+('6.7.2 Type specifiers', '99'),
+('6.7.3 Type qualifiers', '108'),
+('6.7.4 Function specifiers', '112'),
+('6.7.5 Declarators', '114'),
+('6.7.6 Type names', '122'),
+('6.7.7 Type definitions', '123'),
+('6.7.8 Initialization', '125'),
+('6.8 Statements and blocks', '131'),
+('6.8.1 Labeled statements', '131'),
+('6.8.2 Compound statement', '132'),
+('6.8.3 Expression and null statements', '132'),
+('6.8.4 Selection statements', '133'),
+('6.8.5 Iteration statements', '135'),
+('6.8.6 Jump statements', '136'),
+('6.9 External definitions', '140'),
+('6.9.1 Function definitions', '141'),
+('6.9.2 External object definitions', '143'),
+('6.10 Preprocessing directives', '145'),
+('6.10.1 Conditional inclusion', '147'),
+('6.10.2 Source file inclusion', '149'),
+('6.10.3 Macro replacement', '151'),
+('6.10.4 Line control', '158'),
+('6.10.5 Error directive', '159'),
+('6.10.6 Pragma directive', '159'),
+('6.10.7 Null directive', '160'),
+('6.10.8 Predefined macro names', '160'),
+('6.10.9 Pragma operator', '161'),
+('6.11 Future language directions', '163'),
+('6.11.1 Floating types', '163'),
+('6.11.2 Linkages of identifiers', '163'),
+('6.11.3 External names', '163'),
+('6.11.4 Character escape sequences', '163'),
+('6.11.5 Storage-class specifiers', '163'),
+('6.11.6 Function declarators', '163'),
+('6.11.7 Function definitions', '163'),
+('6.11.8 Pragma directives', '163'),
+('6.11.9 Predefined macro names', '163'),
+('7. Library', '164'),
+('7.1 Introduction', '164'),
+('7.1.1 Definitions of terms', '164'),
+('7.1.2 Standard headers', '165'),
+('7.1.3 Reserved identifiers', '166'),
+('7.1.4 Use of library functions', '166'),
+('7.2 Diagnostics <assert.h>', '169'),
+('7.2.1 Program diagnostics', '169'),
+('7.3 Complex arithmetic <complex.h>', '170'),
+('7.3.1 Introduction', '170'),
+('7.3.2 Conventions', '170'),
+('7.3.3 Branch cuts', '171'),
+('7.3.4 The CX_LIMITED_RANGE pragma', '171'),
+('7.3.5 Trigonometric functions', '172'),
+('7.3.6 Hyperbolic functions', '174'),
+('7.3.7 Exponential and logarithmic functions', '176'),
+('7.3.8 Power and absolute-value functions', '177'),
+('7.3.9 Manipulation functions', '178'),
+('7.4 Character handling <ctype.h>', '181'),
+('7.4.1 Character classification functions', '181'),
+('7.4.2 Character case mapping functions', '184'),
+('7.5 Errors <errno.h>', '186'),
+('7.6 Floating-point environment <fenv.h>', '187'),
+('7.6.1 The FENV_ACCESS pragma', '189'),
+('7.6.2 Floating-point exceptions', '190'),
+('7.6.3 Rounding', '193'),
+('7.6.4 Environment', '194'),
+('7.7 Characteristics of floating types <float.h>', '197'),
+('7.8 Format conversion of integer types <inttypes.h>', '198'),
+('7.8.1 Macros for format specifiers', '198'),
+('7.8.2 Functions for greatest-width integer types', '199'),
+('7.9 Alternative spellings <iso646.h>', '202'),
+('7.10 Sizes of integer types <limits.h>', '203'),
+('7.11 Localization <locale.h>', '204'),
+('7.11.1 Locale control', '205'),
+('7.11.2 Numeric formatting convention inquiry', '206'),
+('7.12 Mathematics <math.h>', '212'),
+('7.12.1 Treatment of error conditions', '214'),
+('7.12.2 The FP_CONTRACT pragma', '215'),
+('7.12.3 Classification macros', '216'),
+('7.12.4 Trigonometric functions', '218'),
+('7.12.5 Hyperbolic functions', '221'),
+('7.12.6 Exponential and logarithmic functions', '223'),
+('7.12.7 Power and absolute-value functions', '228'),
+('7.12.8 Error and gamma functions', '230'),
+('7.12.9 Nearest integer functions', '231'),
+('7.12.10 Remainder functions', '235'),
+('7.12.11 Manipulation functions', '236'),
+('7.12.12 Maximum, minimum, and positive difference functions', '238'),
+('7.12.13 Floating multiply-add', '239'),
+('7.12.14 Comparison macros', '240'),
+('7.13 Nonlocal jumps <setjmp.h>', '243'),
+('7.13.1 Save calling environment', '243'),
+('7.13.2 Restore calling environment', '244'),
+('7.14 Signal handling <signal.h>', '246'),
+('7.14.1 Specify signal handling', '247'),
+('7.14.2 Send signal', '248'),
+('7.15 Variable arguments <stdarg.h>', '249'),
+('7.15.1 Variable argument list access macros', '249'),
+('7.16 Boolean type and values <stdbool.h>', '253'),
+('7.17 Common definitions <stddef.h>', '254'),
+('7.18 Integer types <stdint.h>', '255'),
+('7.18.1 Integer types', '255'),
+('7.18.2 Limits of specified-width integer types', '257'),
+('7.18.3 Limits of other integer types', '259'),
+('7.18.4 Macros for integer constants', '260'),
+('7.19 Input/output <stdio.h>', '262'),
+('7.19.1 Introduction', '262'),
+('7.19.2 Streams', '264'),
+('7.19.3 Files', '266'),
+('7.19.4 Operations on files', '268'),
+('7.19.5 File access functions', '270'),
+('7.19.6 Formatted input/output functions', '274'),
+('7.19.7 Character input/output functions', '296'),
+('7.19.8 Direct input/output functions', '301'),
+('7.19.9 File positioning functions', '302'),
+('7.19.10 Error-handling functions', '304'),
+('7.20 General utilities <stdlib.h>', '306'),
+('7.20.1 Numeric conversion functions', '307'),
+('7.20.2 Pseudo-random sequence generation functions', '312'),
+('7.20.3 Memory management functions', '313'),
+('7.20.4 Communication with the environment', '315'),
+('7.20.5 Searching and sorting utilities', '318'),
+('7.20.6 Integer arithmetic functions', '320'),
+('7.20.7 Multibyte/wide character conversion functions', '321'),
+('7.20.8 Multibyte/wide string conversion functions', '323'),
+('7.21 String handling <string.h>', '325'),
+('7.21.1 String function conventions', '325'),
+('7.21.2 Copying functions', '325'),
+('7.21.3 Concatenation functions', '327'),
+('7.21.4 Comparison functions', '328'),
+('7.21.5 Search functions', '330'),
+('7.21.6 Miscellaneous functions', '333'),
+('7.22 Type-generic math <tgmath.h>', '335'),
+('7.23 Date and time <time.h>', '338'),
+('7.23.1 Components of time', '338'),
+('7.23.2 Time manipulation functions', '339'),
+('7.23.3 Time conversion functions', '341'),
+('7.24 Extended multibyte and wide character utilities <wchar.h>', '348'),
+('7.24.1 Introduction', '348'),
+('7.24.2 Formatted wide character input/output functions', '349'),
+('7.24.3 Wide character input/output functions', '367'),
+('7.24.4 General wide string utilities', '371'),
+('7.24.5 Wide character time conversion functions', '385'),
+('7.24.6 Extended multibyte/wide character conversion utilities', '386'),
+('7.25 Wide character classification and mapping utilities <wctype.h>',
+ '393'),
+('7.25.1 Introduction', '393'),
+('7.25.2 Wide character classification utilities', '394'),
+('7.25.3 Wide character case mapping utilities', '399'),
+('7.26 Future library directions', '401'),
+('7.26.1 Complex arithmetic <complex.h>', '401'),
+('7.26.2 Character handling <ctype.h>', '401'),
+('7.26.3 Errors <errno.h>', '401'),
+('7.26.4 Format conversion of integer types <inttypes.h>', '401'),
+('7.26.5 Localization <locale.h>', '401'),
+('7.26.6 Signal handling <signal.h>', '401'),
+('7.26.7 Boolean type and values <stdbool.h>', '401'),
+('7.26.8 Integer types <stdint.h>', '401'),
+('7.26.9 Input/output <stdio.h>', '402'),
+('7.26.10 General utilities <stdlib.h>', '402'),
+('7.26.11 String handling <string.h>', '402'),
+('<wchar.h>', '402'),
+('<wctype.h>', '402'),
+('Annex A (informative) Language syntax summary', '403'),
+('A.1 Lexical grammar', '403'),
+('A.2 Phrase structure grammar', '409'),
+('A.3 Preprocessing directives', '416'),
+('Annex B (informative) Library summary', '418'),
+('B.1 Diagnostics <assert.h>', '418'),
+('B.2 Complex <complex.h>', '418'),
+('B.3 Character handling <ctype.h>', '420'),
+('B.4 Errors <errno.h>', '420'),
+('B.5 Floating-point environment <fenv.h>', '420'),
+('B.6 Characteristics of floating types <float.h>', '421'),
+('B.7 Format conversion of integer types <inttypes.h>', '421'),
+('B.8 Alternative spellings <iso646.h>', '422'),
+('B.9 Sizes of integer types <limits.h>', '422'),
+('B.10 Localization <locale.h>', '422'),
+('B.11 Mathematics <math.h>', '422'),
+('B.12 Nonlocal jumps <setjmp.h>', '427'),
+('B.13 Signal handling <signal.h>', '427'),
+('B.14 Variable arguments <stdarg.h>', '427'),
+('B.15 Boolean type and values <stdbool.h>', '427'),
+('B.16 Common definitions <stddef.h>', '428'),
+('B.17 Integer types <stdint.h>', '428'),
+('B.18 Input/output <stdio.h>', '428'),
+('B.19 General utilities <stdlib.h>', '430'),
+('B.20 String handling <string.h>', '432'),
+('B.21 Type-generic math <tgmath.h>', '433'),
+('B.22 Date and time <time.h>', '433'),
+('B.23 Extended multibyte/wide character utilities <wchar.h>', '434'),
+('B.24 Wide character classification and mapping utilities <wctype.h>',
+ '436'),
+('Annex C (informative) Sequence points', '438'),
+('Annex D (normative) Universal character names for identifiers', '439'),
+('Annex E (informative) Implementation limits', '441'),
+('Annex F (normative) IEC 60559 floating-point arithmetic', '443'),
+('F.1 Introduction', '443'),
+('F.2 Types', '443'),
+('F.3 Operators and functions', '444'),
+('F.4 Floating to integer conversion', '446'),
+('F.5 Binary-decimal conversion', '446'),
+('F.6 Contracted expressions', '447'),
+('F.7 Floating-point environment', '447'),
+('F.8 Optimization', '450'),
+('F.9 Mathematics <math.h>', '453'),
+('Annex G (informative) IEC 60559-compatible complex arithmetic', '466'),
+('G.1 Introduction', '466'),
+('G.2 Types', '466'),
+('G.3 Conventions', '466'),
+('G.4 Conversions', '467'),
+('G.5 Binary operators', '467'),
+('G.6 Complex arithmetic <complex.h>', '471'),
+('G.7 Type-generic math <tgmath.h>', '479'),
+('Annex H (informative) Language independent arithmetic', '480'),
+('H.1 Introduction', '480'),
+('H.2 Types', '480'),
+('H.3 Notification', '484'),
+('Annex I (informative) Common warnings', '486'),
+('Annex J (informative) Portability issues', '488'),
+('J.1 Unspecified behavior', '488'),
+('J.2 Undefined behavior', '491'),
+('J.3 Implementation-defined behavior', '504'),
+('J.4 Locale-specific behavior', '511'),
+('J.5 Common extensions', '512'),
+('Bibliography', '515'),
+('Index', '517')]
+
+cXXURL = 'http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2723.pdf'
+cXXTOC = [('Contents', 'ii'),
+('List of Tables', 'ix'),
+('1 General', '1'),
+('1.1 Scope', '1'),
+('1.2 Normative references', '1'),
+('1.3 Definitions', '2'),
+('1.4 Implementation compliance', '4'),
+('1.5 Structure of this International Standard', '5'),
+('1.6 Syntax notation', '5'),
+('1.7 The C++ memory model', '6'),
+('1.8 The C++ object model', '6'),
+('1.9 Program execution', '7'),
+('1.10 Multi-threaded executions and data races', '10'),
+('1.11 Acknowledgments', '13'),
+('2 Lexical conventions', '15'),
+('2.1 Phases of translation', '15'),
+('2.2 Character sets', '16'),
+('2.3 Trigraph sequences', '17'),
+('2.4 Preprocessing tokens', '17'),
+('2.5 Alternative tokens', '18'),
+('2.6 Tokens', '19'),
+('2.7 Comments', '19'),
+('2.8 Header names', '19'),
+('2.9 Preprocessing numbers', '20'),
+('2.10 Identifiers', '20'),
+('2.11 Keywords', '20'),
+('2.12 Operators and punctuators', '21'),
+('2.13 Literals', '21'),
+('3 Basic concepts', '29'),
+('3.1 Declarations and definitions', '29'),
+('3.2 One definition rule', '31'),
+('3.3 Declarative regions and scopes', '33'),
+('3.4 Name lookup', '38'),
+('3.5 Program and linkage', '51'),
+('3.6 Start and termination', '54'),
+('3.7 Storage duration', '58'),
+('3.8 Object Lifetime', '62'),
+('3.9 Types', '65'),
+('3.10 Lvalues and rvalues', '70'),
+('3.11 Alignment', '72'),
+('4 Standard conversions', '73'),
+('4.1 Lvalue-to-rvalue conversion', '74'),
+('4.2 Array-to-pointer conversion', '74'),
+('4.3 Function-to-pointer conversion', '74'),
+('4.4 Qualification conversions', '74'),
+('4.5 Integral promotions', '75'),
+('4.6 Floating point promotion', '76'),
+('4.7 Integral conversions', '76'),
+('4.8 Floating point conversions', '76'),
+('4.9 Floating-integral conversions', '77'),
+('4.10 Pointer conversions', '77'),
+('4.11 Pointer to member conversions', '77'),
+('4.12 Boolean conversions', '78'),
+('4.13 Integer conversion rank', '78'),
+('5 Expressions', '79'),
+('5.1 Primary expressions', '80'),
+('5.2 Postfix expressions', '85'),
+('5.3 Unary expressions', '96'),
+('5.4 Explicit type conversion (cast notation)', '104'),
+('5.5 Pointer-to-member operators', '105'),
+('5.6 Multiplicative operators', '106'),
+('5.7 Additive operators', '106'),
+('5.8 Shift operators', '107'),
+('5.9 Relational operators', '108'),
+('5.10 Equality operators', '109'),
+('5.11 Bitwise AND operator', '110'),
+('5.12 Bitwise exclusive OR operator', '110'),
+('5.13 Bitwise inclusive OR operator', '110'),
+('5.14 Logical AND operator', '110'),
+('5.15 Logical OR operator', '110'),
+('5.16 Conditional operator', '111'),
+('5.17 Assignment and compound assignment operators', '112'),
+('5.18 Comma operator', '113'),
+('5.19 Constant expressions', '113'),
+('6 Statements', '116'),
+('6.1 Labeled statement', '116'),
+('6.2 Expression statement', '116'),
+('6.3 Compound statement or block', '116'),
+('6.4 Selection statements', '117'),
+('6.5 Iteration statements', '118'),
+('6.6 Jump statements', '121'),
+('6.7 Declaration statement', '122'),
+('6.8 Ambiguity resolution', '123'),
+('7 Declarations', '125'),
+('7.1 Specifiers', '126'),
+('7.2 Enumeration declarations', '140'),
+('7.3 Namespaces', '143'),
+('7.4 The asm declaration', '156'),
+('7.5 Linkage specifications', '156'),
+('8 Declarators', '160'),
+('8.1 Type names', '161'),
+('8.2 Ambiguity resolution', '161'),
+('8.3 Meaning of declarators', '163'),
+('8.4 Function definitions', '175'),
+('8.5 Initializers', '177'),
+('9 Classes', '191'),
+('9.1 Class names', '193'),
+('9.2 Class members', '194'),
+('9.3 Member functions', '197'),
+('9.4 Static members', '200'),
+('9.5 Unions', '202'),
+('9.6 Bit-fields', '203'),
+('9.7 Nested class declarations', '204'),
+('9.8 Local class declarations', '205'),
+('9.9 Nested type names', '206'),
+('10 Derived classes', '207'),
+('10.1 Multiple base classes', '208'),
+('10.2 Member name lookup', '210'),
+('10.3 Virtual functions', '213'),
+('10.4 Abstract classes', '217'),
+('11 Member access control', '219'),
+('11.1 Access specifiers', '221'),
+('11.2 Accessibility of base classes and base class members', '222'),
+('11.3 Access declarations', '224'),
+('11.4 Friends', '225'),
+('11.5 Protected member access', '228'),
+('11.6 Access to virtual functions', '229'),
+('11.7 Multiple access', '230'),
+('11.8 Nested classes', '230'),
+('12 Special member functions', '231'),
+('12.1 Constructors', '231'),
+('12.2 Temporary objects', '233'),
+('12.3 Conversions', '235'),
+('12.4 Destructors', '238'),
+('12.5 Free store', '240'),
+('12.6 Initialization', '242'),
+('12.7 Construction and destruction', '247'),
+('12.8 Copying class objects', '250'),
+('12.9 Inheriting Constructors', '255'),
+('13 Overloading', '259'),
+('13.1 Overloadable declarations', '259'),
+('13.2 Declaration matching', '261'),
+('13.3 Overload resolution', '262'),
+('13.4 Address of overloaded function', '281'),
+('13.5 Overloaded operators', '282'),
+('13.6 Built-in operators', '286'),
+('14 Templates', '290'),
+('14.1 Template parameters', '291'),
+('14.2 Names of template specializations', '294'),
+('14.3 Template arguments', '296'),
+('14.4 Type equivalence', '302'),
+('14.5 Template declarations', '303'),
+('14.6 Name resolution', '318'),
+('14.7 Template instantiation and specialization', '331'),
+('14.8 Function template specializations', '343'),
+('15 Exception handling', '363'),
+('15.1 Throwing an exception', '364'),
+('15.2 Constructors and destructors', '366'),
+('15.3 Handling an exception', '366'),
+('15.4 Exception specifications', '368'),
+('15.5 Special functions', '371'),
+('15.6 Exceptions and access', '372'),
+('16 Preprocessing directives', '373'),
+('16.1 Conditional inclusion', '375'),
+('16.2 Source file inclusion', '376'),
+('16.3 Macro replacement', '377'),
+('16.4 Line control', '382'),
+('16.5 Error directive', '383'),
+('16.6 Pragma directive', '383'),
+('16.7 Null directive', '383'),
+('16.8 Predefined macro names', '383'),
+('16.9 Pragma operator', '384'),
+('17 Library introduction', '386'),
+('17.1 General', '386'),
+('17.2 Overview', '386'),
+('17.3 Definitions', '386'),
+('17.4 Additional definitions', '390'),
+('17.5 Method of description (Informative)', '390'),
+('17.6 Library-wide requirements', '396'),
+('18 Language support library', '407'),
+('18.1 Types', '407'),
+('18.2 Implementation properties', '408'),
+('18.3 Integer types', '417'),
+('18.4 Start and termination', '418'),
+('18.5 Dynamic memory management', '420'),
+('18.6 Type identification', '424'),
+('18.7 Exception handling', '427'),
+('18.8 Initializer lists', '432'),
+('18.9 Other runtime support', '434'),
+('19 Diagnostics library', '435'),
+('19.1 Exception classes', '435'),
+('19.2 Assertions', '439'),
+('19.3 Error numbers', '440'),
+('19.4 System error support', '440'),
+('20 General utilities library', '452'),
+('20.1 Requirements', '452'),
+('20.2 Utility components', '457'),
+('20.3 Compile-time rational arithmetic', '463'),
+('20.4 Tuples', '465'),
+('20.5 Metaprogramming and type traits', '473'),
+('20.6 Function objects', '486'),
+('20.7 Memory', '509'),
+('20.8 Time utilities', '548'),
+('20.9 Date and time functions', '562'),
+('21 Strings library', '563'),
+('21.1 Character traits', '563'),
+('21.2 String classes', '569'),
+('21.3 Class template basic_string', '572'),
+('21.4 Numeric Conversions', '599'),
+('21.5 Null-terminated sequence utilities', '600'),
+('22 Localization library', '604'),
+('22.1 Locales', '604'),
+('22.2 Standard locale categories', '617'),
+('22.3 Standard code conversion facets', '657'),
+('22.4 C Library Locales', '659'),
+('23 Containers library', '660'),
+('23.1 Container requirements', '660'),
+('23.2 Sequence containers', '681'),
+('23.3 Associative containers', '719'),
+('23.4 Unordered associative containers', '744'),
+('24 Iterators library', '759'),
+('24.1 Iterator requirements', '759'),
+('24.2 Header <iterator> synopsis', '764'),
+('24.3 Iterator primitives', '767'),
+('24.4 Predefined iterators', '770'),
+('24.5 Stream iterators', '784'),
+('25 Algorithms library', '792'),
+('25.1 Non-modifying sequence operations', '802'),
+('25.2 Mutating sequence operations', '806'),
+('25.3 Sorting and related operations', '815'),
+('25.4 C library algorithms', '829'),
+('26 Numerics library', '831'),
+('26.1 Numeric type requirements', '831'),
+('26.2 The floating-point environment', '832'),
+('26.3 Complex numbers', '833'),
+('26.4 Random number generation', '842'),
+('26.5 Numeric arrays', '884'),
+('26.6 Generalized numeric operations', '904'),
+('26.7 C Library', '907'),
+('27 Input/output library', '912'),
+('27.1 Iostreams requirements', '912'),
+('27.2 Forward declarations', '912'),
+('27.3 Standard iostream objects', '915'),
+('27.4 Iostreams base classes', '916'),
+('27.5 Stream buffers', '934'),
+('27.6 Formatting and manipulators', '944'),
+('27.7 String-based streams', '972'),
+('27.8 File-based streams', '984'),
+('28 Regular expressions library', '1000'),
+('28.1 Definitions', '1000'),
+('28.2 Requirements', '1000'),
+('28.3 Regular expressions summary', '1002'),
+('28.4 Header <regex> synopsis', '1003'),
+('28.5 Namespace std::regex_constants', '1009'),
+('28.6 Class regex_error', '1012'),
+('28.7 Class template regex_traits', '1012'),
+('28.8 Class template basic_regex', '1015'),
+('28.9 Class template sub_match', '1020'),
+('28.10 Class template match_results', '1025'),
+('28.11 Regular expression algorithms', '1029'),
+('28.12 Regular expression Iterators', '1033'),
+('28.13 Modified ECMAScript regular expression grammar', '1039'),
+('29 Atomic operations library', '1042'),
+('29.1 Order and Consistency', '1044'),
+('29.2 Lock-free Property', '1046'),
+('29.3 Atomic Types', '1046'),
+('29.4 Operations on Atomic Types', '1051'),
+('29.5 Flag Type and Operations', '1054'),
+('30 Thread support library', '1057'),
+('30.1 Requirements', '1057'),
+('30.2 Threads', '1058'),
+('30.3 Mutual exclusion', '1063'),
+('30.4 Condition variables', '1077'),
+('A Grammar summary', '1085'),
+('A.1 Keywords', '1085'),
+('A.2 Lexical conventions', '1085'),
+('A.3 Basic concepts', '1089'),
+('A.4 Expressions', '1090'),
+('A.5 Statements', '1093'),
+('A.6 Declarations', '1094'),
+('A.7 Declarators', '1097'),
+('A.8 Classes', '1098'),
+('A.9 Derived classes', '1099'),
+('A.10 Special member functions', '1099'),
+('A.11 Overloading', '1100'),
+('A.12 Templates', '1100'),
+('A.13 Exception handling', '1101'),
+('A.14 Preprocessing directives', '1101'),
+('B Implementation quantities', '1103'),
+('C Compatibility', '1105'),
+('C.1 C++ and ISO C', '1105'),
+('C.2 Standard C library', '1114'),
+('D Compatibility features', '1119'),
+('D.1 Increment operator with bool operand', '1119'),
+('D.2 static keyword', '1119'),
+('D.3 Access declarations', '1119'),
+('D.4 Implicit conversion from const strings', '1119'),
+('D.5 C standard library headers', '1119'),
+('D.6 Old iostreams members', '1120'),
+('D.7 char* streams', '1121'),
+('D.8 Binders', '1130'),
+('D.9 auto_ptr', '1132'),
+('E Universal-character-names', '1135'),
+('F Cross references', '1137'),
+('Index', '1153')]
+
+kDocuments = {
+ 'C99' : (c99URL, c99TOC, 12),
+ 'C++' : (cXXURL, cXXTOC, 12),
+}
+
+def findClosestTOCEntry(data, target):
+ # FIXME: Fix for named spec references
+ if isinstance(target[0],str):
+ return ('.'.join(target),'<named>',1)
+
+ offset = data[2]
+ best = None
+ for (name,page) in data[1]:
+ if ' ' in name:
+ section,name = name.split(' ',1)
+ if section == 'Annex':
+ section,name = name.split(' ',1)
+ section = 'Annex '+section
+ else:
+ section = None
+ try:
+ page = int(page) + offset
+ except:
+ page = 1
+ try:
+ spec = SpecIndex.fromstring(section)
+ except:
+ spec = None
+
+ # Meh, could be better...
+ if spec is not None:
+ dist = spec - target
+ if best is None or dist < best[0]:
+ best = (dist, (section, name, page))
+ return best[1]
+
+# What a hack. Slow to boot.
+doxyLineRefRE = re.compile(r"<a name=\"l([0-9]+)\"></a>")
+def findClosestLineReference(clangRoot, doxyName, target):
+ try:
+ f = open(os.path.join(clangRoot, 'docs', 'doxygen', 'html', doxyName))
+ except:
+ return None
+
+ best = None
+ for m in doxyLineRefRE.finditer(f.read()):
+ line = int(m.group(1), 10)
+ dist = abs(line - target)
+ if best is None or dist < best[0]:
+ best = (dist,'l'+m.group(1))
+ f.close()
+ if best is not None:
+ return best[1]
+ return None
+
+###
+
+nameAndSpecRefRE = re.compile(r"(C99|C90|C\+\+|H\&S) ((([0-9]+)(\.[0-9]+)*|\[[^]]+\])(p[0-9]+)?)")
+loneSpecRefRE = re.compile(r" (([0-9]+)(\.[0-9]+){2,100}(p[0-9]+)?)")
+def scanFile(path, filename):
+ try:
+ f = open(path)
+ except IOError:
+ print >>sys.stderr,'WARNING: Unable to open:',path
+ return
+
+ for i,ln in enumerate(f):
+ ignore = set()
+ for m in nameAndSpecRefRE.finditer(ln):
+ section = m.group(2)
+ name = m.group(1)
+ if section.endswith('.'):
+ section = section[:-1]
+ yield RefItem(name, section, filename, path, i+1)
+ ignore.add(section)
+ for m in loneSpecRefRE.finditer(ln):
+ section = m.group(1)
+ if section.endswith('.'):
+ section = section[:-1]
+ if section not in ignore:
+ yield RefItem(None, section, filename, path, i+1)
+
+###
+
+class SpecIndex:
+ @staticmethod
+ def fromstring(str):
+ # Check for named sections
+ if str[0] == '[':
+ assert ']' in str
+ secs = str[1:str.index(']')].split('.')
+ tail = str[str.index(']')+1:]
+ if tail:
+ assert tail[0] == 'p'
+ paragraph = int(tail[1:])
+ else:
+ paragraph = None
+ indices = secs
+ else:
+ secs = str.split('.')
+ paragraph = None
+ if 'p' in secs[-1]:
+ secs[-1],p = secs[-1].split('p',1)
+ paragraph = int(p)
+ indices = map(int, secs)
+ return SpecIndex(indices, paragraph)
+
+ def __init__(self, indices, paragraph=None):
+ assert len(indices)>0
+ self.indices = tuple(indices)
+ self.paragraph = paragraph
+
+ def __str__(self):
+ s = '.'.join(map(str,self.indices))
+ if self.paragraph is not None:
+ s += '.p%d'%(self.paragraph,)
+ return s
+
+ def __repr__(self):
+ return 'SpecIndex(%s, %s)'%(self.indices, self.paragraph)
+
+ def __cmp__(self, b):
+ return cmp((self.indices,self.paragraph),
+ (b.indices,b.paragraph))
+
+ def __hash__(self):
+ return hash((self.indices,self.paragraph))
+
+ def __sub__(self, indices):
+ def sub(a,b):
+ a = a or 0
+ b = b or 0
+ return abs(a-b)
+ return map(sub,self.indices,indices)
+
+class RefItem:
+ def __init__(self, name, section, filename, path, line):
+ self.name = name
+ self.section = SpecIndex.fromstring(section)
+ self.filename = filename
+ self.path = path
+ self.line = line
+
+ def __str__(self):
+ if self.name is not None:
+ return '%s %s'%(self.name, self.section)
+ else:
+ return '--- %s'%(self.section,)
+
+ def __repr__(self):
+ return 'RefItem(%s, %r, "%s", "%s", %d)'%(self.name,
+ self.section,
+ self.filename,
+ self.path,
+ self.line)
+
+ def __cmp__(self, b):
+        return cmp((self.name,self.section,self.filename,self.path,self.line),
+                   (b.name,b.section,b.filename,b.path,b.line))
+
+ def __hash__(self):
+ return hash((self.name,self.section,self.filename,self.path,self.line))
+
+###
+
+def sorted(l):
+ l = list(l)
+ l.sort()
+ return l
+
+def getRevision(path):
+ import subprocess
+ p = subprocess.Popen(['svn', 'info', path],
+ stdin=open('/dev/null','r'),
+ stdout=subprocess.PIPE)
+ for ln in p.stdout.read(1024).split('\n'):
+ if ln.startswith('Revision:'):
+ return ln.split(':',1)[1].strip()
+ return None
+
+def buildRefTree(references):
+ root = (None, {}, [])
+
+ def getNode(keys):
+ if not keys:
+ return root
+ key,parent = keys[-1],getNode(keys[:-1])
+ node = parent[1].get(key)
+ if node is None:
+ parent[1][key] = node = (key, {}, [])
+ return node
+
+ for ref in references:
+ n = getNode((ref.name,) + ref.section.indices)
+ n[2].append(ref)
+
+ def flatten((key, children, data)):
+ children = sorted(map(flatten,children.values()))
+ return (key, children, sorted(data))
+
+ return flatten(root)
+
+def preorder(node,parents=(),first=True):
+ (key,children,data) = node
+ if first:
+ yield parents+(node,)
+ for c in children:
+ for item in preorder(c, parents+(node,)):
+ yield item
+
+def main():
+ global options
+ from optparse import OptionParser
+ parser = OptionParser("usage: %prog [options] CLANG_ROOT <output-dir>")
+ parser.add_option("", "--debug", dest="debug",
+ help="Print extra debugging output",
+ action="store_true",
+ default=False)
+ (opts, args) = parser.parse_args()
+
+ if len(args) != 2:
+ parser.error("incorrect number of arguments")
+
+ references = []
+ root,outputDir = args
+ if os.path.isdir(root):
+ for (dirpath, dirnames, filenames) in os.walk(root):
+ for filename in filenames:
+ name,ext = os.path.splitext(filename)
+ if ext in ('.c', '.cpp', '.h', '.def'):
+ fullpath = os.path.join(dirpath, filename)
+ references.extend(list(scanFile(fullpath, filename)))
+ else:
+ references.extend(list(scanFile(root, root)))
+
+ refTree = buildRefTree(references)
+
+ specs = {}
+ for ref in references:
+ spec = specs[ref.name] = specs.get(ref.name,{})
+ items = spec[ref.section] = spec.get(ref.section,[])
+ items.append(ref)
+
+ print 'Found %d references.'%(len(references),)
+
+ if opts.debug:
+ pprint(refTree)
+
+ referencesPath = os.path.join(outputDir,'references.html')
+ print 'Writing: %s'%(referencesPath,)
+ f = open(referencesPath,'w')
+ print >>f, '<html><head><title>clang: Specification References</title></head>'
+ print >>f, '<body>'
+ print >>f, '\t<h2>Specification References</h2>'
+ for i,node in enumerate(refTree[1]):
+ specName = node[0] or 'Unknown'
+ print >>f, '<a href="#spec%d">%s</a><br>'%(i,specName)
+ for i,node in enumerate(refTree[1]):
+ specName = node[0] or 'Unknown'
+ print >>f, '<hr>'
+ print >>f, '<a name="spec%d">'%(i,)
+ print >>f, '<h3>Document: %s</h3>'%(specName or 'Unknown',)
+ print >>f, '<table border="1" cellspacing="2" width="80%">'
+ print >>f, '<tr><th width="20%">Name</th><th>References</th></tr>'
+ docData = kDocuments.get(specName)
+ for path in preorder(node,first=False):
+ if not path[-1][2]:
+ continue
+ components = '.'.join([str(p[0]) for p in path[1:]])
+ print >>f, '\t<tr>'
+ tocEntry = None
+ if docData is not None:
+ tocEntry = findClosestTOCEntry(docData, [p[0] for p in path[1:]])
+ if tocEntry is not None:
+ section,name,page = tocEntry
+ # If section is exact print the TOC name
+ if page is not None:
+ linkStr = '<a href="%s#page=%d">%s</a> (pg.%d)'%(docData[0],page,components,page)
+ else:
+ linkStr = components
+ if section == components:
+ print >>f, '\t\t<td valign=top>%s<br>%s</td>'%(linkStr,name)
+ else:
+ print >>f, '\t\t<td valign=top>%s</td>'%(linkStr,)
+ else:
+ print >>f, '\t\t<td valign=top>%s</td>'%(components,)
+ print >>f, '\t\t<td valign=top>'
+ for item in path[-1][2]:
+ # XXX total hack
+ relativePath = item.path[len(root):]
+ if relativePath.startswith('/'):
+ relativePath = relativePath[1:]
+ # XXX this is broken, how does doxygen mangle w/ multiple
+ # refs? Can we just read its map?
+ filename = os.path.basename(relativePath)
+ doxyName = '%s-source.html'%(filename.replace('.','_8'),)
+ # Grrr, why can't doxygen write line number references.
+ lineReference = findClosestLineReference(root,doxyName,item.line)
+ if lineReference is not None:
+ linkStr = 'http://clang.llvm.org/doxygen/%s#%s'%(doxyName,lineReference)
+ else:
+ linkStr = 'http://clang.llvm.org/doxygen/%s'%(doxyName,)
+ if item.section.paragraph is not None:
+ paraText = '&nbsp;(p%d)'%(item.section.paragraph,)
+ else:
+ paraText = ''
+ print >>f,'<a href="%s">%s:%d</a>%s<br>'%(linkStr,relativePath,item.line,paraText)
+ print >>f, '\t\t</td>'
+ print >>f, '\t</tr>'
+ print >>f, '</table>'
+ print >>f, '<hr>'
+ print >>f, 'Generated: %s<br>'%(time.strftime('%Y-%m-%d %H:%M'),)
+ print >>f, 'SVN Revision: %s'%(getRevision(root),)
+ print >>f, '</body>'
+ f.close()
+
+if __name__=='__main__':
+ main()
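The scanner keys everything off two regular expressions: named references such as "C99 6.7.8p14" or "C++ [dcl.init]p9", and bare dotted section numbers. A quick, self-contained check of the named-reference pattern on an invented comment line (the regex is copied verbatim from scanFile above):

    # Exercise the named-reference regex from FindSpecRefs on a made-up
    # source comment; prints "C99 -> 6.7.8p14" and "C++ -> [dcl.init]p9".
    import re

    nameAndSpecRefRE = re.compile(
        r"(C99|C90|C\+\+|H\&S) ((([0-9]+)(\.[0-9]+)*|\[[^]]+\])(p[0-9]+)?)")

    line = "// Checked against C99 6.7.8p14 and C++ [dcl.init]p9."
    for m in nameAndSpecRefRE.finditer(line):
        print("%s -> %s" % (m.group(1), m.group(2)))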
diff --git a/clang/utils/FuzzTest b/clang/utils/FuzzTest
new file mode 100755
index 0000000..0e043df
--- /dev/null
+++ b/clang/utils/FuzzTest
@@ -0,0 +1,350 @@
+#!/usr/bin/env python
+
+"""
+This is a generic fuzz testing tool, see --help for more information.
+"""
+
+import os
+import sys
+import random
+import subprocess
+import itertools
+
+class TestGenerator:
+ def __init__(self, inputs, delete, insert, replace,
+ insert_strings, pick_input):
+ self.inputs = [(s, open(s).read()) for s in inputs]
+
+ self.delete = bool(delete)
+ self.insert = bool(insert)
+ self.replace = bool(replace)
+ self.pick_input = bool(pick_input)
+ self.insert_strings = list(insert_strings)
+
+ self.num_positions = sum([len(d) for _,d in self.inputs])
+ self.num_insert_strings = len(insert_strings)
+ self.num_tests = ((delete + (insert + replace)*self.num_insert_strings)
+ * self.num_positions)
+ self.num_tests += 1
+
+ if self.pick_input:
+ self.num_tests *= self.num_positions
+
+ def position_to_source_index(self, position):
+ for i,(s,d) in enumerate(self.inputs):
+ n = len(d)
+ if position < n:
+ return (i,position)
+ position -= n
+ raise ValueError,'Invalid position.'
+
+ def get_test(self, index):
+ assert 0 <= index < self.num_tests
+
+ picked_position = None
+ if self.pick_input:
+ index,picked_position = divmod(index, self.num_positions)
+ picked_position = self.position_to_source_index(picked_position)
+
+ if index == 0:
+ return ('nothing', None, None, picked_position)
+
+ index -= 1
+ index,position = divmod(index, self.num_positions)
+ position = self.position_to_source_index(position)
+ if self.delete:
+ if index == 0:
+ return ('delete', position, None, picked_position)
+ index -= 1
+
+ index,insert_index = divmod(index, self.num_insert_strings)
+ insert_str = self.insert_strings[insert_index]
+ if self.insert:
+ if index == 0:
+ return ('insert', position, insert_str, picked_position)
+ index -= 1
+
+ assert self.replace
+ assert index == 0
+ return ('replace', position, insert_str, picked_position)
+
+class TestApplication:
+ def __init__(self, tg, test):
+ self.tg = tg
+ self.test = test
+
+ def apply(self):
+ if self.test[0] == 'nothing':
+ pass
+ else:
+ i,j = self.test[1]
+ name,data = self.tg.inputs[i]
+ if self.test[0] == 'delete':
+ data = data[:j] + data[j+1:]
+ elif self.test[0] == 'insert':
+ data = data[:j] + self.test[2] + data[j:]
+ elif self.test[0] == 'replace':
+ data = data[:j] + self.test[2] + data[j+1:]
+ else:
+ raise ValueError,'Invalid test %r' % self.test
+ open(name,'wb').write(data)
+
+ def revert(self):
+ if self.test[0] != 'nothing':
+ i,j = self.test[1]
+ name,data = self.tg.inputs[i]
+ open(name,'wb').write(data)
+
+def quote(str):
+ return '"' + str + '"'
+
+def run_one_test(test_application, index, input_files, args):
+ test = test_application.test
+
+ # Interpolate arguments.
+ options = { 'index' : index,
+ 'inputs' : ' '.join(quote(f) for f in input_files) }
+
+ # Add picked input interpolation arguments, if used.
+ if test[3] is not None:
+ pos = test[3][1]
+ options['picked_input'] = input_files[test[3][0]]
+ options['picked_input_pos'] = pos
+ # Compute the line and column.
+ file_data = test_application.tg.inputs[test[3][0]][1]
+ line = column = 1
+ for i in range(pos):
+ c = file_data[i]
+ if c == '\n':
+ line += 1
+ column = 1
+ else:
+ column += 1
+ options['picked_input_line'] = line
+ options['picked_input_col'] = column
+
+ test_args = [a % options for a in args]
+ if opts.verbose:
+ print '%s: note: executing %r' % (sys.argv[0], test_args)
+
+ stdout = None
+ stderr = None
+ if opts.log_dir:
+ stdout_log_path = os.path.join(opts.log_dir, '%s.out' % index)
+ stderr_log_path = os.path.join(opts.log_dir, '%s.err' % index)
+ stdout = open(stdout_log_path, 'wb')
+ stderr = open(stderr_log_path, 'wb')
+ else:
+ sys.stdout.flush()
+ p = subprocess.Popen(test_args, stdout=stdout, stderr=stderr)
+ p.communicate()
+ exit_code = p.wait()
+
+ test_result = (exit_code == opts.expected_exit_code or
+ exit_code in opts.extra_exit_codes)
+
+ if stdout is not None:
+ stdout.close()
+ stderr.close()
+
+ # Remove the logs for passes, unless logging all results.
+ if not opts.log_all and test_result:
+ os.remove(stdout_log_path)
+ os.remove(stderr_log_path)
+
+ if not test_result:
+ print 'FAIL: %d' % index
+ elif not opts.succinct:
+ print 'PASS: %d' % index
+ return test_result
+
+def main():
+ global opts
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("""%prog [options] ... test command args ...
+
+%prog is a tool for fuzzing inputs and testing them.
+
+The most basic usage is something like:
+
+ $ %prog --file foo.txt ./test.sh
+
+which will run a default list of fuzzing strategies on the input. For each
+fuzzed input, it will overwrite the input files (in place), run the test script,
+then restore the files back to their original contents.
+
+NOTE: You should make sure you have a backup copy of your inputs, in case
+something goes wrong!!!
+
+You can cause the fuzzing to not restore the original files with
+'--no-revert'. Generally this is used with '--test <index>' to run one failing
+test and then leave the fuzzed inputs in place to examine the failure.
+
+For each fuzzed input, %prog will run the test command given on the command
+line. Each argument in the command is subject to string interpolation before
+being executed. The syntax is "%(VARIABLE)FORMAT" where FORMAT is a standard
+printf format, and VARIABLE is one of:
+
+ 'index' - the test index being run
+ 'inputs' - the full list of test inputs
+ 'picked_input' - (with --pick-input) the selected input file
+ 'picked_input_pos' - (with --pick-input) the selected input position
+ 'picked_input_line' - (with --pick-input) the selected input line
+ 'picked_input_col' - (with --pick-input) the selected input column
+
+By default, the script will run forever continually picking new tests to
+run. You can limit the number of tests that are run with '--max-tests <number>',
+and you can run a particular test with '--test <index>'.
+
+You can specify '--stop-on-fail' to stop the script on the first failure
+without reverting the changes.
+
+""")
+ parser.add_option("-v", "--verbose", help="Show more output",
+ action='store_true', dest="verbose", default=False)
+ parser.add_option("-s", "--succinct", help="Reduce amount of output",
+ action="store_true", dest="succinct", default=False)
+
+ group = OptionGroup(parser, "Test Execution")
+ group.add_option("", "--expected-exit-code", help="Set expected exit code",
+ type=int, dest="expected_exit_code",
+ default=0)
+ group.add_option("", "--extra-exit-code",
+ help="Set additional expected exit code",
+ type=int, action="append", dest="extra_exit_codes",
+ default=[])
+ group.add_option("", "--log-dir",
+ help="Capture test logs to an output directory",
+ type=str, dest="log_dir",
+ default=None)
+ group.add_option("", "--log-all",
+ help="Log all outputs (not just failures)",
+ action="store_true", dest="log_all", default=False)
+ parser.add_option_group(group)
+
+ group = OptionGroup(parser, "Input Files")
+ group.add_option("", "--file", metavar="PATH",
+ help="Add an input file to fuzz",
+ type=str, action="append", dest="input_files", default=[])
+ group.add_option("", "--filelist", metavar="LIST",
+ help="Add a list of inputs files to fuzz (one per line)",
+ type=str, action="append", dest="filelists", default=[])
+ parser.add_option_group(group)
+
+ group = OptionGroup(parser, "Fuzz Options")
+ group.add_option("", "--replacement-chars", dest="replacement_chars",
+ help="Characters to insert/replace",
+ default="0{}[]<>\;@#$^%& ")
+ group.add_option("", "--replacement-string", dest="replacement_strings",
+ action="append", help="Add a replacement string to use",
+ default=[])
+ group.add_option("", "--replacement-list", dest="replacement_lists",
+ help="Add a list of replacement strings (one per line)",
+ action="append", default=[])
+ group.add_option("", "--no-delete", help="Don't delete characters",
+ action='store_false', dest="enable_delete", default=True)
+ group.add_option("", "--no-insert", help="Don't insert strings",
+ action='store_false', dest="enable_insert", default=True)
+ group.add_option("", "--no-replace", help="Don't replace strings",
+ action='store_false', dest="enable_replace", default=True)
+ group.add_option("", "--no-revert", help="Don't revert changes",
+ action='store_false', dest="revert", default=True)
+ group.add_option("", "--stop-on-fail", help="Stop on first failure",
+ action='store_true', dest="stop_on_fail", default=False)
+ parser.add_option_group(group)
+
+ group = OptionGroup(parser, "Test Selection")
+ group.add_option("", "--test", help="Run a particular test",
+ type=int, dest="test", default=None, metavar="INDEX")
+ group.add_option("", "--max-tests", help="Maximum number of tests",
+ type=int, dest="max_tests", default=None, metavar="COUNT")
+ group.add_option("", "--pick-input",
+ help="Randomly select an input byte as well as fuzzing",
+ action='store_true', dest="pick_input", default=False)
+ parser.add_option_group(group)
+
+ parser.disable_interspersed_args()
+
+ (opts, args) = parser.parse_args()
+
+ if not args:
+ parser.error("Invalid number of arguments")
+
+ # Collect the list of inputs.
+ input_files = list(opts.input_files)
+ for filelist in opts.filelists:
+ f = open(filelist)
+ try:
+ for ln in f:
+ ln = ln.strip()
+ if ln:
+ input_files.append(ln)
+ finally:
+ f.close()
+ input_files.sort()
+
+ if not input_files:
+ parser.error("No input files!")
+
+ print '%s: note: fuzzing %d files.' % (sys.argv[0], len(input_files))
+
+ # Make sure the log directory exists if used.
+ if opts.log_dir:
+ if not os.path.exists(opts.log_dir):
+ try:
+ os.mkdir(opts.log_dir)
+ except OSError:
+ print "%s: error: log directory couldn't be created!" % (
+ sys.argv[0],)
+ raise SystemExit,1
+
+    # Get the list of insert/replacement strings.
+ replacements = list(opts.replacement_chars)
+ replacements.extend(opts.replacement_strings)
+ for replacement_list in opts.replacement_lists:
+ f = open(replacement_list)
+ try:
+ for ln in f:
+ ln = ln[:-1]
+ if ln:
+ replacements.append(ln)
+ finally:
+ f.close()
+
+    # Uniquify and sort the replacement list.
+ replacements = list(set(replacements))
+ replacements.sort()
+
+ # Create the test generator.
+ tg = TestGenerator(input_files, opts.enable_delete, opts.enable_insert,
+ opts.enable_replace, replacements, opts.pick_input)
+
+ print '%s: note: %d input bytes.' % (sys.argv[0], tg.num_positions)
+ print '%s: note: %d total tests.' % (sys.argv[0], tg.num_tests)
+ if opts.test is not None:
+ it = [opts.test]
+ elif opts.max_tests is not None:
+ it = itertools.imap(random.randrange,
+ itertools.repeat(tg.num_tests, opts.max_tests))
+ else:
+ it = itertools.imap(random.randrange, itertools.repeat(tg.num_tests))
+ for test in it:
+ t = tg.get_test(test)
+
+ if opts.verbose:
+ print '%s: note: running test %d: %r' % (sys.argv[0], test, t)
+ ta = TestApplication(tg, t)
+ try:
+ ta.apply()
+ test_result = run_one_test(ta, test, input_files, args)
+ if not test_result and opts.stop_on_fail:
+ opts.revert = False
+ sys.exit(1)
+ finally:
+ if opts.revert:
+ ta.revert()
+
+ sys.stdout.flush()
+
+if __name__ == '__main__':
+ main()
diff --git a/clang/utils/OptionalTests/Extra/README.txt b/clang/utils/OptionalTests/Extra/README.txt
new file mode 100644
index 0000000..565241b
--- /dev/null
+++ b/clang/utils/OptionalTests/Extra/README.txt
@@ -0,0 +1,3 @@
+This directory is for extra unit-style tests that follow the structure of
+clang/tests but are not portable or otherwise not suitable for inclusion in the
+regular test suite.
diff --git a/clang/utils/OptionalTests/Extra/Runtime/darwin-clang_rt.c b/clang/utils/OptionalTests/Extra/Runtime/darwin-clang_rt.c
new file mode 100644
index 0000000..e527789
--- /dev/null
+++ b/clang/utils/OptionalTests/Extra/Runtime/darwin-clang_rt.c
@@ -0,0 +1,338 @@
+/* This file tests that we can successfully call each compiler-rt function. It is
+ designed to check that the runtime libraries are available for linking and
+ that they contain the expected contents. It is not designed to test the
+ correctness of the individual functions in compiler-rt.
+
+ This test is assumed to be run on a 10.6 machine. The two environment
+ variables below should be set to 10.4 and 10.5 machines which can be directly
+ ssh/rsync'd to in order to actually test the executables can run on the
+ desired targets.
+*/
+
+// RUN: export TENFOUR_X86_MACHINE=localhost
+// RUN: export TENFIVE_X86_MACHINE=localhost
+// RUN: export ARM_MACHINE=localhost
+// RUN: export ARM_SYSROOT=$(xcodebuild -sdk iphoneos -version Path)
+
+// RUN: echo iPhoneOS, ARM, v6, thumb
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv6 -mthumb -c %s -o %t.o
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv6 -mthumb -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: rsync -arv %t $ARM_MACHINE:/tmp/a.out
+// RUN: ssh $ARM_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo iPhoneOS, ARM, v6, no-thumb
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv6 -mno-thumb -c %s -o %t.o
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv6 -mno-thumb -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: rsync -arv %t $ARM_MACHINE:/tmp/a.out
+// RUN: ssh $ARM_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo iPhoneOS, ARM, v7, thumb
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv7 -mthumb -c %s -o %t.o
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv7 -mthumb -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: rsync -arv %t $ARM_MACHINE:/tmp/a.out
+// RUN: ssh $ARM_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo iPhoneOS, ARM, v7, no-thumb
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv7 -mno-thumb -c %s -o %t.o
+// RUN: %clang -isysroot $ARM_SYSROOT -arch armv7 -mno-thumb -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: rsync -arv %t $ARM_MACHINE:/tmp/a.out
+// RUN: ssh $ARM_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo 10.4, i386
+// RUN: %clang -arch i386 -mmacosx-version-min=10.4 -c %s -o %t.o
+// RUN: %clang -arch i386 -mmacosx-version-min=10.4 -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: %t
+// RUN: echo
+
+// RUN: rsync -arv %t $TENFOUR_X86_MACHINE:/tmp/a.out
+// RUN: ssh $TENFOUR_X86_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUX: rsync -arv %t $TENFIVE_X86_MACHINE:/tmp/a.out
+// RUX: ssh $TENFIVE_X86_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo 10.5, i386
+// RUN: %clang -arch i386 -mmacosx-version-min=10.5 -c %s -o %t.o
+// RUN: %clang -arch i386 -mmacosx-version-min=10.5 -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: %t
+// RUN: echo
+
+// RUN: rsync -arv %t $TENFIVE_X86_MACHINE:/tmp/a.out
+// RUN: ssh $TENFIVE_X86_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo 10.6, i386
+// RUN: %clang -arch i386 -mmacosx-version-min=10.6 -c %s -o %t.o
+// RUN: %clang -arch i386 -mmacosx-version-min=10.6 -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: %t
+// RUN: echo
+
+// RUN: echo 10.4, x86_64
+// RUN: %clang -arch x86_64 -mmacosx-version-min=10.4 -c %s -o %t.o
+// RUN: %clang -arch x86_64 -mmacosx-version-min=10.4 -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: %t
+// RUN: echo
+
+// RUN: rsync -arv %t $TENFOUR_X86_MACHINE:/tmp/a.out
+// RUN: ssh $TENFOUR_X86_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: rsync -arv %t $TENFIVE_X86_MACHINE:/tmp/a.out
+// RUN: ssh $TENFIVE_X86_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo 10.5, x86_64
+// RUN: %clang -arch x86_64 -mmacosx-version-min=10.5 -c %s -o %t.o
+// RUN: %clang -arch x86_64 -mmacosx-version-min=10.5 -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: %t
+// RUN: echo
+
+// RUN: rsync -arv %t $TENFIVE_X86_MACHINE:/tmp/a.out
+// RUN: ssh $TENFIVE_X86_MACHINE /tmp/a.out
+// RUN: echo
+
+// RUN: echo 10.6, x86_64
+// RUN: %clang -arch x86_64 -mmacosx-version-min=10.6 -c %s -o %t.o
+// RUN: %clang -arch x86_64 -mmacosx-version-min=10.6 -v -Wl,-t,-v -o %t %t.o 1>&2
+// RUN: %t
+// RUN: echo
+
+#include <assert.h>
+#include <stdio.h>
+#include <sys/utsname.h>
+
+typedef int si_int;
+typedef unsigned su_int;
+
+typedef long long di_int;
+typedef unsigned long long du_int;
+
+// Integral bit manipulation
+
+di_int __ashldi3(di_int a, si_int b); // a << b
+di_int __ashrdi3(di_int a, si_int b); // a >> b arithmetic (sign fill)
+di_int __lshrdi3(di_int a, si_int b); // a >> b logical (zero fill)
+
+si_int __clzsi2(si_int a); // count leading zeros
+si_int __clzdi2(di_int a); // count leading zeros
+si_int __ctzsi2(si_int a); // count trailing zeros
+si_int __ctzdi2(di_int a); // count trailing zeros
+
+si_int __ffsdi2(di_int a); // find least significant 1 bit
+
+si_int __paritysi2(si_int a); // bit parity
+si_int __paritydi2(di_int a); // bit parity
+
+si_int __popcountsi2(si_int a); // bit population
+si_int __popcountdi2(di_int a); // bit population
+
+// Integral arithmetic
+
+di_int __negdi2 (di_int a); // -a
+di_int __muldi3 (di_int a, di_int b); // a * b
+di_int __divdi3 (di_int a, di_int b); // a / b signed
+du_int __udivdi3 (du_int a, du_int b); // a / b unsigned
+di_int __moddi3 (di_int a, di_int b); // a % b signed
+du_int __umoddi3 (du_int a, du_int b); // a % b unsigned
+du_int __udivmoddi4(du_int a, du_int b, du_int* rem); // a / b, *rem = a % b
+
+// Integral arithmetic with trapping overflow
+
+si_int __absvsi2(si_int a); // abs(a)
+di_int __absvdi2(di_int a); // abs(a)
+
+si_int __negvsi2(si_int a); // -a
+di_int __negvdi2(di_int a); // -a
+
+si_int __addvsi3(si_int a, si_int b); // a + b
+di_int __addvdi3(di_int a, di_int b); // a + b
+
+si_int __subvsi3(si_int a, si_int b); // a - b
+di_int __subvdi3(di_int a, di_int b); // a - b
+
+si_int __mulvsi3(si_int a, si_int b); // a * b
+di_int __mulvdi3(di_int a, di_int b); // a * b
+
+// Integral comparison: a < b -> 0
+// a == b -> 1
+// a > b -> 2
+
+si_int __cmpdi2 (di_int a, di_int b);
+si_int __ucmpdi2(du_int a, du_int b);
+
+// Integral / floating point conversion
+
+di_int __fixsfdi( float a);
+di_int __fixdfdi( double a);
+di_int __fixxfdi(long double a);
+
+su_int __fixunssfsi( float a);
+su_int __fixunsdfsi( double a);
+su_int __fixunsxfsi(long double a);
+
+du_int __fixunssfdi( float a);
+du_int __fixunsdfdi( double a);
+du_int __fixunsxfdi(long double a);
+
+float __floatdisf(di_int a);
+double __floatdidf(di_int a);
+long double __floatdixf(di_int a);
+
+float __floatundisf(du_int a);
+double __floatundidf(du_int a);
+long double __floatundixf(du_int a);
+
+// Floating point raised to integer power
+
+float __powisf2( float a, si_int b); // a ^ b
+double __powidf2( double a, si_int b); // a ^ b
+long double __powixf2(long double a, si_int b); // a ^ b
+
+// Complex arithmetic
+
+// (a + ib) * (c + id)
+
+ float _Complex __mulsc3( float a, float b, float c, float d);
+ double _Complex __muldc3(double a, double b, double c, double d);
+long double _Complex __mulxc3(long double a, long double b,
+ long double c, long double d);
+
+// (a + ib) / (c + id)
+
+ float _Complex __divsc3( float a, float b, float c, float d);
+ double _Complex __divdc3(double a, double b, double c, double d);
+long double _Complex __divxc3(long double a, long double b,
+ long double c, long double d);
+
+#ifndef __arm
+#define HAS_LONG_DOUBLE
+#endif
+
+int main(int argc, char **argv) {
+ du_int du_tmp;
+ struct utsname name;
+#ifdef __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
+ const char *target_name = "OS X";
+ unsigned target_version = __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__;
+ unsigned target_maj = target_version / 100;
+ unsigned target_min = (target_version / 10) % 10;
+ unsigned target_micro = target_version % 10;
+#else
+ const char *target_name = "iPhoneOS";
+ unsigned target_version = __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__;
+ unsigned target_maj = target_version / 10000;
+ unsigned target_min = (target_version / 100) % 100;
+ unsigned target_micro = target_version % 100;
+#endif
+
+ if (uname(&name))
+ return 1;
+
+ fprintf(stderr, "%s: clang_rt test:\n", argv[0]);
+ fprintf(stderr, " target : %s %d.%d.%d\n\n", target_name,
+ target_maj, target_min, target_micro);
+ fprintf(stderr, " sysname : %s\n", name.sysname);
+ fprintf(stderr, " nodename: %s\n", name.nodename);
+ fprintf(stderr, " release : %s\n", name.release);
+ fprintf(stderr, " version : %s\n", name.version);
+ fprintf(stderr, " machine : %s\n", name.machine);
+
+ assert(__ashldi3(1, 1) == 2);
+ assert(__ashrdi3(2, 1) == 1);
+ assert(__lshrdi3(2, 1) == 1);
+ assert(__clzsi2(1) == 31);
+ assert(__clzdi2(1) == 63);
+ assert(__ctzsi2(2) == 1);
+ assert(__ctzdi2(2) == 1);
+ assert(__ffsdi2(12) == 3);
+ assert(__paritysi2(13) == 1);
+ assert(__paritydi2(13) == 1);
+ assert(__popcountsi2(13) == 3);
+ assert(__popcountdi2(13) == 3);
+ assert(__negdi2(3) == -3);
+ assert(__muldi3(2,2) == 4);
+ assert(__divdi3(-4,2) == -2);
+ assert(__udivdi3(4,2) == 2);
+ assert(__moddi3(3,2) == 1);
+ assert(__umoddi3(3,2) == 1);
+ assert(__udivmoddi4(5,2,&du_tmp) == 2 && du_tmp == 1);
+ assert(__absvsi2(-2) == 2);
+ assert(__absvdi2(-2) == 2);
+ assert(__negvsi2(2) == -2);
+ assert(__negvdi2(2) == -2);
+ assert(__addvsi3(2, 3) == 5);
+ assert(__addvdi3(2, 3) == 5);
+ assert(__subvsi3(2, 3) == -1);
+ assert(__subvdi3(2, 3) == -1);
+ assert(__mulvsi3(2, 3) == 6);
+ assert(__mulvdi3(2, 3) == 6);
+ assert(__cmpdi2(3, 2) == 2);
+ assert(__ucmpdi2(3, 2) == 2);
+ assert(__fixsfdi(2.0) == 2);
+ assert(__fixdfdi(2.0) == 2);
+ assert(__fixunssfsi(2.0) == 2);
+ assert(__fixunsdfsi(2.0) == 2);
+ assert(__fixunssfdi(2.0) == 2);
+ assert(__fixunsdfdi(2.0) == 2);
+ assert(__floatdisf(2) == 2.0);
+ assert(__floatdidf(2) == 2.0);
+ assert(__floatundisf(2) == 2.0);
+ assert(__floatundidf(2) == 2.0);
+ assert(__powisf2(2.0, 2) == 4.0);
+ assert(__powidf2(2.0, 2) == 4.0);
+
+ // FIXME: Clang/LLVM seems to be miscompiling _Complex currently, probably an
+ // ABI issue.
+#ifndef __arm
+ {
+ _Complex float a = __mulsc3(1.0, 2.0, 4.0, 8.0);
+ _Complex float b = (-12.0 + 16.0j);
+ fprintf(stderr, "a: (%f + %f), b: (%f + %f)\n",
+ __real a, __imag a, __real b, __imag b);
+ }
+ assert(__mulsc3(1.0, 2.0, 4.0, 8.0) == (-12.0 + 16.0j));
+ assert(__muldc3(1.0, 2.0, 4.0, 8.0) == (-12.0 + 16.0j));
+ assert(__divsc3(1.0, 2.0, 4.0, 8.0) == (0.25 + 0j));
+ assert(__divdc3(1.0, 2.0, 4.0, 8.0) == (0.25 + 0j));
+#endif
+
+#ifdef HAS_LONG_DOUBLE
+ assert(__divxc3(1.0, 2.0, 4.0, 8.0) == (0.25 + 0j));
+ assert(__fixunsxfdi(2.0) == 2);
+ assert(__fixunsxfsi(2.0) == 2);
+ assert(__fixxfdi(2.0) == 2);
+ assert(__floatdixf(2) == 2.0);
+ assert(__floatundixf(2) == 2);
+ assert(__mulxc3(1.0, 2.0, 4.0, 8.0) == (-12.0 + 16.0j));
+ assert(__powixf2(2.0, 2) == 4.0);
+#endif
+
+  // Test some calls which are used on armv6/thumb. The calls/prototypes are
+  // fake; it would be nice to test correctness, but mostly we just want to
+  // make sure we resolve symbols correctly.
+#if defined(__arm) && defined(__ARM_ARCH_6K__) && defined(__thumb__)
+ if (argc == 100) {
+ extern void __restore_vfp_d8_d15_regs(void), __save_vfp_d8_d15_regs(void);
+ extern void __switch8(void), __switchu8(void),
+ __switch16(void), __switch32(void);
+ extern void __addsf3vfp(void);
+
+ __addsf3vfp();
+ __restore_vfp_d8_d15_regs();
+ __save_vfp_d8_d15_regs();
+ __switch8();
+ __switchu8();
+ __switch16();
+ __switch32();
+ }
+#endif
+
+ fprintf(stderr, " OK!\n");
+
+ return 0;
+}
diff --git a/clang/utils/OptionalTests/README.txt b/clang/utils/OptionalTests/README.txt
new file mode 100644
index 0000000..4ffdb3b
--- /dev/null
+++ b/clang/utils/OptionalTests/README.txt
@@ -0,0 +1,4 @@
+This is a dumping ground for additional tests which do not fit cleanly into the
+clang regression tests. For example, tests which are not portable, require
+additional software or configuration, take an excessive time to run, or are
+flaky can be kept here.
diff --git a/clang/utils/OptionalTests/lit.cfg b/clang/utils/OptionalTests/lit.cfg
new file mode 100644
index 0000000..592c424
--- /dev/null
+++ b/clang/utils/OptionalTests/lit.cfg
@@ -0,0 +1,26 @@
+# -*- Python -*-
+
+# Configuration file for the 'lit' test runner.
+
+# Load the main clang test config so we can leech its clang finding logic.
+lit.load_config(config, os.path.join(os.path.dirname(__file__),
+ '..', '..', 'test', 'lit.cfg'))
+assert config.clang, "Failed to set clang!?"
+
+# name: The name of this test suite.
+config.name = 'Clang-Opt-Tests'
+
+# suffixes: A list of file extensions to treat as test files.
+config.suffixes = []
+
+# Reset these from the Clang config.
+
+# test_source_root: The root path where tests are located.
+config.test_source_root = os.path.dirname(__file__)
+
+# test_exec_root: The root path where tests should be run.
+clang_obj_root = getattr(config, 'clang_obj_root', None)
+if clang_obj_root is not None:
+ config.test_exec_root = os.path.join(clang_obj_root, 'utils',
+ 'OptionalTests')
+
diff --git a/clang/utils/SummarizeErrors b/clang/utils/SummarizeErrors
new file mode 100755
index 0000000..b6e9122
--- /dev/null
+++ b/clang/utils/SummarizeErrors
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+import os, sys, re
+
+class multidict:
+ def __init__(self, elts=()):
+ self.data = {}
+ for key,value in elts:
+ self[key] = value
+
+ def __getitem__(self, item):
+ return self.data[item]
+ def __setitem__(self, key, value):
+ if key in self.data:
+ self.data[key].append(value)
+ else:
+ self.data[key] = [value]
+ def items(self):
+ return self.data.items()
+ def values(self):
+ return self.data.values()
+ def keys(self):
+ return self.data.keys()
+ def __len__(self):
+ return len(self.data)
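+# Illustrative multidict behaviour: repeated assignment appends, so
+#   m = multidict(); m['k'] = 1; m['k'] = 2  =>  m['k'] == [1, 2]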
+
+kDiagnosticRE = re.compile(': (error|warning): (.*)')
+kAssertionRE = re.compile('Assertion failed: (.*, function .*, file .*, line [0-9]+\\.)')
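+# For example (illustrative), a diagnostic line such as
+#   "Parser.cpp:12:3: error: unknown type name 'foo'"
+# matches kDiagnosticRE with group(1) == 'error' and group(2) == the message.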
+
+def readInfo(path, opts):
+ lastProgress = [-100,0]
+ def progress(pos):
+ pct = (100. * pos) / (size * 2)
+ if (pct - lastProgress[0]) >= 10:
+ lastProgress[0] = pct
+ print '%d/%d = %.2f%%' % (pos, size*2, pct)
+
+ f = open(path)
+ data = f.read()
+ f.close()
+
+ if opts.truncate != -1:
+ data = data[:opts.truncate]
+
+ size = len(data)
+ warnings = multidict()
+ errors = multidict()
+ for m in kDiagnosticRE.finditer(data):
+ progress(m.end())
+ if m.group(1) == 'error':
+ d = errors
+ else:
+ d = warnings
+ d[m.group(2)] = m
+ warnings = warnings.items()
+ errors = errors.items()
+ assertions = multidict()
+ for m in kAssertionRE.finditer(data):
+ print '%d/%d = %.2f%%' % (size + m.end(), size, (float(m.end()) / (size*2)) * 100.)
+ assertions[m.group(1)] = m
+ assertions = assertions.items()
+
+ # Manual scan for stack traces
+ aborts = multidict()
+ if 0:
+ prevLine = None
+ lnIter = iter(data.split('\n'))
+ for ln in lnIter:
+ m = kStackDumpLineRE.match(ln)
+ if m:
+ stack = [m.group(2)]
+ for ln in lnIter:
+ m = kStackDumpLineRE.match(ln)
+ if not m:
+ break
+ stack.append(m.group(2))
+ if prevLine is None or not kAssertionRE.match(prevLine):
+ aborts[tuple(stack)] = stack
+ prevLine = ln
+
+ sections = [
+ (warnings, 'Warnings'),
+ (errors, 'Errors'),
+ (assertions, 'Assertions'),
+ (aborts.items(), 'Aborts'),
+ ]
+
+ if opts.ascending:
+ sections.reverse()
+
+ for l,title in sections:
+ l.sort(key = lambda (a,b): -len(b))
+ if l:
+ print '-- %d %s (%d kinds) --' % (sum([len(b) for a,b in l]), title, len(l))
+ for name,elts in l:
+ print '%5d:' % len(elts), name
+
+def main():
+ global options
+ from optparse import OptionParser
+ parser = OptionParser("usage: %prog [options] {inputs}")
+ parser.add_option("", "--ascending", dest="ascending",
+ help="Print output in ascending order of severity.",
+ action="store_true", default=False)
+ parser.add_option("", "--truncate", dest="truncate",
+ help="Truncate input file (for testing).",
+ type=int, action="store", default=-1)
+ (opts, args) = parser.parse_args()
+
+ if not args:
+ parser.error('No inputs specified')
+
+ for arg in args:
+ readInfo(arg, opts)
+
+if __name__=='__main__':
+ main()
diff --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt
new file mode 100644
index 0000000..0d87921
--- /dev/null
+++ b/clang/utils/TableGen/CMakeLists.txt
@@ -0,0 +1,13 @@
+set(LLVM_REQUIRES_EH 1)
+set(LLVM_REQUIRES_RTTI 1)
+set(LLVM_LINK_COMPONENTS Support)
+
+add_tablegen(clang-tblgen CLANG
+ ClangASTNodesEmitter.cpp
+ ClangAttrEmitter.cpp
+ ClangDiagnosticsEmitter.cpp
+ ClangSACheckersEmitter.cpp
+ NeonEmitter.cpp
+ OptParserEmitter.cpp
+ TableGen.cpp
+ )
diff --git a/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/clang/utils/TableGen/ClangASTNodesEmitter.cpp
new file mode 100644
index 0000000..d9d5a3c
--- /dev/null
+++ b/clang/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -0,0 +1,168 @@
+//=== ClangASTNodesEmitter.cpp - Generate Clang AST node tables -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang AST node tables
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangASTNodesEmitter.h"
+#include <set>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Statement Node Tables (.inc file) generation.
+//===----------------------------------------------------------------------===//
+
+// Returns the first and last non-abstract subrecords
+// Called recursively to ensure that nodes remain contiguous
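+// Illustrative sketch of the emitted shape for a Stmt hierarchy (example
+// names only, guard macros omitted):
+//   COMPOUNDSTMT(CompoundStmt, Stmt)
+//   ABSTRACT_STMT(EXPR(Expr, Stmt))
+//   ...
+//   STMT_RANGE(Expr, FirstExprNode, LastExprNode)
+//   LAST_STMT_RANGE(Stmt, FirstStmtNode, LastStmtNode)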
+std::pair<Record *, Record *> ClangASTNodesEmitter::EmitNode(
+ const ChildMap &Tree,
+ raw_ostream &OS,
+ Record *Base) {
+ std::string BaseName = macroName(Base->getName());
+
+ ChildIterator i = Tree.lower_bound(Base), e = Tree.upper_bound(Base);
+
+ Record *First = 0, *Last = 0;
+ // This might be the pseudo-node for Stmt; don't assume it has an Abstract
+ // bit
+ if (Base->getValue("Abstract") && !Base->getValueAsBit("Abstract"))
+ First = Last = Base;
+
+ for (; i != e; ++i) {
+ Record *R = i->second;
+ bool Abstract = R->getValueAsBit("Abstract");
+ std::string NodeName = macroName(R->getName());
+
+ OS << "#ifndef " << NodeName << "\n";
+ OS << "# define " << NodeName << "(Type, Base) "
+ << BaseName << "(Type, Base)\n";
+ OS << "#endif\n";
+
+ if (Abstract)
+ OS << "ABSTRACT_" << macroName(Root.getName()) << "(" << NodeName << "("
+ << R->getName() << ", " << baseName(*Base) << "))\n";
+ else
+ OS << NodeName << "(" << R->getName() << ", "
+ << baseName(*Base) << ")\n";
+
+ if (Tree.find(R) != Tree.end()) {
+ const std::pair<Record *, Record *> &Result
+ = EmitNode(Tree, OS, R);
+ if (!First && Result.first)
+ First = Result.first;
+ if (Result.second)
+ Last = Result.second;
+ } else {
+ if (!Abstract) {
+ Last = R;
+
+ if (!First)
+ First = R;
+ }
+ }
+
+ OS << "#undef " << NodeName << "\n\n";
+ }
+
+ if (First) {
+ assert (Last && "Got a first node but not a last node for a range!");
+ if (Base == &Root)
+ OS << "LAST_" << macroName(Root.getName()) << "_RANGE(";
+ else
+ OS << macroName(Root.getName()) << "_RANGE(";
+ OS << Base->getName() << ", " << First->getName() << ", "
+ << Last->getName() << ")\n\n";
+ }
+
+ return std::make_pair(First, Last);
+}
+
+void ClangASTNodesEmitter::run(raw_ostream &OS) {
+ // Write the preamble
+ OS << "#ifndef ABSTRACT_" << macroName(Root.getName()) << "\n";
+ OS << "# define ABSTRACT_" << macroName(Root.getName()) << "(Type) Type\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define LAST_"
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last) "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
+ OS << "#endif\n\n";
+
+ // Emit statements
+ const std::vector<Record*> Stmts
+ = Records.getAllDerivedDefinitions(Root.getName());
+
+ ChildMap Tree;
+
+ for (unsigned i = 0, e = Stmts.size(); i != e; ++i) {
+ Record *R = Stmts[i];
+
+ if (R->getValue("Base"))
+ Tree.insert(std::make_pair(R->getValueAsDef("Base"), R));
+ else
+ Tree.insert(std::make_pair(&Root, R));
+ }
+
+ EmitNode(Tree, OS, &Root);
+
+ OS << "#undef " << macroName(Root.getName()) << "\n";
+ OS << "#undef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef ABSTRACT_" << macroName(Root.getName()) << "\n";
+}
+
+void ClangDeclContextEmitter::run(raw_ostream &OS) {
+ // FIXME: Find a .td file format to allow for this to be represented better.
+
+ OS << "#ifndef DECL_CONTEXT\n";
+ OS << "# define DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef DECL_CONTEXT_BASE\n";
+ OS << "# define DECL_CONTEXT_BASE(DECL) DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ typedef std::set<Record*> RecordSet;
+ typedef std::vector<Record*> RecordVector;
+
+ RecordVector DeclContextsVector
+ = Records.getAllDerivedDefinitions("DeclContext");
+ RecordVector Decls = Records.getAllDerivedDefinitions("Decl");
+ RecordSet DeclContexts (DeclContextsVector.begin(), DeclContextsVector.end());
+
+ for (RecordVector::iterator i = Decls.begin(), e = Decls.end(); i != e; ++i) {
+ Record *R = *i;
+
+ if (R->getValue("Base")) {
+ Record *B = R->getValueAsDef("Base");
+ if (DeclContexts.find(B) != DeclContexts.end()) {
+ OS << "DECL_CONTEXT_BASE(" << B->getName() << ")\n";
+ DeclContexts.erase(B);
+ }
+ }
+ }
+
+  // To keep the output order deterministic, we iterate the RecordVector here
+  // rather than the RecordSet.
+ for (RecordVector::iterator
+ i = DeclContextsVector.begin(), e = DeclContextsVector.end();
+ i != e; ++i)
+ if (DeclContexts.find(*i) != DeclContexts.end())
+ OS << "DECL_CONTEXT(" << (*i)->getName() << ")\n";
+
+ OS << "#undef DECL_CONTEXT\n";
+ OS << "#undef DECL_CONTEXT_BASE\n";
+}
diff --git a/clang/utils/TableGen/ClangASTNodesEmitter.h b/clang/utils/TableGen/ClangASTNodesEmitter.h
new file mode 100644
index 0000000..edd9316
--- /dev/null
+++ b/clang/utils/TableGen/ClangASTNodesEmitter.h
@@ -0,0 +1,84 @@
+//===- ClangASTNodesEmitter.h - Generate Clang AST node tables -*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang AST node tables
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGAST_EMITTER_H
+#define CLANGAST_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+#include "llvm/TableGen/Record.h"
+#include <string>
+#include <cctype>
+#include <map>
+
+namespace llvm {
+
+/// ClangASTNodesEmitter - The top-level class emits .inc files containing
+/// declarations of Clang statements.
+///
+class ClangASTNodesEmitter : public TableGenBackend {
+ // A map from a node to each of its derived nodes.
+ typedef std::multimap<Record*, Record*> ChildMap;
+ typedef ChildMap::const_iterator ChildIterator;
+
+ RecordKeeper &Records;
+ Record Root;
+ const std::string &BaseSuffix;
+
+ // Create a macro-ized version of a name
+ static std::string macroName(std::string S) {
+ for (unsigned i = 0; i < S.size(); ++i)
+ S[i] = std::toupper(S[i]);
+
+ return S;
+ }
+
+ // Return the name to be printed in the base field. Normally this is
+ // the record's name plus the base suffix, but if it is the root node and
+ // the suffix is non-empty, it's just the suffix.
+ std::string baseName(Record &R) {
+ if (&R == &Root && !BaseSuffix.empty())
+ return BaseSuffix;
+
+ return R.getName() + BaseSuffix;
+ }
+
+ std::pair<Record *, Record *> EmitNode (const ChildMap &Tree, raw_ostream& OS,
+ Record *Base);
+public:
+ explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
+ const std::string &S)
+ : Records(R), Root(N, SMLoc(), R), BaseSuffix(S)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+/// ClangDeclContextEmitter - Emits an addendum to a .inc file to enumerate the
+/// clang declaration contexts.
+///
+class ClangDeclContextEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangDeclContextEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp
new file mode 100644
index 0000000..7951fc4
--- /dev/null
+++ b/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -0,0 +1,1092 @@
+//===- ClangAttrEmitter.cpp - Generate Clang attribute handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangAttrEmitter.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <cctype>
+#include <set>
+
+using namespace llvm;
+
+static const std::vector<StringRef>
+getValueAsListOfStrings(Record &R, StringRef FieldName) {
+ ListInit *List = R.getValueAsListInit(FieldName);
+ assert (List && "Got a null ListInit");
+
+ std::vector<StringRef> Strings;
+ Strings.reserve(List->getSize());
+
+ for (ListInit::const_iterator i = List->begin(), e = List->end();
+ i != e;
+ ++i) {
+ assert(*i && "Got a null element in a ListInit");
+ if (StringInit *S = dynamic_cast<StringInit *>(*i))
+ Strings.push_back(S->getValue());
+ else
+ assert(false && "Got a non-string, non-code element in a ListInit");
+ }
+
+ return Strings;
+}
+
+static std::string ReadPCHRecord(StringRef type) {
+ return StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "GetLocalDeclAs<"
+ + std::string(type, 0, type.size()-1) + ">(F, Record[Idx++])")
+ .Case("QualType", "getLocalType(F, Record[Idx++])")
+ .Case("Expr *", "ReadSubExpr()")
+ .Case("IdentifierInfo *", "GetIdentifierInfo(F, Record, Idx)")
+ .Case("SourceLocation", "ReadSourceLocation(F, Record, Idx)")
+ .Default("Record[Idx++]");
+}
+
+// Assumes that the way to get the value is an SA->get<Name>() accessor
+static std::string WritePCHRecord(StringRef type, StringRef name) {
+ return StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "AddDeclRef(" + std::string(name) +
+ ", Record);\n")
+ .Case("QualType", "AddTypeRef(" + std::string(name) + ", Record);\n")
+ .Case("Expr *", "AddStmt(" + std::string(name) + ");\n")
+ .Case("IdentifierInfo *",
+ "AddIdentifierRef(" + std::string(name) + ", Record);\n")
+ .Case("SourceLocation",
+ "AddSourceLocation(" + std::string(name) + ", Record);\n")
+ .Default("Record.push_back(" + std::string(name) + ");\n");
+}
+
+// Normalize attribute name by removing leading and trailing
+// underscores. For example, __foo, foo__, __foo__ would
+// become foo.
+static StringRef NormalizeAttrName(StringRef AttrName) {
+ if (AttrName.startswith("__"))
+ AttrName = AttrName.substr(2, AttrName.size());
+
+ if (AttrName.endswith("__"))
+ AttrName = AttrName.substr(0, AttrName.size() - 2);
+
+ return AttrName;
+}
+
+// Normalize attribute spelling only if the spelling has both leading
+// and trailing underscores. For example, __ms_struct__ will be
+// normalized to "ms_struct"; __cdecl will remain intact.
+static StringRef NormalizeAttrSpelling(StringRef AttrSpelling) {
+ if (AttrSpelling.startswith("__") && AttrSpelling.endswith("__")) {
+ AttrSpelling = AttrSpelling.substr(2, AttrSpelling.size() - 4);
+ }
+
+ return AttrSpelling;
+}
+
+namespace {
+ class Argument {
+ std::string lowerName, upperName;
+ StringRef attrName;
+
+ public:
+ Argument(Record &Arg, StringRef Attr)
+ : lowerName(Arg.getValueAsString("Name")), upperName(lowerName),
+ attrName(Attr) {
+ if (!lowerName.empty()) {
+ lowerName[0] = std::tolower(lowerName[0]);
+ upperName[0] = std::toupper(upperName[0]);
+ }
+ }
+ virtual ~Argument() {}
+
+ StringRef getLowerName() const { return lowerName; }
+ StringRef getUpperName() const { return upperName; }
+ StringRef getAttrName() const { return attrName; }
+
+ // These functions print the argument contents formatted in different ways.
+ virtual void writeAccessors(raw_ostream &OS) const = 0;
+ virtual void writeAccessorDefinitions(raw_ostream &OS) const {}
+ virtual void writeCloneArgs(raw_ostream &OS) const = 0;
+ virtual void writeTemplateInstantiationArgs(raw_ostream &OS) const = 0;
+ virtual void writeTemplateInstantiation(raw_ostream &OS) const {}
+ virtual void writeCtorBody(raw_ostream &OS) const {}
+ virtual void writeCtorInitializers(raw_ostream &OS) const = 0;
+ virtual void writeCtorParameters(raw_ostream &OS) const = 0;
+ virtual void writeDeclarations(raw_ostream &OS) const = 0;
+ virtual void writePCHReadArgs(raw_ostream &OS) const = 0;
+ virtual void writePCHReadDecls(raw_ostream &OS) const = 0;
+ virtual void writePCHWrite(raw_ostream &OS) const = 0;
+ virtual void writeValue(raw_ostream &OS) const = 0;
+ };
+
+ class SimpleArgument : public Argument {
+ std::string type;
+
+ public:
+ SimpleArgument(Record &Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), type(T)
+ {}
+
+ std::string getType() const { return type; }
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << type << " " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << type << " " << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ std::string read = ReadPCHRecord(type);
+ OS << " " << type << " " << getLowerName() << " = " << read << ";\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " " << WritePCHRecord(type, "SA->get" +
+ std::string(getUpperName()) + "()");
+ }
+ void writeValue(raw_ostream &OS) const {
+ if (type == "FunctionDecl *") {
+ OS << "\" << get" << getUpperName() << "()->getNameInfo().getAsString() << \"";
+ } else if (type == "IdentifierInfo *") {
+ OS << "\" << get" << getUpperName() << "()->getName() << \"";
+ } else if (type == "QualType") {
+ OS << "\" << get" << getUpperName() << "().getAsString() << \"";
+ } else if (type == "SourceLocation") {
+ OS << "\" << get" << getUpperName() << "().getRawEncoding() << \"";
+ } else {
+ OS << "\" << get" << getUpperName() << "() << \"";
+ }
+ }
+ };
+
+ class StringArgument : public Argument {
+ public:
+ StringArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " llvm::StringRef get" << getUpperName() << "() const {\n";
+ OS << " return llvm::StringRef(" << getLowerName() << ", "
+ << getLowerName() << "Length);\n";
+ OS << " }\n";
+ OS << " unsigned get" << getUpperName() << "Length() const {\n";
+ OS << " return " << getLowerName() << "Length;\n";
+ OS << " }\n";
+ OS << " void set" << getUpperName()
+ << "(ASTContext &C, llvm::StringRef S) {\n";
+ OS << " " << getLowerName() << "Length = S.size();\n";
+ OS << " this->" << getLowerName() << " = new (C, 1) char ["
+ << getLowerName() << "Length];\n";
+ OS << " std::memcpy(this->" << getLowerName() << ", S.data(), "
+ << getLowerName() << "Length);\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "get" << getUpperName() << "()";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ << ".data(), " << getLowerName() << "Length);";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "Length(" << getUpperName() << ".size()),"
+ << getLowerName() << "(new (Ctx, 1) char[" << getLowerName()
+ << "Length])";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "llvm::StringRef " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "unsigned " << getLowerName() << "Length;\n";
+ OS << "char *" << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " std::string " << getLowerName()
+ << "= ReadString(Record, Idx);\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " AddString(SA->get" << getUpperName() << "(), Record);\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\\\"\" << get" << getUpperName() << "() << \"\\\"";
+ }
+ };
+
+ class AlignedArgument : public Argument {
+ public:
+ AlignedArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " bool is" << getUpperName() << "Dependent() const;\n";
+
+ OS << " unsigned get" << getUpperName() << "(ASTContext &Ctx) const;\n";
+
+ OS << " bool is" << getUpperName() << "Expr() const {\n";
+ OS << " return is" << getLowerName() << "Expr;\n";
+ OS << " }\n";
+
+ OS << " Expr *get" << getUpperName() << "Expr() const {\n";
+ OS << " assert(is" << getLowerName() << "Expr);\n";
+ OS << " return " << getLowerName() << "Expr;\n";
+ OS << " }\n";
+
+ OS << " TypeSourceInfo *get" << getUpperName() << "Type() const {\n";
+ OS << " assert(!is" << getLowerName() << "Expr);\n";
+ OS << " return " << getLowerName() << "Type;\n";
+ OS << " }";
+ }
+ void writeAccessorDefinitions(raw_ostream &OS) const {
+ OS << "bool " << getAttrName() << "Attr::is" << getUpperName()
+ << "Dependent() const {\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return " << getLowerName() << "Expr && (" << getLowerName()
+ << "Expr->isValueDependent() || " << getLowerName()
+ << "Expr->isTypeDependent());\n";
+ OS << " else\n";
+ OS << " return " << getLowerName()
+ << "Type->getType()->isDependentType();\n";
+ OS << "}\n";
+
+ // FIXME: Do not do the calculation here
+ // FIXME: Handle types correctly
+ // A null pointer means maximum alignment
+ // FIXME: Load the platform-specific maximum alignment, rather than
+ // 16, the x86 max.
+ OS << "unsigned " << getAttrName() << "Attr::get" << getUpperName()
+ << "(ASTContext &Ctx) const {\n";
+ OS << " assert(!is" << getUpperName() << "Dependent());\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return (" << getLowerName() << "Expr ? " << getLowerName()
+ << "Expr->EvaluateKnownConstInt(Ctx).getZExtValue() : 16)"
+ << "* Ctx.getCharWidth();\n";
+ OS << " else\n";
+ OS << " return 0; // FIXME\n";
+ OS << "}\n";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr, is" << getLowerName()
+ << "Expr ? static_cast<void*>(" << getLowerName()
+ << "Expr) : " << getLowerName()
+ << "Type";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ // FIXME: move the definition in Sema::InstantiateAttrs to here.
+ // In the meantime, aligned attributes are cloned.
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " " << getLowerName() << "Expr = reinterpret_cast<Expr *>("
+ << getUpperName() << ");\n";
+ OS << " else\n";
+ OS << " " << getLowerName()
+ << "Type = reinterpret_cast<TypeSourceInfo *>(" << getUpperName()
+ << ");";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr(Is" << getUpperName() << "Expr)";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "bool Is" << getUpperName() << "Expr, void *" << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "bool is" << getLowerName() << "Expr;\n";
+ OS << "union {\n";
+ OS << "Expr *" << getLowerName() << "Expr;\n";
+ OS << "TypeSourceInfo *" << getLowerName() << "Type;\n";
+ OS << "};";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr, " << getLowerName() << "Ptr";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " bool is" << getLowerName() << "Expr = Record[Idx++];\n";
+ OS << " void *" << getLowerName() << "Ptr;\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " " << getLowerName() << "Ptr = ReadExpr(F);\n";
+ OS << " else\n";
+ OS << " " << getLowerName()
+ << "Ptr = GetTypeSourceInfo(F, Record, Idx);\n";
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " Record.push_back(SA->is" << getUpperName() << "Expr());\n";
+ OS << " if (SA->is" << getUpperName() << "Expr())\n";
+ OS << " AddStmt(SA->get" << getUpperName() << "Expr());\n";
+ OS << " else\n";
+ OS << " AddTypeSourceInfo(SA->get" << getUpperName()
+ << "Type(), Record);\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\" << get" << getUpperName() << "(Ctx) << \"";
+ }
+ };
+
+ class VariadicArgument : public Argument {
+ std::string type;
+
+ public:
+ VariadicArgument(Record &Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), type(T)
+ {}
+
+ std::string getType() const { return type; }
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " typedef " << type << "* " << getLowerName() << "_iterator;\n";
+ OS << " " << getLowerName() << "_iterator " << getLowerName()
+ << "_begin() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }\n";
+ OS << " " << getLowerName() << "_iterator " << getLowerName()
+ << "_end() const {\n";
+ OS << " return " << getLowerName() << " + " << getLowerName()
+ << "Size;\n";
+ OS << " }\n";
+ OS << " unsigned " << getLowerName() << "_size() const {\n"
+ << " return " << getLowerName() << "Size;\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName() << ", " << getLowerName() << "Size";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ // This isn't elegant, but we have to go through public methods...
+ OS << "A->" << getLowerName() << "_begin(), "
+ << "A->" << getLowerName() << "_size()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ // FIXME: memcpy is not safe on non-trivial types.
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ << ", " << getLowerName() << "Size * sizeof(" << getType() << "));\n";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "Size(" << getUpperName() << "Size), "
+ << getLowerName() << "(new (Ctx, 16) " << getType() << "["
+ << getLowerName() << "Size])";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << getType() << " *" << getUpperName() << ", unsigned "
+ << getUpperName() << "Size";
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << " unsigned " << getLowerName() << "Size;\n";
+ OS << " " << getType() << " *" << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " unsigned " << getLowerName() << "Size = Record[Idx++];\n";
+ OS << " llvm::SmallVector<" << type << ", 4> " << getLowerName()
+ << ";\n";
+ OS << " " << getLowerName() << ".reserve(" << getLowerName()
+ << "Size);\n";
+ OS << " for (unsigned i = " << getLowerName() << "Size; i; --i)\n";
+
+ std::string read = ReadPCHRecord(type);
+ OS << " " << getLowerName() << ".push_back(" << read << ");\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName() << ".data(), " << getLowerName() << "Size";
+ }
+ void writePCHWrite(raw_ostream &OS) const{
+ OS << " Record.push_back(SA->" << getLowerName() << "_size());\n";
+ OS << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator i = SA->" << getLowerName() << "_begin(), e = SA->"
+ << getLowerName() << "_end(); i != e; ++i)\n";
+ OS << " " << WritePCHRecord(type, "(*i)");
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\";\n";
+ OS << " bool isFirst = true;\n"
+ << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator i = " << getLowerName() << "_begin(), e = "
+ << getLowerName() << "_end(); i != e; ++i) {\n"
+ << " if (isFirst) isFirst = false;\n"
+ << " else OS << \", \";\n"
+ << " OS << *i;\n"
+ << " }\n";
+ OS << " OS << \"";
+ }
+ };
+
+ class EnumArgument : public Argument {
+ std::string type;
+ std::vector<StringRef> values, enums;
+ public:
+ EnumArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr), type(Arg.getValueAsString("Type")),
+ values(getValueAsListOfStrings(Arg, "Values")),
+ enums(getValueAsListOfStrings(Arg, "Enums"))
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << type << " " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ // Calculate the various enum values
+ std::vector<StringRef> uniques(enums);
+ std::sort(uniques.begin(), uniques.end());
+ uniques.erase(std::unique(uniques.begin(), uniques.end()),
+ uniques.end());
+ // FIXME: Emit a proper error
+ assert(!uniques.empty());
+
+ std::vector<StringRef>::iterator i = uniques.begin(),
+ e = uniques.end();
+ // The last one needs to not have a comma.
+ --e;
+
+ OS << "public:\n";
+ OS << " enum " << type << " {\n";
+ for (; i != e; ++i)
+ OS << " " << *i << ",\n";
+ OS << " " << *e << "\n";
+ OS << " };\n";
+ OS << "private:\n";
+ OS << " " << type << " " << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " " << getAttrName() << "Attr::" << type << " " << getLowerName()
+ << "(static_cast<" << getAttrName() << "Attr::" << type
+ << ">(Record[Idx++]));\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << "Record.push_back(SA->get" << getUpperName() << "());\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\" << get" << getUpperName() << "() << \"";
+ }
+ };
+
+ class VersionArgument : public Argument {
+ public:
+ VersionArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " VersionTuple get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }\n";
+ OS << " void set" << getUpperName()
+ << "(ASTContext &C, VersionTuple V) {\n";
+ OS << " " << getLowerName() << " = V;\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "get" << getUpperName() << "()";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "VersionTuple " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "VersionTuple " << getLowerName() << ";\n";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " VersionTuple " << getLowerName()
+ << "= ReadVersionTuple(Record, Idx);\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " AddVersionTuple(SA->get" << getUpperName() << "(), Record);\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << getLowerName() << "=\" << get" << getUpperName() << "() << \"";
+ }
+ };
+
+ class ExprArgument : public SimpleArgument {
+ public:
+ ExprArgument(Record &Arg, StringRef Attr)
+ : SimpleArgument(Arg, Attr, "Expr *")
+ {}
+
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "tempInst" << getUpperName();
+ }
+
+ void writeTemplateInstantiation(raw_ostream &OS) const {
+ OS << " " << getType() << " tempInst" << getUpperName() << ";\n";
+ OS << " {\n";
+ OS << " EnterExpressionEvaluationContext "
+ << "Unevaluated(S, Sema::Unevaluated);\n";
+ OS << " ExprResult " << "Result = S.SubstExpr("
+ << "A->get" << getUpperName() << "(), TemplateArgs);\n";
+ OS << " tempInst" << getUpperName() << " = "
+ << "Result.takeAs<Expr>();\n";
+ OS << " }\n";
+ }
+ };
+
+ class VariadicExprArgument : public VariadicArgument {
+ public:
+ VariadicExprArgument(Record &Arg, StringRef Attr)
+ : VariadicArgument(Arg, Attr, "Expr *")
+ {}
+
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "tempInst" << getUpperName() << ", "
+ << "A->" << getLowerName() << "_size()";
+ }
+
+ void writeTemplateInstantiation(raw_ostream &OS) const {
+ OS << " " << getType() << " *tempInst" << getUpperName()
+ << " = new (C, 16) " << getType()
+ << "[A->" << getLowerName() << "_size()];\n";
+ OS << " {\n";
+ OS << " EnterExpressionEvaluationContext "
+ << "Unevaluated(S, Sema::Unevaluated);\n";
+ OS << " " << getType() << " *TI = tempInst" << getUpperName()
+ << ";\n";
+ OS << " " << getType() << " *I = A->" << getLowerName()
+ << "_begin();\n";
+ OS << " " << getType() << " *E = A->" << getLowerName()
+ << "_end();\n";
+ OS << " for (; I != E; ++I, ++TI) {\n";
+ OS << " ExprResult Result = S.SubstExpr(*I, TemplateArgs);\n";
+ OS << " *TI = Result.takeAs<Expr>();\n";
+ OS << " }\n";
+ OS << " }\n";
+ }
+ };
+}
+
+static Argument *createArgument(Record &Arg, StringRef Attr,
+ Record *Search = 0) {
+ if (!Search)
+ Search = &Arg;
+
+ Argument *Ptr = 0;
+ llvm::StringRef ArgName = Search->getName();
+
+ if (ArgName == "AlignedArgument") Ptr = new AlignedArgument(Arg, Attr);
+ else if (ArgName == "EnumArgument") Ptr = new EnumArgument(Arg, Attr);
+ else if (ArgName == "ExprArgument") Ptr = new ExprArgument(Arg, Attr);
+ else if (ArgName == "FunctionArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "FunctionDecl *");
+ else if (ArgName == "IdentifierArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "IdentifierInfo *");
+ else if (ArgName == "BoolArgument") Ptr = new SimpleArgument(Arg, Attr,
+ "bool");
+ else if (ArgName == "IntArgument") Ptr = new SimpleArgument(Arg, Attr, "int");
+ else if (ArgName == "StringArgument") Ptr = new StringArgument(Arg, Attr);
+ else if (ArgName == "TypeArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "QualType");
+ else if (ArgName == "UnsignedArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "unsigned");
+ else if (ArgName == "SourceLocArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "SourceLocation");
+ else if (ArgName == "VariadicUnsignedArgument")
+ Ptr = new VariadicArgument(Arg, Attr, "unsigned");
+ else if (ArgName == "VariadicExprArgument")
+ Ptr = new VariadicExprArgument(Arg, Attr);
+ else if (ArgName == "VersionArgument")
+ Ptr = new VersionArgument(Arg, Attr);
+
+ if (!Ptr) {
+ std::vector<Record*> Bases = Search->getSuperClasses();
+ for (std::vector<Record*>::iterator i = Bases.begin(), e = Bases.end();
+ i != e; ++i) {
+ Ptr = createArgument(Arg, Attr, *i);
+ if (Ptr)
+ break;
+ }
+ }
+ return Ptr;
+}
+
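+// writeAvailabilityValue emits the body of the generated printPretty code for
+// the availability attribute; at runtime that code prints something roughly
+// like (platform and version values here are hypothetical):
+//   __attribute__((availability(macosx, introduced=10.4, deprecated=10.6)))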
+static void writeAvailabilityValue(raw_ostream &OS) {
+ OS << "\" << getPlatform()->getName();\n"
+ << " if (!getIntroduced().empty()) OS << \", introduced=\" << getIntroduced();\n"
+ << " if (!getDeprecated().empty()) OS << \", deprecated=\" << getDeprecated();\n"
+ << " if (!getObsoleted().empty()) OS << \", obsoleted=\" << getObsoleted();\n"
+ << " if (getUnavailable()) OS << \", unavailable\";\n"
+ << " OS << \"";
+}
+
+void ClangAttrClassEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+ OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
+ OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ Record &R = **i;
+ const std::string &SuperName = R.getSuperClasses().back()->getName();
+
+ OS << "class " << R.getName() << "Attr : public " << SuperName << " {\n";
+
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ai, ae;
+ Args.reserve(ArgRecords.size());
+
+ for (std::vector<Record*>::iterator ri = ArgRecords.begin(),
+ re = ArgRecords.end();
+ ri != re; ++ri) {
+ Record &ArgRecord = **ri;
+ Argument *Arg = createArgument(ArgRecord, R.getName());
+ assert(Arg);
+ Args.push_back(Arg);
+
+ Arg->writeDeclarations(OS);
+ OS << "\n\n";
+ }
+
+ ae = Args.end();
+
+ OS << "\n public:\n";
+ OS << " " << R.getName() << "Attr(SourceRange R, ASTContext &Ctx\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << " , ";
+ (*ai)->writeCtorParameters(OS);
+ OS << "\n";
+ }
+
+ OS << " )\n";
+ OS << " : " << SuperName << "(attr::" << R.getName() << ", R)\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << " , ";
+ (*ai)->writeCtorInitializers(OS);
+ OS << "\n";
+ }
+
+ OS << " {\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeCtorBody(OS);
+ OS << "\n";
+ }
+ OS << " }\n\n";
+
+ OS << " virtual " << R.getName() << "Attr *clone (ASTContext &C) const;\n";
+ OS << " virtual void printPretty(llvm::raw_ostream &OS, ASTContext &Ctx) const;\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeAccessors(OS);
+ OS << "\n\n";
+ }
+
+ OS << R.getValueAsString("AdditionalMembers");
+ OS << "\n\n";
+
+ OS << " static bool classof(const Attr *A) { return A->getKind() == "
+ << "attr::" << R.getName() << "; }\n";
+ OS << " static bool classof(const " << R.getName()
+ << "Attr *) { return true; }\n";
+
+ bool LateParsed = R.getValueAsBit("LateParsed");
+ OS << " virtual bool isLateParsed() const { return "
+ << LateParsed << "; }\n";
+
+ OS << "};\n\n";
+ }
+
+ OS << "#endif\n";
+}
+
+void ClangAttrImplEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ri, re;
+ std::vector<Argument*>::iterator ai, ae;
+
+ for (; i != e; ++i) {
+ Record &R = **i;
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<StringRef> Spellings = getValueAsListOfStrings(R, "Spellings");
+ std::vector<Argument*> Args;
+ for (ri = ArgRecords.begin(), re = ArgRecords.end(); ri != re; ++ri)
+ Args.push_back(createArgument(**ri, R.getName()));
+
+ for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
+ (*ai)->writeAccessorDefinitions(OS);
+
+ OS << R.getName() << "Attr *" << R.getName()
+ << "Attr::clone(ASTContext &C) const {\n";
+ OS << " return new (C) " << R.getName() << "Attr(getLocation(), C";
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << ", ";
+ (*ai)->writeCloneArgs(OS);
+ }
+ OS << ");\n}\n\n";
+
+ OS << "void " << R.getName() << "Attr::printPretty("
+ << "llvm::raw_ostream &OS, ASTContext &Ctx) const {\n";
+ if (Spellings.begin() != Spellings.end()) {
+ OS << " OS << \" __attribute__((" << *Spellings.begin();
+ if (Args.size()) OS << "(";
+ if (*Spellings.begin()=="availability") {
+ writeAvailabilityValue(OS);
+ } else {
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ if (ai!=Args.begin()) OS <<", ";
+ (*ai)->writeValue(OS);
+ }
+ }
+ if (Args.size()) OS << ")";
+ OS << "))\";\n";
+ }
+ OS << "}\n\n";
+ }
+}
+
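+// For example, with Class "ATTR" and attributes Foo, Bar and Baz (hypothetical
+// names), EmitAttrList emits:
+//   ATTR(Foo)
+//   ATTR(Bar)
+//   LAST_ATTR(Baz)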
+static void EmitAttrList(raw_ostream &OS, StringRef Class,
+ const std::vector<Record*> &AttrList) {
+ std::vector<Record*>::const_iterator i = AttrList.begin(), e = AttrList.end();
+
+ if (i != e) {
+ // Move the end iterator back to emit the last attribute.
+ for(--e; i != e; ++i)
+ OS << Class << "(" << (*i)->getName() << ")\n";
+
+ OS << "LAST_" << Class << "(" << (*i)->getName() << ")\n\n";
+ }
+}
+
+void ClangAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef LAST_ATTR\n";
+ OS << "#define LAST_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef INHERITABLE_ATTR\n";
+ OS << "#define INHERITABLE_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_INHERITABLE_ATTR\n";
+ OS << "#define LAST_INHERITABLE_ATTR(NAME) INHERITABLE_ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef INHERITABLE_PARAM_ATTR\n";
+ OS << "#define INHERITABLE_PARAM_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_INHERITABLE_PARAM_ATTR\n";
+ OS << "#define LAST_INHERITABLE_PARAM_ATTR(NAME)"
+ " INHERITABLE_PARAM_ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ Record *InhClass = Records.getClass("InheritableAttr");
+ Record *InhParamClass = Records.getClass("InheritableParamAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
+ NonInhAttrs, InhAttrs, InhParamAttrs;
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ if ((*i)->isSubClassOf(InhParamClass))
+ InhParamAttrs.push_back(*i);
+ else if ((*i)->isSubClassOf(InhClass))
+ InhAttrs.push_back(*i);
+ else
+ NonInhAttrs.push_back(*i);
+ }
+
+ EmitAttrList(OS, "INHERITABLE_PARAM_ATTR", InhParamAttrs);
+ EmitAttrList(OS, "INHERITABLE_ATTR", InhAttrs);
+ EmitAttrList(OS, "ATTR", NonInhAttrs);
+
+ OS << "#undef LAST_ATTR\n";
+ OS << "#undef INHERITABLE_ATTR\n";
+ OS << "#undef LAST_INHERITABLE_ATTR\n";
+ OS << "#undef LAST_INHERITABLE_PARAM_ATTR\n";
+ OS << "#undef ATTR\n";
+}
+
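+// A sketch of one generated case (hypothetical attribute name "Foo"; the
+// argument-reading lines come from each Argument's writePCHReadDecls):
+//     case attr::Foo: {
+//       bool isInherited = Record[Idx++];  // only for InheritableAttr subclasses
+//       ... per-argument read declarations ...
+//       New = new (Context) FooAttr(Range, Context, /* read arguments */);
+//       cast<InheritableAttr>(New)->setInherited(isInherited);
+//       break;
+//     }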
+void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ Record *InhClass = Records.getClass("InheritableAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
+ ArgRecords;
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ri, re;
+
+ OS << " switch (Kind) {\n";
+ OS << " default:\n";
+ OS << " assert(0 && \"Unknown attribute!\");\n";
+ OS << " break;\n";
+ for (; i != e; ++i) {
+ Record &R = **i;
+ OS << " case attr::" << R.getName() << ": {\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " bool isInherited = Record[Idx++];\n";
+ ArgRecords = R.getValueAsListOfDefs("Args");
+ Args.clear();
+ for (ai = ArgRecords.begin(), ae = ArgRecords.end(); ai != ae; ++ai) {
+ Argument *A = createArgument(**ai, R.getName());
+ Args.push_back(A);
+ A->writePCHReadDecls(OS);
+ }
+ OS << " New = new (Context) " << R.getName() << "Attr(Range, Context";
+ for (ri = Args.begin(), re = Args.end(); ri != re; ++ri) {
+ OS << ", ";
+ (*ri)->writePCHReadArgs(OS);
+ }
+ OS << ");\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n";
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+}
+
+void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
+ Record *InhClass = Records.getClass("InheritableAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
+
+ OS << " switch (A->getKind()) {\n";
+ OS << " default:\n";
+ OS << " llvm_unreachable(\"Unknown attribute kind!\");\n";
+ OS << " break;\n";
+ for (; i != e; ++i) {
+ Record &R = **i;
+ OS << " case attr::" << R.getName() << ": {\n";
+ Args = R.getValueAsListOfDefs("Args");
+ if (R.isSubClassOf(InhClass) || !Args.empty())
+ OS << " const " << R.getName() << "Attr *SA = cast<" << R.getName()
+ << "Attr>(A);\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " Record.push_back(SA->isInherited());\n";
+ for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
+ createArgument(**ai, R.getName())->writePCHWrite(OS);
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+}
+
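+// Emits a chain of .Case("spelling", true) entries, presumably for inclusion
+// in a llvm::StringSwitch, e.g. (illustrative spelling):
+//   .Case("noreturn", true)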
+void ClangAttrSpellingListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
+ Record &Attr = **I;
+
+ std::vector<StringRef> Spellings = getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(), E = Spellings.end(); I != E; ++I) {
+ StringRef Spelling = *I;
+ OS << ".Case(\"" << Spelling << "\", true)\n";
+ }
+ }
+
+}
+
+void ClangAttrLateParsedListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool LateParsed = Attr.getValueAsBit("LateParsed");
+
+ if (LateParsed) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ OS << ".Case(\"" << (*I) << "\", " << LateParsed << ")\n";
+ }
+ }
+ }
+}
+
+
+void ClangAttrTemplateInstantiateEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ OS << "namespace clang {\n"
+ << "namespace sema {\n\n"
+ << "Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, "
+ << "Sema &S,\n"
+ << " const MultiLevelTemplateArgumentList &TemplateArgs) {\n"
+ << " switch (At->getKind()) {\n"
+ << " default:\n"
+ << " break;\n";
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &R = **I;
+
+ OS << " case attr::" << R.getName() << ": {\n";
+ OS << " const " << R.getName() << "Attr *A = cast<"
+ << R.getName() << "Attr>(At);\n";
+ bool TDependent = R.getValueAsBit("TemplateDependent");
+
+ if (!TDependent) {
+ OS << " return A->clone(C);\n";
+ OS << " }\n";
+ continue;
+ }
+
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ai, ae;
+ Args.reserve(ArgRecords.size());
+
+ for (std::vector<Record*>::iterator ri = ArgRecords.begin(),
+ re = ArgRecords.end();
+ ri != re; ++ri) {
+ Record &ArgRecord = **ri;
+ Argument *Arg = createArgument(ArgRecord, R.getName());
+ assert(Arg);
+ Args.push_back(Arg);
+ }
+ ae = Args.end();
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeTemplateInstantiation(OS);
+ }
+ OS << " return new (C) " << R.getName() << "Attr(A->getLocation(), C";
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << ", ";
+ (*ai)->writeTemplateInstantiationArgs(OS);
+ }
+ OS << ");\n }\n";
+ }
+ OS << " } // end switch\n"
+ << " llvm_unreachable(\"Unknown attribute!\");\n"
+ << " return 0;\n"
+ << "}\n\n"
+ << "} // end namespace sema\n"
+ << "} // end namespace clang\n";
+}
+
+void ClangAttrParsedAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef PARSED_ATTR\n";
+ OS << "#define PARSED_ATTR(NAME) NAME\n";
+ OS << "#endif\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::set<StringRef> ProcessedAttrs;
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool SemaHandler = Attr.getValueAsBit("SemaHandler");
+
+ if (SemaHandler) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ StringRef AttrName = *I;
+
+ AttrName = NormalizeAttrName(AttrName);
+ // skip if a normalized version has been processed.
+ if (ProcessedAttrs.find(AttrName) != ProcessedAttrs.end())
+ continue;
+ else
+ ProcessedAttrs.insert(AttrName);
+
+ OS << "PARSED_ATTR(" << AttrName << ")\n";
+ }
+ }
+ }
+}
+
+void ClangAttrParsedAttrKindsEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool SemaHandler = Attr.getValueAsBit("SemaHandler");
+
+ if (SemaHandler) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ StringRef AttrName = *I, Spelling = *I;
+
+ AttrName = NormalizeAttrName(AttrName);
+ Spelling = NormalizeAttrSpelling(Spelling);
+
+ OS << ".Case(\"" << Spelling << "\", " << "AT_" << AttrName << ")\n";
+ }
+ }
+ }
+}
+
+
diff --git a/clang/utils/TableGen/ClangAttrEmitter.h b/clang/utils/TableGen/ClangAttrEmitter.h
new file mode 100644
index 0000000..d119a09
--- /dev/null
+++ b/clang/utils/TableGen/ClangAttrEmitter.h
@@ -0,0 +1,153 @@
+//===- ClangAttrEmitter.h - Generate Clang attribute handling ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGATTR_EMITTER_H
+#define CLANGATTR_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+
+/// ClangAttrClassEmitter - class emits the class definitions for attributes
+/// for clang.
+class ClangAttrClassEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrClassEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrImplEmitter - class emits the class method definitions for
+/// attributes for clang.
+class ClangAttrImplEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrImplEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrListEmitter - class emits the enumeration list for attributes for
+/// clang.
+class ClangAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrPCHReadEmitter - class emits the code to read an attribute from
+/// a clang precompiled header.
+class ClangAttrPCHReadEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrPCHReadEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrPCHWriteEmitter - class emits the code to write an attribute to
+/// a clang precompiled header.
+class ClangAttrPCHWriteEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrPCHWriteEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrSpellingListEmitter - class emits the list of spellings for attributes for
+/// clang.
+class ClangAttrSpellingListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrSpellingListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrLateParsedListEmitter emits the LateParsed property for attributes
+/// for clang.
+class ClangAttrLateParsedListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrLateParsedListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrTemplateInstantiateEmitter emits code to instantiate dependent
+/// attributes on templates.
+class ClangAttrTemplateInstantiateEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrTemplateInstantiateEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrParsedAttrListEmitter emits the list of parsed attributes
+/// for clang.
+class ClangAttrParsedAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrParsedAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrParsedAttrKindsEmitter emits the kind list of parsed attributes
+/// for clang.
+class ClangAttrParsedAttrKindsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrParsedAttrKindsEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+}
+
+#endif
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
new file mode 100644
index 0000000..8a49619
--- /dev/null
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -0,0 +1,385 @@
+//=- ClangDiagnosticsEmitter.cpp - Generate Clang diagnostics tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang diagnostics tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangDiagnosticsEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/SmallString.h"
+#include <map>
+#include <algorithm>
+#include <functional>
+#include <set>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Diagnostic category computation code.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DiagGroupParentMap {
+ RecordKeeper &Records;
+ std::map<const Record*, std::vector<Record*> > Mapping;
+public:
+ DiagGroupParentMap(RecordKeeper &records) : Records(records) {
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ std::vector<Record*> SubGroups =
+ DiagGroups[i]->getValueAsListOfDefs("SubGroups");
+ for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
+ Mapping[SubGroups[j]].push_back(DiagGroups[i]);
+ }
+ }
+
+ const std::vector<Record*> &getParents(const Record *Group) {
+ return Mapping[Group];
+ }
+};
+} // end anonymous namespace.
+
+static std::string
+getCategoryFromDiagGroup(const Record *Group,
+ DiagGroupParentMap &DiagGroupParents) {
+ // If the DiagGroup has a category, return it.
+ std::string CatName = Group->getValueAsString("CategoryName");
+ if (!CatName.empty()) return CatName;
+
+  // The diag group may be a subgroup of one or more other diagnostic groups;
+ // check these for a category as well.
+ const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
+ for (unsigned i = 0, e = Parents.size(); i != e; ++i) {
+ CatName = getCategoryFromDiagGroup(Parents[i], DiagGroupParents);
+ if (!CatName.empty()) return CatName;
+ }
+ return "";
+}
+
+/// getDiagnosticCategory - Return the category that the specified diagnostic
+/// lives in.
+static std::string getDiagnosticCategory(const Record *R,
+ DiagGroupParentMap &DiagGroupParents) {
+ // If the diagnostic is in a group, and that group has a category, use it.
+ if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group"))) {
+ // Check the diagnostic's diag group for a category.
+ std::string CatName = getCategoryFromDiagGroup(Group->getDef(),
+ DiagGroupParents);
+ if (!CatName.empty()) return CatName;
+ }
+
+ // If the diagnostic itself has a category, get it.
+ return R->getValueAsString("CategoryName");
+}
+
+namespace {
+ class DiagCategoryIDMap {
+ RecordKeeper &Records;
+ StringMap<unsigned> CategoryIDs;
+ std::vector<std::string> CategoryStrings;
+ public:
+ DiagCategoryIDMap(RecordKeeper &records) : Records(records) {
+ DiagGroupParentMap ParentInfo(Records);
+
+ // The zero'th category is "".
+ CategoryStrings.push_back("");
+ CategoryIDs[""] = 0;
+
+ std::vector<Record*> Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ std::string Category = getDiagnosticCategory(Diags[i], ParentInfo);
+ if (Category.empty()) continue; // Skip diags with no category.
+
+ unsigned &ID = CategoryIDs[Category];
+ if (ID != 0) continue; // Already seen.
+
+ ID = CategoryStrings.size();
+ CategoryStrings.push_back(Category);
+ }
+ }
+
+ unsigned getID(StringRef CategoryString) {
+ return CategoryIDs[CategoryString];
+ }
+
+ typedef std::vector<std::string>::iterator iterator;
+ iterator begin() { return CategoryStrings.begin(); }
+ iterator end() { return CategoryStrings.end(); }
+ };
+
+ struct GroupInfo {
+ std::vector<const Record*> DiagsInGroup;
+ std::vector<std::string> SubGroups;
+ unsigned IDNo;
+ };
+} // end anonymous namespace.
+
+/// \brief Invert the 1-[0/1] mapping of diags to group into a one to many
+/// mapping of groups to diags in the group.
+static void groupDiagnostics(const std::vector<Record*> &Diags,
+ const std::vector<Record*> &DiagGroups,
+ std::map<std::string, GroupInfo> &DiagsInGroup) {
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record *R = Diags[i];
+ DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group"));
+ if (DI == 0) continue;
+ std::string GroupName = DI->getDef()->getValueAsString("GroupName");
+ DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
+ }
+
+  // Add all DiagGroups to the DiagsInGroup list to make sure we pick up empty
+ // groups (these are warnings that GCC supports that clang never produces).
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ Record *Group = DiagGroups[i];
+ GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
+
+ std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
+ for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
+ GI.SubGroups.push_back(SubGroups[j]->getValueAsString("GroupName"));
+ }
+
+ // Assign unique ID numbers to the groups.
+ unsigned IDNo = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I, ++IDNo)
+ I->second.IDNo = IDNo;
+}
+
+//===----------------------------------------------------------------------===//
+// Warning Tables (.inc file) generation.
+//===----------------------------------------------------------------------===//
+
+void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
+ // Write the #if guard
+ if (!Component.empty()) {
+ std::string ComponentName = StringRef(Component).upper();
+ OS << "#ifdef " << ComponentName << "START\n";
+ OS << "__" << ComponentName << "START = DIAG_START_" << ComponentName
+ << ",\n";
+ OS << "#undef " << ComponentName << "START\n";
+ OS << "#endif\n\n";
+ }
+
+ const std::vector<Record*> &Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+
+ std::map<std::string, GroupInfo> DiagsInGroup;
+ groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
+
+ DiagCategoryIDMap CategoryIDs(Records);
+ DiagGroupParentMap DGParentMap(Records);
+
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record &R = *Diags[i];
+ // Filter by component.
+ if (!Component.empty() && Component != R.getValueAsString("Component"))
+ continue;
+
+ OS << "DIAG(" << R.getName() << ", ";
+ OS << R.getValueAsDef("Class")->getName();
+ OS << ", diag::" << R.getValueAsDef("DefaultMapping")->getName();
+
+ // Description string.
+ OS << ", \"";
+ OS.write_escaped(R.getValueAsString("Text")) << '"';
+
+ // Warning associated with the diagnostic. This is stored as an index into
+ // the alphabetically sorted warning table.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group"))) {
+ std::map<std::string, GroupInfo>::iterator I =
+ DiagsInGroup.find(DI->getDef()->getValueAsString("GroupName"));
+ assert(I != DiagsInGroup.end());
+ OS << ", " << I->second.IDNo;
+ } else {
+ OS << ", 0";
+ }
+
+ // SFINAE bit
+ if (R.getValueAsBit("SFINAE"))
+ OS << ", true";
+ else
+ OS << ", false";
+
+ // Access control bit
+ if (R.getValueAsBit("AccessControl"))
+ OS << ", true";
+ else
+ OS << ", false";
+
+ // FIXME: This condition is just to avoid temporary revlock, it can be
+ // removed.
+ if (R.getValue("WarningNoWerror")) {
+ // Default warning has no Werror bit.
+ if (R.getValueAsBit("WarningNoWerror"))
+ OS << ", true";
+ else
+ OS << ", false";
+
+ // Default warning show in system header bit.
+ if (R.getValueAsBit("WarningShowInSystemHeader"))
+ OS << ", true";
+ else
+ OS << ", false";
+ }
+
+ // Category number.
+ OS << ", " << CategoryIDs.getID(getDiagnosticCategory(&R, DGParentMap));
+ OS << ")\n";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Warning Group Tables generation
+//===----------------------------------------------------------------------===//
+
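+// getDiagCategoryEnum maps a category name to an identifier-safe enumerator,
+// replacing non-alphanumeric characters with '_'; e.g. the (hypothetical)
+// category "Value Conversion Issue" becomes DiagCat_Value_Conversion_Issue.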
+static std::string getDiagCategoryEnum(llvm::StringRef name) {
+ if (name.empty())
+ return "DiagCat_None";
+ SmallString<256> enumName = llvm::StringRef("DiagCat_");
+ for (llvm::StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
+ enumName += isalnum(*I) ? *I : '_';
+ return enumName.str();
+}
+
+void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
+ // Compute a mapping from a DiagGroup to all of its parents.
+ DiagGroupParentMap DGParentMap(Records);
+
+ std::vector<Record*> Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+
+ std::map<std::string, GroupInfo> DiagsInGroup;
+ groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
+
+  // Walk through the groups, emitting for each group an array of the
+  // diagnostics that are mapped to it.
+ OS << "\n#ifdef GET_DIAG_ARRAYS\n";
+ unsigned MaxLen = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I) {
+ MaxLen = std::max(MaxLen, (unsigned)I->first.size());
+
+ std::vector<const Record*> &V = I->second.DiagsInGroup;
+ if (!V.empty()) {
+ OS << "static const short DiagArray" << I->second.IDNo << "[] = { ";
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ OS << "diag::" << V[i]->getName() << ", ";
+ OS << "-1 };\n";
+ }
+
+ const std::vector<std::string> &SubGroups = I->second.SubGroups;
+ if (!SubGroups.empty()) {
+ OS << "static const short DiagSubGroup" << I->second.IDNo << "[] = { ";
+ for (unsigned i = 0, e = SubGroups.size(); i != e; ++i) {
+ std::map<std::string, GroupInfo>::iterator RI =
+ DiagsInGroup.find(SubGroups[i]);
+ assert(RI != DiagsInGroup.end() && "Referenced without existing?");
+ OS << RI->second.IDNo << ", ";
+ }
+ OS << "-1 };\n";
+ }
+ }
+ OS << "#endif // GET_DIAG_ARRAYS\n\n";
+
+ // Emit the table now.
+ OS << "\n#ifdef GET_DIAG_TABLE\n";
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I) {
+ // Group option string.
+ OS << " { ";
+ OS << I->first.size() << ", ";
+ OS << "\"";
+ if (I->first.find_first_not_of("abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789!@#$%^*-+=:?")!=std::string::npos)
+ throw "Invalid character in diagnostic group '" + I->first + "'";
+ OS.write_escaped(I->first) << "\","
+ << std::string(MaxLen-I->first.size()+1, ' ');
+
+ // Diagnostics in the group.
+ if (I->second.DiagsInGroup.empty())
+ OS << "0, ";
+ else
+ OS << "DiagArray" << I->second.IDNo << ", ";
+
+ // Subgroups.
+ if (I->second.SubGroups.empty())
+ OS << 0;
+ else
+ OS << "DiagSubGroup" << I->second.IDNo;
+ OS << " },\n";
+ }
+ OS << "#endif // GET_DIAG_TABLE\n\n";
+
+ // Emit the category table next.
+ DiagCategoryIDMap CategoriesByID(Records);
+ OS << "\n#ifdef GET_CATEGORY_TABLE\n";
+ for (DiagCategoryIDMap::iterator I = CategoriesByID.begin(),
+ E = CategoriesByID.end(); I != E; ++I)
+ OS << "CATEGORY(\"" << *I << "\", " << getDiagCategoryEnum(*I) << ")\n";
+ OS << "#endif // GET_CATEGORY_TABLE\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Diagnostic name index generation
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct RecordIndexElement
+{
+ RecordIndexElement() {}
+ explicit RecordIndexElement(Record const &R):
+ Name(R.getName()) {}
+
+ std::string Name;
+};
+
+struct RecordIndexElementSorter :
+ public std::binary_function<RecordIndexElement, RecordIndexElement, bool> {
+
+ bool operator()(RecordIndexElement const &Lhs,
+ RecordIndexElement const &Rhs) const {
+ return Lhs.Name < Rhs.Name;
+ }
+
+};
+
+} // end anonymous namespace.
+
+void ClangDiagsIndexNameEmitter::run(raw_ostream &OS) {
+ const std::vector<Record*> &Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+
+ std::vector<RecordIndexElement> Index;
+ Index.reserve(Diags.size());
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record &R = *(Diags[i]);
+ Index.push_back(RecordIndexElement(R));
+ }
+
+ std::sort(Index.begin(), Index.end(), RecordIndexElementSorter());
+
+ for (unsigned i = 0, e = Index.size(); i != e; ++i) {
+ const RecordIndexElement &R = Index[i];
+
+ OS << "DIAG_NAME_INDEX(" << R.Name << ")\n";
+ }
+}
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.h b/clang/utils/TableGen/ClangDiagnosticsEmitter.h
new file mode 100644
index 0000000..73d3c4d
--- /dev/null
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.h
@@ -0,0 +1,54 @@
+//===- ClangDiagnosticsEmitter.h - Generate Clang diagnostics tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang diagnostics tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGDIAGS_EMITTER_H
+#define CLANGDIAGS_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+
+/// ClangDiagsDefsEmitter - The top-level class emits .def files containing
+/// declarations of Clang diagnostics.
+///
+class ClangDiagsDefsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ const std::string& Component;
+public:
+ explicit ClangDiagsDefsEmitter(RecordKeeper &R, const std::string& component)
+ : Records(R), Component(component) {}
+
+ // run - Output the .def file contents
+ void run(raw_ostream &OS);
+};
+
+class ClangDiagGroupsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangDiagGroupsEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+class ClangDiagsIndexNameEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangDiagsIndexNameEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+
+} // End llvm namespace
+
+#endif
diff --git a/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/clang/utils/TableGen/ClangSACheckersEmitter.cpp
new file mode 100644
index 0000000..423b68a
--- /dev/null
+++ b/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -0,0 +1,319 @@
+//=- ClangSACheckersEmitter.cpp - Generate Clang SA checkers tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits Clang Static Analyzer checkers tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckersEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+#include <string>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Static Analyzer Checkers Tables generation
+//===----------------------------------------------------------------------===//
+
+/// \brief True if the record is marked hidden, or if any enclosing parent
+/// package is marked hidden; otherwise false.
+static bool isHidden(const Record &R) {
+ if (R.getValueAsBit("Hidden"))
+ return true;
+ // Not declared as hidden, check the parent package if it is hidden.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("ParentPackage")))
+ return isHidden(*DI->getDef());
+
+ return false;
+}
+
+static bool isCheckerNamed(const Record *R) {
+ return !R->getValueAsString("CheckerName").empty();
+}
+
+static std::string getPackageFullName(const Record *R);
+
+static std::string getParentPackageFullName(const Record *R) {
+ std::string name;
+ if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ name = getPackageFullName(DI->getDef());
+ return name;
+}
+
+static std::string getPackageFullName(const Record *R) {
+ std::string name = getParentPackageFullName(R);
+ if (!name.empty()) name += ".";
+ return name + R->getValueAsString("PackageName");
+}
+
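+// getCheckerFullName joins the parent package chain and the checker name with
+// dots, e.g. a checker "NullDereference" under package "core" (hypothetical
+// names) yields "core.NullDereference".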
+static std::string getCheckerFullName(const Record *R) {
+ std::string name = getParentPackageFullName(R);
+ if (isCheckerNamed(R)) {
+ if (!name.empty()) name += ".";
+ name += R->getValueAsString("CheckerName");
+ }
+ return name;
+}
+
+static std::string getStringValue(const Record &R, StringRef field) {
+ if (StringInit *
+ SI = dynamic_cast<StringInit*>(R.getValueInit(field)))
+ return SI->getValue();
+ return std::string();
+}
+
+namespace {
+struct GroupInfo {
+ llvm::DenseSet<const Record*> Checkers;
+ llvm::DenseSet<const Record *> SubGroups;
+ bool Hidden;
+ unsigned Index;
+
+ GroupInfo() : Hidden(false) { }
+};
+}
+
+static void addPackageToCheckerGroup(const Record *package, const Record *group,
+ llvm::DenseMap<const Record *, GroupInfo *> &recordGroupMap) {
+ llvm::DenseSet<const Record *> &checkers = recordGroupMap[package]->Checkers;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = checkers.begin(), E = checkers.end(); I != E; ++I)
+ recordGroupMap[group]->Checkers.insert(*I);
+
+ llvm::DenseSet<const Record *> &subGroups = recordGroupMap[package]->SubGroups;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = subGroups.begin(), E = subGroups.end(); I != E; ++I)
+ addPackageToCheckerGroup(*I, group, recordGroupMap);
+}
+
+void ClangSACheckersEmitter::run(raw_ostream &OS) {
+ std::vector<Record*> checkers = Records.getAllDerivedDefinitions("Checker");
+ llvm::DenseMap<const Record *, unsigned> checkerRecIndexMap;
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i)
+ checkerRecIndexMap[checkers[i]] = i;
+
+ // Invert the mapping of checkers to package/group into a one to many
+ // mapping of packages/groups to checkers.
+ std::map<std::string, GroupInfo> groupInfoByName;
+ llvm::DenseMap<const Record *, GroupInfo *> recordGroupMap;
+
+ std::vector<Record*> packages = Records.getAllDerivedDefinitions("Package");
+ for (unsigned i = 0, e = packages.size(); i != e; ++i) {
+ Record *R = packages[i];
+ std::string fullName = getPackageFullName(R);
+ if (!fullName.empty()) {
+ GroupInfo &info = groupInfoByName[fullName];
+ info.Hidden = isHidden(*R);
+ recordGroupMap[R] = &info;
+ }
+ }
+
+ std::vector<Record*>
+ checkerGroups = Records.getAllDerivedDefinitions("CheckerGroup");
+ for (unsigned i = 0, e = checkerGroups.size(); i != e; ++i) {
+ Record *R = checkerGroups[i];
+ std::string name = R->getValueAsString("GroupName");
+ if (!name.empty()) {
+ GroupInfo &info = groupInfoByName[name];
+ recordGroupMap[R] = &info;
+ }
+ }
+
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i) {
+ Record *R = checkers[i];
+ Record *package = 0;
+ if (DefInit *
+ DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ package = DI->getDef();
+ if (!isCheckerNamed(R) && !package)
+ throw "Checker '" + R->getName() + "' is neither named, nor in a package!";
+
+ if (isCheckerNamed(R)) {
+ // Create a pseudo-group to hold this checker.
+ std::string fullName = getCheckerFullName(R);
+ GroupInfo &info = groupInfoByName[fullName];
+ info.Hidden = R->getValueAsBit("Hidden");
+ recordGroupMap[R] = &info;
+ info.Checkers.insert(R);
+ } else {
+ recordGroupMap[package]->Checkers.insert(R);
+ }
+
+ Record *currR = isCheckerNamed(R) ? R : package;
+ // Insert the checker and its parent packages into the subgroups set of
+ // the corresponding parent package.
+ while (DefInit *DI
+ = dynamic_cast<DefInit*>(currR->getValueInit("ParentPackage"))) {
+ Record *parentPackage = DI->getDef();
+ recordGroupMap[parentPackage]->SubGroups.insert(currR);
+ currR = parentPackage;
+ }
+ // Insert the checker into the set of its group.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group")))
+ recordGroupMap[DI->getDef()]->Checkers.insert(R);
+ }
+
+  // If a package is in a group, add all its checkers and its sub-packages'
+  // checkers into the group.
+ for (unsigned i = 0, e = packages.size(); i != e; ++i)
+ if (DefInit *DI = dynamic_cast<DefInit*>(packages[i]->getValueInit("Group")))
+ addPackageToCheckerGroup(packages[i], DI->getDef(), recordGroupMap);
+
+ typedef std::map<std::string, const Record *> SortedRecords;
+ typedef llvm::DenseMap<const Record *, unsigned> RecToSortIndex;
+
+ SortedRecords sortedGroups;
+ RecToSortIndex groupToSortIndex;
+ OS << "\n#ifdef GET_GROUPS\n";
+ {
+ for (unsigned i = 0, e = checkerGroups.size(); i != e; ++i)
+ sortedGroups[checkerGroups[i]->getValueAsString("GroupName")]
+ = checkerGroups[i];
+
+ unsigned sortIndex = 0;
+ for (SortedRecords::iterator
+ I = sortedGroups.begin(), E = sortedGroups.end(); I != E; ++I) {
+ const Record *R = I->second;
+
+ OS << "GROUP(" << "\"";
+ OS.write_escaped(R->getValueAsString("GroupName")) << "\"";
+ OS << ")\n";
+
+ groupToSortIndex[R] = sortIndex++;
+ }
+ }
+ OS << "#endif // GET_GROUPS\n\n";
+
+ OS << "\n#ifdef GET_PACKAGES\n";
+ {
+ SortedRecords sortedPackages;
+ for (unsigned i = 0, e = packages.size(); i != e; ++i)
+ sortedPackages[getPackageFullName(packages[i])] = packages[i];
+
+ for (SortedRecords::iterator
+ I = sortedPackages.begin(), E = sortedPackages.end(); I != E; ++I) {
+ const Record &R = *I->second;
+
+ OS << "PACKAGE(" << "\"";
+ OS.write_escaped(getPackageFullName(&R)) << "\", ";
+ // Group index
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << groupToSortIndex[DI->getDef()] << ", ";
+ else
+ OS << "-1, ";
+ // Hidden bit
+ if (isHidden(R))
+ OS << "true";
+ else
+ OS << "false";
+ OS << ")\n";
+ }
+ }
+ OS << "#endif // GET_PACKAGES\n\n";
+
+ OS << "\n#ifdef GET_CHECKERS\n";
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i) {
+ const Record &R = *checkers[i];
+
+ OS << "CHECKER(" << "\"";
+ std::string name;
+ if (isCheckerNamed(&R))
+ name = getCheckerFullName(&R);
+ OS.write_escaped(name) << "\", ";
+ OS << R.getName() << ", ";
+ OS << getStringValue(R, "DescFile") << ", ";
+ OS << "\"";
+ OS.write_escaped(getStringValue(R, "HelpText")) << "\", ";
+ // Group index
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << groupToSortIndex[DI->getDef()] << ", ";
+ else
+ OS << "-1, ";
+ // Hidden bit
+ if (isHidden(R))
+ OS << "true";
+ else
+ OS << "false";
+ OS << ")\n";
+ }
+ OS << "#endif // GET_CHECKERS\n\n";
+
+ unsigned index = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I)
+ I->second.Index = index++;
+
+ // Walk through the packages/groups/checkers emitting an array for each
+ // set of checkers and an array for each set of subpackages.
+
+ OS << "\n#ifdef GET_MEMBER_ARRAYS\n";
+ unsigned maxLen = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I) {
+ maxLen = std::max(maxLen, (unsigned)I->first.size());
+
+ llvm::DenseSet<const Record *> &checkers = I->second.Checkers;
+ if (!checkers.empty()) {
+ OS << "static const short CheckerArray" << I->second.Index << "[] = { ";
+ // Make the output order deterministic.
+ std::map<int, const Record *> sorted;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = checkers.begin(), E = checkers.end(); I != E; ++I)
+ sorted[(*I)->getID()] = *I;
+
+ for (std::map<int, const Record *>::iterator
+ I = sorted.begin(), E = sorted.end(); I != E; ++I)
+ OS << checkerRecIndexMap[I->second] << ", ";
+ OS << "-1 };\n";
+ }
+
+ llvm::DenseSet<const Record *> &subGroups = I->second.SubGroups;
+ if (!subGroups.empty()) {
+ OS << "static const short SubPackageArray" << I->second.Index << "[] = { ";
+ // Make the output order deterministic.
+ std::map<int, const Record *> sorted;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = subGroups.begin(), E = subGroups.end(); I != E; ++I)
+ sorted[(*I)->getID()] = *I;
+
+ for (std::map<int, const Record *>::iterator
+ I = sorted.begin(), E = sorted.end(); I != E; ++I) {
+ OS << recordGroupMap[I->second]->Index << ", ";
+ }
+ OS << "-1 };\n";
+ }
+ }
+ OS << "#endif // GET_MEMBER_ARRAYS\n\n";
+
+ OS << "\n#ifdef GET_CHECKNAME_TABLE\n";
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I) {
+ // Group option string.
+ OS << " { \"";
+ OS.write_escaped(I->first) << "\","
+ << std::string(maxLen-I->first.size()+1, ' ');
+
+ if (I->second.Checkers.empty())
+ OS << "0, ";
+ else
+ OS << "CheckerArray" << I->second.Index << ", ";
+
+ // Subgroups.
+ if (I->second.SubGroups.empty())
+ OS << "0, ";
+ else
+ OS << "SubPackageArray" << I->second.Index << ", ";
+
+ OS << (I->second.Hidden ? "true" : "false");
+
+ OS << " },\n";
+ }
+ OS << "#endif // GET_CHECKNAME_TABLE\n\n";
+}
diff --git a/clang/utils/TableGen/ClangSACheckersEmitter.h b/clang/utils/TableGen/ClangSACheckersEmitter.h
new file mode 100644
index 0000000..5a0e148
--- /dev/null
+++ b/clang/utils/TableGen/ClangSACheckersEmitter.h
@@ -0,0 +1,31 @@
+//===- ClangSACheckersEmitter.h - Generate Clang SA checkers tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits Clang Static Analyzer checkers tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGSACHECKERS_EMITTER_H
+#define CLANGSACHECKERS_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+
+class ClangSACheckersEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangSACheckersEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/clang/utils/TableGen/Makefile b/clang/utils/TableGen/Makefile
new file mode 100644
index 0000000..9790efc
--- /dev/null
+++ b/clang/utils/TableGen/Makefile
@@ -0,0 +1,19 @@
+##===- utils/TableGen/Makefile -----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+TOOLNAME = clang-tblgen
+USEDLIBS = LLVMTableGen.a LLVMSupport.a
+REQUIRES_EH := 1
+REQUIRES_RTTI := 1
+
+# This tool has no plugins, optimize startup time.
+TOOL_NO_EXPORTS = 1
+
+include $(LEVEL)/Makefile.common
diff --git a/clang/utils/TableGen/NeonEmitter.cpp b/clang/utils/TableGen/NeonEmitter.cpp
new file mode 100644
index 0000000..e6f2e53
--- /dev/null
+++ b/clang/utils/TableGen/NeonEmitter.cpp
@@ -0,0 +1,1574 @@
+//===- NeonEmitter.cpp - Generate arm_neon.h for use with clang -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+// Each NEON instruction is implemented in terms of 1 or more functions which
+// are suffixed with the element type of the input vectors. Functions may be
+// implemented in terms of generic vector operations such as +, *, -, etc. or
+// by calling a __builtin_-prefixed function which will be handled by clang's
+// CodeGen library.
+//
+// Additional validation code can be generated by this file when runHeader() is
+// called, rather than the normal run() entry point. A complete set of tests
+// for Neon intrinsics can be generated by calling the runTests() entry point.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NeonEmitter.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <string>
+
+using namespace llvm;
+
+/// ParseTypes - break down a string such as "fQf" into a vector of StringRefs,
+/// with each StringRef representing a single type declared in the string. For
+/// "fQf" we would end up with 2 StringRefs, "f" and "Qf", representing
+/// 2xfloat and 4xfloat respectively.
+static void ParseTypes(Record *r, std::string &s,
+ SmallVectorImpl<StringRef> &TV) {
+ const char *data = s.data();
+ int len = 0;
+
+ for (unsigned i = 0, e = s.size(); i != e; ++i, ++len) {
+ if (data[len] == 'P' || data[len] == 'Q' || data[len] == 'U')
+ continue;
+
+ switch (data[len]) {
+ case 'c':
+ case 's':
+ case 'i':
+ case 'l':
+ case 'h':
+ case 'f':
+ break;
+ default:
+ throw TGError(r->getLoc(),
+ "Unexpected letter: " + std::string(data + len, 1));
+ }
+ TV.push_back(StringRef(data, len + 1));
+ data += len + 1;
+ len = -1;
+ }
+}
+
+/// Widen - Convert a type code into the next wider type. char -> short,
+/// short -> int, etc.
+static char Widen(const char t) {
+ switch (t) {
+ case 'c':
+ return 's';
+ case 's':
+ return 'i';
+ case 'i':
+ return 'l';
+ case 'h':
+ return 'f';
+ default: throw "unhandled type in widen!";
+ }
+}
+
+/// Narrow - Convert a type code into the next smaller type. short -> char,
+/// float -> half float, etc.
+static char Narrow(const char t) {
+ switch (t) {
+ case 's':
+ return 'c';
+ case 'i':
+ return 's';
+ case 'l':
+ return 'i';
+ case 'f':
+ return 'h';
+ default: throw "unhandled type in narrow!";
+ }
+}
+
+/// For a particular StringRef, return the base type code, and whether it has
+/// the quad-vector, polynomial, or unsigned modifiers set.
+static char ClassifyType(StringRef ty, bool &quad, bool &poly, bool &usgn) {
+ unsigned off = 0;
+
+ // remember quad.
+ if (ty[off] == 'Q') {
+ quad = true;
+ ++off;
+ }
+
+ // remember poly.
+ if (ty[off] == 'P') {
+ poly = true;
+ ++off;
+ }
+
+ // remember unsigned.
+ if (ty[off] == 'U') {
+ usgn = true;
+ ++off;
+ }
+
+ // base type to get the type string for.
+ return ty[off];
+}
+
+/// ModType - Transform a type code and its modifiers based on a mod code. The
+/// mod code definitions may be found at the top of arm_neon.td.
+static char ModType(const char mod, char type, bool &quad, bool &poly,
+ bool &usgn, bool &scal, bool &cnst, bool &pntr) {
+ switch (mod) {
+ case 't':
+ if (poly) {
+ poly = false;
+ usgn = true;
+ }
+ break;
+ case 'u':
+ usgn = true;
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
+ case 'x':
+ usgn = false;
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
+ case 'f':
+ if (type == 'h')
+ quad = true;
+ type = 'f';
+ usgn = false;
+ break;
+ case 'g':
+ quad = false;
+ break;
+ case 'w':
+ type = Widen(type);
+ quad = true;
+ break;
+ case 'n':
+ type = Widen(type);
+ break;
+ case 'i':
+ type = 'i';
+ scal = true;
+ break;
+ case 'l':
+ type = 'l';
+ scal = true;
+ usgn = true;
+ break;
+ case 's':
+ case 'a':
+ scal = true;
+ break;
+ case 'k':
+ quad = true;
+ break;
+ case 'c':
+ cnst = true;
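+    // Intentional fall-through: 'c' (const pointer) also sets pntr and scal.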
+ case 'p':
+ pntr = true;
+ scal = true;
+ break;
+ case 'h':
+ type = Narrow(type);
+ if (type == 'h')
+ quad = false;
+ break;
+ case 'e':
+ type = Narrow(type);
+ usgn = true;
+ break;
+ default:
+ break;
+ }
+ return type;
+}
+
+/// TypeString - for a modifier and type, generate the name of the typedef for
+/// that type. QUc -> uint8x16_t.
+static std::string TypeString(const char mod, StringRef typestr) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "void";
+ if (mod == 'i')
+ return "int";
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ SmallString<128> s;
+
+ if (usgn)
+ s.push_back('u');
+
+ switch (type) {
+ case 'c':
+ s += poly ? "poly8" : "int8";
+ if (scal)
+ break;
+ s += quad ? "x16" : "x8";
+ break;
+ case 's':
+ s += poly ? "poly16" : "int16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'i':
+ s += "int32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ case 'l':
+ s += "int64";
+ if (scal)
+ break;
+ s += quad ? "x2" : "x1";
+ break;
+ case 'h':
+ s += "float16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'f':
+ s += "float32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ default:
+ throw "unhandled type!";
+ }
+
+ if (mod == '2')
+ s += "x2";
+ if (mod == '3')
+ s += "x3";
+ if (mod == '4')
+ s += "x4";
+
+ // Append _t, finishing the type string typedef type.
+ s += "_t";
+
+ if (cnst)
+ s += " const";
+
+ if (pntr)
+ s += " *";
+
+ return s.str();
+}
+
+/// BuiltinTypeString - for a modifier and type, generate the clang
+/// BuiltinsARM.def prototype code for the function. See the top of clang's
+/// Builtins.def for a description of the type strings.
+static std::string BuiltinTypeString(const char mod, StringRef typestr,
+ ClassKind ck, bool ret) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "v"; // void
+ if (mod == 'i')
+ return "i"; // int
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ // All pointers are void* pointers. Change type to 'v' now.
+ if (pntr) {
+ usgn = false;
+ poly = false;
+ type = 'v';
+ }
+ // Treat half-float ('h') types as unsigned short ('s') types.
+ if (type == 'h') {
+ type = 's';
+ usgn = true;
+ }
+ usgn = usgn | poly | ((ck == ClassI || ck == ClassW) && scal && type != 'f');
+
+ if (scal) {
+ SmallString<128> s;
+
+ if (usgn)
+ s.push_back('U');
+ else if (type == 'c')
+ s.push_back('S'); // make chars explicitly signed
+
+ if (type == 'l') // 64-bit long
+ s += "LLi";
+ else
+ s.push_back(type);
+
+ if (cnst)
+ s.push_back('C');
+ if (pntr)
+ s.push_back('*');
+ return s.str();
+ }
+
+ // Since the return value must be one type, return a vector type of the
+ // appropriate width which we will bitcast. An exception is made for
+ // returning structs of 2, 3, or 4 vectors which are returned in a sret-like
+ // fashion, storing them to a pointer arg.
+ if (ret) {
+ if (mod >= '2' && mod <= '4')
+ return "vv*"; // void result with void* first argument
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16Sc" : "V8Sc";
+ }
+
+ // Non-return array types are passed as individual vectors.
+ if (mod == '2')
+ return quad ? "V16ScV16Sc" : "V8ScV8Sc";
+ if (mod == '3')
+ return quad ? "V16ScV16ScV16Sc" : "V8ScV8ScV8Sc";
+ if (mod == '4')
+ return quad ? "V16ScV16ScV16ScV16Sc" : "V8ScV8ScV8ScV8Sc";
+
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16Sc" : "V8Sc";
+}
+
+/// MangleName - Append a type or width suffix to a base neon function name,
+/// and insert a 'q' in the appropriate location if the operation works on
+/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
+static std::string MangleName(const std::string &name, StringRef typestr,
+ ClassKind ck) {
+ if (name == "vcvt_f32_f16")
+ return name;
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ std::string s = name;
+
+ switch (type) {
+ case 'c':
+ switch (ck) {
+ case ClassS: s += poly ? "_p8" : usgn ? "_u8" : "_s8"; break;
+ case ClassI: s += "_i8"; break;
+ case ClassW: s += "_8"; break;
+ default: break;
+ }
+ break;
+ case 's':
+ switch (ck) {
+ case ClassS: s += poly ? "_p16" : usgn ? "_u16" : "_s16"; break;
+ case ClassI: s += "_i16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'i':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u32" : "_s32"; break;
+ case ClassI: s += "_i32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ case 'l':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u64" : "_s64"; break;
+ case ClassI: s += "_i64"; break;
+ case ClassW: s += "_64"; break;
+ default: break;
+ }
+ break;
+ case 'h':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'f':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ default:
+ throw "unhandled type!";
+ }
+ if (ck == ClassB)
+ s += "_v";
+
+ // Insert a 'q' before the first '_' character so that it ends up before
+ // _lane or _n on vector-scalar operations.
+ if (quad) {
+ size_t pos = s.find('_');
+ s = s.insert(pos, "q");
+ }
+ return s;
+}
+
+/// UseMacro - Examine the prototype string to determine if the intrinsic
+/// should be defined as a preprocessor macro instead of an inline function.
+static bool UseMacro(const std::string &proto) {
+ // If this builtin takes an immediate argument, we need to #define it rather
+ // than use a standard declaration, so that SemaChecking can range check
+ // the immediate passed by the user.
+ if (proto.find('i') != std::string::npos)
+ return true;
+
+ // Pointer arguments need to use macros to avoid hiding aligned attributes
+ // from the pointer type.
+ if (proto.find('p') != std::string::npos ||
+ proto.find('c') != std::string::npos)
+ return true;
+
+ return false;
+}
+
+/// MacroArgUsedDirectly - Return true if argument i for an intrinsic that is
+/// defined as a macro should be accessed directly instead of being first
+/// assigned to a local temporary.
+static bool MacroArgUsedDirectly(const std::string &proto, unsigned i) {
+ // True for constant ints (i), pointers (p) and const pointers (c).
+ return (proto[i] == 'i' || proto[i] == 'p' || proto[i] == 'c');
+}
+
+// Generate the string "(argtype a, argtype b, ...)"
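+// For example, a non-macro prototype "ddd" with typestr "f" (illustrative
+// values) yields "(float32x2_t __a, float32x2_t __b)".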
+static std::string GenArgs(const std::string &proto, StringRef typestr) {
+ bool define = UseMacro(proto);
+ char arg = 'a';
+
+ std::string s;
+ s += "(";
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (define) {
+ // Some macro arguments are used directly instead of being assigned
+ // to local temporaries; prepend an underscore prefix to make their
+ // names consistent with the local temporaries.
+ if (MacroArgUsedDirectly(proto, i))
+ s += "__";
+ } else {
+ s += TypeString(proto[i], typestr) + " __";
+ }
+ s.push_back(arg);
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ s += ")";
+ return s;
+}
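+// Illustration (not in the original source): for a prototype such as "ddd"
+// with typestr "s", the inline-function case produces
+// "(int16x4_t __a, int16x4_t __b)"; in the macro case only the bare argument
+// names are emitted and GenMacroLocals below supplies the typed temporaries.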
+
+// Macro arguments are not type-checked like inline function arguments, so
+// assign them to local temporaries to get the right type checking.
+static std::string GenMacroLocals(const std::string &proto, StringRef typestr) {
+ char arg = 'a';
+ std::string s;
+ bool generatedLocal = false;
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ // Do not create a temporary for an immediate argument.
+ // That would defeat the whole point of using a macro!
+ if (MacroArgUsedDirectly(proto, i))
+ continue;
+ generatedLocal = true;
+
+ s += TypeString(proto[i], typestr) + " __";
+ s.push_back(arg);
+ s += " = (";
+ s.push_back(arg);
+ s += "); ";
+ }
+
+ if (generatedLocal)
+ s += "\\\n ";
+ return s;
+}
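+// Illustration (not in the original source): for a prototype such as "ddi"
+// with typestr "s", this emits "int16x4_t __a = (a); " for the vector
+// argument, while the immediate argument is left to be used directly.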
+
+// Use the vmovl builtin to sign-extend or zero-extend a vector.
+static std::string Extend(StringRef typestr, const std::string &a) {
+ std::string s;
+ s = MangleName("vmovl", typestr, ClassS);
+ s += "(" + a + ")";
+ return s;
+}
+
+static std::string Duplicate(unsigned nElts, StringRef typestr,
+ const std::string &a) {
+ std::string s;
+
+ s = "(" + TypeString('d', typestr) + "){ ";
+ for (unsigned i = 0; i != nElts; ++i) {
+ s += a;
+ if ((i + 1) < nElts)
+ s += ", ";
+ }
+ s += " }";
+
+ return s;
+}
+
+static std::string SplatLane(unsigned nElts, const std::string &vec,
+ const std::string &lane) {
+ std::string s = "__builtin_shufflevector(" + vec + ", " + vec;
+ for (unsigned i = 0; i < nElts; ++i)
+ s += ", " + lane;
+ s += ")";
+ return s;
+}
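+// Illustration (not in the original source): SplatLane(4, "__b", "__c")
+// produces "__builtin_shufflevector(__b, __b, __c, __c, __c, __c)",
+// replicating lane __c of __b across a four-element result.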
+
+static unsigned GetNumElements(StringRef typestr, bool &quad) {
+ quad = false;
+ bool dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ default:
+ throw "unhandled type!";
+ }
+ if (quad) nElts <<= 1;
+ return nElts;
+}
+
+// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
+static std::string GenOpString(OpKind op, const std::string &proto,
+ StringRef typestr) {
+ bool quad;
+ unsigned nElts = GetNumElements(typestr, quad);
+ bool define = UseMacro(proto);
+
+ std::string ts = TypeString(proto[0], typestr);
+ std::string s;
+ if (!define) {
+ s = "return ";
+ }
+
+ switch(op) {
+ case OpAdd:
+ s += "__a + __b;";
+ break;
+ case OpAddl:
+ s += Extend(typestr, "__a") + " + " + Extend(typestr, "__b") + ";";
+ break;
+ case OpAddw:
+ s += "__a + " + Extend(typestr, "__b") + ";";
+ break;
+ case OpSub:
+ s += "__a - __b;";
+ break;
+ case OpSubl:
+ s += Extend(typestr, "__a") + " - " + Extend(typestr, "__b") + ";";
+ break;
+ case OpSubw:
+ s += "__a - " + Extend(typestr, "__b") + ";";
+ break;
+ case OpMulN:
+ s += "__a * " + Duplicate(nElts, typestr, "__b") + ";";
+ break;
+ case OpMulLane:
+ s += "__a * " + SplatLane(nElts, "__b", "__c") + ";";
+ break;
+ case OpMul:
+ s += "__a * __b;";
+ break;
+ case OpMullLane:
+ s += MangleName("vmull", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpMlaN:
+ s += "__a + (__b * " + Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlaLane:
+ s += "__a + (__b * " + SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMla:
+ s += "__a + (__b * __c);";
+ break;
+ case OpMlalN:
+ s += "__a + " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlalLane:
+ s += "__a + " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMlal:
+ s += "__a + " + MangleName("vmull", typestr, ClassS) + "(__b, __c);";
+ break;
+ case OpMlsN:
+ s += "__a - (__b * " + Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlsLane:
+ s += "__a - (__b * " + SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMls:
+ s += "__a - (__b * __c);";
+ break;
+ case OpMlslN:
+ s += "__a - " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlslLane:
+ s += "__a - " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMlsl:
+ s += "__a - " + MangleName("vmull", typestr, ClassS) + "(__b, __c);";
+ break;
+ case OpQDMullLane:
+ s += MangleName("vqdmull", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpQDMlalLane:
+ s += MangleName("vqdmlal", typestr, ClassS) + "(__a, __b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpQDMlslLane:
+ s += MangleName("vqdmlsl", typestr, ClassS) + "(__a, __b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpQDMulhLane:
+ s += MangleName("vqdmulh", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpQRDMulhLane:
+ s += MangleName("vqrdmulh", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpEq:
+ s += "(" + ts + ")(__a == __b);";
+ break;
+ case OpGe:
+ s += "(" + ts + ")(__a >= __b);";
+ break;
+ case OpLe:
+ s += "(" + ts + ")(__a <= __b);";
+ break;
+ case OpGt:
+ s += "(" + ts + ")(__a > __b);";
+ break;
+ case OpLt:
+ s += "(" + ts + ")(__a < __b);";
+ break;
+ case OpNeg:
+ s += " -__a;";
+ break;
+ case OpNot:
+ s += " ~__a;";
+ break;
+ case OpAnd:
+ s += "__a & __b;";
+ break;
+ case OpOr:
+ s += "__a | __b;";
+ break;
+ case OpXor:
+ s += "__a ^ __b;";
+ break;
+ case OpAndNot:
+ s += "__a & ~__b;";
+ break;
+ case OpOrNot:
+ s += "__a | ~__b;";
+ break;
+ case OpCast:
+ s += "(" + ts + ")__a;";
+ break;
+ case OpConcat:
+ s += "(" + ts + ")__builtin_shufflevector((int64x1_t)__a";
+ s += ", (int64x1_t)__b, 0, 1);";
+ break;
+ case OpHi:
+ s += "(" + ts +
+ ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 1);";
+ break;
+ case OpLo:
+ s += "(" + ts +
+ ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 0);";
+ break;
+ case OpDup:
+ s += Duplicate(nElts, typestr, "__a") + ";";
+ break;
+ case OpDupLane:
+ s += SplatLane(nElts, "__a", "__b") + ";";
+ break;
+ case OpSelect:
+    // Select: (__a & __b) | (~__a & __c)
+ s += "(" + ts + ")";
+ ts = TypeString(proto[1], typestr);
+ s += "((__a & (" + ts + ")__b) | ";
+ s += "(~__a & (" + ts + ")__c));";
+ break;
+ case OpRev16:
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = 2; i <= nElts; i += 2)
+ for (unsigned j = 0; j != 2; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ");";
+ break;
+ case OpRev32: {
+ unsigned WordElts = nElts >> (1 + (int)quad);
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = WordElts; i <= nElts; i += WordElts)
+ for (unsigned j = 0; j != WordElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ");";
+ break;
+ }
+ case OpRev64: {
+ unsigned DblWordElts = nElts >> (int)quad;
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = DblWordElts; i <= nElts; i += DblWordElts)
+ for (unsigned j = 0; j != DblWordElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ");";
+ break;
+ }
+ case OpAbdl: {
+ std::string abd = MangleName("vabd", typestr, ClassS) + "(__a, __b)";
+ if (typestr[0] != 'U') {
+ // vabd results are always unsigned and must be zero-extended.
+ std::string utype = "U" + typestr.str();
+ s += "(" + TypeString(proto[0], typestr) + ")";
+ abd = "(" + TypeString('d', utype) + ")" + abd;
+ s += Extend(utype, abd) + ";";
+ } else {
+ s += Extend(typestr, abd) + ";";
+ }
+ break;
+ }
+ case OpAba:
+ s += "__a + " + MangleName("vabd", typestr, ClassS) + "(__b, __c);";
+ break;
+ case OpAbal: {
+ s += "__a + ";
+ std::string abd = MangleName("vabd", typestr, ClassS) + "(__b, __c)";
+ if (typestr[0] != 'U') {
+ // vabd results are always unsigned and must be zero-extended.
+ std::string utype = "U" + typestr.str();
+ s += "(" + TypeString(proto[0], typestr) + ")";
+ abd = "(" + TypeString('d', utype) + ")" + abd;
+ s += Extend(utype, abd) + ";";
+ } else {
+ s += Extend(typestr, abd) + ";";
+ }
+ break;
+ }
+ default:
+ throw "unknown OpKind!";
+ }
+ return s;
+}
+
+static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
+ unsigned mod = proto[0];
+
+ if (mod == 'v' || mod == 'f')
+ mod = proto[1];
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ // Base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ NeonTypeFlags::EltType ET;
+ switch (type) {
+ case 'c':
+ ET = poly ? NeonTypeFlags::Poly8 : NeonTypeFlags::Int8;
+ break;
+ case 's':
+ ET = poly ? NeonTypeFlags::Poly16 : NeonTypeFlags::Int16;
+ break;
+ case 'i':
+ ET = NeonTypeFlags::Int32;
+ break;
+ case 'l':
+ ET = NeonTypeFlags::Int64;
+ break;
+ case 'h':
+ ET = NeonTypeFlags::Float16;
+ break;
+ case 'f':
+ ET = NeonTypeFlags::Float32;
+ break;
+ default:
+ throw "unhandled type!";
+ }
+ NeonTypeFlags Flags(ET, usgn, quad && proto[1] != 'g');
+ return Flags.getFlags();
+}
+
+// Generate the definition for this intrinsic, e.g. __builtin_neon_cls(a)
+static std::string GenBuiltin(const std::string &name, const std::string &proto,
+ StringRef typestr, ClassKind ck) {
+ std::string s;
+
+  // If this builtin returns a struct of 2, 3, or 4 vectors, pass it as an
+  // implicit sret-like argument.
+ bool sret = (proto[0] >= '2' && proto[0] <= '4');
+
+ bool define = UseMacro(proto);
+
+ // Check if the prototype has a scalar operand with the type of the vector
+ // elements. If not, bitcasting the args will take care of arg checking.
+ // The actual signedness etc. will be taken care of with special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ if (proto[0] != 'v') {
+ std::string ts = TypeString(proto[0], typestr);
+
+ if (define) {
+ if (sret)
+ s += ts + " r; ";
+ else
+ s += "(" + ts + ")";
+ } else if (sret) {
+ s += ts + " r; ";
+ } else {
+ s += "return (" + ts + ")";
+ }
+ }
+
+ bool splat = proto.find('a') != std::string::npos;
+
+ s += "__builtin_neon_";
+ if (splat) {
+ // Call the non-splat builtin: chop off the "_n" suffix from the name.
+ std::string vname(name, 0, name.size()-2);
+ s += MangleName(vname, typestr, ck);
+ } else {
+ s += MangleName(name, typestr, ck);
+ }
+ s += "(";
+
+ // Pass the address of the return variable as the first argument to sret-like
+ // builtins.
+ if (sret)
+ s += "&r, ";
+
+ char arg = 'a';
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ std::string args = std::string(&arg, 1);
+
+ // Use the local temporaries instead of the macro arguments.
+ args = "__" + args;
+
+ bool argQuad = false;
+ bool argPoly = false;
+ bool argUsgn = false;
+ bool argScalar = false;
+ bool dummy = false;
+ char argType = ClassifyType(typestr, argQuad, argPoly, argUsgn);
+ argType = ModType(proto[i], argType, argQuad, argPoly, argUsgn, argScalar,
+ dummy, dummy);
+
+ // Handle multiple-vector values specially, emitting each subvector as an
+ // argument to the __builtin.
+ if (proto[i] >= '2' && proto[i] <= '4') {
+ // Check if an explicit cast is needed.
+ if (argType != 'c' || argPoly || argUsgn)
+ args = (argQuad ? "(int8x16_t)" : "(int8x8_t)") + args;
+
+ for (unsigned vi = 0, ve = proto[i] - '0'; vi != ve; ++vi) {
+ s += args + ".val[" + utostr(vi) + "]";
+ if ((vi + 1) < ve)
+ s += ", ";
+ }
+ if ((i + 1) < e)
+ s += ", ";
+
+ continue;
+ }
+
+ if (splat && (i + 1) == e)
+ args = Duplicate(GetNumElements(typestr, argQuad), typestr, args);
+
+ // Check if an explicit cast is needed.
+ if ((splat || !argScalar) &&
+ ((ck == ClassB && argType != 'c') || argPoly || argUsgn)) {
+ std::string argTypeStr = "c";
+ if (ck != ClassB)
+ argTypeStr = argType;
+ if (argQuad)
+ argTypeStr = "Q" + argTypeStr;
+ args = "(" + TypeString('d', argTypeStr) + ")" + args;
+ }
+
+ s += args;
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += ", " + utostr(GetNeonEnum(proto, typestr));
+
+ s += ");";
+
+ if (proto[0] != 'v' && sret) {
+ if (define)
+ s += " r;";
+ else
+ s += " return r;";
+ }
+ return s;
+}
+
+static std::string GenBuiltinDef(const std::string &name,
+ const std::string &proto,
+ StringRef typestr, ClassKind ck) {
+ std::string s("BUILTIN(__builtin_neon_");
+
+ // If all types are the same size, bitcasting the args will take care
+ // of arg checking. The actual signedness etc. will be taken care of with
+ // special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ s += MangleName(name, typestr, ck);
+ s += ", \"";
+
+ for (unsigned i = 0, e = proto.size(); i != e; ++i)
+ s += BuiltinTypeString(proto[i], typestr, ck, i == 0);
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += "i";
+
+ s += "\", \"n\")";
+ return s;
+}
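+// Illustration (not in the original source): for an overloaded (ClassB)
+// builtin this produces entries of the form
+//   BUILTIN(__builtin_neon_vabs_v, "V8ScV8Sci", "n")
+// where the trailing 'i' is the extra type-class enum argument.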
+
+static std::string GenIntrinsic(const std::string &name,
+ const std::string &proto,
+ StringRef outTypeStr, StringRef inTypeStr,
+ OpKind kind, ClassKind classKind) {
+ assert(!proto.empty() && "");
+ bool define = UseMacro(proto);
+ std::string s;
+
+ // static always inline + return type
+ if (define)
+ s += "#define ";
+ else
+ s += "__ai " + TypeString(proto[0], outTypeStr) + " ";
+
+ // Function name with type suffix
+ std::string mangledName = MangleName(name, outTypeStr, ClassS);
+ if (outTypeStr != inTypeStr) {
+ // If the input type is different (e.g., for vreinterpret), append a suffix
+ // for the input type. String off a "Q" (quad) prefix so that MangleName
+ // does not insert another "q" in the name.
+ unsigned typeStrOff = (inTypeStr[0] == 'Q' ? 1 : 0);
+ StringRef inTypeNoQuad = inTypeStr.substr(typeStrOff);
+ mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
+ }
+ s += mangledName;
+
+ // Function arguments
+ s += GenArgs(proto, inTypeStr);
+
+ // Definition.
+ if (define) {
+ s += " __extension__ ({ \\\n ";
+ s += GenMacroLocals(proto, inTypeStr);
+ } else {
+ s += " { \\\n ";
+ }
+
+ if (kind != OpNone)
+ s += GenOpString(kind, proto, outTypeStr);
+ else
+ s += GenBuiltin(name, proto, outTypeStr, classKind);
+ if (define)
+ s += " })";
+ else
+ s += " }";
+ s += "\n";
+ return s;
+}
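+// Rough illustration (not literal emitter output): for an OpAdd intrinsic
+// such as vadd_s8, the generated arm_neon.h entry reads
+//   __ai int8x8_t vadd_s8(int8x8_t __a, int8x8_t __b) { return __a + __b; }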
+
+/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
+/// consists of type definitions and function declarations.
+void NeonEmitter::run(raw_ostream &OS) {
+ OS <<
+ "/*===---- arm_neon.h - ARM Neon intrinsics ------------------------------"
+ "---===\n"
+ " *\n"
+ " * Permission is hereby granted, free of charge, to any person obtaining "
+ "a copy\n"
+ " * of this software and associated documentation files (the \"Software\"),"
+ " to deal\n"
+ " * in the Software without restriction, including without limitation the "
+ "rights\n"
+ " * to use, copy, modify, merge, publish, distribute, sublicense, "
+ "and/or sell\n"
+ " * copies of the Software, and to permit persons to whom the Software is\n"
+ " * furnished to do so, subject to the following conditions:\n"
+ " *\n"
+ " * The above copyright notice and this permission notice shall be "
+ "included in\n"
+ " * all copies or substantial portions of the Software.\n"
+ " *\n"
+ " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, "
+ "EXPRESS OR\n"
+ " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF "
+ "MERCHANTABILITY,\n"
+ " * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT "
+ "SHALL THE\n"
+ " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR "
+ "OTHER\n"
+ " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, "
+ "ARISING FROM,\n"
+ " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER "
+ "DEALINGS IN\n"
+ " * THE SOFTWARE.\n"
+ " *\n"
+ " *===--------------------------------------------------------------------"
+ "---===\n"
+ " */\n\n";
+
+ OS << "#ifndef __ARM_NEON_H\n";
+ OS << "#define __ARM_NEON_H\n\n";
+
+ OS << "#ifndef __ARM_NEON__\n";
+ OS << "#error \"NEON support not enabled\"\n";
+ OS << "#endif\n\n";
+
+ OS << "#include <stdint.h>\n\n";
+
+ // Emit NEON-specific scalar typedefs.
+ OS << "typedef float float32_t;\n";
+ OS << "typedef int8_t poly8_t;\n";
+ OS << "typedef int16_t poly16_t;\n";
+ OS << "typedef uint16_t float16_t;\n";
+
+ // Emit Neon vector typedefs.
+ std::string TypedefTypes("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfPcQPcPsQPs");
+ SmallVector<StringRef, 24> TDTypeVec;
+ ParseTypes(0, TypedefTypes, TDTypeVec);
+
+ // Emit vector typedefs.
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ bool dummy, quad = false, poly = false;
+ (void) ClassifyType(TDTypeVec[i], quad, poly, dummy);
+ if (poly)
+ OS << "typedef __attribute__((neon_polyvector_type(";
+ else
+ OS << "typedef __attribute__((neon_vector_type(";
+
+ unsigned nElts = GetNumElements(TDTypeVec[i], quad);
+ OS << utostr(nElts) << "))) ";
+ if (nElts < 10)
+ OS << " ";
+
+ OS << TypeString('s', TDTypeVec[i]);
+ OS << " " << TypeString('d', TDTypeVec[i]) << ";\n";
+ }
+ OS << "\n";
+
+ // Emit struct typedefs.
+ for (unsigned vi = 2; vi != 5; ++vi) {
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ std::string ts = TypeString('d', TDTypeVec[i]);
+ std::string vs = TypeString('0' + vi, TDTypeVec[i]);
+ OS << "typedef struct " << vs << " {\n";
+ OS << " " << ts << " val";
+ OS << "[" << utostr(vi) << "]";
+ OS << ";\n} ";
+ OS << vs << ";\n\n";
+ }
+ }
+
+ OS<<"#define __ai static __attribute__((__always_inline__, __nodebug__))\n\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ // Emit vmovl, vmull and vabd intrinsics first so they can be used by other
+ // intrinsics. (Some of the saturating multiply instructions are also
+ // used to implement the corresponding "_lane" variants, but tablegen
+ // sorts the records into alphabetical order so that the "_lane" variants
+ // come after the intrinsics they use.)
+ emitIntrinsic(OS, Records.getDef("VMOVL"));
+ emitIntrinsic(OS, Records.getDef("VMULL"));
+ emitIntrinsic(OS, Records.getDef("VABD"));
+
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ if (R->getName() != "VMOVL" &&
+ R->getName() != "VMULL" &&
+ R->getName() != "VABD")
+ emitIntrinsic(OS, R);
+ }
+
+ OS << "#undef __ai\n\n";
+ OS << "#endif /* __ARM_NEON_H */\n";
+}
+
+/// emitIntrinsic - Write out the arm_neon.h header file definitions for the
+/// intrinsics specified by record R.
+void NeonEmitter::emitIntrinsic(raw_ostream &OS, Record *R) {
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+
+ ClassKind classKind = ClassNone;
+ if (R->getSuperClasses().size() >= 2)
+ classKind = ClassMap[R->getSuperClasses()[1]];
+ if (classKind == ClassNone && kind == OpNone)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ if (kind == OpReinterpret) {
+ bool outQuad = false;
+ bool dummy = false;
+ (void)ClassifyType(TypeVec[ti], outQuad, dummy, dummy);
+ for (unsigned srcti = 0, srcte = TypeVec.size();
+ srcti != srcte; ++srcti) {
+ bool inQuad = false;
+ (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
+ if (srcti == ti || inQuad != outQuad)
+ continue;
+ OS << GenIntrinsic(name, Proto, TypeVec[ti], TypeVec[srcti],
+ OpCast, ClassS);
+ }
+ } else {
+ OS << GenIntrinsic(name, Proto, TypeVec[ti], TypeVec[ti],
+ kind, classKind);
+ }
+ }
+ OS << "\n";
+}
+
+static unsigned RangeFromType(const char mod, StringRef typestr) {
+ // base type to get the type string for.
+ bool quad = false, dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ type = ModType(mod, type, quad, dummy, dummy, dummy, dummy, dummy);
+
+ switch (type) {
+ case 'c':
+ return (8 << (int)quad) - 1;
+ case 'h':
+ case 's':
+ return (4 << (int)quad) - 1;
+ case 'f':
+ case 'i':
+ return (2 << (int)quad) - 1;
+ case 'l':
+ return (1 << (int)quad) - 1;
+ default:
+ throw "unhandled type!";
+ }
+}
+
+/// runHeader - Emit a file with sections defining:
+/// 1. the NEON section of BuiltinsARM.def.
+/// 2. the SemaChecking code for the type overload checking.
+/// 3. the SemaChecking code for validation of intrinsic immediate arguments.
+void NeonEmitter::runHeader(raw_ostream &OS) {
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ StringMap<OpKind> EmittedMap;
+
+ // Generate BuiltinsARM.def for NEON
+ OS << "#ifdef GET_NEON_BUILTINS\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ std::string Types = R->getValueAsString("Types");
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ std::string name = R->getValueAsString("Name");
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the BuiltinsARM.def declaration for this builtin, ensuring
+ // that each unique BUILTIN() macro appears only once in the output
+ // stream.
+ std::string bd = GenBuiltinDef(name, Proto, TypeVec[ti], ck);
+ if (EmittedMap.count(bd))
+ continue;
+
+ EmittedMap[bd] = OpNone;
+ OS << bd << "\n";
+ }
+ }
+ OS << "#endif\n\n";
+
+ // Generate the overloaded type checking code for SemaChecking.cpp
+ OS << "#ifdef GET_NEON_OVERLOAD_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ std::string name = R->getValueAsString("Name");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which have a scalar argument cannot be overloaded, no need to
+ // check them if we are emitting the type checking code.
+ if (Proto.find('s') != std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ int si = -1, qi = -1;
+ unsigned mask = 0, qmask = 0;
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the switch case(s) for this builtin for the type validation.
+ bool quad = false, poly = false, usgn = false;
+ (void) ClassifyType(TypeVec[ti], quad, poly, usgn);
+
+ if (quad) {
+ qi = ti;
+ qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ } else {
+ si = ti;
+ mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ }
+ }
+
+ // Check if the builtin function has a pointer or const pointer argument.
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
+ for (unsigned arg = 1, arge = Proto.size(); arg != arge; ++arg) {
+ char ArgType = Proto[arg];
+ if (ArgType == 'c') {
+ HasConstPtr = true;
+ PtrArgNum = arg - 1;
+ break;
+ }
+ if (ArgType == 'p') {
+ PtrArgNum = arg - 1;
+ break;
+ }
+ }
+ // For sret builtins, adjust the pointer argument index.
+ if (PtrArgNum >= 0 && (Proto[0] >= '2' && Proto[0] <= '4'))
+ PtrArgNum += 1;
+
+ // Omit type checking for the pointer arguments of vld1_lane, vld1_dup,
+ // and vst1_lane intrinsics. Using a pointer to the vector element
+ // type with one of those operations causes codegen to select an aligned
+ // load/store instruction. If you want an unaligned operation,
+    // the pointer argument needs to have less alignment than the element
+    // type, so just accept any pointer type.
+ if (name == "vld1_lane" || name == "vld1_dup" || name == "vst1_lane") {
+ PtrArgNum = -1;
+ HasConstPtr = false;
+ }
+
+ if (mask) {
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[si], ClassB)
+ << ": mask = " << "0x" << utohexstr(mask);
+ if (PtrArgNum >= 0)
+ OS << "; PtrArgNum = " << PtrArgNum;
+ if (HasConstPtr)
+ OS << "; HasConstPtr = true";
+ OS << "; break;\n";
+ }
+ if (qmask) {
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[qi], ClassB)
+ << ": mask = " << "0x" << utohexstr(qmask);
+ if (PtrArgNum >= 0)
+ OS << "; PtrArgNum = " << PtrArgNum;
+ if (HasConstPtr)
+ OS << "; HasConstPtr = true";
+ OS << "; break;\n";
+ }
+ }
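+  // Each emitted line has the form
+  //   case ARM::BI__builtin_neon_<name>_v: mask = 0x...; break;
+  // where the hexadecimal mask encodes the set of legal element types.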
+ OS << "#endif\n\n";
+
+ // Generate the intrinsic range checking code for shift/lane immediates.
+ OS << "#ifdef GET_NEON_IMMEDIATE_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which do not have an immediate do not need to have range
+ // checking code emitted.
+ size_t immPos = Proto.find('i');
+ if (immPos == std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ std::string namestr, shiftstr, rangestr;
+
+ if (R->getValueAsBit("isVCVT_N")) {
+ // VCVT between floating- and fixed-point values takes an immediate
+ // in the range 1 to 32.
+ ck = ClassB;
+ rangestr = "l = 1; u = 31"; // upper bound = l + u
+ } else if (Proto.find('s') == std::string::npos) {
+ // Builtins which are overloaded by type will need to have their upper
+ // bound computed at Sema time based on the type constant.
+ ck = ClassB;
+ if (R->getValueAsBit("isShift")) {
+ shiftstr = ", true";
+
+ // Right shifts have an 'r' in the name, left shifts do not.
+ if (name.find('r') != std::string::npos)
+ rangestr = "l = 1; ";
+ }
+ rangestr += "u = RFT(TV" + shiftstr + ")";
+ } else {
+ // The immediate generally refers to a lane in the preceding argument.
+ assert(immPos > 0 && "unexpected immediate operand");
+ rangestr = "u = " + utostr(RangeFromType(Proto[immPos-1], TypeVec[ti]));
+ }
+ // Make sure cases appear only once by uniquing them in a string map.
+ namestr = MangleName(name, TypeVec[ti], ck);
+ if (EmittedMap.count(namestr))
+ continue;
+ EmittedMap[namestr] = OpNone;
+
+ // Calculate the index of the immediate that should be range checked.
+ unsigned immidx = 0;
+
+ // Builtins that return a struct of multiple vectors have an extra
+ // leading arg for the struct return.
+ if (Proto[0] >= '2' && Proto[0] <= '4')
+ ++immidx;
+
+ // Add one to the index for each argument until we reach the immediate
+ // to be checked. Structs of vectors are passed as multiple arguments.
+ for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
+ switch (Proto[ii]) {
+ default: immidx += 1; break;
+ case '2': immidx += 2; break;
+ case '3': immidx += 3; break;
+ case '4': immidx += 4; break;
+ case 'i': ie = ii + 1; break;
+ }
+ }
+ OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
+ << ": i = " << immidx << "; " << rangestr << "; break;\n";
+ }
+ }
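+  // A typical emitted range check (values illustrative) looks like
+  //   case ARM::BI__builtin_neon_vget_lane_i8: i = 1; u = 7; break;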
+ OS << "#endif\n\n";
+}
+
+/// GenTest - Write out a test for the intrinsic specified by the name and
+/// type strings, including the embedded patterns for FileCheck to match.
+static std::string GenTest(const std::string &name,
+ const std::string &proto,
+ StringRef outTypeStr, StringRef inTypeStr,
+ bool isShift) {
+ assert(!proto.empty() && "");
+ std::string s;
+
+ // Function name with type suffix
+ std::string mangledName = MangleName(name, outTypeStr, ClassS);
+ if (outTypeStr != inTypeStr) {
+ // If the input type is different (e.g., for vreinterpret), append a suffix
+    // for the input type. Strip off a "Q" (quad) prefix so that MangleName
+ // does not insert another "q" in the name.
+ unsigned typeStrOff = (inTypeStr[0] == 'Q' ? 1 : 0);
+ StringRef inTypeNoQuad = inTypeStr.substr(typeStrOff);
+ mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
+ }
+
+ // Emit the FileCheck patterns.
+ s += "// CHECK: test_" + mangledName + "\n";
+ // s += "// CHECK: \n"; // FIXME: + expected instruction opcode.
+
+ // Emit the start of the test function.
+ s += TypeString(proto[0], outTypeStr) + " test_" + mangledName + "(";
+ char arg = 'a';
+ std::string comma;
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ // Do not create arguments for values that must be immediate constants.
+ if (proto[i] == 'i')
+ continue;
+ s += comma + TypeString(proto[i], inTypeStr) + " ";
+ s.push_back(arg);
+ comma = ", ";
+ }
+ s += ") { \\\n ";
+
+ if (proto[0] != 'v')
+ s += "return ";
+ s += mangledName + "(";
+ arg = 'a';
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (proto[i] == 'i') {
+ // For immediate operands, test the maximum value.
+ if (isShift)
+ s += "1"; // FIXME
+ else
+ // The immediate generally refers to a lane in the preceding argument.
+ s += utostr(RangeFromType(proto[i-1], inTypeStr));
+ } else {
+ s.push_back(arg);
+ }
+ if ((i + 1) < e)
+ s += ", ";
+ }
+ s += ");\n}\n\n";
+ return s;
+}
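+// Illustration (not in the original source): for vadd_s8 the generated test
+// is roughly
+//   // CHECK: test_vadd_s8
+//   int8x8_t test_vadd_s8(int8x8_t a, int8x8_t b) { return vadd_s8(a, b); }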
+
+/// runTests - Write out a complete set of tests for all of the Neon
+/// intrinsics.
+void NeonEmitter::runTests(raw_ostream &OS) {
+ OS <<
+ "// RUN: %clang_cc1 -triple thumbv7-apple-darwin \\\n"
+ "// RUN: -target-cpu cortex-a9 -ffreestanding -S -o - %s | FileCheck %s\n"
+ "\n"
+ "#include <arm_neon.h>\n"
+ "\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ bool isShift = R->getValueAsBit("isShift");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ if (kind == OpReinterpret) {
+ bool outQuad = false;
+ bool dummy = false;
+ (void)ClassifyType(TypeVec[ti], outQuad, dummy, dummy);
+ for (unsigned srcti = 0, srcte = TypeVec.size();
+ srcti != srcte; ++srcti) {
+ bool inQuad = false;
+ (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
+ if (srcti == ti || inQuad != outQuad)
+ continue;
+ OS << GenTest(name, Proto, TypeVec[ti], TypeVec[srcti], isShift);
+ }
+ } else {
+ OS << GenTest(name, Proto, TypeVec[ti], TypeVec[ti], isShift);
+ }
+ }
+ OS << "\n";
+ }
+}
+
diff --git a/clang/utils/TableGen/NeonEmitter.h b/clang/utils/TableGen/NeonEmitter.h
new file mode 100644
index 0000000..dec7451
--- /dev/null
+++ b/clang/utils/TableGen/NeonEmitter.h
@@ -0,0 +1,210 @@
+//===- NeonEmitter.h - Generate arm_neon.h for use with clang ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NEON_EMITTER_H
+#define NEON_EMITTER_H
+
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+
+enum OpKind {
+ OpNone,
+ OpAdd,
+ OpAddl,
+ OpAddw,
+ OpSub,
+ OpSubl,
+ OpSubw,
+ OpMul,
+ OpMla,
+ OpMlal,
+ OpMls,
+ OpMlsl,
+ OpMulN,
+ OpMlaN,
+ OpMlsN,
+ OpMlalN,
+ OpMlslN,
+ OpMulLane,
+ OpMullLane,
+ OpMlaLane,
+ OpMlsLane,
+ OpMlalLane,
+ OpMlslLane,
+ OpQDMullLane,
+ OpQDMlalLane,
+ OpQDMlslLane,
+ OpQDMulhLane,
+ OpQRDMulhLane,
+ OpEq,
+ OpGe,
+ OpLe,
+ OpGt,
+ OpLt,
+ OpNeg,
+ OpNot,
+ OpAnd,
+ OpOr,
+ OpXor,
+ OpAndNot,
+ OpOrNot,
+ OpCast,
+ OpConcat,
+ OpDup,
+ OpDupLane,
+ OpHi,
+ OpLo,
+ OpSelect,
+ OpRev16,
+ OpRev32,
+ OpRev64,
+ OpReinterpret,
+ OpAbdl,
+ OpAba,
+ OpAbal
+};
+
+enum ClassKind {
+ ClassNone,
+ ClassI, // generic integer instruction, e.g., "i8" suffix
+ ClassS, // signed/unsigned/poly, e.g., "s8", "u8" or "p8" suffix
+ ClassW, // width-specific instruction, e.g., "8" suffix
+ ClassB // bitcast arguments with enum argument to specify type
+};
+
+/// NeonTypeFlags - Flags to identify the types for overloaded Neon
+/// builtins. These must be kept in sync with the flags in
+/// include/clang/Basic/TargetBuiltins.h.
+class NeonTypeFlags {
+ enum {
+ EltTypeMask = 0xf,
+ UnsignedFlag = 0x10,
+ QuadFlag = 0x20
+ };
+ uint32_t Flags;
+
+public:
+ enum EltType {
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Poly8,
+ Poly16,
+ Float16,
+ Float32
+ };
+
+ NeonTypeFlags(unsigned F) : Flags(F) {}
+ NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
+ if (IsUnsigned)
+ Flags |= UnsignedFlag;
+ if (IsQuad)
+ Flags |= QuadFlag;
+ }
+
+ uint32_t getFlags() const { return Flags; }
+};
+
+namespace llvm {
+
+ class NeonEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ StringMap<OpKind> OpMap;
+ DenseMap<Record*, ClassKind> ClassMap;
+
+ public:
+ NeonEmitter(RecordKeeper &R) : Records(R) {
+ OpMap["OP_NONE"] = OpNone;
+ OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_ADDL"] = OpAddl;
+ OpMap["OP_ADDW"] = OpAddw;
+ OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_SUBL"] = OpSubl;
+ OpMap["OP_SUBW"] = OpSubw;
+ OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLAL"] = OpMlal;
+ OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MLSL"] = OpMlsl;
+ OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MLA_N"] = OpMlaN;
+ OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_MLAL_N"] = OpMlalN;
+ OpMap["OP_MLSL_N"] = OpMlslN;
+ OpMap["OP_MUL_LN"]= OpMulLane;
+ OpMap["OP_MULL_LN"] = OpMullLane;
+ OpMap["OP_MLA_LN"]= OpMlaLane;
+ OpMap["OP_MLS_LN"]= OpMlsLane;
+ OpMap["OP_MLAL_LN"] = OpMlalLane;
+ OpMap["OP_MLSL_LN"] = OpMlslLane;
+ OpMap["OP_QDMULL_LN"] = OpQDMullLane;
+ OpMap["OP_QDMLAL_LN"] = OpQDMlalLane;
+ OpMap["OP_QDMLSL_LN"] = OpQDMlslLane;
+ OpMap["OP_QDMULH_LN"] = OpQDMulhLane;
+ OpMap["OP_QRDMULH_LN"] = OpQRDMulhLane;
+ OpMap["OP_EQ"] = OpEq;
+ OpMap["OP_GE"] = OpGe;
+ OpMap["OP_LE"] = OpLe;
+ OpMap["OP_GT"] = OpGt;
+ OpMap["OP_LT"] = OpLt;
+ OpMap["OP_NEG"] = OpNeg;
+ OpMap["OP_NOT"] = OpNot;
+ OpMap["OP_AND"] = OpAnd;
+ OpMap["OP_OR"] = OpOr;
+ OpMap["OP_XOR"] = OpXor;
+ OpMap["OP_ANDN"] = OpAndNot;
+ OpMap["OP_ORN"] = OpOrNot;
+ OpMap["OP_CAST"] = OpCast;
+ OpMap["OP_CONC"] = OpConcat;
+ OpMap["OP_HI"] = OpHi;
+ OpMap["OP_LO"] = OpLo;
+ OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_DUP_LN"] = OpDupLane;
+ OpMap["OP_SEL"] = OpSelect;
+ OpMap["OP_REV16"] = OpRev16;
+ OpMap["OP_REV32"] = OpRev32;
+ OpMap["OP_REV64"] = OpRev64;
+ OpMap["OP_REINT"] = OpReinterpret;
+ OpMap["OP_ABDL"] = OpAbdl;
+ OpMap["OP_ABA"] = OpAba;
+ OpMap["OP_ABAL"] = OpAbal;
+
+ Record *SI = R.getClass("SInst");
+ Record *II = R.getClass("IInst");
+ Record *WI = R.getClass("WInst");
+ ClassMap[SI] = ClassS;
+ ClassMap[II] = ClassI;
+ ClassMap[WI] = ClassW;
+ }
+
+ // run - Emit arm_neon.h.inc
+ void run(raw_ostream &o);
+
+ // runHeader - Emit all the __builtin prototypes used in arm_neon.h
+ void runHeader(raw_ostream &o);
+
+ // runTests - Emit tests for all the Neon intrinsics.
+ void runTests(raw_ostream &o);
+
+ private:
+ void emitIntrinsic(raw_ostream &OS, Record *R);
+ };
+
+} // End llvm namespace
+
+#endif
diff --git a/clang/utils/TableGen/OptParserEmitter.cpp b/clang/utils/TableGen/OptParserEmitter.cpp
new file mode 100644
index 0000000..dea22d3
--- /dev/null
+++ b/clang/utils/TableGen/OptParserEmitter.cpp
@@ -0,0 +1,194 @@
+//===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "OptParserEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace llvm;
+
+static int StrCmpOptionName(const char *A, const char *B) {
+ char a = *A, b = *B;
+ while (a == b) {
+ if (a == '\0')
+ return 0;
+
+ a = *++A;
+ b = *++B;
+ }
+
+ if (a == '\0') // A is a prefix of B.
+ return 1;
+ if (b == '\0') // B is a prefix of A.
+ return -1;
+
+ // Otherwise lexicographic.
+ return (a < b) ? -1 : 1;
+}
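+// For example (illustration only), StrCmpOptionName("foo", "foobar") returns
+// 1, so an option name that is a prefix of another sorts after it.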
+
+static int CompareOptionRecords(const void *Av, const void *Bv) {
+ const Record *A = *(Record**) Av;
+ const Record *B = *(Record**) Bv;
+
+ // Sentinel options precede all others and are only ordered by precedence.
+ bool ASent = A->getValueAsDef("Kind")->getValueAsBit("Sentinel");
+ bool BSent = B->getValueAsDef("Kind")->getValueAsBit("Sentinel");
+ if (ASent != BSent)
+ return ASent ? -1 : 1;
+
+ // Compare options by name, unless they are sentinels.
+ if (!ASent)
+ if (int Cmp = StrCmpOptionName(A->getValueAsString("Name").c_str(),
+ B->getValueAsString("Name").c_str()))
+ return Cmp;
+
+  // Then by the kind precedence.
+ int APrec = A->getValueAsDef("Kind")->getValueAsInt("Precedence");
+ int BPrec = B->getValueAsDef("Kind")->getValueAsInt("Precedence");
+ assert(APrec != BPrec && "Options are equivalent!");
+ return APrec < BPrec ? -1 : 1;
+}
+
+static const std::string getOptionName(const Record &R) {
+ // Use the record name unless EnumName is defined.
+ if (dynamic_cast<UnsetInit*>(R.getValueInit("EnumName")))
+ return R.getName();
+
+ return R.getValueAsString("EnumName");
+}
+
+static raw_ostream &write_cstring(raw_ostream &OS, llvm::StringRef Str) {
+ OS << '"';
+ OS.write_escaped(Str);
+ OS << '"';
+ return OS;
+}
+
+void OptParserEmitter::run(raw_ostream &OS) {
+ // Get the option groups and options.
+ const std::vector<Record*> &Groups =
+ Records.getAllDerivedDefinitions("OptionGroup");
+ std::vector<Record*> Opts = Records.getAllDerivedDefinitions("Option");
+
+ if (GenDefs)
+ EmitSourceFileHeader("Option Parsing Definitions", OS);
+ else
+ EmitSourceFileHeader("Option Parsing Table", OS);
+
+ array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
+ if (GenDefs) {
+ OS << "#ifndef OPTION\n";
+ OS << "#error \"Define OPTION prior to including this file!\"\n";
+ OS << "#endif\n\n";
+
+ OS << "/////////\n";
+ OS << "// Groups\n\n";
+ for (unsigned i = 0, e = Groups.size(); i != e; ++i) {
+ const Record &R = *Groups[i];
+
+ // Start a single option entry.
+ OS << "OPTION(";
+
+ // The option string.
+ OS << '"' << R.getValueAsString("Name") << '"';
+
+ // The option identifier name.
+ OS << ", "<< getOptionName(R);
+
+ // The option kind.
+ OS << ", Group";
+
+ // The containing option group (if any).
+ OS << ", ";
+ if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << getOptionName(*DI->getDef());
+ else
+ OS << "INVALID";
+
+ // The other option arguments (unused for groups).
+ OS << ", INVALID, 0, 0";
+
+ // The option help text.
+ if (!dynamic_cast<UnsetInit*>(R.getValueInit("HelpText"))) {
+ OS << ",\n";
+ OS << " ";
+ write_cstring(OS, R.getValueAsString("HelpText"));
+ } else
+ OS << ", 0";
+
+ // The option meta-variable name (unused).
+ OS << ", 0)\n";
+ }
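+    // Each emitted group entry has the form (name, identifier, kind, group,
+    // alias, flags, params, help text, metavar), e.g. (illustrative)
+    //   OPTION("name", ID, Group, INVALID, INVALID, 0, 0, 0, 0)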
+ OS << "\n";
+
+ OS << "//////////\n";
+ OS << "// Options\n\n";
+ for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
+ const Record &R = *Opts[i];
+
+ // Start a single option entry.
+ OS << "OPTION(";
+
+ // The option string.
+ write_cstring(OS, R.getValueAsString("Name"));
+
+ // The option identifier name.
+ OS << ", "<< getOptionName(R);
+
+ // The option kind.
+ OS << ", " << R.getValueAsDef("Kind")->getValueAsString("Name");
+
+ // The containing option group (if any).
+ OS << ", ";
+ if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << getOptionName(*DI->getDef());
+ else
+ OS << "INVALID";
+
+ // The option alias (if any).
+ OS << ", ";
+ if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Alias")))
+ OS << getOptionName(*DI->getDef());
+ else
+ OS << "INVALID";
+
+ // The option flags.
+ const ListInit *LI = R.getValueAsListInit("Flags");
+ if (LI->empty()) {
+ OS << ", 0";
+ } else {
+ OS << ", ";
+ for (unsigned i = 0, e = LI->size(); i != e; ++i) {
+ if (i)
+ OS << " | ";
+ OS << dynamic_cast<DefInit*>(LI->getElement(i))->getDef()->getName();
+ }
+ }
+
+ // The option parameter field.
+ OS << ", " << R.getValueAsInt("NumArgs");
+
+ // The option help text.
+ if (!dynamic_cast<UnsetInit*>(R.getValueInit("HelpText"))) {
+ OS << ",\n";
+ OS << " ";
+ write_cstring(OS, R.getValueAsString("HelpText"));
+ } else
+ OS << ", 0";
+
+ // The option meta-variable name.
+ OS << ", ";
+ if (!dynamic_cast<UnsetInit*>(R.getValueInit("MetaVarName")))
+ write_cstring(OS, R.getValueAsString("MetaVarName"));
+ else
+ OS << "0";
+
+ OS << ")\n";
+ }
+ }
+}
diff --git a/clang/utils/TableGen/OptParserEmitter.h b/clang/utils/TableGen/OptParserEmitter.h
new file mode 100644
index 0000000..ca667ca
--- /dev/null
+++ b/clang/utils/TableGen/OptParserEmitter.h
@@ -0,0 +1,34 @@
+//===- OptParserEmitter.h - Table Driven Command Line Parsing ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UTILS_TABLEGEN_OPTPARSEREMITTER_H
+#define UTILS_TABLEGEN_OPTPARSEREMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+ /// OptParserEmitter - This tablegen backend takes an input .td file
+ /// describing a list of options and emits a data structure for parsing and
+ /// working with those options when given an input command line.
+ class OptParserEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ bool GenDefs;
+
+ public:
+ OptParserEmitter(RecordKeeper &R, bool _GenDefs)
+ : Records(R), GenDefs(_GenDefs) {}
+
+ /// run - Output the option parsing information.
+ ///
+    /// \param GenHeader - Generate the header describing the option IDs.
+ void run(raw_ostream &OS);
+ };
+}
+
+#endif
diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
new file mode 100644
index 0000000..5ff88db
--- /dev/null
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -0,0 +1,194 @@
+//===- TableGen.cpp - Top-Level TableGen implementation for Clang ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the main function for Clang's TableGen.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangASTNodesEmitter.h"
+#include "ClangAttrEmitter.h"
+#include "ClangDiagnosticsEmitter.h"
+#include "ClangSACheckersEmitter.h"
+#include "NeonEmitter.h"
+#include "OptParserEmitter.h"
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Main.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenAction.h"
+
+using namespace llvm;
+
+enum ActionType {
+ GenClangAttrClasses,
+ GenClangAttrImpl,
+ GenClangAttrList,
+ GenClangAttrPCHRead,
+ GenClangAttrPCHWrite,
+ GenClangAttrSpellingList,
+ GenClangAttrLateParsedList,
+ GenClangAttrTemplateInstantiate,
+ GenClangAttrParsedAttrList,
+ GenClangAttrParsedAttrKinds,
+ GenClangDiagsDefs,
+ GenClangDiagGroups,
+ GenClangDiagsIndexName,
+ GenClangDeclNodes,
+ GenClangStmtNodes,
+ GenClangSACheckers,
+ GenOptParserDefs, GenOptParserImpl,
+ GenArmNeon,
+ GenArmNeonSema,
+ GenArmNeonTest
+};
+
+namespace {
+ cl::opt<ActionType>
+ Action(cl::desc("Action to perform:"),
+ cl::values(clEnumValN(GenOptParserDefs, "gen-opt-parser-defs",
+ "Generate option definitions"),
+ clEnumValN(GenOptParserImpl, "gen-opt-parser-impl",
+ "Generate option parser implementation"),
+ clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes",
+                                      "Generate clang attribute classes"),
+ clEnumValN(GenClangAttrImpl, "gen-clang-attr-impl",
+ "Generate clang attribute implementations"),
+ clEnumValN(GenClangAttrList, "gen-clang-attr-list",
+ "Generate a clang attribute list"),
+ clEnumValN(GenClangAttrPCHRead, "gen-clang-attr-pch-read",
+ "Generate clang PCH attribute reader"),
+ clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
+ "Generate clang PCH attribute writer"),
+ clEnumValN(GenClangAttrSpellingList,
+ "gen-clang-attr-spelling-list",
+ "Generate a clang attribute spelling list"),
+ clEnumValN(GenClangAttrLateParsedList,
+ "gen-clang-attr-late-parsed-list",
+ "Generate a clang attribute LateParsed list"),
+ clEnumValN(GenClangAttrTemplateInstantiate,
+ "gen-clang-attr-template-instantiate",
+                                      "Generate clang attribute template instantiation code"),
+ clEnumValN(GenClangAttrParsedAttrList,
+ "gen-clang-attr-parsed-attr-list",
+ "Generate a clang parsed attribute list"),
+ clEnumValN(GenClangAttrParsedAttrKinds,
+ "gen-clang-attr-parsed-attr-kinds",
+                                      "Generate clang parsed attribute kinds"),
+ clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
+ "Generate Clang diagnostics definitions"),
+ clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
+ "Generate Clang diagnostic groups"),
+ clEnumValN(GenClangDiagsIndexName,
+ "gen-clang-diags-index-name",
+ "Generate Clang diagnostic name index"),
+ clEnumValN(GenClangDeclNodes, "gen-clang-decl-nodes",
+ "Generate Clang AST declaration nodes"),
+ clEnumValN(GenClangStmtNodes, "gen-clang-stmt-nodes",
+ "Generate Clang AST statement nodes"),
+ clEnumValN(GenClangSACheckers, "gen-clang-sa-checkers",
+ "Generate Clang Static Analyzer checkers"),
+ clEnumValN(GenArmNeon, "gen-arm-neon",
+ "Generate arm_neon.h for clang"),
+ clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
+ "Generate ARM NEON sema support for clang"),
+ clEnumValN(GenArmNeonTest, "gen-arm-neon-test",
+ "Generate ARM NEON tests for clang"),
+ clEnumValEnd));
+
+ cl::opt<std::string>
+ ClangComponent("clang-component",
+ cl::desc("Only use warnings from specified component"),
+ cl::value_desc("component"), cl::Hidden);
+
+class ClangTableGenAction : public TableGenAction {
+public:
+ bool operator()(raw_ostream &OS, RecordKeeper &Records) {
+ switch (Action) {
+ case GenClangAttrClasses:
+ ClangAttrClassEmitter(Records).run(OS);
+ break;
+ case GenClangAttrImpl:
+ ClangAttrImplEmitter(Records).run(OS);
+ break;
+ case GenClangAttrList:
+ ClangAttrListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrPCHRead:
+ ClangAttrPCHReadEmitter(Records).run(OS);
+ break;
+ case GenClangAttrPCHWrite:
+ ClangAttrPCHWriteEmitter(Records).run(OS);
+ break;
+ case GenClangAttrSpellingList:
+ ClangAttrSpellingListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrLateParsedList:
+ ClangAttrLateParsedListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrTemplateInstantiate:
+ ClangAttrTemplateInstantiateEmitter(Records).run(OS);
+ break;
+ case GenClangAttrParsedAttrList:
+ ClangAttrParsedAttrListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrParsedAttrKinds:
+ ClangAttrParsedAttrKindsEmitter(Records).run(OS);
+ break;
+ case GenClangDiagsDefs:
+ ClangDiagsDefsEmitter(Records, ClangComponent).run(OS);
+ break;
+ case GenClangDiagGroups:
+ ClangDiagGroupsEmitter(Records).run(OS);
+ break;
+ case GenClangDiagsIndexName:
+ ClangDiagsIndexNameEmitter(Records).run(OS);
+ break;
+ case GenClangDeclNodes:
+ ClangASTNodesEmitter(Records, "Decl", "Decl").run(OS);
+ ClangDeclContextEmitter(Records).run(OS);
+ break;
+ case GenClangStmtNodes:
+ ClangASTNodesEmitter(Records, "Stmt", "").run(OS);
+ break;
+ case GenClangSACheckers:
+ ClangSACheckersEmitter(Records).run(OS);
+ break;
+ case GenOptParserDefs:
+ OptParserEmitter(Records, true).run(OS);
+ break;
+ case GenOptParserImpl:
+ OptParserEmitter(Records, false).run(OS);
+ break;
+ case GenArmNeon:
+ NeonEmitter(Records).run(OS);
+ break;
+ case GenArmNeonSema:
+ NeonEmitter(Records).runHeader(OS);
+ break;
+ case GenArmNeonTest:
+ NeonEmitter(Records).runTests(OS);
+ break;
+ }
+
+ return false;
+ }
+};
+}
+
+int main(int argc, char **argv) {
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ cl::ParseCommandLineOptions(argc, argv);
+
+ ClangTableGenAction Action;
+ return TableGenMain(argv[0], Action);
+}
diff --git a/clang/utils/TestUtils/deep-stack.py b/clang/utils/TestUtils/deep-stack.py
new file mode 100755
index 0000000..1750a5f
--- /dev/null
+++ b/clang/utils/TestUtils/deep-stack.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+def pcall(f, N):
+ if N == 0:
+ print >>f, ' f(0)'
+ return
+
+ print >>f, ' f('
+ pcall(f, N - 1)
+ print >>f, ' )'
+
+def main():
+ f = open('t.c','w')
+ print >>f, 'int f(int n) { return n; }'
+ print >>f, 'int t() {'
+ print >>f, ' return'
+ pcall(f, 10000)
+ print >>f, ' ;'
+ print >>f, '}'
+
+if __name__ == "__main__":
+ import sys
+ sys.setrecursionlimit(100000)
+ main()
diff --git a/clang/utils/TestUtils/pch-test.pl b/clang/utils/TestUtils/pch-test.pl
new file mode 100755
index 0000000..e4311e9
--- /dev/null
+++ b/clang/utils/TestUtils/pch-test.pl
@@ -0,0 +1,61 @@
+#!/usr/bin/perl -w
+
+# This tiny little script, which should be run from the clang
+# directory (with clang in your path), tries to take each
+# compilable Clang test and build a PCH file from that test, then read
+# and dump the contents of the PCH file just created.
+use POSIX;
+
+$exitcode = 0;
+sub testfiles($$) {
+ my $suffix = shift;
+ my $language = shift;
+ my $passed = 0;
+ my $failed = 0;
+ my $skipped = 0;
+
+ @files = `ls test/*/*.$suffix`;
+ foreach $file (@files) {
+ chomp($file);
+ my $code = system("clang -fsyntax-only -x $language $file > /dev/null 2>&1");
+ if ($code == 0) {
+ print(".");
+ $code = system("clang -cc1 -emit-pch -x $language -o $file.pch $file > /dev/null 2>&1");
+ if ($code == 0) {
+ $code = system("clang -cc1 -include-pch $file.pch -x $language -ast-dump /dev/null > /dev/null 2>&1");
+ if ($code == 0) {
+ $passed++;
+ } elsif (($code & 0xFF) == SIGINT) {
+ exit($exitcode);
+ } else {
+ print("\n---Failed to dump AST file for \"$file\"---\n");
+ $exitcode = 1;
+ $failed++;
+ }
+ unlink "$file.pch";
+ } elsif (($code & 0xFF) == SIGINT) {
+ exit($exitcode);
+ } else {
+ print("\n---Failed to build PCH file for \"$file\"---\n");
+ $exitcode = 1;
+ $failed++;
+ }
+ } elsif (($code & 0xFF) == SIGINT) {
+ exit($exitcode);
+ } else {
+ print("x");
+ $skipped++;
+ }
+ }
+
+ print("\n\n$passed tests passed\n");
+ print("$failed tests failed\n");
+ print("$skipped tests skipped ('x')\n")
+}
+
+printf("-----Testing precompiled headers for C-----\n");
+testfiles("c", "c");
+printf("\n-----Testing precompiled headers for Objective-C-----\n");
+testfiles("m", "objective-c");
+print("\n");
+exit($exitcode);
diff --git a/clang/utils/VtableTest/Makefile b/clang/utils/VtableTest/Makefile
new file mode 100644
index 0000000..dd615ae
--- /dev/null
+++ b/clang/utils/VtableTest/Makefile
@@ -0,0 +1,24 @@
+GXX := llvm-g++-4.2
+CLANGXX := clang++
+
+all: one
+
+test.cc: gen.cc
+ g++ gen.cc -o gen
+ ./gen >test.cc
+
+test-gcc.sum: test.cc
+ time $(GXX) test.cc -o test-gcc.s -S
+ $(GXX) test-gcc.s -o test-gcc
+ ./test-gcc >test-gcc.sum
+
+test-clang.sum: test.cc
+ time $(CLANGXX) test.cc -o test-clang.s -S
+ $(CLANGXX) test-clang.s -o test-clang
+ ./test-clang >test-clang.sum
+
+one: test-gcc.sum test-clang.sum
+ cmp test-gcc.sum test-clang.sum
+
+clean:
+ rm -f gen test-gcc test-clang test.cc test-gcc.sum test-clang.sum test-gcc.s test-clang.s
diff --git a/clang/utils/VtableTest/check-zti b/clang/utils/VtableTest/check-zti
new file mode 100755
index 0000000..bf5b045
--- /dev/null
+++ b/clang/utils/VtableTest/check-zti
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+N_STRUCTS=300
+
+# Utility routine to "hand" check type infos.
+
+let i=1;
+while [ $i != $N_STRUCTS ]; do
+ sed -n "/^__ZTI.*s$i:/,/\.[sg][el]/p" test-clang.s |
+ grep -v '\.[sg][el]' | sed 's/(\([0-9][0-9]*\))/\1/' >test-clang-zti
+ sed -n "/^__ZTI.*s$i:/,/\.[sg][el]/p" test-gcc.s |
+ grep -v '\.[sg][el]' | sed 's/(\([0-9][0-9]*\))/\1/' >test-gcc-zti
+ diff -U3 test-gcc-zti test-clang-zti
+ if [ $? != 0 ]; then
+ echo "FAIL: s$i type info"
+ else
+ echo "PASS: s$i type info"
+ fi
+ let i=i+1
+done
diff --git a/clang/utils/VtableTest/check-ztt b/clang/utils/VtableTest/check-ztt
new file mode 100755
index 0000000..4a83c55
--- /dev/null
+++ b/clang/utils/VtableTest/check-ztt
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+N_STRUCTS=300
+
+# Utility routine to "hand" check VTTs.
+
+let i=1;
+while [ $i != $N_STRUCTS ]; do
+ sed -n "/^__ZTT.*s$i:/,/\.[sgm][elo]/p" test-clang.s |
+ grep -v '\.[sgm][elo]' | sed -e 's/[()]//g' -e '/^$/d' >test-clang-ztt
+ sed -n "/^__ZTT.*s$i:/,/\.[sgm][elo]/p" test-gcc.s |
+ grep -v '\.[sgm][elo]' | sed -e 's/[()]//g' -e 's/ + /+/' >test-gcc-ztt
+ diff -U3 test-gcc-ztt test-clang-ztt
+ if [ $? != 0 ]; then
+ echo "FAIL: s$i VTT"
+ else
+ echo "PASS: s$i VTT"
+ fi
+ let i=i+1
+done
diff --git a/clang/utils/VtableTest/check-zvt b/clang/utils/VtableTest/check-zvt
new file mode 100755
index 0000000..d8b93bd
--- /dev/null
+++ b/clang/utils/VtableTest/check-zvt
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+N_STRUCTS=300
+
+# Utility routine to "hand" check vtables.
+
+let i=1;
+while [ $i != $N_STRUCTS ]; do
+ sed -n "/^__ZTV.*s$i:/,/\.[sg][el]/p" test-clang.s | grep -v '\.[sg][el]' >test-clang-ztv
+ sed -n "/^__ZTV.*s$i:/,/\.[sg][el]/p" test-gcc.s | grep -v '\.[sg][el]' >test-gcc-ztv
+ diff -U3 test-gcc-ztv test-clang-ztv
+ if [ $? != 0 ]; then
+ echo "FAIL: s$i vtable"
+ else
+ echo "PASS: s$i vtable"
+ fi
+ let i=i+1
+done
diff --git a/clang/utils/VtableTest/gen.cc b/clang/utils/VtableTest/gen.cc
new file mode 100644
index 0000000..8396f8d
--- /dev/null
+++ b/clang/utils/VtableTest/gen.cc
@@ -0,0 +1,350 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N_FIELDS 7
+#define N_FUNCS 128
+#define FUNCSPACING 20
+#define N_STRUCTS 180 /* 1280 */
+#define N_BASES 6
+#define COVARIANT 0
+
+const char *simple_types[] = { "bool", "char", "short", "int", "float",
+ "double", "long double", "wchar_t", "void *",
+ "char *"
+};
+
+void gl(const char *c) {
+ printf("%s\n", c);
+}
+
+void g(const char *c) {
+ printf("%s", c);
+}
+
+void g(int i) {
+ printf("%d", i);
+}
+
+int uuid = 0;
+char base_present[N_STRUCTS][N_STRUCTS];
+
+// The return type for each function when doing covariant testcase generation.
+short ret_types[N_STRUCTS][N_FUNCS*FUNCSPACING];
+
+bool is_ambiguous(int s, int base) {
+ for (int i = 0; i < N_STRUCTS; ++i) {
+ if ((base_present[base][i] & base_present[s][i]) == 1)
+ return true;
+ }
+ return false;
+}
+
+void add_bases(int s, int base) {
+ for (int i = 0; i < N_STRUCTS; ++i)
+ base_present[s][i] |= base_present[base][i];
+ if (!COVARIANT)
+ return;
+ for (int i = 0; i < N_FUNCS*FUNCSPACING; ++i) {
+ if (!ret_types[base][i])
+ continue;
+ if (!ret_types[s][i]) {
+ ret_types[s][i] = ret_types[base][i];
+ continue;
+ }
+ if (base_present[ret_types[base][i]][ret_types[s][i]])
+ // If the return type of the function from this base dominates
+ ret_types[s][i] = ret_types[base][i];
+ if (base_present[ret_types[s][i]][ret_types[base][i]])
+ // If a previous base dominates
+ continue;
+ // If neither dominates, we'll use this class.
+ ret_types[s][i] = s;
+ }
+}
+
+// This contains the class that has the final override for
+// each class, for each function.
+short final_override[N_STRUCTS][N_FUNCS*FUNCSPACING];
+
+void gs(int s) {
+ bool polymorphic = false;
+
+ static int bases[N_BASES];
+ int i_bases = random() % (N_BASES*2);
+ if (i_bases >= N_BASES)
+    // PARAM: 1/2 of all classes should have no bases
+ i_bases = 0;
+ int n_bases = 0;
+ bool first_base = true;
+
+ // PARAM: 3/4 of all should be class, the rest are structs
+ if (random() % 4 == 0)
+ g("struct s");
+ else
+ g("class s");
+ g(s);
+ int old_base = -1;
+ if (s == 0 || s == 1)
+ i_bases = 0;
+ while (i_bases) {
+ --i_bases;
+ int base = random() % (s-1) + 1;
+ if (!base_present[s][base]) {
+ if (is_ambiguous(s, base))
+ continue;
+ if (first_base) {
+ first_base = false;
+ g(": ");
+ } else
+ g(", ");
+ int base_type = 1;
+ if (random()%8 == 0) {
+ // PARAM: 1/8th the bases are virtual
+ g("virtual ");
+ // We have a vtable and rtti, but technically we're not polymorphic
+ // polymorphic = true;
+ base_type = 3;
+ }
+      // PARAM: 1/4 are public, 1/8 are private, 1/8 are protected, the rest default
+ int base_protection = 0;
+ if (!COVARIANT)
+ base_protection = random()%8;
+ switch (base_protection) {
+ case 0:
+ case 1:
+ g("public "); break;
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ break;
+ case 6:
+ g("private "); break;
+ case 7:
+ g("protected "); break;
+ }
+ g("s");
+ add_bases(s, base);
+ bases[n_bases] = base;
+ base_present[s][base] = base_type;
+ ++n_bases;
+ g(base);
+ old_base = base;
+ }
+ }
+ gl(" {");
+
+ /* Fields */
+ int n_fields = N_FIELDS == 0 ? 0 : random() % (N_FIELDS*4);
+ // PARAM: 3/4 of all structs should have no members
+ if (n_fields >= N_FIELDS)
+ n_fields = 0;
+ for (int i = 0; i < n_fields; ++i) {
+ int t = random() % (sizeof(simple_types) / sizeof(simple_types[0]));
+ g(" "); g(simple_types[t]); g(" field"); g(i); gl(";");
+ }
+
+ /* Virtual functions */
+ static int funcs[N_FUNCS*FUNCSPACING];
+ // PARAM: 1/2 of all structs should have no virtual functions
+ int n_funcs = random() % (N_FUNCS*2);
+ if (n_funcs > N_FUNCS)
+ n_funcs = 0;
+ int old_func = -1;
+ for (int i = 0; i < n_funcs; ++i) {
+ int fn = old_func + random() % FUNCSPACING + 1;
+ funcs[i] = fn;
+ int ret_type = 0;
+ if (COVARIANT) {
+ ret_type = random() % s + 1;
+ if (!base_present[s][ret_type]
+ || !base_present[ret_type][ret_types[s][fn]])
+ if (ret_types[s][fn]) {
+ printf(" // Found one for s%d for s%d* fun%d.\n", s,
+ ret_types[s][fn], fn);
+ ret_type = ret_types[s][fn];
+ } else
+ ret_type = s;
+ else
+ printf(" // Wow found one for s%d for fun%d.\n", s, fn);
+ ret_types[s][fn] = ret_type;
+ }
+ if (ret_type) {
+ g(" virtual s"); g(ret_type); g("* fun");
+ } else
+ g(" virtual void fun");
+ g(fn); g("(char *t) { mix(\"vfn this offset\", (char *)this - t); mix(\"vfn uuid\", "); g(++uuid);
+ if (ret_type)
+ gl("); return 0; }");
+ else
+ gl("); }");
+ final_override[s][fn] = s;
+ old_func = fn;
+ }
+
+ // Add required overriders for correctness
+ for (int i = 0; i < n_bases; ++i) {
+ // For each base
+ int base = bases[i];
+ for (int fn = 0; fn < N_FUNCS*FUNCSPACING; ++fn) {
+ // For each possible function
+ int new_base = final_override[base][fn];
+ if (new_base == 0)
+ // If the base didn't have a final overrider, skip
+ continue;
+
+ int prev_base = final_override[s][fn];
+ if (prev_base == s)
+ // Skip functions defined in this class
+ continue;
+
+ // If we don't want to change the info, skip
+ if (prev_base == new_base)
+ continue;
+
+ if (prev_base == 0) {
+ // record the final override
+ final_override[s][fn] = new_base;
+ continue;
+ }
+
+ if (base_present[prev_base][new_base]) {
+ // The previous base dominates the new base, no update necessary
+ printf(" // No override for fun%d in s%d as s%d dominates s%d.\n",
+ fn, s, prev_base, new_base);
+ continue;
+ }
+
+ if (base_present[new_base][prev_base]) {
+ // The new base dominates the old base, no override necessary
+ printf(" // No override for fun%d in s%d as s%d dominates s%d.\n",
+ fn, s, new_base, prev_base);
+ // record the final override
+ final_override[s][fn] = new_base;
+ continue;
+ }
+
+ printf(" // Found we needed override for fun%d in s%d.\n", fn, s);
+
+ // record the final override
+ funcs[n_funcs++] = fn;
+ if (n_funcs == (N_FUNCS*FUNCSPACING-1))
+ abort();
+ int ret_type = 0;
+ if (COVARIANT) {
+ if (!ret_types[s][fn]) {
+ ret_types[s][fn] = ret_type = s;
+ } else {
+ ret_type = ret_types[s][fn];
+ if (ret_type != s)
+ printf(" // Calculated return type in s%d as s%d* fun%d.\n",
+ s, ret_type, fn);
+ }
+ }
+ if (ret_type) {
+ g(" virtual s"); g(ret_type); g("* fun");
+ } else
+ g(" virtual void fun");
+ g(fn); g("(char *t) { mix(\"vfn this offset\", (char *)this - t); mix(\"vfn uuid\", "); g(++uuid);
+ if (ret_type)
+ gl("); return 0; }");
+ else
+ gl("); }");
+ final_override[s][fn] = s;
+ }
+ }
+
+ gl("public:");
+ gl(" void calc(char *t) {");
+
+ // mix in the type number
+ g(" mix(\"type num\", "); g(s); gl(");");
+ // mix in the size
+ g(" mix(\"type size\", sizeof (s"); g(s); gl("));");
+ // mix in the this offset
+ gl(" mix(\"subobject offset\", (char *)this - t);");
+ if (n_funcs)
+ polymorphic = true;
+ if (polymorphic) {
+ // mix in offset to the complete object under construction
+ gl(" mix(\"real top v current top\", t - (char *)dynamic_cast<void*>(this));");
+ }
+
+ /* check base layout and overrides */
+ for (int i = 0; i < n_bases; ++i) {
+ g(" calc_s"); g(bases[i]); gl("(t);");
+ }
+
+ if (polymorphic) {
+ /* check dynamic_cast to each direct base */
+ for (int i = 0; i < n_bases; ++i) {
+ g(" if ((char *)dynamic_cast<s"); g(bases[i]); gl("*>(this))");
+ g(" mix(\"base dyn cast\", t - (char *)dynamic_cast<s"); g(bases[i]); gl("*>(this));");
+ g(" else mix(\"no dyncast\", "); g(++uuid); gl(");");
+ }
+ }
+
+ /* check field layout */
+ for (int i = 0; i < n_fields; ++i) {
+ g(" mix(\"field offset\", (char *)&field"); g(i); gl(" - (char *)this);");
+ }
+ if (n_fields == 0) {
+ g(" mix(\"no fields\", "); g(++uuid); gl(");");
+ }
+
+ /* check functions */
+ for (int i = 0; i < n_funcs; ++i) {
+ g(" fun"); g(funcs[i]); gl("(t);");
+ }
+ if (n_funcs == 0) {
+ g(" mix(\"no funcs\", "); g(++uuid); gl(");");
+ }
+
+ gl(" }");
+
+ // default ctor
+ g(" s"); g(s); g("() ");
+ first_base = true;
+ for (int i = 0; i < n_bases; ++i) {
+ if (first_base) {
+ g(": ");
+ first_base = false;
+ } else
+ g(", ");
+ g("s"); g(bases[i]); g("((char *)this)");
+ }
+ gl(" { calc((char *)this); }");
+ g(" ~s"); g(s); gl("() { calc((char *)this); }");
+
+ // ctor with this to the complete object
+ g(" s"); g(s); gl("(char *t) { calc(t); }");
+ g(" void calc_s"); g(s); gl("(char *t) { calc(t); }");
+ g("} a"); g(s); gl(";");
+}
+
+int main(int argc, char **argv) {
+ unsigned seed = 0;
+ char state[16];
+ if (argc > 1)
+ seed = atol(argv[1]);
+
+ initstate(seed, state, sizeof(state));
+ gl("extern \"C\" int printf(const char *...);");
+ gl("");
+ gl("long long sum;");
+ gl("void mix(const char *desc, long long i) {");
+ // If this ever becomes too slow, we can remove this after we improve the
+ // mixing function
+ gl(" printf(\"%s: %lld\\n\", desc, i);");
+ gl(" sum += ((sum ^ i) << 3) + (sum<<1) - i;");
+ gl("}");
+ gl("");
+ // PARAM: Randomly size testcases or large testcases?
+ int n_structs = /* random() % */ N_STRUCTS;
+ for (int i = 1; i < n_structs; ++i)
+ gs(i);
+ gl("int main() {");
+ gl(" printf(\"%llx\\n\", sum);");
+ gl("}");
+ return 0;
+}
diff --git a/clang/utils/analyzer/CmpRuns.py b/clang/utils/analyzer/CmpRuns.py
new file mode 100755
index 0000000..e68c45d
--- /dev/null
+++ b/clang/utils/analyzer/CmpRuns.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python
+
+"""
+CmpRuns - A simple tool for comparing two static analyzer runs to determine
+which reports have been added, removed, or changed.
+
+This is designed to support automated testing using the static analyzer, from
+two perspectives:
+ 1. To monitor changes in the static analyzer's reports on real code bases, for
+ regression testing.
+
+ 2. For use by end users who want to integrate regular static analyzer testing
+  into a buildbot-like environment.
+"""
+
+import os
+import plistlib
+
+#
+
+class multidict:
+ def __init__(self, elts=()):
+ self.data = {}
+ for key,value in elts:
+ self[key] = value
+
+ def __getitem__(self, item):
+ return self.data[item]
+ def __setitem__(self, key, value):
+ if key in self.data:
+ self.data[key].append(value)
+ else:
+ self.data[key] = [value]
+ def items(self):
+ return self.data.items()
+ def values(self):
+ return self.data.values()
+ def keys(self):
+ return self.data.keys()
+ def __len__(self):
+ return len(self.data)
+ def get(self, key, default=None):
+ return self.data.get(key, default)
+
+#
+
+class CmpOptions:
+ def __init__(self, verboseLog=None, root=""):
+ self.root = root
+ self.verboseLog = verboseLog
+
+class AnalysisReport:
+ def __init__(self, run, files):
+ self.run = run
+ self.files = files
+
+class AnalysisDiagnostic:
+ def __init__(self, data, report, htmlReport):
+ self.data = data
+ self.report = report
+ self.htmlReport = htmlReport
+
+ def getReadableName(self):
+ loc = self.data['location']
+ filename = self.report.run.getSourceName(self.report.files[loc['file']])
+ line = loc['line']
+ column = loc['col']
+ category = self.data['category']
+ description = self.data['description']
+
+ # FIXME: Get a report number based on this key, to 'distinguish'
+ # reports, or something.
+
+ return '%s:%d:%d, %s: %s' % (filename, line, column, category,
+ description)
+
+ def getReportData(self):
+ if self.htmlReport is None:
+ return " "
+ return os.path.join(self.report.run.path, self.htmlReport)
+ # We could also dump the report with:
+ # return open(os.path.join(self.report.run.path,
+ # self.htmlReport), "rb").read()
+
+class AnalysisRun:
+ def __init__(self, path, opts):
+ self.path = path
+ self.reports = []
+ self.diagnostics = []
+ self.opts = opts
+
+ def getSourceName(self, path):
+ if path.startswith(self.opts.root):
+ return path[len(self.opts.root):]
+ return path
+
+def loadResults(path, opts, deleteEmpty=True):
+ run = AnalysisRun(path, opts)
+
+ for f in os.listdir(path):
+ if (not f.startswith('report') or
+ not f.endswith('plist')):
+ continue
+
+ p = os.path.join(path, f)
+ data = plistlib.readPlist(p)
+
+ # Ignore/delete empty reports.
+ if not data['files']:
+ if deleteEmpty == True:
+ os.remove(p)
+ continue
+
+        # Extract the HTML reports, if they exist.
+ if 'HTMLDiagnostics_files' in data['diagnostics'][0]:
+ htmlFiles = []
+ for d in data['diagnostics']:
+ # FIXME: Why is this named files, when does it have multiple
+ # files?
+ assert len(d['HTMLDiagnostics_files']) == 1
+ htmlFiles.append(d.pop('HTMLDiagnostics_files')[0])
+ else:
+ htmlFiles = [None] * len(data['diagnostics'])
+
+ report = AnalysisReport(run, data.pop('files'))
+ diagnostics = [AnalysisDiagnostic(d, report, h)
+ for d,h in zip(data.pop('diagnostics'),
+ htmlFiles)]
+
+ assert not data
+
+ run.reports.append(report)
+ run.diagnostics.extend(diagnostics)
+
+ return run
+
+def compareResults(A, B):
+ """
+ compareResults - Generate a relation from diagnostics in run A to
+ diagnostics in run B.
+
+ The result is the relation as a list of triples (a, b, confidence) where
+ each element {a,b} is None or an element from the respective run, and
+ confidence is a measure of the match quality (where 0 indicates equality,
+ and None is used if either element is None).
+ """
+
+ res = []
+
+ # Quickly eliminate equal elements.
+ neqA = []
+ neqB = []
+ eltsA = list(A.diagnostics)
+ eltsB = list(B.diagnostics)
+ eltsA.sort(key = lambda d: d.data)
+ eltsB.sort(key = lambda d: d.data)
+ while eltsA and eltsB:
+ a = eltsA.pop()
+ b = eltsB.pop()
+ if a.data['location'] == b.data['location']:
+ res.append((a, b, 0))
+ elif a.data > b.data:
+ neqA.append(a)
+ eltsB.append(b)
+ else:
+ neqB.append(b)
+ eltsA.append(a)
+ neqA.extend(eltsA)
+ neqB.extend(eltsB)
+
+    # FIXME: Add fuzzy matching. One simple and possibly effective idea would be
+ # to bin the diagnostics, print them in a normalized form (based solely on
+ # the structure of the diagnostic), compute the diff, then use that as the
+ # basis for matching. This has the nice property that we don't depend in any
+ # way on the diagnostic format.
+
+ for a in neqA:
+ res.append((a, None, None))
+ for b in neqB:
+ res.append((None, b, None))
+
+ return res
+
+def cmpScanBuildResults(dirA, dirB, opts, deleteEmpty=True):
+ # Load the run results.
+ resultsA = loadResults(dirA, opts, deleteEmpty)
+ resultsB = loadResults(dirB, opts, deleteEmpty)
+
+ # Open the verbose log, if given.
+ if opts.verboseLog:
+ auxLog = open(opts.verboseLog, "wb")
+ else:
+ auxLog = None
+
+ diff = compareResults(resultsA, resultsB)
+ foundDiffs = 0
+ for res in diff:
+ a,b,confidence = res
+ if a is None:
+ print "ADDED: %r" % b.getReadableName()
+ foundDiffs += 1
+ if auxLog:
+ print >>auxLog, ("('ADDED', %r, %r)" % (b.getReadableName(),
+ b.getReportData()))
+ elif b is None:
+ print "REMOVED: %r" % a.getReadableName()
+ foundDiffs += 1
+ if auxLog:
+ print >>auxLog, ("('REMOVED', %r, %r)" % (a.getReadableName(),
+ a.getReportData()))
+ elif confidence:
+ print "CHANGED: %r to %r" % (a.getReadableName(),
+ b.getReadableName())
+ foundDiffs += 1
+ if auxLog:
+ print >>auxLog, ("('CHANGED', %r, %r, %r, %r)"
+ % (a.getReadableName(),
+ b.getReadableName(),
+ a.getReportData(),
+ b.getReportData()))
+ else:
+ pass
+
+ TotalReports = len(resultsB.diagnostics)
+ print "TOTAL REPORTS: %r" % TotalReports
+ print "TOTAL DIFFERENCES: %r" % foundDiffs
+ if auxLog:
+ print >>auxLog, "('TOTAL NEW REPORTS', %r)" % TotalReports
+ print >>auxLog, "('TOTAL DIFFERENCES', %r)" % foundDiffs
+
+ return foundDiffs
+
+def main():
+ from optparse import OptionParser
+ parser = OptionParser("usage: %prog [options] [dir A] [dir B]")
+ parser.add_option("", "--root", dest="root",
+ help="Prefix to ignore on source files",
+ action="store", type=str, default="")
+ parser.add_option("", "--verbose-log", dest="verboseLog",
+ help="Write additional information to LOG [default=None]",
+ action="store", type=str, default=None,
+ metavar="LOG")
+ (opts, args) = parser.parse_args()
+
+ if len(args) != 2:
+ parser.error("invalid number of arguments")
+
+ dirA,dirB = args
+
+ cmpScanBuildResults(dirA, dirB, opts)
+
+if __name__ == '__main__':
+ main()
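A minimal driving sketch for the script above (not part of the patch): it uses only the entry points defined in CmpRuns.py (CmpOptions, loadResults, compareResults, getReadableName) and follows the same Python 2 style; the result-directory names and root prefix are hypothetical.

    import CmpRuns

    # Hypothetical output directories from two scan-build runs.
    opts = CmpRuns.CmpOptions(verboseLog=None, root="/home/user/project/")
    before = CmpRuns.loadResults("scan-build-before", opts, deleteEmpty=False)
    after = CmpRuns.loadResults("scan-build-after", opts, deleteEmpty=False)

    # Walk the (a, b, confidence) relation produced by compareResults.
    for a, b, confidence in CmpRuns.compareResults(before, after):
        if a is None:
            print "ADDED:", b.getReadableName()
        elif b is None:
            print "REMOVED:", a.getReadableName()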
diff --git a/clang/utils/analyzer/SATestAdd.py b/clang/utils/analyzer/SATestAdd.py
new file mode 100755
index 0000000..ce64bc8
--- /dev/null
+++ b/clang/utils/analyzer/SATestAdd.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+"""
+Static Analyzer qualification infrastructure: adding a new project to
+the Repository Directory.
+
+ Add a new project for testing: build it and add to the Project Map file.
+ Assumes it's being run from the Repository Directory.
+ The project directory should be added inside the Repository Directory and
+ have the same name as the project ID.
+
+    The project should use the following files for setup:
+      - pre_run_static_analyzer.sh - prepares the build environment.
+             Ex: "make clean" can be a part of it.
+ - run_static_analyzer.cmd - a list of commands to run through scan-build.
+ Each command should be on a separate line.
+ Choose from: configure, make, xcodebuild
+"""
+import SATestBuild
+
+import os
+import csv
+import sys
+
+def isExistingProject(PMapFile, projectID) :
+ PMapReader = csv.reader(PMapFile)
+ for I in PMapReader:
+ if projectID == I[0]:
+ return True
+ return False
+
+# Add a new project for testing: build it and add to the Project Map file.
+# Params:
+# Dir is the directory where the sources are.
+# ID is a short string used to identify a project.
+def addNewProject(ID, IsScanBuild) :
+ CurDir = os.path.abspath(os.curdir)
+ Dir = SATestBuild.getProjectDir(ID)
+ if not os.path.exists(Dir):
+ print "Error: Project directory is missing: %s" % Dir
+ sys.exit(-1)
+
+ # Build the project.
+ SATestBuild.testProject(ID, True, IsScanBuild, Dir)
+
+ # Add the project ID to the project map.
+ ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
+ if os.path.exists(ProjectMapPath):
+ PMapFile = open(ProjectMapPath, "r+b")
+ else:
+ print "Warning: Creating the Project Map file!!"
+ PMapFile = open(ProjectMapPath, "w+b")
+ try:
+ if (isExistingProject(PMapFile, ID)) :
+ print >> sys.stdout, 'Warning: Project with ID \'', ID, \
+ '\' already exists.'
+ print >> sys.stdout, "Reference output has been regenerated."
+ else:
+ PMapWriter = csv.writer(PMapFile)
+ PMapWriter.writerow( (ID, int(IsScanBuild)) );
+ print "The project map is updated: ", ProjectMapPath
+ finally:
+ PMapFile.close()
+
+
+# TODO: Add an option not to build.
+# TODO: Set the path to the Repository directory.
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ print >> sys.stderr, 'Usage: ', sys.argv[0],\
+                             'project_ID <mode>\n' \
+                             'mode - 0 for single file project; 1 for scan_build'
+ sys.exit(-1)
+
+ IsScanBuild = 1
+ if (len(sys.argv) >= 3):
+ IsScanBuild = int(sys.argv[2])
+ assert((IsScanBuild == 0) | (IsScanBuild == 1))
+
+ addNewProject(sys.argv[1], IsScanBuild)
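A short sketch of the programmatic equivalent of the command-line usage documented above (not part of the patch); the project name is hypothetical and its directory is assumed to already exist inside the Repository Directory.

    import SATestAdd

    # Equivalent to: python SATestAdd.py MyProject 1
    # The second argument matches addNewProject(): 1 for scan-build
    # projects, 0 for preprocessed single-file projects.
    SATestAdd.addNewProject("MyProject", 1)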
diff --git a/clang/utils/analyzer/SATestBuild.py b/clang/utils/analyzer/SATestBuild.py
new file mode 100755
index 0000000..3fccb9a
--- /dev/null
+++ b/clang/utils/analyzer/SATestBuild.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python
+
+"""
+Static Analyzer qualification infrastructure.
+
+The goal is to test the analyzer against different projects, check for failures,
+compare results, and measure performance.
+
+Repository Directory will contain sources of the projects as well as the
+information on how to build them and the expected output.
+Repository Directory structure:
+ - ProjectMap file
+ - Historical Performance Data
+ - Project Dir1
+ - ReferenceOutput
+ - Project Dir2
+ - ReferenceOutput
+ ..
+
+To test the build of the analyzer one would:
+ - Copy over a copy of the Repository Directory. (TODO: Prefer to ensure that
+   the build directory does not pollute the repository, to minimize network traffic).
+ - Build all projects, stopping on error. Produce logs to report errors.
+ - Compare results.
+
+The files which should be kept around for failure investigations:
+ RepositoryCopy/Project DirI/ScanBuildResults
+ RepositoryCopy/Project DirI/run_static_analyzer.log
+
+Assumptions (TODO: shouldn't need to assume these.):
+ The script is being run from the Repository Directory.
+   The compiler used by scan-build, and scan-build itself, are in the PATH.
+ export PATH=/Users/zaks/workspace/c2llvm/build/Release+Asserts/bin:$PATH
+
+For more logging, set the env variables:
+ zaks:TI zaks$ export CCC_ANALYZER_LOG=1
+ zaks:TI zaks$ export CCC_ANALYZER_VERBOSE=1
+"""
+import CmpRuns
+
+import os
+import csv
+import sys
+import glob
+import shutil
+import time
+import plistlib
+from subprocess import check_call, CalledProcessError
+
+# Project map stores info about all the "registered" projects.
+ProjectMapFile = "projectMap.csv"
+
+# Names of the project specific scripts.
+# The script that needs to be executed before the build can start.
+CleanupScript = "cleanup_run_static_analyzer.sh"
+# This is a file containing commands for scan-build.
+BuildScript = "run_static_analyzer.cmd"
+
+# The log file name.
+LogFolderName = "Logs"
+BuildLogName = "run_static_analyzer.log"
+# Summary file - contains the summary of the failures. Ex: This info can be
+# displayed when buildbot detects a build failure.
+NumOfFailuresInSummary = 10
+FailuresSummaryFileName = "failures.txt"
+# Summary of the result diffs.
+DiffsSummaryFileName = "diffs.txt"
+
+# The scan-build result directory.
+SBOutputDirName = "ScanBuildResults"
+SBOutputDirReferencePrefix = "Ref"
+
+# The list of checkers used during analyses. Currently this is all of the
+# non-experimental checkers, plus the experimental taint checker.
+Checkers="experimental.security.taint,core,deadcode,cplusplus,security,unix,osx,cocoa"
+
+Verbose = 1
+
+IsReferenceBuild = False
+
+# Make sure we flush the output after every print statement.
+class flushfile(object):
+ def __init__(self, f):
+ self.f = f
+ def write(self, x):
+ self.f.write(x)
+ self.f.flush()
+
+sys.stdout = flushfile(sys.stdout)
+
+def getProjectMapPath():
+ ProjectMapPath = os.path.join(os.path.abspath(os.curdir),
+ ProjectMapFile)
+ if not os.path.exists(ProjectMapPath):
+ print "Error: Cannot find the Project Map file " + ProjectMapPath +\
+              "\nRunning the script from the wrong directory?"
+ sys.exit(-1)
+ return ProjectMapPath
+
+def getProjectDir(ID):
+ return os.path.join(os.path.abspath(os.curdir), ID)
+
+def getSBOutputDirName() :
+ if IsReferenceBuild == True :
+ return SBOutputDirReferencePrefix + SBOutputDirName
+ else :
+ return SBOutputDirName
+
+# Run pre-processing script if any.
+def runCleanupScript(Dir, PBuildLogFile):
+ ScriptPath = os.path.join(Dir, CleanupScript)
+ if os.path.exists(ScriptPath):
+ try:
+ if Verbose == 1:
+ print " Executing: %s" % (ScriptPath,)
+ check_call("chmod +x %s" % ScriptPath, cwd = Dir,
+ stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
+ check_call(ScriptPath, cwd = Dir, stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
+ except:
+ print "Error: The pre-processing step failed. See ", \
+ PBuildLogFile.name, " for details."
+ sys.exit(-1)
+
+# Build the project with scan-build by reading in the commands and
+# prefixing them with the scan-build options.
+def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
+ BuildScriptPath = os.path.join(Dir, BuildScript)
+ if not os.path.exists(BuildScriptPath):
+ print "Error: build script is not defined: %s" % BuildScriptPath
+ sys.exit(-1)
+ SBOptions = "-plist-html -o " + SBOutputDir + " "
+ SBOptions += "-enable-checker " + Checkers + " "
+ try:
+ SBCommandFile = open(BuildScriptPath, "r")
+ SBPrefix = "scan-build " + SBOptions + " "
+ for Command in SBCommandFile:
+ SBCommand = SBPrefix + Command
+ if Verbose == 1:
+ print " Executing: %s" % (SBCommand,)
+ check_call(SBCommand, cwd = Dir, stderr=PBuildLogFile,
+ stdout=PBuildLogFile,
+ shell=True)
+ except:
+ print "Error: scan-build failed. See ",PBuildLogFile.name,\
+ " for details."
+ raise
+
+def hasNoExtension(FileName):
+ (Root, Ext) = os.path.splitext(FileName)
+ if ((Ext == "")) :
+ return True
+ return False
+
+def isValidSingleInputFile(FileName):
+ (Root, Ext) = os.path.splitext(FileName)
+ if ((Ext == ".i") | (Ext == ".ii") |
+ (Ext == ".c") | (Ext == ".cpp") |
+ (Ext == ".m") | (Ext == "")) :
+ return True
+ return False
+
+# Run analysis on a set of preprocessed files.
+def runAnalyzePreprocessed(Dir, SBOutputDir):
+ if os.path.exists(os.path.join(Dir, BuildScript)):
+ print "Error: The preprocessed files project should not contain %s" % \
+ BuildScript
+ raise Exception()
+
+ CmdPrefix = "clang -cc1 -analyze -analyzer-output=plist -w "
+ CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks "
+
+ PlistPath = os.path.join(Dir, SBOutputDir, "date")
+ FailPath = os.path.join(PlistPath, "failures");
+ os.makedirs(FailPath);
+
+ for FullFileName in glob.glob(Dir + "/*"):
+ FileName = os.path.basename(FullFileName)
+ Failed = False
+
+        # Only run the analysis on supported files.
+ if (hasNoExtension(FileName)):
+ continue
+ if (isValidSingleInputFile(FileName) == False):
+ print "Error: Invalid single input file %s." % (FullFileName,)
+ raise Exception()
+
+ # Build and call the analyzer command.
+ OutputOption = "-o " + os.path.join(PlistPath, FileName) + ".plist "
+ Command = CmdPrefix + OutputOption + os.path.join(Dir, FileName)
+ LogFile = open(os.path.join(FailPath, FileName + ".stderr.txt"), "w+b")
+ try:
+ if Verbose == 1:
+ print " Executing: %s" % (Command,)
+ check_call(Command, cwd = Dir, stderr=LogFile,
+ stdout=LogFile,
+ shell=True)
+ except CalledProcessError, e:
+            print "Error: Analysis of %s failed. See %s for details. " \
+ "Error code %d." % \
+ (FullFileName, LogFile.name, e.returncode)
+ Failed = True
+ finally:
+ LogFile.close()
+
+ # If command did not fail, erase the log file.
+ if Failed == False:
+ os.remove(LogFile.name);
+
+def buildProject(Dir, SBOutputDir, IsScanBuild):
+ TBegin = time.time()
+
+ BuildLogPath = os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+ print "Log file: %s" % (BuildLogPath,)
+ print "Output directory: %s" %(SBOutputDir, )
+
+ # Clean up the log file.
+ if (os.path.exists(BuildLogPath)) :
+ RmCommand = "rm " + BuildLogPath
+ if Verbose == 1:
+ print " Executing: %s" % (RmCommand,)
+ check_call(RmCommand, shell=True)
+
+ # Clean up scan build results.
+ if (os.path.exists(SBOutputDir)) :
+ RmCommand = "rm -r " + SBOutputDir
+ if Verbose == 1:
+ print " Executing: %s" % (RmCommand,)
+ check_call(RmCommand, shell=True)
+ assert(not os.path.exists(SBOutputDir))
+ os.makedirs(os.path.join(SBOutputDir, LogFolderName))
+
+ # Open the log file.
+ PBuildLogFile = open(BuildLogPath, "wb+")
+
+ # Build and analyze the project.
+ try:
+ runCleanupScript(Dir, PBuildLogFile)
+
+ if IsScanBuild:
+ runScanBuild(Dir, SBOutputDir, PBuildLogFile)
+ else:
+ runAnalyzePreprocessed(Dir, SBOutputDir)
+
+ if IsReferenceBuild :
+ runCleanupScript(Dir, PBuildLogFile)
+
+ finally:
+ PBuildLogFile.close()
+
+ print "Build complete (time: %.2f). See the log for more details: %s" % \
+ ((time.time()-TBegin), BuildLogPath)
+
+# A plist file is created for each call to the analyzer (each source file).
+# We are only interested in the ones that have bug reports, so delete the rest.
+def CleanUpEmptyPlists(SBOutputDir):
+ for F in glob.glob(SBOutputDir + "/*/*.plist"):
+ P = os.path.join(SBOutputDir, F)
+
+ Data = plistlib.readPlist(P)
+ # Delete empty reports.
+ if not Data['files']:
+ os.remove(P)
+ continue
+
+# Given the scan-build output directory, checks if the build failed
+# (by searching for the failures directories). If there are failures, it
+# creates a summary file in the output directory.
+def checkBuild(SBOutputDir):
+ # Check if there are failures.
+ Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt")
+ TotalFailed = len(Failures);
+ if TotalFailed == 0:
+ CleanUpEmptyPlists(SBOutputDir)
+ Plists = glob.glob(SBOutputDir + "/*/*.plist")
+        print "Number of bug reports (non-empty plist files) produced: %d" %\
+ len(Plists)
+ return;
+
+ # Create summary file to display when the build fails.
+ SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName)
+ if (Verbose > 0):
+ print " Creating the failures summary file %s" % (SummaryPath,)
+
+ SummaryLog = open(SummaryPath, "w+")
+ try:
+ SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,))
+ if TotalFailed > NumOfFailuresInSummary:
+ SummaryLog.write("See the first %d below.\n"
+ % (NumOfFailuresInSummary,))
+ # TODO: Add a line "See the results folder for more."
+
+ FailuresCopied = NumOfFailuresInSummary
+ Idx = 0
+ for FailLogPathI in glob.glob(SBOutputDir + "/*/failures/*.stderr.txt"):
+ if Idx >= NumOfFailuresInSummary:
+ break;
+ Idx += 1
+ SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,));
+ FailLogI = open(FailLogPathI, "r");
+ try:
+ shutil.copyfileobj(FailLogI, SummaryLog);
+ finally:
+ FailLogI.close()
+ finally:
+ SummaryLog.close()
+
+ print "Error: analysis failed. See ", SummaryPath
+ sys.exit(-1)
+
+# Auxiliary object to discard stdout.
+class Discarder(object):
+ def write(self, text):
+ pass # do nothing
+
+# Compare the warnings produced by scan-build.
+def runCmpResults(Dir):
+ TBegin = time.time()
+
+ RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName)
+ NewDir = os.path.join(Dir, SBOutputDirName)
+
+ # We have to go one level down the directory tree.
+ RefList = glob.glob(RefDir + "/*")
+ NewList = glob.glob(NewDir + "/*")
+
+ # Log folders are also located in the results dir, so ignore them.
+ RefList.remove(os.path.join(RefDir, LogFolderName))
+ NewList.remove(os.path.join(NewDir, LogFolderName))
+
+ if len(RefList) == 0 or len(NewList) == 0:
+ return False
+ assert(len(RefList) == len(NewList))
+
+    # There might be more than one folder underneath - one per scan-build
+ # command (Ex: one for configure and one for make).
+ if (len(RefList) > 1):
+ # Assume that the corresponding folders have the same names.
+ RefList.sort()
+ NewList.sort()
+
+ # Iterate and find the differences.
+ NumDiffs = 0
+ PairList = zip(RefList, NewList)
+ for P in PairList:
+ RefDir = P[0]
+ NewDir = P[1]
+
+ assert(RefDir != NewDir)
+ if Verbose == 1:
+ print " Comparing Results: %s %s" % (RefDir, NewDir)
+
+ DiffsPath = os.path.join(NewDir, DiffsSummaryFileName)
+ Opts = CmpRuns.CmpOptions(DiffsPath)
+        # Discard everything coming out of stdout (CmpRuns produces a lot of output).
+ OLD_STDOUT = sys.stdout
+ sys.stdout = Discarder()
+ # Scan the results, delete empty plist files.
+ NumDiffs = CmpRuns.cmpScanBuildResults(RefDir, NewDir, Opts, False)
+ sys.stdout = OLD_STDOUT
+ if (NumDiffs > 0) :
+ print "Warning: %r differences in diagnostics. See %s" % \
+ (NumDiffs, DiffsPath,)
+
+ print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin)
+ return (NumDiffs > 0)
+
+def updateSVN(Mode, ProjectsMap):
+ try:
+ ProjectsMap.seek(0)
+ for I in csv.reader(ProjectsMap):
+ ProjName = I[0]
+ Path = os.path.join(ProjName, getSBOutputDirName())
+
+ if Mode == "delete":
+ Command = "svn delete %s" % (Path,)
+ else:
+ Command = "svn add %s" % (Path,)
+
+ if Verbose == 1:
+ print " Executing: %s" % (Command,)
+ check_call(Command, shell=True)
+
+ if Mode == "delete":
+ CommitCommand = "svn commit -m \"[analyzer tests] Remove " \
+ "reference results.\""
+ else:
+ CommitCommand = "svn commit -m \"[analyzer tests] Add new " \
+ "reference results.\""
+ if Verbose == 1:
+ print " Executing: %s" % (CommitCommand,)
+ check_call(CommitCommand, shell=True)
+ except:
+ print "Error: SVN update failed."
+ sys.exit(-1)
+
+def testProject(ID, IsScanBuild, Dir=None):
+ print " \n\n--- Building project %s" % (ID,)
+
+ TBegin = time.time()
+
+ if Dir is None :
+ Dir = getProjectDir(ID)
+ if Verbose == 1:
+ print " Build directory: %s." % (Dir,)
+
+ # Set the build results directory.
+ RelOutputDir = getSBOutputDirName()
+ SBOutputDir = os.path.join(Dir, RelOutputDir)
+
+ buildProject(Dir, SBOutputDir, IsScanBuild)
+
+ checkBuild(SBOutputDir)
+
+ if IsReferenceBuild == False:
+ runCmpResults(Dir)
+
+ print "Completed tests for project %s (time: %.2f)." % \
+ (ID, (time.time()-TBegin))
+
+def testAll(InIsReferenceBuild = False, UpdateSVN = False):
+ global IsReferenceBuild
+ IsReferenceBuild = InIsReferenceBuild
+
+ PMapFile = open(getProjectMapPath(), "rb")
+ try:
+ # Validate the input.
+ for I in csv.reader(PMapFile):
+ if (len(I) != 2) :
+                print "Error: Rows in the ProjectMapFile should have 2 entries."
+ raise Exception()
+ if (not ((I[1] == "1") | (I[1] == "0"))):
+ print "Error: Second entry in the ProjectMapFile should be 0 or 1."
+ raise Exception()
+
+ # When we are regenerating the reference results, we might need to
+ # update svn. Remove reference results from SVN.
+ if UpdateSVN == True:
+ assert(InIsReferenceBuild == True);
+ updateSVN("delete", PMapFile);
+
+ # Test the projects.
+ PMapFile.seek(0)
+ for I in csv.reader(PMapFile):
+ testProject(I[0], int(I[1]))
+
+ # Add reference results to SVN.
+ if UpdateSVN == True:
+ updateSVN("add", PMapFile);
+
+ except:
+ print "Error occurred. Premature termination."
+ raise
+ finally:
+ PMapFile.close()
+
+if __name__ == '__main__':
+ IsReference = False
+ UpdateSVN = False
+ if len(sys.argv) >= 2:
+ if sys.argv[1] == "-r":
+ IsReference = True
+ elif sys.argv[1] == "-rs":
+ IsReference = True
+ UpdateSVN = True
+ else:
+ print >> sys.stderr, 'Usage: ', sys.argv[0],\
+                                 '[-r|-rs]\n' \
+                                 'Use -r to regenerate reference output\n' \
+                                 'Use -rs to regenerate reference output and update svn'
+
+ testAll(IsReference, UpdateSVN)
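A small sketch of driving SATestBuild as a library rather than as a script (not part of the patch); the project ID is hypothetical and would have to be registered in projectMap.csv.

    import SATestBuild

    # Build, analyze, and compare every project listed in projectMap.csv.
    SATestBuild.testAll(InIsReferenceBuild=False, UpdateSVN=False)

    # Or exercise a single registered project with scan-build.
    SATestBuild.testProject("MyProject", IsScanBuild=True)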
diff --git a/clang/utils/analyzer/ubiviz b/clang/utils/analyzer/ubiviz
new file mode 100755
index 0000000..1582797
--- /dev/null
+++ b/clang/utils/analyzer/ubiviz
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This script reads visualization data emitted by the static analyzer for
+# display in Ubigraph.
+#
+##===----------------------------------------------------------------------===##
+
+import xmlrpclib
+import sys
+
+def Error(message):
+ print >> sys.stderr, 'ubiviz: ' + message
+ sys.exit(1)
+
+def StreamData(filename):
+ file = open(filename)
+ for ln in file:
+ yield eval(ln)
+ file.close()
+
+def Display(G, data):
+ action = data[0]
+ if action == 'vertex':
+ vertex = data[1]
+ G.new_vertex_w_id(vertex)
+ for attribute in data[2:]:
+ G.set_vertex_attribute(vertex, attribute[0], attribute[1])
+ elif action == 'edge':
+ src = data[1]
+ dst = data[2]
+ edge = G.new_edge(src,dst)
+ for attribute in data[3:]:
+ G.set_edge_attribute(edge, attribute[0], attribute[1])
+ elif action == "vertex_style":
+ style_id = data[1]
+ parent_id = data[2]
+ G.new_vertex_style_w_id(style_id, parent_id)
+ for attribute in data[3:]:
+ G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+ elif action == "vertex_style_attribute":
+ style_id = data[1]
+ for attribute in data[2:]:
+ G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+ elif action == "change_vertex_style":
+ vertex_id = data[1]
+ style_id = data[2]
+ G.change_vertex_style(vertex_id,style_id)
+
+def main(args):
+ if len(args) == 0:
+ Error('no input files')
+
+ server = xmlrpclib.Server('http://127.0.0.1:20738/RPC2')
+ G = server.ubigraph
+
+ for arg in args:
+ G.clear()
+ for x in StreamData(arg):
+ Display(G,x)
+
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
+
+ \ No newline at end of file
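For reference, a hedged sketch of the line-oriented input that StreamData() and Display() above expect (not part of the patch): each input line is a Python literal that eval()'s to a tuple whose first element names the action. The attribute names below are illustrative Ubigraph attributes, not something this script defines.

    # Hypothetical generator for a ubiviz input file.
    records = [
        ('vertex', 1, ('label', 'entry'), ('color', '#00ff00')),
        ('vertex', 2, ('label', 'exit')),
        ('edge', 1, 2, ('arrow', 'true')),
    ]
    out = open('graph.data', 'w')
    for record in records:
        out.write(repr(record) + '\n')
    out.close()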
diff --git a/clang/utils/builtin-defines.c b/clang/utils/builtin-defines.c
new file mode 100644
index 0000000..9bbe5be
--- /dev/null
+++ b/clang/utils/builtin-defines.c
@@ -0,0 +1,85 @@
+/*
+This is a clang-style test case for checking that preprocessor
+defines match gcc.
+*/
+
+/*
+RUN: for arch in -m32 -m64; do \
+RUN: for lang in -std=gnu89 -ansi -std=c99 -std=gnu99; do \
+RUN: for input in c objective-c; do \
+RUN: for opts in "-O0" "-O1 -dynamic" "-O2 -static" "-Os"; do \
+RUN: echo "-- $arch, $lang, $input, $opts --"; \
+RUN: for cc in 0 1; do \
+RUN: if [ "$cc" == 0 ]; then \
+RUN: cc_prog=clang; \
+RUN: output=%t0; \
+RUN: else \
+RUN: cc_prog=gcc; \
+RUN: output=%t1; \
+RUN: fi; \
+RUN: $cc_prog $arch $lang $opts -march=core2 -dM -E -x $input %s | sort > $output; \
+RUN: done; \
+RUN: if (! diff %t0 %t1); then exit 1; fi; \
+RUN: done; \
+RUN: done; \
+RUN: done; \
+RUN: done;
+*/
+
+/* We don't care about this difference */
+#ifdef __PIC__
+#if __PIC__ == 1
+#undef __PIC__
+#undef __pic__
+#define __PIC__ 2
+#define __pic__ 2
+#endif
+#endif
+
+/* Undefine things we don't expect to match. */
+#undef __core2
+#undef __core2__
+#undef __SSSE3__
+
+/* Undefine things we don't expect to match. */
+#undef __DEC_EVAL_METHOD__
+#undef __INT16_TYPE__
+#undef __INT32_TYPE__
+#undef __INT64_TYPE__
+#undef __INT8_TYPE__
+#undef __SSP__
+#undef __APPLE_CC__
+#undef __VERSION__
+#undef __clang__
+#undef __llvm__
+#undef __nocona
+#undef __nocona__
+#undef __k8
+#undef __k8__
+#undef __tune_nocona__
+#undef __tune_core2__
+#undef __POINTER_WIDTH__
+#undef __INTPTR_TYPE__
+#undef __NO_MATH_INLINES
+
+#undef __DEC128_DEN__
+#undef __DEC128_EPSILON__
+#undef __DEC128_MANT_DIG__
+#undef __DEC128_MAX_EXP__
+#undef __DEC128_MAX__
+#undef __DEC128_MIN_EXP__
+#undef __DEC128_MIN__
+#undef __DEC32_DEN__
+#undef __DEC32_EPSILON__
+#undef __DEC32_MANT_DIG__
+#undef __DEC32_MAX_EXP__
+#undef __DEC32_MAX__
+#undef __DEC32_MIN_EXP__
+#undef __DEC32_MIN__
+#undef __DEC64_DEN__
+#undef __DEC64_EPSILON__
+#undef __DEC64_MANT_DIG__
+#undef __DEC64_MAX_EXP__
+#undef __DEC64_MAX__
+#undef __DEC64_MIN_EXP__
+#undef __DEC64_MIN__
diff --git a/clang/utils/clang-completion-mode.el b/clang/utils/clang-completion-mode.el
new file mode 100644
index 0000000..36d8181
--- /dev/null
+++ b/clang/utils/clang-completion-mode.el
@@ -0,0 +1,257 @@
+;;; Clang Code-Completion minor mode, for use with C/Objective-C/C++.
+
+;;; Commentary:
+
+;; This minor mode uses Clang's command line interface for code
+;; completion to provide code completion results for C, Objective-C,
+;; and C++ source files. When enabled, Clang will provide
+;; code-completion results in a secondary buffer based on the code
+;; being typed. For example, after typing "struct " (triggered via the
+;; space), Clang will provide the names of all structs visible from
+;; the current scope. After typing "p->" (triggered via the ">"),
+;; Clang will provide the names of all of the members of whatever
+;; class/struct/union "p" points to. Note that this minor mode isn't
+;; meant for serious use: it is meant to help experiment with code
+;; completion based on Clang. It needs your help to make it better!
+;;
+;; To use the Clang code completion mode, first make sure that the
+;; "clang" variable below refers to the "clang" executable,
+;; which is typically installed in libexec/. Then, place
+;; clang-completion-mode.el somewhere in your Emacs load path. You can
+;; add a new load path to Emacs by adding something like the following to
+;; your .emacs:
+;;
+;; (setq load-path (cons "~/.emacs.d" load-path))
+;;
+;; Then, use
+;;
+;; M-x load-library
+;;
+;; to load the library in your Emacs session or add the following to
+;; your .emacs to always load this mode (not recommended):
+;;
+;; (load-library "clang-completion-mode")
+;;
+;; Finally, to try Clang-based code completion in a particular buffer,
+;; use M-x clang-completion-mode. When "Clang-CC" shows up in the mode
+;; line, Clang's code-completion is enabled.
+;;
+;; Clang's code completion is based on parsing the complete source
+;; file up to the point where the cursor is located. Therefore, Clang
+;; needs all of the various compilation flags (include paths, dialect
+;; options, etc.) to provide code-completion results. Currently, these
+;; need to be placed into the clang-flags variable in a format
+;; acceptable to clang. This is a hack: patches are welcome to
+;; improve the interface between this Emacs mode and Clang!
+;;
+
+;;; Code:
+;;; The clang executable
+(defcustom clang "clang"
+ "The location of the Clang compiler executable"
+ :type 'file
+ :group 'clang-completion-mode)
+
+;;; Extra compilation flags to pass to clang.
+(defcustom clang-flags nil
+ "Extra flags to pass to the Clang executable.
+This variable will typically contain include paths, e.g., -I~/MyProject."
+ :type '(repeat (string :tag "Argument" ""))
+ :group 'clang-completion-mode)
+
+;;; The prefix header to use with Clang code completion.
+(setq clang-completion-prefix-header "")
+
+;;; The substring we will use to filter completion results
+(setq clang-completion-substring "")
+
+;;; The current completion buffer
+(setq clang-completion-buffer nil)
+
+(setq clang-result-string "")
+
+;;; Compute the current line in the buffer
+(defun current-line ()
+ "Return the vertical position of point..."
+ (+ (count-lines (point-min) (point))
+ (if (= (current-column) 0) 1 0)
+ -1))
+
+;;; Set the Clang prefix header
+(defun clang-prefix-header ()
+ (interactive)
+ (setq clang-completion-prefix-header
+ (read-string "Clang prefix header> " "" clang-completion-prefix-header
+ "")))
+
+;; Process "filter" that keeps track of the code-completion results
+;; produced. We store all of the results in a string, then the
+;; sentinel processes the entire string at once.
+(defun clang-completion-stash-filter (proc string)
+ (setq clang-result-string (concat clang-result-string string)))
+
+;; Filter the given list based on a predicate.
+(defun filter (condp lst)
+ (delq nil
+ (mapcar (lambda (x) (and (funcall condp x) x)) lst)))
+
+;; Determine whether the given line is a code-completion result line.
+(defun is-completion-line (line)
+ (or (string-match "OVERLOAD:" line)
+ (string-match (concat "COMPLETION: " clang-completion-substring) line)))
+
+(defun clang-completion-display (buffer)
+ (let* ((all-lines (split-string clang-result-string "\n"))
+ (completion-lines (filter 'is-completion-line all-lines)))
+ (if (consp completion-lines)
+ (progn
+ ;; Erase the process buffer
+ (let ((cur (current-buffer)))
+ (set-buffer buffer)
+ (goto-char (point-min))
+ (erase-buffer)
+ (set-buffer cur))
+
+ ;; Display the process buffer
+ (display-buffer buffer)
+
+ ;; Insert the code-completion string into the process buffer.
+ (with-current-buffer buffer
+ (insert (mapconcat 'identity completion-lines "\n")))
+ ))))
+
+;; Process "sentinal" that, on successful code completion, replaces the
+;; contents of the code-completion buffer with the new code-completion results
+;; and ensures that the buffer is visible.
+(defun clang-completion-sentinel (proc event)
+ (let* ((all-lines (split-string clang-result-string "\n"))
+ (completion-lines (filter 'is-completion-line all-lines)))
+ (if (consp completion-lines)
+ (progn
+ ;; Erase the process buffer
+ (let ((cur (current-buffer)))
+ (set-buffer (process-buffer proc))
+ (goto-char (point-min))
+ (erase-buffer)
+ (set-buffer cur))
+
+ ;; Display the process buffer
+ (display-buffer (process-buffer proc))
+
+ ;; Insert the code-completion string into the process buffer.
+ (with-current-buffer (process-buffer proc)
+ (insert (mapconcat 'identity completion-lines "\n")))
+ ))))
+
+(defun clang-complete ()
+ (let* ((cc-point (concat (buffer-file-name)
+ ":"
+ (number-to-string (+ 1 (current-line)))
+ ":"
+ (number-to-string (+ 1 (current-column)))))
+ (cc-pch (if (equal clang-completion-prefix-header "") nil
+ (list "-include-pch"
+ (concat clang-completion-prefix-header ".pch"))))
+ (cc-flags (if (listp clang-flags) clang-flags nil))
+ (cc-command (append `(,clang "-cc1" "-fsyntax-only")
+ cc-flags
+ cc-pch
+ `("-code-completion-at" ,cc-point)
+ (list (buffer-file-name))))
+ (cc-buffer-name (concat "*Clang Completion for " (buffer-name) "*")))
+ ;; Start the code-completion process
+ (if (buffer-file-name)
+ (progn
+ ;; If there is already a code-completion process, kill it first.
+ (let ((cc-proc (get-process "Clang Code-Completion")))
+ (if cc-proc
+ (delete-process cc-proc)))
+
+ (setq clang-completion-substring "")
+ (setq clang-result-string "")
+ (setq clang-completion-buffer cc-buffer-name)
+
+ (let ((cc-proc (apply 'start-process
+ (append (list "Clang Code-Completion" cc-buffer-name)
+ cc-command))))
+ (set-process-filter cc-proc 'clang-completion-stash-filter)
+ (set-process-sentinel cc-proc 'clang-completion-sentinel)
+ )))))
+
+;; Code-completion when one of the trigger characters is typed into
+;; the buffer, e.g., '(', ',' or '.'.
+(defun clang-complete-self-insert (arg)
+ (interactive "p")
+ (self-insert-command arg)
+ (save-buffer)
+ (clang-complete))
+
+;; When the user has typed a character that requires the filter to be
+;; updated, do so (and update the display of results).
+(defun clang-update-filter ()
+ (setq clang-completion-substring (thing-at-point 'symbol))
+ (if (get-process "Clang Code-Completion")
+ ()
+ (clang-completion-display clang-completion-buffer)
+ ))
+
+;; Invoked when the user types an alphanumeric character or "_" to
+;; update the filter for the currently-active code completion.
+(defun clang-filter-self-insert (arg)
+ (interactive "p")
+ (self-insert-command arg)
+ (clang-update-filter)
+ )
+
+;; Invoked when the user types the backspace key to update the filter
+;; for the currently-active code completion.
+(defun clang-backspace ()
+ (interactive)
+ (delete-backward-char 1)
+ (clang-update-filter))
+
+;; Invoked when the user types the delete key to update the filter
+;; for the currently-active code completion.
+(defun clang-delete ()
+ (interactive)
+ (delete-backward-char 1)
+ (clang-update-filter))
+
+;; Set up the keymap for the Clang minor mode.
+(defvar clang-completion-mode-map nil
+ "Keymap for Clang Completion Mode.")
+
+(if (null clang-completion-mode-map)
+ (fset 'clang-completion-mode-map
+ (setq clang-completion-mode-map (make-sparse-keymap))))
+
+(if (not (assq 'clang-completion-mode minor-mode-map-alist))
+ (setq minor-mode-map-alist
+ (cons (cons 'clang-completion-mode clang-completion-mode-map)
+ minor-mode-map-alist)))
+
+;; Punctuation characters trigger code completion.
+(dolist (char '("(" "," "." ">" ":" "=" ")" " "))
+ (define-key clang-completion-mode-map char 'clang-complete-self-insert))
+
+;; Alphanumeric characters (and "_") filter the results of the
+;; currently-active code completion.
+(dolist (char '("A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O"
+ "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z"
+ "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o"
+ "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z"
+ "_" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9"))
+ (define-key clang-completion-mode-map char 'clang-filter-self-insert))
+
+;; Delete and backspace filter the results of the currently-active
+;; code completion.
+(define-key clang-completion-mode-map [(backspace)] 'clang-backspace)
+(define-key clang-completion-mode-map [(delete)] 'clang-delete)
+
+;; Set up the Clang minor mode.
+(define-minor-mode clang-completion-mode
+ "Clang code-completion mode"
+ nil
+ " Clang"
+ clang-completion-mode-map)
+
diff --git a/clang/utils/clangVisualizers.txt b/clang/utils/clangVisualizers.txt
new file mode 100644
index 0000000..0fef65f
--- /dev/null
+++ b/clang/utils/clangVisualizers.txt
@@ -0,0 +1,134 @@
+
+[Visualizer]
+
+llvm::SmallVector<*,*>{
+ preview (
+ #(
+ "[",
+ ($T1*)$e.EndX - ($T1*)$e.BeginX,
+ "](",
+ #array(
+ expr: (($T1*)$e.BeginX)[$i],
+ size: ($T1*)$e.EndX - ($T1*)$e.BeginX
+ ),
+ ")"
+ )
+ )
+
+ children (
+ #(
+ #([size] : ($T1*)$e.EndX - ($T1*)$e.BeginX),
+ #([capacity] : ($T1*)$e.CapacityX - ($T1*)$e.BeginX),
+ #array(
+ expr: (($T1*)$e.BeginX)[$i],
+ size: ($T1*)$e.EndX - ($T1*)$e.BeginX
+ )
+ )
+ )
+}
+
+llvm::StringRef{
+ preview ([$e.Data,s])
+ stringview ([$e.Data,sb])
+
+ children (
+ #(
+ #([size] : $e.Length),
+ #array(expr: $e.Data[$i], size: $e.Length)
+ )
+ )
+}
+
+clang::Token{
+ preview((clang::tok::TokenKind)(int)$e.Kind)
+}
+
+llvm::PointerIntPair<*,*,*,*>{
+ preview (
+ #(
+ ($T1*)($e.Value & $e.PointerBitMask),
+ " [",
+ ($T3)(($e.Value >> $e.IntShift) & $e.IntMask),
+ "]"
+ )
+ )
+
+ children (
+ #(
+ #([ptr] : ($T1*)($e.Value & $e.PointerBitMask)),
+ #([int] : ($T3)($e.Value >> $e.IntShift) & $e.IntMask)
+ )
+ )
+}
+
+llvm::PointerUnion<*,*>{
+ preview (
+ #if ((($e.Val.Value >> $e.Val.IntShift) & $e.Val.IntMask) == 0) (
+ "PT1"
+ ) #else (
+ "PT2"
+ )
+ )
+
+ children (
+ #(
+ #if ((($e.Val.Value >> $e.Val.IntShift) & $e.Val.IntMask) == 0) (
+ #([ptr] : ($T1)($e.Val.Value & $e.Val.PointerBitMask))
+ ) #else (
+ #([ptr] : ($T2)($e.Val.Value & $e.Val.PointerBitMask))
+ )
+ )
+ )
+}
+
+llvm::PointerUnion3<*,*,*>{
+ preview (
+ #if (($e.Val.Val.Value & 0x2) == 2) (
+ "PT2"
+ ) #elif (($e.Val.Val.Value & 0x1) == 1) (
+ "PT3"
+ ) #else (
+ "PT1"
+ )
+ )
+
+ children (
+ #(
+ #if (($e.Val.Val.Value & 0x2) == 2) (
+ #([ptr] : ($T2)(($e.Val.Val.Value >> 2) << 2))
+ ) #elif (($e.Val.Val.Value & 0x1) == 1) (
+ #([ptr] : ($T3)(($e.Val.Val.Value >> 2) << 2))
+ ) #else (
+ #([ptr] : ($T1)(($e.Val.Val.Value >> 2) << 2))
+ )
+ )
+ )
+}
+
+llvm::PointerUnion4<*,*,*,*>{
+ preview (
+ #if (($e.Val.Val.Value & 0x3) == 3) (
+ "PT4"
+ ) #elif (($e.Val.Val.Value & 0x2) == 2) (
+ "PT2"
+ ) #elif (($e.Val.Val.Value & 0x1) == 1) (
+ "PT3"
+ ) #else (
+ "PT1"
+ )
+ )
+
+ children (
+ #(
+ #if (($e.Val.Val.Value & 0x3) == 3) (
+ #([ptr] : ($T4)(($e.Val.Val.Value >> 2) << 2))
+ ) #elif (($e.Val.Val.Value & 0x2) == 2) (
+ #([ptr] : ($T2)(($e.Val.Val.Value >> 2) << 2))
+ ) #elif (($e.Val.Val.Value & 0x1) == 1) (
+ #([ptr] : ($T3)(($e.Val.Val.Value >> 2) << 2))
+ ) #else (
+ #([ptr] : ($T1)(($e.Val.Val.Value >> 2) << 2))
+ )
+ )
+ )
+}
diff --git a/clang/utils/find-unused-diagnostics.sh b/clang/utils/find-unused-diagnostics.sh
new file mode 100644
index 0000000..89b7f7a
--- /dev/null
+++ b/clang/utils/find-unused-diagnostics.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# This script produces a list of all diagnostics that are defined
+# in Diagnostic*.td files but not used in sources.
+#
+
+ALL_DIAGS=$(mktemp)
+ALL_SOURCES=$(mktemp)
+
+grep -E --only-matching --no-filename '(err_|warn_|ext_|note_)[a-z_]+ ' ./include/clang/Basic/Diagnostic*.td > $ALL_DIAGS
+find lib include tools -name \*.cpp -or -name \*.h > $ALL_SOURCES
+for DIAG in $(cat $ALL_DIAGS); do
+ if ! grep -r $DIAG $(cat $ALL_SOURCES) > /dev/null; then
+ echo $DIAG
+ fi;
+done
+
+rm $ALL_DIAGS $ALL_SOURCES
+
diff --git a/clang/utils/token-delta.py b/clang/utils/token-delta.py
new file mode 100755
index 0000000..327fa92
--- /dev/null
+++ b/clang/utils/token-delta.py
@@ -0,0 +1,251 @@
+#!/usr/bin/env python
+
+import os
+import re
+import subprocess
+import sys
+import tempfile
+
+###
+
+class DeltaAlgorithm(object):
+ def __init__(self):
+ self.cache = set()
+
+ def test(self, changes):
+ abstract
+
+ ###
+
+ def getTestResult(self, changes):
+ # There is no reason to cache successful tests because we will
+ # always reduce the changeset when we see one.
+
+ changeset = frozenset(changes)
+ if changeset in self.cache:
+ return False
+ elif not self.test(changes):
+ self.cache.add(changeset)
+ return False
+ else:
+ return True
+
+ def run(self, changes, force=False):
+        # Make sure the initial test passes; if not, then either (a)
+        # the user doesn't expect monotonicity and we may end up
+        # doing O(N^2) tests, or (b) the test is wrong. Avoid the
+        # O(N^2) case unless the user requests it.
+ if not force:
+ if not self.getTestResult(changes):
+ raise ValueError,'Initial test passed to delta fails.'
+
+ # Check empty set first to quickly find poor test functions.
+ if self.getTestResult(set()):
+ return set()
+ else:
+ return self.delta(changes, self.split(changes))
+
+ def split(self, S):
+ """split(set) -> [sets]
+
+ Partition a set into one or two pieces.
+ """
+
+ # There are many ways to split, we could do a better job with more
+ # context information (but then the API becomes grosser).
+ L = list(S)
+ mid = len(L)//2
+ if mid==0:
+ return L,
+ else:
+ return L[:mid],L[mid:]
+
+ def delta(self, c, sets):
+ # assert(reduce(set.union, sets, set()) == c)
+
+ # If there is nothing left we can remove, we are done.
+ if len(sets) <= 1:
+ return c
+
+ # Look for a passing subset.
+ res = self.search(c, sets)
+ if res is not None:
+ return res
+
+ # Otherwise, partition sets if possible; if not we are done.
+ refined = sum(map(list, map(self.split, sets)), [])
+ if len(refined) == len(sets):
+ return c
+
+ return self.delta(c, refined)
+
+ def search(self, c, sets):
+ for i,S in enumerate(sets):
+ # If test passes on this subset alone, recurse.
+ if self.getTestResult(S):
+ return self.delta(S, self.split(S))
+
+            # Otherwise, if we have more than two sets, see if the test
+            # passes without this subset.
+ if len(sets) > 2:
+ complement = sum(sets[:i] + sets[i+1:],[])
+ if self.getTestResult(complement):
+ return self.delta(complement, sets[:i] + sets[i+1:])
+
+###
+
+class Token:
+ def __init__(self, type, data, flags, file, line, column):
+ self.type = type
+ self.data = data
+ self.flags = flags
+ self.file = file
+ self.line = line
+ self.column = column
+
+kTokenRE = re.compile(r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""",
+ re.DOTALL | re.MULTILINE)
+
+def getTokens(path):
+ p = subprocess.Popen(['clang','-dump-raw-tokens',path],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out,err = p.communicate()
+
+ tokens = []
+ collect = None
+ for ln in err.split('\n'):
+ # Silly programmers refuse to print in simple machine readable
+ # formats. Whatever.
+ if collect is None:
+ collect = ln
+ else:
+ collect = collect + '\n' + ln
+ if 'Loc=<' in ln and ln.endswith('>'):
+ ln,collect = collect,None
+ tokens.append(Token(*kTokenRE.match(ln).groups()))
+
+ return tokens
+
+###
+
+class TMBDDelta(DeltaAlgorithm):
+ def __init__(self, testProgram, tokenLists, log):
+ def patchName(name, suffix):
+ base,ext = os.path.splitext(name)
+ return base + '.' + suffix + ext
+ super(TMBDDelta, self).__init__()
+ self.testProgram = testProgram
+ self.tokenLists = tokenLists
+ self.tempFiles = [patchName(f,'tmp')
+ for f,_ in self.tokenLists]
+ self.targetFiles = [patchName(f,'ok')
+ for f,_ in self.tokenLists]
+ self.log = log
+ self.numTests = 0
+
+ def writeFiles(self, changes, fileNames):
+ assert len(fileNames) == len(self.tokenLists)
+ byFile = [[] for i in self.tokenLists]
+ for i,j in changes:
+ byFile[i].append(j)
+
+ for i,(file,tokens) in enumerate(self.tokenLists):
+ f = open(fileNames[i],'w')
+ for j in byFile[i]:
+ f.write(tokens[j])
+ f.close()
+
+ return byFile
+
+ def test(self, changes):
+ self.numTests += 1
+
+ byFile = self.writeFiles(changes, self.tempFiles)
+
+ if self.log:
+ print >>sys.stderr, 'TEST - ',
+ if self.log > 1:
+ for i,(file,_) in enumerate(self.tokenLists):
+ indices = byFile[i]
+ if i:
+ sys.stderr.write('\n ')
+ sys.stderr.write('%s:%d tokens: [' % (file,len(byFile[i])))
+ prev = None
+ for j in byFile[i]:
+ if prev is None or j != prev + 1:
+ if prev:
+ sys.stderr.write('%d][' % prev)
+ sys.stderr.write(str(j))
+ sys.stderr.write(':')
+ prev = j
+ if byFile[i]:
+ sys.stderr.write(str(byFile[i][-1]))
+ sys.stderr.write('] ')
+ else:
+ print >>sys.stderr, ', '.join(['%s:%d tokens' % (file, len(byFile[i]))
+ for i,(file,_) in enumerate(self.tokenLists)]),
+
+ p = subprocess.Popen([self.testProgram] + self.tempFiles)
+ res = p.wait() == 0
+
+ if res:
+ self.writeFiles(changes, self.targetFiles)
+
+ if self.log:
+ print >>sys.stderr, '=> %s' % res
+ else:
+ if res:
+ print '\nSUCCESS (%d tokens)' % len(changes)
+ else:
+ sys.stderr.write('.')
+
+ return res
+
+ def run(self):
+ res = super(TMBDDelta, self).run([(i,j)
+ for i,(file,tokens) in enumerate(self.tokenLists)
+ for j in range(len(tokens))])
+ self.writeFiles(res, self.targetFiles)
+ if not self.log:
+ print >>sys.stderr
+ return res
+
+def tokenBasedMultiDelta(program, files, log):
+ # Read in the lists of tokens.
+ tokenLists = [(file, [t.data for t in getTokens(file)])
+ for file in files]
+
+ numTokens = sum([len(tokens) for _,tokens in tokenLists])
+ print "Delta on %s with %d tokens." % (', '.join(files), numTokens)
+
+ tbmd = TMBDDelta(program, tokenLists, log)
+
+ res = tbmd.run()
+
+ print "Finished %s with %d tokens (in %d tests)." % (', '.join(tbmd.targetFiles),
+ len(res),
+ tbmd.numTests)
+
+def main():
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("%prog <test program> {files+}")
+ parser.add_option("", "--debug", dest="debugLevel",
+ help="set debug level [default %default]",
+ action="store", type=int, default=0)
+ (opts, args) = parser.parse_args()
+
+ if len(args) <= 1:
+ parser.error('Invalid number of arguments.')
+
+ program,files = args[0],args[1:]
+
+ md = tokenBasedMultiDelta(program, files, log=opts.debugLevel)
+
+if __name__ == '__main__':
+ try:
+ main()
+ except KeyboardInterrupt:
+ print >>sys.stderr,'Interrupted.'
+ os._exit(1) # Avoid freeing our giant cache.
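A minimal, self-contained illustration of the DeltaAlgorithm interface defined above (not part of the patch), using an artificial interestingness test; a real test() would run a compiler or test program over files written from the selected changes, as TMBDDelta does.

    # Assumes DeltaAlgorithm from token-delta.py is in scope.
    class ContainsSevenDelta(DeltaAlgorithm):
        def test(self, changes):
            # A change set stays "interesting" while it still contains 7.
            return 7 in changes

    ddmin = ContainsSevenDelta()
    # Expected to reduce the initial change set to a small set containing 7.
    print ddmin.run(set(range(20)))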
diff --git a/clang/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp b/clang/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp
new file mode 100644
index 0000000..a86be6c
--- /dev/null
+++ b/clang/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp
@@ -0,0 +1,23 @@
+{
+ libstdcxx_overlapped_memcpy_in_stable_sort_1
+ Memcheck:Overlap
+ fun:memcpy
+ ...
+ fun:_ZSt11stable_sortIN9__gnu_cxx17__normal_iteratorIPSt4pairIPKN4llvm5ValueEjESt6vectorIS7_SaIS7_EEEEN12_GLOBAL__N_116CstSortPredicateEEvT_SF_T0_
+}
+
+{
+ libstdcxx_overlapped_memcpy_in_stable_sort_2
+ Memcheck:Overlap
+ fun:memcpy
+ ...
+ fun:_ZSt11stable_sortIN9__gnu_cxx17__normal_iteratorIPSt4pairIPKN4llvm5ValueEjESt6vectorIS7_SaIS7_EEEEN12_GLOBAL__N_116CstSortPredicateEEvT_SF_T0_
+}
+
+{
+ libstdcxx_overlapped_memcpy_in_stable_sort_3
+ Memcheck:Overlap
+ fun:memcpy
+ ...
+ fun:_ZSt11stable_sortIN9__gnu_cxx17__normal_iteratorIPSt4pairIPKN4llvm4TypeEjESt6vectorIS7_SaIS7_EEEEPFbRKS7_SE_EEvT_SH_T0_
+}