author    Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-10-15 17:10:06 +1100
committer Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-10-15 17:10:06 +1100
commit    be1de4be954c80875ad4108e0a33e8e131b2f2c0 (patch)
tree      1fbbecf276bf7c7bdcbb4dd446099d6d90eaa516 /clang/lib/AST
parent    c4626a62754862d20b41e8a46a3574264ea80e6d (diff)
parent    f1bd2e48c5324d3f7cda4090c87f8a5b6f463ce2 (diff)
Merge branch 'master' of ssh://bitbucket.org/czan/honours
Diffstat (limited to 'clang/lib/AST')
-rw-r--r--  clang/lib/AST/APValue.cpp  607
-rw-r--r--  clang/lib/AST/ASTConsumer.cpp  26
-rw-r--r--  clang/lib/AST/ASTContext.cpp  6771
-rw-r--r--  clang/lib/AST/ASTDiagnostic.cpp  331
-rw-r--r--  clang/lib/AST/ASTImporter.cpp  4676
-rw-r--r--  clang/lib/AST/AttrImpl.cpp  26
-rw-r--r--  clang/lib/AST/CMakeLists.txt  57
-rw-r--r--  clang/lib/AST/CXXABI.h  48
-rw-r--r--  clang/lib/AST/CXXInheritance.cpp  718
-rw-r--r--  clang/lib/AST/Decl.cpp  3057
-rw-r--r--  clang/lib/AST/DeclBase.cpp  1441
-rw-r--r--  clang/lib/AST/DeclCXX.cpp  2029
-rw-r--r--  clang/lib/AST/DeclFriend.cpp  48
-rw-r--r--  clang/lib/AST/DeclGroup.cpp  32
-rw-r--r--  clang/lib/AST/DeclObjC.cpp  1326
-rw-r--r--  clang/lib/AST/DeclPrinter.cpp  1072
-rw-r--r--  clang/lib/AST/DeclTemplate.cpp  872
-rw-r--r--  clang/lib/AST/DeclarationName.cpp  627
-rw-r--r--  clang/lib/AST/DumpXML.cpp  1040
-rw-r--r--  clang/lib/AST/Expr.cpp  3588
-rw-r--r--  clang/lib/AST/ExprCXX.cpp  1335
-rw-r--r--  clang/lib/AST/ExprClassification.cpp  644
-rw-r--r--  clang/lib/AST/ExprConstant.cpp  6926
-rw-r--r--  clang/lib/AST/ExternalASTSource.cpp  62
-rw-r--r--  clang/lib/AST/InheritViz.cpp  168
-rw-r--r--  clang/lib/AST/ItaniumCXXABI.cpp  73
-rw-r--r--  clang/lib/AST/ItaniumMangle.cpp  3587
-rw-r--r--  clang/lib/AST/LambdaMangleContext.cpp  30
-rw-r--r--  clang/lib/AST/Makefile  18
-rw-r--r--  clang/lib/AST/Mangle.cpp  142
-rw-r--r--  clang/lib/AST/MicrosoftCXXABI.cpp  71
-rw-r--r--  clang/lib/AST/MicrosoftMangle.cpp  1191
-rw-r--r--  clang/lib/AST/NSAPI.cpp  312
-rw-r--r--  clang/lib/AST/NestedNameSpecifier.cpp  633
-rw-r--r--  clang/lib/AST/ParentMap.cpp  130
-rw-r--r--  clang/lib/AST/RecordLayout.cpp  89
-rw-r--r--  clang/lib/AST/RecordLayoutBuilder.cpp  2488
-rw-r--r--  clang/lib/AST/SelectorLocationsKind.cpp  128
-rw-r--r--  clang/lib/AST/Stmt.cpp  867
-rw-r--r--  clang/lib/AST/StmtDumper.cpp  763
-rw-r--r--  clang/lib/AST/StmtIterator.cpp  155
-rw-r--r--  clang/lib/AST/StmtPrinter.cpp  1902
-rw-r--r--  clang/lib/AST/StmtProfile.cpp  1184
-rw-r--r--  clang/lib/AST/StmtViz.cpp  62
-rw-r--r--  clang/lib/AST/TemplateBase.cpp  628
-rw-r--r--  clang/lib/AST/TemplateName.cpp  176
-rw-r--r--  clang/lib/AST/Type.cpp  2256
-rw-r--r--  clang/lib/AST/TypeLoc.cpp  332
-rw-r--r--  clang/lib/AST/TypePrinter.cpp  1232
-rw-r--r--  clang/lib/AST/VTTBuilder.cpp  212
-rw-r--r--  clang/lib/AST/VTableBuilder.cpp  2404
51 files changed, 58592 insertions, 0 deletions
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
new file mode 100644
index 0000000..a31b3c5
--- /dev/null
+++ b/clang/lib/AST/APValue.cpp
@@ -0,0 +1,607 @@
+//===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+namespace {
+ struct LVBase {
+ llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd;
+ CharUnits Offset;
+ unsigned PathLength;
+ unsigned CallIndex;
+ };
+}
+
+struct APValue::LV : LVBase {
+ static const unsigned InlinePathSpace =
+ (MaxSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
+
+ /// Path - The sequence of base classes, fields and array indices to follow to
+ /// walk from Base to the subobject. When performing GCC-style folding, there
+ /// may not be such a path.
+ union {
+ LValuePathEntry Path[InlinePathSpace];
+ LValuePathEntry *PathPtr;
+ };
+
+ LV() { PathLength = (unsigned)-1; }
+ ~LV() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new LValuePathEntry[Length];
+ }
+
+ bool hasPath() const { return PathLength != (unsigned)-1; }
+ bool hasPathPtr() const { return hasPath() && PathLength > InlinePathSpace; }
+
+ LValuePathEntry *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const LValuePathEntry *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+namespace {
+ struct MemberPointerBase {
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> MemberAndIsDerivedMember;
+ unsigned PathLength;
+ };
+}
+
+struct APValue::MemberPointerData : MemberPointerBase {
+ static const unsigned InlinePathSpace =
+ (MaxSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
+ typedef const CXXRecordDecl *PathElem;
+ union {
+ PathElem Path[InlinePathSpace];
+ PathElem *PathPtr;
+ };
+
+ MemberPointerData() { PathLength = 0; }
+ ~MemberPointerData() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new PathElem[Length];
+ }
+
+ bool hasPathPtr() const { return PathLength > InlinePathSpace; }
+
+ PathElem *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const PathElem *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+// FIXME: Reduce the malloc traffic here.
+
+APValue::Arr::Arr(unsigned NumElts, unsigned Size) :
+ Elts(new APValue[NumElts + (NumElts != Size ? 1 : 0)]),
+ NumElts(NumElts), ArrSize(Size) {}
+APValue::Arr::~Arr() { delete [] Elts; }
+
+APValue::StructData::StructData(unsigned NumBases, unsigned NumFields) :
+ Elts(new APValue[NumBases+NumFields]),
+ NumBases(NumBases), NumFields(NumFields) {}
+APValue::StructData::~StructData() {
+ delete [] Elts;
+}
+
+APValue::UnionData::UnionData() : Field(0), Value(new APValue) {}
+APValue::UnionData::~UnionData () {
+ delete Value;
+}
+
+APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
+ switch (RHS.getKind()) {
+ case Uninitialized:
+ break;
+ case Int:
+ MakeInt();
+ setInt(RHS.getInt());
+ break;
+ case Float:
+ MakeFloat();
+ setFloat(RHS.getFloat());
+ break;
+ case Vector:
+ MakeVector();
+ setVector(((const Vec *)(const char *)RHS.Data)->Elts,
+ RHS.getVectorLength());
+ break;
+ case ComplexInt:
+ MakeComplexInt();
+ setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag());
+ break;
+ case ComplexFloat:
+ MakeComplexFloat();
+ setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag());
+ break;
+ case LValue:
+ MakeLValue();
+ if (RHS.hasLValuePath())
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
+ RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex());
+ else
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
+ RHS.getLValueCallIndex());
+ break;
+ case Array:
+ MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
+ for (unsigned I = 0, N = RHS.getArrayInitializedElts(); I != N; ++I)
+ getArrayInitializedElt(I) = RHS.getArrayInitializedElt(I);
+ if (RHS.hasArrayFiller())
+ getArrayFiller() = RHS.getArrayFiller();
+ break;
+ case Struct:
+ MakeStruct(RHS.getStructNumBases(), RHS.getStructNumFields());
+ for (unsigned I = 0, N = RHS.getStructNumBases(); I != N; ++I)
+ getStructBase(I) = RHS.getStructBase(I);
+ for (unsigned I = 0, N = RHS.getStructNumFields(); I != N; ++I)
+ getStructField(I) = RHS.getStructField(I);
+ break;
+ case Union:
+ MakeUnion();
+ setUnion(RHS.getUnionField(), RHS.getUnionValue());
+ break;
+ case MemberPointer:
+ MakeMemberPointer(RHS.getMemberPointerDecl(),
+ RHS.isMemberPointerToDerivedMember(),
+ RHS.getMemberPointerPath());
+ break;
+ case AddrLabelDiff:
+ MakeAddrLabelDiff();
+ setAddrLabelDiff(RHS.getAddrLabelDiffLHS(), RHS.getAddrLabelDiffRHS());
+ break;
+ }
+}
+
+void APValue::DestroyDataAndMakeUninit() {
+ if (Kind == Int)
+ ((APSInt*)(char*)Data)->~APSInt();
+ else if (Kind == Float)
+ ((APFloat*)(char*)Data)->~APFloat();
+ else if (Kind == Vector)
+ ((Vec*)(char*)Data)->~Vec();
+ else if (Kind == ComplexInt)
+ ((ComplexAPSInt*)(char*)Data)->~ComplexAPSInt();
+ else if (Kind == ComplexFloat)
+ ((ComplexAPFloat*)(char*)Data)->~ComplexAPFloat();
+ else if (Kind == LValue)
+ ((LV*)(char*)Data)->~LV();
+ else if (Kind == Array)
+ ((Arr*)(char*)Data)->~Arr();
+ else if (Kind == Struct)
+ ((StructData*)(char*)Data)->~StructData();
+ else if (Kind == Union)
+ ((UnionData*)(char*)Data)->~UnionData();
+ else if (Kind == MemberPointer)
+ ((MemberPointerData*)(char*)Data)->~MemberPointerData();
+ else if (Kind == AddrLabelDiff)
+ ((AddrLabelDiffData*)(char*)Data)->~AddrLabelDiffData();
+ Kind = Uninitialized;
+}
+
+void APValue::swap(APValue &RHS) {
+ std::swap(Kind, RHS.Kind);
+ char TmpData[MaxSize];
+ memcpy(TmpData, Data, MaxSize);
+ memcpy(Data, RHS.Data, MaxSize);
+ memcpy(RHS.Data, TmpData, MaxSize);
+}
+
+void APValue::dump() const {
+ dump(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+static double GetApproxValue(const llvm::APFloat &F) {
+ llvm::APFloat V = F;
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+void APValue::dump(raw_ostream &OS) const {
+ switch (getKind()) {
+ case Uninitialized:
+ OS << "Uninitialized";
+ return;
+ case Int:
+ OS << "Int: " << getInt();
+ return;
+ case Float:
+ OS << "Float: " << GetApproxValue(getFloat());
+ return;
+ case Vector:
+ OS << "Vector: ";
+ getVectorElt(0).dump(OS);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ OS << ", ";
+ getVectorElt(i).dump(OS);
+ }
+ return;
+ case ComplexInt:
+ OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
+ return;
+ case ComplexFloat:
+ OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
+ << ", " << GetApproxValue(getComplexFloatImag());
+ return;
+ case LValue:
+ OS << "LValue: <todo>";
+ return;
+ case Array:
+ OS << "Array: ";
+ for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
+ getArrayInitializedElt(I).dump(OS);
+ if (I != getArraySize() - 1) OS << ", ";
+ }
+ if (hasArrayFiller()) {
+ OS << getArraySize() - getArrayInitializedElts() << " x ";
+ getArrayFiller().dump(OS);
+ }
+ return;
+ case Struct:
+ OS << "Struct ";
+ if (unsigned N = getStructNumBases()) {
+ OS << " bases: ";
+ getStructBase(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructBase(I).dump(OS);
+ }
+ }
+ if (unsigned N = getStructNumFields()) {
+ OS << " fields: ";
+ getStructField(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructField(I).dump(OS);
+ }
+ }
+ return;
+ case Union:
+ OS << "Union: ";
+ getUnionValue().dump(OS);
+ return;
+ case MemberPointer:
+ OS << "MemberPointer: <todo>";
+ return;
+ case AddrLabelDiff:
+ OS << "AddrLabelDiff: <todo>";
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
+ switch (getKind()) {
+ case APValue::Uninitialized:
+ Out << "<uninitialized>";
+ return;
+ case APValue::Int:
+ if (Ty->isBooleanType())
+ Out << (getInt().getBoolValue() ? "true" : "false");
+ else
+ Out << getInt();
+ return;
+ case APValue::Float:
+ Out << GetApproxValue(getFloat());
+ return;
+ case APValue::Vector: {
+ Out << '{';
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ getVectorElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ Out << ", ";
+ getVectorElt(i).printPretty(Out, Ctx, ElemTy);
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::ComplexInt:
+ Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
+ return;
+ case APValue::ComplexFloat:
+ Out << GetApproxValue(getComplexFloatReal()) << "+"
+ << GetApproxValue(getComplexFloatImag()) << "i";
+ return;
+ case APValue::LValue: {
+ LValueBase Base = getLValueBase();
+ if (!Base) {
+ Out << "0";
+ return;
+ }
+
+ bool IsReference = Ty->isReferenceType();
+ QualType InnerTy
+ = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
+
+ if (!hasLValuePath()) {
+ // No lvalue path: just print the offset.
+ CharUnits O = getLValueOffset();
+ CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
+ if (!O.isZero()) {
+ if (IsReference)
+ Out << "*(";
+ if (O % S) {
+ Out << "(char*)";
+ S = CharUnits::One();
+ }
+ Out << '&';
+ } else if (!IsReference)
+ Out << '&';
+
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
+ Out << *VD;
+ else
+ Base.get<const Expr*>()->printPretty(Out, Ctx, 0,
+ Ctx.getPrintingPolicy());
+ if (!O.isZero()) {
+ Out << " + " << (O / S);
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+
+ // We have an lvalue path. Print it out nicely.
+ if (!IsReference)
+ Out << '&';
+ else if (isLValueOnePastTheEnd())
+ Out << "*(&";
+
+ QualType ElemTy;
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
+ Out << *VD;
+ ElemTy = VD->getType();
+ } else {
+ const Expr *E = Base.get<const Expr*>();
+ E->printPretty(Out, Ctx, 0,Ctx.getPrintingPolicy());
+ ElemTy = E->getType();
+ }
+
+ ArrayRef<LValuePathEntry> Path = getLValuePath();
+ const CXXRecordDecl *CastToBase = 0;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (ElemTy->getAs<RecordType>()) {
+ // The lvalue refers to a class type, so the next path entry is a base
+ // or member.
+ const Decl *BaseOrMember =
+ BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
+ CastToBase = RD;
+ ElemTy = Ctx.getRecordType(RD);
+ } else {
+ const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
+ Out << ".";
+ if (CastToBase)
+ Out << *CastToBase << "::";
+ Out << *VD;
+ ElemTy = VD->getType();
+ }
+ } else {
+ // The lvalue must refer to an array.
+ Out << '[' << Path[I].ArrayIndex << ']';
+ ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
+ }
+ }
+
+ // Handle formatting of one-past-the-end lvalues.
+ if (isLValueOnePastTheEnd()) {
+ // FIXME: If CastToBase is non-0, we should prefix the output with
+ // "(CastToBase*)".
+ Out << " + 1";
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+ case APValue::Array: {
+ const ArrayType *AT = Ctx.getAsArrayType(Ty);
+ QualType ElemTy = AT->getElementType();
+ Out << '{';
+ if (unsigned N = getArrayInitializedElts()) {
+ getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned I = 1; I != N; ++I) {
+ Out << ", ";
+ if (I == 10) {
+ // Avoid printing out the entire contents of large arrays.
+ Out << "...";
+ break;
+ }
+ getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
+ }
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Struct: {
+ Out << '{';
+ const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ bool First = true;
+ if (unsigned N = getStructNumBases()) {
+ const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
+ CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
+ for (unsigned I = 0; I != N; ++I, ++BI) {
+ assert(BI != CD->bases_end());
+ if (!First)
+ Out << ", ";
+ getStructBase(I).printPretty(Out, Ctx, BI->getType());
+ First = false;
+ }
+ }
+ for (RecordDecl::field_iterator FI = RD->field_begin();
+ FI != RD->field_end(); ++FI) {
+ if (!First)
+ Out << ", ";
+ if ((*FI)->isUnnamedBitfield()) continue;
+ getStructField((*FI)->getFieldIndex()).
+ printPretty(Out, Ctx, (*FI)->getType());
+ First = false;
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Union:
+ Out << '{';
+ if (const FieldDecl *FD = getUnionField()) {
+ Out << "." << *FD << " = ";
+ getUnionValue().printPretty(Out, Ctx, FD->getType());
+ }
+ Out << '}';
+ return;
+ case APValue::MemberPointer:
+ // FIXME: This is not enough to unambiguously identify the member in a
+ // multiple-inheritance scenario.
+ if (const ValueDecl *VD = getMemberPointerDecl()) {
+ Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
+ return;
+ }
+ Out << "0";
+ return;
+ case APValue::AddrLabelDiff:
+ Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
+ Out << " - ";
+ Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
+ std::string Result;
+ llvm::raw_string_ostream Out(Result);
+ printPretty(Out, Ctx, Ty);
+ Out.flush();
+ return Result;
+}
+
+const APValue::LValueBase APValue::getLValueBase() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getPointer();
+}
+
+bool APValue::isLValueOnePastTheEnd() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getInt();
+}
+
+CharUnits &APValue::getLValueOffset() {
+ assert(isLValue() && "Invalid accessor");
+ return ((LV*)(void*)Data)->Offset;
+}
+
+bool APValue::hasLValuePath() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data)->hasPath();
+}
+
+ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
+ assert(isLValue() && hasLValuePath() && "Invalid accessor");
+ const LV &LVal = *((const LV*)(const char*)Data);
+ return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength);
+}
+
+unsigned APValue::getLValueCallIndex() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data)->CallIndex;
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(false);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath((unsigned)-1);
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath(Path.size());
+ memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
+}
+
+const ValueDecl *APValue::getMemberPointerDecl() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return MPD.MemberAndIsDerivedMember.getPointer();
+}
+
+bool APValue::isMemberPointerToDerivedMember() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return MPD.MemberAndIsDerivedMember.getInt();
+}
+
+ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength);
+}
+
+void APValue::MakeLValue() {
+ assert(isUninit() && "Bad state change");
+ assert(sizeof(LV) <= MaxSize && "LV too big");
+ new ((void*)(char*)Data) LV();
+ Kind = LValue;
+}
+
+void APValue::MakeArray(unsigned InitElts, unsigned Size) {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) Arr(InitElts, Size);
+ Kind = Array;
+}
+
+void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path) {
+ assert(isUninit() && "Bad state change");
+ MemberPointerData *MPD = new ((void*)(char*)Data) MemberPointerData;
+ Kind = MemberPointer;
+ MPD->MemberAndIsDerivedMember.setPointer(Member);
+ MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
+ MPD->resizePath(Path.size());
+ memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*));
+}
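
APValue::LV and APValue::MemberPointerData in the file above share a small-buffer pattern: the lvalue or member-pointer path is kept inline in whatever room MaxSize leaves inside APValue's fixed-size Data buffer, and resizePath() only spills to a heap allocation when the path outgrows InlinePathSpace. The following standalone sketch illustrates that pattern under stated assumptions; InlinePathBuffer, Entry and InlineSpace are invented names for illustration and are not part of clang.

// Sketch only (not part of the commit above): inline-vs-heap path storage
// in the spirit of APValue::LV::resizePath().
struct InlinePathBuffer {
  typedef unsigned Entry;
  static const unsigned InlineSpace = 4; // slots available inside the object
  unsigned Length;                       // current number of path entries

  union {
    Entry Inline[InlineSpace]; // used while Length <= InlineSpace
    Entry *HeapPtr;            // used once Length > InlineSpace
  };

  InlinePathBuffer() : Length(0) {}
  ~InlinePathBuffer() { resize(0); }

  bool usesHeap() const { return Length > InlineSpace; }
  Entry *data() { return usesHeap() ? HeapPtr : Inline; }

  // Mirrors resizePath(): release any heap storage sized for the old
  // length, then allocate fresh heap storage only if the new length no
  // longer fits in the inline array.
  void resize(unsigned NewLength) {
    if (NewLength == Length)
      return;
    if (usesHeap())
      delete [] HeapPtr;
    Length = NewLength;
    if (usesHeap())
      HeapPtr = new Entry[NewLength];
  }
};

With InlineSpace at 4, a three-entry path lives entirely inside the object, while an eight-entry path costs a single heap allocation that resize(0) in the destructor releases; APValue derives its inline capacity the same way, from the space left over after the fixed LVBase fields.
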
diff --git a/clang/lib/AST/ASTConsumer.cpp b/clang/lib/AST/ASTConsumer.cpp
new file mode 100644
index 0000000..1672bc8
--- /dev/null
+++ b/clang/lib/AST/ASTConsumer.cpp
@@ -0,0 +1,26 @@
+//===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTConsumer class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DeclGroup.h"
+using namespace clang;
+
+bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ return true;
+}
+
+void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ HandleTopLevelDecl(D);
+}
+
+void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {}
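
The ASTConsumer defaults above are deliberately inert: HandleTopLevelDecl() returns true (meaning "keep parsing") and HandleInterestingDecl() just forwards to it, so subclasses override only the callbacks they care about. As a minimal sketch of that usage (DeclCounter is an invented name, not part of this tree), a consumer that tallies top-level declarations could look like:

#include "clang/AST/ASTConsumer.h"
#include "clang/AST/DeclGroup.h"

// Illustrative consumer: count every declaration the parser hands us.
class DeclCounter : public clang::ASTConsumer {
  unsigned NumDecls;
public:
  DeclCounter() : NumDecls(0) {}

  virtual bool HandleTopLevelDecl(clang::DeclGroupRef D) {
    // A DeclGroupRef may carry several declarators, e.g. "int a, b;".
    for (clang::DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
      ++NumDecls;
    return true; // true asks the parser to continue
  }

  unsigned getNumDecls() const { return NumDecls; }
};

Returning false from HandleTopLevelDecl() is the documented way for a consumer to request that parsing stop early.
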
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
new file mode 100644
index 0000000..cb4d336
--- /dev/null
+++ b/clang/lib/AST/ASTContext.cpp
@@ -0,0 +1,6771 @@
+//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ASTContext interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Capacity.h"
+#include "CXXABI.h"
+#include <map>
+
+using namespace clang;
+
+unsigned ASTContext::NumImplicitDefaultConstructors;
+unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyConstructors;
+unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
+unsigned ASTContext::NumImplicitMoveConstructors;
+unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyAssignmentOperators;
+unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitMoveAssignmentOperators;
+unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitDestructors;
+unsigned ASTContext::NumImplicitDestructorsDeclared;
+
+enum FloatingRank {
+ HalfRank, FloatRank, DoubleRank, LongDoubleRank
+};
+
+void
+ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm) {
+ ID.AddInteger(Parm->getDepth());
+ ID.AddInteger(Parm->getPosition());
+ ID.AddBoolean(Parm->isParameterPack());
+
+ TemplateParameterList *Params = Parm->getTemplateParameters();
+ ID.AddInteger(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(TTP->isParameterPack());
+ continue;
+ }
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ ID.AddInteger(1);
+ ID.AddBoolean(NTTP->isParameterPack());
+ ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
+ if (NTTP->isExpandedParameterPack()) {
+ ID.AddBoolean(true);
+ ID.AddInteger(NTTP->getNumExpansionTypes());
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
+ }
+ } else
+ ID.AddBoolean(false);
+ continue;
+ }
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ ID.AddInteger(2);
+ Profile(ID, TTP);
+ }
+}
+
+TemplateTemplateParmDecl *
+ASTContext::getCanonicalTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *TTP) const {
+ // Check if we already have a canonical template template parameter.
+ llvm::FoldingSetNodeID ID;
+ CanonicalTemplateTemplateParm::Profile(ID, TTP);
+ void *InsertPos = 0;
+ CanonicalTemplateTemplateParm *Canonical
+ = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canonical)
+ return Canonical->getParam();
+
+ // Build a canonical template parameter list.
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ SmallVector<NamedDecl *, 4> CanonParams;
+ CanonParams.reserve(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
+ CanonParams.push_back(
+ TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ TTP->getDepth(),
+ TTP->getIndex(), 0, false,
+ TTP->isParameterPack()));
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ QualType T = getCanonicalType(NTTP->getType());
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+ NonTypeTemplateParmDecl *Param;
+ if (NTTP->isExpandedParameterPack()) {
+ SmallVector<QualType, 2> ExpandedTypes;
+ SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
+ ExpandedTInfos.push_back(
+ getTrivialTypeSourceInfo(ExpandedTypes.back()));
+ }
+
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ T,
+ TInfo,
+ ExpandedTypes.data(),
+ ExpandedTypes.size(),
+ ExpandedTInfos.data());
+ } else {
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ T,
+ NTTP->isParameterPack(),
+ TInfo);
+ }
+ CanonParams.push_back(Param);
+
+ } else
+ CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
+ cast<TemplateTemplateParmDecl>(*P)));
+ }
+
+ TemplateTemplateParmDecl *CanonTTP
+ = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(),
+ TTP->isParameterPack(),
+ 0,
+ TemplateParameterList::Create(*this, SourceLocation(),
+ SourceLocation(),
+ CanonParams.data(),
+ CanonParams.size(),
+ SourceLocation()));
+
+ // Get the new insert position for the node we care about.
+ Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ assert(Canonical == 0 && "Shouldn't be in the map!");
+ (void)Canonical;
+
+ // Create the canonical template template parameter entry.
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
+ CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
+ return CanonTTP;
+}
+
+CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
+ if (!LangOpts.CPlusPlus) return 0;
+
+ switch (T.getCXXABI()) {
+ case CXXABI_ARM:
+ return CreateARMCXXABI(*this);
+ case CXXABI_Itanium:
+ return CreateItaniumCXXABI(*this);
+ case CXXABI_Microsoft:
+ return CreateMicrosoftCXXABI(*this);
+ }
+ llvm_unreachable("Invalid CXXABI type!");
+}
+
+static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
+ const LangOptions &LOpts) {
+ if (LOpts.FakeAddressSpaceMap) {
+ // The fake address space map must have a distinct entry for each
+ // language-specific address space.
+ static const unsigned FakeAddrSpaceMap[] = {
+ 1, // opencl_global
+ 2, // opencl_local
+ 3 // opencl_constant
+ };
+ return &FakeAddrSpaceMap;
+ } else {
+ return &T.getAddressSpaceMap();
+ }
+}
+
+ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
+ const TargetInfo *t,
+ IdentifierTable &idents, SelectorTable &sels,
+ Builtin::Context &builtins,
+ unsigned size_reserve,
+ bool DelayInitialization)
+ : FunctionProtoTypes(this_()),
+ TemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()),
+ SubstTemplateTemplateParmPacks(this_()),
+ GlobalNestedNameSpecifier(0),
+ Int128Decl(0), UInt128Decl(0),
+ ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
+ CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
+ FILEDecl(0),
+ jmp_bufDecl(0), sigjmp_bufDecl(0), ucontext_tDecl(0),
+ BlockDescriptorType(0), BlockDescriptorExtendedType(0),
+ cudaConfigureCallDecl(0),
+ NullTypeSourceInfo(QualType()),
+ FirstLocalImport(), LastLocalImport(),
+ SourceMgr(SM), LangOpts(LOpts),
+ AddrSpaceMap(0), Target(t), PrintingPolicy(LOpts),
+ Idents(idents), Selectors(sels),
+ BuiltinInfo(builtins),
+ DeclarationNames(*this),
+ ExternalSource(0), Listener(0),
+ LastSDM(0, 0),
+ UniqueBlockByRefTypeID(0)
+{
+ if (size_reserve > 0) Types.reserve(size_reserve);
+ TUDecl = TranslationUnitDecl::Create(*this);
+
+ if (!DelayInitialization) {
+ assert(t && "No target supplied for ASTContext initialization");
+ InitBuiltinTypes(*t);
+ }
+}
+
+ASTContext::~ASTContext() {
+ // Release the DenseMaps associated with DeclContext objects.
+ // FIXME: Is this the ideal solution?
+ ReleaseDeclContextMaps();
+
+ // Call all of the deallocation functions.
+ for (unsigned I = 0, N = Deallocations.size(); I != N; ++I)
+ Deallocations[I].first(Deallocations[I].second);
+
+ // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
+ // because they can contain DenseMaps.
+ for (llvm::DenseMap<const ObjCContainerDecl*,
+ const ASTRecordLayout*>::iterator
+ I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+
+ for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
+ I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+ }
+
+ for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
+ AEnd = DeclAttrs.end();
+ A != AEnd; ++A)
+ A->second->~AttrVec();
+}
+
+void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
+ Deallocations.push_back(std::make_pair(Callback, Data));
+}
+
+void
+ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
+ ExternalSource.reset(Source.take());
+}
+
+void ASTContext::PrintStats() const {
+ llvm::errs() << "\n*** AST Context Stats:\n";
+ llvm::errs() << " " << Types.size() << " types total.\n";
+
+ unsigned counts[] = {
+#define TYPE(Name, Parent) 0,
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+ 0 // Extra
+ };
+
+ for (unsigned i = 0, e = Types.size(); i != e; ++i) {
+ Type *T = Types[i];
+ counts[(unsigned)T->getTypeClass()]++;
+ }
+
+ unsigned Idx = 0;
+ unsigned TotalBytes = 0;
+#define TYPE(Name, Parent) \
+ if (counts[Idx]) \
+ llvm::errs() << " " << counts[Idx] << " " << #Name \
+ << " types\n"; \
+ TotalBytes += counts[Idx] * sizeof(Name##Type); \
+ ++Idx;
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+
+ llvm::errs() << "Total bytes = " << TotalBytes << "\n";
+
+ // Implicit special member functions.
+ llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
+ << NumImplicitDefaultConstructors
+ << " implicit default constructors created\n";
+ llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
+ << NumImplicitCopyConstructors
+ << " implicit copy constructors created\n";
+ if (getLangOpts().CPlusPlus)
+ llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
+ << NumImplicitMoveConstructors
+ << " implicit move constructors created\n";
+ llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
+ << NumImplicitCopyAssignmentOperators
+ << " implicit copy assignment operators created\n";
+ if (getLangOpts().CPlusPlus)
+ llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
+ << NumImplicitMoveAssignmentOperators
+ << " implicit move assignment operators created\n";
+ llvm::errs() << NumImplicitDestructorsDeclared << "/"
+ << NumImplicitDestructors
+ << " implicit destructors created\n";
+
+ if (ExternalSource.get()) {
+ llvm::errs() << "\n";
+ ExternalSource->PrintStats();
+ }
+
+ BumpAlloc.PrintStats();
+}
+
+TypedefDecl *ASTContext::getInt128Decl() const {
+ if (!Int128Decl) {
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
+ Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ &Idents.get("__int128_t"),
+ TInfo);
+ }
+
+ return Int128Decl;
+}
+
+TypedefDecl *ASTContext::getUInt128Decl() const {
+ if (!UInt128Decl) {
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
+ UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ &Idents.get("__uint128_t"),
+ TInfo);
+ }
+
+ return UInt128Decl;
+}
+
+void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
+ BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
+ R = CanQualType::CreateUnsafe(QualType(Ty, 0));
+ Types.push_back(Ty);
+}
+
+void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Incorrect target reinitialization");
+ assert(VoidTy.isNull() && "Context reinitialized?");
+
+ this->Target = &Target;
+
+ ABI.reset(createCXXABI(Target));
+ AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
+
+ // C99 6.2.5p19.
+ InitBuiltinType(VoidTy, BuiltinType::Void);
+
+ // C99 6.2.5p2.
+ InitBuiltinType(BoolTy, BuiltinType::Bool);
+ // C99 6.2.5p3.
+ if (LangOpts.CharIsSigned)
+ InitBuiltinType(CharTy, BuiltinType::Char_S);
+ else
+ InitBuiltinType(CharTy, BuiltinType::Char_U);
+ // C99 6.2.5p4.
+ InitBuiltinType(SignedCharTy, BuiltinType::SChar);
+ InitBuiltinType(ShortTy, BuiltinType::Short);
+ InitBuiltinType(IntTy, BuiltinType::Int);
+ InitBuiltinType(LongTy, BuiltinType::Long);
+ InitBuiltinType(LongLongTy, BuiltinType::LongLong);
+
+ // C99 6.2.5p6.
+ InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
+ InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
+ InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
+ InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
+ InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
+
+ // C99 6.2.5p10.
+ InitBuiltinType(FloatTy, BuiltinType::Float);
+ InitBuiltinType(DoubleTy, BuiltinType::Double);
+ InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
+
+ // GNU extension, 128-bit integers.
+ InitBuiltinType(Int128Ty, BuiltinType::Int128);
+ InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
+
+ if (LangOpts.CPlusPlus) { // C++ 3.9.1p5
+ if (TargetInfo::isTypeSigned(Target.getWCharType()))
+ InitBuiltinType(WCharTy, BuiltinType::WChar_S);
+ else // -fshort-wchar makes wchar_t be unsigned.
+ InitBuiltinType(WCharTy, BuiltinType::WChar_U);
+ } else // C99
+ WCharTy = getFromTargetType(Target.getWCharType());
+
+ if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
+ InitBuiltinType(Char16Ty, BuiltinType::Char16);
+ else // C99
+ Char16Ty = getFromTargetType(Target.getChar16Type());
+
+ if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
+ InitBuiltinType(Char32Ty, BuiltinType::Char32);
+ else // C99
+ Char32Ty = getFromTargetType(Target.getChar32Type());
+
+ // Placeholder type for type-dependent expressions whose type is
+ // completely unknown. No code should ever check a type against
+ // DependentTy and users should never see it; however, it is here to
+ // help diagnose failures to properly check for type-dependent
+ // expressions.
+ InitBuiltinType(DependentTy, BuiltinType::Dependent);
+
+ // Placeholder type for functions.
+ InitBuiltinType(OverloadTy, BuiltinType::Overload);
+
+ // Placeholder type for bound members.
+ InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
+
+ // Placeholder type for pseudo-objects.
+ InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
+
+ // "any" type; useful for debugger-like clients.
+ InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
+
+ // Placeholder type for unbridged ARC casts.
+ InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
+
+ // C99 6.2.5p11.
+ FloatComplexTy = getComplexType(FloatTy);
+ DoubleComplexTy = getComplexType(DoubleTy);
+ LongDoubleComplexTy = getComplexType(LongDoubleTy);
+
+ BuiltinVaListType = QualType();
+
+ // Builtin types for 'id', 'Class', and 'SEL'.
+ InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
+ InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
+ InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
+
+ // Builtin type for __objc_yes and __objc_no
+ ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
+ SignedCharTy : BoolTy);
+
+ ObjCConstantStringType = QualType();
+
+ // void * type
+ VoidPtrTy = getPointerType(VoidTy);
+
+ // nullptr type (C++0x 2.14.7)
+ InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
+
+ // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
+ InitBuiltinType(HalfTy, BuiltinType::Half);
+}
+
+DiagnosticsEngine &ASTContext::getDiagnostics() const {
+ return SourceMgr.getDiagnostics();
+}
+
+AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
+ AttrVec *&Result = DeclAttrs[D];
+ if (!Result) {
+ void *Mem = Allocate(sizeof(AttrVec));
+ Result = new (Mem) AttrVec;
+ }
+
+ return *Result;
+}
+
+/// \brief Erase the attributes corresponding to the given declaration.
+void ASTContext::eraseDeclAttrs(const Decl *D) {
+ llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
+ if (Pos != DeclAttrs.end()) {
+ Pos->second->~AttrVec();
+ DeclAttrs.erase(Pos);
+ }
+}
+
+MemberSpecializationInfo *
+ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
+ assert(Var->isStaticDataMember() && "Not a static data member");
+ llvm::DenseMap<const VarDecl *, MemberSpecializationInfo *>::iterator Pos
+ = InstantiatedFromStaticDataMember.find(Var);
+ if (Pos == InstantiatedFromStaticDataMember.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ assert(Inst->isStaticDataMember() && "Not a static data member");
+ assert(Tmpl->isStaticDataMember() && "Not a static data member");
+ assert(!InstantiatedFromStaticDataMember[Inst] &&
+ "Already noted what static data member was instantiated from");
+ InstantiatedFromStaticDataMember[Inst]
+ = new (*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation);
+}
+
+FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
+ const FunctionDecl *FD){
+ assert(FD && "Specialization is 0");
+ llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
+ = ClassScopeSpecializationPattern.find(FD);
+ if (Pos == ClassScopeSpecializationPattern.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
+ FunctionDecl *Pattern) {
+ assert(FD && "Specialization is 0");
+ assert(Pattern && "Class scope specialization pattern is 0");
+ ClassScopeSpecializationPattern[FD] = Pattern;
+}
+
+NamedDecl *
+ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
+ llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
+ = InstantiatedFromUsingDecl.find(UUD);
+ if (Pos == InstantiatedFromUsingDecl.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
+ assert((isa<UsingDecl>(Pattern) ||
+ isa<UnresolvedUsingValueDecl>(Pattern) ||
+ isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
+ "pattern decl is not a using decl");
+ assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingDecl[Inst] = Pattern;
+}
+
+UsingShadowDecl *
+ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
+ llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
+ = InstantiatedFromUsingShadowDecl.find(Inst);
+ if (Pos == InstantiatedFromUsingShadowDecl.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
+ UsingShadowDecl *Pattern) {
+ assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingShadowDecl[Inst] = Pattern;
+}
+
+FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
+ llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
+ = InstantiatedFromUnnamedFieldDecl.find(Field);
+ if (Pos == InstantiatedFromUnnamedFieldDecl.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
+ FieldDecl *Tmpl) {
+ assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
+ assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
+ assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
+ "Already noted what unnamed field was instantiated from");
+
+ InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
+}
+
+bool ASTContext::ZeroBitfieldFollowsNonBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && !LastFD->isBitField() &&
+ FD->getBitWidthValue(*this) == 0);
+}
+
+bool ASTContext::ZeroBitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && LastFD->isBitField() &&
+ FD->getBitWidthValue(*this) == 0 &&
+ LastFD->getBitWidthValue(*this) != 0);
+}
+
+bool ASTContext::BitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && LastFD->isBitField() &&
+ FD->getBitWidthValue(*this) &&
+ LastFD->getBitWidthValue(*this));
+}
+
+bool ASTContext::NonBitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (!FD->isBitField() && LastFD && LastFD->isBitField() &&
+ LastFD->getBitWidthValue(*this));
+}
+
+bool ASTContext::BitfieldFollowsNonBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && !LastFD->isBitField() &&
+ FD->getBitWidthValue(*this));
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.begin();
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.end();
+}
+
+unsigned
+ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.size();
+}
+
+void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
+ const CXXMethodDecl *Overridden) {
+ OverriddenMethods[Method].push_back(Overridden);
+}
+
+void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
+ assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->isFromASTFile() && "Non-local import declaration");
+ if (!FirstLocalImport) {
+ FirstLocalImport = Import;
+ LastLocalImport = Import;
+ return;
+ }
+
+ LastLocalImport->NextLocalImport = Import;
+ LastLocalImport = Import;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Sizing and Analysis
+//===----------------------------------------------------------------------===//
+
+/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
+/// scalar floating point type.
+const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ assert(BT && "Not a floating point type!");
+ switch (BT->getKind()) {
+ default: llvm_unreachable("Not a floating point type!");
+ case BuiltinType::Half: return Target->getHalfFormat();
+ case BuiltinType::Float: return Target->getFloatFormat();
+ case BuiltinType::Double: return Target->getDoubleFormat();
+ case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
+ }
+}
+
+/// getDeclAlign - Return a conservative estimate of the alignment of the
+/// specified decl. Note that bitfields do not have a valid alignment, so
+/// this method will assert on them.
+/// If @p RefAsPointee, references are treated like their underlying type
+/// (for alignof), else they're treated like pointers (for CodeGen).
+CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) const {
+ unsigned Align = Target->getCharWidth();
+
+ bool UseAlignAttrOnly = false;
+ if (unsigned AlignFromAttr = D->getMaxAlignment()) {
+ Align = AlignFromAttr;
+
+ // __attribute__((aligned)) can increase or decrease alignment
+ // *except* on a struct or struct member, where it only increases
+ // alignment unless 'packed' is also specified.
+ //
+ // It is an error for alignas to decrease alignment, so we can
+ // ignore that possibility; Sema should diagnose it.
+ if (isa<FieldDecl>(D)) {
+ UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+ } else {
+ UseAlignAttrOnly = true;
+ }
+ }
+ else if (isa<FieldDecl>(D))
+ UseAlignAttrOnly =
+ D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+
+ // If we're using the align attribute only, just ignore everything
+ // else about the declaration and its type.
+ if (UseAlignAttrOnly) {
+ // do nothing
+
+ } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ QualType T = VD->getType();
+ if (const ReferenceType* RT = T->getAs<ReferenceType>()) {
+ if (RefAsPointee)
+ T = RT->getPointeeType();
+ else
+ T = getPointerType(RT->getPointeeType());
+ }
+ if (!T->isIncompleteType() && !T->isFunctionType()) {
+ // Adjust alignments of declarations with array type by the
+ // large-array alignment on the target.
+ unsigned MinWidth = Target->getLargeArrayMinWidth();
+ const ArrayType *arrayType;
+ if (MinWidth && (arrayType = getAsArrayType(T))) {
+ if (isa<VariableArrayType>(arrayType))
+ Align = std::max(Align, Target->getLargeArrayAlign());
+ else if (isa<ConstantArrayType>(arrayType) &&
+ MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
+ Align = std::max(Align, Target->getLargeArrayAlign());
+
+ // Walk through any array types while we're at it.
+ T = getBaseElementType(arrayType);
+ }
+ Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
+ }
+
+ // Fields can be subject to extra alignment constraints, like if
+ // the field is packed, the struct is packed, or the struct has a
+ // max-field-alignment constraint (#pragma pack). So calculate
+ // the actual alignment of the field within the struct, and then
+ // (as we're expected to) constrain that by the alignment of the type.
+ if (const FieldDecl *field = dyn_cast<FieldDecl>(VD)) {
+ // So calculate the alignment of the field.
+ const ASTRecordLayout &layout = getASTRecordLayout(field->getParent());
+
+ // Start with the record's overall alignment.
+ unsigned fieldAlign = toBits(layout.getAlignment());
+
+ // Use the GCD of that and the offset within the record.
+ uint64_t offset = layout.getFieldOffset(field->getFieldIndex());
+ if (offset > 0) {
+ // Alignment is always a power of 2, so the GCD will be a power of 2,
+ // which means we get to do this crazy thing instead of Euclid's.
+ uint64_t lowBitOfOffset = offset & (~offset + 1);
+ if (lowBitOfOffset < fieldAlign)
+ fieldAlign = static_cast<unsigned>(lowBitOfOffset);
+ }
+
+ Align = std::min(Align, fieldAlign);
+ }
+ }
+
+ return toCharUnitsFromBits(Align);
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(const Type *T) const {
+ std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
+ return std::make_pair(toCharUnitsFromBits(Info.first),
+ toCharUnitsFromBits(Info.second));
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(QualType T) const {
+ return getTypeInfoInChars(T.getTypePtr());
+}
+
+std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const {
+ TypeInfoMap::iterator it = MemoizedTypeInfo.find(T);
+ if (it != MemoizedTypeInfo.end())
+ return it->second;
+
+ std::pair<uint64_t, unsigned> Info = getTypeInfoImpl(T);
+ MemoizedTypeInfo.insert(std::make_pair(T, Info));
+ return Info;
+}
+
+/// getTypeInfoImpl - Return the size of the specified type, in bits. This
+/// method does not work on incomplete types.
+///
+/// FIXME: Pointers into different addr spaces could have different sizes and
+/// alignment requirements: getPointerInfo should take an AddrSpace, this
+/// should take a QualType, &c.
+std::pair<uint64_t, unsigned>
+ASTContext::getTypeInfoImpl(const Type *T) const {
+ uint64_t Width=0;
+ unsigned Align=8;
+ switch (T->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Should not see dependent types");
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // GCC extension: alignof(function) = 32 bits
+ Width = 0;
+ Align = 32;
+ break;
+
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ Width = 0;
+ Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
+ break;
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+
+ std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
+ uint64_t Size = CAT->getSize().getZExtValue();
+ assert((Size == 0 || EltInfo.first <= (uint64_t)(-1)/Size) &&
+ "Overflow in array type bit size evaluation");
+ Width = EltInfo.first*Size;
+ Align = EltInfo.second;
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType *VT = cast<VectorType>(T);
+ std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(VT->getElementType());
+ Width = EltInfo.first*VT->getNumElements();
+ Align = Width;
+ // If the alignment is not a power of 2, round up to the next power of 2.
+ // This happens for non-power-of-2 length vectors.
+ if (Align & (Align-1)) {
+ Align = llvm::NextPowerOf2(Align);
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ }
+ break;
+ }
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: llvm_unreachable("Unknown builtin type!");
+ case BuiltinType::Void:
+ // GCC extension: alignof(void) = 8 bits.
+ Width = 0;
+ Align = 8;
+ break;
+
+ case BuiltinType::Bool:
+ Width = Target->getBoolWidth();
+ Align = Target->getBoolAlign();
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ Width = Target->getCharWidth();
+ Align = Target->getCharAlign();
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ Width = Target->getWCharWidth();
+ Align = Target->getWCharAlign();
+ break;
+ case BuiltinType::Char16:
+ Width = Target->getChar16Width();
+ Align = Target->getChar16Align();
+ break;
+ case BuiltinType::Char32:
+ Width = Target->getChar32Width();
+ Align = Target->getChar32Align();
+ break;
+ case BuiltinType::UShort:
+ case BuiltinType::Short:
+ Width = Target->getShortWidth();
+ Align = Target->getShortAlign();
+ break;
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ Width = Target->getIntWidth();
+ Align = Target->getIntAlign();
+ break;
+ case BuiltinType::ULong:
+ case BuiltinType::Long:
+ Width = Target->getLongWidth();
+ Align = Target->getLongAlign();
+ break;
+ case BuiltinType::ULongLong:
+ case BuiltinType::LongLong:
+ Width = Target->getLongLongWidth();
+ Align = Target->getLongLongAlign();
+ break;
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ Width = 128;
+ Align = 128; // int128_t is 128-bit aligned on all targets.
+ break;
+ case BuiltinType::Half:
+ Width = Target->getHalfWidth();
+ Align = Target->getHalfAlign();
+ break;
+ case BuiltinType::Float:
+ Width = Target->getFloatWidth();
+ Align = Target->getFloatAlign();
+ break;
+ case BuiltinType::Double:
+ Width = Target->getDoubleWidth();
+ Align = Target->getDoubleAlign();
+ break;
+ case BuiltinType::LongDouble:
+ Width = Target->getLongDoubleWidth();
+ Align = Target->getLongDoubleAlign();
+ break;
+ case BuiltinType::NullPtr:
+ Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
+ Align = Target->getPointerAlign(0); // == sizeof(void*)
+ break;
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ }
+ break;
+ case Type::ObjCObjectPointer:
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ case Type::BlockPointer: {
+ unsigned AS = getTargetAddressSpace(
+ cast<BlockPointerType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ // alignof and sizeof should never enter this code path here, so we go
+ // the pointer route.
+ unsigned AS = getTargetAddressSpace(
+ cast<ReferenceType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::Pointer: {
+ unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ std::pair<uint64_t, unsigned> PtrDiffInfo =
+ getTypeInfo(getPointerDiffType());
+ Width = PtrDiffInfo.first * ABI->getMemberPointerSize(MPT);
+ Align = PtrDiffInfo.second;
+ break;
+ }
+ case Type::Complex: {
+ // Complex types have the same alignment as their elements, but twice the
+ // size.
+ std::pair<uint64_t, unsigned> EltInfo =
+ getTypeInfo(cast<ComplexType>(T)->getElementType());
+ Width = EltInfo.first*2;
+ Align = EltInfo.second;
+ break;
+ }
+ case Type::ObjCObject:
+ return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
+ const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
+ break;
+ }
+ case Type::Record:
+ case Type::Enum: {
+ const TagType *TT = cast<TagType>(T);
+
+ if (TT->getDecl()->isInvalidDecl()) {
+ Width = 8;
+ Align = 8;
+ break;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(TT))
+ return getTypeInfo(ET->getDecl()->getIntegerType());
+
+ const RecordType *RT = cast<RecordType>(TT);
+ const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
+ break;
+ }
+
+ case Type::SubstTemplateTypeParm:
+ return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
+ getReplacementType().getTypePtr());
+
+ case Type::Auto: {
+ const AutoType *A = cast<AutoType>(T);
+ assert(A->isDeduced() && "Cannot request the size of a dependent type");
+ return getTypeInfo(A->getDeducedType().getTypePtr());
+ }
+
+ case Type::Paren:
+ return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+
+ case Type::Typedef: {
+ const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
+ std::pair<uint64_t, unsigned> Info
+ = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+ // If the typedef has an aligned attribute on it, it overrides any computed
+ // alignment we have. This violates the GCC documentation (which says that
+ // attribute(aligned) can only round up) but matches its implementation.
+ if (unsigned AttrAlign = Typedef->getMaxAlignment())
+ Align = AttrAlign;
+ else
+ Align = Info.second;
+ Width = Info.first;
+ break;
+ }
+
+ case Type::TypeOfExpr:
+ return getTypeInfo(cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType()
+ .getTypePtr());
+
+ case Type::TypeOf:
+ return getTypeInfo(cast<TypeOfType>(T)->getUnderlyingType().getTypePtr());
+
+ case Type::Decltype:
+ return getTypeInfo(cast<DecltypeType>(T)->getUnderlyingExpr()->getType()
+ .getTypePtr());
+
+ case Type::UnaryTransform:
+ return getTypeInfo(cast<UnaryTransformType>(T)->getUnderlyingType());
+
+ case Type::Elaborated:
+ return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
+
+ case Type::Attributed:
+ return getTypeInfo(
+ cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+
+ case Type::TemplateSpecialization: {
+ assert(getCanonicalType(T) != T &&
+ "Cannot request the size of a dependent type");
+ const TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T);
+ // A type alias template specialization may refer to a typedef with the
+ // aligned attribute on it.
+ if (TST->isTypeAlias())
+ return getTypeInfo(TST->getAliasedType().getTypePtr());
+ else
+ return getTypeInfo(getCanonicalType(T));
+ }
+
+ case Type::Atomic: {
+ std::pair<uint64_t, unsigned> Info
+ = getTypeInfo(cast<AtomicType>(T)->getValueType());
+ Width = Info.first;
+ Align = Info.second;
+ if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth() &&
+ llvm::isPowerOf2_64(Width)) {
+ // We can potentially perform lock-free atomic operations for this
+ // type; promote the alignment appropriately.
+ // FIXME: We could potentially promote the width here as well...
+ // is that worthwhile? (Non-struct atomic types generally have
+ // power-of-two size anyway, but structs might not. Requires a bit
+ // of implementation work to make sure we zero out the extra bits.)
+ Align = static_cast<unsigned>(Width);
+ }
+ }
+
+ }
+
+ assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
+ return std::make_pair(Width, Align);
+}
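+
+// Illustrative example (not part of the original source): for '_Complex
+// double' on a target where 'double' is 64 bits wide and 64-bit aligned, the
+// Complex case above yields Width == 128 and Align == 64, i.e. twice the
+// element width at the element's own alignment.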
+
+/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
+ return CharUnits::fromQuantity(BitSize / getCharWidth());
+}
+
+/// toBits - Convert a size in characters to a size in bits.
+int64_t ASTContext::toBits(CharUnits CharSize) const {
+ return CharSize.getQuantity() * getCharWidth();
+}
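+
+// Quick sanity check (illustrative only, assuming getCharWidth() == 8, with
+// 'Ctx' standing for an ASTContext reference):
+//   Ctx.toCharUnitsFromBits(64).getQuantity();   // == 8
+//   Ctx.toBits(CharUnits::fromQuantity(8));      // == 64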
+
+/// getTypeSizeInChars - Return the size of the specified type, in characters.
+/// This method does not work on incomplete types.
+CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeSize(T));
+}
+CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeSize(T));
+}
+
+/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
+/// characters. This method does not work on incomplete types.
+CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
+}
+CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
+}
+
+/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+/// type for the current target in bits. This can be different than the ABI
+/// alignment in cases where it is beneficial for performance to overalign
+/// a data type.
+unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
+ unsigned ABIAlign = getTypeAlign(T);
+
+ // Double and long long should be naturally aligned if possible.
+ if (const ComplexType* CT = T->getAs<ComplexType>())
+ T = CT->getElementType().getTypePtr();
+ if (T->isSpecificBuiltinType(BuiltinType::Double) ||
+ T->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ T->isSpecificBuiltinType(BuiltinType::ULongLong))
+ return std::max(ABIAlign, (unsigned)getTypeSize(T));
+
+ return ABIAlign;
+}
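+
+// Worked example (illustrative, not from the original source): on 32-bit x86
+// the ABI alignment of 'double' is 32 bits while its size is 64 bits, so
+//   Ctx.getTypeAlign(DoubleTy)          == 32
+//   Ctx.getPreferredTypeAlign(DoubleTy) == 64   // max(ABIAlign, size)
+// where 'Ctx' and 'DoubleTy' are hypothetical names for an ASTContext and
+// the Type* for 'double'.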
+
+/// DeepCollectObjCIvars -
+/// This routine first collects all declared, but not synthesized, ivars of
+/// the superclasses, and then collects all ivars, including those
+/// synthesized, of the current class. It is used when implementing the
+/// current class, at which point all ivars, declared and synthesized, are
+/// known.
+///
+void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
+ bool leafClass,
+ SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
+ if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
+ DeepCollectObjCIvars(SuperClass, false, Ivars);
+ if (!leafClass) {
+ for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
+ E = OI->ivar_end(); I != E; ++I)
+ Ivars.push_back(*I);
+ } else {
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
+ for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar())
+ Ivars.push_back(Iv);
+ }
+}
+
+/// CollectInheritedProtocols - Collect all protocols in current class and
+/// those inherited by it.
+void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
+ if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ // We can use protocol_iterator here instead of
+ // all_referenced_protocol_iterator since we are walking all categories.
+ for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(),
+ PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *Proto = (*P);
+ Protocols.insert(Proto->getCanonicalDecl());
+ for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+ PE = Proto->protocol_end(); P != PE; ++P) {
+ Protocols.insert((*P)->getCanonicalDecl());
+ CollectInheritedProtocols(*P, Protocols);
+ }
+ }
+
+ // Categories of this Interface.
+ for (const ObjCCategoryDecl *CDeclChain = OI->getCategoryList();
+ CDeclChain; CDeclChain = CDeclChain->getNextClassCategory())
+ CollectInheritedProtocols(CDeclChain, Protocols);
+ if (ObjCInterfaceDecl *SD = OI->getSuperClass())
+ while (SD) {
+ CollectInheritedProtocols(SD, Protocols);
+ SD = SD->getSuperClass();
+ }
+ } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
+ PE = OC->protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *Proto = (*P);
+ Protocols.insert(Proto->getCanonicalDecl());
+ for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+ PE = Proto->protocol_end(); P != PE; ++P)
+ CollectInheritedProtocols(*P, Protocols);
+ }
+ } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
+ PE = OP->protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *Proto = (*P);
+ Protocols.insert(Proto->getCanonicalDecl());
+ for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+ PE = Proto->protocol_end(); P != PE; ++P)
+ CollectInheritedProtocols(*P, Protocols);
+ }
+ }
+}
+
+unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
+ unsigned count = 0;
+  // Count ivars declared in class extensions.
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension())
+ count += CDecl->ivar_size();
+
+  // Count ivars defined in this class's implementation. This
+ // includes synthesized ivars.
+ if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
+ count += ImplDecl->ivar_size();
+
+ return count;
+}
+
+bool ASTContext::isSentinelNullExpr(const Expr *E) {
+ if (!E)
+ return false;
+
+ // nullptr_t is always treated as null.
+ if (E->getType()->isNullPtrType()) return true;
+
+ if (E->getType()->isAnyPointerType() &&
+ E->IgnoreParenCasts()->isNullPointerConstant(*this,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // Unfortunately, __null has type 'int'.
+ if (isa<GNUNullExpr>(E)) return true;
+
+ return false;
+}
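+
+// Summary of the cases above (descriptive comment added for clarity): a
+// 'nullptr' literal, a pointer-typed null pointer constant such as
+// '(void*)0', and GNU '__null' all count as sentinel nulls; a plain integer
+// literal '0' of type 'int' does not.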
+
+/// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
+ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+ I = ObjCImpls.find(D);
+ if (I != ObjCImpls.end())
+ return cast<ObjCImplementationDecl>(I->second);
+ return 0;
+}
+/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
+ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+ I = ObjCImpls.find(D);
+ if (I != ObjCImpls.end())
+ return cast<ObjCCategoryImplDecl>(I->second);
+ return 0;
+}
+
+/// \brief Set the implementation of ObjCInterfaceDecl.
+void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
+ ObjCImplementationDecl *ImplD) {
+ assert(IFaceD && ImplD && "Passed null params");
+ ObjCImpls[IFaceD] = ImplD;
+}
+/// \brief Set the implementation of ObjCCategoryDecl.
+void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
+ ObjCCategoryImplDecl *ImplD) {
+ assert(CatD && ImplD && "Passed null params");
+ ObjCImpls[CatD] = ImplD;
+}
+
+ObjCInterfaceDecl *ASTContext::getObjContainingInterface(NamedDecl *ND) const {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
+ return IMD->getClassInterface();
+
+ return 0;
+}
+
+/// \brief Get the copy initialization expression of a VarDecl, or NULL if
+/// none exists.
+Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
+ assert(VD && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "getBlockVarCopyInits - not __block var");
+ llvm::DenseMap<const VarDecl*, Expr*>::iterator
+ I = BlockVarCopyInits.find(VD);
+ return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : 0;
+}
+
+/// \brief Set the copy initialization expression of a block var decl.
+void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
+ assert(VD && Init && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "setBlockVarCopyInits - not __block var");
+ BlockVarCopyInits[VD] = Init;
+}
+
+/// \brief Allocate an uninitialized TypeSourceInfo.
+///
+/// The caller should initialize the memory held by TypeSourceInfo using
+/// the TypeLoc wrappers.
+///
+/// \param T the type that will be the basis for type source info. This type
+/// should refer to how the declarator was written in source code, not to
+/// what type semantic analysis resolved the declarator to.
+TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
+ unsigned DataSize) const {
+ if (!DataSize)
+ DataSize = TypeLoc::getFullDataSizeForType(T);
+ else
+ assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
+ "incorrect data size provided to CreateTypeSourceInfo!");
+
+ TypeSourceInfo *TInfo =
+ (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
+ new (TInfo) TypeSourceInfo(T);
+ return TInfo;
+}
+
+TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
+ SourceLocation L) const {
+ TypeSourceInfo *DI = CreateTypeSourceInfo(T);
+ DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
+ return DI;
+}
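+
+// Usage sketch (hypothetical names, not part of the original source): when a
+// declaration is synthesized and there is no written type to describe,
+//   TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.IntTy, Loc);
+// produces source info whose type locations are all initialized to the
+// single location 'Loc'.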
+
+const ASTRecordLayout &
+ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
+ return getObjCLayout(D, 0);
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCImplementationLayout(
+ const ObjCImplementationDecl *D) const {
+ return getObjCLayout(D->getClassInterface(), D);
+}
+
+//===----------------------------------------------------------------------===//
+// Type creation/memoization methods
+//===----------------------------------------------------------------------===//
+
+QualType
+ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
+ unsigned fastQuals = quals.getFastQualifiers();
+ quals.removeFastQualifiers();
+
+ // Check if we've already instantiated this type.
+ llvm::FoldingSetNodeID ID;
+ ExtQuals::Profile(ID, baseType, quals);
+ void *insertPos = 0;
+ if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
+ assert(eq->getQualifiers() == quals);
+ return QualType(eq, fastQuals);
+ }
+
+ // If the base type is not canonical, make the appropriate canonical type.
+ QualType canon;
+ if (!baseType->isCanonicalUnqualified()) {
+ SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
+ canonSplit.Quals.addConsistentQualifiers(quals);
+ canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
+
+ // Re-find the insert position.
+ (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
+ }
+
+ ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+ ExtQualNodes.InsertNode(eq, insertPos);
+ return QualType(eq, fastQuals);
+}
+
+QualType
+ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getAddressSpace() == AddressSpace)
+ return T;
+
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ QualifierCollector Quals;
+ const Type *TypeNode = Quals.strip(T);
+
+ // If this type already has an address space specified, it cannot get
+ // another one.
+ assert(!Quals.hasAddressSpace() &&
+ "Type cannot be in multiple addr spaces!");
+ Quals.addAddressSpace(AddressSpace);
+
+ return getExtQualType(TypeNode, Quals);
+}
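+
+// Usage sketch (hypothetical, with an assumed address-space number): address
+// space qualification goes through the ExtQuals machinery above, e.g.
+//   QualType ASInt = Ctx.getAddrSpaceQualType(Ctx.IntTy, /*AddressSpace=*/1);
+// Re-qualifying a type that already carries a different address space trips
+// the assertion rather than silently replacing it.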
+
+QualType ASTContext::getObjCGCQualType(QualType T,
+ Qualifiers::GC GCAttr) const {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getObjCGCAttr() == GCAttr)
+ return T;
+
+ if (const PointerType *ptr = T->getAs<PointerType>()) {
+ QualType Pointee = ptr->getPointeeType();
+ if (Pointee->isAnyPointerType()) {
+ QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
+ return getPointerType(ResultType);
+ }
+ }
+
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ QualifierCollector Quals;
+ const Type *TypeNode = Quals.strip(T);
+
+ // If this type already has an ObjCGC specified, it cannot get
+ // another one.
+ assert(!Quals.hasObjCGCAttr() &&
+ "Type cannot have multiple ObjCGCs!");
+ Quals.addObjCGCAttr(GCAttr);
+
+ return getExtQualType(TypeNode, Quals);
+}
+
+const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
+ FunctionType::ExtInfo Info) {
+ if (T->getExtInfo() == Info)
+ return T;
+
+ QualType Result;
+ if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
+ Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = Info;
+ Result = getFunctionType(FPT->getResultType(), FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI);
+ }
+
+ return cast<FunctionType>(Result.getTypePtr());
+}
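+
+// Usage sketch (hypothetical names): to change only the calling convention of
+// an existing function type while keeping its prototype,
+//   const FunctionType *FT = Fn->getType()->castAs<FunctionType>();
+//   const FunctionType *Adjusted = Ctx.adjustFunctionType(
+//       FT, FT->getExtInfo().withCallingConv(CC_X86StdCall));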
+
+/// getComplexType - Return the uniqued reference to the type for a complex
+/// number with the specified element type.
+QualType ASTContext::getComplexType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ComplexType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(CT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getComplexType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
+ Types.push_back(New);
+ ComplexTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getPointerType - Return the uniqued reference to the type for a pointer to
+/// the specified type.
+QualType ASTContext::getPointerType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ PointerType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
+ Types.push_back(New);
+ PointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
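+
+// The Profile / FindNodeOrInsertPos / InsertNode sequence above is the
+// uniquing pattern used throughout this file.  A usage sketch (with a
+// hypothetical ASTContext reference 'Ctx'):
+//   QualType P1 = Ctx.getPointerType(Ctx.IntTy);
+//   QualType P2 = Ctx.getPointerType(Ctx.IntTy);
+//   assert(P1.getTypePtr() == P2.getTypePtr());  // same uniqued node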
+
+/// getBlockPointerType - Return the uniqued reference to the type for
+/// a pointer to the specified block.
+QualType ASTContext::getBlockPointerType(QualType T) const {
+ assert(T->isFunctionType() && "block of function types only");
+ // Unique pointers, to guarantee there is only one block of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ BlockPointerType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (BlockPointerType *PT =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the block pointee type isn't canonical, this won't be a canonical
+ // type either so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getBlockPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ BlockPointerType *NewIP =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ BlockPointerType *New
+ = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
+ Types.push_back(New);
+ BlockPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getLValueReferenceType - Return the uniqued reference to the type for an
+/// lvalue reference to the specified type.
+QualType
+ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
+ assert(getCanonicalType(T) != OverloadTy &&
+ "Unresolved overloaded function type");
+
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T, SpelledAsLValue);
+
+ void *InsertPos = 0;
+ if (LValueReferenceType *RT =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
+ QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+ Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
+
+ // Get the new insert position for the node we care about.
+ LValueReferenceType *NewIP =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ LValueReferenceType *New
+ = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
+ SpelledAsLValue);
+ Types.push_back(New);
+ LValueReferenceTypes.InsertNode(New, InsertPos);
+
+ return QualType(New, 0);
+}
+
+/// getRValueReferenceType - Return the uniqued reference to the type for an
+/// rvalue reference to the specified type.
+QualType ASTContext::getRValueReferenceType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T, false);
+
+ void *InsertPos = 0;
+ if (RValueReferenceType *RT =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (InnerRef || !T.isCanonical()) {
+ QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+ Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
+
+ // Get the new insert position for the node we care about.
+ RValueReferenceType *NewIP =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ RValueReferenceType *New
+ = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
+ Types.push_back(New);
+ RValueReferenceTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getMemberPointerType - Return the uniqued reference to the type for a
+/// member pointer to the specified type, in the specified class.
+QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ MemberPointerType::Profile(ID, T, Cls);
+
+ void *InsertPos = 0;
+ if (MemberPointerType *PT =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee or class type isn't canonical, this won't be a canonical
+ // type either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
+ Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));
+
+ // Get the new insert position for the node we care about.
+ MemberPointerType *NewIP =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ MemberPointerType *New
+ = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
+ Types.push_back(New);
+ MemberPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getConstantArrayType - Return the unique reference to the type for an
+/// array of the specified element type.
+QualType ASTContext::getConstantArrayType(QualType EltTy,
+ const llvm::APInt &ArySizeIn,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals) const {
+ assert((EltTy->isDependentType() ||
+ EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
+ "Constant array of VLAs is illegal!");
+
+ // Convert the array size into a canonical width matching the pointer size for
+ // the target.
+ llvm::APInt ArySize(ArySizeIn);
+ ArySize =
+ ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));
+
+ llvm::FoldingSetNodeID ID;
+ ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
+
+ void *InsertPos = 0;
+ if (ConstantArrayType *ATP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(ATP, 0);
+
+ // If the element type isn't canonical or has qualifiers, this won't
+ // be a canonical type either, so fill in the canonical type field.
+ QualType Canon;
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
+ ASM, IndexTypeQuals);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
+
+ // Get the new insert position for the node we care about.
+ ConstantArrayType *NewIP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+  ConstantArrayType *New = new (*this, TypeAlignment)
+ ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
+ ConstantArrayTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
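+
+// Usage sketch (hypothetical names): building the type 'int[10]'
+//   llvm::APInt Size(/*numBits=*/32, /*val=*/10);
+//   QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, Size,
+//                                           ArrayType::Normal,
+//                                           /*IndexTypeQuals=*/0);
+// The size is zero-extended or truncated to the target pointer width above,
+// so the bit width of the APInt passed in need not match the target.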
+
+/// getVariableArrayDecayedType - Turns the given type, which may be
+/// variably-modified, into the corresponding type with all the known
+/// sizes replaced with [*].
+QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
+  // By far the most common case.
+ if (!type->isVariablyModifiedType()) return type;
+
+ QualType result;
+
+ SplitQualType split = type.getSplitDesugaredType();
+ const Type *ty = split.Ty;
+ switch (ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't desugar past all non-canonical types?");
+
+ // These types should never be variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::DependentSizedExtVector:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Record:
+ case Type::Enum:
+ case Type::UnresolvedUsing:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::DependentName:
+ case Type::InjectedClassName:
+ case Type::TemplateSpecialization:
+ case Type::DependentTemplateSpecialization:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::Auto:
+ case Type::PackExpansion:
+ llvm_unreachable("type should never be variably-modified");
+
+ // These types can be variably-modified but should never need to
+ // further decay.
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ case Type::BlockPointer:
+ case Type::MemberPointer:
+ return type;
+
+ // These types can be variably-modified. All these modifications
+ // preserve structure except as noted by comments.
+ // TODO: if we ever care about optimizing VLAs, there are no-op
+ // optimizations available here.
+ case Type::Pointer:
+ result = getPointerType(getVariableArrayDecayedType(
+ cast<PointerType>(ty)->getPointeeType()));
+ break;
+
+ case Type::LValueReference: {
+ const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
+ result = getLValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()),
+ lv->isSpelledAsLValue());
+ break;
+ }
+
+ case Type::RValueReference: {
+ const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
+ result = getRValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()));
+ break;
+ }
+
+ case Type::Atomic: {
+ const AtomicType *at = cast<AtomicType>(ty);
+ result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
+ result = getConstantArrayType(
+ getVariableArrayDecayedType(cat->getElementType()),
+ cat->getSize(),
+ cat->getSizeModifier(),
+ cat->getIndexTypeCVRQualifiers());
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
+ result = getDependentSizedArrayType(
+ getVariableArrayDecayedType(dat->getElementType()),
+ dat->getSizeExpr(),
+ dat->getSizeModifier(),
+ dat->getIndexTypeCVRQualifiers(),
+ dat->getBracketsRange());
+ break;
+ }
+
+ // Turn incomplete types into [*] types.
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(iat->getElementType()),
+ /*size*/ 0,
+ ArrayType::Normal,
+ iat->getIndexTypeCVRQualifiers(),
+ SourceRange());
+ break;
+ }
+
+ // Turn VLA types into [*] types.
+ case Type::VariableArray: {
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(vat->getElementType()),
+ /*size*/ 0,
+ ArrayType::Star,
+ vat->getIndexTypeCVRQualifiers(),
+ vat->getBracketsRange());
+ break;
+ }
+ }
+
+ // Apply the top-level qualifiers from the original.
+ return getQualifiedType(result, split.Quals);
+}
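+
+// Example (descriptive comment added for clarity): for a variably modified
+// type such as 'int (*)[n][10]', the decayed form replaces only the VLA
+// bound with a star, giving 'int (*)[*][10]'; constant bounds such as the
+// '10' are preserved.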
+
+/// getVariableArrayType - Returns a non-unique reference to the type for a
+/// variable array of the specified element type.
+QualType ASTContext::getVariableArrayType(QualType EltTy,
+ Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const {
+  // Since we don't unique expressions, it isn't possible to unique VLAs
+ // that have an expression provided for their size.
+ QualType Canon;
+
+ // Be sure to pull qualifiers off the element type.
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
+ }
+
+  VariableArrayType *New = new (*this, TypeAlignment)
+ VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
+
+ VariableArrayTypes.push_back(New);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getDependentSizedArrayType - Returns a non-unique reference to
+/// the type for a dependently-sized array of the specified element
+/// type.
+QualType ASTContext::getDependentSizedArrayType(QualType elementType,
+ Expr *numElements,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned elementTypeQuals,
+ SourceRange brackets) const {
+ assert((!numElements || numElements->isTypeDependent() ||
+ numElements->isValueDependent()) &&
+ "Size must be type- or value-dependent!");
+
+ // Dependently-sized array types that do not have a specified number
+ // of elements will have their sizes deduced from a dependent
+ // initializer. We do no canonicalization here at all, which is okay
+ // because they can't be used in most locations.
+ if (!numElements) {
+ DependentSizedArrayType *newType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, QualType(),
+ numElements, ASM, elementTypeQuals,
+ brackets);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+ }
+
+ // Otherwise, we actually build a new type every time, but we
+ // also build a canonical type.
+
+ SplitQualType canonElementType = getCanonicalType(elementType).split();
+
+ void *insertPos = 0;
+ llvm::FoldingSetNodeID ID;
+ DependentSizedArrayType::Profile(ID, *this,
+ QualType(canonElementType.Ty, 0),
+ ASM, elementTypeQuals, numElements);
+
+ // Look for an existing type with these properties.
+ DependentSizedArrayType *canonTy =
+ DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+
+ // If we don't have one, build one.
+ if (!canonTy) {
+ canonTy = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
+ QualType(), numElements, ASM, elementTypeQuals,
+ brackets);
+ DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
+ Types.push_back(canonTy);
+ }
+
+ // Apply qualifiers from the element type to the array.
+ QualType canon = getQualifiedType(QualType(canonTy,0),
+ canonElementType.Quals);
+
+ // If we didn't need extra canonicalization for the element type,
+ // then just use that as our result.
+ if (QualType(canonElementType.Ty, 0) == elementType)
+ return canon;
+
+ // Otherwise, we need to build a type which follows the spelling
+ // of the element type.
+ DependentSizedArrayType *sugaredType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, canon, numElements,
+ ASM, elementTypeQuals, brackets);
+ Types.push_back(sugaredType);
+ return QualType(sugaredType, 0);
+}
+
+QualType ASTContext::getIncompleteArrayType(QualType elementType,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned elementTypeQuals) const {
+ llvm::FoldingSetNodeID ID;
+ IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
+
+ void *insertPos = 0;
+ if (IncompleteArrayType *iat =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
+ return QualType(iat, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field. We also have to pull
+ // qualifiers off the element type.
+ QualType canon;
+
+ if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(elementType).split();
+ canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
+ ASM, elementTypeQuals);
+ canon = getQualifiedType(canon, canonSplit.Quals);
+
+ // Get the new insert position for the node we care about.
+ IncompleteArrayType *existing =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+ assert(!existing && "Shouldn't be in the map!"); (void) existing;
+ }
+
+ IncompleteArrayType *newType = new (*this, TypeAlignment)
+ IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
+
+ IncompleteArrayTypes.InsertNode(newType, insertPos);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+/// getVectorType - Return the unique reference to a vector type of
+/// the specified element type and size. The element type must be a built-in
+/// type.
+QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
+ VectorType::VectorKind VecKind) const {
+ assert(vecType->isBuiltinType());
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
+
+ void *InsertPos = 0;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType.isCanonical()) {
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ VectorType *New = new (*this, TypeAlignment)
+ VectorType(vecType, NumElts, Canonical, VecKind);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getExtVectorType - Return the unique reference to an extended vector type
+/// of the specified element type and size. The element type must be a
+/// built-in or dependent type.
+QualType
+ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
+ assert(vecType->isBuiltinType() || vecType->isDependentType());
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
+ VectorType::GenericVector);
+ void *InsertPos = 0;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType.isCanonical()) {
+ Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ ExtVectorType *New = new (*this, TypeAlignment)
+ ExtVectorType(vecType, NumElts, Canonical);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType
+ASTContext::getDependentSizedExtVectorType(QualType vecType,
+ Expr *SizeExpr,
+ SourceLocation AttrLoc) const {
+ llvm::FoldingSetNodeID ID;
+ DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
+ SizeExpr);
+
+ void *InsertPos = 0;
+ DependentSizedExtVectorType *Canon
+ = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentSizedExtVectorType *New;
+ if (Canon) {
+ // We already have a canonical version of this array type; use it as
+ // the canonical type for a newly-built type.
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
+ SizeExpr, AttrLoc);
+ } else {
+ QualType CanonVecTy = getCanonicalType(vecType);
+ if (CanonVecTy == vecType) {
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
+ AttrLoc);
+
+ DependentSizedExtVectorType *CanonCheck
+ = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
+ (void)CanonCheck;
+ DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
+ } else {
+ QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
+ SourceLocation());
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
+ }
+ }
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
+///
+QualType
+ASTContext::getFunctionNoProtoType(QualType ResultTy,
+ const FunctionType::ExtInfo &Info) const {
+ const CallingConv DefaultCC = Info.getCC();
+ const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ?
+ CC_X86StdCall : DefaultCC;
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionNoProtoType::Profile(ID, ResultTy, Info);
+
+ void *InsertPos = 0;
+ if (FunctionNoProtoType *FT =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FT, 0);
+
+ QualType Canonical;
+ if (!ResultTy.isCanonical() ||
+ getCanonicalCallConv(CallConv) != CallConv) {
+ Canonical =
+ getFunctionNoProtoType(getCanonicalType(ResultTy),
+ Info.withCallingConv(getCanonicalCallConv(CallConv)));
+
+ // Get the new insert position for the node we care about.
+ FunctionNoProtoType *NewIP =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv);
+ FunctionNoProtoType *New = new (*this, TypeAlignment)
+ FunctionNoProtoType(ResultTy, Canonical, newInfo);
+ Types.push_back(New);
+ FunctionNoProtoTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getFunctionType - Return a normal function type with a typed argument
+/// list; whether the argument list ends in '...' is carried in the
+/// ExtProtoInfo (EPI.Variadic).
+QualType
+ASTContext::getFunctionType(QualType ResultTy,
+ const QualType *ArgArray, unsigned NumArgs,
+ const FunctionProtoType::ExtProtoInfo &EPI) const {
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, EPI, *this);
+
+ void *InsertPos = 0;
+ if (FunctionProtoType *FTP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FTP, 0);
+
+ // Determine whether the type being created is already canonical or not.
+ bool isCanonical =
+ EPI.ExceptionSpecType == EST_None && ResultTy.isCanonical() &&
+ !EPI.HasTrailingReturn;
+ for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
+ if (!ArgArray[i].isCanonicalAsParam())
+ isCanonical = false;
+
+ const CallingConv DefaultCC = EPI.ExtInfo.getCC();
+ const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ?
+ CC_X86StdCall : DefaultCC;
+
+ // If this type isn't canonical, get the canonical version of it.
+ // The exception spec is not part of the canonical type.
+ QualType Canonical;
+ if (!isCanonical || getCanonicalCallConv(CallConv) != CallConv) {
+ SmallVector<QualType, 16> CanonicalArgs;
+ CanonicalArgs.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
+
+ FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
+ CanonicalEPI.HasTrailingReturn = false;
+ CanonicalEPI.ExceptionSpecType = EST_None;
+ CanonicalEPI.NumExceptions = 0;
+ CanonicalEPI.ExtInfo
+ = CanonicalEPI.ExtInfo.withCallingConv(getCanonicalCallConv(CallConv));
+
+ Canonical = getFunctionType(getCanonicalType(ResultTy),
+ CanonicalArgs.data(), NumArgs,
+ CanonicalEPI);
+
+ // Get the new insert position for the node we care about.
+ FunctionProtoType *NewIP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ // FunctionProtoType objects are allocated with extra bytes after
+ // them for three variable size arrays at the end:
+ // - parameter types
+ // - exception types
+ // - consumed-arguments flags
+ // Instead of the exception types, there could be a noexcept
+ // expression.
+ size_t Size = sizeof(FunctionProtoType) +
+ NumArgs * sizeof(QualType);
+ if (EPI.ExceptionSpecType == EST_Dynamic)
+ Size += EPI.NumExceptions * sizeof(QualType);
+ else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ Size += sizeof(Expr*);
+ } else if (EPI.ExceptionSpecType == EST_Uninstantiated) {
+ Size += 2 * sizeof(FunctionDecl*);
+ }
+ if (EPI.ConsumedArguments)
+ Size += NumArgs * sizeof(bool);
+
+ FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
+ FunctionProtoType::ExtProtoInfo newEPI = EPI;
+ newEPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallConv);
+ new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, Canonical, newEPI);
+ Types.push_back(FTP);
+ FunctionProtoTypes.InsertNode(FTP, InsertPos);
+ return QualType(FTP, 0);
+}
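+
+// Sizing sketch (illustrative arithmetic only): a prototype with 3 parameters,
+// a dynamic exception specification listing 2 types, and consumed-argument
+// flags is allocated in a single block of
+//   sizeof(FunctionProtoType) + 3 * sizeof(QualType)   // parameter types
+//                             + 2 * sizeof(QualType)   // exception types
+//                             + 3 * sizeof(bool)       // consumed-args flags
+// exactly as computed above.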
+
+#ifndef NDEBUG
+static bool NeedsInjectedClassNameType(const RecordDecl *D) {
+ if (!isa<CXXRecordDecl>(D)) return false;
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ if (isa<ClassTemplatePartialSpecializationDecl>(RD))
+ return true;
+ if (RD->getDescribedClassTemplate() &&
+ !isa<ClassTemplateSpecializationDecl>(RD))
+ return true;
+ return false;
+}
+#endif
+
+/// getInjectedClassNameType - Return the unique reference to the
+/// injected class name type for the specified templated declaration.
+QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
+ QualType TST) const {
+ assert(NeedsInjectedClassNameType(Decl));
+ if (Decl->TypeForDecl) {
+ assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
+ } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
+ assert(PrevDecl->TypeForDecl && "previous declaration has no type");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
+ } else {
+ Type *newType =
+ new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ }
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypeDeclType - Return the unique reference to the type for the
+/// specified type declaration.
+QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
+ assert(Decl && "Passed null for Decl param");
+ assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
+
+ if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
+ return getTypedefType(Typedef);
+
+ assert(!isa<TemplateTypeParmDecl>(Decl) &&
+ "Template type parameter types are always available.");
+
+ if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
+ assert(!Record->getPreviousDecl() &&
+ "struct/union has previous declaration");
+ assert(!NeedsInjectedClassNameType(Record));
+ return getRecordType(Record);
+ } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
+ assert(!Enum->getPreviousDecl() &&
+ "enum has previous declaration");
+ return getEnumType(Enum);
+ } else if (const UnresolvedUsingTypenameDecl *Using =
+ dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
+ Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ } else
+ llvm_unreachable("TypeDecl without a type?");
+
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypedefType - Return the unique reference to the type for the
+/// specified typedef name decl.
+QualType
+ASTContext::getTypedefType(const TypedefNameDecl *Decl,
+ QualType Canonical) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (Canonical.isNull())
+ Canonical = getCanonicalType(Decl->getUnderlyingType());
+  TypedefType *newType = new (*this, TypeAlignment)
+ TypedefType(Type::Typedef, Decl, Canonical);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
+ QualType modifiedType,
+ QualType equivalentType) {
+ llvm::FoldingSetNodeID id;
+ AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
+
+ void *insertPos = 0;
+ AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
+ if (type) return QualType(type, 0);
+
+ QualType canon = getCanonicalType(equivalentType);
+ type = new (*this, TypeAlignment)
+ AttributedType(canon, attrKind, modifiedType, equivalentType);
+
+ Types.push_back(type);
+ AttributedTypes.InsertNode(type, insertPos);
+
+ return QualType(type, 0);
+}
+
+
+/// \brief Retrieve a substitution-result type.
+QualType
+ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
+ QualType Replacement) const {
+ assert(Replacement.isCanonical()
+ && "replacement types must always be canonical");
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
+ void *InsertPos = 0;
+ SubstTemplateTypeParmType *SubstParm
+ = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!SubstParm) {
+ SubstParm = new (*this, TypeAlignment)
+ SubstTemplateTypeParmType(Parm, Replacement);
+ Types.push_back(SubstParm);
+ SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
+ }
+
+ return QualType(SubstParm, 0);
+}
+
+/// \brief Retrieve a substitution-result type for a template type parameter
+/// pack that has been substituted with the given argument pack.
+QualType ASTContext::getSubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Parm,
+ const TemplateArgument &ArgPack) {
+#ifndef NDEBUG
+ for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
+ PEnd = ArgPack.pack_end();
+ P != PEnd; ++P) {
+ assert(P->getKind() == TemplateArgument::Type &&"Pack contains a non-type");
+ assert(P->getAsType().isCanonical() && "Pack contains non-canonical type");
+ }
+#endif
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
+ void *InsertPos = 0;
+ if (SubstTemplateTypeParmPackType *SubstParm
+ = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(SubstParm, 0);
+
+ QualType Canon;
+ if (!Parm->isCanonicalUnqualified()) {
+ Canon = getCanonicalType(QualType(Parm, 0));
+ Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
+ ArgPack);
+ SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ SubstTemplateTypeParmPackType *SubstParm
+ = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
+ ArgPack);
+ Types.push_back(SubstParm);
+  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
+ return QualType(SubstParm, 0);
+}
+
+/// \brief Retrieve the template type parameter type for a template
+/// parameter or parameter pack with the given depth, index, and (optionally)
+/// name.
+QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
+ bool ParameterPack,
+ TemplateTypeParmDecl *TTPDecl) const {
+ llvm::FoldingSetNodeID ID;
+ TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
+ void *InsertPos = 0;
+ TemplateTypeParmType *TypeParm
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (TypeParm)
+ return QualType(TypeParm, 0);
+
+ if (TTPDecl) {
+ QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
+ TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
+
+ TemplateTypeParmType *TypeCheck
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!TypeCheck && "Template type parameter canonical type broken");
+ (void)TypeCheck;
+ } else
+ TypeParm = new (*this, TypeAlignment)
+ TemplateTypeParmType(Depth, Index, ParameterPack);
+
+ Types.push_back(TypeParm);
+ TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
+
+ return QualType(TypeParm, 0);
+}
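+
+// Example (descriptive comment added for clarity): inside
+//   template<class T, class U> struct S;
+// 'T' is the template type parameter at depth 0, index 0 and 'U' is at
+// depth 0, index 1.  The form carrying a TemplateTypeParmDecl canonicalizes
+// to the unnamed (depth, index, pack) form built above.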
+
+TypeSourceInfo *
+ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
+ SourceLocation NameLoc,
+ const TemplateArgumentListInfo &Args,
+ QualType Underlying) const {
+ assert(!Name.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+ QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
+
+ TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
+ TemplateSpecializationTypeLoc TL
+ = cast<TemplateSpecializationTypeLoc>(DI->getTypeLoc());
+ TL.setTemplateKeywordLoc(SourceLocation());
+ TL.setTemplateNameLoc(NameLoc);
+ TL.setLAngleLoc(Args.getLAngleLoc());
+ TL.setRAngleLoc(Args.getRAngleLoc());
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ TL.setArgLocInfo(i, Args[i].getLocInfo());
+ return DI;
+}
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgumentListInfo &Args,
+ QualType Underlying) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+
+ unsigned NumArgs = Args.size();
+
+ SmallVector<TemplateArgument, 4> ArgVec;
+ ArgVec.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ArgVec.push_back(Args[i].getArgument());
+
+ return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs,
+ Underlying);
+}
+
+#ifndef NDEBUG
+static bool hasAnyPackExpansions(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (Args[I].isPackExpansion())
+ return true;
+
+  return false;
+}
+#endif
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ QualType Underlying) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+ // Look through qualified template names.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ bool IsTypeAlias =
+ Template.getAsTemplateDecl() &&
+ isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
+ QualType CanonType;
+ if (!Underlying.isNull())
+ CanonType = getCanonicalType(Underlying);
+ else {
+ // We can get here with an alias template when the specialization contains
+ // a pack expansion that does not match up with a parameter pack.
+ assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) &&
+ "Caller must compute aliased type");
+ IsTypeAlias = false;
+ CanonType = getCanonicalTemplateSpecializationType(Template, Args,
+ NumArgs);
+ }
+
+ // Allocate the (non-canonical) template specialization type, but don't
+ // try to unique it: these types typically have location information that
+ // we don't unique and don't want to lose.
+ void *Mem = Allocate(sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs +
+ (IsTypeAlias? sizeof(QualType) : 0),
+ TypeAlignment);
+ TemplateSpecializationType *Spec
+ = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType,
+ IsTypeAlias ? Underlying : QualType());
+
+ Types.push_back(Spec);
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+
+ // Look through qualified template names.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ // Build the canonical template specialization type.
+ TemplateName CanonTemplate = getCanonicalTemplateName(Template);
+ SmallVector<TemplateArgument, 4> CanonArgs;
+ CanonArgs.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
+
+ // Determine whether this canonical template specialization type already
+ // exists.
+ llvm::FoldingSetNodeID ID;
+ TemplateSpecializationType::Profile(ID, CanonTemplate,
+ CanonArgs.data(), NumArgs, *this);
+
+ void *InsertPos = 0;
+ TemplateSpecializationType *Spec
+ = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Spec) {
+ // Allocate a new canonical template specialization type.
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
+ CanonArgs.data(), NumArgs,
+ QualType(), QualType());
+ Types.push_back(Spec);
+ TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
+ }
+
+ assert(Spec->isDependentType() &&
+ "Non-dependent template-id type must have a canonical type");
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ QualType NamedType) const {
+ llvm::FoldingSetNodeID ID;
+ ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
+
+ void *InsertPos = 0;
+ ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = NamedType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(NamedType);
+ ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Elaborated canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon);
+ Types.push_back(T);
+ ElaboratedTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getParenType(QualType InnerType) const {
+ llvm::FoldingSetNodeID ID;
+ ParenType::Profile(ID, InnerType);
+
+ void *InsertPos = 0;
+ ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = InnerType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(InnerType);
+ ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Paren canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this) ParenType(InnerType, Canon);
+ Types.push_back(T);
+ ParenTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ QualType Canon) const {
+ assert(NNS->isDependent() && "nested-name-specifier must be dependent");
+
+ if (Canon.isNull()) {
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None)
+ CanonKeyword = ETK_Typename;
+
+ if (CanonNNS != NNS || CanonKeyword != Keyword)
+ Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ DependentNameType::Profile(ID, Keyword, NNS, Name);
+
+ void *InsertPos = 0;
+ DependentNameType *T
+ = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ T = new (*this) DependentNameType(Keyword, NNS, Name, Canon);
+ Types.push_back(T);
+ DependentNameTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args) const {
+ // TODO: avoid this copy
+ SmallVector<TemplateArgument, 16> ArgCopy;
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ ArgCopy.push_back(Args[I].getArgument());
+ return getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ ArgCopy.size(),
+ ArgCopy.data());
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "nested-name-specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
+ Name, NumArgs, Args);
+
+ void *InsertPos = 0;
+ DependentTemplateSpecializationType *T
+ = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
+
+ bool AnyNonCanonArgs = false;
+ SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
+ if (!CanonArgs[I].structurallyEquals(Args[I]))
+ AnyNonCanonArgs = true;
+ }
+
+ QualType Canon;
+ if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
+ Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
+ Name, NumArgs,
+ CanonArgs.data());
+
+ // Find the insert position again.
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
+ Name, NumArgs, Args, Canon);
+ Types.push_back(T);
+ DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getPackExpansionType(QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions) {
+ llvm::FoldingSetNodeID ID;
+ PackExpansionType::Profile(ID, Pattern, NumExpansions);
+
+ assert(Pattern->containsUnexpandedParameterPack() &&
+ "Pack expansions must expand one or more parameter packs");
+ void *InsertPos = 0;
+ PackExpansionType *T
+ = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon;
+ if (!Pattern.isCanonical()) {
+ Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions);
+
+ // Find the insert position again.
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions);
+ Types.push_back(T);
+ PackExpansionTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+/// CmpProtocolNames - Comparison predicate for sorting protocols
+/// alphabetically.
+static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
+ const ObjCProtocolDecl *RHS) {
+ return LHS->getDeclName() < RHS->getDeclName();
+}
+
+static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) {
+ if (NumProtocols == 0) return true;
+
+ if (Protocols[0]->getCanonicalDecl() != Protocols[0])
+ return false;
+
+ for (unsigned i = 1; i != NumProtocols; ++i)
+ if (!CmpProtocolNames(Protocols[i-1], Protocols[i]) ||
+ Protocols[i]->getCanonicalDecl() != Protocols[i])
+ return false;
+ return true;
+}
+
+static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
+ unsigned &NumProtocols) {
+ ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols;
+
+ // Sort protocols, keyed by name.
+ std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames);
+
+ // Canonicalize.
+ for (unsigned I = 0, N = NumProtocols; I != N; ++I)
+ Protocols[I] = Protocols[I]->getCanonicalDecl();
+
+ // Remove duplicates.
+ ProtocolsEnd = std::unique(Protocols, ProtocolsEnd);
+ NumProtocols = ProtocolsEnd-Protocols;
+}
+
+QualType ASTContext::getObjCObjectType(QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) const {
+ // If the base type is an interface and there aren't any protocols
+ // to add, then the interface type will do just fine.
+ if (!NumProtocols && isa<ObjCInterfaceType>(BaseType))
+ return BaseType;
+
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectTypeImpl::Profile(ID, BaseType, Protocols, NumProtocols);
+ void *InsertPos = 0;
+ if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // Build the canonical type, which has the canonical base type and
+ // a sorted-and-uniqued list of protocols.
+ QualType Canonical;
+ bool ProtocolsSorted = areSortedAndUniqued(Protocols, NumProtocols);
+ if (!ProtocolsSorted || !BaseType.isCanonical()) {
+ if (!ProtocolsSorted) {
+ SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols,
+ Protocols + NumProtocols);
+ unsigned UniqueCount = NumProtocols;
+
+ SortAndUniqueProtocols(&Sorted[0], UniqueCount);
+ Canonical = getObjCObjectType(getCanonicalType(BaseType),
+ &Sorted[0], UniqueCount);
+ } else {
+ Canonical = getObjCObjectType(getCanonicalType(BaseType),
+ Protocols, NumProtocols);
+ }
+
+ // Regenerate InsertPos.
+ ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ unsigned Size = sizeof(ObjCObjectTypeImpl);
+ Size += NumProtocols * sizeof(ObjCProtocolDecl *);
+ void *Mem = Allocate(Size, TypeAlignment);
+ ObjCObjectTypeImpl *T =
+ new (Mem) ObjCObjectTypeImpl(Canonical, BaseType, Protocols, NumProtocols);
+
+ Types.push_back(T);
+ ObjCObjectTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+/// getObjCObjectPointerType - Return an ObjCObjectPointerType for
+/// the given object type.
+QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectPointerType::Profile(ID, ObjectT);
+
+ void *InsertPos = 0;
+ if (ObjCObjectPointerType *QT =
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // Find the canonical object type.
+ QualType Canonical;
+ if (!ObjectT.isCanonical()) {
+ Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
+
+ // Regenerate InsertPos.
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ // No match.
+ void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
+ ObjCObjectPointerType *QType =
+ new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
+
+ Types.push_back(QType);
+ ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
+ return QualType(QType, 0);
+}
+
+/// getObjCInterfaceType - Return the unique reference to the type for the
+/// specified ObjC interface decl.
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
+ ObjCInterfaceDecl *PrevDecl) const {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ if (PrevDecl) {
+ assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ return QualType(PrevDecl->TypeForDecl, 0);
+ }
+
+ // Prefer the definition, if there is one.
+ if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
+ Decl = Def;
+
+ void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
+ ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
+ Decl->TypeForDecl = T;
+ Types.push_back(T);
+ return QualType(T, 0);
+}
+
+/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
+/// TypeOfExprType ASTs (since expressions are never shared). For example,
+/// multiple declarations that refer to "typeof(x)" all contain different
+/// DeclRefExprs. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
+ TypeOfExprType *toe;
+ if (tofExpr->isTypeDependent()) {
+ llvm::FoldingSetNodeID ID;
+ DependentTypeOfExprType::Profile(ID, *this, tofExpr);
+
+ void *InsertPos = 0;
+ DependentTypeOfExprType *Canon
+ = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canon) {
+ // We already have a "canonical" version of an identical, dependent
+ // typeof(expr) type. Use that as our canonical type.
+ toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
+ QualType((TypeOfExprType*)Canon, 0));
+ } else {
+ // Build a new, canonical typeof(expr) type.
+ Canon
+ = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
+ DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
+ toe = Canon;
+ }
+ } else {
+ QualType Canonical = getCanonicalType(tofExpr->getType());
+ toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
+ }
+ Types.push_back(toe);
+ return QualType(toe, 0);
+}
+
+/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
+/// TypeOfType ASTs. The only motivation to unique these nodes would be
+/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
+/// an issue. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfType(QualType tofType) const {
+ QualType Canonical = getCanonicalType(tofType);
+ TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
+ Types.push_back(tot);
+ return QualType(tot, 0);
+}
+
+
+/// getDecltypeType - Unlike many "get<Type>" functions, we don't unique
+/// DecltypeType ASTs. The only motivation to unique these nodes would be
+/// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be
+/// an issue. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
+ DecltypeType *dt;
+
+ // C++0x [temp.type]p2:
+ // If an expression e involves a template parameter, decltype(e) denotes a
+ // unique dependent type. Two such decltype-specifiers refer to the same
+ // type only if their expressions are equivalent (14.5.6.1).
+ if (e->isInstantiationDependent()) {
+ llvm::FoldingSetNodeID ID;
+ DependentDecltypeType::Profile(ID, *this, e);
+
+ void *InsertPos = 0;
+ DependentDecltypeType *Canon
+ = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canon) {
+ // We already have a "canonical" version of an equivalent, dependent
+ // decltype type. Use that as our canonical type.
+ dt = new (*this, TypeAlignment) DecltypeType(e, DependentTy,
+ QualType((DecltypeType*)Canon, 0));
+ } else {
+ // Build a new, canonical decltype(expr) type.
+ Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
+ DependentDecltypeTypes.InsertNode(Canon, InsertPos);
+ dt = Canon;
+ }
+ } else {
+ dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType,
+ getCanonicalType(UnderlyingType));
+ }
+ Types.push_back(dt);
+ return QualType(dt, 0);
+}
+
+/// getUnaryTransformType - We don't unique these, since the memory
+/// savings are minimal and these are rare.
+QualType ASTContext::getUnaryTransformType(QualType BaseType,
+ QualType UnderlyingType,
+ UnaryTransformType::UTTKind Kind)
+ const {
+ UnaryTransformType *Ty =
+ new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType,
+ Kind,
+ UnderlyingType->isDependentType() ?
+ QualType() : getCanonicalType(UnderlyingType));
+ Types.push_back(Ty);
+ return QualType(Ty, 0);
+}
+
+/// getAutoType - We only unique auto types after they've been deduced.
+QualType ASTContext::getAutoType(QualType DeducedType) const {
+ void *InsertPos = 0;
+ if (!DeducedType.isNull()) {
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ AutoType::Profile(ID, DeducedType);
+ if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(AT, 0);
+ }
+
+ AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType);
+ Types.push_back(AT);
+ if (InsertPos)
+ AutoTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
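+
+// Usage sketch (illustrative, not part of the original source): passing a
+// null QualType yields the undeduced 'auto' placeholder pattern (as
+// getAutoDeductType below does), while a non-null deduced type is uniqued
+// through the AutoTypes folding set.
+//   QualType Undeduced = Ctx.getAutoType(QualType()); // 'auto' pattern
+//   QualType Deduced   = Ctx.getAutoType(Ctx.IntTy);  // deduced as 'int'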
+
+/// getAtomicType - Return the uniqued reference to the atomic type for
+/// the given value type.
+QualType ASTContext::getAtomicType(QualType T) const {
+ // Unique atomic types, to guarantee there is only one AtomicType for a
+ // particular value type.
+ llvm::FoldingSetNodeID ID;
+ AtomicType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(AT, 0);
+
+ // If the atomic value type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getAtomicType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
+ Types.push_back(New);
+ AtomicTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getAutoDeductType - Get type pattern for deducing against 'auto'.
+QualType ASTContext::getAutoDeductType() const {
+ if (AutoDeductTy.isNull())
+ AutoDeductTy = getAutoType(QualType());
+ assert(!AutoDeductTy.isNull() && "can't build 'auto' pattern");
+ return AutoDeductTy;
+}
+
+/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
+QualType ASTContext::getAutoRRefDeductType() const {
+ if (AutoRRefDeductTy.isNull())
+ AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
+ assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
+ return AutoRRefDeductTy;
+}
+
+/// getTagDeclType - Return the unique reference to the type for the
+/// specified TagDecl (struct/union/class/enum) decl.
+QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
+ assert (Decl);
+ // FIXME: What is the design on getTagDeclType when it requires casting
+ // away const? mutable?
+ return getTypeDeclType(const_cast<TagDecl*>(Decl));
+}
+
+/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
+/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
+/// needs to agree with the definition in <stddef.h>.
+CanQualType ASTContext::getSizeType() const {
+ return getFromTargetType(Target->getSizeType());
+}
+
+/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getIntMaxType() const {
+ return getFromTargetType(Target->getIntMaxType());
+}
+
+/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getUIntMaxType() const {
+ return getFromTargetType(Target->getUIntMaxType());
+}
+
+/// getSignedWCharType - Return the type of "signed wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getSignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return WCharTy;
+}
+
+/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getUnsignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return UnsignedIntTy;
+}
+
+/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
+/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
+QualType ASTContext::getPointerDiffType() const {
+ return getFromTargetType(Target->getPtrDiffType(0));
+}
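+
+// Usage sketch (illustrative, not part of the original source), assuming an
+// ASTContext &Ctx for an LP64 target; the exact types are target-dependent:
+//   CanQualType SizeT   = Ctx.getSizeType();        // e.g. 'unsigned long'
+//   CanQualType IntMax  = Ctx.getIntMaxType();      // e.g. 'long'
+//   QualType    PtrDiff = Ctx.getPointerDiffType(); // e.g. 'long'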
+
+//===----------------------------------------------------------------------===//
+// Type Operators
+//===----------------------------------------------------------------------===//
+
+CanQualType ASTContext::getCanonicalParamType(QualType T) const {
+ // Push qualifiers into arrays, and then discard any remaining
+ // qualifiers.
+ T = getCanonicalType(T);
+ T = getVariableArrayDecayedType(T);
+ const Type *Ty = T.getTypePtr();
+ QualType Result;
+ if (isa<ArrayType>(Ty)) {
+ Result = getArrayDecayedType(QualType(Ty,0));
+ } else if (isa<FunctionType>(Ty)) {
+ Result = getPointerType(QualType(Ty, 0));
+ } else {
+ Result = QualType(Ty, 0);
+ }
+
+ return CanQualType::CreateUnsafe(Result);
+}
+
+QualType ASTContext::getUnqualifiedArrayType(QualType type,
+ Qualifiers &quals) {
+ SplitQualType splitType = type.getSplitUnqualifiedType();
+
+ // FIXME: getSplitUnqualifiedType() actually walks all the way to
+ // the unqualified desugared type and then drops it on the floor.
+ // We then have to strip that sugar back off with
+ // getUnqualifiedDesugaredType(), which is silly.
+ const ArrayType *AT =
+ dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
+
+ // If we don't have an array, just use the results in splitType.
+ if (!AT) {
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
+ }
+
+ // Otherwise, recurse on the array's element type.
+ QualType elementType = AT->getElementType();
+ QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
+
+ // If that didn't change the element type, AT has no qualifiers, so we
+ // can just use the results in splitType.
+ if (elementType == unqualElementType) {
+ assert(quals.empty()); // from the recursive call
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
+ }
+
+ // Otherwise, add in the qualifiers from the outermost type, then
+ // build the type back up.
+ quals.addConsistentQualifiers(splitType.Quals);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ return getConstantArrayType(unqualElementType, CAT->getSize(),
+ CAT->getSizeModifier(), 0);
+ }
+
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
+ }
+
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
+ return getVariableArrayType(unqualElementType,
+ VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange());
+ }
+
+ const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
+ return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(), 0,
+ SourceRange());
+}
+
+/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
+/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
+/// they point to and returns true. If T1 and T2 aren't pointer types
+/// or pointer-to-member types, or if they are not similar at this
+/// level, returns false and leaves T1 and T2 unchanged. Top-level
+/// qualifiers on T1 and T2 are ignored. This function will typically
+/// be called in a loop that successively "unwraps" pointer and
+/// pointer-to-member types to compare them at each level.
+bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType &&
+ hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
+ QualType(T2MPType->getClass(), 0))) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ if (getLangOpts().ObjC1) {
+ const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
+ *T2OPType = T2->getAs<ObjCObjectPointerType>();
+ if (T1OPType && T2OPType) {
+ T1 = T1OPType->getPointeeType();
+ T2 = T2OPType->getPointeeType();
+ return true;
+ }
+ }
+
+ // FIXME: Block pointers, too?
+
+ return false;
+}
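+
+// Usage sketch (illustrative, not part of the original source): as the
+// comment above notes, this is meant to be driven in a loop that strips
+// matching pointer levels, e.g. when comparing qualifiers level by level:
+//   QualType T1 = ..., T2 = ...;
+//   while (Ctx.UnwrapSimilarPointerTypes(T1, T2)) {
+//     // compare the qualifiers of T1 and T2 at this level
+//   }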
+
+DeclarationNameInfo
+ASTContext::getNameForTemplate(TemplateName Name,
+ SourceLocation NameLoc) const {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template:
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
+ NameLoc);
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ DeclarationName DName;
+ if (DTN->isIdentifier()) {
+ DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
+ return DeclarationNameInfo(DName, NameLoc);
+ } else {
+ DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
+ // DNInfo work in progress: FIXME: source locations?
+ DeclarationNameLoc DNLoc;
+ DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
+ DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ return DeclarationNameInfo(DName, NameLoc, DNLoc);
+ }
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return DeclarationNameInfo(subst->getParameter()->getDeclName(),
+ NameLoc);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
+ NameLoc);
+ }
+ }
+
+ llvm_unreachable("bad template name kind!");
+}
+
+TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template: {
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ Template = getCanonicalTemplateTemplateParmDecl(TTP);
+
+ // The canonical template name is the canonical template declaration.
+ return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
+ }
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("cannot canonicalize overloaded template");
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ assert(DTN && "Non-dependent template names must refer to template decls.");
+ return DTN->CanonicalTemplateName;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return getCanonicalTemplateName(subst->getReplacement());
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *canonParameter
+ = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
+ TemplateArgument canonArgPack
+ = getCanonicalTemplateArgument(subst->getArgumentPack());
+ return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
+ }
+ }
+
+ llvm_unreachable("bad template name!");
+}
+
+bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
+ X = getCanonicalTemplateName(X);
+ Y = getCanonicalTemplateName(Y);
+ return X.getAsVoidPointer() == Y.getAsVoidPointer();
+}
+
+TemplateArgument
+ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ return Arg;
+
+ case TemplateArgument::Expression:
+ return Arg;
+
+ case TemplateArgument::Declaration: {
+ if (Decl *D = Arg.getAsDecl())
+ return TemplateArgument(D->getCanonicalDecl());
+ return TemplateArgument((Decl*)0);
+ }
+
+ case TemplateArgument::Template:
+ return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
+
+ case TemplateArgument::TemplateExpansion:
+ return TemplateArgument(getCanonicalTemplateName(
+ Arg.getAsTemplateOrTemplatePattern()),
+ Arg.getNumTemplateExpansions());
+
+ case TemplateArgument::Integral:
+ return TemplateArgument(*Arg.getAsIntegral(),
+ getCanonicalType(Arg.getIntegralType()));
+
+ case TemplateArgument::Type:
+ return TemplateArgument(getCanonicalType(Arg.getAsType()));
+
+ case TemplateArgument::Pack: {
+ if (Arg.pack_size() == 0)
+ return Arg;
+
+ TemplateArgument *CanonArgs
+ = new (*this) TemplateArgument[Arg.pack_size()];
+ unsigned Idx = 0;
+ for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
+ AEnd = Arg.pack_end();
+ A != AEnd; (void)++A, ++Idx)
+ CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
+
+ return TemplateArgument(CanonArgs, Arg.pack_size());
+ }
+ }
+
+ // Silence GCC warning
+ llvm_unreachable("Unhandled template argument kind");
+}
+
+NestedNameSpecifier *
+ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
+ if (!NNS)
+ return 0;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ // Canonicalize the prefix but keep the identifier the same.
+ return NestedNameSpecifier::Create(*this,
+ getCanonicalNestedNameSpecifier(NNS->getPrefix()),
+ NNS->getAsIdentifier());
+
+ case NestedNameSpecifier::Namespace:
+ // A namespace is canonical; build a nested-name-specifier with
+ // this namespace and no prefix.
+ return NestedNameSpecifier::Create(*this, 0,
+ NNS->getAsNamespace()->getOriginalNamespace());
+
+ case NestedNameSpecifier::NamespaceAlias:
+ // A namespace alias resolves to its underlying namespace, which is
+ // canonical; build a nested-name-specifier with that namespace and no
+ // prefix.
+ return NestedNameSpecifier::Create(*this, 0,
+ NNS->getAsNamespaceAlias()->getNamespace()
+ ->getOriginalNamespace());
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
+
+ // If we have some kind of dependent-named type (e.g., "typename T::type"),
+ // break it apart into its prefix and identifier, then reconstitute those
+ // as the canonical nested-name-specifier. This is required to canonicalize
+ // a dependent nested-name-specifier involving typedefs of dependent-name
+ // types, e.g.,
+ // typedef typename T::type T1;
+ // typedef typename T1::type T2;
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>())
+ return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
+ const_cast<IdentifierInfo *>(DNT->getIdentifier()));
+
+ // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
+ // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
+ // first place?
+ return NestedNameSpecifier::Create(*this, 0, false,
+ const_cast<Type*>(T.getTypePtr()));
+ }
+
+ case NestedNameSpecifier::Global:
+ // The global specifier is canonical and unique.
+ return NNS;
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+
+const ArrayType *ASTContext::getAsArrayType(QualType T) const {
+ // Handle the non-qualified case efficiently.
+ if (!T.hasLocalQualifiers()) {
+ // Handle the common positive case fast.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return AT;
+ }
+
+ // Handle the common negative case fast.
+ if (!isa<ArrayType>(T.getCanonicalType()))
+ return 0;
+
+ // Apply any qualifiers from the array type to the element type. This
+ // implements C99 6.7.3p8: "If the specification of an array type includes
+ // any type qualifiers, the element type is so qualified, not the array type."
+
+ // If we get here, we either have type qualifiers on the type, or we have
+ // sugar such as a typedef in the way. If we have type qualifiers on the type
+ // we must propagate them down into the element type.
+
+ SplitQualType split = T.getSplitDesugaredType();
+ Qualifiers qs = split.Quals;
+
+ // If we have a simple case, just return now.
+ const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
+ if (ATy == 0 || qs.empty())
+ return ATy;
+
+ // Otherwise, we have an array and we have qualifiers on it. Push the
+ // qualifiers into the array element type and return a new array type.
+ QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
+ return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
+ CAT->getSizeModifier(),
+ CAT->getIndexTypeCVRQualifiers()));
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
+ return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
+ IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers()));
+
+ if (const DependentSizedArrayType *DSAT
+ = dyn_cast<DependentSizedArrayType>(ATy))
+ return cast<ArrayType>(
+ getDependentSizedArrayType(NewEltTy,
+ DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(),
+ DSAT->getIndexTypeCVRQualifiers(),
+ DSAT->getBracketsRange()));
+
+ const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
+ return cast<ArrayType>(getVariableArrayType(NewEltTy,
+ VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange()));
+}
+
+QualType ASTContext::getAdjustedParameterType(QualType T) {
+ // C99 6.7.5.3p7:
+ // A declaration of a parameter as "array of type" shall be
+ // adjusted to "qualified pointer to type", where the type
+ // qualifiers (if any) are those specified within the [ and ] of
+ // the array type derivation.
+ if (T->isArrayType())
+ return getArrayDecayedType(T);
+
+ // C99 6.7.5.3p8:
+ // A declaration of a parameter as "function returning type"
+ // shall be adjusted to "pointer to function returning type", as
+ // in 6.3.2.1.
+ if (T->isFunctionType())
+ return getPointerType(T);
+
+ return T;
+}
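+
+// Illustrative examples of the adjustments above (not part of the original
+// source): a parameter declared 'int a[restrict 4]' is adjusted to
+// 'int *restrict a', and a parameter declared with a function type such as
+// 'void f()' is adjusted to the pointer type 'void (*f)()'.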
+
+QualType ASTContext::getSignatureParameterType(QualType T) {
+ T = getVariableArrayDecayedType(T);
+ T = getAdjustedParameterType(T);
+ return T.getUnqualifiedType();
+}
+
+/// getArrayDecayedType - Return the properly qualified result of decaying the
+/// specified array type to a pointer. This operation is non-trivial when
+/// handling typedefs etc. The canonical type of "T" must be an array type;
+/// this returns a pointer to a properly qualified element of the array.
+///
+/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
+QualType ASTContext::getArrayDecayedType(QualType Ty) const {
+ // Get the element type with 'getAsArrayType' so that we don't lose any
+ // typedefs in the element type of the array. This also handles propagation
+ // of type qualifiers from the array type into the element type if present
+ // (C99 6.7.3p8).
+ const ArrayType *PrettyArrayType = getAsArrayType(Ty);
+ assert(PrettyArrayType && "Not an array type!");
+
+ QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
+
+ // int x[restrict 4] -> int *restrict
+ return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers());
+}
+
+QualType ASTContext::getBaseElementType(const ArrayType *array) const {
+ return getBaseElementType(array->getElementType());
+}
+
+QualType ASTContext::getBaseElementType(QualType type) const {
+ Qualifiers qs;
+ while (true) {
+ SplitQualType split = type.getSplitDesugaredType();
+ const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
+ if (!array) break;
+
+ type = array->getElementType();
+ qs.addConsistentQualifiers(split.Quals);
+ }
+
+ return getQualifiedType(type, qs);
+}
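+
+// Illustrative example (not part of the original source): for a type like
+// 'const int [2][3]', the loop above strips both array levels and
+// re-applies the collected qualifiers, yielding 'const int'.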
+
+/// getConstantArrayElementCount - Returns number of constant array elements.
+uint64_t
+ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
+ uint64_t ElementCount = 1;
+ do {
+ ElementCount *= CA->getSize().getZExtValue();
+ CA = dyn_cast<ConstantArrayType>(CA->getElementType());
+ } while (CA);
+ return ElementCount;
+}
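+
+// Illustrative example (not part of the original source): for
+// 'int a[2][3][4]' the nested constant array types multiply out to
+// 2 * 3 * 4 = 24 elements.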
+
+/// getFloatingRank - Return a relative rank for floating point types.
+/// This routine will assert if passed a built-in type that isn't a float.
+static FloatingRank getFloatingRank(QualType T) {
+ if (const ComplexType *CT = T->getAs<ComplexType>())
+ return getFloatingRank(CT->getElementType());
+
+ assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("getFloatingRank(): not a floating type");
+ case BuiltinType::Half: return HalfRank;
+ case BuiltinType::Float: return FloatRank;
+ case BuiltinType::Double: return DoubleRank;
+ case BuiltinType::LongDouble: return LongDoubleRank;
+ }
+}
+
+/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
+/// point or a complex type (based on Domain/Size).
+/// 'Domain' is a real floating point or complex type.
+/// 'Size' is a real floating point or complex type whose rank is used.
+QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
+ QualType Domain) const {
+ FloatingRank EltRank = getFloatingRank(Size);
+ if (Domain->isComplexType()) {
+ switch (EltRank) {
+ case HalfRank: llvm_unreachable("Complex half is not supported");
+ case FloatRank: return FloatComplexTy;
+ case DoubleRank: return DoubleComplexTy;
+ case LongDoubleRank: return LongDoubleComplexTy;
+ }
+ }
+
+ assert(Domain->isRealFloatingType() && "Unknown domain!");
+ switch (EltRank) {
+ case HalfRank: llvm_unreachable("Half ranks are not valid here");
+ case FloatRank: return FloatTy;
+ case DoubleRank: return DoubleTy;
+ case LongDoubleRank: return LongDoubleTy;
+ }
+ llvm_unreachable("getFloatingRank(): illegal value for rank");
+}
+
+/// getFloatingTypeOrder - Compare the rank of the two specified floating
+/// point types, ignoring the domain of the type (i.e. 'double' ==
+/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
+int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
+ FloatingRank LHSR = getFloatingRank(LHS);
+ FloatingRank RHSR = getFloatingRank(RHS);
+
+ if (LHSR == RHSR)
+ return 0;
+ if (LHSR > RHSR)
+ return 1;
+ return -1;
+}
+
+/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
+/// routine will assert if passed a built-in type that isn't an integer or enum,
+/// or if it is not canonicalized.
+unsigned ASTContext::getIntegerRank(const Type *T) const {
+ assert(T->isCanonicalUnqualified() && "T should be canonicalized");
+
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: llvm_unreachable("getIntegerRank(): not a built-in integer");
+ case BuiltinType::Bool:
+ return 1 + (getIntWidth(BoolTy) << 3);
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ return 2 + (getIntWidth(CharTy) << 3);
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return 3 + (getIntWidth(ShortTy) << 3);
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return 4 + (getIntWidth(IntTy) << 3);
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ return 5 + (getIntWidth(LongTy) << 3);
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ return 6 + (getIntWidth(LongLongTy) << 3);
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return 7 + (getIntWidth(Int128Ty) << 3);
+ }
+}
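+
+// Illustrative note (not part of the original source): on a target with
+// 8-bit char, 16-bit short and 32-bit int, the scheme above gives
+// rank(char) = 2 + (8 << 3) = 66, rank(short) = 3 + (16 << 3) = 131 and
+// rank(int) = 4 + (32 << 3) = 260, so a wider type always outranks a
+// narrower one, while the small additive constant orders same-width types
+// (e.g. 'long long' above 'long' when both are 64 bits wide).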
+
+/// \brief Whether this is a promotable bitfield reference according
+/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
+///
+/// \returns the type this bit-field will promote to, or NULL if no
+/// promotion occurs.
+QualType ASTContext::isPromotableBitField(Expr *E) const {
+ if (E->isTypeDependent() || E->isValueDependent())
+ return QualType();
+
+ FieldDecl *Field = E->getBitField();
+ if (!Field)
+ return QualType();
+
+ QualType FT = Field->getType();
+
+ uint64_t BitWidth = Field->getBitWidthValue(*this);
+ uint64_t IntSize = getTypeSize(IntTy);
+ // GCC extension compatibility: if the bit-field size is less than or equal
+ // to the size of int, it gets promoted no matter what its type is.
+ // For instance, unsigned long bf : 4 gets promoted to signed int.
+ if (BitWidth < IntSize)
+ return IntTy;
+
+ if (BitWidth == IntSize)
+ return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
+
+ // Types bigger than int are not subject to promotions, and therefore act
+ // like the base type.
+ // FIXME: This doesn't quite match what gcc does, but what gcc does here
+ // is ridiculous.
+ return QualType();
+}
+
+/// getPromotedIntegerType - Returns the type that Promotable will
+/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
+/// integer type.
+QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
+ assert(!Promotable.isNull());
+ assert(Promotable->isPromotableIntegerType());
+ if (const EnumType *ET = Promotable->getAs<EnumType>())
+ return ET->getDecl()->getPromotionType();
+
+ if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
+ // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
+ // (3.9.1) can be converted to a prvalue of the first of the following
+ // types that can represent all the values of its underlying type:
+ // int, unsigned int, long int, unsigned long int, long long int, or
+ // unsigned long long int [...]
+ // FIXME: Is there some better way to compute this?
+ if (BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U ||
+ BT->getKind() == BuiltinType::Char16 ||
+ BT->getKind() == BuiltinType::Char32) {
+ bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
+ uint64_t FromSize = getTypeSize(BT);
+ QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
+ LongLongTy, UnsignedLongLongTy };
+ for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
+ uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
+ return PromoteTypes[Idx];
+ }
+ llvm_unreachable("char type should fit into long long");
+ }
+ }
+
+ // At this point, we should have a signed or unsigned integer type.
+ if (Promotable->isSignedIntegerType())
+ return IntTy;
+ uint64_t PromotableSize = getTypeSize(Promotable);
+ uint64_t IntSize = getTypeSize(IntTy);
+ assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
+ return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
+}
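+
+// Illustrative examples (not part of the original source), assuming 16-bit
+// short and 32-bit int: 'short' and 'unsigned short' both promote to 'int'
+// (int can represent every value of each), while an unsigned type exactly
+// as wide as int promotes to 'unsigned int'.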
+
+/// \brief Recurses in pointer/array types until it finds an objc retainable
+/// type and returns its ownership.
+Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
+ while (!T.isNull()) {
+ if (T.getObjCLifetime() != Qualifiers::OCL_None)
+ return T.getObjCLifetime();
+ if (T->isArrayType())
+ T = getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return Qualifiers::OCL_None;
+}
+
+/// getIntegerTypeOrder - Returns the highest ranked integer type:
+/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
+int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
+ const Type *LHSC = getCanonicalType(LHS).getTypePtr();
+ const Type *RHSC = getCanonicalType(RHS).getTypePtr();
+ if (LHSC == RHSC) return 0;
+
+ bool LHSUnsigned = LHSC->isUnsignedIntegerType();
+ bool RHSUnsigned = RHSC->isUnsignedIntegerType();
+
+ unsigned LHSRank = getIntegerRank(LHSC);
+ unsigned RHSRank = getIntegerRank(RHSC);
+
+ if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
+ if (LHSRank == RHSRank) return 0;
+ return LHSRank > RHSRank ? 1 : -1;
+ }
+
+ // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
+ if (LHSUnsigned) {
+ // If the unsigned [LHS] type is larger, return it.
+ if (LHSRank >= RHSRank)
+ return 1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return -1;
+ }
+
+ // If the unsigned [RHS] type is larger, return it.
+ if (RHSRank >= LHSRank)
+ return -1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return 1;
+}
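+
+// Illustrative examples (not part of the original source):
+//   getIntegerTypeOrder(LongTy, IntTy)         ->  1  (long outranks int)
+//   getIntegerTypeOrder(IntTy, IntTy)          ->  0
+//   getIntegerTypeOrder(UnsignedIntTy, LongTy) -> -1 on targets where long
+//                                                 is wider than int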
+
+static RecordDecl *
+CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
+ DeclContext *DC, IdentifierInfo *Id) {
+ SourceLocation Loc;
+ if (Ctx.getLangOpts().CPlusPlus)
+ return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+ else
+ return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+}
+
+// getCFConstantStringType - Return the type used for constant CFStrings.
+QualType ASTContext::getCFConstantStringType() const {
+ if (!CFConstantStringTypeDecl) {
+ CFConstantStringTypeDecl =
+ CreateRecordDecl(*this, TTK_Struct, TUDecl,
+ &Idents.get("NSConstantString"));
+ CFConstantStringTypeDecl->startDefinition();
+
+ QualType FieldTypes[4];
+
+ // const int *isa;
+ FieldTypes[0] = getPointerType(IntTy.withConst());
+ // int flags;
+ FieldTypes[1] = IntTy;
+ // const char *str;
+ FieldTypes[2] = getPointerType(CharTy.withConst());
+ // long length;
+ FieldTypes[3] = LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ CFConstantStringTypeDecl->addDecl(Field);
+ }
+
+ CFConstantStringTypeDecl->completeDefinition();
+ }
+
+ return getTagDeclType(CFConstantStringTypeDecl);
+}
+
+void ASTContext::setCFConstantStringType(QualType T) {
+ const RecordType *Rec = T->getAs<RecordType>();
+ assert(Rec && "Invalid CFConstantStringType");
+ CFConstantStringTypeDecl = Rec->getDecl();
+}
+
+QualType ASTContext::getBlockDescriptorType() const {
+ if (BlockDescriptorType)
+ return getTagDeclType(BlockDescriptorType);
+
+ RecordDecl *T;
+ // FIXME: Needs the FlagAppleBlock bit.
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
+ &Idents.get("__block_descriptor"));
+ T->startDefinition();
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ UnsignedLongTy,
+ };
+
+ const char *FieldNames[] = {
+ "reserved",
+ "Size"
+ };
+
+ for (size_t i = 0; i < 2; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
+ SourceLocation(),
+ &Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ T->addDecl(Field);
+ }
+
+ T->completeDefinition();
+
+ BlockDescriptorType = T;
+
+ return getTagDeclType(BlockDescriptorType);
+}
+
+QualType ASTContext::getBlockDescriptorExtendedType() const {
+ if (BlockDescriptorExtendedType)
+ return getTagDeclType(BlockDescriptorExtendedType);
+
+ RecordDecl *T;
+ // FIXME: Needs the FlagAppleBlock bit.
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
+ &Idents.get("__block_descriptor_withcopydispose"));
+ T->startDefinition();
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ UnsignedLongTy,
+ getPointerType(VoidPtrTy),
+ getPointerType(VoidPtrTy)
+ };
+
+ const char *FieldNames[] = {
+ "reserved",
+ "Size",
+ "CopyFuncPtr",
+ "DestroyFuncPtr"
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
+ SourceLocation(),
+ &Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ T->addDecl(Field);
+ }
+
+ T->completeDefinition();
+
+ BlockDescriptorExtendedType = T;
+
+ return getTagDeclType(BlockDescriptorExtendedType);
+}
+
+bool ASTContext::BlockRequiresCopying(QualType Ty) const {
+ if (Ty->isObjCRetainableType())
+ return true;
+ if (getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return RD->hasConstCopyConstructor();
+
+ }
+ }
+ return false;
+}
+
+QualType
+ASTContext::BuildByRefType(StringRef DeclName, QualType Ty) const {
+ // type = struct __Block_byref_1_X {
+ // void *__isa;
+ // struct __Block_byref_1_X *__forwarding;
+ // unsigned int __flags;
+ // unsigned int __size;
+ // void *__copy_helper; // as needed
+ // void *__destroy_helper; // as needed
+ // int X;
+ // } *
+
+ bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+
+ // FIXME: Move up
+ SmallString<36> Name;
+ llvm::raw_svector_ostream(Name) << "__Block_byref_" <<
+ ++UniqueBlockByRefTypeID << '_' << DeclName;
+ RecordDecl *T;
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get(Name.str()));
+ T->startDefinition();
+ QualType Int32Ty = IntTy;
+ assert(getIntWidth(IntTy) == 32 && "non-32bit int not supported");
+ QualType FieldTypes[] = {
+ getPointerType(VoidPtrTy),
+ getPointerType(getTagDeclType(T)),
+ Int32Ty,
+ Int32Ty,
+ getPointerType(VoidPtrTy),
+ getPointerType(VoidPtrTy),
+ Ty
+ };
+
+ StringRef FieldNames[] = {
+ "__isa",
+ "__forwarding",
+ "__flags",
+ "__size",
+ "__copy_helper",
+ "__destroy_helper",
+ DeclName,
+ };
+
+ for (size_t i = 0; i < 7; ++i) {
+ if (!HasCopyAndDispose && i >= 4 && i <= 5)
+ continue;
+ FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
+ SourceLocation(),
+ &Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0, /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ T->addDecl(Field);
+ }
+
+ T->completeDefinition();
+
+ return getPointerType(getTagDeclType(T));
+}
+
+TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
+ if (!ObjCInstanceTypeDecl)
+ ObjCInstanceTypeDecl = TypedefDecl::Create(*this,
+ getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ &Idents.get("instancetype"),
+ getTrivialTypeSourceInfo(getObjCIdType()));
+ return ObjCInstanceTypeDecl;
+}
+
+// This returns true if a type has been typedefed to BOOL:
+// typedef <type> BOOL;
+static bool isTypeTypedefedAsBOOL(QualType T) {
+ if (const TypedefType *TT = dyn_cast<TypedefType>(T))
+ if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
+ return II->isStr("BOOL");
+
+ return false;
+}
+
+/// getObjCEncodingTypeSize returns size of type for objective-c encoding
+/// purpose.
+CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
+ if (!type->isIncompleteArrayType() && type->isIncompleteType())
+ return CharUnits::Zero();
+
+ CharUnits sz = getTypeSizeInChars(type);
+
+ // Make all integer and enum types at least as large as an int
+ if (sz.isPositive() && type->isIntegralOrEnumerationType())
+ sz = std::max(sz, getTypeSizeInChars(IntTy));
+ // Treat arrays as pointers, since that's how they're passed in.
+ else if (type->isArrayType())
+ sz = getTypeSizeInChars(VoidPtrTy);
+ return sz;
+}
+
+static inline
+std::string charUnitsToString(const CharUnits &CU) {
+ return llvm::itostr(CU.getQuantity());
+}
+
+/// getObjCEncodingForBlock - Return the encoded type for this block
+/// declaration.
+std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
+ std::string S;
+
+ const BlockDecl *Decl = Expr->getBlockDecl();
+ QualType BlockTy =
+ Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
+ // Encode result type.
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S);
+ // Compute size of all parameters.
+ // Start with computing size of a pointer in number of bytes.
+ // FIXME: There might (and should) be a better way of doing this computation!
+ SourceLocation Loc;
+ CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
+ CharUnits ParmOffset = PtrSize;
+ for (BlockDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ assert (sz.isPositive() && "BlockExpr - Incomplete param type");
+ ParmOffset += sz;
+ }
+ // Size of the argument frame
+ S += charUnitsToString(ParmOffset);
+ // Block pointer and offset.
+ S += "@?0";
+
+ // Argument types.
+ ParmOffset = PtrSize;
+ for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E =
+ Decl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use the array's original type only if it has a known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return S;
+}
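+
+// Illustrative example (not part of the original source): for a block of
+// type 'int (^)(float)' on a target with 4-byte pointers, the code above
+// produces roughly "i8@?0f4" -- 'i' for the int result, an 8-byte argument
+// frame, the block pointer '@?' at offset 0 and the float 'f' at offset 4.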
+
+bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
+ std::string& S) {
+ // Encode result type.
+ getObjCEncodingForType(Decl->getResultType(), S);
+ CharUnits ParmOffset;
+ // Compute size of all parameters.
+ for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ return true;
+
+ assert (sz.isPositive() &&
+ "getObjCEncodingForFunctionDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ ParmOffset = CharUnits::Zero();
+
+ // Argument types.
+ for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use the array's original type only if it has a known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return false;
+}
+
+/// getObjCEncodingForMethodParameter - Return the encoded type for a single
+/// method parameter or return type. If Extended, include class names and
+/// block object types.
+void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
+ QualType T, std::string& S,
+ bool Extended) const {
+ // Encode type qualifier, 'in', 'inout', etc. for the parameter.
+ getObjCEncodingForTypeQualifier(QT, S);
+ // Encode parameter type.
+ getObjCEncodingForTypeImpl(T, S, true, true, 0,
+ true /*OutermostType*/,
+ false /*EncodingProperty*/,
+ false /*StructField*/,
+ Extended /*EncodeBlockParameters*/,
+ Extended /*EncodeClassNames*/);
+}
+
+/// getObjCEncodingForMethodDecl - Return the encoded type for this method
+/// declaration.
+bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
+ std::string& S,
+ bool Extended) const {
+ // FIXME: This is not very efficient.
+ // Encode return type.
+ getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
+ Decl->getResultType(), S, Extended);
+ // Compute size of all parameters.
+ // Start with computing size of a pointer in number of bytes.
+ // FIXME: There might (and should) be a better way of doing this computation!
+ SourceLocation Loc;
+ CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
+ // The first two arguments (self and _cmd) are pointers; account for
+ // their size.
+ CharUnits ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->sel_param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ return true;
+
+ assert (sz.isPositive() &&
+ "getObjCEncodingForMethodDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ S += "@0:";
+ S += charUnitsToString(PtrSize);
+
+ // Argument types.
+ ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->sel_param_end(); PI != E; ++PI) {
+ const ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use the array's original type only if it has a known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
+ PType, S, Extended);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return false;
+}
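+
+// Illustrative example (not part of the original source): for a method such
+// as '- (void)setCount:(int)c' on a target with 8-byte pointers, this
+// produces roughly "v20@0:8i16" -- 'v' result, 20-byte argument frame, self
+// ('@') at offset 0, _cmd (':') at offset 8 and the int at offset 16.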
+
+/// getObjCEncodingForPropertyDecl - Return the encoded type for this
+/// property declaration. If non-NULL, Container must be either an
+/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
+/// NULL when getting encodings for protocol properties.
+/// Property attributes are stored as a comma-delimited C string. The simple
+/// attributes readonly and bycopy are encoded as single characters. The
+/// parametrized attributes, getter=name, setter=name, and ivar=name, are
+/// encoded as single characters, followed by an identifier. Property types
+/// are also encoded as a parametrized attribute. The characters used to encode
+/// these attributes are defined by the following enumeration:
+/// @code
+/// enum PropertyAttributes {
+/// kPropertyReadOnly = 'R', // property is read-only.
+/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
+/// kPropertyByref = '&', // property is a reference to the value last assigned
+/// kPropertyDynamic = 'D', // property is dynamic
+/// kPropertyGetter = 'G', // followed by getter selector name
+/// kPropertySetter = 'S', // followed by setter selector name
+/// kPropertyInstanceVariable = 'V' // followed by instance variable name
+/// kPropertyType = 'T' // followed by old-style type encoding.
+/// kPropertyWeak = 'W' // 'weak' property
+/// kPropertyStrong = 'P' // property GC'able
+/// kPropertyNonAtomic = 'N' // property non-atomic
+/// };
+/// @endcode
+void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+ const Decl *Container,
+ std::string& S) const {
+ // Collect information from the property implementation decl(s).
+ bool Dynamic = false;
+ ObjCPropertyImplDecl *SynthesizePID = 0;
+
+ // FIXME: Duplicated code due to poor abstraction.
+ if (Container) {
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(Container)) {
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ i = CID->propimpl_begin(), e = CID->propimpl_end();
+ i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl() == PD) {
+ if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
+ Dynamic = true;
+ } else {
+ SynthesizePID = PID;
+ }
+ }
+ }
+ } else {
+ const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ i = OID->propimpl_begin(), e = OID->propimpl_end();
+ i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl() == PD) {
+ if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
+ Dynamic = true;
+ } else {
+ SynthesizePID = PID;
+ }
+ }
+ }
+ }
+ }
+
+ // FIXME: This is not very efficient.
+ S = "T";
+
+ // Encode result type.
+ // GCC has some special rules regarding encoding of properties, which
+ // closely resemble the encoding of ivars.
+ getObjCEncodingForTypeImpl(PD->getType(), S, true, true, 0,
+ true /* outermost type */,
+ true /* encoding for property */);
+
+ if (PD->isReadOnly()) {
+ S += ",R";
+ } else {
+ switch (PD->getSetterKind()) {
+ case ObjCPropertyDecl::Assign: break;
+ case ObjCPropertyDecl::Copy: S += ",C"; break;
+ case ObjCPropertyDecl::Retain: S += ",&"; break;
+ case ObjCPropertyDecl::Weak: S += ",W"; break;
+ }
+ }
+
+ // It really isn't clear at all what this means, since properties
+ // are "dynamic by default".
+ if (Dynamic)
+ S += ",D";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ S += ",N";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ S += ",G";
+ S += PD->getGetterName().getAsString();
+ }
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ S += ",S";
+ S += PD->getSetterName().getAsString();
+ }
+
+ if (SynthesizePID) {
+ const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
+ S += ",V";
+ S += OID->getNameAsString();
+ }
+
+ // FIXME: OBJCGC: weak & strong
+}
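+
+// Illustrative example (not part of the original source): a property
+// declared '@property (nonatomic, readonly) int count;' that is synthesized
+// onto an ivar named '_count' would be encoded roughly as "Ti,R,N,V_count".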
+
+/// getLegacyIntegralTypeEncoding -
+/// Another legacy compatibility encoding: 32-bit longs are encoded as
+/// 'l' or 'L', but not always. For typedefs, we need to use
+/// 'i' or 'I' instead if encoding a struct field, or a pointer!
+///
+void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
+ if (isa<TypedefType>(PointeeTy.getTypePtr())) {
+ if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
+ PointeeTy = UnsignedIntTy;
+ else
+ if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
+ PointeeTy = IntTy;
+ }
+ }
+}
+
+void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
+ const FieldDecl *Field) const {
+ // We follow the behavior of gcc, expanding structures which are
+ // directly pointed to, and expanding embedded structures. Note that
+ // these rules are sufficient to prevent recursive encoding of the
+ // same type.
+ getObjCEncodingForTypeImpl(T, S, true, true, Field,
+ true /* outermost type */);
+}
+
+static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) {
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unhandled builtin type kind");
+ case BuiltinType::Void: return 'v';
+ case BuiltinType::Bool: return 'B';
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar: return 'C';
+ case BuiltinType::UShort: return 'S';
+ case BuiltinType::UInt: return 'I';
+ case BuiltinType::ULong:
+ return C->getIntWidth(T) == 32 ? 'L' : 'Q';
+ case BuiltinType::UInt128: return 'T';
+ case BuiltinType::ULongLong: return 'Q';
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: return 'c';
+ case BuiltinType::Short: return 's';
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Int: return 'i';
+ case BuiltinType::Long:
+ return C->getIntWidth(T) == 32 ? 'l' : 'q';
+ case BuiltinType::LongLong: return 'q';
+ case BuiltinType::Int128: return 't';
+ case BuiltinType::Float: return 'f';
+ case BuiltinType::Double: return 'd';
+ case BuiltinType::LongDouble: return 'D';
+ }
+}
+
+static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
+ EnumDecl *Enum = ET->getDecl();
+
+  // The encoding of a non-fixed enum type is always 'i', regardless of size.
+ if (!Enum->isFixed())
+ return 'i';
+
+ // The encoding of a fixed enum type matches its fixed underlying type.
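+  // For example, an enum whose fixed underlying type is 'unsigned char' is
+  // expected to encode as 'C'.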
+ return ObjCEncodingForPrimitiveKind(C, Enum->getIntegerType());
+}
+
+static void EncodeBitField(const ASTContext *Ctx, std::string& S,
+ QualType T, const FieldDecl *FD) {
+ assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
+ S += 'b';
+ // The NeXT runtime encodes bit fields as b followed by the number of bits.
+ // The GNU runtime requires more information; bitfields are encoded as b,
+ // then the offset (in bits) of the first element, then the type of the
+ // bitfield, then the size in bits. For example, in this structure:
+ //
+ // struct
+ // {
+ // int integer;
+ // int flags:2;
+ // };
+ // On a 32-bit system, the encoding for flags would be b2 for the NeXT
+ // runtime, but b32i2 for the GNU runtime. The reason for this extra
+ // information is not especially sensible, but we're stuck with it for
+ // compatibility with GCC, although providing it breaks anything that
+ // actually uses runtime introspection and wants to work on both runtimes...
+ if (!Ctx->getLangOpts().NeXTRuntime) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
+ S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
+ if (const EnumType *ET = T->getAs<EnumType>())
+ S += ObjCEncodingForEnumType(Ctx, ET);
+ else
+ S += ObjCEncodingForPrimitiveKind(Ctx, T);
+ }
+ S += llvm::utostr(FD->getBitWidthValue(*Ctx));
+}
+
+// FIXME: Use SmallString for accumulating string.
+void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
+ bool ExpandPointedToStructures,
+ bool ExpandStructures,
+ const FieldDecl *FD,
+ bool OutermostType,
+ bool EncodingProperty,
+ bool StructField,
+ bool EncodeBlockParameters,
+ bool EncodeClassNames) const {
+ if (T->getAs<BuiltinType>()) {
+ if (FD && FD->isBitField())
+ return EncodeBitField(this, S, T, FD);
+ S += ObjCEncodingForPrimitiveKind(this, T);
+ return;
+ }
+
+ if (const ComplexType *CT = T->getAs<ComplexType>()) {
+ S += 'j';
+ getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, 0, false,
+ false);
+ return;
+ }
+
+  // Encode pointer or reference types.
+ QualType PointeeTy;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->isObjCSelType()) {
+ S += ':';
+ return;
+ }
+ PointeeTy = PT->getPointeeType();
+ }
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ PointeeTy = RT->getPointeeType();
+ if (!PointeeTy.isNull()) {
+ bool isReadOnly = false;
+ // For historical/compatibility reasons, the read-only qualifier of the
+ // pointee gets emitted _before_ the '^'. The read-only qualifier of
+ // the pointer itself gets ignored, _unless_ we are looking at a typedef!
+ // Also, do not emit the 'r' for anything but the outermost type!
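+    // For example, '@encode(const int *)' is expected to produce "r^i", while
+    // '@encode(int * const)' produces just "^i".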
+ if (isa<TypedefType>(T.getTypePtr())) {
+ if (OutermostType && T.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ } else if (OutermostType) {
+ QualType P = PointeeTy;
+ while (P->getAs<PointerType>())
+ P = P->getAs<PointerType>()->getPointeeType();
+ if (P.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ }
+ if (isReadOnly) {
+ // Another legacy compatibility encoding. Some ObjC qualifier and type
+ // combinations need to be rearranged.
+ // Rewrite "in const" from "nr" to "rn"
+ if (StringRef(S).endswith("nr"))
+ S.replace(S.end()-2, S.end(), "rn");
+ }
+
+ if (PointeeTy->isCharType()) {
+      // char pointer types should be encoded as '*' unless the pointee is a
+      // type that has been typedef'd to 'BOOL'.
+ if (!isTypeTypedefedAsBOOL(PointeeTy)) {
+ S += '*';
+ return;
+ }
+ } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) {
+ // GCC binary compat: Need to convert "struct objc_class *" to "#".
+ if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
+ S += '#';
+ return;
+ }
+ // GCC binary compat: Need to convert "struct objc_object *" to "@".
+ if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
+ S += '@';
+ return;
+ }
+ // fall through...
+ }
+ S += '^';
+ getLegacyIntegralTypeEncoding(PointeeTy);
+
+ getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures,
+ NULL);
+ return;
+ }
+
+ if (const ArrayType *AT =
+ // Ignore type qualifiers etc.
+ dyn_cast<ArrayType>(T->getCanonicalTypeInternal())) {
+ if (isa<IncompleteArrayType>(AT) && !StructField) {
+ // Incomplete arrays are encoded as a pointer to the array element.
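+      // For example, an incomplete 'int []' outside of a struct is expected
+      // to encode as "^i", whereas a constant 'int [4]' encodes as "[4i]"
+      // below.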
+ S += '^';
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ } else {
+ S += '[';
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ if (getTypeSize(CAT->getElementType()) == 0)
+ S += '0';
+ else
+ S += llvm::utostr(CAT->getSize().getZExtValue());
+ } else {
+        // Variable length arrays are encoded as a regular array with 0 elements.
+ assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
+ "Unknown array type!");
+ S += '0';
+ }
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ S += ']';
+ }
+ return;
+ }
+
+ if (T->getAs<FunctionType>()) {
+ S += '?';
+ return;
+ }
+
+ if (const RecordType *RTy = T->getAs<RecordType>()) {
+ RecordDecl *RDecl = RTy->getDecl();
+ S += RDecl->isUnion() ? '(' : '{';
+ // Anonymous structures print as '?'
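+    // For example, '@encode(struct Point { int x; int y; })' is expected to
+    // yield "{Point=ii}", while an unnamed struct yields "{?=...}".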
+ if (const IdentifierInfo *II = RDecl->getIdentifier()) {
+ S += II->getName();
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ (*this).getPrintingPolicy());
+
+ S += TemplateArgsStr;
+ }
+ } else {
+ S += '?';
+ }
+ if (ExpandStructures) {
+ S += '=';
+ if (!RDecl->isUnion()) {
+ getObjCEncodingForStructureImpl(RDecl, S, FD);
+ } else {
+ for (RecordDecl::field_iterator Field = RDecl->field_begin(),
+ FieldEnd = RDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (FD) {
+ S += '"';
+ S += Field->getNameAsString();
+ S += '"';
+ }
+
+ // Special case bit-fields.
+ if (Field->isBitField()) {
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
+ (*Field));
+ } else {
+ QualType qt = Field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true,
+ FD, /*OutermostType*/false,
+ /*EncodingProperty*/false,
+ /*StructField*/true);
+ }
+ }
+ }
+ }
+ S += RDecl->isUnion() ? ')' : '}';
+ return;
+ }
+
+ if (const EnumType *ET = T->getAs<EnumType>()) {
+ if (FD && FD->isBitField())
+ EncodeBitField(this, S, T, FD);
+ else
+ S += ObjCEncodingForEnumType(this, ET);
+ return;
+ }
+
+ if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
+ S += "@?"; // Unlike a pointer-to-function, which is "^?".
+ if (EncodeBlockParameters) {
+ const FunctionType *FT = BT->getPointeeType()->getAs<FunctionType>();
+
+ S += '<';
+ // Block return type
+ getObjCEncodingForTypeImpl(FT->getResultType(), S,
+ ExpandPointedToStructures, ExpandStructures,
+ FD,
+ false /* OutermostType */,
+ EncodingProperty,
+ false /* StructField */,
+ EncodeBlockParameters,
+ EncodeClassNames);
+ // Block self
+ S += "@?";
+ // Block parameters
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
+ E = FPT->arg_type_end(); I && (I != E); ++I) {
+ getObjCEncodingForTypeImpl(*I, S,
+ ExpandPointedToStructures,
+ ExpandStructures,
+ FD,
+ false /* OutermostType */,
+ EncodingProperty,
+ false /* StructField */,
+ EncodeBlockParameters,
+ EncodeClassNames);
+ }
+ }
+ S += '>';
+ }
+ return;
+ }
+
+ // Ignore protocol qualifiers when mangling at this level.
+ if (const ObjCObjectType *OT = T->getAs<ObjCObjectType>())
+ T = OT->getBaseType();
+
+ if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) {
+ // @encode(class_name)
+ ObjCInterfaceDecl *OI = OIT->getDecl();
+ S += '{';
+ const IdentifierInfo *II = OI->getIdentifier();
+ S += II->getName();
+ S += '=';
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ DeepCollectObjCIvars(OI, true, Ivars);
+ for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
+ const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
+ if (Field->isBitField())
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
+ else
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD);
+ }
+ S += '}';
+ return;
+ }
+
+ if (const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCIdType()) {
+ S += '@';
+ return;
+ }
+
+ if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
+ // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
+      // Since this is a binary compatibility issue, we need to consult with
+      // the runtime folks. Fortunately, this is a *very* obscure construct.
+ S += '#';
+ return;
+ }
+
+ if (OPT->isObjCQualifiedIdType()) {
+ getObjCEncodingForTypeImpl(getObjCIdType(), S,
+ ExpandPointedToStructures,
+ ExpandStructures, FD);
+ if (FD || EncodingProperty || EncodeClassNames) {
+      // Note that we do extended encoding of the protocol qualifier list
+      // only when doing ivar or property encoding.
+ S += '"';
+ for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
+ E = OPT->qual_end(); I != E; ++I) {
+ S += '<';
+ S += (*I)->getNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+
+ QualType PointeeTy = OPT->getPointeeType();
+ if (!EncodingProperty &&
+ isa<TypedefType>(PointeeTy.getTypePtr())) {
+      // Another historical/compatibility special case: we encode the
+      // underlying type, which comes out as '{...}'.
+ S += '^';
+ getObjCEncodingForTypeImpl(PointeeTy, S,
+ false, ExpandPointedToStructures,
+ NULL);
+ return;
+ }
+
+ S += '@';
+ if (OPT->getInterfaceDecl() &&
+ (FD || EncodingProperty || EncodeClassNames)) {
+ S += '"';
+ S += OPT->getInterfaceDecl()->getIdentifier()->getName();
+ for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
+ E = OPT->qual_end(); I != E; ++I) {
+ S += '<';
+ S += (*I)->getNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+
+ // gcc just blithely ignores member pointers.
+ // TODO: maybe there should be a mangling for these
+ if (T->getAs<MemberPointerType>())
+ return;
+
+ if (T->isVectorType()) {
+ // This matches gcc's encoding, even though technically it is
+ // insufficient.
+ // FIXME. We should do a better job than gcc.
+ return;
+ }
+
+ llvm_unreachable("@encode for type not implemented!");
+}
+
+void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
+ std::string &S,
+ const FieldDecl *FD,
+ bool includeVBases) const {
+ assert(RDecl && "Expected non-null RecordDecl");
+ assert(!RDecl->isUnion() && "Should not be called for unions");
+ if (!RDecl->getDefinition())
+ return;
+
+ CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
+ std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
+ const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
+
+ if (CXXRec) {
+ for (CXXRecordDecl::base_class_iterator
+ BI = CXXRec->bases_begin(),
+ BE = CXXRec->bases_end(); BI != BE; ++BI) {
+ if (!BI->isVirtual()) {
+ CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
+ uint64_t offs = layout.getBaseClassOffsetInBits(base);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, base));
+ }
+ }
+ }
+
+ unsigned i = 0;
+ for (RecordDecl::field_iterator Field = RDecl->field_begin(),
+ FieldEnd = RDecl->field_end();
+ Field != FieldEnd; ++Field, ++i) {
+ uint64_t offs = layout.getFieldOffset(i);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, *Field));
+ }
+
+ if (CXXRec && includeVBases) {
+ for (CXXRecordDecl::base_class_iterator
+ BI = CXXRec->vbases_begin(),
+ BE = CXXRec->vbases_end(); BI != BE; ++BI) {
+ CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
+ uint64_t offs = layout.getVBaseClassOffsetInBits(base);
+ if (FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
+ std::make_pair(offs, base));
+ }
+ }
+
+ CharUnits size;
+ if (CXXRec) {
+ size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
+ } else {
+ size = layout.getSize();
+ }
+
+ uint64_t CurOffs = 0;
+ std::multimap<uint64_t, NamedDecl *>::iterator
+ CurLayObj = FieldOrBaseOffsets.begin();
+
+ if ((CurLayObj != FieldOrBaseOffsets.end() && CurLayObj->first != 0) ||
+ (CurLayObj == FieldOrBaseOffsets.end() &&
+ CXXRec && CXXRec->isDynamicClass())) {
+ assert(CXXRec && CXXRec->isDynamicClass() &&
+ "Offset 0 was empty but no VTable ?");
+ if (FD) {
+ S += "\"_vptr$";
+ std::string recname = CXXRec->getNameAsString();
+ if (recname.empty()) recname = "?";
+ S += recname;
+ S += '"';
+ }
+ S += "^^?";
+ CurOffs += getTypeSize(VoidPtrTy);
+ }
+
+ if (!RDecl->hasFlexibleArrayMember()) {
+ // Mark the end of the structure.
+ uint64_t offs = toBits(size);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, (NamedDecl*)0));
+ }
+
+ for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
+ assert(CurOffs <= CurLayObj->first);
+
+ if (CurOffs < CurLayObj->first) {
+ uint64_t padding = CurLayObj->first - CurOffs;
+ // FIXME: There doesn't seem to be a way to indicate in the encoding that
+      // packing/alignment of members is different than normal, in which case
+ // the encoding will be out-of-sync with the real layout.
+ // If the runtime switches to just consider the size of types without
+ // taking into account alignment, we could make padding explicit in the
+ // encoding (e.g. using arrays of chars). The encoding strings would be
+      // longer in that case, though.
+ CurOffs += padding;
+ }
+
+ NamedDecl *dcl = CurLayObj->second;
+ if (dcl == 0)
+ break; // reached end of structure.
+
+ if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) {
+ // We expand the bases without their virtual bases since those are going
+ // in the initial structure. Note that this differs from gcc which
+ // expands virtual bases each time one is encountered in the hierarchy,
+ // making the encoding type bigger than it really is.
+ getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false);
+ assert(!base->isEmpty());
+ CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
+ } else {
+ FieldDecl *field = cast<FieldDecl>(dcl);
+ if (FD) {
+ S += '"';
+ S += field->getNameAsString();
+ S += '"';
+ }
+
+ if (field->isBitField()) {
+ EncodeBitField(this, S, field->getType(), field);
+ CurOffs += field->getBitWidthValue(*this);
+ } else {
+ QualType qt = field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true, FD,
+ /*OutermostType*/false,
+ /*EncodingProperty*/false,
+ /*StructField*/true);
+ CurOffs += getTypeSize(field->getType());
+ }
+ }
+ }
+}
+
+void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
+ std::string& S) const {
+ if (QT & Decl::OBJC_TQ_In)
+ S += 'n';
+ if (QT & Decl::OBJC_TQ_Inout)
+ S += 'N';
+ if (QT & Decl::OBJC_TQ_Out)
+ S += 'o';
+ if (QT & Decl::OBJC_TQ_Bycopy)
+ S += 'O';
+ if (QT & Decl::OBJC_TQ_Byref)
+ S += 'R';
+ if (QT & Decl::OBJC_TQ_Oneway)
+ S += 'V';
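+  // For illustration: a parameter declared 'out oneway' is expected to
+  // contribute the qualifier prefix "oV".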
+}
+
+void ASTContext::setBuiltinVaListType(QualType T) {
+ assert(BuiltinVaListType.isNull() && "__builtin_va_list type already set!");
+
+ BuiltinVaListType = T;
+}
+
+TypedefDecl *ASTContext::getObjCIdDecl() const {
+ if (!ObjCIdDecl) {
+ QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0);
+ T = getObjCObjectPointerType(T);
+ TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T);
+ ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Idents.get("id"), IdInfo);
+ }
+
+ return ObjCIdDecl;
+}
+
+TypedefDecl *ASTContext::getObjCSelDecl() const {
+ if (!ObjCSelDecl) {
+ QualType SelT = getPointerType(ObjCBuiltinSelTy);
+ TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT);
+ ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Idents.get("SEL"), SelInfo);
+ }
+ return ObjCSelDecl;
+}
+
+TypedefDecl *ASTContext::getObjCClassDecl() const {
+ if (!ObjCClassDecl) {
+ QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0);
+ T = getObjCObjectPointerType(T);
+ TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T);
+ ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Idents.get("Class"), ClassInfo);
+ }
+
+ return ObjCClassDecl;
+}
+
+ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
+ if (!ObjCProtocolClassDecl) {
+ ObjCProtocolClassDecl
+ = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ &Idents.get("Protocol"),
+ /*PrevDecl=*/0,
+ SourceLocation(), true);
+ }
+
+ return ObjCProtocolClassDecl;
+}
+
+void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
+ assert(ObjCConstantStringType.isNull() &&
+ "'NSConstantString' type already set!");
+
+ ObjCConstantStringType = getObjCInterfaceType(Decl);
+}
+
+/// \brief Retrieve the template name that corresponds to a non-empty
+/// lookup.
+TemplateName
+ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) const {
+ unsigned size = End - Begin;
+ assert(size > 1 && "set is not overloaded!");
+
+ void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
+ size * sizeof(FunctionTemplateDecl*));
+ OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size);
+
+ NamedDecl **Storage = OT->getStorage();
+ for (UnresolvedSetIterator I = Begin; I != End; ++I) {
+ NamedDecl *D = *I;
+ assert(isa<FunctionTemplateDecl>(D) ||
+ (isa<UsingShadowDecl>(D) &&
+ isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
+ *Storage++ = D;
+ }
+
+ return TemplateName(OT);
+}
+
+/// \brief Retrieve the template name that represents a qualified
+/// template name such as \c std::vector.
+TemplateName
+ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateDecl *Template) const {
+ assert(NNS && "Missing nested-name-specifier in qualified template name");
+
+ // FIXME: Canonicalization?
+ llvm::FoldingSetNodeID ID;
+ QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
+
+ void *InsertPos = 0;
+ QualifiedTemplateName *QTN =
+ QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ if (!QTN) {
+ QTN = new (*this,4) QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QualifiedTemplateNames.InsertNode(QTN, InsertPos);
+ }
+
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template apply.
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Name);
+
+ void *InsertPos = 0;
+ DependentTemplateName *QTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this,4) DependentTemplateName(NNS, Name);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
+ QTN = new (*this,4) DependentTemplateName(NNS, Name, Canon);
+ DependentTemplateName *CheckQTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckQTN && "Dependent type name canonicalization broken");
+ (void)CheckQTN;
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template operator+.
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ OverloadedOperatorKind Operator) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Operator);
+
+ void *InsertPos = 0;
+ DependentTemplateName *QTN
+ = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this,4) DependentTemplateName(NNS, Operator);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
+ QTN = new (*this,4) DependentTemplateName(NNS, Operator, Canon);
+
+ DependentTemplateName *CheckQTN
+ = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckQTN && "Dependent template name canonicalization broken");
+ (void)CheckQTN;
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+TemplateName
+ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
+ TemplateName replacement) const {
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
+
+ void *insertPos = 0;
+ SubstTemplateTemplateParmStorage *subst
+ = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);
+
+ if (!subst) {
+ subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement);
+ SubstTemplateTemplateParms.InsertNode(subst, insertPos);
+ }
+
+ return TemplateName(subst);
+}
+
+TemplateName
+ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) const {
+ ASTContext &Self = const_cast<ASTContext &>(*this);
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
+
+ void *InsertPos = 0;
+ SubstTemplateTemplateParmPackStorage *Subst
+ = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Subst) {
+ Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param,
+ ArgPack.pack_size(),
+ ArgPack.pack_begin());
+ SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
+ }
+
+ return TemplateName(Subst);
+}
+
+/// getFromTargetType - Given one of the integer types provided by
+/// TargetInfo, produce the corresponding type. The unsigned @p Type
+/// is actually a value of type @c TargetInfo::IntType.
+CanQualType ASTContext::getFromTargetType(unsigned Type) const {
+ switch (Type) {
+ case TargetInfo::NoInt: return CanQualType();
+ case TargetInfo::SignedShort: return ShortTy;
+ case TargetInfo::UnsignedShort: return UnsignedShortTy;
+ case TargetInfo::SignedInt: return IntTy;
+ case TargetInfo::UnsignedInt: return UnsignedIntTy;
+ case TargetInfo::SignedLong: return LongTy;
+ case TargetInfo::UnsignedLong: return UnsignedLongTy;
+ case TargetInfo::SignedLongLong: return LongLongTy;
+ case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
+ }
+
+ llvm_unreachable("Unhandled TargetInfo::IntType value");
+}
+
+//===----------------------------------------------------------------------===//
+// Type Predicates.
+//===----------------------------------------------------------------------===//
+
+/// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, Objective-C's
+/// garbage collection attributes.
+///
+Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ return Qualifiers::GCNone;
+
+ assert(getLangOpts().ObjC1);
+ Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
+
+  // Default behaviour under Objective-C's GC is for ObjC pointers
+  // (or pointers to them) to be treated as though they were declared
+  // as __strong.
+ if (GCAttrs == Qualifiers::GCNone) {
+ if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
+ return Qualifiers::Strong;
+ else if (Ty->isPointerType())
+ return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
+ } else {
+ // It's not valid to set GC attributes on anything that isn't a
+ // pointer.
+#ifndef NDEBUG
+ QualType CT = Ty->getCanonicalTypeInternal();
+ while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
+ CT = AT->getElementType();
+ assert(CT->isAnyPointerType() || CT->isBlockPointerType());
+#endif
+ }
+ return GCAttrs;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Compatibility Testing
+//===----------------------------------------------------------------------===//
+
+/// areCompatVectorTypes - Return true if the two specified vector types are
+/// compatible.
+static bool areCompatVectorTypes(const VectorType *LHS,
+ const VectorType *RHS) {
+ assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumElements() == RHS->getNumElements();
+}
+
+bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
+ QualType SecondVec) {
+ assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
+ assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
+
+ if (hasSameUnqualifiedType(FirstVec, SecondVec))
+ return true;
+
+ // Treat Neon vector types and most AltiVec vector types as if they are the
+ // equivalent GCC vector types.
+ const VectorType *First = FirstVec->getAs<VectorType>();
+ const VectorType *Second = SecondVec->getAs<VectorType>();
+ if (First->getNumElements() == Second->getNumElements() &&
+ hasSameType(First->getElementType(), Second->getElementType()) &&
+ First->getVectorKind() != VectorType::AltiVecPixel &&
+ First->getVectorKind() != VectorType::AltiVecBool &&
+ Second->getVectorKind() != VectorType::AltiVecPixel &&
+ Second->getVectorKind() != VectorType::AltiVecBool)
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
+//===----------------------------------------------------------------------===//
+
+/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
+/// inheritance hierarchy of 'rProto'.
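+/// For example, given '@protocol P1 @end' and '@protocol P2 <P1> @end',
+/// P1 is considered compatible with P2, since P2 inherits P1.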
+bool
+ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
+ ObjCProtocolDecl *rProto) const {
+ if (declaresSameEntity(lProto, rProto))
+ return true;
+ for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
+ E = rProto->protocol_end(); PI != E; ++PI)
+ if (ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ return false;
+}
+
+/// QualifiedIdConformsQualifiedId - compare id<p,...> with id<p1,...>
+/// return true if lhs's protocols conform to rhs's protocols; false
+/// otherwise.
+bool ASTContext::QualifiedIdConformsQualifiedId(QualType lhs, QualType rhs) {
+ if (lhs->isObjCQualifiedIdType() && rhs->isObjCQualifiedIdType())
+ return ObjCQualifiedIdTypesAreCompatible(lhs, rhs, false);
+ return false;
+}
+
+/// ObjCQualifiedClassTypesAreCompatible - compare Class<p,...> and
+/// Class<p1, ...>.
+bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs,
+ QualType rhs) {
+ const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
+
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ bool match = false;
+ ObjCProtocolDecl *lhsProto = *I;
+ for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
+ E = rhsOPT->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ return true;
+}
+
+/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
+/// ObjCQualifiedIDType.
+bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
+ bool compare) {
+ // Allow id<P..> and an 'id' or void* type in all cases.
+ if (lhs->isVoidPointerType() ||
+ lhs->isObjCIdType() || lhs->isObjCClassType())
+ return true;
+ else if (rhs->isVoidPointerType() ||
+ rhs->isObjCIdType() || rhs->isObjCClassType())
+ return true;
+
+ if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) {
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+
+ if (!rhsOPT) return false;
+
+ if (rhsOPT->qual_empty()) {
+      // If the RHS is an unqualified interface pointer "NSString*",
+ // make sure we check the class hierarchy.
+ if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+          // When comparing an id<P> on the lhs with a static type on the rhs,
+          // see if the static class implements all of id's protocols,
+          // directly or through its super class and categories.
+ if (!rhsID->ClassImplementsProtocol(*I, true))
+ return false;
+ }
+ }
+ // If there are no qualifiers and no interface, we have an 'id'.
+ return true;
+ }
+ // Both the right and left sides have qualifiers.
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *lhsProto = *I;
+ bool match = false;
+
+      // When comparing an id<P> on the lhs with a static type on the rhs,
+      // see if the static class implements all of id's protocols, directly
+      // or through its super class and categories.
+ for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
+ E = rhsOPT->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ // If the RHS is a qualified interface pointer "NSString<P>*",
+ // make sure we check the class hierarchy.
+ if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+          // When comparing an id<P> on the lhs with a static type on the rhs,
+          // see if the static class implements all of id's protocols,
+          // directly or through its super class and categories.
+ if (rhsID->ClassImplementsProtocol(*I, true)) {
+ match = true;
+ break;
+ }
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+ }
+
+ const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType();
+ assert(rhsQID && "One of the LHS/RHS should be id<x>");
+
+ if (const ObjCObjectPointerType *lhsOPT =
+ lhs->getAsObjCInterfacePointerType()) {
+    // Both the right and left sides have qualifiers.
+ for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(),
+ E = lhsOPT->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *lhsProto = *I;
+ bool match = false;
+
+      // When comparing an id<P> on the rhs with a static type on the lhs,
+      // see if the static class implements all of id's protocols, directly
+      // or through its super class and categories.
+      // First, each protocol in lhs's qualifier list must be found, directly
+      // or indirectly, in rhs's qualifier list, or it is a mismatch.
+ for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+    // The static class's protocols, or its super class or category protocols,
+    // must be found, directly or indirectly, in rhs's qualifier list, or it
+    // is a mismatch.
+ if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
+ CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
+ // This is rather dubious but matches gcc's behavior. If lhs has
+      // no type qualifiers and its class has no static protocol(s), assume
+      // that it is a mismatch.
+ if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
+ return false;
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ LHSInheritedProtocols.begin(),
+ E = LHSInheritedProtocols.end(); I != E; ++I) {
+ bool match = false;
+ ObjCProtocolDecl *lhsProto = (*I);
+ for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+/// canAssignObjCInterfaces - Return true if the two interface types are
+/// compatible for assignment from RHS to LHS. This handles validation of any
+/// protocol qualifiers on the LHS or RHS.
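+/// For example, assigning a 'Derived<P1, P2> *' to a 'Base<P1> *' is accepted
+/// (the RHS is a subclass and covers the LHS's protocols), whereas assigning
+/// a 'Base *' to a 'Derived *' is not.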
+///
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT) {
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+
+ // If either type represents the built-in 'id' or 'Class' types, return true.
+ if (LHS->isObjCUnqualifiedIdOrClass() ||
+ RHS->isObjCUnqualifiedIdOrClass())
+ return true;
+
+ if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId())
+ return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0),
+ false);
+
+ if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass())
+ return ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0));
+
+ // If we have 2 user-defined types, fall into that path.
+ if (LHS->getInterface() && RHS->getInterface())
+ return canAssignObjCInterfaces(LHS, RHS);
+
+ return false;
+}
+
+/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
+/// for providing type-safety for Objective-C pointers used to pass/return
+/// arguments in block literals. When passed as arguments, passing 'A *' where
+/// 'id' is expected is not OK; passing 'Sub *' where 'Super *' is expected is
+/// not OK. For the return type, the opposite direction is the one that is
+/// not OK.
+bool ASTContext::canAssignObjCInterfacesInBlockPointer(
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ bool BlockReturnType) {
+ if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
+ return true;
+
+ if (LHSOPT->isObjCBuiltinType()) {
+ return RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType();
+ }
+
+ if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
+ return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0),
+ false);
+
+ const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
+ const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
+ if (LHS && RHS) { // We have 2 user-defined types.
+ if (LHS != RHS) {
+ if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
+ return BlockReturnType;
+ if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
+ return !BlockReturnType;
+ }
+ else
+ return true;
+ }
+ return false;
+}
+
+/// getIntersectionOfProtocols - This routine finds the intersection of the
+/// sets of protocols inherited from two distinct Objective-C pointer objects.
+/// It is used to build the composite qualifier list of the composite type of
+/// the conditional expression involving two Objective-C pointer objects.
+static
+void getIntersectionOfProtocols(ASTContext &Context,
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ SmallVectorImpl<ObjCProtocolDecl *> &IntersectionOfProtocols) {
+
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+ assert(LHS->getInterface() && "LHS must have an interface base");
+ assert(RHS->getInterface() && "RHS must have an interface base");
+
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocolSet;
+ unsigned LHSNumProtocols = LHS->getNumProtocols();
+ if (LHSNumProtocols > 0)
+ InheritedProtocolSet.insert(LHS->qual_begin(), LHS->qual_end());
+ else {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
+ Context.CollectInheritedProtocols(LHS->getInterface(),
+ LHSInheritedProtocols);
+ InheritedProtocolSet.insert(LHSInheritedProtocols.begin(),
+ LHSInheritedProtocols.end());
+ }
+
+ unsigned RHSNumProtocols = RHS->getNumProtocols();
+ if (RHSNumProtocols > 0) {
+ ObjCProtocolDecl **RHSProtocols =
+ const_cast<ObjCProtocolDecl **>(RHS->qual_begin());
+ for (unsigned i = 0; i < RHSNumProtocols; ++i)
+ if (InheritedProtocolSet.count(RHSProtocols[i]))
+ IntersectionOfProtocols.push_back(RHSProtocols[i]);
+ } else {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols;
+ Context.CollectInheritedProtocols(RHS->getInterface(),
+ RHSInheritedProtocols);
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ RHSInheritedProtocols.begin(),
+ E = RHSInheritedProtocols.end(); I != E; ++I)
+ if (InheritedProtocolSet.count((*I)))
+ IntersectionOfProtocols.push_back((*I));
+ }
+}
+
+/// areCommonBaseCompatible - Returns the common base class of the two classes
+/// if one is found. Note that this is an O(N^2) algorithm, but it is only
+/// called as the last type comparison in a ?: expression of ObjC pointer
+/// types before a warning is issued, so its invocation is extremely rare.
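+/// For example, for 'cond ? (NSString<P> *)x : (NSNumber<P> *)y' the computed
+/// composite type would typically be 'NSObject<P> *', assuming both classes
+/// derive from NSObject and the protocol intersection is {P}.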
+QualType ASTContext::areCommonBaseCompatible(
+ const ObjCObjectPointerType *Lptr,
+ const ObjCObjectPointerType *Rptr) {
+ const ObjCObjectType *LHS = Lptr->getObjectType();
+ const ObjCObjectType *RHS = Rptr->getObjectType();
+ const ObjCInterfaceDecl* LDecl = LHS->getInterface();
+ const ObjCInterfaceDecl* RDecl = RHS->getInterface();
+ if (!LDecl || !RDecl || (declaresSameEntity(LDecl, RDecl)))
+ return QualType();
+
+ do {
+ LHS = cast<ObjCInterfaceType>(getObjCInterfaceType(LDecl));
+ if (canAssignObjCInterfaces(LHS, RHS)) {
+ SmallVector<ObjCProtocolDecl *, 8> Protocols;
+ getIntersectionOfProtocols(*this, Lptr, Rptr, Protocols);
+
+ QualType Result = QualType(LHS, 0);
+ if (!Protocols.empty())
+ Result = getObjCObjectType(Result, Protocols.data(), Protocols.size());
+ Result = getObjCObjectPointerType(Result);
+ return Result;
+ }
+ } while ((LDecl = LDecl->getSuperClass()));
+
+ return QualType();
+}
+
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
+ const ObjCObjectType *RHS) {
+ assert(LHS->getInterface() && "LHS is not an interface type");
+ assert(RHS->getInterface() && "RHS is not an interface type");
+
+ // Verify that the base decls are compatible: the RHS must be a subclass of
+ // the LHS.
+ if (!LHS->getInterface()->isSuperClassOf(RHS->getInterface()))
+ return false;
+
+ // RHS must have a superset of the protocols in the LHS. If the LHS is not
+ // protocol qualified at all, then we are good.
+ if (LHS->getNumProtocols() == 0)
+ return true;
+
+ // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't,
+ // more detailed analysis is required.
+ if (RHS->getNumProtocols() == 0) {
+    // OK if LHS is a superclass of RHS *and* this superclass is assignment
+    // compatible with LHS; false otherwise.
+ bool IsSuperClass =
+ LHS->getInterface()->isSuperClassOf(RHS->getInterface());
+ if (IsSuperClass) {
+      // OK if the conversion only narrows the protocol list; i.e., the RHS
+      // class (through its super classes and categories) implements every
+      // protocol in LHS's protocol list. For example,
+      // SuperObj<P1> = lhs<P1,P2> is ok, but SuperObj<P1,P2,P3> = lhs<P1,P2>
+      // is not.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
+ CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
+      // If the super class has no protocols, it is not a match.
+ if (SuperClassInheritedProtocols.empty())
+ return false;
+
+ for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
+ LHSPE = LHS->qual_end();
+ LHSPI != LHSPE; LHSPI++) {
+ bool SuperImplementsProtocol = false;
+ ObjCProtocolDecl *LHSProto = (*LHSPI);
+
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ SuperClassInheritedProtocols.begin(),
+ E = SuperClassInheritedProtocols.end(); I != E; ++I) {
+ ObjCProtocolDecl *SuperClassProto = (*I);
+ if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
+ SuperImplementsProtocol = true;
+ break;
+ }
+ }
+ if (!SuperImplementsProtocol)
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
+ LHSPE = LHS->qual_end();
+ LHSPI != LHSPE; LHSPI++) {
+ bool RHSImplementsProtocol = false;
+
+ // If the RHS doesn't implement the protocol on the left, the types
+ // are incompatible.
+ for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(),
+ RHSPE = RHS->qual_end();
+ RHSPI != RHSPE; RHSPI++) {
+ if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) {
+ RHSImplementsProtocol = true;
+ break;
+ }
+ }
+ // FIXME: For better diagnostics, consider passing back the protocol name.
+ if (!RHSImplementsProtocol)
+ return false;
+ }
+ // The RHS implements all protocols listed on the LHS.
+ return true;
+}
+
+bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
+ // get the "pointed to" types
+ const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
+
+ if (!LHSOPT || !RHSOPT)
+ return false;
+
+ return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
+ canAssignObjCInterfaces(RHSOPT, LHSOPT);
+}
+
+bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
+ return canAssignObjCInterfaces(
+ getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(),
+ getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>());
+}
+
+/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
+/// both shall have the identically qualified version of a compatible type.
+/// C99 6.2.7p1: Two types have compatible types if their types are the
+/// same. See 6.7.[2,3,5] for additional rules.
+bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
+ bool CompareUnqualified) {
+ if (getLangOpts().CPlusPlus)
+ return hasSameType(LHS, RHS);
+
+ return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
+}
+
+bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
+ return typesAreCompatible(LHS, RHS);
+}
+
+bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
+ return !mergeTypes(LHS, RHS, true).isNull();
+}
+
+/// mergeTransparentUnionType - if T is a transparent union type and a member
+/// of T is compatible with SubType, return the merged type, else return
+/// QualType()
+QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ if (const RecordType *UT = T->getAsUnionType()) {
+ RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end(); it != itend; ++it) {
+ QualType ET = it->getType().getUnqualifiedType();
+ QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
+ if (!MT.isNull())
+ return MT;
+ }
+ }
+ }
+
+ return QualType();
+}
+
+/// mergeFunctionArgumentTypes - merge two types which appear as function
+/// argument types
+QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ // GNU extension: two types are compatible if they appear as a function
+  // argument, one of the types is a transparent union type, and the other
+  // type is compatible with a union member.
+ QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
+ Unqualified);
+ if (!lmerge.isNull())
+ return lmerge;
+
+ QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
+ Unqualified);
+ if (!rmerge.isNull())
+ return rmerge;
+
+ return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
+}
+
+QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ const FunctionType *lbase = lhs->getAs<FunctionType>();
+ const FunctionType *rbase = rhs->getAs<FunctionType>();
+ const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
+ const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
+ bool allLTypes = true;
+ bool allRTypes = true;
+
+ // Check return type
+ QualType retType;
+ if (OfBlockPointer) {
+ QualType RHS = rbase->getResultType();
+ QualType LHS = lbase->getResultType();
+ bool UnqualifiedResult = Unqualified;
+ if (!UnqualifiedResult)
+ UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
+ retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
+ }
+ else
+ retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false,
+ Unqualified);
+ if (retType.isNull()) return QualType();
+
+ if (Unqualified)
+ retType = retType.getUnqualifiedType();
+
+ CanQualType LRetType = getCanonicalType(lbase->getResultType());
+ CanQualType RRetType = getCanonicalType(rbase->getResultType());
+ if (Unqualified) {
+ LRetType = LRetType.getUnqualifiedType();
+ RRetType = RRetType.getUnqualifiedType();
+ }
+
+ if (getCanonicalType(retType) != LRetType)
+ allLTypes = false;
+ if (getCanonicalType(retType) != RRetType)
+ allRTypes = false;
+
+ // FIXME: double check this
+ // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
+ // rbase->getRegParmAttr() != 0 &&
+ // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
+ FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
+ FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
+
+ // Compatible functions must have compatible calling conventions
+ if (!isSameCallConv(lbaseInfo.getCC(), rbaseInfo.getCC()))
+ return QualType();
+
+ // Regparm is part of the calling convention.
+ if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
+ return QualType();
+ if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
+ return QualType();
+
+ if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
+ return QualType();
+
+  // Function types which return are preferred over those that do not.
+ if (lbaseInfo.getNoReturn() && !rbaseInfo.getNoReturn())
+ allLTypes = false;
+ else if (!lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn())
+ allRTypes = false;
+ // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
+ bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
+
+ FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
+
+ if (lproto && rproto) { // two C99 style function prototypes
+ assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ "C++ shouldn't be here");
+ unsigned lproto_nargs = lproto->getNumArgs();
+ unsigned rproto_nargs = rproto->getNumArgs();
+
+ // Compatible functions must have the same number of arguments
+ if (lproto_nargs != rproto_nargs)
+ return QualType();
+
+ // Variadic and non-variadic functions aren't compatible
+ if (lproto->isVariadic() != rproto->isVariadic())
+ return QualType();
+
+ if (lproto->getTypeQuals() != rproto->getTypeQuals())
+ return QualType();
+
+ if (LangOpts.ObjCAutoRefCount &&
+ !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto))
+ return QualType();
+
+ // Check argument compatibility
+ SmallVector<QualType, 10> types;
+ for (unsigned i = 0; i < lproto_nargs; i++) {
+ QualType largtype = lproto->getArgType(i).getUnqualifiedType();
+ QualType rargtype = rproto->getArgType(i).getUnqualifiedType();
+ QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype,
+ OfBlockPointer,
+ Unqualified);
+ if (argtype.isNull()) return QualType();
+
+ if (Unqualified)
+ argtype = argtype.getUnqualifiedType();
+
+ types.push_back(argtype);
+ if (Unqualified) {
+ largtype = largtype.getUnqualifiedType();
+ rargtype = rargtype.getUnqualifiedType();
+ }
+
+ if (getCanonicalType(argtype) != getCanonicalType(largtype))
+ allLTypes = false;
+ if (getCanonicalType(argtype) != getCanonicalType(rargtype))
+ allRTypes = false;
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, types.begin(), types.size(), EPI);
+ }
+
+ if (lproto) allRTypes = false;
+ if (rproto) allLTypes = false;
+
+ const FunctionProtoType *proto = lproto ? lproto : rproto;
+ if (proto) {
+ assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ if (proto->isVariadic()) return QualType();
+ // Check that the types are compatible with the types that
+ // would result from default argument promotions (C99 6.7.5.3p15).
+ // The only types actually affected are promotable integer
+ // types and floats, which would be passed as a different
+ // type depending on whether the prototype is visible.
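+    // For example, merging 'int f()' with 'int f(float)' is expected to fail,
+    // because a float argument would be promoted to double when no prototype
+    // is visible, whereas 'int f()' and 'int f(int)' merge fine.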
+ unsigned proto_nargs = proto->getNumArgs();
+ for (unsigned i = 0; i < proto_nargs; ++i) {
+ QualType argTy = proto->getArgType(i);
+
+ // Look at the promotion type of enum types, since that is the type used
+ // to pass enum values.
+ if (const EnumType *Enum = argTy->getAs<EnumType>())
+ argTy = Enum->getDecl()->getPromotionType();
+
+ if (argTy->isPromotableIntegerType() ||
+ getCanonicalType(argTy).getUnqualifiedType() == FloatTy)
+ return QualType();
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, proto->arg_type_begin(),
+ proto->getNumArgs(), EPI);
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+ return getFunctionNoProtoType(retType, einfo);
+}
+
+QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
+ bool OfBlockPointer,
+ bool Unqualified, bool BlockReturnType) {
+ // C++ [expr]: If an expression initially has the type "reference to T", the
+ // type is adjusted to "T" prior to any further analysis, the expression
+ // designates the object or function denoted by the reference, and the
+ // expression is an lvalue unless the reference is an rvalue reference and
+ // the expression is a function call (possibly inside parentheses).
+ assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?");
+ assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?");
+
+ if (Unqualified) {
+ LHS = LHS.getUnqualifiedType();
+ RHS = RHS.getUnqualifiedType();
+ }
+
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+
+ // If the qualifiers are different, the types aren't compatible... mostly.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type
+ // mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
+ LQuals.getObjCLifetime() != RQuals.getObjCLifetime())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
+ return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
+ }
+ if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
+ return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
+ }
+ return QualType();
+ }
+
+ // Okay, qualifiers are equal.
+
+ Type::TypeClass LHSClass = LHSCan->getTypeClass();
+ Type::TypeClass RHSClass = RHSCan->getTypeClass();
+
+ // We want to consider the two function types to be the same for these
+ // comparisons, just force one to the other.
+ if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
+ if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
+
+ // Same as above for arrays
+ if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
+ LHSClass = Type::ConstantArray;
+ if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
+ RHSClass = Type::ConstantArray;
+
+ // ObjCInterfaces are just specialized ObjCObjects.
+ if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
+ if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
+
+ // Canonicalize ExtVector -> Vector.
+ if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
+ if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
+
+ // If the canonical type classes don't match.
+ if (LHSClass != RHSClass) {
+ // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
+ // a signed integer type, or an unsigned integer type.
+ // Compatibility is based on the underlying type, not the promotion
+ // type.
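+    // For example, in C an 'enum E { A }' whose underlying type is chosen to
+    // be 'unsigned int' is expected to be compatible with (and merge with)
+    // 'unsigned int' here.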
+ if (const EnumType* ETy = LHS->getAs<EnumType>()) {
+ QualType TINT = ETy->getDecl()->getIntegerType();
+ if (!TINT.isNull() && hasSameType(TINT, RHSCan.getUnqualifiedType()))
+ return RHS;
+ }
+ if (const EnumType* ETy = RHS->getAs<EnumType>()) {
+ QualType TINT = ETy->getDecl()->getIntegerType();
+ if (!TINT.isNull() && hasSameType(TINT, LHSCan.getUnqualifiedType()))
+ return LHS;
+ }
+ // allow block pointer type to match an 'id' type.
+ if (OfBlockPointer && !BlockReturnType) {
+ if (LHS->isObjCIdType() && RHS->isBlockPointerType())
+ return LHS;
+ if (RHS->isObjCIdType() && LHS->isBlockPointerType())
+ return RHS;
+ }
+
+ return QualType();
+ }
+
+ // The canonical type classes match.
+ switch (LHSClass) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ llvm_unreachable("C++ should never be in mergeTypes");
+
+ case Type::ObjCInterface:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::ExtVector:
+ llvm_unreachable("Types are eliminated above");
+
+ case Type::Pointer:
+ {
+ // Merge two pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType();
+ QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType();
+ if (Unqualified) {
+ LHSPointee = LHSPointee.getUnqualifiedType();
+ RHSPointee = RHSPointee.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getPointerType(ResultType);
+ }
+ case Type::BlockPointer:
+ {
+ // Merge two block pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType();
+ QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType();
+ if (Unqualified) {
+ LHSPointee = LHSPointee.getUnqualifiedType();
+ RHSPointee = RHSPointee.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getBlockPointerType(ResultType);
+ }
+ case Type::Atomic:
+ {
+ // Merge two atomic types, while trying to preserve typedef info
+ QualType LHSValue = LHS->getAs<AtomicType>()->getValueType();
+ QualType RHSValue = RHS->getAs<AtomicType>()->getValueType();
+ if (Unqualified) {
+ LHSValue = LHSValue.getUnqualifiedType();
+ RHSValue = RHSValue.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
+ return RHS;
+ return getAtomicType(ResultType);
+ }
+ case Type::ConstantArray:
+ {
+ const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
+ const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
+ if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
+ return QualType();
+
+ QualType LHSElem = getAsArrayType(LHS)->getElementType();
+ QualType RHSElem = getAsArrayType(RHS)->getElementType();
+ if (Unqualified) {
+ LHSElem = LHSElem.getUnqualifiedType();
+ RHSElem = RHSElem.getUnqualifiedType();
+ }
+
+ QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
+ const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
+ if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of LHS, but the type
+ // has to be different.
+ return LHS;
+ }
+ if (RVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of RHS, but the type
+ // has to be different.
+ return RHS;
+ }
+ if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
+ if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
+ return getIncompleteArrayType(ResultType,
+ ArrayType::ArraySizeModifier(), 0);
+ }
+ case Type::FunctionNoProto:
+ return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
+ case Type::Record:
+ case Type::Enum:
+ return QualType();
+ case Type::Builtin:
+ // Only exactly equal builtin types are compatible, which is tested above.
+ return QualType();
+ case Type::Complex:
+ // Distinct complex types are incompatible.
+ return QualType();
+ case Type::Vector:
+ // FIXME: The merged type should be an ExtVector!
+ if (areCompatVectorTypes(LHSCan->getAs<VectorType>(),
+ RHSCan->getAs<VectorType>()))
+ return LHS;
+ return QualType();
+ case Type::ObjCObject: {
+ // Check if the types are assignment compatible.
+ // FIXME: This should be type compatibility, e.g. whether
+ // "LHS x; RHS x;" at global scope is legal.
+ const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>();
+ const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>();
+ if (canAssignObjCInterfaces(LHSIface, RHSIface))
+ return LHS;
+
+ return QualType();
+ }
+ case Type::ObjCObjectPointer: {
+ if (OfBlockPointer) {
+ if (canAssignObjCInterfacesInBlockPointer(
+ LHS->getAs<ObjCObjectPointerType>(),
+ RHS->getAs<ObjCObjectPointerType>(),
+ BlockReturnType))
+ return LHS;
+ return QualType();
+ }
+ if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(),
+ RHS->getAs<ObjCObjectPointerType>()))
+ return LHS;
+
+ return QualType();
+ }
+ }
+
+ llvm_unreachable("Invalid Type::Class!");
+}
+
+bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
+ const FunctionProtoType *FromFunctionType,
+ const FunctionProtoType *ToFunctionType) {
+ if (FromFunctionType->hasAnyConsumedArgs() !=
+ ToFunctionType->hasAnyConsumedArgs())
+ return false;
+ FunctionProtoType::ExtProtoInfo FromEPI =
+ FromFunctionType->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo ToEPI =
+ ToFunctionType->getExtProtoInfo();
+ if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments)
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ if (FromEPI.ConsumedArguments[ArgIdx] !=
+ ToEPI.ConsumedArguments[ArgIdx])
+ return false;
+ }
+ return true;
+}
+
+/// mergeObjCGCQualifiers - This routine merges the ObjC GC qualifiers of 'LHS'
+/// and 'RHS' and returns the merged version, including for function
+/// return types.
+QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+ if (RHSCan->isFunctionType()) {
+ if (!LHSCan->isFunctionType())
+ return QualType();
+ QualType OldReturnType =
+ cast<FunctionType>(RHSCan.getTypePtr())->getResultType();
+ QualType NewReturnType =
+ cast<FunctionType>(LHSCan.getTypePtr())->getResultType();
+ QualType ResReturnType =
+ mergeObjCGCQualifiers(NewReturnType, OldReturnType);
+ if (ResReturnType.isNull())
+ return QualType();
+ if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
+ // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
+ // In either case, use OldReturnType to build the new function type.
+ const FunctionType *F = LHS->getAs<FunctionType>();
+ if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = getFunctionExtInfo(LHS);
+ QualType ResultType
+ = getFunctionType(OldReturnType, FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI);
+ return ResultType;
+ }
+ }
+ return QualType();
+ }
+
+ // If the qualifiers are different, the types can still be merged.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong)
+ return LHS;
+ if (GC_R == Qualifiers::Strong)
+ return RHS;
+ return QualType();
+ }
+
+ if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
+ QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
+ if (ResQT == LHSBaseQT)
+ return LHS;
+ if (ResQT == RHSBaseQT)
+ return RHS;
+ }
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Predicates
+//===----------------------------------------------------------------------===//
+
+unsigned ASTContext::getIntWidth(QualType T) const {
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ T = ET->getDecl()->getIntegerType();
+ if (T->isBooleanType())
+ return 1;
+ // For builtin types, just use the standard type sizing method
+ return (unsigned)getTypeSize(T);
+}
+
+QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
+ assert(T->hasSignedIntegerRepresentation() && "Unexpected type");
+
+ // Turn <4 x signed int> -> <4 x unsigned int>
+ if (const VectorType *VTy = T->getAs<VectorType>())
+ return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
+ VTy->getNumElements(), VTy->getVectorKind());
+
+ // For enums, we return the unsigned version of the base type.
+ if (const EnumType *ETy = T->getAs<EnumType>())
+ T = ETy->getDecl()->getIntegerType();
+
+ const BuiltinType *BTy = T->getAs<BuiltinType>();
+ assert(BTy && "Unexpected signed integer type");
+ switch (BTy->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return UnsignedCharTy;
+ case BuiltinType::Short:
+ return UnsignedShortTy;
+ case BuiltinType::Int:
+ return UnsignedIntTy;
+ case BuiltinType::Long:
+ return UnsignedLongTy;
+ case BuiltinType::LongLong:
+ return UnsignedLongLongTy;
+ case BuiltinType::Int128:
+ return UnsignedInt128Ty;
+ default:
+ llvm_unreachable("Unexpected signed integer type");
+ }
+}
+
+ASTMutationListener::~ASTMutationListener() { }
+
+
+//===----------------------------------------------------------------------===//
+// Builtin Type Computation
+//===----------------------------------------------------------------------===//
+
+/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
+/// pointer over the consumed characters. This returns the resultant type. If
+/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
+/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
+/// a vector of "i*".
+///
+/// RequiresICE is filled in on return to indicate whether the value is required
+/// to be an Integer Constant Expression.
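+///
+/// For example, the descriptor "Ii" decodes as 'int' with RequiresICE set,
+/// "ULLi" as 'unsigned long long', and "V4f" as a vector of four 'float'
+/// elements.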
+static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
+ ASTContext::GetBuiltinTypeError &Error,
+ bool &RequiresICE,
+ bool AllowTypeModifiers) {
+ // Modifiers.
+ int HowLong = 0;
+ bool Signed = false, Unsigned = false;
+ RequiresICE = false;
+
+ // Read the prefixed modifiers first.
+ bool Done = false;
+ while (!Done) {
+ switch (*Str++) {
+ default: Done = true; --Str; break;
+ case 'I':
+ RequiresICE = true;
+ break;
+ case 'S':
+ assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Signed && "Can't use 'S' modifier multiple times!");
+ Signed = true;
+ break;
+ case 'U':
+ assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Unsigned && "Can't use 'U' modifier multiple times!");
+ Unsigned = true;
+ break;
+ case 'L':
+ assert(HowLong <= 2 && "Can't have LLLL modifier");
+ ++HowLong;
+ break;
+ }
+ }
+
+ QualType Type;
+
+ // Read the base type.
+ switch (*Str++) {
+ default: llvm_unreachable("Unknown builtin type letter!");
+ case 'v':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'v'!");
+ Type = Context.VoidTy;
+ break;
+ case 'f':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'f'!");
+ Type = Context.FloatTy;
+ break;
+ case 'd':
+ assert(HowLong < 2 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'd'!");
+ if (HowLong)
+ Type = Context.LongDoubleTy;
+ else
+ Type = Context.DoubleTy;
+ break;
+ case 's':
+ assert(HowLong == 0 && "Bad modifiers used with 's'!");
+ if (Unsigned)
+ Type = Context.UnsignedShortTy;
+ else
+ Type = Context.ShortTy;
+ break;
+ case 'i':
+ if (HowLong == 3)
+ Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
+ else if (HowLong == 2)
+ Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ else if (HowLong == 1)
+ Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
+ else
+ Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
+ break;
+ case 'c':
+ assert(HowLong == 0 && "Bad modifiers used with 'c'!");
+ if (Signed)
+ Type = Context.SignedCharTy;
+ else if (Unsigned)
+ Type = Context.UnsignedCharTy;
+ else
+ Type = Context.CharTy;
+ break;
+ case 'b': // boolean
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
+ Type = Context.BoolTy;
+ break;
+ case 'z': // size_t.
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
+ Type = Context.getSizeType();
+ break;
+ case 'F':
+ Type = Context.getCFConstantStringType();
+ break;
+ case 'G':
+ Type = Context.getObjCIdType();
+ break;
+ case 'H':
+ Type = Context.getObjCSelType();
+ break;
+ case 'a':
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ break;
+ case 'A':
+ // This is a "reference" to a va_list; however, what exactly
+ // this means depends on how va_list is defined. There are two
+ // different kinds of va_list: ones passed by value, and ones
+ // passed by reference. An example of a by-value va_list is
+ // x86, where va_list is a char*. An example of by-ref va_list
+ // is x86-64, where va_list is a __va_list_tag[1]. For x86,
+ // we want this argument to be a char*&; for x86-64, we want
+ // it to be a __va_list_tag*.
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ if (Type->isArrayType())
+ Type = Context.getArrayDecayedType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ case 'V': {
+ char *End;
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ // TODO: No way to make AltiVec vectors in builtins yet.
+ Type = Context.getVectorType(ElementType, NumElements,
+ VectorType::GenericVector);
+ break;
+ }
+ case 'X': {
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
+ false);
+ assert(!RequiresICE && "Can't require complex ICE");
+ Type = Context.getComplexType(ElementType);
+ break;
+ }
+ case 'Y' : {
+ Type = Context.getPointerDiffType();
+ break;
+ }
+ case 'P':
+ Type = Context.getFILEType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_stdio;
+ return QualType();
+ }
+ break;
+ case 'J':
+ if (Signed)
+ Type = Context.getsigjmp_bufType();
+ else
+ Type = Context.getjmp_bufType();
+
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_setjmp;
+ return QualType();
+ }
+ break;
+ case 'K':
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
+ Type = Context.getucontext_tType();
+
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_ucontext;
+ return QualType();
+ }
+ break;
+ }
+
+ // If there are modifiers and if we're allowed to parse them, go for it.
+ Done = !AllowTypeModifiers;
+ while (!Done) {
+ switch (char c = *Str++) {
+ default: Done = true; --Str; break;
+ case '*':
+ case '&': {
+ // Both pointers and references can have their pointee types
+ // qualified with an address space.
+ char *End;
+ unsigned AddrSpace = strtoul(Str, &End, 10);
+ if (End != Str && AddrSpace != 0) {
+ Type = Context.getAddrSpaceQualType(Type, AddrSpace);
+ Str = End;
+ }
+ if (c == '*')
+ Type = Context.getPointerType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ }
+ // FIXME: There's no way to have a built-in with an rvalue ref arg.
+ case 'C':
+ Type = Type.withConst();
+ break;
+ case 'D':
+ Type = Context.getVolatileType(Type);
+ break;
+ case 'R':
+ Type = Type.withRestrict();
+ break;
+ }
+ }
+
+ assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
+ "Integer constant 'I' type must be an integer");
+
+ return Type;
+}
+
+/// GetBuiltinType - Return the type for the specified builtin.
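+/// For example, a type string of "ifd" would yield the function type
+/// 'int (float, double)'; a trailing '.' marks the builtin as variadic.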
+QualType ASTContext::GetBuiltinType(unsigned Id,
+ GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs) const {
+ const char *TypeStr = BuiltinInfo.GetTypeString(Id);
+
+ SmallVector<QualType, 8> ArgTypes;
+
+ bool RequiresICE = false;
+ Error = GE_None;
+ QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
+ RequiresICE, true);
+ if (Error != GE_None)
+ return QualType();
+
+ assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
+
+ while (TypeStr[0] && TypeStr[0] != '.') {
+ QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
+ if (Error != GE_None)
+ return QualType();
+
+ // If this argument is required to be an IntegerConstantExpression and the
+ // caller cares, fill in the bitmask we return.
+ if (RequiresICE && IntegerConstantArgs)
+ *IntegerConstantArgs |= 1 << ArgTypes.size();
+
+ // Do array -> pointer decay. The builtin should use the decayed type.
+ if (Ty->isArrayType())
+ Ty = getArrayDecayedType(Ty);
+
+ ArgTypes.push_back(Ty);
+ }
+
+ assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
+ "'.' should only occur at end of builtin type list!");
+
+ FunctionType::ExtInfo EI;
+ if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
+
+ bool Variadic = (TypeStr[0] == '.');
+
+ // We really shouldn't be making a no-proto type here, especially in C++.
+ if (ArgTypes.empty() && Variadic)
+ return getFunctionNoProtoType(ResType, EI);
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = EI;
+ EPI.Variadic = Variadic;
+
+ return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(), EPI);
+}
+
+GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
+ GVALinkage External = GVA_StrongExternal;
+
+ Linkage L = FD->getLinkage();
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return GVA_Internal;
+
+ case ExternalLinkage:
+ switch (FD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ External = GVA_StrongExternal;
+ break;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ImplicitInstantiation:
+ External = GVA_TemplateInstantiation;
+ break;
+ }
+ }
+
+ if (!FD->isInlined())
+ return External;
+
+ if (!getLangOpts().CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
+ // GNU or C99 inline semantics. Determine whether this symbol should be
+ // externally visible.
+ if (FD->isInlineDefinitionExternallyVisible())
+ return External;
+
+ // C99 inline semantics, where the symbol is not externally visible.
+ return GVA_C99Inline;
+ }
+
+ // C++0x [temp.explicit]p9:
+ // [ Note: The intent is that an inline function that is the subject of
+ // an explicit instantiation declaration will still be implicitly
+ // instantiated when used so that the body can be considered for
+ // inlining, but that no out-of-line copy of the inline function would be
+ // generated in the translation unit. -- end note ]
+ if (FD->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return GVA_C99Inline;
+
+ return GVA_CXXInline;
+}
+
+GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
+ // If this is a static data member, compute the kind of template
+ // specialization. Otherwise, this variable is not part of a
+ // template.
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (VD->isStaticDataMember())
+ TSK = VD->getTemplateSpecializationKind();
+
+ Linkage L = VD->getLinkage();
+ if (L == ExternalLinkage && getLangOpts().CPlusPlus &&
+ VD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return GVA_Internal;
+
+ case ExternalLinkage:
+ switch (TSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return GVA_StrongExternal;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Variable should not be instantiated");
+ // Fall through to treat this like any other instantiation.
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ImplicitInstantiation:
+ return GVA_TemplateInstantiation;
+ }
+ }
+
+ llvm_unreachable("Invalid Linkage!");
+}
+
+bool ASTContext::DeclMustBeEmitted(const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->isFileVarDecl())
+ return false;
+ } else if (!isa<FunctionDecl>(D))
+ return false;
+
+ // Weak references don't produce any output by themselves.
+ if (D->hasAttr<WeakRefAttr>())
+ return false;
+
+ // Aliases and used decls are required.
+ if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
+ return true;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Forward declarations aren't required.
+ if (!FD->doesThisDeclarationHaveABody())
+ return FD->doesDeclarationForceExternallyVisibleDefinition();
+
+ // Constructors and destructors are required.
+ if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
+ return true;
+
+ // The key function for a class is required.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *RD = MD->getParent();
+ if (MD->isOutOfLine() && RD->isDynamicClass()) {
+ const CXXMethodDecl *KeyFunc = getKeyFunction(RD);
+ if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
+ return true;
+ }
+ }
+
+ GVALinkage Linkage = GetGVALinkageForFunction(FD);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ // Implicit template instantiations can also be deferred in C++.
+ if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
+ Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+ return false;
+ return true;
+ }
+
+ const VarDecl *VD = cast<VarDecl>(D);
+ assert(VD->isFileVarDecl() && "Expected file scoped var");
+
+ if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly)
+ return false;
+
+ // Structs that have non-trivial constructors or destructors are required.
+
+ // FIXME: Handle references.
+ // FIXME: Be more selective about which constructors we care about.
+ if (const RecordType *RT = VD->getType()->getAs<RecordType>()) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (RD->hasDefinition() && !(RD->hasTrivialDefaultConstructor() &&
+ RD->hasTrivialCopyConstructor() &&
+ RD->hasTrivialMoveConstructor() &&
+ RD->hasTrivialDestructor()))
+ return true;
+ }
+ }
+
+ GVALinkage L = GetGVALinkageForVariable(VD);
+ if (L == GVA_Internal || L == GVA_TemplateInstantiation) {
+ if (!(VD->getInit() && VD->getInit()->HasSideEffects(*this)))
+ return false;
+ }
+
+ return true;
+}
+
+CallingConv ASTContext::getDefaultMethodCallConv() {
+ // Pass through to the C++ ABI object
+ return ABI->getDefaultMethodCallConv();
+}
+
+bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // Pass through to the C++ ABI object
+ return ABI->isNearlyEmpty(RD);
+}
+
+MangleContext *ASTContext::createMangleContext() {
+ switch (Target->getCXXABI()) {
+ case CXXABI_ARM:
+ case CXXABI_Itanium:
+ return createItaniumMangleContext(*this, getDiagnostics());
+ case CXXABI_Microsoft:
+ return createMicrosoftMangleContext(*this, getDiagnostics());
+ }
+ llvm_unreachable("Unsupported ABI");
+}
+
+CXXABI::~CXXABI() {}
+
+size_t ASTContext::getSideTableAllocatedMemory() const {
+ return ASTRecordLayouts.getMemorySize()
+ + llvm::capacity_in_bytes(ObjCLayouts)
+ + llvm::capacity_in_bytes(KeyFunctions)
+ + llvm::capacity_in_bytes(ObjCImpls)
+ + llvm::capacity_in_bytes(BlockVarCopyInits)
+ + llvm::capacity_in_bytes(DeclAttrs)
+ + llvm::capacity_in_bytes(InstantiatedFromStaticDataMember)
+ + llvm::capacity_in_bytes(InstantiatedFromUsingDecl)
+ + llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl)
+ + llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl)
+ + llvm::capacity_in_bytes(OverriddenMethods)
+ + llvm::capacity_in_bytes(Types)
+ + llvm::capacity_in_bytes(VariableArrayTypes)
+ + llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
+}
+
+unsigned ASTContext::getLambdaManglingNumber(CXXMethodDecl *CallOperator) {
+ CXXRecordDecl *Lambda = CallOperator->getParent();
+ return LambdaMangleContexts[Lambda->getDeclContext()]
+ .getManglingNumber(CallOperator);
+}
+
+
+void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
+ ParamIndices[D] = index;
+}
+
+unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
+ ParameterIndexTable::const_iterator I = ParamIndices.find(D);
+ assert(I != ParamIndices.end() &&
+ "ParmIndices lacks entry set by ParmVarDecl");
+ return I->second;
+}
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
new file mode 100644
index 0000000..ca4fe26
--- /dev/null
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -0,0 +1,331 @@
+//===--- ASTDiagnostic.cpp - Diagnostic Printing Hooks for AST Nodes ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a diagnostic formatting hook for AST elements.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTDiagnostic.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+// Returns a desugared version of the QualType, and marks ShouldAKA as true
+// whenever we remove significant sugar from the type.
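+// For example, desugaring a typedef or an alias-template specialization sets
+// ShouldAKA, whereas stripping an ElaboratedType or ParenType does not.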
+static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
+ QualifierCollector QC;
+
+ while (true) {
+ const Type *Ty = QC.strip(QT);
+
+ // Don't aka just because we saw an elaborated type...
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
+ QT = ET->desugar();
+ continue;
+ }
+ // ... or a paren type ...
+ if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ QT = PT->desugar();
+ continue;
+ }
+ // ...or a substituted template type parameter ...
+ if (const SubstTemplateTypeParmType *ST =
+ dyn_cast<SubstTemplateTypeParmType>(Ty)) {
+ QT = ST->desugar();
+ continue;
+ }
+ // ...or an attributed type...
+ if (const AttributedType *AT = dyn_cast<AttributedType>(Ty)) {
+ QT = AT->desugar();
+ continue;
+ }
+ // ... or an auto type.
+ if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
+ if (!AT->isSugared())
+ break;
+ QT = AT->desugar();
+ continue;
+ }
+
+ // Don't desugar template specializations, unless it's an alias template.
+ if (const TemplateSpecializationType *TST
+ = dyn_cast<TemplateSpecializationType>(Ty))
+ if (!TST->isTypeAlias())
+ break;
+
+ // Don't desugar magic Objective-C types.
+ if (QualType(Ty,0) == Context.getObjCIdType() ||
+ QualType(Ty,0) == Context.getObjCClassType() ||
+ QualType(Ty,0) == Context.getObjCSelType() ||
+ QualType(Ty,0) == Context.getObjCProtoType())
+ break;
+
+ // Don't desugar va_list.
+ if (QualType(Ty,0) == Context.getBuiltinVaListType())
+ break;
+
+ // Otherwise, do a single-step desugar.
+ QualType Underlying;
+ bool IsSugar = false;
+ switch (Ty->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Base)
+#define TYPE(Class, Base) \
+case Type::Class: { \
+const Class##Type *CTy = cast<Class##Type>(Ty); \
+if (CTy->isSugared()) { \
+IsSugar = true; \
+Underlying = CTy->desugar(); \
+} \
+break; \
+}
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // If it wasn't sugared, we're done.
+ if (!IsSugar)
+ break;
+
+ // If the desugared type is a vector type, we don't want to expand
+ // it, it will turn into an attribute mess. People want their "vec4".
+ if (isa<VectorType>(Underlying))
+ break;
+
+ // Don't desugar through the primary typedef of an anonymous type.
+ if (const TagType *UTT = Underlying->getAs<TagType>())
+ if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
+ if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl())
+ break;
+
+ // Record that we actually looked through an opaque type here.
+ ShouldAKA = true;
+ QT = Underlying;
+ }
+
+ // If we have a pointer-like type, desugar the pointee as well.
+ // FIXME: Handle other pointer-like types.
+ if (const PointerType *Ty = QT->getAs<PointerType>()) {
+ QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
+ QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
+ QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ }
+
+ return QC.apply(Context, QT);
+}
+
+/// \brief Convert the given type to a string suitable for printing as part of
+/// a diagnostic.
+///
+/// There are four main criteria when determining whether we should have an
+/// a.k.a. clause when pretty-printing a type:
+///
+/// 1) Some types provide very minimal sugar that doesn't impede the
+/// user's understanding --- for example, elaborated type
+/// specifiers. If this is all the sugar we see, we don't want an
+/// a.k.a. clause.
+/// 2) Some types are technically sugared but are much more familiar
+/// when seen in their sugared form --- for example, va_list,
+/// vector types, and the magic Objective C types. We don't
+/// want to desugar these, even if we do produce an a.k.a. clause.
+/// 3) Some types may have already been desugared previously in this diagnostic;
+/// if this is the case, doing another "aka" would just be clutter.
+/// 4) Two different types within the same diagnostic have the same output
+/// string. In this case, force an a.k.a. clause with the desugared type when
+/// doing so will provide additional information.
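+///
+/// For example, a type written as 'MyInt' that is a typedef for 'int' may be
+/// printed as "'MyInt' (aka 'int')".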
+///
+/// \param Context the context in which the type was allocated
+/// \param Ty the type to print
+/// \param QualTypeVals pointer values to QualTypes which are used in the
+/// diagnostic message
+static std::string
+ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
+ const DiagnosticsEngine::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ ArrayRef<intptr_t> QualTypeVals) {
+ // FIXME: Playing with std::string is really slow.
+ bool ForceAKA = false;
+ QualType CanTy = Ty.getCanonicalType();
+ std::string S = Ty.getAsString(Context.getPrintingPolicy());
+ std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
+
+ for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
+ QualType CompareTy =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
+ if (CompareTy.isNull())
+ continue;
+ if (CompareTy == Ty)
+ continue; // Same types
+ QualType CompareCanTy = CompareTy.getCanonicalType();
+ if (CompareCanTy == CanTy)
+ continue; // Same canonical types
+ std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
+ bool aka;
+ QualType CompareDesugar = Desugar(Context, CompareTy, aka);
+ std::string CompareDesugarStr =
+ CompareDesugar.getAsString(Context.getPrintingPolicy());
+ if (CompareS != S && CompareDesugarStr != S)
+ continue; // The type string is different than the comparison string
+ // and the desugared comparison string.
+ std::string CompareCanS =
+ CompareCanTy.getAsString(Context.getPrintingPolicy());
+
+ if (CompareCanS == CanS)
+ continue; // No new info from canonical type
+
+ ForceAKA = true;
+ break;
+ }
+
+ // Check to see if we already desugared this type in this
+ // diagnostic. If so, don't do it again.
+ bool Repeated = false;
+ for (unsigned i = 0; i != NumPrevArgs; ++i) {
+ // TODO: Handle ak_declcontext case.
+ if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
+ void *Ptr = (void*)PrevArgs[i].second;
+ QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
+ if (PrevTy == Ty) {
+ Repeated = true;
+ break;
+ }
+ }
+ }
+
+ // Consider producing an a.k.a. clause if removing all the direct
+ // sugar gives us something "significantly different".
+ if (!Repeated) {
+ bool ShouldAKA = false;
+ QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
+ if (ShouldAKA || ForceAKA) {
+ if (DesugaredTy == Ty) {
+ DesugaredTy = Ty.getCanonicalType();
+ }
+ std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy());
+ if (akaStr != S) {
+ S = "'" + S + "' (aka '" + akaStr + "')";
+ return S;
+ }
+ }
+ }
+
+ S = "'" + S + "'";
+ return S;
+}
+
+void clang::FormatASTNodeDiagnosticArgument(
+ DiagnosticsEngine::ArgumentKind Kind,
+ intptr_t Val,
+ const char *Modifier,
+ unsigned ModLen,
+ const char *Argument,
+ unsigned ArgLen,
+ const DiagnosticsEngine::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals) {
+ ASTContext &Context = *static_cast<ASTContext*>(Cookie);
+
+ std::string S;
+ bool NeedQuotes = true;
+
+ switch (Kind) {
+ default: llvm_unreachable("unknown ArgumentKind");
+ case DiagnosticsEngine::ak_qualtype: {
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for QualType argument");
+
+ QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val)));
+ S = ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, NumPrevArgs,
+ QualTypeVals);
+ NeedQuotes = false;
+ break;
+ }
+ case DiagnosticsEngine::ak_declarationname: {
+ DeclarationName N = DeclarationName::getFromOpaqueInteger(Val);
+ S = N.getAsString();
+
+ if (ModLen == 9 && !memcmp(Modifier, "objcclass", 9) && ArgLen == 0)
+ S = '+' + S;
+ else if (ModLen == 12 && !memcmp(Modifier, "objcinstance", 12)
+ && ArgLen==0)
+ S = '-' + S;
+ else
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for DeclarationName argument");
+ break;
+ }
+ case DiagnosticsEngine::ak_nameddecl: {
+ bool Qualified;
+ if (ModLen == 1 && Modifier[0] == 'q' && ArgLen == 0)
+ Qualified = true;
+ else {
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for NamedDecl* argument");
+ Qualified = false;
+ }
+ const NamedDecl *ND = reinterpret_cast<const NamedDecl*>(Val);
+ ND->getNameForDiagnostic(S, Context.getPrintingPolicy(), Qualified);
+ break;
+ }
+ case DiagnosticsEngine::ak_nestednamespec: {
+ llvm::raw_string_ostream OS(S);
+ reinterpret_cast<NestedNameSpecifier*>(Val)->print(OS,
+ Context.getPrintingPolicy());
+ NeedQuotes = false;
+ break;
+ }
+ case DiagnosticsEngine::ak_declcontext: {
+ DeclContext *DC = reinterpret_cast<DeclContext *> (Val);
+ assert(DC && "Should never have a null declaration context");
+
+ if (DC->isTranslationUnit()) {
+ // FIXME: Get these strings from some localized place
+ if (Context.getLangOpts().CPlusPlus)
+ S = "the global namespace";
+ else
+ S = "the global scope";
+ } else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
+ S = ConvertTypeToDiagnosticString(Context,
+ Context.getTypeDeclType(Type),
+ PrevArgs, NumPrevArgs, QualTypeVals);
+ } else {
+ // FIXME: Get these strings from some localized place
+ NamedDecl *ND = cast<NamedDecl>(DC);
+ if (isa<NamespaceDecl>(ND))
+ S += "namespace ";
+ else if (isa<ObjCMethodDecl>(ND))
+ S += "method ";
+ else if (isa<FunctionDecl>(ND))
+ S += "function ";
+
+ S += "'";
+ ND->getNameForDiagnostic(S, Context.getPrintingPolicy(), true);
+ S += "'";
+ }
+ NeedQuotes = false;
+ break;
+ }
+ }
+
+ if (NeedQuotes)
+ Output.push_back('\'');
+
+ Output.append(S.begin(), S.end());
+
+ if (NeedQuotes)
+ Output.push_back('\'');
+}
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
new file mode 100644
index 0000000..3879907
--- /dev/null
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -0,0 +1,4676 @@
+//===--- ASTImporter.cpp - Importing ASTs from other Contexts ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTImporter class which imports AST nodes from one
+// context into another context.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTImporter.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <deque>
+
+namespace clang {
+ class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
+ public DeclVisitor<ASTNodeImporter, Decl *>,
+ public StmtVisitor<ASTNodeImporter, Stmt *> {
+ ASTImporter &Importer;
+
+ public:
+ explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) { }
+
+ using TypeVisitor<ASTNodeImporter, QualType>::Visit;
+ using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
+ using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
+
+ // Importing types
+ QualType VisitType(const Type *T);
+ QualType VisitBuiltinType(const BuiltinType *T);
+ QualType VisitComplexType(const ComplexType *T);
+ QualType VisitPointerType(const PointerType *T);
+ QualType VisitBlockPointerType(const BlockPointerType *T);
+ QualType VisitLValueReferenceType(const LValueReferenceType *T);
+ QualType VisitRValueReferenceType(const RValueReferenceType *T);
+ QualType VisitMemberPointerType(const MemberPointerType *T);
+ QualType VisitConstantArrayType(const ConstantArrayType *T);
+ QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
+ QualType VisitVariableArrayType(const VariableArrayType *T);
+ // FIXME: DependentSizedArrayType
+ // FIXME: DependentSizedExtVectorType
+ QualType VisitVectorType(const VectorType *T);
+ QualType VisitExtVectorType(const ExtVectorType *T);
+ QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
+ QualType VisitFunctionProtoType(const FunctionProtoType *T);
+ // FIXME: UnresolvedUsingType
+ QualType VisitParenType(const ParenType *T);
+ QualType VisitTypedefType(const TypedefType *T);
+ QualType VisitTypeOfExprType(const TypeOfExprType *T);
+ // FIXME: DependentTypeOfExprType
+ QualType VisitTypeOfType(const TypeOfType *T);
+ QualType VisitDecltypeType(const DecltypeType *T);
+ QualType VisitUnaryTransformType(const UnaryTransformType *T);
+ QualType VisitAutoType(const AutoType *T);
+ // FIXME: DependentDecltypeType
+ QualType VisitRecordType(const RecordType *T);
+ QualType VisitEnumType(const EnumType *T);
+ // FIXME: TemplateTypeParmType
+ // FIXME: SubstTemplateTypeParmType
+ QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
+ QualType VisitElaboratedType(const ElaboratedType *T);
+ // FIXME: DependentNameType
+ // FIXME: DependentTemplateSpecializationType
+ QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
+ QualType VisitObjCObjectType(const ObjCObjectType *T);
+ QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
+
+ // Importing declarations
+ bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
+ DeclContext *&LexicalDC, DeclarationName &Name,
+ SourceLocation &Loc);
+ void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = 0);
+ void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
+ DeclarationNameInfo& To);
+ void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+
+ /// \brief What we should import from the definition.
+ enum ImportDefinitionKind {
+ /// \brief Import the default subset of the definition, which might be
+ /// nothing (if minimal import is set) or might be everything (if minimal
+ /// import is not set).
+ IDK_Default,
+ /// \brief Import everything.
+ IDK_Everything,
+ /// \brief Import only the bare bones needed to establish a valid
+ /// DeclContext.
+ IDK_Basic
+ };
+
+ bool shouldForceImportDeclContext(ImportDefinitionKind IDK) {
+ return IDK == IDK_Everything ||
+ (IDK == IDK_Default && !Importer.isMinimalImport());
+ }
+
+ bool ImportDefinition(RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ TemplateParameterList *ImportTemplateParameterList(
+ TemplateParameterList *Params);
+ TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
+ bool ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs);
+ bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord);
+ bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum);
+ bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
+ Decl *VisitDecl(Decl *D);
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
+ Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
+ Decl *VisitTypedefDecl(TypedefDecl *D);
+ Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
+ Decl *VisitEnumDecl(EnumDecl *D);
+ Decl *VisitRecordDecl(RecordDecl *D);
+ Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
+ Decl *VisitFunctionDecl(FunctionDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
+ Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
+ Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
+ Decl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
+ Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
+ Decl *VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+
+ // Importing statements
+ Stmt *VisitStmt(Stmt *S);
+
+ // Importing expressions
+ Expr *VisitExpr(Expr *E);
+ Expr *VisitDeclRefExpr(DeclRefExpr *E);
+ Expr *VisitIntegerLiteral(IntegerLiteral *E);
+ Expr *VisitCharacterLiteral(CharacterLiteral *E);
+ Expr *VisitParenExpr(ParenExpr *E);
+ Expr *VisitUnaryOperator(UnaryOperator *E);
+ Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ Expr *VisitBinaryOperator(BinaryOperator *E);
+ Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
+ Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
+ };
+}
+using namespace clang;
+
+//----------------------------------------------------------------------------
+// Structural Equivalence
+//----------------------------------------------------------------------------
+
+namespace {
+ struct StructuralEquivalenceContext {
+ /// \brief AST contexts for which we are checking structural equivalence.
+ ASTContext &C1, &C2;
+
+ /// \brief The set of "tentative" equivalences between two canonical
+ /// declarations, mapping from a declaration in the first context to the
+ /// declaration in the second context that we believe to be equivalent.
+ llvm::DenseMap<Decl *, Decl *> TentativeEquivalences;
+
+ /// \brief Queue of declarations in the first context whose equivalence
+ /// with a declaration in the second context still needs to be verified.
+ std::deque<Decl *> DeclsToCheck;
+
+ /// \brief Declaration (from, to) pairs that are known not to be equivalent
+ /// (which we have already complained about).
+ llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls;
+
+ /// \brief Whether we're being strict about the spelling of types when
+ /// unifying two types.
+ bool StrictTypeSpelling;
+
+ StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2,
+ llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls,
+ bool StrictTypeSpelling = false)
+ : C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls),
+ StrictTypeSpelling(StrictTypeSpelling) { }
+
+ /// \brief Determine whether the two declarations are structurally
+ /// equivalent.
+ bool IsStructurallyEquivalent(Decl *D1, Decl *D2);
+
+ /// \brief Determine whether the two types are structurally equivalent.
+ bool IsStructurallyEquivalent(QualType T1, QualType T2);
+
+ private:
+ /// \brief Finish checking all of the structural equivalences.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool Finish();
+
+ public:
+ DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
+ return C1.getDiagnostics().Report(Loc, DiagID);
+ }
+
+ DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
+ return C2.getDiagnostics().Report(Loc, DiagID);
+ }
+ };
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Decl *D1, Decl *D2);
+
+/// \brief Determine if two APInts have the same value, after zero-extending
+/// one of them (if needed!) to ensure that the bit-widths match.
+static bool IsSameValue(const llvm::APInt &I1, const llvm::APInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth())
+ return I1 == I2;
+
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return I1 == I2.zext(I1.getBitWidth());
+
+ return I1.zext(I2.getBitWidth()) == I2;
+}
+
+/// \brief Determine if two APSInts have the same value, zero- or sign-extending
+/// as needed.
+static bool IsSameValue(const llvm::APSInt &I1, const llvm::APSInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+ return I1 == I2;
+
+ // Check for a bit-width mismatch.
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return IsSameValue(I1, I2.extend(I1.getBitWidth()));
+ else if (I2.getBitWidth() > I1.getBitWidth())
+ return IsSameValue(I1.extend(I2.getBitWidth()), I2);
+
+ // We have a signedness mismatch. Turn the signed value into an unsigned
+ // value.
+ if (I1.isSigned()) {
+ if (I1.isNegative())
+ return false;
+
+ return llvm::APSInt(I1, true) == I2;
+ }
+
+ if (I2.isNegative())
+ return false;
+
+ return I1 == llvm::APSInt(I2, true);
+}
+
+/// \brief Determine structural equivalence of two expressions.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Expr *E1, Expr *E2) {
+ if (!E1 || !E2)
+ return E1 == E2;
+
+ // FIXME: Actually perform a structural comparison!
+ return true;
+}
+
+/// \brief Determine whether two identifiers are equivalent.
+static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
+ const IdentifierInfo *Name2) {
+ if (!Name1 || !Name2)
+ return Name1 == Name2;
+
+ return Name1->getName() == Name2->getName();
+}
+
+/// \brief Determine whether two nested-name-specifiers are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NestedNameSpecifier *NNS1,
+ NestedNameSpecifier *NNS2) {
+ // FIXME: Implement!
+ return true;
+}
+
+/// \brief Determine whether two template arguments are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateArgument &Arg1,
+ const TemplateArgument &Arg2) {
+ if (Arg1.getKind() != Arg2.getKind())
+ return false;
+
+ switch (Arg1.getKind()) {
+ case TemplateArgument::Null:
+ return true;
+
+ case TemplateArgument::Type:
+ return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
+
+ case TemplateArgument::Integral:
+ if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
+ Arg2.getIntegralType()))
+ return false;
+
+ return IsSameValue(*Arg1.getAsIntegral(), *Arg2.getAsIntegral());
+
+ case TemplateArgument::Declaration:
+ if (!Arg1.getAsDecl() || !Arg2.getAsDecl())
+ return !Arg1.getAsDecl() && !Arg2.getAsDecl();
+ return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
+
+ case TemplateArgument::Template:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplate(),
+ Arg2.getAsTemplate());
+
+ case TemplateArgument::TemplateExpansion:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplateOrTemplatePattern(),
+ Arg2.getAsTemplateOrTemplatePattern());
+
+ case TemplateArgument::Expression:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsExpr(), Arg2.getAsExpr());
+
+ case TemplateArgument::Pack:
+ if (Arg1.pack_size() != Arg2.pack_size())
+ return false;
+
+ for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Arg1.pack_begin()[I],
+ Arg2.pack_begin()[I]))
+ return false;
+
+ return true;
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
+/// \brief Determine structural equivalence for the common part of array
+/// types.
+static bool IsArrayStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const ArrayType *Array1,
+ const ArrayType *Array2) {
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getElementType(),
+ Array2->getElementType()))
+ return false;
+ if (Array1->getSizeModifier() != Array2->getSizeModifier())
+ return false;
+ if (Array1->getIndexTypeQualifiers() != Array2->getIndexTypeQualifiers())
+ return false;
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two types.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2) {
+ if (T1.isNull() || T2.isNull())
+ return T1.isNull() && T2.isNull();
+
+ if (!Context.StrictTypeSpelling) {
+ // We aren't being strict about token-to-token equivalence of types,
+ // so map down to the canonical type.
+ T1 = Context.C1.getCanonicalType(T1);
+ T2 = Context.C2.getCanonicalType(T2);
+ }
+
+ if (T1.getQualifiers() != T2.getQualifiers())
+ return false;
+
+ Type::TypeClass TC = T1->getTypeClass();
+
+ if (T1->getTypeClass() != T2->getTypeClass()) {
+ // Compare function types with prototypes vs. without prototypes as if
+ // both did not have prototypes.
+ if (T1->getTypeClass() == Type::FunctionProto &&
+ T2->getTypeClass() == Type::FunctionNoProto)
+ TC = Type::FunctionNoProto;
+ else if (T1->getTypeClass() == Type::FunctionNoProto &&
+ T2->getTypeClass() == Type::FunctionProto)
+ TC = Type::FunctionNoProto;
+ else
+ return false;
+ }
+
+ switch (TC) {
+ case Type::Builtin:
+ // FIXME: Deal with Char_S/Char_U.
+ if (cast<BuiltinType>(T1)->getKind() != cast<BuiltinType>(T2)->getKind())
+ return false;
+ break;
+
+ case Type::Complex:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ComplexType>(T1)->getElementType(),
+ cast<ComplexType>(T2)->getElementType()))
+ return false;
+ break;
+
+ case Type::Pointer:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PointerType>(T1)->getPointeeType(),
+ cast<PointerType>(T2)->getPointeeType()))
+ return false;
+ break;
+
+ case Type::BlockPointer:
+ if (!IsStructurallyEquivalent(Context,
+ cast<BlockPointerType>(T1)->getPointeeType(),
+ cast<BlockPointerType>(T2)->getPointeeType()))
+ return false;
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType *Ref1 = cast<ReferenceType>(T1);
+ const ReferenceType *Ref2 = cast<ReferenceType>(T2);
+ if (Ref1->isSpelledAsLValue() != Ref2->isSpelledAsLValue())
+ return false;
+ if (Ref1->isInnerRef() != Ref2->isInnerRef())
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Ref1->getPointeeTypeAsWritten(),
+ Ref2->getPointeeTypeAsWritten()))
+ return false;
+ break;
+ }
+
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtr1 = cast<MemberPointerType>(T1);
+ const MemberPointerType *MemPtr2 = cast<MemberPointerType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ MemPtr1->getPointeeType(),
+ MemPtr2->getPointeeType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ QualType(MemPtr1->getClass(), 0),
+ QualType(MemPtr2->getClass(), 0)))
+ return false;
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1);
+ const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2);
+ if (!IsSameValue(Array1->getSize(), Array2->getSize()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+ break;
+ }
+
+ case Type::IncompleteArray:
+ if (!IsArrayStructurallyEquivalent(Context,
+ cast<ArrayType>(T1),
+ cast<ArrayType>(T2)))
+ return false;
+ break;
+
+ case Type::VariableArray: {
+ const VariableArrayType *Array1 = cast<VariableArrayType>(T1);
+ const VariableArrayType *Array2 = cast<VariableArrayType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getSizeExpr(), Array2->getSizeExpr()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *Array1 = cast<DependentSizedArrayType>(T1);
+ const DependentSizedArrayType *Array2 = cast<DependentSizedArrayType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getSizeExpr(), Array2->getSizeExpr()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *Vec1
+ = cast<DependentSizedExtVectorType>(T1);
+ const DependentSizedExtVectorType *Vec2
+ = cast<DependentSizedExtVectorType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getSizeExpr(), Vec2->getSizeExpr()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ break;
+ }
+
+ case Type::Vector:
+ case Type::ExtVector: {
+ const VectorType *Vec1 = cast<VectorType>(T1);
+ const VectorType *Vec2 = cast<VectorType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ if (Vec1->getNumElements() != Vec2->getNumElements())
+ return false;
+ if (Vec1->getVectorKind() != Vec2->getVectorKind())
+ return false;
+ break;
+ }
+
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
+ const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
+ if (Proto1->getNumArgs() != Proto2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getArgType(I),
+ Proto2->getArgType(I)))
+ return false;
+ }
+ if (Proto1->isVariadic() != Proto2->isVariadic())
+ return false;
+ if (Proto1->getExceptionSpecType() != Proto2->getExceptionSpecType())
+ return false;
+ if (Proto1->getExceptionSpecType() == EST_Dynamic) {
+ if (Proto1->getNumExceptions() != Proto2->getNumExceptions())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getExceptionType(I),
+ Proto2->getExceptionType(I)))
+ return false;
+ }
+ } else if (Proto1->getExceptionSpecType() == EST_ComputedNoexcept) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getNoexceptExpr(),
+ Proto2->getNoexceptExpr()))
+ return false;
+ }
+ if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
+ return false;
+
+ // Fall through to check the bits common with FunctionNoProtoType.
+ }
+
+ case Type::FunctionNoProto: {
+ const FunctionType *Function1 = cast<FunctionType>(T1);
+ const FunctionType *Function2 = cast<FunctionType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Function1->getResultType(),
+ Function2->getResultType()))
+ return false;
+ if (Function1->getExtInfo() != Function2->getExtInfo())
+ return false;
+ break;
+ }
+
+ case Type::UnresolvedUsing:
+ if (!IsStructurallyEquivalent(Context,
+ cast<UnresolvedUsingType>(T1)->getDecl(),
+ cast<UnresolvedUsingType>(T2)->getDecl()))
+ return false;
+
+ break;
+
+ case Type::Attributed:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getModifiedType(),
+ cast<AttributedType>(T2)->getModifiedType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getEquivalentType(),
+ cast<AttributedType>(T2)->getEquivalentType()))
+ return false;
+ break;
+
+ case Type::Paren:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ParenType>(T1)->getInnerType(),
+ cast<ParenType>(T2)->getInnerType()))
+ return false;
+ break;
+
+ case Type::Typedef:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypedefType>(T1)->getDecl(),
+ cast<TypedefType>(T2)->getDecl()))
+ return false;
+ break;
+
+ case Type::TypeOfExpr:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypeOfExprType>(T1)->getUnderlyingExpr(),
+ cast<TypeOfExprType>(T2)->getUnderlyingExpr()))
+ return false;
+ break;
+
+ case Type::TypeOf:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypeOfType>(T1)->getUnderlyingType(),
+ cast<TypeOfType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
+  case Type::UnaryTransform:
+    if (!IsStructurallyEquivalent(Context,
+                                  cast<UnaryTransformType>(T1)->getUnderlyingType(),
+                                  cast<UnaryTransformType>(T2)->getUnderlyingType()))
+      return false;
+    break;
+
+ case Type::Decltype:
+ if (!IsStructurallyEquivalent(Context,
+ cast<DecltypeType>(T1)->getUnderlyingExpr(),
+ cast<DecltypeType>(T2)->getUnderlyingExpr()))
+ return false;
+ break;
+
+ case Type::Auto:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AutoType>(T1)->getDeducedType(),
+ cast<AutoType>(T2)->getDeducedType()))
+ return false;
+ break;
+
+ case Type::Record:
+ case Type::Enum:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TagType>(T1)->getDecl(),
+ cast<TagType>(T2)->getDecl()))
+ return false;
+ break;
+
+ case Type::TemplateTypeParm: {
+ const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1);
+ const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2);
+ if (Parm1->getDepth() != Parm2->getDepth())
+ return false;
+ if (Parm1->getIndex() != Parm2->getIndex())
+ return false;
+ if (Parm1->isParameterPack() != Parm2->isParameterPack())
+ return false;
+
+ // Names of template type parameters are never significant.
+ break;
+ }
+
+ case Type::SubstTemplateTypeParm: {
+ const SubstTemplateTypeParmType *Subst1
+ = cast<SubstTemplateTypeParmType>(T1);
+ const SubstTemplateTypeParmType *Subst2
+ = cast<SubstTemplateTypeParmType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getReplacementType(),
+ Subst2->getReplacementType()))
+ return false;
+ break;
+ }
+
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst1
+ = cast<SubstTemplateTypeParmPackType>(T1);
+ const SubstTemplateTypeParmPackType *Subst2
+ = cast<SubstTemplateTypeParmPackType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getArgumentPack(),
+ Subst2->getArgumentPack()))
+ return false;
+ break;
+ }
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *Spec1
+ = cast<TemplateSpecializationType>(T1);
+ const TemplateSpecializationType *Spec2
+ = cast<TemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateName(),
+ Spec2->getTemplateName()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::Elaborated: {
+ const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
+ const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
+    // CHECKME: What if the keyword is ETK_None or ETK_Typename?
+ if (Elab1->getKeyword() != Elab2->getKeyword())
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Elab1->getQualifier(),
+ Elab2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Elab1->getNamedType(),
+ Elab2->getNamedType()))
+ return false;
+ break;
+ }
+
+ case Type::InjectedClassName: {
+ const InjectedClassNameType *Inj1 = cast<InjectedClassNameType>(T1);
+ const InjectedClassNameType *Inj2 = cast<InjectedClassNameType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Inj1->getInjectedSpecializationType(),
+ Inj2->getInjectedSpecializationType()))
+ return false;
+ break;
+ }
+
+ case Type::DependentName: {
+ const DependentNameType *Typename1 = cast<DependentNameType>(T1);
+ const DependentNameType *Typename2 = cast<DependentNameType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Typename1->getQualifier(),
+ Typename2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Typename1->getIdentifier(),
+ Typename2->getIdentifier()))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec1 =
+ cast<DependentTemplateSpecializationType>(T1);
+ const DependentTemplateSpecializationType *Spec2 =
+ cast<DependentTemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getQualifier(),
+ Spec2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
+ Spec2->getIdentifier()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::PackExpansion:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PackExpansionType>(T1)->getPattern(),
+ cast<PackExpansionType>(T2)->getPattern()))
+ return false;
+ break;
+
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
+ const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Iface1->getDecl(), Iface2->getDecl()))
+ return false;
+ break;
+ }
+
+ case Type::ObjCObject: {
+ const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
+ const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getBaseType(),
+ Obj2->getBaseType()))
+ return false;
+ if (Obj1->getNumProtocols() != Obj2->getNumProtocols())
+ return false;
+ for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getProtocol(I),
+ Obj2->getProtocol(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::ObjCObjectPointer: {
+ const ObjCObjectPointerType *Ptr1 = cast<ObjCObjectPointerType>(T1);
+ const ObjCObjectPointerType *Ptr2 = cast<ObjCObjectPointerType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Ptr1->getPointeeType(),
+ Ptr2->getPointeeType()))
+ return false;
+ break;
+ }
+
+ case Type::Atomic: {
+ if (!IsStructurallyEquivalent(Context,
+ cast<AtomicType>(T1)->getValueType(),
+ cast<AtomicType>(T2)->getValueType()))
+ return false;
+ break;
+ }
+
+ } // end switch
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two fields.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FieldDecl *Field1, FieldDecl *Field2) {
+ RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+
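+  // A mismatch anywhere below is reported the way the record and enum checks
+  // later in this file report it: a "tag type inconsistent" warning on the
+  // owning tag in the second context (the "to" side when driven by the
+  // importer), followed by notes pointing at the conflicting declaration in
+  // each context.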
+ if (!IsStructurallyEquivalent(Context,
+ Field1->getType(), Field2->getType())) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ return false;
+ }
+
+ if (Field1->isBitField() != Field2->isBitField()) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ if (Field1->isBitField()) {
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType()
+ << Field1->getBitWidthValue(Context.C1);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
+ << Field2->getDeclName();
+ } else {
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType()
+ << Field2->getBitWidthValue(Context.C2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
+ << Field1->getDeclName();
+ }
+ return false;
+ }
+
+ if (Field1->isBitField()) {
+ // Make sure that the bit-fields are the same length.
+ unsigned Bits1 = Field1->getBitWidthValue(Context.C1);
+ unsigned Bits2 = Field2->getBitWidthValue(Context.C2);
+
+ if (Bits1 != Bits2) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType() << Bits2;
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType() << Bits1;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two records.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ RecordDecl *D1, RecordDecl *D2) {
+ if (D1->isUnion() != D2->isUnion()) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
+ << D1->getDeclName() << (unsigned)D1->getTagKind();
+ return false;
+ }
+
+ // If both declarations are class template specializations, we know
+ // the ODR applies, so check the template and template arguments.
+ ClassTemplateSpecializationDecl *Spec1
+ = dyn_cast<ClassTemplateSpecializationDecl>(D1);
+ ClassTemplateSpecializationDecl *Spec2
+ = dyn_cast<ClassTemplateSpecializationDecl>(D2);
+ if (Spec1 && Spec2) {
+ // Check that the specialized templates are the same.
+ if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
+ Spec2->getSpecializedTemplate()))
+ return false;
+
+ // Check that the template arguments are the same.
+ if (Spec1->getTemplateArgs().size() != Spec2->getTemplateArgs().size())
+ return false;
+
+ for (unsigned I = 0, N = Spec1->getTemplateArgs().size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateArgs().get(I),
+ Spec2->getTemplateArgs().get(I)))
+ return false;
+ }
+ // If one is a class template specialization and the other is not, these
+ // structures are different.
+ else if (Spec1 || Spec2)
+ return false;
+
+ // Compare the definitions of these two records. If either or both are
+ // incomplete, we assume that they are equivalent.
+ D1 = D1->getDefinition();
+ D2 = D2->getDefinition();
+ if (!D1 || !D2)
+ return true;
+
+ if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
+ if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
+ if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
+ << D2CXX->getNumBases();
+ Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
+ << D1CXX->getNumBases();
+ return false;
+ }
+
+ // Check the base classes.
+ for (CXXRecordDecl::base_class_iterator Base1 = D1CXX->bases_begin(),
+ BaseEnd1 = D1CXX->bases_end(),
+ Base2 = D2CXX->bases_begin();
+ Base1 != BaseEnd1;
+ ++Base1, ++Base2) {
+ if (!IsStructurallyEquivalent(Context,
+ Base1->getType(), Base2->getType())) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
+ << Base2->getType()
+ << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ return false;
+ }
+
+ // Check virtual vs. non-virtual inheritance mismatch.
+ if (Base1->isVirtual() != Base2->isVirtual()) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(),
+ diag::note_odr_virtual_base)
+ << Base2->isVirtual() << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->isVirtual()
+ << Base1->getSourceRange();
+ return false;
+ }
+ }
+ } else if (D1CXX->getNumBases() > 0) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
+ return false;
+ }
+ }
+
+ // Check the fields for consistency.
+ CXXRecordDecl::field_iterator Field2 = D2->field_begin(),
+ Field2End = D2->field_end();
+ for (CXXRecordDecl::field_iterator Field1 = D1->field_begin(),
+ Field1End = D1->field_end();
+ Field1 != Field1End;
+ ++Field1, ++Field2) {
+ if (Field2 == Field2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
+ return false;
+ }
+
+ if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
+ return false;
+ }
+
+ if (Field2 != Field2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two enums.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ EnumDecl *D1, EnumDecl *D2) {
+ EnumDecl::enumerator_iterator EC2 = D2->enumerator_begin(),
+ EC2End = D2->enumerator_end();
+ for (EnumDecl::enumerator_iterator EC1 = D1->enumerator_begin(),
+ EC1End = D1->enumerator_end();
+ EC1 != EC1End; ++EC1, ++EC2) {
+ if (EC2 == EC2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
+ return false;
+ }
+
+ llvm::APSInt Val1 = EC1->getInitVal();
+ llvm::APSInt Val2 = EC2->getInitVal();
+ if (!IsSameValue(Val1, Val2) ||
+ !IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ return false;
+ }
+ }
+
+ if (EC2 != EC2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateParameterList *Params1,
+ TemplateParameterList *Params2) {
+ if (Params1->size() != Params2->size()) {
+ Context.Diag2(Params2->getTemplateLoc(),
+ diag::err_odr_different_num_template_parameters)
+ << Params1->size() << Params2->size();
+ Context.Diag1(Params1->getTemplateLoc(),
+ diag::note_odr_template_parameter_list);
+ return false;
+ }
+
+ for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
+ if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
+ Context.Diag2(Params2->getParam(I)->getLocation(),
+ diag::err_odr_different_template_parameter_kind);
+ Context.Diag1(Params1->getParam(I)->getLocation(),
+ diag::note_odr_template_parameter_here);
+ return false;
+ }
+
+    if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
+                                          Params2->getParam(I)))
+      return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTypeParmDecl *D1,
+ TemplateTypeParmDecl *D2) {
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NonTypeTemplateParmDecl *D1,
+ NonTypeTemplateParmDecl *D2) {
+ // FIXME: Enable once we have variadic templates.
+#if 0
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+#endif
+
+ // Check types.
+ if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
+ Context.Diag2(D2->getLocation(),
+ diag::err_odr_non_type_parameter_type_inconsistent)
+ << D2->getType() << D1->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
+ << D1->getType();
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTemplateParmDecl *D1,
+ TemplateTemplateParmDecl *D2) {
+ // FIXME: Enable once we have variadic templates.
+#if 0
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+#endif
+
+ // Check template parameter lists.
+ return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
+ D2->getTemplateParameters());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ClassTemplateDecl *D1,
+ ClassTemplateDecl *D2) {
+ // Check template parameters.
+ if (!IsStructurallyEquivalent(Context,
+ D1->getTemplateParameters(),
+ D2->getTemplateParameters()))
+ return false;
+
+ // Check the templated declaration.
+ return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
+ D2->getTemplatedDecl());
+}
+
+/// \brief Determine structural equivalence of two declarations.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Decl *D1, Decl *D2) {
+ // FIXME: Check for known structural equivalences via a callback of some sort.
+
+ // Check whether we already know that these two declarations are not
+ // structurally equivalent.
+ if (Context.NonEquivalentDecls.count(std::make_pair(D1->getCanonicalDecl(),
+ D2->getCanonicalDecl())))
+ return false;
+
+ // Determine whether we've already produced a tentative equivalence for D1.
+ Decl *&EquivToD1 = Context.TentativeEquivalences[D1->getCanonicalDecl()];
+ if (EquivToD1)
+ return EquivToD1 == D2->getCanonicalDecl();
+
+ // Produce a tentative equivalence D1 <-> D2, which will be checked later.
+ EquivToD1 = D2->getCanonicalDecl();
+ Context.DeclsToCheck.push_back(D1->getCanonicalDecl());
+ return true;
+}
+
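+// A minimal usage sketch (variable names are illustrative only): construct a
+// context over the two ASTContexts being compared and query a pair of
+// declarations, e.g.
+//   StructuralEquivalenceContext Ctx(FromContext, ToContext,
+//                                    NonEquivalentDecls);
+//   bool Same = Ctx.IsStructurallyEquivalent(FromDecl, ToDecl);
+// This is how ASTNodeImporter::IsStructuralMatch drives it further below.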
+bool StructuralEquivalenceContext::IsStructurallyEquivalent(Decl *D1,
+ Decl *D2) {
+ if (!::IsStructurallyEquivalent(*this, D1, D2))
+ return false;
+
+ return !Finish();
+}
+
+bool StructuralEquivalenceContext::IsStructurallyEquivalent(QualType T1,
+ QualType T2) {
+ if (!::IsStructurallyEquivalent(*this, T1, T2))
+ return false;
+
+ return !Finish();
+}
+
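+// Drain the worklist of tentative equivalences recorded by the static
+// IsStructurallyEquivalent(Context, Decl*, Decl*) above. Returns true as soon
+// as a queued pair turns out not to be equivalent (hence the public entry
+// points return !Finish()), and false once every pending pair has checked out.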
+bool StructuralEquivalenceContext::Finish() {
+ while (!DeclsToCheck.empty()) {
+ // Check the next declaration.
+ Decl *D1 = DeclsToCheck.front();
+ DeclsToCheck.pop_front();
+
+ Decl *D2 = TentativeEquivalences[D1];
+ assert(D2 && "Unrecorded tentative equivalence?");
+
+ bool Equivalent = true;
+
+ // FIXME: Switch on all declaration kinds. For now, we're just going to
+ // check the obvious ones.
+ if (RecordDecl *Record1 = dyn_cast<RecordDecl>(D1)) {
+ if (RecordDecl *Record2 = dyn_cast<RecordDecl>(D2)) {
+ // Check for equivalent structure names.
+ IdentifierInfo *Name1 = Record1->getIdentifier();
+ if (!Name1 && Record1->getTypedefNameForAnonDecl())
+ Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Record2->getIdentifier();
+ if (!Name2 && Record2->getTypedefNameForAnonDecl())
+ Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Record1, Record2))
+ Equivalent = false;
+ } else {
+ // Record/non-record mismatch.
+ Equivalent = false;
+ }
+ } else if (EnumDecl *Enum1 = dyn_cast<EnumDecl>(D1)) {
+ if (EnumDecl *Enum2 = dyn_cast<EnumDecl>(D2)) {
+ // Check for equivalent enum names.
+ IdentifierInfo *Name1 = Enum1->getIdentifier();
+ if (!Name1 && Enum1->getTypedefNameForAnonDecl())
+ Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Enum2->getIdentifier();
+ if (!Name2 && Enum2->getTypedefNameForAnonDecl())
+ Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Enum1, Enum2))
+ Equivalent = false;
+ } else {
+ // Enum/non-enum mismatch
+ Equivalent = false;
+ }
+ } else if (TypedefNameDecl *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
+ if (TypedefNameDecl *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
+ Typedef2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this,
+ Typedef1->getUnderlyingType(),
+ Typedef2->getUnderlyingType()))
+ Equivalent = false;
+ } else {
+ // Typedef/non-typedef mismatch.
+ Equivalent = false;
+ }
+ } else if (ClassTemplateDecl *ClassTemplate1
+ = dyn_cast<ClassTemplateDecl>(D1)) {
+ if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
+ ClassTemplate2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
+ Equivalent = false;
+ } else {
+ // Class template/non-class-template mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTypeParmDecl *TTP1= dyn_cast<TemplateTypeParmDecl>(D1)) {
+ if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (NonTypeTemplateParmDecl *NTTP1
+ = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
+ if (NonTypeTemplateParmDecl *NTTP2
+ = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTemplateParmDecl *TTP1
+ = dyn_cast<TemplateTemplateParmDecl>(D1)) {
+ if (TemplateTemplateParmDecl *TTP2
+ = dyn_cast<TemplateTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ }
+
+ if (!Equivalent) {
+ // Note that these two declarations are not equivalent (and we already
+ // know about it).
+ NonEquivalentDecls.insert(std::make_pair(D1->getCanonicalDecl(),
+ D2->getCanonicalDecl()));
+ return true;
+ }
+ // FIXME: Check other declaration kinds!
+ }
+
+ return false;
+}
+
+//----------------------------------------------------------------------------
+// Import Types
+//----------------------------------------------------------------------------
+
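+// Each VisitXXXType method below follows the same pattern: import the
+// constituent pieces (element/pointee/underlying types, size expressions,
+// declarations) into the "to" context, bail out with a null QualType if any
+// piece fails to import, and otherwise rebuild the type through the
+// corresponding ASTContext getter.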
+QualType ASTNodeImporter::VisitType(const Type *T) {
+ Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
+ << T->getTypeClassName();
+ return QualType();
+}
+
+QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
+ switch (T->getKind()) {
+#define SHARED_SINGLETON_TYPE(Expansion)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id: return Importer.getToContext().SingletonId;
+#include "clang/AST/BuiltinTypes.def"
+
+ // FIXME: for Char16, Char32, and NullPtr, make sure that the "to"
+ // context supports C++.
+
+ // FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to"
+ // context supports ObjC.
+
+ case BuiltinType::Char_U:
+ // The context we're importing from has an unsigned 'char'. If we're
+ // importing into a context with a signed 'char', translate to
+ // 'unsigned char' instead.
+ if (Importer.getToContext().getLangOpts().CharIsSigned)
+ return Importer.getToContext().UnsignedCharTy;
+
+ return Importer.getToContext().CharTy;
+
+ case BuiltinType::Char_S:
+    // The context we're importing from has a signed 'char'. If we're
+    // importing into a context with an unsigned 'char', translate to
+    // 'signed char' instead.
+ if (!Importer.getToContext().getLangOpts().CharIsSigned)
+ return Importer.getToContext().SignedCharTy;
+
+ return Importer.getToContext().CharTy;
+
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ // FIXME: If not in C++, shall we translate to the C equivalent of
+ // wchar_t?
+ return Importer.getToContext().WCharTy;
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getComplexType(ToElementType);
+}
+
+QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getPointerType(ToPointeeType);
+}
+
+QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
+ // FIXME: Check for blocks support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getBlockPointerType(ToPointeeType);
+}
+
+QualType
+ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
+ // FIXME: Check for C++ support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getLValueReferenceType(ToPointeeType);
+}
+
+QualType
+ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
+ // FIXME: Check for C++0x support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getRValueReferenceType(ToPointeeType);
+}
+
+QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
+ // FIXME: Check for C++ support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
+ return Importer.getToContext().getMemberPointerType(ToPointeeType,
+ ClassType.getTypePtr());
+}
+
+QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getConstantArrayType(ToElementType,
+ T->getSize(),
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+}
+
+QualType
+ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getIncompleteArrayType(ToElementType,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+}
+
+QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ Expr *Size = Importer.Import(T->getSizeExpr());
+ if (!Size)
+ return QualType();
+
+ SourceRange Brackets = Importer.Import(T->getBracketsRange());
+ return Importer.getToContext().getVariableArrayType(ToElementType, Size,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(),
+ Brackets);
+}
+
+QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getVectorType(ToElementType,
+ T->getNumElements(),
+ T->getVectorKind());
+}
+
+QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getExtVectorType(ToElementType,
+ T->getNumElements());
+}
+
+QualType
+ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ // FIXME: What happens if we're importing a function without a prototype
+ // into C++? Should we make it variadic?
+ QualType ToResultType = Importer.Import(T->getResultType());
+ if (ToResultType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getFunctionNoProtoType(ToResultType,
+ T->getExtInfo());
+}
+
+QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ QualType ToResultType = Importer.Import(T->getResultType());
+ if (ToResultType.isNull())
+ return QualType();
+
+ // Import argument types
+ SmallVector<QualType, 4> ArgTypes;
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ QualType ArgType = Importer.Import(*A);
+ if (ArgType.isNull())
+ return QualType();
+ ArgTypes.push_back(ArgType);
+ }
+
+ // Import exception types
+ SmallVector<QualType, 4> ExceptionTypes;
+ for (FunctionProtoType::exception_iterator E = T->exception_begin(),
+ EEnd = T->exception_end();
+ E != EEnd; ++E) {
+ QualType ExceptionType = Importer.Import(*E);
+ if (ExceptionType.isNull())
+ return QualType();
+ ExceptionTypes.push_back(ExceptionType);
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo();
+ EPI.Exceptions = ExceptionTypes.data();
+
+ return Importer.getToContext().getFunctionType(ToResultType, ArgTypes.data(),
+ ArgTypes.size(), EPI);
+}
+
+QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
+ QualType ToInnerType = Importer.Import(T->getInnerType());
+ if (ToInnerType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getParenType(ToInnerType);
+}
+
+QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
+ TypedefNameDecl *ToDecl
+ = dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTypeDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
+ if (!ToExpr)
+ return QualType();
+
+ return Importer.getToContext().getTypeOfExprType(ToExpr);
+}
+
+QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
+ QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (ToUnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getTypeOfType(ToUnderlyingType);
+}
+
+QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
+ if (!ToExpr)
+ return QualType();
+
+ QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (UnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
+}
+
+QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (ToBaseType.isNull() || ToUnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getUnaryTransformType(ToBaseType,
+ ToUnderlyingType,
+ T->getUTTKind());
+}
+
+QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
+ QualType FromDeduced = T->getDeducedType();
+ QualType ToDeduced;
+ if (!FromDeduced.isNull()) {
+ ToDeduced = Importer.Import(FromDeduced);
+ if (ToDeduced.isNull())
+ return QualType();
+ }
+
+ return Importer.getToContext().getAutoType(ToDeduced);
+}
+
+QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ RecordDecl *ToDecl
+ = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTagDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
+ EnumDecl *ToDecl
+ = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTagDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ TemplateName ToTemplate = Importer.Import(T->getTemplateName());
+ if (ToTemplate.isNull())
+ return QualType();
+
+ SmallVector<TemplateArgument, 2> ToTemplateArgs;
+ if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ return QualType();
+
+ QualType ToCanonType;
+ if (!QualType(T, 0).isCanonical()) {
+ QualType FromCanonType
+ = Importer.getFromContext().getCanonicalType(QualType(T, 0));
+    ToCanonType = Importer.Import(FromCanonType);
+ if (ToCanonType.isNull())
+ return QualType();
+ }
+ return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
+ ToTemplateArgs.data(),
+ ToTemplateArgs.size(),
+ ToCanonType);
+}
+
+QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
+ NestedNameSpecifier *ToQualifier = 0;
+ // Note: the qualifier in an ElaboratedType is optional.
+ if (T->getQualifier()) {
+ ToQualifier = Importer.Import(T->getQualifier());
+ if (!ToQualifier)
+ return QualType();
+ }
+
+ QualType ToNamedType = Importer.Import(T->getNamedType());
+ if (ToNamedType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getElaboratedType(T->getKeyword(),
+ ToQualifier, ToNamedType);
+}
+
+QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ ObjCInterfaceDecl *Class
+ = dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
+ if (!Class)
+ return QualType();
+
+ return Importer.getToContext().getObjCInterfaceType(Class);
+}
+
+QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ if (ToBaseType.isNull())
+ return QualType();
+
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ for (ObjCObjectType::qual_iterator P = T->qual_begin(),
+ PEnd = T->qual_end();
+ P != PEnd; ++P) {
+ ObjCProtocolDecl *Protocol
+ = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(*P));
+ if (!Protocol)
+ return QualType();
+ Protocols.push_back(Protocol);
+ }
+
+ return Importer.getToContext().getObjCObjectType(ToBaseType,
+ Protocols.data(),
+ Protocols.size());
+}
+
+QualType
+ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
+}
+
+//----------------------------------------------------------------------------
+// Import Declarations
+//----------------------------------------------------------------------------
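+// The declaration importers in this section share a common shape:
+// ImportDeclParts maps the declaration context, name, and location into the
+// "to" context; a local uncached lookup then tries to find an existing
+// declaration to merge with (name conflicts are routed through
+// Importer.HandleNameConflict); and the new or merged declaration is
+// registered via Importer.Imported before any definition is filled in.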
+bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
+ DeclContext *&LexicalDC,
+ DeclarationName &Name,
+ SourceLocation &Loc) {
+ // Import the context of this declaration.
+ DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return true;
+
+ LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return true;
+ }
+
+ // Import the name of this declaration.
+ Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return true;
+
+ // Import the location of this declaration.
+ Loc = Importer.Import(D->getLocation());
+ return false;
+}
+
+void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
+ if (!FromD)
+ return;
+
+ if (!ToD) {
+ ToD = Importer.Import(FromD);
+ if (!ToD)
+ return;
+ }
+
+ if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
+ if (RecordDecl *ToRecord = cast_or_null<RecordDecl>(ToD)) {
+ if (FromRecord->getDefinition() && !ToRecord->getDefinition()) {
+ ImportDefinition(FromRecord, ToRecord);
+ }
+ }
+ return;
+ }
+
+ if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
+ if (EnumDecl *ToEnum = cast_or_null<EnumDecl>(ToD)) {
+ if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
+ ImportDefinition(FromEnum, ToEnum);
+ }
+ }
+ return;
+ }
+}
+
+void
+ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
+ DeclarationNameInfo& To) {
+ // NOTE: To.Name and To.Loc are already imported.
+ // We only have to import To.LocInfo.
+ switch (To.getName().getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ return;
+
+ case DeclarationName::CXXOperatorName: {
+ SourceRange Range = From.getCXXOperatorNameRange();
+ To.setCXXOperatorNameRange(Importer.Import(Range));
+ return;
+ }
+ case DeclarationName::CXXLiteralOperatorName: {
+ SourceLocation Loc = From.getCXXLiteralOperatorNameLoc();
+ To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc));
+ return;
+ }
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName: {
+ TypeSourceInfo *FromTInfo = From.getNamedTypeInfo();
+ To.setNamedTypeInfo(Importer.Import(FromTInfo));
+ return;
+ }
+ }
+ llvm_unreachable("Unknown name kind.");
+}
+
+void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
+ if (Importer.isMinimalImport() && !ForceImport) {
+ Importer.ImportContext(FromDC);
+ return;
+ }
+
+ for (DeclContext::decl_iterator From = FromDC->decls_begin(),
+ FromEnd = FromDC->decls_end();
+ From != FromEnd;
+ ++From)
+ Importer.Import(*From);
+}
+
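+// Note the return convention shared by the ImportDefinition overloads: they
+// return true on failure and false on success, like ImportDeclParts above.
+// For C++ records the record overload also copies the DefinitionData bits
+// wholesale so the imported class answers trait queries the same way as the
+// original.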
+bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ return false;
+ }
+
+ To->startDefinition();
+
+ // Add base classes.
+ if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
+ CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
+
+ struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
+ struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
+ ToData.UserDeclaredConstructor = FromData.UserDeclaredConstructor;
+ ToData.UserDeclaredCopyConstructor = FromData.UserDeclaredCopyConstructor;
+ ToData.UserDeclaredMoveConstructor = FromData.UserDeclaredMoveConstructor;
+ ToData.UserDeclaredCopyAssignment = FromData.UserDeclaredCopyAssignment;
+ ToData.UserDeclaredMoveAssignment = FromData.UserDeclaredMoveAssignment;
+ ToData.UserDeclaredDestructor = FromData.UserDeclaredDestructor;
+ ToData.Aggregate = FromData.Aggregate;
+ ToData.PlainOldData = FromData.PlainOldData;
+ ToData.Empty = FromData.Empty;
+ ToData.Polymorphic = FromData.Polymorphic;
+ ToData.Abstract = FromData.Abstract;
+ ToData.IsStandardLayout = FromData.IsStandardLayout;
+ ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases;
+ ToData.HasPrivateFields = FromData.HasPrivateFields;
+ ToData.HasProtectedFields = FromData.HasProtectedFields;
+ ToData.HasPublicFields = FromData.HasPublicFields;
+ ToData.HasMutableFields = FromData.HasMutableFields;
+ ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
+ ToData.HasTrivialDefaultConstructor = FromData.HasTrivialDefaultConstructor;
+ ToData.HasConstexprNonCopyMoveConstructor
+ = FromData.HasConstexprNonCopyMoveConstructor;
+ ToData.DefaultedDefaultConstructorIsConstexpr
+ = FromData.DefaultedDefaultConstructorIsConstexpr;
+ ToData.DefaultedCopyConstructorIsConstexpr
+ = FromData.DefaultedCopyConstructorIsConstexpr;
+ ToData.DefaultedMoveConstructorIsConstexpr
+ = FromData.DefaultedMoveConstructorIsConstexpr;
+ ToData.HasConstexprDefaultConstructor
+ = FromData.HasConstexprDefaultConstructor;
+ ToData.HasConstexprCopyConstructor = FromData.HasConstexprCopyConstructor;
+ ToData.HasConstexprMoveConstructor = FromData.HasConstexprMoveConstructor;
+ ToData.HasTrivialCopyConstructor = FromData.HasTrivialCopyConstructor;
+ ToData.HasTrivialMoveConstructor = FromData.HasTrivialMoveConstructor;
+ ToData.HasTrivialCopyAssignment = FromData.HasTrivialCopyAssignment;
+ ToData.HasTrivialMoveAssignment = FromData.HasTrivialMoveAssignment;
+ ToData.HasTrivialDestructor = FromData.HasTrivialDestructor;
+ ToData.HasIrrelevantDestructor = FromData.HasIrrelevantDestructor;
+ ToData.HasNonLiteralTypeFieldsOrBases
+ = FromData.HasNonLiteralTypeFieldsOrBases;
+ // ComputedVisibleConversions not imported.
+ ToData.UserProvidedDefaultConstructor
+ = FromData.UserProvidedDefaultConstructor;
+ ToData.DeclaredDefaultConstructor = FromData.DeclaredDefaultConstructor;
+ ToData.DeclaredCopyConstructor = FromData.DeclaredCopyConstructor;
+ ToData.DeclaredMoveConstructor = FromData.DeclaredMoveConstructor;
+ ToData.DeclaredCopyAssignment = FromData.DeclaredCopyAssignment;
+ ToData.DeclaredMoveAssignment = FromData.DeclaredMoveAssignment;
+ ToData.DeclaredDestructor = FromData.DeclaredDestructor;
+ ToData.FailedImplicitMoveConstructor
+ = FromData.FailedImplicitMoveConstructor;
+ ToData.FailedImplicitMoveAssignment = FromData.FailedImplicitMoveAssignment;
+ ToData.IsLambda = FromData.IsLambda;
+
+ SmallVector<CXXBaseSpecifier *, 4> Bases;
+ for (CXXRecordDecl::base_class_iterator
+ Base1 = FromCXX->bases_begin(),
+ FromBaseEnd = FromCXX->bases_end();
+ Base1 != FromBaseEnd;
+ ++Base1) {
+ QualType T = Importer.Import(Base1->getType());
+ if (T.isNull())
+ return true;
+
+ SourceLocation EllipsisLoc;
+ if (Base1->isPackExpansion())
+ EllipsisLoc = Importer.Import(Base1->getEllipsisLoc());
+
+ // Ensure that we have a definition for the base.
+ ImportDefinitionIfNeeded(Base1->getType()->getAsCXXRecordDecl());
+
+ Bases.push_back(
+ new (Importer.getToContext())
+ CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
+ Base1->isVirtual(),
+ Base1->isBaseOfClass(),
+ Base1->getAccessSpecifierAsWritten(),
+ Importer.Import(Base1->getTypeSourceInfo()),
+ EllipsisLoc));
+ }
+ if (!Bases.empty())
+ ToCXX->setBases(Bases.data(), Bases.size());
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ To->completeDefinition();
+ return false;
+}
+
+bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+ return false;
+ }
+
+ To->startDefinition();
+
+ QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
+ if (T.isNull())
+ return true;
+
+ QualType ToPromotionType = Importer.Import(From->getPromotionType());
+ if (ToPromotionType.isNull())
+ return true;
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ // FIXME: we might need to merge the number of positive or negative bits
+ // if the enumerator lists don't match.
+ To->completeDefinition(T, ToPromotionType,
+ From->getNumPositiveBits(),
+ From->getNumNegativeBits());
+ return false;
+}
+
+TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
+ TemplateParameterList *Params) {
+ SmallVector<NamedDecl *, 4> ToParams;
+ ToParams.reserve(Params->size());
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ Decl *To = Importer.Import(*P);
+ if (!To)
+ return 0;
+
+ ToParams.push_back(cast<NamedDecl>(To));
+ }
+
+ return TemplateParameterList::Create(Importer.getToContext(),
+ Importer.Import(Params->getTemplateLoc()),
+ Importer.Import(Params->getLAngleLoc()),
+ ToParams.data(), ToParams.size(),
+ Importer.Import(Params->getRAngleLoc()));
+}
+
+TemplateArgument
+ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
+ switch (From.getKind()) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+
+ case TemplateArgument::Type: {
+ QualType ToType = Importer.Import(From.getAsType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(ToType);
+ }
+
+ case TemplateArgument::Integral: {
+ QualType ToType = Importer.Import(From.getIntegralType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(*From.getAsIntegral(), ToType);
+ }
+
+ case TemplateArgument::Declaration:
+ if (Decl *To = Importer.Import(From.getAsDecl()))
+ return TemplateArgument(To);
+ return TemplateArgument();
+
+ case TemplateArgument::Template: {
+ TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate);
+ }
+
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName ToTemplate
+ = Importer.Import(From.getAsTemplateOrTemplatePattern());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
+ }
+
+ case TemplateArgument::Expression:
+ if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
+ return TemplateArgument(ToExpr);
+ return TemplateArgument();
+
+ case TemplateArgument::Pack: {
+ SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(From.pack_size());
+ if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
+ return TemplateArgument();
+
+ TemplateArgument *ToArgs
+ = new (Importer.getToContext()) TemplateArgument[ToPack.size()];
+ std::copy(ToPack.begin(), ToPack.end(), ToArgs);
+ return TemplateArgument(ToArgs, ToPack.size());
+ }
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
+bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs) {
+ for (unsigned I = 0; I != NumFromArgs; ++I) {
+ TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
+ if (To.isNull() && !FromArgs[I].isNull())
+ return true;
+
+ ToArgs.push_back(To);
+ }
+
+ return false;
+}
+
+bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
+ RecordDecl *ToRecord) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
+ ClassTemplateDecl *To) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
+
+Decl *ASTNodeImporter::VisitDecl(Decl *D) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return 0;
+}
+
+Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ TranslationUnitDecl *ToD =
+ Importer.getToContext().getTranslationUnitDecl();
+
+ Importer.Imported(D, ToD);
+
+ return ToD;
+}
+
+Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
+ // Import the major distinguishing characteristics of this namespace.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ NamespaceDecl *MergeWithNamespace = 0;
+ if (!Name) {
+ // This is an anonymous namespace. Adopt an existing anonymous
+ // namespace if we can.
+ // FIXME: Not testable.
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ MergeWithNamespace = TU->getAnonymousNamespace();
+ else
+ MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
+ } else {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
+ continue;
+
+ if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(FoundDecls[I])) {
+ MergeWithNamespace = FoundNS;
+ ConflictingDecls.clear();
+ break;
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the "to" namespace, if needed.
+ NamespaceDecl *ToNamespace = MergeWithNamespace;
+ if (!ToNamespace) {
+ ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
+ D->isInline(),
+ Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/0);
+ ToNamespace->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToNamespace);
+
+ // If this is an anonymous namespace, register it as the anonymous
+ // namespace within its context.
+ if (!Name) {
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ TU->setAnonymousNamespace(ToNamespace);
+ else
+ cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
+ }
+ }
+ Importer.Imported(D, ToNamespace);
+
+ ImportDeclContext(D);
+
+ return ToNamespace;
+}
+
+Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
+ // Import the major distinguishing characteristics of this typedef.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // If this typedef is not in block scope, determine whether we've
+ // seen a typedef with the same name (that we can merge with) or any
+ // other entity by that name (which name lookup could conflict with).
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+ if (TypedefNameDecl *FoundTypedef =
+ dyn_cast<TypedefNameDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
+ FoundTypedef->getUnderlyingType()))
+ return Importer.Imported(D, FoundTypedef);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+  // Import the underlying type of this typedef.
+ QualType T = Importer.Import(D->getUnderlyingType());
+ if (T.isNull())
+ return 0;
+
+ // Create the new typedef node.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ SourceLocation StartL = Importer.Import(D->getLocStart());
+ TypedefNameDecl *ToTypedef;
+ if (IsAlias)
+ ToTypedef = TypeAliasDecl::Create(Importer.getToContext(), DC,
+ StartL, Loc,
+ Name.getAsIdentifierInfo(),
+ TInfo);
+ else
+ ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
+ StartL, Loc,
+ Name.getAsIdentifierInfo(),
+ TInfo);
+
+ ToTypedef->setAccess(D->getAccess());
+ ToTypedef->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToTypedef);
+ LexicalDC->addDeclInternal(ToTypedef);
+
+ return ToTypedef;
+}
+
+Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
+ return VisitTypedefNameDecl(D, /*IsAlias=*/false);
+}
+
+Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ return VisitTypedefNameDecl(D, /*IsAlias=*/true);
+}
+
+Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
+ // Import the major distinguishing characteristics of this enum.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Figure out what enum name we're looking for.
+ unsigned IDNS = Decl::IDNS_Tag;
+ DeclarationName SearchName = Name;
+ if (!SearchName && D->getTypedefNameForAnonDecl()) {
+ SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ IDNS = Decl::IDNS_Ordinary;
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Ordinary;
+
+ // We may already have an enum of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod() && SearchName) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(SearchName, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Found = Tag->getDecl();
+ }
+
+ if (EnumDecl *FoundEnum = dyn_cast<EnumDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundEnum))
+ return Importer.Imported(D, FoundEnum);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the enum declaration.
+ EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(), 0,
+ D->isScoped(), D->isScopedUsingClassTag(),
+ D->isFixed());
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, D2);
+ LexicalDC->addDeclInternal(D2);
+
+ // Import the integer type.
+ QualType ToIntegerType = Importer.Import(D->getIntegerType());
+ if (ToIntegerType.isNull())
+ return 0;
+ D2->setIntegerType(ToIntegerType);
+
+ // Import the definition
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ return 0;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this record.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Figure out what structure name we're looking for.
+ unsigned IDNS = Decl::IDNS_Tag;
+ DeclarationName SearchName = Name;
+ if (!SearchName && D->getTypedefNameForAnonDecl()) {
+ SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ IDNS = Decl::IDNS_Ordinary;
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Ordinary;
+
+ // We may already have a record of the same name; try to find and match it.
+ RecordDecl *AdoptDecl = 0;
+ if (!DC->isFunctionOrMethod() && SearchName) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(SearchName, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Found = Tag->getDecl();
+ }
+
+ if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
+ if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
+ if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+ // The record types structurally match, or the "from" translation
+ // unit only had a forward declaration anyway; call it the same
+ // function.
+ // FIXME: For C++, we should also merge methods here.
+ return Importer.Imported(D, FoundDef);
+ }
+ } else {
+ // We have a forward declaration of this type, so adopt that forward
+ // declaration rather than building a new one.
+ AdoptDecl = FoundRecord;
+ continue;
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the record declaration.
+ RecordDecl *D2 = AdoptDecl;
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ if (!D2) {
+ if (isa<CXXRecordDecl>(D)) {
+ CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
+ D->getTagKind(),
+ DC, StartLoc, Loc,
+ Name.getAsIdentifierInfo());
+ D2 = D2CXX;
+ D2->setAccess(D->getAccess());
+ } else {
+ D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
+ DC, StartLoc, Loc, Name.getAsIdentifierInfo());
+ }
+
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ }
+
+ Importer.Imported(D, D2);
+
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
+ return 0;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ // Import the major distinguishing characteristics of this enumerator.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Determine whether there are any other declarations with the same name and
+ // in the same context.
+ if (!LexicalDC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
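+  // Import the enumerator's initializing expression, if it has one; a failed
+  // import aborts the import of the enumerator.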
+ Expr *Init = Importer.Import(D->getInitExpr());
+ if (D->getInitExpr() && !Init)
+ return 0;
+
+ EnumConstantDecl *ToEnumerator
+ = EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
+ Name.getAsIdentifierInfo(), T,
+ Init, D->getInitVal());
+ ToEnumerator->setAccess(D->getAccess());
+ ToEnumerator->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToEnumerator);
+ LexicalDC->addDeclInternal(ToEnumerator);
+ return ToEnumerator;
+}
+
+Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
+ // Import the major distinguishing characteristics of this function.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Try to find a function in our own ("to") context with the same name, same
+ // type, and in the same context as the function we're importing.
+ if (!LexicalDC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (FunctionDecl *FoundFunction = dyn_cast<FunctionDecl>(FoundDecls[I])) {
+ if (isExternalLinkage(FoundFunction->getLinkage()) &&
+ isExternalLinkage(D->getLinkage())) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundFunction->getType())) {
+ // FIXME: Actually try to merge the body and other attributes.
+ return Importer.Imported(D, FoundFunction);
+ }
+
+ // FIXME: Check for overloading more carefully, e.g., by boosting
+ // Sema::IsOverload out to the AST library.
+
+ // Function overloading is okay in C++.
+ if (Importer.getToContext().getLangOpts().CPlusPlus)
+ continue;
+
+ // Complain about inconsistent function types.
+ Importer.ToDiag(Loc, diag::err_odr_function_type_inconsistent)
+ << Name << D->getType() << FoundFunction->getType();
+ Importer.ToDiag(FoundFunction->getLocation(),
+ diag::note_odr_value_here)
+ << FoundFunction->getType();
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+ DeclarationNameInfo NameInfo(Name, Loc);
+ // Import additional name location/type info.
+ ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Import the function parameters.
+ SmallVector<ParmVarDecl *, 8> Parameters;
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*P));
+ if (!ToP)
+ return 0;
+
+ Parameters.push_back(ToP);
+ }
+
+ // Create the imported function.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ FunctionDecl *ToFunction = 0;
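+  // Pick the FunctionDecl subclass that matches the kind of C++ member being
+  // imported (constructor, destructor, conversion function, other method);
+  // anything else becomes a plain FunctionDecl.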
+ if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ FromConstructor->isExplicit(),
+ D->isInlineSpecified(),
+ D->isImplicit(),
+ D->isConstexpr());
+ } else if (isa<CXXDestructorDecl>(D)) {
+ ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ D->isInlineSpecified(),
+ D->isImplicit());
+ } else if (CXXConversionDecl *FromConversion
+ = dyn_cast<CXXConversionDecl>(D)) {
+ ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ D->isInlineSpecified(),
+ FromConversion->isExplicit(),
+ D->isConstexpr(),
+ Importer.Import(D->getLocEnd()));
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ Method->isStatic(),
+ Method->getStorageClassAsWritten(),
+ Method->isInlineSpecified(),
+ D->isConstexpr(),
+ Importer.Import(D->getLocEnd()));
+ } else {
+ ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo, D->getStorageClass(),
+ D->getStorageClassAsWritten(),
+ D->isInlineSpecified(),
+ D->hasWrittenPrototype(),
+ D->isConstexpr());
+ }
+
+ // Import the qualifier, if any.
+ ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToFunction->setAccess(D->getAccess());
+ ToFunction->setLexicalDeclContext(LexicalDC);
+ ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
+ ToFunction->setTrivial(D->isTrivial());
+ ToFunction->setPure(D->isPure());
+ Importer.Imported(D, ToFunction);
+
+ // Set the parameters.
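+  // The parameters were imported into the translation unit's context (see
+  // VisitParmVarDecl); reparent them into the newly created function.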
+ for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
+ Parameters[I]->setOwningFunction(ToFunction);
+ ToFunction->addDeclInternal(Parameters[I]);
+ }
+ ToFunction->setParams(Parameters);
+
+ // FIXME: Other bits to merge?
+
+ // Add this function to the lexical context.
+ LexicalDC->addDeclInternal(ToFunction);
+
+ return ToFunction;
+}
+
+Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ return VisitFunctionDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
+  // Import the major distinguishing characteristics of a field.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this field.
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundField->getType())) {
+ Importer.Imported(D, FoundField);
+ return FoundField;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ << Name << D->getType() << FoundField->getType();
+ Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
+ << FoundField->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
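+  // Import the bit-field width expression, if any; a failed import aborts.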
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return 0;
+
+ FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, BitWidth, D->isMutable(),
+ D->hasInClassInitializer());
+ ToField->setAccess(D->getAccess());
+ ToField->setLexicalDeclContext(LexicalDC);
+ if (ToField->hasInClassInitializer())
+ ToField->setInClassInitializer(D->getInClassInitializer());
+ Importer.Imported(D, ToField);
+ LexicalDC->addDeclInternal(ToField);
+ return ToField;
+}
+
+Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+  // Import the major distinguishing characteristics of an indirect field.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this field.
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (IndirectFieldDecl *FoundField
+ = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundField->getType())) {
+ Importer.Imported(D, FoundField);
+ return FoundField;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ << Name << D->getType() << FoundField->getType();
+ Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
+ << FoundField->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
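+  // Import the chain of named declarations (members of the enclosing
+  // anonymous structs/unions) that this indirect field refers to.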
+  NamedDecl **NamedChain =
+    new (Importer.getToContext()) NamedDecl*[D->getChainingSize()];
+
+  unsigned i = 0;
+  for (IndirectFieldDecl::chain_iterator PI = D->chain_begin(),
+       PE = D->chain_end(); PI != PE; ++PI) {
+    Decl *Chained = Importer.Import(*PI);
+    if (!Chained)
+      return 0;
+    NamedChain[i++] = cast<NamedDecl>(Chained);
+  }
+
+ IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
+ Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(), T,
+ NamedChain, D->getChainingSize());
+ ToIndirectField->setAccess(D->getAccess());
+ ToIndirectField->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIndirectField);
+ LexicalDC->addDeclInternal(ToIndirectField);
+ return ToIndirectField;
+}
+
+Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ // Import the major distinguishing characteristics of an ivar.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this ivar
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundIvar->getType())) {
+ Importer.Imported(D, FoundIvar);
+ return FoundIvar;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
+ << Name << D->getType() << FoundIvar->getType();
+ Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
+ << FoundIvar->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return 0;
+
+ ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(),
+ cast<ObjCContainerDecl>(DC),
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getAccessControl(),
+ BitWidth, D->getSynthesize());
+ ToIvar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIvar);
+ LexicalDC->addDeclInternal(ToIvar);
+  return ToIvar;
+}
+
+Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
+ // Import the major distinguishing characteristics of a variable.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Try to find a variable in our own ("to") context with the same name and
+ // in the same context as the variable we're importing.
+ if (D->isFileVarDecl()) {
+ VarDecl *MergeWithVar = 0;
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (VarDecl *FoundVar = dyn_cast<VarDecl>(FoundDecls[I])) {
+ // We have found a variable that we may need to merge with. Check it.
+ if (isExternalLinkage(FoundVar->getLinkage()) &&
+ isExternalLinkage(D->getLinkage())) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundVar->getType())) {
+ MergeWithVar = FoundVar;
+ break;
+ }
+
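+          // The types differ, but one side may be an incomplete array that
+          // the other completes (e.g. a tentative "extern int a[];" against a
+          // defining "int a[10];"); merge toward the complete array type.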
+ const ArrayType *FoundArray
+ = Importer.getToContext().getAsArrayType(FoundVar->getType());
+ const ArrayType *TArray
+ = Importer.getToContext().getAsArrayType(D->getType());
+ if (FoundArray && TArray) {
+ if (isa<IncompleteArrayType>(FoundArray) &&
+ isa<ConstantArrayType>(TArray)) {
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ FoundVar->setType(T);
+ MergeWithVar = FoundVar;
+ break;
+ } else if (isa<IncompleteArrayType>(TArray) &&
+ isa<ConstantArrayType>(FoundArray)) {
+ MergeWithVar = FoundVar;
+ break;
+ }
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_variable_type_inconsistent)
+ << Name << D->getType() << FoundVar->getType();
+ Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
+ << FoundVar->getType();
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (MergeWithVar) {
+ // An equivalent variable with external linkage has been found. Link
+ // the two declarations, then merge them.
+ Importer.Imported(D, MergeWithVar);
+
+ if (VarDecl *DDef = D->getDefinition()) {
+ if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
+ Importer.ToDiag(ExistingDef->getLocation(),
+ diag::err_odr_variable_multiple_def)
+ << Name;
+ Importer.FromDiag(DDef->getLocation(), diag::note_odr_defined_here);
+ } else {
+ Expr *Init = Importer.Import(DDef->getInit());
+ MergeWithVar->setInit(Init);
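+          // Carry over the cached integral-constant-expression evaluation
+          // when the "from" initializer has already been checked.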
+ if (DDef->isInitKnownICE()) {
+ EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = DDef->isInitICE();
+ }
+ }
+ }
+
+ return MergeWithVar;
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported variable.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo,
+ D->getStorageClass(),
+ D->getStorageClassAsWritten());
+ ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToVar->setAccess(D->getAccess());
+ ToVar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToVar);
+ LexicalDC->addDeclInternal(ToVar);
+
+ // Merge the initializer.
+ // FIXME: Can we really import any initializer? Alternatively, we could force
+ // ourselves to import every declaration of a variable and then only use
+ // getInit() here.
+ ToVar->setInit(Importer.Import(const_cast<Expr *>(D->getAnyInitializer())));
+
+ // FIXME: Other bits to merge?
+
+ return ToVar;
+}
+
+Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported parameter.
+ ImplicitParamDecl *ToParm
+ = ImplicitParamDecl::Create(Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(),
+ T);
+ return Importer.Imported(D, ToParm);
+}
+
+Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported parameter.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ ParmVarDecl *ToParm = ParmVarDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getStorageClass(),
+ D->getStorageClassAsWritten(),
+ /*FIXME: Default argument*/ 0);
+ ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
+ return Importer.Imported(D, ToParm);
+}
+
+Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ // Import the major distinguishing characteristics of a method.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
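+      // Instance methods and class methods live in separate namespaces; only
+      // try to merge a method of the same kind.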
+ if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
+ continue;
+
+ // Check return types.
+ if (!Importer.IsStructurallyEquivalent(D->getResultType(),
+ FoundMethod->getResultType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->getResultType() << FoundMethod->getResultType();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // Check the number of parameters.
+ if (D->param_size() != FoundMethod->param_size()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->param_size() << FoundMethod->param_size();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // Check parameter types.
+ for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end(), FoundP = FoundMethod->param_begin();
+ P != PEnd; ++P, ++FoundP) {
+ if (!Importer.IsStructurallyEquivalent((*P)->getType(),
+ (*FoundP)->getType())) {
+ Importer.FromDiag((*P)->getLocation(),
+ diag::err_odr_objc_method_param_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << (*P)->getType() << (*FoundP)->getType();
+ Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
+ << (*FoundP)->getType();
+ return 0;
+ }
+ }
+
+      // Check variadic/non-variadic.
+ if (D->isVariadic() != FoundMethod->isVariadic()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
+ << D->isInstanceMethod() << Name;
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // FIXME: Any other bits we need to merge?
+ return Importer.Imported(D, FoundMethod);
+ }
+ }
+
+ // Import the result type.
+ QualType ResultTy = Importer.Import(D->getResultType());
+ if (ResultTy.isNull())
+ return 0;
+
+ TypeSourceInfo *ResultTInfo = Importer.Import(D->getResultTypeSourceInfo());
+
+ ObjCMethodDecl *ToMethod
+ = ObjCMethodDecl::Create(Importer.getToContext(),
+ Loc,
+ Importer.Import(D->getLocEnd()),
+ Name.getObjCSelector(),
+ ResultTy, ResultTInfo, DC,
+ D->isInstanceMethod(),
+ D->isVariadic(),
+ D->isSynthesized(),
+ D->isImplicit(),
+ D->isDefined(),
+ D->getImplementationControl(),
+ D->hasRelatedResultType());
+
+ // FIXME: When we decide to merge method definitions, we'll need to
+ // deal with implicit parameters.
+
+ // Import the parameters
+ SmallVector<ParmVarDecl *, 5> ToParams;
+ for (ObjCMethodDecl::param_iterator FromP = D->param_begin(),
+ FromPEnd = D->param_end();
+ FromP != FromPEnd;
+ ++FromP) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*FromP));
+ if (!ToP)
+ return 0;
+
+ ToParams.push_back(ToP);
+ }
+
+ // Set the parameters.
+ for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
+ ToParams[I]->setOwningFunction(ToMethod);
+ ToMethod->addDeclInternal(ToParams[I]);
+ }
+ SmallVector<SourceLocation, 12> SelLocs;
+ D->getSelectorLocs(SelLocs);
+ ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+
+ ToMethod->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToMethod);
+ LexicalDC->addDeclInternal(ToMethod);
+ return ToMethod;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ // Import the major distinguishing characteristics of a category.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ ObjCInterfaceDecl *ToInterface
+ = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
+ if (!ToInterface)
+ return 0;
+
+ // Determine if we've already encountered this category.
+ ObjCCategoryDecl *MergeWithCategory
+ = ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
+ ObjCCategoryDecl *ToCategory = MergeWithCategory;
+ if (!ToCategory) {
+ ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Loc,
+ Importer.Import(D->getCategoryNameLoc()),
+ Name.getAsIdentifierInfo(),
+ ToInterface,
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
+ ToCategory->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToCategory);
+ Importer.Imported(D, ToCategory);
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc
+ = D->protocol_loc_begin();
+ for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(),
+ FromProtoEnd = D->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return 0;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ } else {
+ Importer.Imported(D, ToCategory);
+ }
+
+ // Import all of the members of this category.
+ ImportDeclContext(D);
+
+ // If we have an implementation, import it as well.
+ if (D->getImplementation()) {
+ ObjCCategoryImplDecl *Impl
+ = cast_or_null<ObjCCategoryImplDecl>(
+ Importer.Import(D->getImplementation()));
+ if (!Impl)
+ return 0;
+
+ ToCategory->setImplementation(Impl);
+ }
+
+ return ToCategory;
+}
+
+bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
+ ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the protocol definition
+ To->startDefinition();
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCProtocolDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+ for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this protocol.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ // If this protocol has a definition in the translation unit we're coming
+ // from, but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCProtocolDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of a protocol.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ ObjCProtocolDecl *MergeWithProtocol = 0;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
+ continue;
+
+ if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecls[I])))
+ break;
+ }
+
+ ObjCProtocolDecl *ToProto = MergeWithProtocol;
+ if (!ToProto) {
+ ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
+ Name.getAsIdentifierInfo(), Loc,
+ Importer.Import(D->getAtStartLoc()),
+ /*PrevDecl=*/0);
+ ToProto->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProto);
+ }
+
+ Importer.Imported(D, ToProto);
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
+ return 0;
+
+ return ToProto;
+}
+
+bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
+ ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ // Check consistency of superclass.
+ ObjCInterfaceDecl *FromSuper = From->getSuperClass();
+ if (FromSuper) {
+ FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
+ if (!FromSuper)
+ return true;
+ }
+
+ ObjCInterfaceDecl *ToSuper = To->getSuperClass();
+ if ((bool)FromSuper != (bool)ToSuper ||
+ (FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
+ Importer.ToDiag(To->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << To->getDeclName();
+ if (ToSuper)
+ Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
+ << To->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(To->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (From->getSuperClass())
+ Importer.FromDiag(From->getSuperClassLoc(),
+ diag::note_odr_objc_superclass)
+ << From->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(From->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the definition.
+ To->startDefinition();
+
+ // If this class has a superclass, import it.
+ if (From->getSuperClass()) {
+ ObjCInterfaceDecl *Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(From->getSuperClass()));
+ if (!Super)
+ return true;
+
+ To->setSuperClass(Super);
+ To->setSuperClassLoc(Importer.Import(From->getSuperClassLoc()));
+ }
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCInterfaceDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+
+ for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ // Import categories. When the categories themselves are imported, they'll
+ // hook themselves into this interface.
+ for (ObjCCategoryDecl *FromCat = From->getCategoryList(); FromCat;
+ FromCat = FromCat->getNextClassCategory())
+ Importer.Import(FromCat);
+
+ // If we have an @implementation, import it as well.
+ if (From->getImplementation()) {
+ ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
+ Importer.Import(From->getImplementation()));
+ if (!Impl)
+ return true;
+
+ To->setImplementation(Impl);
+ }
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this class.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ // If this class has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCInterfaceDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of an @interface.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Look for an existing interface with the same name.
+ ObjCInterfaceDecl *MergeWithIface = 0;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecls[I])))
+ break;
+ }
+
+ // Create an interface declaration, if one does not already exist.
+ ObjCInterfaceDecl *ToIface = MergeWithIface;
+ if (!ToIface) {
+ ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/0,Loc,
+ D->isImplicitInterfaceDecl());
+ ToIface->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToIface);
+ }
+ Importer.Imported(D, ToIface);
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
+ return 0;
+
+ return ToIface;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>(
+ Importer.Import(D->getCategoryDecl()));
+ if (!Category)
+ return 0;
+
+ ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
+ if (!ToImpl) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
+ ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getIdentifier()),
+ Category->getClassInterface(),
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ CategoryNameLoc);
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ }
+
+ LexicalDC->addDeclInternal(ToImpl);
+ Category->setImplementation(ToImpl);
+ }
+
+ Importer.Imported(D, ToImpl);
+ ImportDeclContext(D);
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ // Find the corresponding interface.
+ ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getClassInterface()));
+ if (!Iface)
+ return 0;
+
+ // Import the superclass, if any.
+ ObjCInterfaceDecl *Super = 0;
+ if (D->getSuperClass()) {
+ Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getSuperClass()));
+ if (!Super)
+ return 0;
+ }
+
+ ObjCImplementationDecl *Impl = Iface->getImplementation();
+ if (!Impl) {
+ // We haven't imported an implementation yet. Create a new @implementation
+ // now.
+ Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
+ Importer.ImportContext(D->getDeclContext()),
+ Iface, Super,
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
+
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ DeclContext *LexicalDC
+ = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ Impl->setLexicalDeclContext(LexicalDC);
+ }
+
+ // Associate the implementation with the class it implements.
+ Iface->setImplementation(Impl);
+ Importer.Imported(D, Iface->getImplementation());
+ } else {
+ Importer.Imported(D, Iface->getImplementation());
+
+ // Verify that the existing @implementation has the same superclass.
+ if ((Super && !Impl->getSuperClass()) ||
+ (!Super && Impl->getSuperClass()) ||
+ (Super && Impl->getSuperClass() &&
+ !declaresSameEntity(Super->getCanonicalDecl(), Impl->getSuperClass()))) {
+ Importer.ToDiag(Impl->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << Iface->getDeclName();
+ // FIXME: It would be nice to have the location of the superclass
+ // below.
+ if (Impl->getSuperClass())
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_superclass)
+ << Impl->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (D->getSuperClass())
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_superclass)
+ << D->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ return 0;
+ }
+ }
+
+ // Import all of the members of this @implementation.
+ ImportDeclContext(D);
+
+ return Impl;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ // Import the major distinguishing characteristics of an @property.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Check whether we have already imported this property.
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCPropertyDecl *FoundProp
+ = dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
+ // Check property types.
+ if (!Importer.IsStructurallyEquivalent(D->getType(),
+ FoundProp->getType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
+ << Name << D->getType() << FoundProp->getType();
+ Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
+ << FoundProp->getType();
+ return 0;
+ }
+
+ // FIXME: Check property attributes, getters, setters, etc.?
+
+ // Consider these properties to be equivalent.
+ Importer.Imported(D, FoundProp);
+ return FoundProp;
+ }
+ }
+
+ // Import the type.
+ TypeSourceInfo *T = Importer.Import(D->getTypeSourceInfo());
+ if (!T)
+ return 0;
+
+ // Create the new property.
+ ObjCPropertyDecl *ToProperty
+ = ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getAtLoc()),
+ Importer.Import(D->getLParenLoc()),
+ T,
+ D->getPropertyImplementation());
+ Importer.Imported(D, ToProperty);
+ ToProperty->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProperty);
+
+ ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+ ToProperty->setPropertyAttributesAsWritten(
+ D->getPropertyAttributesAsWritten());
+ ToProperty->setGetterName(Importer.Import(D->getGetterName()));
+ ToProperty->setSetterName(Importer.Import(D->getSetterName()));
+ ToProperty->setGetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
+ ToProperty->setSetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
+ ToProperty->setPropertyIvarDecl(
+ cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+ return ToProperty;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
+ Importer.Import(D->getPropertyDecl()));
+ if (!Property)
+ return 0;
+
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ // Import the lexical declaration context.
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
+ if (!InImpl)
+ return 0;
+
+ // Import the ivar (for an @synthesize).
+ ObjCIvarDecl *Ivar = 0;
+ if (D->getPropertyIvarDecl()) {
+ Ivar = cast_or_null<ObjCIvarDecl>(
+ Importer.Import(D->getPropertyIvarDecl()));
+ if (!Ivar)
+ return 0;
+ }
+
+ ObjCPropertyImplDecl *ToImpl
+ = InImpl->FindPropertyImplDecl(Property->getIdentifier());
+ if (!ToImpl) {
+ ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ Property,
+ D->getPropertyImplementation(),
+ Ivar,
+ Importer.Import(D->getPropertyIvarDeclLoc()));
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToImpl);
+ LexicalDC->addDeclInternal(ToImpl);
+ } else {
+ // Check that we have the same kind of property implementation (@synthesize
+ // vs. @dynamic).
+ if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
+ Importer.ToDiag(ToImpl->getLocation(),
+ diag::err_odr_objc_property_impl_kind_inconsistent)
+ << Property->getDeclName()
+ << (ToImpl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic);
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_property_impl_kind)
+ << D->getPropertyDecl()->getDeclName()
+ << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
+ return 0;
+ }
+
+    // For @synthesize, check that we have the same ivar.
+ if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
+ Ivar != ToImpl->getPropertyIvarDecl()) {
+ Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
+ diag::err_odr_objc_synthesize_ivar_inconsistent)
+ << Property->getDeclName()
+ << ToImpl->getPropertyIvarDecl()->getDeclName()
+ << Ivar->getDeclName();
+ Importer.FromDiag(D->getPropertyIvarDeclLoc(),
+ diag::note_odr_objc_synthesize_ivar_here)
+ << D->getPropertyIvarDecl()->getDeclName();
+ return 0;
+ }
+
+ // Merge the existing implementation with the new implementation.
+ Importer.Imported(D, ToImpl);
+ }
+
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+  // For template parameters, we adopt the translation unit as our declaration
+ // context. This context will be fixed when the actual template declaration
+ // is created.
+
+ // FIXME: Import default argument.
+ return TemplateTypeParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ D->getDepth(),
+ D->getIndex(),
+ Importer.Import(D->getIdentifier()),
+ D->wasDeclaredWithTypename(),
+ D->isParameterPack());
+}
+
+Decl *
+ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the type of this declaration.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Import type-source information.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ if (D->getTypeSourceInfo() && !TInfo)
+ return 0;
+
+ // FIXME: Import default argument.
+
+ return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getInnerLocStart()),
+ Loc, D->getDepth(), D->getPosition(),
+ Name.getAsIdentifierInfo(),
+ T, D->isParameterPack(), TInfo);
+}
+
+Decl *
+ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import template parameters.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return 0;
+
+ // FIXME: Import default argument.
+
+ return TemplateTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Loc, D->getDepth(), D->getPosition(),
+ D->isParameterPack(),
+ Name.getAsIdentifierInfo(),
+ TemplateParams);
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ CXXRecordDecl *Definition
+ = cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
+ if (Definition && Definition != D->getTemplatedDecl()) {
+ Decl *ImportedDef
+ = Importer.Import(Definition->getDescribedClassTemplate());
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this class template.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // We may already have a template of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (ClassTemplateDecl *FoundTemplate
+ = dyn_cast<ClassTemplateDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundTemplate)) {
+ // The class templates structurally match; call it the same template.
+ // FIXME: We may be filling in a forward declaration here. Handle
+ // this case!
+ Importer.Imported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.Imported(D, FoundTemplate);
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+
+ if (!Name)
+ return 0;
+ }
+
+ CXXRecordDecl *DTemplated = D->getTemplatedDecl();
+
+ // Create the declaration that is being templated.
+ SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
+ SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
+ CXXRecordDecl *D2Templated = CXXRecordDecl::Create(Importer.getToContext(),
+ DTemplated->getTagKind(),
+ DC, StartLoc, IdLoc,
+ Name.getAsIdentifierInfo());
+ D2Templated->setAccess(DTemplated->getAccess());
+ D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
+ D2Templated->setLexicalDeclContext(LexicalDC);
+
+ // Create the class template declaration itself.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return 0;
+
+ ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
+ Loc, Name, TemplateParams,
+ D2Templated,
+ /*PrevDecl=*/0);
+ D2Templated->setDescribedClassTemplate(D2);
+
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+
+ // Note the relationship between the class templates.
+ Importer.Imported(D, D2);
+ Importer.Imported(DTemplated, D2Templated);
+
+ if (DTemplated->isCompleteDefinition() &&
+ !D2Templated->isCompleteDefinition()) {
+ // FIXME: Import definition!
+ }
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ ClassTemplateDecl *ClassTemplate
+ = cast_or_null<ClassTemplateDecl>(Importer.Import(
+ D->getSpecializedTemplate()));
+ if (!ClassTemplate)
+ return 0;
+
+ // Import the context of this declaration.
+ DeclContext *DC = ClassTemplate->getDeclContext();
+ if (!DC)
+ return 0;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ SourceLocation IdLoc = Importer.Import(D->getLocation());
+
+ // Import template arguments.
+ SmallVector<TemplateArgument, 2> TemplateArgs;
+ if (ImportTemplateArguments(D->getTemplateArgs().data(),
+ D->getTemplateArgs().size(),
+ TemplateArgs))
+ return 0;
+
+ // Try to find an existing specialization with these template arguments.
+ void *InsertPos = 0;
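+  // InsertPos remembers where a newly built specialization should be added
+  // to the template's folding set if no existing one matches.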
+ ClassTemplateSpecializationDecl *D2
+ = ClassTemplate->findSpecialization(TemplateArgs.data(),
+ TemplateArgs.size(), InsertPos);
+ if (D2) {
+ // We already have a class template specialization with these template
+ // arguments.
+
+ // FIXME: Check for specialization vs. instantiation errors.
+
+ if (RecordDecl *FoundDef = D2->getDefinition()) {
+ if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+        // The record types structurally match, or the "from" translation
+        // unit only had a forward declaration anyway; treat them as the
+        // same type.
+ return Importer.Imported(D, FoundDef);
+ }
+ }
+ } else {
+ // Create a new specialization.
+ D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
+ D->getTagKind(), DC,
+ StartLoc, IdLoc,
+ ClassTemplate,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ /*PrevDecl=*/0);
+ D2->setSpecializationKind(D->getSpecializationKind());
+
+ // Add this specialization to the class template.
+ ClassTemplate->AddSpecialization(D2, InsertPos);
+
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+
+ // Add the specialization to this context.
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ }
+ Importer.Imported(D, D2);
+
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ return 0;
+
+ return D2;
+}
+
+//----------------------------------------------------------------------------
+// Import Statements
+//----------------------------------------------------------------------------
+
+Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
+ Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
+ << S->getStmtClassName();
+ return 0;
+}
+
+//----------------------------------------------------------------------------
+// Import Expressions
+//----------------------------------------------------------------------------
+Expr *ASTNodeImporter::VisitExpr(Expr *E) {
+ Importer.FromDiag(E->getLocStart(), diag::err_unsupported_ast_node)
+ << E->getStmtClassName();
+ return 0;
+}
+
+Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
+ ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
+ if (!ToD)
+ return 0;
+
+ NamedDecl *FoundD = 0;
+ if (E->getDecl() != E->getFoundDecl()) {
+ FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl()));
+ if (!FoundD)
+ return 0;
+ }
+
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
+ Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()),
+ ToD,
+ E->refersToEnclosingLocal(),
+ Importer.Import(E->getLocation()),
+ T, E->getValueKind(),
+ FoundD,
+ /*FIXME:TemplateArgs=*/0);
+ if (E->hadMultipleCandidates())
+ DRE->setHadMultipleCandidates(true);
+ return DRE;
+}
+
+Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ return IntegerLiteral::Create(Importer.getToContext(),
+ E->getValue(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
+ E->getKind(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext())
+ ParenExpr(Importer.Import(E->getLParen()),
+ Importer.Import(E->getRParen()),
+ SubExpr);
+}
+
+Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *E) {
+ QualType ResultType = Importer.Import(E->getType());
+
+ if (E->isArgumentType()) {
+ TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
+ if (!TInfo)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
+ TInfo, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+ }
+
+ Expr *SubExpr = Importer.Import(E->getArgumentExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
+ SubExpr, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return 0;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return 0;
+
+ return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ QualType CompLHSType = Importer.Import(E->getComputationLHSType());
+ if (CompLHSType.isNull())
+ return 0;
+
+ QualType CompResultType = Importer.Import(E->getComputationResultType());
+ if (CompResultType.isNull())
+ return 0;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return 0;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return 0;
+
+ return new (Importer.getToContext())
+ CompoundAssignOperator(LHS, RHS, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ CompLHSType, CompResultType,
+ Importer.Import(E->getOperatorLoc()));
+}
+
+static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) {
+ if (E->path_empty()) return false;
+
+ // TODO: import cast paths
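+  // Until that is implemented, returning true reports failure so that casts
+  // carrying an inheritance path are rejected rather than imported with an
+  // empty path.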
+ return true;
+}
+
+Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ CXXCastPath BasePath;
+ if (ImportCastPath(E, BasePath))
+ return 0;
+
+ return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
+ SubExpr, &BasePath, E->getValueKind());
+}
+
+Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
+ if (!TInfo && E->getTypeInfoAsWritten())
+ return 0;
+
+ CXXCastPath BasePath;
+ if (ImportCastPath(E, BasePath))
+ return 0;
+
+ return CStyleCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ Importer.Import(E->getLParenLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
+ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager,
+ bool MinimalImport)
+ : ToContext(ToContext), FromContext(FromContext),
+ ToFileManager(ToFileManager), FromFileManager(FromFileManager),
+ Minimal(MinimalImport)
+{
+ ImportedDecls[FromContext.getTranslationUnitDecl()]
+ = ToContext.getTranslationUnitDecl();
+}
+
+ASTImporter::~ASTImporter() { }
+
+QualType ASTImporter::Import(QualType FromT) {
+ if (FromT.isNull())
+ return QualType();
+
+ const Type *fromTy = FromT.getTypePtr();
+
+ // Check whether we've already imported this type.
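+  // Only the type itself is cached; FromT's local qualifiers are re-applied
+  // below via getQualifiedType().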
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(fromTy);
+ if (Pos != ImportedTypes.end())
+ return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
+
+ // Import the type
+ ASTNodeImporter Importer(*this);
+ QualType ToT = Importer.Visit(fromTy);
+ if (ToT.isNull())
+ return ToT;
+
+ // Record the imported type.
+ ImportedTypes[fromTy] = ToT.getTypePtr();
+
+ return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
+}
+
+TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
+ if (!FromTSI)
+ return FromTSI;
+
+ // FIXME: For now we just create a "trivial" type source info based
+ // on the type and a single location. Implement a real version of this.
+ QualType T = Import(FromTSI->getType());
+ if (T.isNull())
+ return 0;
+
+ return ToContext.getTrivialTypeSourceInfo(T,
+ FromTSI->getTypeLoc().getLocStart());
+}
+
+Decl *ASTImporter::Import(Decl *FromD) {
+ if (!FromD)
+ return 0;
+
+ ASTNodeImporter Importer(*this);
+
+ // Check whether we've already imported this declaration.
+ llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end()) {
+ Decl *ToD = Pos->second;
+ Importer.ImportDefinitionIfNeeded(FromD, ToD);
+ return ToD;
+ }
+
+  // Import the declaration.
+ Decl *ToD = Importer.Visit(FromD);
+ if (!ToD)
+ return 0;
+
+ // Record the imported declaration.
+ ImportedDecls[FromD] = ToD;
+
+ if (TagDecl *FromTag = dyn_cast<TagDecl>(FromD)) {
+ // Keep track of anonymous tags that have an associated typedef.
+ if (FromTag->getTypedefNameForAnonDecl())
+ AnonTagsWithPendingTypedefs.push_back(FromTag);
+ } else if (TypedefNameDecl *FromTypedef = dyn_cast<TypedefNameDecl>(FromD)) {
+ // When we've finished transforming a typedef, see whether it was the
+ // typedef for an anonymous tag.
+ for (SmallVector<TagDecl *, 4>::iterator
+ FromTag = AnonTagsWithPendingTypedefs.begin(),
+ FromTagEnd = AnonTagsWithPendingTypedefs.end();
+ FromTag != FromTagEnd; ++FromTag) {
+ if ((*FromTag)->getTypedefNameForAnonDecl() == FromTypedef) {
+ if (TagDecl *ToTag = cast_or_null<TagDecl>(Import(*FromTag))) {
+ // We found the typedef for an anonymous tag; link them.
+ ToTag->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToD));
+ AnonTagsWithPendingTypedefs.erase(FromTag);
+ break;
+ }
+ }
+ }
+ }
+
+ return ToD;
+}
+
+DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
+ if (!FromDC)
+ return FromDC;
+
+ DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
+ if (!ToDC)
+ return 0;
+
+ // When we're using a record/enum/Objective-C class/protocol as a context, we
+ // need it to have a definition.
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
+ RecordDecl *FromRecord = cast<RecordDecl>(FromDC);
+ if (ToRecord->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromRecord->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToRecord);
+ }
+ } else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
+ EnumDecl *FromEnum = cast<EnumDecl>(FromDC);
+ if (ToEnum->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromEnum->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToEnum);
+ }
+ } else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
+ ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC);
+ if (ToClass->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToClass);
+ }
+ } else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
+ ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC);
+ if (ToProto->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToProto);
+ }
+ }
+
+ return ToDC;
+}
+
+Expr *ASTImporter::Import(Expr *FromE) {
+ if (!FromE)
+ return 0;
+
+ return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
+}
+
+Stmt *ASTImporter::Import(Stmt *FromS) {
+ if (!FromS)
+ return 0;
+
+  // Check whether we've already imported this statement.
+ llvm::DenseMap<Stmt *, Stmt *>::iterator Pos = ImportedStmts.find(FromS);
+ if (Pos != ImportedStmts.end())
+ return Pos->second;
+
+  // Import the statement
+ ASTNodeImporter Importer(*this);
+ Stmt *ToS = Importer.Visit(FromS);
+ if (!ToS)
+ return 0;
+
+  // Record the imported statement.
+ ImportedStmts[FromS] = ToS;
+ return ToS;
+}
+
+NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
+ if (!FromNNS)
+ return 0;
+
+ NestedNameSpecifier *prefix = Import(FromNNS->getPrefix());
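+  // A null prefix is valid here: the outermost specifier has no prefix.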
+
+ switch (FromNNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (IdentifierInfo *II = Import(FromNNS->getAsIdentifier())) {
+ return NestedNameSpecifier::Create(ToContext, prefix, II);
+ }
+ return 0;
+
+ case NestedNameSpecifier::Namespace:
+ if (NamespaceDecl *NS =
+ cast<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
+ return NestedNameSpecifier::Create(ToContext, prefix, NS);
+ }
+ return 0;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ if (NamespaceAliasDecl *NSAD =
+ cast<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
+ return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
+ }
+ return 0;
+
+ case NestedNameSpecifier::Global:
+ return NestedNameSpecifier::GlobalSpecifier(ToContext);
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = Import(QualType(FromNNS->getAsType(), 0u));
+ if (!T.isNull()) {
+ bool bTemplate = FromNNS->getKind() ==
+ NestedNameSpecifier::TypeSpecWithTemplate;
+ return NestedNameSpecifier::Create(ToContext, prefix,
+ bTemplate, T.getTypePtr());
+ }
+ }
+ return 0;
+ }
+
+ llvm_unreachable("Invalid nested name specifier kind");
+}
+
+NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
+ // FIXME: Implement!
+ return NestedNameSpecifierLoc();
+}
+
+TemplateName ASTImporter::Import(TemplateName From) {
+ switch (From.getKind()) {
+ case TemplateName::Template:
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return TemplateName(ToTemplate);
+
+ return TemplateName();
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
+ UnresolvedSet<2> ToTemplates;
+ for (OverloadedTemplateStorage::iterator I = FromStorage->begin(),
+ E = FromStorage->end();
+ I != E; ++I) {
+ if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I)))
+ ToTemplates.addDecl(To);
+ else
+ return TemplateName();
+ }
+ return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
+ ToTemplates.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
+ NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return ToContext.getQualifiedTemplateName(Qualifier,
+ QTN->hasTemplateKeyword(),
+ ToTemplate);
+
+ return TemplateName();
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = From.getAsDependentTemplateName();
+ NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (DTN->isIdentifier()) {
+ return ToContext.getDependentTemplateName(Qualifier,
+ Import(DTN->getIdentifier()));
+ }
+
+ return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = From.getAsSubstTemplateTemplateParm();
+ TemplateTemplateParmDecl *param
+ = cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
+ if (!param)
+ return TemplateName();
+
+ TemplateName replacement = Import(subst->getReplacement());
+ if (replacement.isNull()) return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParm(param, replacement);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = From.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *Param
+ = cast_or_null<TemplateTemplateParmDecl>(
+ Import(SubstPack->getParameterPack()));
+ if (!Param)
+ return TemplateName();
+
+ ASTNodeImporter Importer(*this);
+ TemplateArgument ArgPack
+ = Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
+ if (ArgPack.isNull())
+ return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+ }
+
+ llvm_unreachable("Invalid template name kind");
+}
+
+SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
+ if (FromLoc.isInvalid())
+ return SourceLocation();
+
+ SourceManager &FromSM = FromContext.getSourceManager();
+
+ // For now, map everything down to its spelling location, so that we
+ // don't have to import macro expansions.
+ // FIXME: Import macro expansions!
+ FromLoc = FromSM.getSpellingLoc(FromLoc);
+ std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
+ SourceManager &ToSM = ToContext.getSourceManager();
+ return ToSM.getLocForStartOfFile(Import(Decomposed.first))
+ .getLocWithOffset(Decomposed.second);
+}
+
+SourceRange ASTImporter::Import(SourceRange FromRange) {
+ return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
+}
+
+FileID ASTImporter::Import(FileID FromID) {
+ llvm::DenseMap<FileID, FileID>::iterator Pos
+ = ImportedFileIDs.find(FromID);
+ if (Pos != ImportedFileIDs.end())
+ return Pos->second;
+
+ SourceManager &FromSM = FromContext.getSourceManager();
+ SourceManager &ToSM = ToContext.getSourceManager();
+ const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID);
+ assert(FromSLoc.isFile() && "Cannot handle macro expansions yet");
+
+ // Include location of this file.
+ SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
+
+  // Map the FileID over to the "to" source manager.
+ FileID ToID;
+ const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
+ if (Cache->OrigEntry) {
+ // FIXME: We probably want to use getVirtualFile(), so we don't hit the
+ // disk again
+ // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
+ // than mmap the files several times.
+ const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName());
+ ToID = ToSM.createFileID(Entry, ToIncludeLoc,
+ FromSLoc.getFile().getFileCharacteristic());
+ } else {
+ // FIXME: We want to re-use the existing MemoryBuffer!
+ const llvm::MemoryBuffer *
+ FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
+ llvm::MemoryBuffer *ToBuf
+ = llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
+ FromBuf->getBufferIdentifier());
+ ToID = ToSM.createFileIDForMemBuffer(ToBuf);
+ }
+
+ ImportedFileIDs[FromID] = ToID;
+ return ToID;
+}
+
+void ASTImporter::ImportDefinition(Decl *From) {
+ Decl *To = Import(From);
+ if (!To)
+ return;
+
+ if (DeclContext *FromDC = cast<DeclContext>(From)) {
+ ASTNodeImporter Importer(*this);
+
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) {
+ if (!ToRecord->getDefinition()) {
+ Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) {
+ if (!ToEnum->getDefinition()) {
+ Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
+ if (!ToIFace->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
+ if (!ToProto->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ Importer.ImportDeclContext(FromDC, true);
+ }
+}
+
+DeclarationName ASTImporter::Import(DeclarationName FromName) {
+ if (!FromName)
+ return DeclarationName();
+
+ switch (FromName.getNameKind()) {
+ case DeclarationName::Identifier:
+ return Import(FromName.getAsIdentifierInfo());
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return Import(FromName.getObjCSelector());
+
+ case DeclarationName::CXXConstructorName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXConstructorName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXDestructorName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXDestructorName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXConversionFunctionName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXConversionFunctionName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXOperatorName:
+ return ToContext.DeclarationNames.getCXXOperatorName(
+ FromName.getCXXOverloadedOperator());
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return ToContext.DeclarationNames.getCXXLiteralOperatorName(
+ Import(FromName.getCXXLiteralIdentifier()));
+
+ case DeclarationName::CXXUsingDirective:
+ // FIXME: STATICS!
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
+}
+
+IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
+ if (!FromId)
+ return 0;
+
+ return &ToContext.Idents.get(FromId->getName());
+}
+
+Selector ASTImporter::Import(Selector FromSel) {
+ if (FromSel.isNull())
+ return Selector();
+
+ SmallVector<IdentifierInfo *, 4> Idents;
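+  // Slot 0 always holds an identifier, even for zero-argument selectors.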
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
+ for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I)
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I)));
+ return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data());
+}
+
+DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name,
+ DeclContext *DC,
+ unsigned IDNS,
+ NamedDecl **Decls,
+ unsigned NumDecls) {
+ return Name;
+}
+
+DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) {
+ return ToContext.getDiagnostics().Report(Loc, DiagID);
+}
+
+DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
+ return FromContext.getDiagnostics().Report(Loc, DiagID);
+}
+
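+// Give the declaration an empty definition so that it can be used as a
+// DeclContext when no real definition is available to import.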
+void ASTImporter::CompleteDecl (Decl *D) {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (!ID->getDefinition())
+ ID->startDefinition();
+ }
+ else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (!PD->getDefinition())
+ PD->startDefinition();
+ }
+ else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (!TD->getDefinition() && !TD->isBeingDefined()) {
+ TD->startDefinition();
+ TD->setCompleteDefinition(true);
+ }
+ }
+ else {
+ assert (0 && "CompleteDecl called on a Decl that can't be completed");
+ }
+}
+
+Decl *ASTImporter::Imported(Decl *From, Decl *To) {
+ ImportedDecls[From] = To;
+ return To;
+}
+
+bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To) {
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(From.getTypePtr());
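+  // If 'From' was already imported and its import has the same type as 'To',
+  // the two are trivially equivalent; otherwise fall back to a full
+  // structural comparison.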
+ if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
+ return true;
+
+ StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls);
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
diff --git a/clang/lib/AST/AttrImpl.cpp b/clang/lib/AST/AttrImpl.cpp
new file mode 100644
index 0000000..cffcc65
--- /dev/null
+++ b/clang/lib/AST/AttrImpl.cpp
@@ -0,0 +1,26 @@
+//===--- AttrImpl.cpp - Classes for representing attributes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains out-of-line virtual methods for Attr classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+using namespace clang;
+
+Attr::~Attr() { }
+
+void InheritableAttr::anchor() { }
+
+void InheritableParamAttr::anchor() { }
+
+#include "clang/AST/AttrImpl.inc"
diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt
new file mode 100644
index 0000000..716459a
--- /dev/null
+++ b/clang/lib/AST/CMakeLists.txt
@@ -0,0 +1,57 @@
+set(LLVM_LINK_COMPONENTS support)
+
+set(LLVM_USED_LIBS clangBasic clangLex)
+
+add_clang_library(clangAST
+ APValue.cpp
+ ASTConsumer.cpp
+ ASTContext.cpp
+ ASTDiagnostic.cpp
+ ASTImporter.cpp
+ AttrImpl.cpp
+ CXXInheritance.cpp
+ Decl.cpp
+ DeclarationName.cpp
+ DeclBase.cpp
+ DeclCXX.cpp
+ DeclFriend.cpp
+ DeclGroup.cpp
+ DeclObjC.cpp
+ DeclPrinter.cpp
+ DeclTemplate.cpp
+ DumpXML.cpp
+ Expr.cpp
+ ExprClassification.cpp
+ ExprConstant.cpp
+ ExprCXX.cpp
+ ExternalASTSource.cpp
+ InheritViz.cpp
+ ItaniumCXXABI.cpp
+ ItaniumMangle.cpp
+ LambdaMangleContext.cpp
+ Mangle.cpp
+ MicrosoftCXXABI.cpp
+ MicrosoftMangle.cpp
+ NestedNameSpecifier.cpp
+ NSAPI.cpp
+ ParentMap.cpp
+ RecordLayout.cpp
+ RecordLayoutBuilder.cpp
+ SelectorLocationsKind.cpp
+ Stmt.cpp
+ StmtDumper.cpp
+ StmtIterator.cpp
+ StmtPrinter.cpp
+ StmtProfile.cpp
+ StmtViz.cpp
+ TemplateBase.cpp
+ TemplateName.cpp
+ Type.cpp
+ TypeLoc.cpp
+ TypePrinter.cpp
+ VTableBuilder.cpp
+ VTTBuilder.cpp
+ )
+
+add_dependencies(clangAST ClangARMNeon ClangAttrClasses ClangAttrList
+ ClangAttrImpl ClangDiagnosticAST ClangDeclNodes ClangStmtNodes)
diff --git a/clang/lib/AST/CXXABI.h b/clang/lib/AST/CXXABI.h
new file mode 100644
index 0000000..943c43e
--- /dev/null
+++ b/clang/lib/AST/CXXABI.h
@@ -0,0 +1,48 @@
+//===----- CXXABI.h - Interface to C++ ABIs ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ AST support. Concrete
+// subclasses of this implement AST support for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_CXXABI_H
+#define LLVM_CLANG_AST_CXXABI_H
+
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+class ASTContext;
+class MemberPointerType;
+
+/// Implements C++ ABI-specific semantic analysis functions.
+class CXXABI {
+public:
+ virtual ~CXXABI();
+
+ /// Returns the size of a member pointer in multiples of the target
+ /// pointer size.
+ virtual unsigned getMemberPointerSize(const MemberPointerType *MPT) const = 0;
+
+ /// Returns the default calling convention for C++ methods.
+ virtual CallingConv getDefaultMethodCallConv() const = 0;
+
+  /// Returns whether the given class is nearly empty, with just virtual pointers
+  /// and no data except possibly virtual bases.
+ virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0;
+};
+
+/// Creates an instance of a C++ ABI class.
+CXXABI *CreateARMCXXABI(ASTContext &Ctx);
+CXXABI *CreateItaniumCXXABI(ASTContext &Ctx);
+CXXABI *CreateMicrosoftCXXABI(ASTContext &Ctx);
+}
+
+#endif
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
new file mode 100644
index 0000000..2186730
--- /dev/null
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -0,0 +1,718 @@
+//===------ CXXInheritance.cpp - C++ Inheritance ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides routines that help analyze C++ inheritance hierarchies.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/DeclCXX.h"
+#include <algorithm>
+#include <set>
+
+using namespace clang;
+
+/// \brief Computes the set of declarations referenced by these base
+/// paths.
+void CXXBasePaths::ComputeDeclsFound() {
+ assert(NumDeclsFound == 0 && !DeclsFound &&
+ "Already computed the set of declarations");
+
+ SmallVector<NamedDecl *, 8> Decls;
+ for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
+ Decls.push_back(*Path->Decls.first);
+
+ // Eliminate duplicated decls.
+ llvm::array_pod_sort(Decls.begin(), Decls.end());
+ Decls.erase(std::unique(Decls.begin(), Decls.end()), Decls.end());
+
+ NumDeclsFound = Decls.size();
+ DeclsFound = new NamedDecl * [NumDeclsFound];
+ std::copy(Decls.begin(), Decls.end(), DeclsFound);
+}
+
+CXXBasePaths::decl_iterator CXXBasePaths::found_decls_begin() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+ return DeclsFound;
+}
+
+CXXBasePaths::decl_iterator CXXBasePaths::found_decls_end() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+ return DeclsFound + NumDeclsFound;
+}
+
+/// isAmbiguous - Determines whether the set of paths provided is
+/// ambiguous, i.e., there are two or more paths that refer to
+/// different base class subobjects of the same type. BaseType must be
+/// an unqualified, canonical class type.
+bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
+ BaseType = BaseType.getUnqualifiedType();
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ return Subobjects.second + (Subobjects.first? 1 : 0) > 1;
+}
+
+/// clear - Clear out all prior path information.
+void CXXBasePaths::clear() {
+ Paths.clear();
+ ClassSubobjects.clear();
+ ScratchPath.clear();
+ DetectedVirtual = 0;
+}
+
+/// @brief Swaps the contents of this CXXBasePaths structure with the
+/// contents of Other.
+void CXXBasePaths::swap(CXXBasePaths &Other) {
+ std::swap(Origin, Other.Origin);
+ Paths.swap(Other.Paths);
+ ClassSubobjects.swap(Other.ClassSubobjects);
+ std::swap(FindAmbiguities, Other.FindAmbiguities);
+ std::swap(RecordPaths, Other.RecordPaths);
+ std::swap(DetectVirtual, Other.DetectVirtual);
+ std::swap(DetectedVirtual, Other.DetectedVirtual);
+}
+
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base) const {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+ return isDerivedFrom(Base, Paths);
+}
+
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
+ CXXBasePaths &Paths) const {
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+ return lookupInBases(&FindBaseClass,
+ const_cast<CXXRecordDecl*>(Base->getCanonicalDecl()),
+ Paths);
+}
+
+bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
+ if (!getNumVBases())
+ return false;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+ return lookupInBases(&FindVirtualBaseClass, Base->getCanonicalDecl(), Paths);
+}
+
+static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) {
+ // OpaqueTarget is a CXXRecordDecl*.
+ return Base->getCanonicalDecl() != (const CXXRecordDecl*) OpaqueTarget;
+}
+
+bool CXXRecordDecl::isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const {
+ return forallBases(BaseIsNot, (void*) Base->getCanonicalDecl());
+}
+
+bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
+ void *OpaqueData,
+ bool AllowShortCircuit) const {
+ SmallVector<const CXXRecordDecl*, 8> Queue;
+
+ const CXXRecordDecl *Record = this;
+ bool AllMatches = true;
+ while (true) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
+ const RecordType *Ty = I->getType()->getAs<RecordType>();
+ if (!Ty) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+
+ CXXRecordDecl *Base =
+ cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
+ if (!Base) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+
+ Queue.push_back(Base);
+ if (!BaseMatches(Base, OpaqueData)) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+ }
+
+ if (Queue.empty()) break;
+ Record = Queue.back(); // not actually a queue.
+ Queue.pop_back();
+ }
+
+ return AllMatches;
+}
+
+bool CXXBasePaths::lookupInBases(ASTContext &Context,
+ const CXXRecordDecl *Record,
+ CXXRecordDecl::BaseMatchesCallback *BaseMatches,
+ void *UserData) {
+ bool FoundPath = false;
+
+ // The access of the path down to this record.
+ AccessSpecifier AccessToHere = ScratchPath.Access;
+ bool IsFirstStep = ScratchPath.empty();
+
+ for (CXXRecordDecl::base_class_const_iterator BaseSpec = Record->bases_begin(),
+ BaseSpecEnd = Record->bases_end();
+ BaseSpec != BaseSpecEnd;
+ ++BaseSpec) {
+ // Find the record of the base class subobjects for this type.
+ QualType BaseType = Context.getCanonicalType(BaseSpec->getType())
+ .getUnqualifiedType();
+
+ // C++ [temp.dep]p3:
+ // In the definition of a class template or a member of a class template,
+ // if a base class of the class template depends on a template-parameter,
+ // the base class scope is not examined during unqualified name lookup
+ // either at the point of definition of the class template or member or
+    //   during an instantiation of the class template or member.
+ if (BaseType->isDependentType())
+ continue;
+
+ // Determine whether we need to visit this base class at all,
+ // updating the count of subobjects appropriately.
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ bool VisitBase = true;
+ bool SetVirtual = false;
+ if (BaseSpec->isVirtual()) {
+ VisitBase = !Subobjects.first;
+ Subobjects.first = true;
+ if (isDetectingVirtual() && DetectedVirtual == 0) {
+ // If this is the first virtual we find, remember it. If it turns out
+ // there is no base path here, we'll reset it later.
+ DetectedVirtual = BaseType->getAs<RecordType>();
+ SetVirtual = true;
+ }
+ } else
+ ++Subobjects.second;
+
+ if (isRecordingPaths()) {
+ // Add this base specifier to the current path.
+ CXXBasePathElement Element;
+ Element.Base = &*BaseSpec;
+ Element.Class = Record;
+ if (BaseSpec->isVirtual())
+ Element.SubobjectNumber = 0;
+ else
+ Element.SubobjectNumber = Subobjects.second;
+ ScratchPath.push_back(Element);
+
+ // Calculate the "top-down" access to this base class.
+ // The spec actually describes this bottom-up, but top-down is
+ // equivalent because the definition works out as follows:
+ // 1. Write down the access along each step in the inheritance
+ // chain, followed by the access of the decl itself.
+ // For example, in
+ // class A { public: int foo; };
+ // class B : protected A {};
+ // class C : public B {};
+ // class D : private C {};
+ // we would write:
+ // private public protected public
+ // 2. If 'private' appears anywhere except far-left, access is denied.
+ // 3. Otherwise, overall access is determined by the most restrictive
+ // access in the sequence.
+ if (IsFirstStep)
+ ScratchPath.Access = BaseSpec->getAccessSpecifier();
+ else
+ ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere,
+ BaseSpec->getAccessSpecifier());
+ }
+
+ // Track whether there's a path involving this specific base.
+ bool FoundPathThroughBase = false;
+
+ if (BaseMatches(BaseSpec, ScratchPath, UserData)) {
+ // We've found a path that terminates at this base.
+ FoundPath = FoundPathThroughBase = true;
+ if (isRecordingPaths()) {
+ // We have a path. Make a copy of it before moving on.
+ Paths.push_back(ScratchPath);
+ } else if (!isFindingAmbiguities()) {
+ // We found a path and we don't care about ambiguities;
+ // return immediately.
+ return FoundPath;
+ }
+ } else if (VisitBase) {
+ CXXRecordDecl *BaseRecord
+ = cast<CXXRecordDecl>(BaseSpec->getType()->getAs<RecordType>()
+ ->getDecl());
+ if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
+ // C++ [class.member.lookup]p2:
+ // A member name f in one sub-object B hides a member name f in
+ // a sub-object A if A is a base class sub-object of B. Any
+ // declarations that are so hidden are eliminated from
+ // consideration.
+
+ // There is a path to a base class that meets the criteria. If we're
+ // not collecting paths or finding ambiguities, we're done.
+ FoundPath = FoundPathThroughBase = true;
+ if (!isFindingAmbiguities())
+ return FoundPath;
+ }
+ }
+
+ // Pop this base specifier off the current path (if we're
+ // collecting paths).
+ if (isRecordingPaths()) {
+ ScratchPath.pop_back();
+ }
+
+ // If we set a virtual earlier, and this isn't a path, forget it again.
+ if (SetVirtual && !FoundPathThroughBase) {
+ DetectedVirtual = 0;
+ }
+ }
+
+ // Reset the scratch path access.
+ ScratchPath.Access = AccessToHere;
+
+ return FoundPath;
+}
+
+bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
+ void *UserData,
+ CXXBasePaths &Paths) const {
+ // If we didn't find anything, report that.
+ if (!Paths.lookupInBases(getASTContext(), this, BaseMatches, UserData))
+ return false;
+
+ // If we're not recording paths or we won't ever find ambiguities,
+ // we're done.
+ if (!Paths.isRecordingPaths() || !Paths.isFindingAmbiguities())
+ return true;
+
+ // C++ [class.member.lookup]p6:
+ // When virtual base classes are used, a hidden declaration can be
+ // reached along a path through the sub-object lattice that does
+ // not pass through the hiding declaration. This is not an
+ // ambiguity. The identical use with nonvirtual base classes is an
+ // ambiguity; in that case there is no unique instance of the name
+ // that hides all the others.
+ //
+ // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy
+ // way to make it any faster.
+ for (CXXBasePaths::paths_iterator P = Paths.begin(), PEnd = Paths.end();
+ P != PEnd; /* increment in loop */) {
+ bool Hidden = false;
+
+ for (CXXBasePath::iterator PE = P->begin(), PEEnd = P->end();
+ PE != PEEnd && !Hidden; ++PE) {
+ if (PE->Base->isVirtual()) {
+ CXXRecordDecl *VBase = 0;
+ if (const RecordType *Record = PE->Base->getType()->getAs<RecordType>())
+ VBase = cast<CXXRecordDecl>(Record->getDecl());
+ if (!VBase)
+ break;
+
+ // The declaration(s) we found along this path were found in a
+ // subobject of a virtual base. Check whether this virtual
+ // base is a subobject of any other path; if so, then the
+        // declarations in this path are hidden by that path.
+ for (CXXBasePaths::paths_iterator HidingP = Paths.begin(),
+ HidingPEnd = Paths.end();
+ HidingP != HidingPEnd;
+ ++HidingP) {
+ CXXRecordDecl *HidingClass = 0;
+ if (const RecordType *Record
+ = HidingP->back().Base->getType()->getAs<RecordType>())
+ HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+ if (!HidingClass)
+ break;
+
+ if (HidingClass->isVirtuallyDerivedFrom(VBase)) {
+ Hidden = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (Hidden)
+ P = Paths.Paths.erase(P);
+ else
+ ++P;
+ }
+
+ return true;
+}
+
+bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *BaseRecord) {
+ assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
+ "User data for FindBaseClass is not canonical!");
+ return Specifier->getType()->getAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
+bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *BaseRecord) {
+ assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
+         "User data for FindVirtualBaseClass is not canonical!");
+ return Specifier->isVirtual() &&
+ Specifier->getType()->getAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
+bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ for (Path.Decls = BaseRecord->lookup(N);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ if ((*Path.Decls.first)->isInIdentifierNamespace(IDNS_Tag))
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member;
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ for (Path.Decls = BaseRecord->lookup(N);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ if ((*Path.Decls.first)->isInIdentifierNamespace(IDNS))
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::
+FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ for (Path.Decls = BaseRecord->lookup(N);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ // FIXME: Refactor the "is it a nested-name-specifier?" check
+ if (isa<TypedefNameDecl>(*Path.Decls.first) ||
+ (*Path.Decls.first)->isInIdentifierNamespace(IDNS_Tag))
+ return true;
+ }
+
+ return false;
+}
+
+void OverridingMethods::add(unsigned OverriddenSubobject,
+ UniqueVirtualMethod Overriding) {
+ SmallVector<UniqueVirtualMethod, 4> &SubobjectOverrides
+ = Overrides[OverriddenSubobject];
+ if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(),
+ Overriding) == SubobjectOverrides.end())
+ SubobjectOverrides.push_back(Overriding);
+}
+
+void OverridingMethods::add(const OverridingMethods &Other) {
+ for (const_iterator I = Other.begin(), IE = Other.end(); I != IE; ++I) {
+ for (overriding_const_iterator M = I->second.begin(),
+ MEnd = I->second.end();
+ M != MEnd;
+ ++M)
+ add(I->first, *M);
+ }
+}
+
+void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) {
+ for (iterator I = begin(), IEnd = end(); I != IEnd; ++I) {
+ I->second.clear();
+ I->second.push_back(Overriding);
+ }
+}
+
+
+namespace {
+ class FinalOverriderCollector {
+ /// \brief The number of subobjects of a given class type that
+ /// occur within the class hierarchy.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount;
+
+ /// \brief Overriders for each virtual base subobject.
+ llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders;
+
+ CXXFinalOverriderMap FinalOverriders;
+
+ public:
+ ~FinalOverriderCollector();
+
+ void Collect(const CXXRecordDecl *RD, bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders);
+ };
+}
+
+void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
+ bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders) {
+ unsigned SubobjectNumber = 0;
+ if (!VirtualBase)
+ SubobjectNumber
+ = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
+
+ for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
+ BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
+ if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ if (Overriders.empty() && !Base->isVirtual()) {
+ // There are no other overriders of virtual member functions,
+ // so let the base class fill in our overriders for us.
+ Collect(BaseDecl, false, InVirtualSubobject, Overriders);
+ continue;
+ }
+
+      // Collect all of the overriders from the base class subobject
+      // and merge them into the set of overriders for this class.
+ // For virtual base classes, populate or use the cached virtual
+ // overrides so that we do not walk the virtual base class (and
+ // its base classes) more than once.
+ CXXFinalOverriderMap ComputedBaseOverriders;
+ CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
+ if (Base->isVirtual()) {
+ CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
+ if (!MyVirtualOverriders) {
+ MyVirtualOverriders = new CXXFinalOverriderMap;
+ Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
+ }
+
+ BaseOverriders = MyVirtualOverriders;
+ } else
+ Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);
+
+ // Merge the overriders from this base class into our own set of
+ // overriders.
+ for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(),
+ OMEnd = BaseOverriders->end();
+ OM != OMEnd;
+ ++OM) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
+ Overriders[CanonOM].add(OM->second);
+ }
+ }
+ }
+
+ for (CXXRecordDecl::method_iterator M = RD->method_begin(),
+ MEnd = RD->method_end();
+ M != MEnd;
+ ++M) {
+ // We only care about virtual methods.
+ if (!M->isVirtual())
+ continue;
+
+ CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());
+
+ if (CanonM->begin_overridden_methods()
+ == CanonM->end_overridden_methods()) {
+ // This is a new virtual function that does not override any
+ // other virtual function. Add it to the map of virtual
+      // functions for which we are tracking overriders.
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ continue;
+ }
+
+ // This virtual method overrides other virtual methods, so it does
+ // not add any new slots into the set of overriders. Instead, we
+ // replace entries in the set of overriders with the new
+ // overrider. To do so, we dig down to the original virtual
+ // functions using data recursion and update all of the methods it
+ // overrides.
+ typedef std::pair<CXXMethodDecl::method_iterator,
+ CXXMethodDecl::method_iterator> OverriddenMethods;
+ SmallVector<OverriddenMethods, 4> Stack;
+ Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(),
+ CanonM->end_overridden_methods()));
+ while (!Stack.empty()) {
+ OverriddenMethods OverMethods = Stack.back();
+ Stack.pop_back();
+
+ for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());
+
+ // C++ [class.virtual]p2:
+ // A virtual member function C::vf of a class object S is
+ // a final overrider unless the most derived class (1.8)
+ // of which S is a base class subobject (if any) declares
+ // or inherits another member function that overrides vf.
+ //
+ // Treating this object like the most derived class, we
+ // replace any overrides from base classes with this
+ // overriding virtual function.
+ Overriders[CanonOM].replaceAll(
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+
+ if (CanonOM->begin_overridden_methods()
+ == CanonOM->end_overridden_methods())
+ continue;
+
+ // Continue recursion to the methods that this virtual method
+ // overrides.
+ Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(),
+ CanonOM->end_overridden_methods()));
+ }
+ }
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ }
+}
+
+FinalOverriderCollector::~FinalOverriderCollector() {
+ for (llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *>::iterator
+ VO = VirtualOverriders.begin(), VOEnd = VirtualOverriders.end();
+ VO != VOEnd;
+ ++VO)
+ delete VO->second;
+}
+
+void
+CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
+ FinalOverriderCollector Collector;
+ Collector.Collect(this, false, 0, FinalOverriders);
+
+ // Weed out any final overriders that come from virtual base class
+ // subobjects that were hidden by other subobjects along any path.
+ // This is the final-overrider variant of C++ [class.member.lookup]p10.
+ for (CXXFinalOverriderMap::iterator OM = FinalOverriders.begin(),
+ OMEnd = FinalOverriders.end();
+ OM != OMEnd;
+ ++OM) {
+ for (OverridingMethods::iterator SO = OM->second.begin(),
+ SOEnd = OM->second.end();
+ SO != SOEnd;
+ ++SO) {
+ SmallVector<UniqueVirtualMethod, 4> &Overriding = SO->second;
+ if (Overriding.size() < 2)
+ continue;
+
+ for (SmallVector<UniqueVirtualMethod, 4>::iterator
+ Pos = Overriding.begin(), PosEnd = Overriding.end();
+ Pos != PosEnd;
+ /* increment in loop */) {
+ if (!Pos->InVirtualSubobject) {
+ ++Pos;
+ continue;
+ }
+
+ // We have an overriding method in a virtual base class
+ // subobject (or non-virtual base class subobject thereof);
+          // determine whether there exists another overriding method
+ // in a base class subobject that hides the virtual base class
+ // subobject.
+ bool Hidden = false;
+ for (SmallVector<UniqueVirtualMethod, 4>::iterator
+ OP = Overriding.begin(), OPEnd = Overriding.end();
+ OP != OPEnd && !Hidden;
+ ++OP) {
+ if (Pos == OP)
+ continue;
+
+ if (OP->Method->getParent()->isVirtuallyDerivedFrom(
+ const_cast<CXXRecordDecl *>(Pos->InVirtualSubobject)))
+ Hidden = true;
+ }
+
+ if (Hidden) {
+ // The current overriding function is hidden by another
+ // overriding function; remove this one.
+ Pos = Overriding.erase(Pos);
+ PosEnd = Overriding.end();
+ } else {
+ ++Pos;
+ }
+ }
+ }
+ }
+}
+
+static void
+AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ CXXIndirectPrimaryBaseSet& Bases) {
+ // If the record has a virtual primary base class, add it to our set.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual())
+ Bases.insert(Layout.getPrimaryBase());
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+
+}
+
+void
+CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const {
+ ASTContext &Context = getASTContext();
+
+ if (!getNumVBases())
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = bases_begin(),
+ E = bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+}
+
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
new file mode 100644
index 0000000..53032bc
--- /dev/null
+++ b/clang/lib/AST/Decl.cpp
@@ -0,0 +1,3057 @@
+//===--- Decl.cpp - Declaration AST Node Implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// NamedDecl Implementation
+//===----------------------------------------------------------------------===//
+
+static llvm::Optional<Visibility> getVisibilityOf(const Decl *D) {
+ // If this declaration has an explicit visibility attribute, use it.
+ if (const VisibilityAttr *A = D->getAttr<VisibilityAttr>()) {
+ switch (A->getVisibility()) {
+ case VisibilityAttr::Default:
+ return DefaultVisibility;
+ case VisibilityAttr::Hidden:
+ return HiddenVisibility;
+ case VisibilityAttr::Protected:
+ return ProtectedVisibility;
+ }
+ }
+
+  // If we're on Mac OS X, an 'availability' attribute for Mac OS X
+  // implies visibility(default).
+ if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) {
+ for (specific_attr_iterator<AvailabilityAttr>
+ A = D->specific_attr_begin<AvailabilityAttr>(),
+ AEnd = D->specific_attr_end<AvailabilityAttr>();
+ A != AEnd; ++A)
+ if ((*A)->getPlatform()->getName().equals("macosx"))
+ return DefaultVisibility;
+ }
+
+ return llvm::Optional<Visibility>();
+}
+
+typedef NamedDecl::LinkageInfo LinkageInfo;
+
+namespace {
+/// Flags controlling the computation of linkage and visibility.
+struct LVFlags {
+ const bool ConsiderGlobalVisibility;
+ const bool ConsiderVisibilityAttributes;
+ const bool ConsiderTemplateParameterTypes;
+
+ LVFlags() : ConsiderGlobalVisibility(true),
+ ConsiderVisibilityAttributes(true),
+ ConsiderTemplateParameterTypes(true) {
+ }
+
+ LVFlags(bool Global, bool Attributes, bool Parameters) :
+ ConsiderGlobalVisibility(Global),
+ ConsiderVisibilityAttributes(Attributes),
+ ConsiderTemplateParameterTypes(Parameters) {
+ }
+
+ /// \brief Returns a set of flags that is only useful for computing the
+ /// linkage, not the visibility, of a declaration.
+ static LVFlags CreateOnlyDeclLinkage() {
+ return LVFlags(false, false, false);
+ }
+};
+} // end anonymous namespace
+
+static LinkageInfo getLVForType(QualType T) {
+ std::pair<Linkage,Visibility> P = T->getLinkageAndVisibility();
+ return LinkageInfo(P.first, P.second, T->isVisibilityExplicit());
+}
+
+/// \brief Get the most restrictive linkage for the types in the given
+/// template parameter list.
+static LinkageInfo
+getLVForTemplateParameterList(const TemplateParameterList *Params) {
+ LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ if (!T->isDependentType())
+ LV.merge(getLVForType(T));
+ }
+ continue;
+ }
+
+ if (!NTTP->getType()->isDependentType()) {
+ LV.merge(getLVForType(NTTP->getType()));
+ continue;
+ }
+ }
+
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(*P)) {
+ LV.merge(getLVForTemplateParameterList(TTP->getTemplateParameters()));
+ }
+ }
+
+ return LV;
+}
+
+/// getLVForDecl - Get the linkage and visibility for the given declaration.
+static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags F);
+
+/// \brief Get the most restrictive linkage for the types and
+/// declarations in the given template argument list.
+static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs,
+ LVFlags &F) {
+ LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
+
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ switch (Args[I].getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Expression:
+ break;
+
+ case TemplateArgument::Type:
+ LV.merge(getLVForType(Args[I].getAsType()));
+ break;
+
+ case TemplateArgument::Declaration:
+ // The decl can validly be null as the representation of nullptr
+ // arguments, valid only in C++0x.
+ if (Decl *D = Args[I].getAsDecl()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ LV = merge(LV, getLVForDecl(ND, F));
+ }
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ if (TemplateDecl *Template
+ = Args[I].getAsTemplateOrTemplatePattern().getAsTemplateDecl())
+ LV.merge(getLVForDecl(Template, F));
+ break;
+
+ case TemplateArgument::Pack:
+ LV.mergeWithMin(getLVForTemplateArgumentList(Args[I].pack_begin(),
+ Args[I].pack_size(),
+ F));
+ break;
+ }
+ }
+
+ return LV;
+}
+
+static LinkageInfo
+getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
+ LVFlags &F) {
+ return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), F);
+}
+
+static bool shouldConsiderTemplateLV(const FunctionDecl *fn,
+ const FunctionTemplateSpecializationInfo *spec) {
+ return !(spec->isExplicitSpecialization() &&
+ fn->hasAttr<VisibilityAttr>());
+}
+
+static bool shouldConsiderTemplateLV(const ClassTemplateSpecializationDecl *d) {
+ return !(d->isExplicitSpecialization() && d->hasAttr<VisibilityAttr>());
+}
+
+static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
+ assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
+ "Not a name having namespace scope");
+ ASTContext &Context = D->getASTContext();
+
+ // C++ [basic.link]p3:
+ // A name having namespace scope (3.3.6) has internal linkage if it
+ // is the name of
+ // - an object, reference, function or function template that is
+ // explicitly declared static; or,
+ // (This bullet corresponds to C99 6.2.2p3.)
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ // Explicitly declared static.
+ if (Var->getStorageClass() == SC_Static)
+ return LinkageInfo::internal();
+
+ // - an object or reference that is explicitly declared const
+ // and neither explicitly declared extern nor previously
+ // declared to have external linkage; or
+ // (there is no equivalent in C99)
+ if (Context.getLangOpts().CPlusPlus &&
+ Var->getType().isConstant(Context) &&
+ Var->getStorageClass() != SC_Extern &&
+ Var->getStorageClass() != SC_PrivateExtern) {
+ bool FoundExtern = false;
+ for (const VarDecl *PrevVar = Var->getPreviousDecl();
+ PrevVar && !FoundExtern;
+ PrevVar = PrevVar->getPreviousDecl())
+ if (isExternalLinkage(PrevVar->getLinkage()))
+ FoundExtern = true;
+
+ if (!FoundExtern)
+ return LinkageInfo::internal();
+ }
+ if (Var->getStorageClass() == SC_None) {
+ const VarDecl *PrevVar = Var->getPreviousDecl();
+ for (; PrevVar; PrevVar = PrevVar->getPreviousDecl())
+ if (PrevVar->getStorageClass() == SC_PrivateExtern)
+ break;
+ if (PrevVar)
+ return PrevVar->getLinkageAndVisibility();
+ }
+ } else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
+ // C++ [temp]p4:
+ // A non-member function template can have internal linkage; any
+ // other template name shall have external linkage.
+ const FunctionDecl *Function = 0;
+ if (const FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>(D))
+ Function = FunTmpl->getTemplatedDecl();
+ else
+ Function = cast<FunctionDecl>(D);
+
+ // Explicitly declared static.
+ if (Function->getStorageClass() == SC_Static)
+ return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ } else if (const FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ // - a data member of an anonymous union.
+ if (cast<RecordDecl>(Field->getDeclContext())->isAnonymousStructOrUnion())
+ return LinkageInfo::internal();
+ }
+
+ if (D->isInAnonymousNamespace()) {
+ const VarDecl *Var = dyn_cast<VarDecl>(D);
+ const FunctionDecl *Func = dyn_cast<FunctionDecl>(D);
+ if ((!Var || !Var->getDeclContext()->isExternCContext()) &&
+ (!Func || !Func->getDeclContext()->isExternCContext()))
+ return LinkageInfo::uniqueExternal();
+ }
+
+ // Set up the defaults.
+
+ // C99 6.2.2p5:
+ // If the declaration of an identifier for an object has file
+ // scope and no storage-class specifier, its linkage is
+ // external.
+ LinkageInfo LV;
+ LV.mergeVisibility(Context.getLangOpts().getVisibilityMode());
+
+ if (F.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
+ LV.setVisibility(*Vis, true);
+ } else {
+ // If we're declared in a namespace with a visibility attribute,
+ // use that namespace's visibility, but don't call it explicit.
+ for (const DeclContext *DC = D->getDeclContext();
+ !isa<TranslationUnitDecl>(DC);
+ DC = DC->getParent()) {
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND) continue;
+ if (llvm::Optional<Visibility> Vis = ND->getExplicitVisibility()) {
+ LV.setVisibility(*Vis, true);
+ break;
+ }
+ }
+ }
+ }
+
+ // C++ [basic.link]p4:
+
+ // A name having namespace scope has external linkage if it is the
+ // name of
+ //
+ // - an object or reference, unless it has internal linkage; or
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ // GCC applies the following optimization to variables and static
+ // data members, but not to functions:
+ //
+ // Modify the variable's LV by the LV of its type unless this is
+ // C or extern "C". This follows from [basic.link]p9:
+ // A type without linkage shall not be used as the type of a
+ // variable or function with external linkage unless
+ // - the entity has C language linkage, or
+ // - the entity is declared within an unnamed namespace, or
+ // - the entity is not used or is defined in the same
+ // translation unit.
+ // and [basic.link]p10:
+ // ...the types specified by all declarations referring to a
+ // given variable or function shall be identical...
+ // C does not have an equivalent rule.
+ //
+ // Ignore this if we've got an explicit attribute; the user
+ // probably knows what they're doing.
+ //
+ // Note that we don't want to make the variable non-external
+ // because of this, but unique-external linkage suits us.
+ if (Context.getLangOpts().CPlusPlus &&
+ !Var->getDeclContext()->isExternCContext()) {
+ LinkageInfo TypeLV = getLVForType(Var->getType());
+ if (TypeLV.linkage() != ExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+ LV.mergeVisibilityWithMin(TypeLV);
+ }
+
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
+
+ if (!Context.getLangOpts().CPlusPlus &&
+ (Var->getStorageClass() == SC_Extern ||
+ Var->getStorageClass() == SC_PrivateExtern)) {
+
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible, if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier
+ // at the later declaration is the same as the linkage
+ // specified at the prior declaration. If no prior declaration
+ // is visible, or if the prior declaration specifies no
+ // linkage, then the identifier has external linkage.
+ if (const VarDecl *PrevVar = Var->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(PrevVar, F);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+ }
+
+ // - a function, unless it has internal linkage; or
+ } else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // In theory, we can modify the function's LV by the LV of its
+ // type unless it has C linkage (see comment above about variables
+ // for justification). In practice, GCC doesn't do this, so it's
+ // just too painful to make work.
+
+ if (Function->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
+
+ // C99 6.2.2p5:
+ // If the declaration of an identifier for a function has no
+ // storage-class specifier, its linkage is determined exactly
+ // as if it were declared with the storage-class specifier
+ // extern.
+ if (!Context.getLangOpts().CPlusPlus &&
+ (Function->getStorageClass() == SC_Extern ||
+ Function->getStorageClass() == SC_PrivateExtern ||
+ Function->getStorageClass() == SC_None)) {
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible, if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier
+ // at the later declaration is the same as the linkage
+ // specified at the prior declaration. If no prior declaration
+ // is visible, or if the prior declaration specifies no
+ // linkage, then the identifier has external linkage.
+ if (const FunctionDecl *PrevFunc = Function->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(PrevFunc, F);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+ }
+
+    // In C++, if the type of the function uses a type with
+ // unique-external linkage, it's not legally usable from outside
+ // this translation unit. However, we should use the C linkage
+ // rules instead for extern "C" declarations.
+ if (Context.getLangOpts().CPlusPlus &&
+ !Function->getDeclContext()->isExternCContext() &&
+ Function->getType()->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ // Consider LV from the template and the template arguments unless
+ // this is an explicit specialization with a visibility attribute.
+ if (FunctionTemplateSpecializationInfo *specInfo
+ = Function->getTemplateSpecializationInfo()) {
+ if (shouldConsiderTemplateLV(Function, specInfo)) {
+ LV.merge(getLVForDecl(specInfo->getTemplate(),
+ LVFlags::CreateOnlyDeclLinkage()));
+ const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
+ LV.mergeWithMin(getLVForTemplateArgumentList(templateArgs, F));
+ }
+ }
+
+ // - a named class (Clause 9), or an unnamed class defined in a
+ // typedef declaration in which the class has the typedef name
+ // for linkage purposes (7.1.3); or
+ // - a named enumeration (7.2), or an unnamed enumeration
+ // defined in a typedef declaration in which the enumeration
+ // has the typedef name for linkage purposes (7.1.3); or
+ } else if (const TagDecl *Tag = dyn_cast<TagDecl>(D)) {
+ // Unnamed tags have no linkage.
+ if (!Tag->getDeclName() && !Tag->getTypedefNameForAnonDecl())
+ return LinkageInfo::none();
+
+ // If this is a class template specialization, consider the
+ // linkage of the template and template arguments.
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
+ if (shouldConsiderTemplateLV(spec)) {
+ // From the template.
+ LV.merge(getLVForDecl(spec->getSpecializedTemplate(),
+ LVFlags::CreateOnlyDeclLinkage()));
+
+ // The arguments at which the template was instantiated.
+ const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
+ LV.mergeWithMin(getLVForTemplateArgumentList(TemplateArgs, F));
+ }
+ }
+
+ // - an enumerator belonging to an enumeration with external linkage;
+ } else if (isa<EnumConstantDecl>(D)) {
+ LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()), F);
+ if (!isExternalLinkage(EnumLV.linkage()))
+ return LinkageInfo::none();
+ LV.merge(EnumLV);
+
+ // - a template, unless it is a function template that has
+ // internal linkage (Clause 14);
+ } else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(temp->getTemplateParameters()));
+
+ // - a namespace (7.3), unless it is declared within an unnamed
+ // namespace.
+ } else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
+ return LV;
+
+ // By extension, we assign external linkage to Objective-C
+ // interfaces.
+ } else if (isa<ObjCInterfaceDecl>(D)) {
+    // Fall out to the common code below.
+
+ // Everything not covered here has no linkage.
+ } else {
+ return LinkageInfo::none();
+ }
+
+ // If we ended up with non-external linkage, visibility should
+ // always be default.
+ if (LV.linkage() != ExternalLinkage)
+ return LinkageInfo(LV.linkage(), DefaultVisibility, false);
+
+ return LV;
+}
+
+static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
+ // Only certain class members have linkage. Note that fields don't
+ // really have linkage, but it's convenient to say they do for the
+ // purposes of calculating linkage of pointer-to-data-member
+ // template arguments.
+ if (!(isa<CXXMethodDecl>(D) ||
+ isa<VarDecl>(D) ||
+ isa<FieldDecl>(D) ||
+ (isa<TagDecl>(D) &&
+ (D->getDeclName() || cast<TagDecl>(D)->getTypedefNameForAnonDecl()))))
+ return LinkageInfo::none();
+
+ LinkageInfo LV;
+ LV.mergeVisibility(D->getASTContext().getLangOpts().getVisibilityMode());
+
+ bool DHasExplicitVisibility = false;
+ // If we have an explicit visibility attribute, merge that in.
+ if (F.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
+ LV.mergeVisibility(*Vis, true);
+
+ DHasExplicitVisibility = true;
+ }
+ }
+ // Ignore both global visibility and attributes when computing our
+ // parent's visibility if we already have an explicit one.
+ LVFlags ClassF = DHasExplicitVisibility ?
+ LVFlags::CreateOnlyDeclLinkage() : F;
+
+ // If we're paying attention to global visibility, apply
+ // -finline-visibility-hidden if this is an inline method.
+ //
+ // Note that we do this before merging information about
+ // the class visibility.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (FunctionTemplateSpecializationInfo *spec
+ = MD->getTemplateSpecializationInfo()) {
+ TSK = spec->getTemplateSpecializationKind();
+ } else if (MemberSpecializationInfo *MSI =
+ MD->getMemberSpecializationInfo()) {
+ TSK = MSI->getTemplateSpecializationKind();
+ }
+
+ const FunctionDecl *Def = 0;
+ // InlineVisibilityHidden only applies to definitions, and
+ // isInlined() only gives meaningful answers on definitions
+ // anyway.
+ if (TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition &&
+ F.ConsiderGlobalVisibility &&
+ !LV.visibilityExplicit() &&
+ MD->getASTContext().getLangOpts().InlineVisibilityHidden &&
+ MD->hasBody(Def) && Def->isInlined())
+ LV.mergeVisibility(HiddenVisibility, true);
+ }
+
+ // Class members only have linkage if their class has external
+ // linkage.
+ LV.merge(getLVForDecl(cast<RecordDecl>(D->getDeclContext()), ClassF));
+ if (!isExternalLinkage(LV.linkage()))
+ return LinkageInfo::none();
+
+ // If the class already has unique-external linkage, we can't improve.
+ if (LV.linkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ // If the type of the function uses a type with unique-external
+ // linkage, it's not legally usable from outside this translation unit.
+ if (MD->getType()->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ // If this is a method template specialization, use the linkage for
+ // the template parameters and arguments.
+ if (FunctionTemplateSpecializationInfo *spec
+ = MD->getTemplateSpecializationInfo()) {
+ if (shouldConsiderTemplateLV(MD, spec)) {
+ LV.mergeWithMin(getLVForTemplateArgumentList(*spec->TemplateArguments,
+ F));
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(
+ spec->getTemplate()->getTemplateParameters()));
+ }
+ }
+
+ // Note that in contrast to basically every other situation, we
+ // *do* apply -fvisibility to method declarations.
+
+ } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ if (shouldConsiderTemplateLV(spec)) {
+ // Merge template argument/parameter information for member
+ // class template specializations.
+ LV.mergeWithMin(getLVForTemplateArgumentList(spec->getTemplateArgs(),
+ F));
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(
+ spec->getSpecializedTemplate()->getTemplateParameters()));
+ }
+ }
+
+ // Static data members.
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Modify the variable's linkage by its type, but ignore the
+ // type's visibility unless it's a definition.
+ LinkageInfo TypeLV = getLVForType(VD->getType());
+ if (TypeLV.linkage() != ExternalLinkage)
+ LV.mergeLinkage(UniqueExternalLinkage);
+ if (!LV.visibilityExplicit())
+ LV.mergeVisibility(TypeLV);
+ }
+
+ return LV;
+}
+
+static void clearLinkageForClass(const CXXRecordDecl *record) {
+ for (CXXRecordDecl::decl_iterator
+ i = record->decls_begin(), e = record->decls_end(); i != e; ++i) {
+ Decl *child = *i;
+ if (isa<NamedDecl>(child))
+ cast<NamedDecl>(child)->ClearLinkageCache();
+ }
+}
+
+void NamedDecl::anchor() { }
+
+void NamedDecl::ClearLinkageCache() {
+ // Note that we can't skip clearing the linkage of children just
+ // because the parent doesn't have cached linkage: we don't cache
+ // when computing linkage for parent contexts.
+
+ HasCachedLinkage = 0;
+
+ // If we're changing the linkage of a class, we need to reset the
+ // linkage of child declarations, too.
+ if (const CXXRecordDecl *record = dyn_cast<CXXRecordDecl>(this))
+ clearLinkageForClass(record);
+
+ if (ClassTemplateDecl *temp =
+ dyn_cast<ClassTemplateDecl>(const_cast<NamedDecl*>(this))) {
+ // Clear linkage for the template pattern.
+ CXXRecordDecl *record = temp->getTemplatedDecl();
+ record->HasCachedLinkage = 0;
+ clearLinkageForClass(record);
+
+ // We need to clear linkage for specializations, too.
+ for (ClassTemplateDecl::spec_iterator
+ i = temp->spec_begin(), e = temp->spec_end(); i != e; ++i)
+ i->ClearLinkageCache();
+ }
+
+ // Clear cached linkage for function template decls, too.
+ if (FunctionTemplateDecl *temp =
+ dyn_cast<FunctionTemplateDecl>(const_cast<NamedDecl*>(this))) {
+ temp->getTemplatedDecl()->ClearLinkageCache();
+ for (FunctionTemplateDecl::spec_iterator
+ i = temp->spec_begin(), e = temp->spec_end(); i != e; ++i)
+ i->ClearLinkageCache();
+ }
+}
+
+Linkage NamedDecl::getLinkage() const {
+ if (HasCachedLinkage) {
+ assert(Linkage(CachedLinkage) ==
+ getLVForDecl(this, LVFlags::CreateOnlyDeclLinkage()).linkage());
+ return Linkage(CachedLinkage);
+ }
+
+ CachedLinkage = getLVForDecl(this,
+ LVFlags::CreateOnlyDeclLinkage()).linkage();
+ HasCachedLinkage = 1;
+ return Linkage(CachedLinkage);
+}
+
+LinkageInfo NamedDecl::getLinkageAndVisibility() const {
+ LinkageInfo LI = getLVForDecl(this, LVFlags());
+ assert(!HasCachedLinkage || Linkage(CachedLinkage) == LI.linkage());
+ HasCachedLinkage = 1;
+ CachedLinkage = LI.linkage();
+ return LI;
+}
+
+llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
+ // Use the most recent declaration of a variable.
+ if (const VarDecl *var = dyn_cast<VarDecl>(this))
+ return getVisibilityOf(var->getMostRecentDecl());
+
+ // Use the most recent declaration of a function, and also handle
+ // function template specializations.
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(this)) {
+ if (llvm::Optional<Visibility> V
+ = getVisibilityOf(fn->getMostRecentDecl()))
+ return V;
+
+ // If the function is a specialization of a template with an
+ // explicit visibility attribute, use that.
+ if (FunctionTemplateSpecializationInfo *templateInfo
+ = fn->getTemplateSpecializationInfo())
+ return getVisibilityOf(templateInfo->getTemplate()->getTemplatedDecl());
+
+ // If the function is a member of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ FunctionDecl *InstantiatedFrom = fn->getInstantiatedFromMemberFunction();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+
+ return llvm::Optional<Visibility>();
+ }
+
+ // Otherwise, just check the declaration itself first.
+ if (llvm::Optional<Visibility> V = getVisibilityOf(this))
+ return V;
+
+ // If there wasn't explicit visibility there, and this is a
+ // specialization of a class template, check for visibility
+ // on the pattern.
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this))
+ return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl());
+
+ // If this is a member class of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(this)) {
+ CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+ }
+
+ return llvm::Optional<Visibility>();
+}
+
+static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
+ // Objective-C: treat all Objective-C declarations as having external
+ // linkage.
+ switch (D->getKind()) {
+ default:
+ break;
+ case Decl::ParmVar:
+ return LinkageInfo::none();
+ case Decl::TemplateTemplateParm: // count these as external
+ case Decl::NonTypeTemplateParm:
+ case Decl::ObjCAtDefsField:
+ case Decl::ObjCCategory:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCMethod:
+ case Decl::ObjCProperty:
+ case Decl::ObjCPropertyImpl:
+ case Decl::ObjCProtocol:
+ return LinkageInfo::external();
+
+ case Decl::CXXRecord: {
+ const CXXRecordDecl *Record = cast<CXXRecordDecl>(D);
+ if (Record->isLambda()) {
+ if (!Record->getLambdaManglingNumber()) {
+ // This lambda has no mangling number, so it's internal.
+ return LinkageInfo::internal();
+ }
+
+ // This lambda has its linkage/visibility determined by its owner.
+ const DeclContext *DC = D->getDeclContext()->getRedeclContext();
+ if (Decl *ContextDecl = Record->getLambdaContextDecl()) {
+ if (isa<ParmVarDecl>(ContextDecl))
+ DC = ContextDecl->getDeclContext()->getRedeclContext();
+ else
+ return getLVForDecl(cast<NamedDecl>(ContextDecl), Flags);
+ }
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
+ return getLVForDecl(ND, Flags);
+
+ return LinkageInfo::external();
+ }
+
+ break;
+ }
+ }
+
+ // Handle linkage for namespace-scope names.
+ if (D->getDeclContext()->getRedeclContext()->isFileContext())
+ return getLVForNamespaceScopeDecl(D, Flags);
+
+ // C++ [basic.link]p5:
+ // In addition, a member function, static data member, a named
+ // class or enumeration of class scope, or an unnamed class or
+ // enumeration defined in a class-scope typedef declaration such
+ // that the class or enumeration has the typedef name for linkage
+ // purposes (7.1.3), has external linkage if the name of the class
+ // has external linkage.
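+  //
+  // For example, a static data member of a class with external linkage
+  // itself has external linkage.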
+ if (D->getDeclContext()->isRecord())
+ return getLVForClassMember(D, Flags);
+
+ // C++ [basic.link]p6:
+ // The name of a function declared in block scope and the name of
+ // an object declared by a block scope extern declaration have
+ // linkage. If there is a visible declaration of an entity with
+ // linkage having the same name and type, ignoring entities
+ // declared outside the innermost enclosing namespace scope, the
+ // block scope declaration declares that same entity and receives
+ // the linkage of the previous declaration. If there is more than
+ // one such matching entity, the program is ill-formed. Otherwise,
+ // if no matching entity is found, the block scope entity receives
+ // external linkage.
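+  //
+  // For example, a block-scope "extern int n;" receives the linkage of a
+  // visible file-scope "int n;" if there is one, and external linkage
+  // otherwise.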
+ if (D->getLexicalDeclContext()->isFunctionOrMethod()) {
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ if (Function->isInAnonymousNamespace() &&
+ !Function->getDeclContext()->isExternCContext())
+ return LinkageInfo::uniqueExternal();
+
+ LinkageInfo LV;
+ if (Flags.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = Function->getExplicitVisibility())
+ LV.setVisibility(*Vis, true);
+ }
+
+ if (const FunctionDecl *Prev = Function->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+
+ return LV;
+ }
+
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D))
+ if (Var->getStorageClass() == SC_Extern ||
+ Var->getStorageClass() == SC_PrivateExtern) {
+ if (Var->isInAnonymousNamespace() &&
+ !Var->getDeclContext()->isExternCContext())
+ return LinkageInfo::uniqueExternal();
+
+ LinkageInfo LV;
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
+ else if (Flags.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = Var->getExplicitVisibility())
+ LV.setVisibility(*Vis, true);
+ }
+
+ if (const VarDecl *Prev = Var->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+
+ return LV;
+ }
+ }
+
+ // C++ [basic.link]p6:
+ // Names not covered by these rules have no linkage.
+ return LinkageInfo::none();
+}
+
+std::string NamedDecl::getQualifiedNameAsString() const {
+ return getQualifiedNameAsString(getASTContext().getPrintingPolicy());
+}
+
+std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
+ const DeclContext *Ctx = getDeclContext();
+
+ if (Ctx->isFunctionOrMethod())
+ return getNameAsString();
+
+ typedef SmallVector<const DeclContext *, 8> ContextsTy;
+ ContextsTy Contexts;
+
+ // Collect contexts.
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ Contexts.push_back(Ctx);
+ Ctx = Ctx->getParent();
+  }
+
+ std::string QualName;
+ llvm::raw_string_ostream OS(QualName);
+
+ for (ContextsTy::reverse_iterator I = Contexts.rbegin(), E = Contexts.rend();
+ I != E; ++I) {
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(*I)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ P);
+ OS << Spec->getName() << TemplateArgsStr;
+ } else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
+ if (ND->isAnonymousNamespace())
+ OS << "<anonymous namespace>";
+ else
+ OS << *ND;
+ } else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) {
+ if (!RD->getIdentifier())
+ OS << "<anonymous " << RD->getKindName() << '>';
+ else
+ OS << *RD;
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ const FunctionProtoType *FT = 0;
+ if (FD->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(FD->getType()->getAs<FunctionType>());
+
+ OS << *FD << '(';
+ if (FT) {
+ unsigned NumParams = FD->getNumParams();
+ for (unsigned i = 0; i < NumParams; ++i) {
+ if (i)
+ OS << ", ";
+ std::string Param;
+ FD->getParamDecl(i)->getType().getAsStringInternal(Param, P);
+ OS << Param;
+ }
+
+ if (FT->isVariadic()) {
+ if (NumParams > 0)
+ OS << ", ";
+ OS << "...";
+ }
+ }
+ OS << ')';
+ } else {
+ OS << *cast<NamedDecl>(*I);
+ }
+ OS << "::";
+ }
+
+ if (getDeclName())
+ OS << *this;
+ else
+ OS << "<anonymous>";
+
+ return OS.str();
+}
+
+bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
+ assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
+
+  // UsingDirectiveDecls are not really NamedDecls, and they all have the
+  // same name. Keep the old directive unless the new one nominates the same
+  // namespace, in which case the new one replaces it.
+ if (getKind() == Decl::UsingDirective) {
+ return cast<UsingDirectiveDecl>(this)->getNominatedNamespace()
+ ->getOriginalNamespace() ==
+ cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
+ ->getOriginalNamespace();
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+ // For function declarations, we keep track of redeclarations.
+ return FD->getPreviousDecl() == OldD;
+
+ // For function templates, the underlying function declarations are linked.
+ if (const FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(this))
+ if (const FunctionTemplateDecl *OldFunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(OldD))
+ return FunctionTemplate->getTemplatedDecl()
+ ->declarationReplaces(OldFunctionTemplate->getTemplatedDecl());
+
+ // For method declarations, we keep track of redeclarations.
+ if (isa<ObjCMethodDecl>(this))
+ return false;
+
+ if (isa<ObjCInterfaceDecl>(this) && isa<ObjCCompatibleAliasDecl>(OldD))
+ return true;
+
+ if (isa<UsingShadowDecl>(this) && isa<UsingShadowDecl>(OldD))
+ return cast<UsingShadowDecl>(this)->getTargetDecl() ==
+ cast<UsingShadowDecl>(OldD)->getTargetDecl();
+
+ if (isa<UsingDecl>(this) && isa<UsingDecl>(OldD)) {
+ ASTContext &Context = getASTContext();
+ return Context.getCanonicalNestedNameSpecifier(
+ cast<UsingDecl>(this)->getQualifier()) ==
+ Context.getCanonicalNestedNameSpecifier(
+ cast<UsingDecl>(OldD)->getQualifier());
+ }
+
+ // A typedef of an Objective-C class type can replace an Objective-C class
+ // declaration or definition, and vice versa.
+ if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) ||
+ (isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD)))
+ return true;
+
+ // For non-function declarations, if the declarations are of the
+ // same kind then this must be a redeclaration, or semantic analysis
+ // would not have given us the new declaration.
+ return this->getKind() == OldD->getKind();
+}
+
+bool NamedDecl::hasLinkage() const {
+ return getLinkage() != NoLinkage;
+}
+
+NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
+ NamedDecl *ND = this;
+ while (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND))
+ ND = UD->getTargetDecl();
+
+ if (ObjCCompatibleAliasDecl *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
+ return AD->getClassInterface();
+
+ return ND;
+}
+
+bool NamedDecl::isCXXInstanceMember() const {
+ if (!isCXXClassMember())
+ return false;
+
+ const NamedDecl *D = this;
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D))
+ return true;
+ if (isa<CXXMethodDecl>(D))
+ return cast<CXXMethodDecl>(D)->isInstance();
+ if (isa<FunctionTemplateDecl>(D))
+ return cast<CXXMethodDecl>(cast<FunctionTemplateDecl>(D)
+ ->getTemplatedDecl())->isInstance();
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclaratorDecl Implementation
+//===----------------------------------------------------------------------===//
+
+template <typename DeclT>
+static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
+ if (decl->getNumTemplateParameterLists() > 0)
+ return decl->getTemplateParameterList(0)->getTemplateLoc();
+ else
+ return decl->getInnerLocStart();
+}
+
+SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ if (TSI) return TSI->getTypeLoc().getBeginLoc();
+ return SourceLocation();
+}
+
+void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+ if (QualifierLoc) {
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo()) {
+ // Save (non-extended) type source info pointer.
+ TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ // Allocate external info struct.
+ DeclInfo = new (getASTContext()) ExtInfo;
+ // Restore savedTInfo into (extended) decl info.
+ getExtInfo()->TInfo = savedTInfo;
+ }
+ // Set qualifier info.
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ } else {
+ // Here Qualifier == 0, i.e., we are removing the qualifier (if any).
+ if (hasExtInfo()) {
+ if (getExtInfo()->NumTemplParamLists == 0) {
+ // Save type source info pointer.
+ TypeSourceInfo *savedTInfo = getExtInfo()->TInfo;
+ // Deallocate the extended decl info.
+ getASTContext().Deallocate(getExtInfo());
+ // Restore savedTInfo into (non-extended) decl info.
+ DeclInfo = savedTInfo;
+ }
+ else
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ }
+ }
+}
+
+void
+DeclaratorDecl::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert(NumTPLists > 0);
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo()) {
+ // Save (non-extended) type source info pointer.
+ TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ // Allocate external info struct.
+ DeclInfo = new (getASTContext()) ExtInfo;
+ // Restore savedTInfo into (extended) decl info.
+ getExtInfo()->TInfo = savedTInfo;
+ }
+ // Set the template parameter lists info.
+ getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+}
+
+SourceLocation DeclaratorDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+namespace {
+
+// Helper function: returns true if QT is or contains a type
+// having a postfix component.
+bool typeIsPostfix(clang::QualType QT) {
+ while (true) {
+ const Type* T = QT.getTypePtr();
+ switch (T->getTypeClass()) {
+ default:
+ return false;
+ case Type::Pointer:
+ QT = cast<PointerType>(T)->getPointeeType();
+ break;
+ case Type::BlockPointer:
+ QT = cast<BlockPointerType>(T)->getPointeeType();
+ break;
+ case Type::MemberPointer:
+ QT = cast<MemberPointerType>(T)->getPointeeType();
+ break;
+ case Type::LValueReference:
+ case Type::RValueReference:
+ QT = cast<ReferenceType>(T)->getPointeeType();
+ break;
+ case Type::PackExpansion:
+ QT = cast<PackExpansionType>(T)->getPattern();
+ break;
+ case Type::Paren:
+ case Type::ConstantArray:
+ case Type::DependentSizedArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return true;
+ }
+ }
+}
+
+} // namespace
+
+SourceRange DeclaratorDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocation();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
+ if (typeIsPostfix(TInfo->getType()))
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ }
+ return SourceRange(getOuterLocStart(), RangeEnd);
+}
+
+void
+QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert((NumTPLists == 0 || TPLists != 0) &&
+ "Empty array of template parameters with positive size!");
+
+ // Free previous template parameters (if any).
+ if (NumTemplParamLists > 0) {
+ Context.Deallocate(TemplParamLists);
+ TemplParamLists = 0;
+ NumTemplParamLists = 0;
+ }
+ // Set info on matched template parameter lists (if any).
+ if (NumTPLists > 0) {
+ TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
+ NumTemplParamLists = NumTPLists;
+ for (unsigned i = NumTPLists; i-- > 0; )
+ TemplParamLists[i] = TPLists[i];
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// VarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
+ switch (SC) {
+ case SC_None: break;
+ case SC_Auto: return "auto";
+ case SC_Extern: return "extern";
+ case SC_OpenCLWorkGroupLocal: return "<<work-group-local>>";
+ case SC_PrivateExtern: return "__private_extern__";
+ case SC_Register: return "register";
+ case SC_Static: return "static";
+ }
+
+ llvm_unreachable("Invalid storage class");
+}
+
+VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartL, SourceLocation IdL,
+ IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten) {
+ return new (C) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S, SCAsWritten);
+}
+
+VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarDecl));
+ return new (Mem) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, SC_None, SC_None);
+}
+
+void VarDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForVariable(SC));
+ if (getStorageClass() != SC)
+ ClearLinkageCache();
+
+ VarDeclBits.SClass = SC;
+}
+
+SourceRange VarDecl::getSourceRange() const {
+ if (getInit())
+ return SourceRange(getOuterLocStart(), getInit()->getLocEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+bool VarDecl::isExternC() const {
+ if (getLinkage() != ExternalLinkage)
+ return false;
+
+ const DeclContext *DC = getDeclContext();
+ if (DC->isRecord())
+ return false;
+
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+ return DC->isExternCContext();
+}
+
+VarDecl *VarDecl::getCanonicalDecl() {
+ return getFirstDeclaration();
+}
+
+VarDecl::DefinitionKind
+VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
+ // C++ [basic.def]p2:
+ // A declaration is a definition unless [...] it contains the 'extern'
+ // specifier or a linkage-specification and neither an initializer [...],
+ // it declares a static data member in a class declaration [...].
+ // C++ [temp.expl.spec]p15:
+ // An explicit specialization of a static data member of a template is a
+ // definition if the declaration includes an initializer; otherwise, it is
+ // a declaration.
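+  //
+  // For example, "template<> int S<int>::x = 0;" is a definition, while
+  // "template<> int S<int>::x;" only declares the specialization.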
+ if (isStaticDataMember()) {
+ if (isOutOfLine() && (hasInit() ||
+ getTemplateSpecializationKind() != TSK_ExplicitSpecialization))
+ return Definition;
+ else
+ return DeclarationOnly;
+ }
+ // C99 6.7p5:
+ // A definition of an identifier is a declaration for that identifier that
+ // [...] causes storage to be reserved for that object.
+ // Note: that applies for all non-file-scope objects.
+ // C99 6.9.2p1:
+ // If the declaration of an identifier for an object has file scope and an
+  //   initializer, the declaration is an external definition for the
+  //   identifier.
+ if (hasInit())
+ return Definition;
+ // AST for 'extern "C" int foo;' is annotated with 'extern'.
+ if (hasExternalStorage())
+ return DeclarationOnly;
+
+ if (getStorageClassAsWritten() == SC_Extern ||
+ getStorageClassAsWritten() == SC_PrivateExtern) {
+ for (const VarDecl *PrevVar = getPreviousDecl();
+ PrevVar; PrevVar = PrevVar->getPreviousDecl()) {
+ if (PrevVar->getLinkage() == InternalLinkage && PrevVar->hasInit())
+ return DeclarationOnly;
+ }
+ }
+ // C99 6.9.2p2:
+  //   A declaration of an identifier for an object that has file scope
+  //   without an initializer, and without a storage-class specifier or with
+  //   the storage-class specifier static, constitutes a tentative definition.
+ // No such thing in C++.
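+  //
+  // For example, a file-scope "int x;" with no initializer is a tentative
+  // definition in C; in C++ the same declaration is a definition.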
+ if (!C.getLangOpts().CPlusPlus && isFileVarDecl())
+ return TentativeDefinition;
+
+ // What's left is (in C, block-scope) declarations without initializers or
+ // external storage. These are definitions.
+ return Definition;
+}
+
+VarDecl *VarDecl::getActingDefinition() {
+ DefinitionKind Kind = isThisDeclarationADefinition();
+ if (Kind != TentativeDefinition)
+ return 0;
+
+ VarDecl *LastTentative = 0;
+ VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I) {
+ Kind = (*I)->isThisDeclarationADefinition();
+ if (Kind == Definition)
+ return 0;
+ else if (Kind == TentativeDefinition)
+ LastTentative = *I;
+ }
+ return LastTentative;
+}
+
+bool VarDecl::isTentativeDefinitionNow() const {
+ DefinitionKind Kind = isThisDeclarationADefinition();
+ if (Kind != TentativeDefinition)
+ return false;
+
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if ((*I)->isThisDeclarationADefinition() == Definition)
+ return false;
+ }
+ return true;
+}
+
+VarDecl *VarDecl::getDefinition(ASTContext &C) {
+ VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I) {
+ if ((*I)->isThisDeclarationADefinition(C) == Definition)
+ return *I;
+ }
+ return 0;
+}
+
+VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
+ DefinitionKind Kind = DeclarationOnly;
+
+ const VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I) {
+ Kind = std::max(Kind, (*I)->isThisDeclarationADefinition(C));
+ if (Kind == Definition)
+ break;
+ }
+
+ return Kind;
+}
+
+const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
+ redecl_iterator I = redecls_begin(), E = redecls_end();
+ while (I != E && !I->getInit())
+ ++I;
+
+ if (I != E) {
+ D = *I;
+ return I->getInit();
+ }
+ return 0;
+}
+
+bool VarDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ if (!isStaticDataMember())
+ return false;
+
+ // If this static data member was instantiated from a static data member of
+ // a class template, check whether that static data member was defined
+ // out-of-line.
+ if (VarDecl *VD = getInstantiatedFromStaticDataMember())
+ return VD->isOutOfLine();
+
+ return false;
+}
+
+VarDecl *VarDecl::getOutOfLineDefinition() {
+ if (!isStaticDataMember())
+ return 0;
+
+ for (VarDecl::redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD) {
+ if (RD->getLexicalDeclContext()->isFileContext())
+ return *RD;
+ }
+
+ return 0;
+}
+
+void VarDecl::setInit(Expr *I) {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
+ Eval->~EvaluatedStmt();
+ getASTContext().Deallocate(Eval);
+ }
+
+ Init = I;
+}
+
+bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
+ const LangOptions &Lang = C.getLangOpts();
+
+ if (!Lang.CPlusPlus)
+ return false;
+
+ // In C++11, any variable of reference type can be used in a constant
+ // expression if it is initialized by a constant expression.
+ if (Lang.CPlusPlus0x && getType()->isReferenceType())
+ return true;
+
+ // Only const objects can be used in constant expressions in C++. C++98 does
+ // not require the variable to be non-volatile, but we consider this to be a
+ // defect.
+ if (!getType().isConstQualified() || getType().isVolatileQualified())
+ return false;
+
+ // In C++, const, non-volatile variables of integral or enumeration types
+ // can be used in constant expressions.
+ if (getType()->isIntegralOrEnumerationType())
+ return true;
+
+ // Additionally, in C++11, non-volatile constexpr variables can be used in
+ // constant expressions.
+ return Lang.CPlusPlus0x && isConstexpr();
+}
+
+/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
+/// form, which contains extra information on the evaluated value of the
+/// initializer.
+EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
+ EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
+ if (!Eval) {
+ Stmt *S = Init.get<Stmt *>();
+ Eval = new (getASTContext()) EvaluatedStmt;
+ Eval->Value = S;
+ Init = Eval;
+ }
+ return Eval;
+}
+
+APValue *VarDecl::evaluateValue() const {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ return evaluateValue(Notes);
+}
+
+APValue *VarDecl::evaluateValue(
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+
+ // We only produce notes indicating why an initializer is non-constant the
+ // first time it is evaluated. FIXME: The notes won't always be emitted the
+  // first time we try evaluation, so they might not be produced at all.
+ if (Eval->WasEvaluated)
+ return Eval->Evaluated.isUninit() ? 0 : &Eval->Evaluated;
+
+ const Expr *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ if (Eval->IsEvaluating) {
+ // FIXME: Produce a diagnostic for self-initialization.
+ Eval->CheckedICE = true;
+ Eval->IsICE = false;
+ return 0;
+ }
+
+ Eval->IsEvaluating = true;
+
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
+ this, Notes);
+
+ // Ensure the result is an uninitialized APValue if evaluation fails.
+ if (!Result)
+ Eval->Evaluated = APValue();
+
+ Eval->IsEvaluating = false;
+ Eval->WasEvaluated = true;
+
+ // In C++11, we have determined whether the initializer was a constant
+ // expression as a side-effect.
+ if (getASTContext().getLangOpts().CPlusPlus0x && !Eval->CheckedICE) {
+ Eval->CheckedICE = true;
+ Eval->IsICE = Result && Notes.empty();
+ }
+
+ return Result ? &Eval->Evaluated : 0;
+}
+
+bool VarDecl::checkInitIsICE() const {
+ // Initializers of weak variables are never ICEs.
+ if (isWeak())
+ return false;
+
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ if (Eval->CheckedICE)
+ // We have already checked whether this subexpression is an
+ // integral constant expression.
+ return Eval->IsICE;
+
+ const Expr *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ // In C++11, evaluate the initializer to check whether it's a constant
+ // expression.
+ if (getASTContext().getLangOpts().CPlusPlus0x) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ evaluateValue(Notes);
+ return Eval->IsICE;
+ }
+
+ // It's an ICE whether or not the definition we found is
+ // out-of-line. See DR 721 and the discussion in Clang PR
+ // 6206 for details.
+
+ if (Eval->CheckingICE)
+ return false;
+ Eval->CheckingICE = true;
+
+ Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
+ Eval->CheckingICE = false;
+ Eval->CheckedICE = true;
+ return Eval->IsICE;
+}
+
+bool VarDecl::extendsLifetimeOfTemporary() const {
+  assert(getType()->isReferenceType() &&
+         "Non-references never extend lifetime");
+
+ const Expr *E = getInit();
+ if (!E)
+ return false;
+
+ if (const ExprWithCleanups *Cleanups = dyn_cast<ExprWithCleanups>(E))
+ E = Cleanups->getSubExpr();
+
+ return isa<MaterializeTemporaryExpr>(E);
+}
+
+VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return cast<VarDecl>(MSI->getInstantiatedFrom());
+
+ return 0;
+}
+
+TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
+ return getASTContext().getInstantiatedFromStaticDataMember(this);
+}
+
+void VarDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+ assert(MSI && "Not an instantiated static data member?");
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+//===----------------------------------------------------------------------===//
+// ParmVarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten,
+ Expr *DefArg) {
+ return new (C) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
+ S, SCAsWritten, DefArg);
+}
+
+ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ParmVarDecl));
+ return new (Mem) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, SC_None, SC_None, 0);
+}
+
+SourceRange ParmVarDecl::getSourceRange() const {
+ if (!hasInheritedDefaultArg()) {
+ SourceRange ArgRange = getDefaultArgRange();
+ if (ArgRange.isValid())
+ return SourceRange(getOuterLocStart(), ArgRange.getEnd());
+ }
+
+ return DeclaratorDecl::getSourceRange();
+}
+
+Expr *ParmVarDecl::getDefaultArg() {
+ assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
+ assert(!hasUninstantiatedDefaultArg() &&
+ "Default argument is not yet instantiated!");
+
+ Expr *Arg = getInit();
+ if (ExprWithCleanups *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ return E->getSubExpr();
+
+ return Arg;
+}
+
+SourceRange ParmVarDecl::getDefaultArgRange() const {
+ if (const Expr *E = getInit())
+ return E->getSourceRange();
+
+ if (hasUninstantiatedDefaultArg())
+ return getUninstantiatedDefaultArg()->getSourceRange();
+
+ return SourceRange();
+}
+
+bool ParmVarDecl::isParameterPack() const {
+ return isa<PackExpansionType>(getType());
+}
+
+void ParmVarDecl::setParameterIndexLarge(unsigned parameterIndex) {
+ getASTContext().setParameterIndex(this, parameterIndex);
+ ParmVarDeclBits.ParameterIndex = ParameterIndexSentinel;
+}
+
+unsigned ParmVarDecl::getParameterIndexLarge() const {
+ return getASTContext().getParameterIndex(this);
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionDecl::getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(S, Policy, Qualified);
+ const TemplateArgumentList *TemplateArgs = getTemplateSpecializationArgs();
+ if (TemplateArgs)
+ S += TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs->data(),
+ TemplateArgs->size(),
+ Policy);
+}
+
+bool FunctionDecl::isVariadic() const {
+ if (const FunctionProtoType *FT = getType()->getAs<FunctionProtoType>())
+ return FT->isVariadic();
+ return false;
+}
+
+bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body || I->IsLateTemplateParsed) {
+ Definition = *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool FunctionDecl::hasTrivialBody() const {
+ Stmt *S = getBody();
+ if (!S) {
+ // Since we don't have a body for this function, we don't know if it's
+ // trivial or not.
+ return false;
+ }
+
+ if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
+ return true;
+ return false;
+}
+
+bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed) {
+ Definition = I->IsDeleted ? I->getCanonicalDecl() : *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body) {
+ Definition = *I;
+ return I->Body.get(getASTContext().getExternalSource());
+ } else if (I->IsLateTemplateParsed) {
+ Definition = *I;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+void FunctionDecl::setBody(Stmt *B) {
+ Body = B;
+ if (B)
+ EndRangeLoc = B->getLocEnd();
+}
+
+void FunctionDecl::setPure(bool P) {
+ IsPure = P;
+ if (P)
+ if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ Parent->markedVirtualFunctionPure();
+}
+
+bool FunctionDecl::isMain() const {
+ const TranslationUnitDecl *tunit =
+ dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
+ return tunit &&
+ !tunit->getASTContext().getLangOpts().Freestanding &&
+ getIdentifier() &&
+ getIdentifier()->isStr("main");
+}
+
+bool FunctionDecl::isReservedGlobalPlacementOperator() const {
+ assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName);
+ assert(getDeclName().getCXXOverloadedOperator() == OO_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Delete ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
+
+ if (isa<CXXRecordDecl>(getDeclContext())) return false;
+ assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
+ if (proto->getNumArgs() != 2 || proto->isVariadic()) return false;
+
+ ASTContext &Context =
+ cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
+ ->getASTContext();
+
+ // The result type and first argument type are constant across all
+ // these operators. The second argument must be exactly void*.
+ return (proto->getArgType(1).getCanonicalType() == Context.VoidPtrTy);
+}
+
+bool FunctionDecl::isExternC() const {
+ if (getLinkage() != ExternalLinkage)
+ return false;
+
+ if (getAttr<OverloadableAttr>())
+ return false;
+
+ const DeclContext *DC = getDeclContext();
+ if (DC->isRecord())
+ return false;
+
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+
+ return isMain() || DC->isExternCContext();
+}
+
+bool FunctionDecl::isGlobal() const {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(this))
+ return Method->isStatic();
+
+ if (getStorageClass() == SC_Static)
+ return false;
+
+ for (const DeclContext *DC = getDeclContext();
+ DC->isNamespace();
+ DC = DC->getParent()) {
+ if (const NamespaceDecl *Namespace = cast<NamespaceDecl>(DC)) {
+ if (!Namespace->getDeclName())
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+void
+FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
+ redeclarable_base::setPreviousDeclaration(PrevDecl);
+
+ if (FunctionTemplateDecl *FunTmpl = getDescribedFunctionTemplate()) {
+ FunctionTemplateDecl *PrevFunTmpl
+ = PrevDecl? PrevDecl->getDescribedFunctionTemplate() : 0;
+ assert((!PrevDecl || PrevFunTmpl) && "Function/function template mismatch");
+ FunTmpl->setPreviousDeclaration(PrevFunTmpl);
+ }
+
+ if (PrevDecl && PrevDecl->IsInline)
+ IsInline = true;
+}
+
+const FunctionDecl *FunctionDecl::getCanonicalDecl() const {
+ return getFirstDeclaration();
+}
+
+FunctionDecl *FunctionDecl::getCanonicalDecl() {
+ return getFirstDeclaration();
+}
+
+void FunctionDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForFunction(SC));
+ if (getStorageClass() != SC)
+ ClearLinkageCache();
+
+ SClass = SC;
+}
+
+/// \brief Returns a value indicating whether this function
+/// corresponds to a builtin function.
+///
+/// The function corresponds to a built-in function if it is
+/// declared at translation scope or within an extern "C" block and
+/// its name matches with the name of a builtin. The returned value
+/// will be 0 for functions that do not correspond to a builtin, a
+/// value of type \c Builtin::ID if in the target-independent range
+/// \c [1,Builtin::First), or a target-specific builtin value.
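+///
+/// For example, in C a non-static file-scope declaration of printf() maps
+/// to the printf builtin ID, while a function that merely shares the name
+/// (e.g. a static function named printf) yields 0.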
+unsigned FunctionDecl::getBuiltinID() const {
+ if (!getIdentifier())
+ return 0;
+
+ unsigned BuiltinID = getIdentifier()->getBuiltinID();
+ if (!BuiltinID)
+ return 0;
+
+ ASTContext &Context = getASTContext();
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return BuiltinID;
+
+ // This function has the name of a known C library
+ // function. Determine whether it actually refers to the C library
+ // function or whether it just has the same name.
+
+ // If this is a static function, it's not a builtin.
+ if (getStorageClass() == SC_Static)
+ return 0;
+
+ // If this function is at translation-unit scope and we're not in
+ // C++, it refers to the C library function.
+ if (!Context.getLangOpts().CPlusPlus &&
+ getDeclContext()->isTranslationUnit())
+ return BuiltinID;
+
+ // If the function is in an extern "C" linkage specification and is
+ // not marked "overloadable", it's the real function.
+ if (isa<LinkageSpecDecl>(getDeclContext()) &&
+ cast<LinkageSpecDecl>(getDeclContext())->getLanguage()
+ == LinkageSpecDecl::lang_c &&
+ !getAttr<OverloadableAttr>())
+ return BuiltinID;
+
+ // Not a builtin
+ return 0;
+}
+
+/// getNumParams - Return the number of parameters this function must have
+/// based on its FunctionType. This is the length of the ParamInfo array
+/// after it has been created.
+unsigned FunctionDecl::getNumParams() const {
+ const FunctionType *FT = getType()->getAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT))
+ return 0;
+  return cast<FunctionProtoType>(FT)->getNumArgs();
+}
+
+void FunctionDecl::setParams(ASTContext &C,
+ llvm::ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(ParamInfo == 0 && "Already has param info!");
+ assert(NewParamInfo.size() == getNumParams() && "Parameter count mismatch!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ ParamInfo = new (C) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+void FunctionDecl::setDeclsInPrototypeScope(llvm::ArrayRef<NamedDecl *> NewDecls) {
+ assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
+
+ if (!NewDecls.empty()) {
+ NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
+ std::copy(NewDecls.begin(), NewDecls.end(), A);
+ DeclsInPrototypeScope = llvm::ArrayRef<NamedDecl*>(A, NewDecls.size());
+ }
+}
+
+/// getMinRequiredArguments - Returns the minimum number of arguments
+/// needed to call this function. This may be fewer than the number of
+/// function parameters, if some of the parameters have default
+/// arguments (in C++) or the last parameter is a parameter pack.
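+///
+/// For example, "void f(int a, int b = 0)" requires only one argument, and
+/// a trailing parameter pack likewise requires no argument of its own.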
+unsigned FunctionDecl::getMinRequiredArguments() const {
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return getNumParams();
+
+ unsigned NumRequiredArgs = getNumParams();
+
+ // If the last parameter is a parameter pack, we don't need an argument for
+ // it.
+ if (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs - 1)->isParameterPack())
+ --NumRequiredArgs;
+
+ // If this parameter has a default argument, we don't need an argument for
+ // it.
+ while (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
+ --NumRequiredArgs;
+
+ // We might have parameter packs before the end. These can't be deduced,
+ // but they can still handle multiple arguments.
+ unsigned ArgIdx = NumRequiredArgs;
+ while (ArgIdx > 0) {
+ if (getParamDecl(ArgIdx - 1)->isParameterPack())
+ NumRequiredArgs = ArgIdx;
+
+ --ArgIdx;
+ }
+
+ return NumRequiredArgs;
+}
+
+bool FunctionDecl::isInlined() const {
+ if (IsInline)
+ return true;
+
+ if (isa<CXXMethodDecl>(this)) {
+ if (!isOutOfLine() || getCanonicalDecl()->isInlineSpecified())
+ return true;
+ }
+
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ // Handle below.
+ break;
+ }
+
+ const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
+ bool HasPattern = false;
+ if (PatternDecl)
+ HasPattern = PatternDecl->hasBody(PatternDecl);
+
+ if (HasPattern && PatternDecl)
+ return PatternDecl->isInlined();
+
+ return false;
+}
+
+static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
+ // Only consider file-scope declarations in this test.
+ if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
+ return false;
+
+ // Only consider explicit declarations; the presence of a builtin for a
+ // libcall shouldn't affect whether a definition is externally visible.
+ if (Redecl->isImplicit())
+ return false;
+
+ if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
+ return true; // Not an inline definition
+
+ return false;
+}
+
+/// \brief For a function declaration in C or C++, determine whether this
+/// declaration causes the definition to be externally visible.
+///
+/// Specifically, this determines if adding the current declaration to the set
+/// of redeclarations of the given functions causes
+/// isInlineDefinitionExternallyVisible to change from false to true.
+bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
+ assert(!doesThisDeclarationHaveABody() &&
+ "Must have a declaration without a body.");
+
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+    // With GNU inlining, a declaration with 'inline' but not 'extern' forces
+ // an externally visible definition.
+ //
+ // FIXME: What happens if gnu_inline gets added on after the first
+ // declaration?
+ if (!isInlineSpecified() || getStorageClassAsWritten() == SC_Extern)
+ return false;
+
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body;
+
+ if (Prev->Body) {
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then it is always externally visible.
+ if (!Prev->isInlineSpecified() ||
+ Prev->getStorageClassAsWritten() != SC_Extern)
+ return false;
+ } else if (Prev->isInlineSpecified() &&
+ Prev->getStorageClassAsWritten() != SC_Extern) {
+ return false;
+ }
+ }
+ return FoundBody;
+ }
+
+ if (Context.getLangOpts().CPlusPlus)
+ return false;
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ if (isInlineSpecified() && getStorageClass() != SC_Extern)
+ return false;
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body;
+ if (RedeclForcesDefC99(Prev))
+ return false;
+ }
+ return FoundBody;
+}
+
+/// \brief For an inline function definition in C or C++, determine whether the
+/// definition will be externally visible.
+///
+/// Inline function definitions are always available for inlining optimizations.
+/// However, depending on the language dialect, declaration specifiers, and
+/// attributes, the definition of an inline function may or may not be
+/// "externally" visible to other translation units in the program.
+///
+/// In C99, inline definitions are not externally visible by default. However,
+/// if even one of the global-scope declarations is marked "extern inline", the
+/// inline definition becomes externally visible (C99 6.7.4p6).
+///
+/// In GNU89 mode, or if the gnu_inline attribute is attached to the function
+/// definition, we use the GNU semantics for inline, which are nearly the
+/// opposite of C99 semantics. In particular, "inline" by itself will create
+/// an externally visible symbol, but "extern inline" will not create an
+/// externally visible symbol.
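+///
+/// For example, under C99 semantics a translation unit whose only
+/// declarations of f are "inline void f(void) { ... }" does not provide an
+/// external definition of f, whereas under the GNU89/gnu_inline semantics
+/// that same definition is externally visible and "extern inline" is the
+/// form that is not.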
+bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
+ assert(doesThisDeclarationHaveABody() && "Must have the function definition");
+ assert(isInlined() && "Function must be inline");
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // Note: If you change the logic here, please change
+ // doesDeclarationForceExternallyVisibleDefinition as well.
+ //
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then this inline definition is
+ // externally visible.
+ if (!(isInlineSpecified() && getStorageClassAsWritten() == SC_Extern))
+ return true;
+
+ // If any declaration is 'inline' but not 'extern', then this definition
+ // is externally visible.
+ for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
+ Redecl != RedeclEnd;
+ ++Redecl) {
+ if (Redecl->isInlineSpecified() &&
+ Redecl->getStorageClassAsWritten() != SC_Extern)
+ return true;
+ }
+
+ return false;
+ }
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
+ Redecl != RedeclEnd;
+ ++Redecl) {
+ if (RedeclForcesDefC99(*Redecl))
+ return true;
+ }
+
+ // C99 6.7.4p6:
+ // An inline definition does not provide an external definition for the
+ // function, and does not forbid an external definition in another
+ // translation unit.
+ return false;
+}
+
+/// getOverloadedOperator - Which C++ overloaded operator this
+/// function represents, if any.
+OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return getDeclName().getCXXOverloadedOperator();
+ else
+ return OO_None;
+}
+
+/// getLiteralIdentifier - The literal suffix identifier this function
+/// represents, if any.
+const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXLiteralOperatorName)
+ return getDeclName().getCXXLiteralIdentifier();
+ else
+ return 0;
+}
+
+FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
+ if (TemplateOrSpecialization.isNull())
+ return TK_NonTemplate;
+ if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ return TK_FunctionTemplate;
+ if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
+ return TK_MemberSpecialization;
+ if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
+ return TK_FunctionTemplateSpecialization;
+ if (TemplateOrSpecialization.is
+ <DependentFunctionTemplateSpecializationInfo*>())
+ return TK_DependentFunctionTemplateSpecialization;
+
+ llvm_unreachable("Did we miss a TemplateOrSpecialization type?");
+}
+
+FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
+ if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
+ return cast<FunctionDecl>(Info->getInstantiatedFrom());
+
+ return 0;
+}
+
+MemberSpecializationInfo *FunctionDecl::getMemberSpecializationInfo() const {
+ return TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
+}
+
+void
+FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
+ FunctionDecl *FD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrSpecialization.isNull() &&
+ "Member function is already a specialization");
+ MemberSpecializationInfo *Info
+ = new (C) MemberSpecializationInfo(FD, TSK);
+ TemplateOrSpecialization = Info;
+}
+
+bool FunctionDecl::isImplicitlyInstantiable() const {
+ // If the function is invalid, it can't be implicitly instantiated.
+ if (isInvalidDecl())
+ return false;
+
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitInstantiationDefinition:
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ return true;
+
+ // It is possible to instantiate TSK_ExplicitSpecialization kind
+ // if the FunctionDecl has a class scope specialization pattern.
+ case TSK_ExplicitSpecialization:
+ return getClassScopeSpecializationPattern() != 0;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // Handled below.
+ break;
+ }
+
+ // Find the actual template from which we will instantiate.
+ const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
+ bool HasPattern = false;
+ if (PatternDecl)
+ HasPattern = PatternDecl->hasBody(PatternDecl);
+
+ // C++0x [temp.explicit]p9:
+ // Except for inline functions, other explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (!HasPattern || !PatternDecl)
+ return true;
+
+ return PatternDecl->isInlined();
+}
+
+bool FunctionDecl::isTemplateInstantiation() const {
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ return true;
+ }
+ llvm_unreachable("All TSK values handled.");
+}
+
+FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
+  // Handle the special case of a class-scope explicit specialization.
+ if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return getClassScopeSpecializationPattern();
+
+ if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
+ while (Primary->getInstantiatedFromMemberTemplate()) {
+ // If we have hit a point where the user provided a specialization of
+ // this template, we're done looking.
+ if (Primary->isMemberSpecialization())
+ break;
+
+ Primary = Primary->getInstantiatedFromMemberTemplate();
+ }
+
+ return Primary->getTemplatedDecl();
+ }
+
+ return getInstantiatedFromMemberFunction();
+}
+
+FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->Template.getPointer();
+ }
+ return 0;
+}
+
+FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
+ return getASTContext().getClassScopeSpecializationPattern(this);
+}
+
+const TemplateArgumentList *
+FunctionDecl::getTemplateSpecializationArgs() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArguments;
+ }
+ return 0;
+}
+
+const ASTTemplateArgumentListInfo *
+FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArgumentsAsWritten;
+ }
+ return 0;
+}
+
+void
+FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
+ assert(TSK != TSK_Undeclared &&
+ "Must specify the type of function template specialization");
+ FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (!Info)
+ Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
+ TemplateArgs,
+ TemplateArgsAsWritten,
+ PointOfInstantiation);
+ TemplateOrSpecialization = Info;
+ Template->addSpecialization(Info, InsertPos);
+}
+
+void
+FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
+ const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo &TemplateArgs) {
+ assert(TemplateOrSpecialization.isNull());
+ size_t Size = sizeof(DependentFunctionTemplateSpecializationInfo);
+ Size += Templates.size() * sizeof(FunctionTemplateDecl*);
+ Size += TemplateArgs.size() * sizeof(TemplateArgumentLoc);
+ void *Buffer = Context.Allocate(Size);
+ DependentFunctionTemplateSpecializationInfo *Info =
+ new (Buffer) DependentFunctionTemplateSpecializationInfo(Templates,
+ TemplateArgs);
+ TemplateOrSpecialization = Info;
+}
+
+DependentFunctionTemplateSpecializationInfo::
+DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
+ const TemplateArgumentListInfo &TArgs)
+ : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
+
+ d.NumTemplates = Ts.size();
+ d.NumArgs = TArgs.size();
+
+ FunctionTemplateDecl **TsArray =
+ const_cast<FunctionTemplateDecl**>(getTemplates());
+ for (unsigned I = 0, E = Ts.size(); I != E; ++I)
+ TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
+
+ TemplateArgumentLoc *ArgsArray =
+ const_cast<TemplateArgumentLoc*>(getTemplateArgs());
+ for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
+ new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
+}
+
+TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
+ // For a function template specialization, query the specialization
+ // information object.
+ FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (FTSInfo)
+ return FTSInfo->getTemplateSpecializationKind();
+
+ MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
+ if (MSInfo)
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void
+FunctionDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>()) {
+ FTSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ FTSInfo->getPointOfInstantiation().isInvalid())
+ FTSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else
+ llvm_unreachable("Function cannot have a template specialization kind");
+}
+
+SourceLocation FunctionDecl::getPointOfInstantiation() const {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>())
+ return FTSInfo->getPointOfInstantiation();
+ else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>())
+ return MSInfo->getPointOfInstantiation();
+
+ return SourceLocation();
+}
+
+bool FunctionDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ // If this function was instantiated from a member function of a
+ // class template, check whether that member function was defined out-of-line.
+ if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->isOutOfLine();
+ }
+
+ // If this function was instantiated from a function template,
+ // check whether that function template was defined out-of-line.
+ if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) {
+ const FunctionDecl *Definition;
+ if (FunTmpl->getTemplatedDecl()->hasBody(Definition))
+ return Definition->isOutOfLine();
+ }
+
+ return false;
+}
+
+SourceRange FunctionDecl::getSourceRange() const {
+ return SourceRange(getOuterLocStart(), EndRangeLoc);
+}
+
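+/// getMemoryFunctionKind - Identify which well-known memory or string
+/// function this declaration names, returning the corresponding Builtin::BI*
+/// value (e.g. BImemset, BImemcpy) or 0 if it is none of them. Recognizes
+/// both the builtin forms and, for most of these functions, plain
+/// extern "C" declarations matched by name.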
+unsigned FunctionDecl::getMemoryFunctionKind() const {
+ IdentifierInfo *FnInfo = getIdentifier();
+
+ if (!FnInfo)
+ return 0;
+
+ // Builtin handling.
+ switch (getBuiltinID()) {
+ case Builtin::BI__builtin_memset:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BImemset:
+ return Builtin::BImemset;
+
+ case Builtin::BI__builtin_memcpy:
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BImemcpy:
+ return Builtin::BImemcpy;
+
+ case Builtin::BI__builtin_memmove:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BImemmove:
+ return Builtin::BImemmove;
+
+ case Builtin::BIstrlcpy:
+ return Builtin::BIstrlcpy;
+ case Builtin::BIstrlcat:
+ return Builtin::BIstrlcat;
+
+ case Builtin::BI__builtin_memcmp:
+ case Builtin::BImemcmp:
+ return Builtin::BImemcmp;
+
+ case Builtin::BI__builtin_strncpy:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BIstrncpy:
+ return Builtin::BIstrncpy;
+
+ case Builtin::BI__builtin_strncmp:
+ case Builtin::BIstrncmp:
+ return Builtin::BIstrncmp;
+
+ case Builtin::BI__builtin_strncasecmp:
+ case Builtin::BIstrncasecmp:
+ return Builtin::BIstrncasecmp;
+
+ case Builtin::BI__builtin_strncat:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BIstrncat:
+ return Builtin::BIstrncat;
+
+ case Builtin::BI__builtin_strndup:
+ case Builtin::BIstrndup:
+ return Builtin::BIstrndup;
+
+ case Builtin::BI__builtin_strlen:
+ case Builtin::BIstrlen:
+ return Builtin::BIstrlen;
+
+ default:
+ if (isExternC()) {
+ if (FnInfo->isStr("memset"))
+ return Builtin::BImemset;
+ else if (FnInfo->isStr("memcpy"))
+ return Builtin::BImemcpy;
+ else if (FnInfo->isStr("memmove"))
+ return Builtin::BImemmove;
+ else if (FnInfo->isStr("memcmp"))
+ return Builtin::BImemcmp;
+ else if (FnInfo->isStr("strncpy"))
+ return Builtin::BIstrncpy;
+ else if (FnInfo->isStr("strncmp"))
+ return Builtin::BIstrncmp;
+ else if (FnInfo->isStr("strncasecmp"))
+ return Builtin::BIstrncasecmp;
+ else if (FnInfo->isStr("strncat"))
+ return Builtin::BIstrncat;
+ else if (FnInfo->isStr("strndup"))
+ return Builtin::BIstrndup;
+ else if (FnInfo->isStr("strlen"))
+ return Builtin::BIstrlen;
+ }
+ break;
+ }
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// FieldDecl Implementation
+//===----------------------------------------------------------------------===//
+
+FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ bool HasInit) {
+ return new (C) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
+ BW, Mutable, HasInit);
+}
+
+FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl));
+ return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, 0, false, false);
+}
+
+bool FieldDecl::isAnonymousStructOrUnion() const {
+ if (!isImplicit() || getDeclName())
+ return false;
+
+ if (const RecordType *Record = getType()->getAs<RecordType>())
+ return Record->getDecl()->isAnonymousStructOrUnion();
+
+ return false;
+}
+
+unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
+ assert(isBitField() && "not a bitfield");
+ Expr *BitWidth = InitializerOrBitWidth.getPointer();
+ return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
+}
+
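+/// getFieldIndex - Compute the zero-based index of this field within its
+/// parent record. The result is cached one-based in CachedFieldIndex (so 0
+/// means "not yet computed"), and a single pass over the record caches the
+/// index of every sibling field as well.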
+unsigned FieldDecl::getFieldIndex() const {
+ if (CachedFieldIndex) return CachedFieldIndex - 1;
+
+ unsigned Index = 0;
+ const RecordDecl *RD = getParent();
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++Index) {
+ (*I)->CachedFieldIndex = Index + 1;
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored.
+ if (getASTContext().ZeroBitfieldFollowsNonBitfield((*I), LastFD)) {
+ --Index;
+ continue;
+ }
+ LastFD = (*I);
+ }
+ }
+
+ assert(CachedFieldIndex && "failed to find field in parent");
+ return CachedFieldIndex - 1;
+}
+
+SourceRange FieldDecl::getSourceRange() const {
+ if (const Expr *E = InitializerOrBitWidth.getPointer())
+ return SourceRange(getInnerLocStart(), E->getLocEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+void FieldDecl::setInClassInitializer(Expr *Init) {
+ assert(!InitializerOrBitWidth.getPointer() &&
+ "bit width or initializer already set");
+ InitializerOrBitWidth.setPointer(Init);
+ InitializerOrBitWidth.setInt(0);
+}
+
+//===----------------------------------------------------------------------===//
+// TagDecl Implementation
+//===----------------------------------------------------------------------===//
+
+SourceLocation TagDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+SourceRange TagDecl::getSourceRange() const {
+ SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
+ return SourceRange(getOuterLocStart(), E);
+}
+
+TagDecl* TagDecl::getCanonicalDecl() {
+ return getFirstDeclaration();
+}
+
+void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
+ TypedefNameDeclOrQualifier = TDD;
+ if (TypeForDecl)
+ const_cast<Type*>(TypeForDecl)->ClearLinkageCache();
+ ClearLinkageCache();
+}
+
+void TagDecl::startDefinition() {
+ IsBeingDefined = true;
+
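+  // For C++ classes, allocate the DefinitionData now and share it across all
+  // redeclarations so that every declaration of the class sees the same
+  // definition state.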
+ if (isa<CXXRecordDecl>(this)) {
+ CXXRecordDecl *D = cast<CXXRecordDecl>(this);
+ struct CXXRecordDecl::DefinitionData *Data =
+ new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+ cast<CXXRecordDecl>(*I)->DefinitionData = Data;
+ }
+}
+
+void TagDecl::completeDefinition() {
+ assert((!isa<CXXRecordDecl>(this) ||
+ cast<CXXRecordDecl>(this)->hasDefinition()) &&
+ "definition completed but not started");
+
+ IsCompleteDefinition = true;
+ IsBeingDefined = false;
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->CompletedTagDefinition(this);
+}
+
+TagDecl *TagDecl::getDefinition() const {
+ if (isCompleteDefinition())
+ return const_cast<TagDecl *>(this);
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
+ return CXXRD->getDefinition();
+
+ for (redecl_iterator R = redecls_begin(), REnd = redecls_end();
+ R != REnd; ++R)
+ if (R->isCompleteDefinition())
+ return *R;
+
+ return 0;
+}
+
+void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+ if (QualifierLoc) {
+ // Make sure the extended qualifier info is allocated.
+ if (!hasExtInfo())
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
+ // Set qualifier info.
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ } else {
+ // Here Qualifier == 0, i.e., we are removing the qualifier (if any).
+ if (hasExtInfo()) {
+ if (getExtInfo()->NumTemplParamLists == 0) {
+ getASTContext().Deallocate(getExtInfo());
+ TypedefNameDeclOrQualifier = (TypedefNameDecl*) 0;
+ }
+ else
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ }
+ }
+}
+
+void TagDecl::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert(NumTPLists > 0);
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo())
+ // Allocate external info struct.
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
+ // Set the template parameter lists info.
+ getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+}
+
+//===----------------------------------------------------------------------===//
+// EnumDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void EnumDecl::anchor() { }
+
+EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ EnumDecl *PrevDecl, bool IsScoped,
+ bool IsScopedUsingClassTag, bool IsFixed) {
+ EnumDecl *Enum = new (C) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
+ IsScoped, IsScopedUsingClassTag, IsFixed);
+ C.getTypeDeclType(Enum, PrevDecl);
+ return Enum;
+}
+
+EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumDecl));
+ return new (Mem) EnumDecl(0, SourceLocation(), SourceLocation(), 0, 0,
+ false, false, false);
+}
+
+void EnumDecl::completeDefinition(QualType NewType,
+ QualType NewPromotionType,
+ unsigned NumPositiveBits,
+ unsigned NumNegativeBits) {
+ assert(!isCompleteDefinition() && "Cannot redefine enums!");
+ if (!IntegerType)
+ IntegerType = NewType.getTypePtr();
+ PromotionType = NewPromotionType;
+ setNumPositiveBits(NumPositiveBits);
+ setNumNegativeBits(NumNegativeBits);
+ TagDecl::completeDefinition();
+}
+
+TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void EnumDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+ assert(MSI && "Not an instantiated member enumeration?");
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+EnumDecl *EnumDecl::getInstantiatedFromMemberEnum() const {
+ if (SpecializationInfo)
+ return cast<EnumDecl>(SpecializationInfo->getInstantiatedFrom());
+
+ return 0;
+}
+
+void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+ TemplateSpecializationKind TSK) {
+ assert(!SpecializationInfo && "Member enum is already a specialization");
+ SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
+}
+
+//===----------------------------------------------------------------------===//
+// RecordDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RecordDecl::RecordDecl(Kind DK, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl *PrevDecl)
+ : TagDecl(DK, TK, DC, IdLoc, Id, PrevDecl, StartLoc) {
+ HasFlexibleArrayMember = false;
+ AnonymousStructOrUnion = false;
+ HasObjectMember = false;
+ LoadedFieldsFromExternalStorage = false;
+ assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
+}
+
+RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl* PrevDecl) {
+ RecordDecl* R = new (C) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
+ PrevDecl);
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(RecordDecl));
+ return new (Mem) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
+}
+
+bool RecordDecl::isInjectedClassName() const {
+ return isImplicit() && getDeclName() && getDeclContext()->isRecord() &&
+ cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
+}
+
+RecordDecl::field_iterator RecordDecl::field_begin() const {
+ if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
+ LoadFieldsFromExternalStorage();
+
+ return field_iterator(decl_iterator(FirstDecl));
+}
+
+/// completeDefinition - Notes that the definition of this type is now
+/// complete.
+void RecordDecl::completeDefinition() {
+ assert(!isCompleteDefinition() && "Cannot redefine record!");
+ TagDecl::completeDefinition();
+}
+
+void RecordDecl::LoadFieldsFromExternalStorage() const {
+ ExternalASTSource *Source = getASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a RecordDecl doing some initialization.
+ ExternalASTSource::Deserializing TheFields(Source);
+
+ SmallVector<Decl*, 64> Decls;
+ LoadedFieldsFromExternalStorage = true;
+ switch (Source->FindExternalLexicalDeclsBy<FieldDecl>(this, Decls)) {
+ case ELR_Success:
+ break;
+
+ case ELR_AlreadyLoaded:
+ case ELR_Failure:
+ return;
+ }
+
+#ifndef NDEBUG
+ // Check that all decls we got were FieldDecls.
+ for (unsigned i=0, e=Decls.size(); i != e; ++i)
+ assert(isa<FieldDecl>(Decls[i]));
+#endif
+
+ if (Decls.empty())
+ return;
+
+ llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
+ /*FieldsAlreadyLoaded=*/false);
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void BlockDecl::setParams(llvm::ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(ParamInfo == 0 && "Already has param info!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ NumParams = NewParamInfo.size();
+ ParamInfo = new (getASTContext()) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+void BlockDecl::setCaptures(ASTContext &Context,
+ const Capture *begin,
+ const Capture *end,
+ bool capturesCXXThis) {
+ CapturesCXXThis = capturesCXXThis;
+
+ if (begin == end) {
+ NumCaptures = 0;
+ Captures = 0;
+ return;
+ }
+
+ NumCaptures = end - begin;
+
+ // Avoid new Capture[] because we don't want to provide a default
+ // constructor.
+ size_t allocationSize = NumCaptures * sizeof(Capture);
+ void *buffer = Context.Allocate(allocationSize, /*alignment*/sizeof(void*));
+ memcpy(buffer, begin, allocationSize);
+ Captures = static_cast<Capture*>(buffer);
+}
+
+bool BlockDecl::capturesVariable(const VarDecl *variable) const {
+ for (capture_const_iterator
+ i = capture_begin(), e = capture_end(); i != e; ++i)
+ // Only auto vars can be captured, so no redeclaration worries.
+ if (i->getVariable() == variable)
+ return true;
+
+ return false;
+}
+
+SourceRange BlockDecl::getSourceRange() const {
+ return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TranslationUnitDecl::anchor() { }
+
+TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
+ return new (C) TranslationUnitDecl(C);
+}
+
+void LabelDecl::anchor() { }
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II) {
+ return new (C) LabelDecl(DC, IdentL, II, 0, IdentL);
+}
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II,
+ SourceLocation GnuLabelL) {
+ assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
+ return new (C) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
+}
+
+LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LabelDecl));
+ return new (Mem) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
+}
+
+void ValueDecl::anchor() { }
+
+void ImplicitParamDecl::anchor() { }
+
+ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ QualType Type) {
+ return new (C) ImplicitParamDecl(DC, IdLoc, Id, Type);
+}
+
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ImplicitParamDecl));
+ return new (Mem) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
+}
+
+FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC, StorageClass SCAsWritten,
+ bool isInlineSpecified,
+ bool hasWrittenPrototype,
+ bool isConstexprSpecified) {
+ FunctionDecl *New = new (C) FunctionDecl(Function, DC, StartLoc, NameInfo,
+ T, TInfo, SC, SCAsWritten,
+ isInlineSpecified,
+ isConstexprSpecified);
+ New->HasWrittenPrototype = hasWrittenPrototype;
+ return New;
+}
+
+FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionDecl));
+ return new (Mem) FunctionDecl(Function, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(), 0,
+ SC_None, SC_None, false, false);
+}
+
+BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
+ return new (C) BlockDecl(DC, L);
+}
+
+BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(BlockDecl));
+ return new (Mem) BlockDecl(0, SourceLocation());
+}
+
+EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
+ SourceLocation L,
+ IdentifierInfo *Id, QualType T,
+ Expr *E, const llvm::APSInt &V) {
+ return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
+}
+
+EnumConstantDecl *
+EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumConstantDecl));
+ return new (Mem) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0,
+ llvm::APSInt());
+}
+
+void IndirectFieldDecl::anchor() { }
+
+IndirectFieldDecl *
+IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, NamedDecl **CH,
+ unsigned CHS) {
+ return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
+}
+
+IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(IndirectFieldDecl));
+ return new (Mem) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, 0);
+}
+
+SourceRange EnumConstantDecl::getSourceRange() const {
+ SourceLocation End = getLocation();
+ if (Init)
+ End = Init->getLocEnd();
+ return SourceRange(getLocation(), End);
+}
+
+void TypeDecl::anchor() { }
+
+TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo) {
+ return new (C) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
+}
+
+void TypedefNameDecl::anchor() { }
+
+TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypedefDecl));
+ return new (Mem) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+}
+
+TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ TypeSourceInfo *TInfo) {
+ return new (C) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
+}
+
+TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasDecl));
+ return new (Mem) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+}
+
+SourceRange TypedefDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocation();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
+ if (typeIsPostfix(TInfo->getType()))
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ }
+ return SourceRange(getLocStart(), RangeEnd);
+}
+
+SourceRange TypeAliasDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocStart();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo())
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ return SourceRange(getLocStart(), RangeEnd);
+}
+
+void FileScopeAsmDecl::anchor() { }
+
+FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
+ StringLiteral *Str,
+ SourceLocation AsmLoc,
+ SourceLocation RParenLoc) {
+ return new (C) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
+}
+
+FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FileScopeAsmDecl));
+ return new (Mem) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// ImportDecl Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the number of module identifiers needed to name the given
+/// module.
+static unsigned getNumModuleIdentifiers(Module *Mod) {
+ unsigned Result = 1;
+ while (Mod->Parent) {
+ Mod = Mod->Parent;
+ ++Result;
+ }
+ return Result;
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true),
+ NextLocalImport()
+{
+ assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
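+  // The identifier source locations are tail-allocated immediately after the
+  // ImportDecl object itself (see ImportDecl::Create).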
+ SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(this + 1);
+ memcpy(StoredLocs, IdentifierLocs.data(),
+ IdentifierLocs.size() * sizeof(SourceLocation));
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported, SourceLocation EndLoc)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false),
+ NextLocalImport()
+{
+ *reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
+}
+
+ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs) {
+ void *Mem = C.Allocate(sizeof(ImportDecl) +
+ IdentifierLocs.size() * sizeof(SourceLocation));
+ return new (Mem) ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
+}
+
+ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ Module *Imported,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(ImportDecl) + sizeof(SourceLocation));
+ ImportDecl *Import = new (Mem) ImportDecl(DC, StartLoc, Imported, EndLoc);
+ Import->setImplicit();
+ return Import;
+}
+
+ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumLocations) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ (sizeof(ImportDecl) +
+ NumLocations * sizeof(SourceLocation)));
+ return new (Mem) ImportDecl(EmptyShell());
+}
+
+ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
+ if (!ImportedAndComplete.getInt())
+ return ArrayRef<SourceLocation>();
+
+ const SourceLocation *StoredLocs
+ = reinterpret_cast<const SourceLocation *>(this + 1);
+ return ArrayRef<SourceLocation>(StoredLocs,
+ getNumModuleIdentifiers(getImportedModule()));
+}
+
+SourceRange ImportDecl::getSourceRange() const {
+ if (!ImportedAndComplete.getInt())
+ return SourceRange(getLocation(),
+ *reinterpret_cast<const SourceLocation *>(this + 1));
+
+ return SourceRange(getLocation(), getIdentifierLocs().back());
+}
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
new file mode 100644
index 0000000..47a0d25
--- /dev/null
+++ b/clang/lib/AST/DeclBase.cpp
@@ -0,0 +1,1441 @@
+//===--- DeclBase.cpp - Declaration AST Node Implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl and DeclContext classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statistics
+//===----------------------------------------------------------------------===//
+
+#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+void *Decl::AllocateDeserializedDecl(const ASTContext &Context,
+ unsigned ID,
+ unsigned Size) {
+ // Allocate an extra 8 bytes worth of storage, which ensures that the
+ // resulting pointer will still be 8-byte aligned.
+ void *Start = Context.Allocate(Size + 8);
+ void *Result = (char*)Start + 8;
+
+ unsigned *PrefixPtr = (unsigned *)Result - 2;
+
+ // Zero out the first 4 bytes; this is used to store the owning module ID.
+ PrefixPtr[0] = 0;
+
+ // Store the global declaration ID in the second 4 bytes.
+ PrefixPtr[1] = ID;
+
+ return Result;
+}
+
+const char *Decl::getDeclKindName() const {
+ switch (DeclKind) {
+ default: llvm_unreachable("Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+void Decl::setInvalidDecl(bool Invalid) {
+ InvalidDecl = Invalid;
+ if (Invalid && !isa<ParmVarDecl>(this)) {
+ // Defensive maneuver for ill-formed code: we're likely not to make it to
+ // a point where we set the access specifier, so default it to "public"
+ // to avoid triggering asserts elsewhere in the front end.
+ setAccess(AS_public);
+ }
+}
+
+const char *DeclContext::getDeclKindName() const {
+ switch (DeclKind) {
+ default: llvm_unreachable("Declaration context not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+bool Decl::StatisticsEnabled = false;
+void Decl::EnableStatistics() {
+ StatisticsEnabled = true;
+}
+
+void Decl::PrintStats() {
+ llvm::errs() << "\n*** Decl Stats:\n";
+
+ int totalDecls = 0;
+#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ llvm::errs() << " " << totalDecls << " decls total.\n";
+
+ int totalBytes = 0;
+#define DECL(DERIVED, BASE) \
+ if (n##DERIVED##s > 0) { \
+ totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \
+ llvm::errs() << " " << n##DERIVED##s << " " #DERIVED " decls, " \
+ << sizeof(DERIVED##Decl) << " each (" \
+ << n##DERIVED##s * sizeof(DERIVED##Decl) \
+ << " bytes)\n"; \
+ }
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+ llvm::errs() << "Total bytes = " << totalBytes << "\n";
+}
+
+void Decl::add(Kind k) {
+ switch (k) {
+#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+bool Decl::isTemplateParameterPack() const {
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
+ return TTP->isParameterPack();
+ if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(this))
+ return NTTP->isParameterPack();
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(this))
+ return TTP->isParameterPack();
+ return false;
+}
+
+bool Decl::isParameterPack() const {
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
+ return Parm->isParameterPack();
+
+ return isTemplateParameterPack();
+}
+
+bool Decl::isFunctionOrFunctionTemplate() const {
+ if (const UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(this))
+ return UD->getTargetDecl()->isFunctionOrFunctionTemplate();
+
+ return isa<FunctionDecl>(this) || isa<FunctionTemplateDecl>(this);
+}
+
+bool Decl::isTemplateDecl() const {
+ return isa<TemplateDecl>(this);
+}
+
+const DeclContext *Decl::getParentFunctionOrMethod() const {
+ for (const DeclContext *DC = getDeclContext();
+ DC && !DC->isTranslationUnit() && !DC->isNamespace();
+ DC = DC->getParent())
+ if (DC->isFunctionOrMethod())
+ return DC;
+
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceDecl::print(raw_ostream &OS) const {
+ SourceLocation TheLoc = Loc;
+ if (TheLoc.isInvalid() && TheDecl)
+ TheLoc = TheDecl->getLocation();
+
+ if (TheLoc.isValid()) {
+ TheLoc.print(OS, SM);
+ OS << ": ";
+ }
+
+ OS << Message;
+
+ if (const NamedDecl *DN = dyn_cast_or_null<NamedDecl>(TheDecl))
+ OS << " '" << DN->getQualifiedNameAsString() << '\'';
+ OS << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// Decl Implementation
+//===----------------------------------------------------------------------===//
+
+// Out-of-line virtual method providing a home for Decl.
+Decl::~Decl() { }
+
+void Decl::setDeclContext(DeclContext *DC) {
+ DeclCtx = DC;
+}
+
+void Decl::setLexicalDeclContext(DeclContext *DC) {
+ if (DC == getLexicalDeclContext())
+ return;
+
+ if (isInSemaDC()) {
+ setDeclContextsImpl(getDeclContext(), DC, getASTContext());
+ } else {
+ getMultipleDC()->LexicalDC = DC;
+ }
+}
+
+void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+ ASTContext &Ctx) {
+ if (SemaDC == LexicalDC) {
+ DeclCtx = SemaDC;
+ } else {
+ Decl::MultipleDC *MDC = new (Ctx) Decl::MultipleDC();
+ MDC->SemanticDC = SemaDC;
+ MDC->LexicalDC = LexicalDC;
+ DeclCtx = MDC;
+ }
+}
+
+bool Decl::isInAnonymousNamespace() const {
+ const DeclContext *DC = getDeclContext();
+ do {
+ if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+ if (ND->isAnonymousNamespace())
+ return true;
+ } while ((DC = DC->getParent()));
+
+ return false;
+}
+
+TranslationUnitDecl *Decl::getTranslationUnitDecl() {
+ if (TranslationUnitDecl *TUD = dyn_cast<TranslationUnitDecl>(this))
+ return TUD;
+
+ DeclContext *DC = getDeclContext();
+ assert(DC && "This decl is not contained in a translation unit!");
+
+ while (!DC->isTranslationUnit()) {
+ DC = DC->getParent();
+ assert(DC && "This decl is not contained in a translation unit!");
+ }
+
+ return cast<TranslationUnitDecl>(DC);
+}
+
+ASTContext &Decl::getASTContext() const {
+ return getTranslationUnitDecl()->getASTContext();
+}
+
+ASTMutationListener *Decl::getASTMutationListener() const {
+ return getASTContext().getASTMutationListener();
+}
+
+bool Decl::isUsed(bool CheckUsedAttr) const {
+ if (Used)
+ return true;
+
+ // Check for used attribute.
+ if (CheckUsedAttr && hasAttr<UsedAttr>())
+ return true;
+
+ // Check redeclarations for used attribute.
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if ((CheckUsedAttr && I->hasAttr<UsedAttr>()) || I->Used)
+ return true;
+ }
+
+ return false;
+}
+
+bool Decl::isReferenced() const {
+ if (Referenced)
+ return true;
+
+ // Check redeclarations.
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+ if (I->Referenced)
+ return true;
+
+ return false;
+}
+
+/// \brief Determine the availability of the given declaration based on
+/// the target platform.
+///
+/// When it returns an availability result other than \c AR_Available,
+/// if the \p Message parameter is non-NULL, it will be set to a
+/// string describing why the entity is unavailable.
+///
+/// FIXME: Make these strings localizable, since they end up in
+/// diagnostics.
+static AvailabilityResult CheckAvailability(ASTContext &Context,
+ const AvailabilityAttr *A,
+ std::string *Message) {
+ StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+ StringRef PrettyPlatformName
+ = AvailabilityAttr::getPrettyPlatformName(TargetPlatform);
+ if (PrettyPlatformName.empty())
+ PrettyPlatformName = TargetPlatform;
+
+ VersionTuple TargetMinVersion = Context.getTargetInfo().getPlatformMinVersion();
+ if (TargetMinVersion.empty())
+ return AR_Available;
+
+ // Match the platform name.
+ if (A->getPlatform()->getName() != TargetPlatform)
+ return AR_Available;
+
+ std::string HintMessage;
+ if (!A->getMessage().empty()) {
+ HintMessage = " - ";
+ HintMessage += A->getMessage();
+ }
+
+ // Make sure that this declaration has not been marked 'unavailable'.
+ if (A->getUnavailable()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "not available on " << PrettyPlatformName
+ << HintMessage;
+ }
+
+ return AR_Unavailable;
+ }
+
+ // Make sure that this declaration has already been introduced.
+ if (!A->getIntroduced().empty() &&
+ TargetMinVersion < A->getIntroduced()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "introduced in " << PrettyPlatformName << ' '
+ << A->getIntroduced() << HintMessage;
+ }
+
+ return AR_NotYetIntroduced;
+ }
+
+ // Make sure that this declaration hasn't been obsoleted.
+ if (!A->getObsoleted().empty() && TargetMinVersion >= A->getObsoleted()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "obsoleted in " << PrettyPlatformName << ' '
+ << A->getObsoleted() << HintMessage;
+ }
+
+ return AR_Unavailable;
+ }
+
+ // Make sure that this declaration hasn't been deprecated.
+ if (!A->getDeprecated().empty() && TargetMinVersion >= A->getDeprecated()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "first deprecated in " << PrettyPlatformName << ' '
+ << A->getDeprecated() << HintMessage;
+ }
+
+ return AR_Deprecated;
+ }
+
+ return AR_Available;
+}
+
+AvailabilityResult Decl::getAvailability(std::string *Message) const {
+ AvailabilityResult Result = AR_Available;
+ std::string ResultMessage;
+
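+  // Walk every attribute, keeping the most severe availability result seen
+  // so far; an explicit 'unavailable' attribute short-circuits immediately.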
+ for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
+ if (DeprecatedAttr *Deprecated = dyn_cast<DeprecatedAttr>(*A)) {
+ if (Result >= AR_Deprecated)
+ continue;
+
+ if (Message)
+ ResultMessage = Deprecated->getMessage();
+
+ Result = AR_Deprecated;
+ continue;
+ }
+
+ if (UnavailableAttr *Unavailable = dyn_cast<UnavailableAttr>(*A)) {
+ if (Message)
+ *Message = Unavailable->getMessage();
+ return AR_Unavailable;
+ }
+
+ if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+ AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
+ Message);
+
+ if (AR == AR_Unavailable)
+ return AR_Unavailable;
+
+ if (AR > Result) {
+ Result = AR;
+ if (Message)
+ ResultMessage.swap(*Message);
+ }
+ continue;
+ }
+ }
+
+ if (Message)
+ Message->swap(ResultMessage);
+ return Result;
+}
+
+bool Decl::canBeWeakImported(bool &IsDefinition) const {
+ IsDefinition = false;
+ if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
+ if (!Var->hasExternalStorage() || Var->getInit()) {
+ IsDefinition = true;
+ return false;
+ }
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ if (FD->hasBody()) {
+ IsDefinition = true;
+ return false;
+ }
+ } else if (isa<ObjCPropertyDecl>(this) || isa<ObjCMethodDecl>(this))
+ return false;
+ else if (!(getASTContext().getLangOpts().ObjCNonFragileABI &&
+ isa<ObjCInterfaceDecl>(this)))
+ return false;
+
+ return true;
+}
+
+bool Decl::isWeakImported() const {
+ bool IsDefinition;
+ if (!canBeWeakImported(IsDefinition))
+ return false;
+
+ for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
+ if (isa<WeakImportAttr>(*A))
+ return true;
+
+ if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+ if (CheckAvailability(getASTContext(), Availability, 0)
+ == AR_NotYetIntroduced)
+ return true;
+ }
+ }
+
+ return false;
+}
+
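+/// getIdentifierNamespaceForKind - Return the IDNS_* bitmask describing
+/// which identifier namespaces a declaration of the given kind occupies for
+/// the purposes of name lookup.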
+unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
+ switch (DeclKind) {
+ case Function:
+ case CXXMethod:
+ case CXXConstructor:
+ case CXXDestructor:
+ case CXXConversion:
+ case EnumConstant:
+ case Var:
+ case ImplicitParam:
+ case ParmVar:
+ case NonTypeTemplateParm:
+ case ObjCMethod:
+ case ObjCProperty:
+ return IDNS_Ordinary;
+ case Label:
+ return IDNS_Label;
+ case IndirectField:
+ return IDNS_Ordinary | IDNS_Member;
+
+ case ObjCCompatibleAlias:
+ case ObjCInterface:
+ return IDNS_Ordinary | IDNS_Type;
+
+ case Typedef:
+ case TypeAlias:
+ case TypeAliasTemplate:
+ case UnresolvedUsingTypename:
+ case TemplateTypeParm:
+ return IDNS_Ordinary | IDNS_Type;
+
+ case UsingShadow:
+ return 0; // we'll actually overwrite this later
+
+ case UnresolvedUsingValue:
+ return IDNS_Ordinary | IDNS_Using;
+
+ case Using:
+ return IDNS_Using;
+
+ case ObjCProtocol:
+ return IDNS_ObjCProtocol;
+
+ case Field:
+ case ObjCAtDefsField:
+ case ObjCIvar:
+ return IDNS_Member;
+
+ case Record:
+ case CXXRecord:
+ case Enum:
+ return IDNS_Tag | IDNS_Type;
+
+ case Namespace:
+ case NamespaceAlias:
+ return IDNS_Namespace;
+
+ case FunctionTemplate:
+ return IDNS_Ordinary;
+
+ case ClassTemplate:
+ case TemplateTemplateParm:
+ return IDNS_Ordinary | IDNS_Tag | IDNS_Type;
+
+ // Never have names.
+ case Friend:
+ case FriendTemplate:
+ case AccessSpec:
+ case LinkageSpec:
+ case FileScopeAsm:
+ case StaticAssert:
+ case ObjCPropertyImpl:
+ case Block:
+ case TranslationUnit:
+
+ case UsingDirective:
+ case ClassTemplateSpecialization:
+ case ClassTemplatePartialSpecialization:
+ case ClassScopeFunctionSpecialization:
+ case ObjCImplementation:
+ case ObjCCategory:
+ case ObjCCategoryImpl:
+ case Import:
+ // Never looked up by name.
+ return 0;
+ }
+
+ llvm_unreachable("Invalid DeclKind!");
+}
+
+void Decl::setAttrsImpl(const AttrVec &attrs, ASTContext &Ctx) {
+ assert(!HasAttrs && "Decl already contains attrs.");
+
+ AttrVec &AttrBlank = Ctx.getDeclAttrs(this);
+ assert(AttrBlank.empty() && "HasAttrs was wrong?");
+
+ AttrBlank = attrs;
+ HasAttrs = true;
+}
+
+void Decl::dropAttrs() {
+ if (!HasAttrs) return;
+
+ HasAttrs = false;
+ getASTContext().eraseDeclAttrs(this);
+}
+
+const AttrVec &Decl::getAttrs() const {
+ assert(HasAttrs && "No attrs to get!");
+ return getASTContext().getDeclAttrs(this);
+}
+
+void Decl::swapAttrs(Decl *RHS) {
+ bool HasLHSAttr = this->HasAttrs;
+ bool HasRHSAttr = RHS->HasAttrs;
+
+  // Usually neither decl has attrs; in that case there is nothing to do.
+ if (!HasLHSAttr && !HasRHSAttr) return;
+
+ // If 'this' has no attrs, swap the other way.
+ if (!HasLHSAttr)
+ return RHS->swapAttrs(this);
+
+ ASTContext &Context = getASTContext();
+
+ // Handle the case when both decls have attrs.
+ if (HasRHSAttr) {
+ std::swap(Context.getDeclAttrs(this), Context.getDeclAttrs(RHS));
+ return;
+ }
+
+ // Otherwise, LHS has an attr and RHS doesn't.
+ Context.getDeclAttrs(RHS) = Context.getDeclAttrs(this);
+ Context.eraseDeclAttrs(this);
+ this->HasAttrs = false;
+ RHS->HasAttrs = true;
+}
+
+Decl *Decl::castFromDeclContext (const DeclContext *D) {
+ Decl::Kind DK = D->getDeclKind();
+ switch(DK) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+}
+
+DeclContext *Decl::castToDeclContext(const Decl *D) {
+ Decl::Kind DK = D->getKind();
+ switch(DK) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+}
+
+SourceLocation Decl::getBodyRBrace() const {
+ // Special handling of FunctionDecl to avoid de-serializing the body from PCH.
+ // FunctionDecl stores EndRangeLoc for this purpose.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->getSourceRange().getEnd();
+ return SourceLocation();
+ }
+
+ if (Stmt *Body = getBody())
+ return Body->getSourceRange().getEnd();
+
+ return SourceLocation();
+}
+
+void Decl::CheckAccessDeclContext() const {
+#ifndef NDEBUG
+ // Suppress this check if any of the following hold:
+ // 1. this is the translation unit (and thus has no parent)
+ // 2. this is a template parameter (and thus doesn't belong to its context)
+ // 3. this is a non-type template parameter
+ // 4. the context is not a record
+ // 5. it's invalid
+ // 6. it's a C++0x static_assert.
+ if (isa<TranslationUnitDecl>(this) ||
+ isa<TemplateTypeParmDecl>(this) ||
+ isa<NonTypeTemplateParmDecl>(this) ||
+ !isa<CXXRecordDecl>(getDeclContext()) ||
+ isInvalidDecl() ||
+ isa<StaticAssertDecl>(this) ||
+ // FIXME: a ParmVarDecl can have ClassTemplateSpecialization
+ // as DeclContext (?).
+ isa<ParmVarDecl>(this) ||
+ // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
+ // AS_none as access specifier.
+ isa<CXXRecordDecl>(this) ||
+ isa<ClassScopeFunctionSpecializationDecl>(this))
+ return;
+
+ assert(Access != AS_none &&
+ "Access specifier is AS_none inside a record decl");
+#endif
+}
+
+DeclContext *Decl::getNonClosureContext() {
+ return getDeclContext()->getNonClosureAncestor();
+}
+
+DeclContext *DeclContext::getNonClosureAncestor() {
+ DeclContext *DC = this;
+
+ // This is basically "while (DC->isClosure()) DC = DC->getParent();"
+ // except that it's significantly more efficient to cast to a known
+ // decl type and call getDeclContext() than to call getParent().
+ while (isa<BlockDecl>(DC))
+ DC = cast<BlockDecl>(DC)->getDeclContext();
+
+ assert(!DC->isClosure());
+ return DC;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclContext Implementation
+//===----------------------------------------------------------------------===//
+
+bool DeclContext::classof(const Decl *D) {
+ switch (D->getKind()) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) case Decl::NAME:
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ return true;
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (D->getKind() >= Decl::first##NAME && \
+ D->getKind() <= Decl::last##NAME) \
+ return true;
+#include "clang/AST/DeclNodes.inc"
+ return false;
+ }
+}
+
+DeclContext::~DeclContext() { }
+
+/// \brief Find the parent context of this context that will be
+/// used for unqualified name lookup.
+///
+/// Generally, the parent lookup context is the semantic context. However, for
+/// a friend function the parent lookup context is the lexical context, which
+/// is the class in which the friend is declared.
+DeclContext *DeclContext::getLookupParent() {
+ // FIXME: Find a better way to identify friends
+ if (isa<FunctionDecl>(this))
+ if (getParent()->getRedeclContext()->isFileContext() &&
+ getLexicalParent()->getRedeclContext()->isRecord())
+ return getLexicalParent();
+
+ return getParent();
+}
+
+bool DeclContext::isInlineNamespace() const {
+ return isNamespace() &&
+ cast<NamespaceDecl>(this)->isInline();
+}
+
+bool DeclContext::isDependentContext() const {
+ if (isFileContext())
+ return false;
+
+ if (isa<ClassTemplatePartialSpecializationDecl>(this))
+ return true;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
+ if (Record->getDescribedClassTemplate())
+ return true;
+
+ if (Record->isDependentLambda())
+ return true;
+ }
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
+ if (Function->getDescribedFunctionTemplate())
+ return true;
+
+ // Friend function declarations are dependent if their *lexical*
+ // context is dependent.
+ if (cast<Decl>(this)->getFriendObjectKind())
+ return getLexicalParent()->isDependentContext();
+ }
+
+ return getParent() && getParent()->isDependentContext();
+}
+
+bool DeclContext::isTransparentContext() const {
+ if (DeclKind == Decl::Enum)
+ return !cast<EnumDecl>(this)->isScoped();
+ else if (DeclKind == Decl::LinkageSpec)
+ return true;
+
+ return false;
+}
+
+bool DeclContext::isExternCContext() const {
+ const DeclContext *DC = this;
+ while (DC->DeclKind != Decl::TranslationUnit) {
+ if (DC->DeclKind == Decl::LinkageSpec)
+ return cast<LinkageSpecDecl>(DC)->getLanguage()
+ == LinkageSpecDecl::lang_c;
+ DC = DC->getParent();
+ }
+ return false;
+}
+
+bool DeclContext::Encloses(const DeclContext *DC) const {
+ if (getPrimaryContext() != this)
+ return getPrimaryContext()->Encloses(DC);
+
+ for (; DC; DC = DC->getParent())
+ if (DC->getPrimaryContext() == this)
+ return true;
+ return false;
+}
+
+DeclContext *DeclContext::getPrimaryContext() {
+ switch (DeclKind) {
+ case Decl::TranslationUnit:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ // There is only one DeclContext for these entities.
+ return this;
+
+ case Decl::Namespace:
+ // The original namespace is our primary context.
+ return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();
+
+ case Decl::ObjCMethod:
+ return this;
+
+ case Decl::ObjCInterface:
+ if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
+ case Decl::ObjCProtocol:
+ if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
+ case Decl::ObjCCategory:
+ return this;
+
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ return this;
+
+ default:
+ if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
+ // If this is a tag type that has a definition or is currently
+ // being defined, that definition is our primary context.
+ TagDecl *Tag = cast<TagDecl>(this);
+ assert(isa<TagType>(Tag->TypeForDecl) ||
+ isa<InjectedClassNameType>(Tag->TypeForDecl));
+
+ if (TagDecl *Def = Tag->getDefinition())
+ return Def;
+
+ if (!isa<InjectedClassNameType>(Tag->TypeForDecl)) {
+ const TagType *TagTy = cast<TagType>(Tag->TypeForDecl);
+ if (TagTy->isBeingDefined())
+ // FIXME: is it necessarily being defined in the decl
+ // that owns the type?
+ return TagTy->getDecl();
+ }
+
+ return Tag;
+ }
+
+ assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
+ "Unknown DeclContext kind");
+ return this;
+ }
+}
+
+void
+DeclContext::collectAllContexts(llvm::SmallVectorImpl<DeclContext *> &Contexts){
+ Contexts.clear();
+
+ if (DeclKind != Decl::Namespace) {
+ Contexts.push_back(this);
+ return;
+ }
+
+ NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
+ for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
+ N = N->getPreviousDecl())
+ Contexts.push_back(N);
+
+ std::reverse(Contexts.begin(), Contexts.end());
+}
+
+std::pair<Decl *, Decl *>
+DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
+ bool FieldsAlreadyLoaded) {
+ // Build up a chain of declarations via the Decl::NextInContextAndBits field.
+ Decl *FirstNewDecl = 0;
+ Decl *PrevDecl = 0;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
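+    // FieldDecls may already have been chained in by
+    // RecordDecl::LoadFieldsFromExternalStorage(); skip them here to avoid
+    // linking them into the context twice.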
+ if (FieldsAlreadyLoaded && isa<FieldDecl>(Decls[I]))
+ continue;
+
+ Decl *D = Decls[I];
+ if (PrevDecl)
+ PrevDecl->NextInContextAndBits.setPointer(D);
+ else
+ FirstNewDecl = D;
+
+ PrevDecl = D;
+ }
+
+ return std::make_pair(FirstNewDecl, PrevDecl);
+}
+
+/// \brief Load the declarations within this lexical storage from an
+/// external source.
+void
+DeclContext::LoadLexicalDeclsFromExternalStorage() const {
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a DeclContext that is initializing.
+ ExternalASTSource::Deserializing ADeclContext(Source);
+
+ // Load the external declarations, if any.
+ SmallVector<Decl*, 64> Decls;
+ ExternalLexicalStorage = false;
+ switch (Source->FindExternalLexicalDecls(this, Decls)) {
+ case ELR_Success:
+ break;
+
+ case ELR_Failure:
+ case ELR_AlreadyLoaded:
+ return;
+ }
+
+ if (Decls.empty())
+ return;
+
+ // We may have already loaded just the fields of this record, in which case
+ // we need to ignore them.
+ bool FieldsAlreadyLoaded = false;
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
+ FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;
+
+ // Splice the newly-read declarations into the beginning of the list
+ // of declarations.
+ Decl *ExternalFirst, *ExternalLast;
+ llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls,
+ FieldsAlreadyLoaded);
+ ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
+ FirstDecl = ExternalFirst;
+ if (!LastDecl)
+ LastDecl = ExternalLast;
+}
+
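+/// SetNoExternalVisibleDeclsForName - Create an (empty) entry for Name in
+/// this context's stored-decls map, recording that the external source found
+/// no visible declarations with that name, and return an empty lookup result.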
+DeclContext::lookup_result
+ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr.getPointer()))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ assert(List.isNull());
+ (void) List;
+
+ return DeclContext::lookup_result();
+}
+
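+/// SetExternalVisibleDeclsForName - Enter the given externally-provided
+/// declarations into this context's stored-decls map under Name and return
+/// the resulting lookup result.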
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ ArrayRef<NamedDecl*> Decls) {
+  ASTContext &Context = DC->getParentASTContext();
+
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr.getPointer()))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ for (ArrayRef<NamedDecl*>::iterator
+ I = Decls.begin(), E = Decls.end(); I != E; ++I) {
+ if (List.isNull())
+ List.setOnlyValue(*I);
+ else
+ List.AddSubsequentDecl(*I);
+ }
+
+ return List.getLookupResult();
+}
+
+DeclContext::decl_iterator DeclContext::noload_decls_begin() const {
+ return decl_iterator(FirstDecl);
+}
+
+DeclContext::decl_iterator DeclContext::noload_decls_end() const {
+ return decl_iterator();
+}
+
+DeclContext::decl_iterator DeclContext::decls_begin() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return decl_iterator(FirstDecl);
+}
+
+DeclContext::decl_iterator DeclContext::decls_end() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return decl_iterator();
+}
+
+bool DeclContext::decls_empty() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return !FirstDecl;
+}
+
+void DeclContext::removeDecl(Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "decl being removed from non-lexical context");
+ assert((D->NextInContextAndBits.getPointer() || D == LastDecl) &&
+ "decl is not in decls list");
+
+ // Remove D from the decl chain. This is O(n) but hopefully rare.
+ if (D == FirstDecl) {
+ if (D == LastDecl)
+ FirstDecl = LastDecl = 0;
+ else
+ FirstDecl = D->NextInContextAndBits.getPointer();
+ } else {
+ for (Decl *I = FirstDecl; true; I = I->NextInContextAndBits.getPointer()) {
+ assert(I && "decl not found in linked list");
+ if (I->NextInContextAndBits.getPointer() == D) {
+ I->NextInContextAndBits.setPointer(D->NextInContextAndBits.getPointer());
+ if (D == LastDecl) LastDecl = I;
+ break;
+ }
+ }
+ }
+
+ // Mark that D is no longer in the decl chain.
+ D->NextInContextAndBits.setPointer(0);
+
+ // Remove D from the lookup table if necessary.
+ if (isa<NamedDecl>(D)) {
+ NamedDecl *ND = cast<NamedDecl>(D);
+
+ // Remove only decls that have a name
+ if (!ND->getDeclName()) return;
+
+ StoredDeclsMap *Map = getPrimaryContext()->LookupPtr.getPointer();
+ if (!Map) return;
+
+ StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
+ assert(Pos != Map->end() && "no lookup entry for decl");
+ if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
+ Pos->second.remove(ND);
+ }
+}
+
+void DeclContext::addHiddenDecl(Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "Decl inserted into wrong lexical context");
+ assert(!D->getNextDeclInContext() && D != LastDecl &&
+ "Decl already inserted into a DeclContext");
+
+ if (FirstDecl) {
+ LastDecl->NextInContextAndBits.setPointer(D);
+ LastDecl = D;
+ } else {
+ FirstDecl = LastDecl = D;
+ }
+
+ // Notify a C++ record declaration that we've added a member, so it can
+  // update its class-specific state.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ Record->addedMember(D);
+
+ // If this is a newly-created (not de-serialized) import declaration, wire
+ // it in to the list of local import declarations.
+ if (!D->isFromASTFile()) {
+ if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
+ D->getASTContext().addedLocalImportDecl(Import);
+ }
+}
+
+void DeclContext::addDecl(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, false, true);
+}
+
+void DeclContext::addDeclInternal(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, true, true);
+}
+
+/// shouldBeHidden - Determine whether a declaration which was declared
+/// within its semantic context should be invisible to qualified name lookup.
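+/// For example, an explicit specialization 'template<> struct X<int>' is
+/// found through its primary template's specialization list rather than by
+/// qualified name lookup into the enclosing context, so it is hidden here.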
+static bool shouldBeHidden(NamedDecl *D) {
+ // Skip unnamed declarations.
+ if (!D->getDeclName())
+ return true;
+
+ // Skip entities that can't be found by name lookup into a particular
+ // context.
+ if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
+ D->isTemplateParameter())
+ return true;
+
+ // Skip template specializations.
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isFunctionTemplateSpecialization())
+ return true;
+
+ return false;
+}
+
+/// buildLookup - Build the lookup data structure with all of the
+/// declarations in this DeclContext (and any other contexts linked
+/// to it or transparent contexts nested within it) and return it.
+StoredDeclsMap *DeclContext::buildLookup() {
+ assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
+
+ if (!LookupPtr.getInt())
+ return LookupPtr.getPointer();
+
+ llvm::SmallVector<DeclContext *, 2> Contexts;
+ collectAllContexts(Contexts);
+ for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
+ buildLookupImpl(Contexts[I]);
+
+ // We no longer have any lazy decls.
+ LookupPtr.setInt(false);
+ return LookupPtr.getPointer();
+}
+
+/// buildLookupImpl - Build part of the lookup data structure for the
+/// declarations contained within DCtx, which will either be this
+/// DeclContext, a DeclContext linked to it, or a transparent context
+/// nested within it.
+void DeclContext::buildLookupImpl(DeclContext *DCtx) {
+ for (decl_iterator I = DCtx->decls_begin(), E = DCtx->decls_end();
+ I != E; ++I) {
+ Decl *D = *I;
+
+ // Insert this declaration into the lookup structure, but only if
+ // it's semantically within its decl context. Any other decls which
+ // should be found in this context are added eagerly.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND))
+ makeDeclVisibleInContextImpl(ND, false);
+
+    // If this declaration is itself a transparent declaration context
+    // or inline namespace, add the members of that context to this
+    // lookup table as well (recursively).
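+    // For example, the enumerators of an unscoped 'enum E { ... }' and the
+    // members of an 'inline namespace V { ... }' nested in DCtx are visible
+    // directly in DCtx, so they are added to DCtx's table here as well.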
+ if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
+ if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
+ buildLookupImpl(InnerCtx);
+ }
+}
+
+DeclContext::lookup_result
+DeclContext::lookup(DeclarationName Name) {
+ assert(DeclKind != Decl::LinkageSpec &&
+ "Should not perform lookups into linkage specs!");
+
+ DeclContext *PrimaryContext = getPrimaryContext();
+ if (PrimaryContext != this)
+ return PrimaryContext->lookup(Name);
+
+ if (hasExternalVisibleStorage()) {
+ // If a PCH has a result for this name, and we have a local declaration, we
+ // will have imported the PCH result when adding the local declaration.
+ // FIXME: For modules, we could have had more declarations added by module
+    // imports since we saw the declaration of the local name.
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I != Map->end())
+ return I->second.getLookupResult();
+ }
+
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ return Source->FindExternalVisibleDeclsByName(this, Name);
+ }
+
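+  // LookupPtr's integer bit records whether some locally declared names have
+  // not yet been added to the table; if so, build the full table first.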
+ StoredDeclsMap *Map = LookupPtr.getPointer();
+ if (LookupPtr.getInt())
+ Map = buildLookup();
+
+ if (!Map)
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
+
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I == Map->end())
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
+
+ return I->second.getLookupResult();
+}
+
+DeclContext::lookup_const_result
+DeclContext::lookup(DeclarationName Name) const {
+ return const_cast<DeclContext*>(this)->lookup(Name);
+}
+
+void DeclContext::localUncachedLookup(DeclarationName Name,
+ llvm::SmallVectorImpl<NamedDecl *> &Results) {
+ Results.clear();
+
+ // If there's no external storage, just perform a normal lookup and copy
+ // the results.
+ if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage()) {
+ lookup_result LookupResults = lookup(Name);
+ Results.insert(Results.end(), LookupResults.first, LookupResults.second);
+ return;
+ }
+
+ // If we have a lookup table, check there first. Maybe we'll get lucky.
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator Pos = Map->find(Name);
+ if (Pos != Map->end()) {
+ Results.insert(Results.end(),
+ Pos->second.getLookupResult().first,
+ Pos->second.getLookupResult().second);
+ return;
+ }
+ }
+
+ // Slow case: grovel through the declarations in our chain looking for
+ // matches.
+ for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclName() == Name)
+ Results.push_back(ND);
+ }
+}
+
+DeclContext *DeclContext::getRedeclContext() {
+ DeclContext *Ctx = this;
+ // Skip through transparent contexts.
+ while (Ctx->isTransparentContext())
+ Ctx = Ctx->getParent();
+ return Ctx;
+}
+
+DeclContext *DeclContext::getEnclosingNamespaceContext() {
+ DeclContext *Ctx = this;
+ // Skip through non-namespace, non-translation-unit contexts.
+ while (!Ctx->isFileContext())
+ Ctx = Ctx->getParent();
+ return Ctx->getPrimaryContext();
+}
+
+bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
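+  // This checks membership in the enclosing namespace set of O: O itself
+  // plus, while O is an inline namespace, each namespace containing it. For
+  // example, given 'namespace A { inline namespace B { } }', calling
+  // A->InEnclosingNamespaceSetOf(B) returns true.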
+ // For non-file contexts, this is equivalent to Equals.
+ if (!isFileContext())
+ return O->Equals(this);
+
+ do {
+ if (O->Equals(this))
+ return true;
+
+ const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(O);
+ if (!NS || !NS->isInline())
+ break;
+ O = NS->getParent();
+ } while (O);
+
+ return false;
+}
+
+void DeclContext::makeDeclVisibleInContext(NamedDecl *D) {
+ DeclContext *PrimaryDC = this->getPrimaryContext();
+ DeclContext *DeclDC = D->getDeclContext()->getPrimaryContext();
+ // If the decl is being added outside of its semantic decl context, we
+ // need to ensure that we eagerly build the lookup information for it.
+ PrimaryDC->makeDeclVisibleInContextWithFlags(D, false, PrimaryDC == DeclDC);
+}
+
+void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
+ bool Recoverable) {
+ assert(this == getPrimaryContext() && "expected a primary DC");
+
+ // Skip declarations within functions.
+ // FIXME: We shouldn't need to build lookup tables for function declarations
+ // ever, and we can't do so correctly because we can't model the nesting of
+ // scopes which occurs within functions. We use "qualified" lookup into
+ // function declarations when handling friend declarations inside nested
+ // classes, and consequently accept the following invalid code:
+ //
+ // void f() { void g(); { int g; struct S { friend void g(); }; } }
+ if (isFunctionOrMethod() && !isa<FunctionDecl>(D))
+ return;
+
+ // Skip declarations which should be invisible to name lookup.
+ if (shouldBeHidden(D))
+ return;
+
+ // If we already have a lookup data structure, perform the insertion into
+ // it. If we might have externally-stored decls with this name, look them
+ // up and perform the insertion. If this decl was declared outside its
+ // semantic context, buildLookup won't add it, so add it now.
+ //
+ // FIXME: As a performance hack, don't add such decls into the translation
+ // unit unless we're in C++, since qualified lookup into the TU is never
+ // performed.
+ if (LookupPtr.getPointer() || hasExternalVisibleStorage() ||
+ ((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
+ (getParentASTContext().getLangOpts().CPlusPlus ||
+ !isTranslationUnit()))) {
+ // If we have lazily omitted any decls, they might have the same name as
+ // the decl which we are adding, so build a full lookup table before adding
+ // this decl.
+ buildLookup();
+ makeDeclVisibleInContextImpl(D, Internal);
+ } else {
+ LookupPtr.setInt(true);
+ }
+
+ // If we are a transparent context or inline namespace, insert into our
+ // parent context, too. This operation is recursive.
+ if (isTransparentContext() || isInlineNamespace())
+ getParent()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);
+
+ Decl *DCAsDecl = cast<Decl>(this);
+ // Notify that a decl was made visible unless we are a Tag being defined.
+ if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
+ if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
+ L->AddedVisibleDecl(this, D);
+}
+
+void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
+ // Find or create the stored declaration map.
+ StoredDeclsMap *Map = LookupPtr.getPointer();
+ if (!Map) {
+ ASTContext *C = &getParentASTContext();
+ Map = CreateStoredDeclsMap(*C);
+ }
+
+ // If there is an external AST source, load any declarations it knows about
+ // with this declaration's name.
+ // If the lookup table contains an entry about this name it means that we
+ // have already checked the external source.
+ if (!Internal)
+ if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
+ if (hasExternalVisibleStorage() &&
+ Map->find(D->getDeclName()) == Map->end())
+ Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
+
+ // Insert this declaration into the map.
+ StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()];
+ if (DeclNameEntries.isNull()) {
+ DeclNameEntries.setOnlyValue(D);
+ return;
+ }
+
+ if (DeclNameEntries.HandleRedeclaration(D)) {
+ // This declaration has replaced an existing one for which
+ // declarationReplaces returns true.
+ return;
+ }
+
+ // Put this declaration into the appropriate slot.
+ DeclNameEntries.AddSubsequentDecl(D);
+}
+
+/// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
+/// this context.
+DeclContext::udir_iterator_range
+DeclContext::getUsingDirectives() const {
+  // FIXME: Use something more efficient than normal lookup for using
+  // directives. In C++, using directives are looked up more often than
+  // anything else.
+ lookup_const_result Result = lookup(UsingDirectiveDecl::getName());
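+  // Every declaration stored under the using-directive name is a
+  // UsingDirectiveDecl, so reinterpreting the NamedDecl pointers as
+  // UsingDirectiveDecl pointers below is safe.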
+ return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.first),
+ reinterpret_cast<udir_iterator>(Result.second));
+}
+
+//===----------------------------------------------------------------------===//
+// Creation and Destruction of StoredDeclsMaps. //
+//===----------------------------------------------------------------------===//
+
+StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
+ assert(!LookupPtr.getPointer() && "context already has a decls map");
+ assert(getPrimaryContext() == this &&
+ "creating decls map on non-primary context");
+
+ StoredDeclsMap *M;
+ bool Dependent = isDependentContext();
+ if (Dependent)
+ M = new DependentStoredDeclsMap();
+ else
+ M = new StoredDeclsMap();
+ M->Previous = C.LastSDM;
+ C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
+ LookupPtr.setPointer(M);
+ return M;
+}
+
+void ASTContext::ReleaseDeclContextMaps() {
+ // It's okay to delete DependentStoredDeclsMaps via a StoredDeclsMap
+ // pointer because the subclass doesn't add anything that needs to
+ // be deleted.
+ StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt());
+}
+
+void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) {
+ while (Map) {
+ // Advance the iteration before we invalidate memory.
+ llvm::PointerIntPair<StoredDeclsMap*,1> Next = Map->Previous;
+
+ if (Dependent)
+ delete static_cast<DependentStoredDeclsMap*>(Map);
+ else
+ delete Map;
+
+ Map = Next.getPointer();
+ Dependent = Next.getInt();
+ }
+}
+
+DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
+ DeclContext *Parent,
+ const PartialDiagnostic &PDiag) {
+ assert(Parent->isDependentContext()
+ && "cannot iterate dependent diagnostics of non-dependent context");
+ Parent = Parent->getPrimaryContext();
+ if (!Parent->LookupPtr.getPointer())
+ Parent->CreateStoredDeclsMap(C);
+
+ DependentStoredDeclsMap *Map
+ = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr.getPointer());
+
+ // Allocate the copy of the PartialDiagnostic via the ASTContext's
+ // BumpPtrAllocator, rather than the ASTContext itself.
+ PartialDiagnostic::Storage *DiagStorage = 0;
+ if (PDiag.hasStorage())
+ DiagStorage = new (C) PartialDiagnostic::Storage;
+
+ DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
+
+ // TODO: Maybe we shouldn't reverse the order during insertion.
+ DD->NextDiagnostic = Map->FirstDiagnostic;
+ Map->FirstDiagnostic = DD;
+
+ return DD;
+}
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
new file mode 100644
index 0000000..114322b
--- /dev/null
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -0,0 +1,2029 @@
+//===--- DeclCXX.cpp - C++ Declaration AST Node Implementation ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void AccessSpecDecl::anchor() { }
+
+AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(AccessSpecDecl));
+ return new (Mem) AccessSpecDecl(EmptyShell());
+}
+
+CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
+ : UserDeclaredConstructor(false), UserDeclaredCopyConstructor(false),
+ UserDeclaredMoveConstructor(false), UserDeclaredCopyAssignment(false),
+ UserDeclaredMoveAssignment(false), UserDeclaredDestructor(false),
+ Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
+ Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
+ HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
+ HasMutableFields(false), HasOnlyCMembers(true),
+ HasTrivialDefaultConstructor(true),
+ HasConstexprNonCopyMoveConstructor(false),
+ DefaultedDefaultConstructorIsConstexpr(true),
+ DefaultedCopyConstructorIsConstexpr(true),
+ DefaultedMoveConstructorIsConstexpr(true),
+ HasConstexprDefaultConstructor(false), HasConstexprCopyConstructor(false),
+ HasConstexprMoveConstructor(false), HasTrivialCopyConstructor(true),
+ HasTrivialMoveConstructor(true), HasTrivialCopyAssignment(true),
+ HasTrivialMoveAssignment(true), HasTrivialDestructor(true),
+ HasIrrelevantDestructor(true),
+ HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
+ UserProvidedDefaultConstructor(false), DeclaredDefaultConstructor(false),
+ DeclaredCopyConstructor(false), DeclaredMoveConstructor(false),
+ DeclaredCopyAssignment(false), DeclaredMoveAssignment(false),
+ DeclaredDestructor(false), FailedImplicitMoveConstructor(false),
+ FailedImplicitMoveAssignment(false), IsLambda(false), NumBases(0),
+ NumVBases(0), Bases(), VBases(), Definition(D), FirstFriend(0) {
+}
+
+CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, CXXRecordDecl *PrevDecl)
+ : RecordDecl(K, TK, DC, StartLoc, IdLoc, Id, PrevDecl),
+ DefinitionData(PrevDecl ? PrevDecl->DefinitionData : 0),
+ TemplateOrInstantiation() { }
+
+CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ CXXRecordDecl* PrevDecl,
+ bool DelayTypeCreation) {
+ CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, StartLoc, IdLoc,
+ Id, PrevDecl);
+
+ // FIXME: DelayTypeCreation seems like such a hack
+ if (!DelayTypeCreation)
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+CXXRecordDecl *CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
+ SourceLocation Loc, bool Dependent) {
+ CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
+ 0, 0);
+ R->IsBeingDefined = true;
+ R->DefinitionData = new (C) struct LambdaDefinitionData(R, Dependent);
+ C.getTypeDeclType(R, /*PrevDecl=*/0);
+ return R;
+}
+
+CXXRecordDecl *
+CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXRecordDecl));
+ return new (Mem) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
+}
+
+void
+CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
+ unsigned NumBases) {
+ ASTContext &C = getASTContext();
+
+ if (!data().Bases.isOffset() && data().NumBases > 0)
+ C.Deallocate(data().getBases());
+
+ if (NumBases) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is [...] a class with [...] no base classes [...].
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+ }
+
+ // The set of seen virtual base types.
+ llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
+
+ // The virtual bases of this class.
+ SmallVector<const CXXBaseSpecifier *, 8> VBases;
+
+ data().Bases = new(C) CXXBaseSpecifier [NumBases];
+ data().NumBases = NumBases;
+ for (unsigned i = 0; i < NumBases; ++i) {
+ data().getBases()[i] = *Bases[i];
+ // Keep track of inherited vbases for this base class.
+ const CXXBaseSpecifier *Base = Bases[i];
+ QualType BaseType = Base->getType();
+ // Skip dependent types; we can't do any checking on them now.
+ if (BaseType->isDependentType())
+ continue;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ // A class with a non-empty base class is not empty.
+ // FIXME: Standard ref?
+ if (!BaseClassDecl->isEmpty()) {
+ if (!data().Empty) {
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- either has no non-static data members in the most derived
+ // class and at most one base class with non-static data members,
+ // or has no base classes with non-static data members, and
+ // If this is the second non-empty base, then neither of these two
+ // clauses can be true.
+ data().IsStandardLayout = false;
+ }
+
+ data().Empty = false;
+ data().HasNoNonEmptyBases = false;
+ }
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ if (BaseClassDecl->isPolymorphic())
+ data().Polymorphic = true;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has no non-standard-layout base classes
+ if (!BaseClassDecl->isStandardLayout())
+ data().IsStandardLayout = false;
+
+ // Record if this base is the first non-literal field or base.
+ if (!hasNonLiteralTypeFieldsOrBases() && !BaseType->isLiteralType())
+ data().HasNonLiteralTypeFieldsOrBases = true;
+
+ // Now go through all virtual bases of this base and add them.
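+    // For example, in the diamond
+    //   struct A {}; struct B : virtual A {}; struct C : virtual A {};
+    //   struct D : B, C {};
+    // A ends up in D's virtual bases exactly once, even though it is reached
+    // through both B and C, because SeenVBaseTypes filters duplicates.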
+ for (CXXRecordDecl::base_class_iterator VBase =
+ BaseClassDecl->vbases_begin(),
+ E = BaseClassDecl->vbases_end(); VBase != E; ++VBase) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(VBase->getType())))
+ VBases.push_back(VBase);
+ }
+
+ if (Base->isVirtual()) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)))
+ VBases.push_back(Base);
+
+ // C++0x [meta.unary.prop] is_empty:
+ // T is a class type, but not a union type, with ... no virtual base
+ // classes
+ data().Empty = false;
+
+ // C++ [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- its class has [...] no virtual bases
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if it is neither
+ // user-provided nor deleted and if
+ // -- class X has no virtual functions and no virtual base classes, and
+ data().HasTrivialCopyConstructor = false;
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if it is
+ // neither user-provided nor deleted and if
+ // -- class X has no virtual functions and no virtual base classes, and
+ data().HasTrivialCopyAssignment = false;
+ data().HasTrivialMoveAssignment = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has [...] no virtual base classes
+ data().IsStandardLayout = false;
+
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor [...]
+ // -- the class shall not have any virtual base classes
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ } else {
+ // C++ [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- all the direct base classes of its class have trivial default
+ // constructors.
+ if (!BaseClassDecl->hasTrivialDefaultConstructor())
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // [...]
+ // -- the constructor selected to copy/move each direct base class
+ // subobject is trivial, and
+ // FIXME: C++0x: We need to only consider the selected constructor
+ // instead of all of them.
+ if (!BaseClassDecl->hasTrivialCopyConstructor())
+ data().HasTrivialCopyConstructor = false;
+ if (!BaseClassDecl->hasTrivialMoveConstructor())
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // [...]
+ // -- the assignment operator selected to copy/move each direct base
+ // class subobject is trivial, and
+ // FIXME: C++0x: We need to only consider the selected operator instead
+ // of all of them.
+ if (!BaseClassDecl->hasTrivialCopyAssignment())
+ data().HasTrivialCopyAssignment = false;
+ if (!BaseClassDecl->hasTrivialMoveAssignment())
+ data().HasTrivialMoveAssignment = false;
+
+ // C++11 [class.ctor]p6:
+ // If that user-written default constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // default constructor is constexpr.
+ if (!BaseClassDecl->hasConstexprDefaultConstructor())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the requirements
+ // of a constexpr constructor, the implicitly-defined constructor is
+ // constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing [...] base class
+ // sub-objects shall be a constexpr constructor
+ if (!BaseClassDecl->hasConstexprCopyConstructor())
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ if (BaseClassDecl->hasDeclaredMoveConstructor() ||
+ BaseClassDecl->needsImplicitMoveConstructor())
+ // FIXME: If the implicit move constructor generated for the base class
+ // would be ill-formed, the implicit move constructor generated for the
+ // derived class calls the base class' copy constructor.
+ data().DefaultedMoveConstructorIsConstexpr &=
+ BaseClassDecl->hasConstexprMoveConstructor();
+ else if (!BaseClassDecl->hasConstexprCopyConstructor())
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
+
+ // C++ [class.ctor]p3:
+ // A destructor is trivial if all the direct base classes of its class
+ // have trivial destructors.
+ if (!BaseClassDecl->hasTrivialDestructor())
+ data().HasTrivialDestructor = false;
+
+ if (!BaseClassDecl->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+
+ // A class has an Objective-C object member if... or any of its bases
+ // has an Objective-C object member.
+ if (BaseClassDecl->hasObjectMember())
+ setHasObjectMember(true);
+
+ // Keep track of the presence of mutable fields.
+ if (BaseClassDecl->hasMutableFields())
+ data().HasMutableFields = true;
+ }
+
+ if (VBases.empty())
+ return;
+
+ // Create base specifier for any direct or indirect virtual bases.
+ data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
+ data().NumVBases = VBases.size();
+ for (int I = 0, E = VBases.size(); I != E; ++I)
+ data().getVBases()[I] = *VBases[I];
+}
+
+/// Callback function for CXXRecordDecl::forallBases that acknowledges
+/// that it saw a base class.
+static bool SawBase(const CXXRecordDecl *, void *) {
+ return true;
+}
+
+bool CXXRecordDecl::hasAnyDependentBases() const {
+ if (!isDependentContext())
+ return false;
+
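+  // SawBase always succeeds, so forallBases only returns false when it gives
+  // up on a base it cannot traverse, e.g. a dependent or undefined base.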
+ return !forallBases(SawBase, 0);
+}
+
+bool CXXRecordDecl::hasConstCopyConstructor() const {
+ return getCopyConstructor(Qualifiers::Const) != 0;
+}
+
+bool CXXRecordDecl::isTriviallyCopyable() const {
+ // C++0x [class]p5:
+ // A trivially copyable class is a class that:
+ // -- has no non-trivial copy constructors,
+ if (!hasTrivialCopyConstructor()) return false;
+ // -- has no non-trivial move constructors,
+ if (!hasTrivialMoveConstructor()) return false;
+ // -- has no non-trivial copy assignment operators,
+ if (!hasTrivialCopyAssignment()) return false;
+ // -- has no non-trivial move assignment operators, and
+ if (!hasTrivialMoveAssignment()) return false;
+ // -- has a trivial destructor.
+ if (!hasTrivialDestructor()) return false;
+
+ return true;
+}
+
+/// \brief Perform a simplistic form of overload resolution that only considers
+/// cv-qualifiers on a single parameter, and return the best overload candidate
+/// (if there is one).
+static CXXMethodDecl *
+GetBestOverloadCandidateSimple(
+ const SmallVectorImpl<std::pair<CXXMethodDecl *, Qualifiers> > &Cands) {
+ if (Cands.empty())
+ return 0;
+ if (Cands.size() == 1)
+ return Cands[0].first;
+
+ unsigned Best = 0, N = Cands.size();
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.compatiblyIncludes(Cands[I].second))
+ Best = I;
+
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.compatiblyIncludes(Cands[I].second))
+ return 0;
+
+ return Cands[Best].first;
+}
+
+CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(unsigned TypeQuals) const{
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getTypeDeclType(const_cast<CXXRecordDecl*>(this));
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ unsigned FoundTQs;
+ SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = this->lookup(ConstructorName);
+ Con != ConEnd; ++Con) {
+ // C++ [class.copy]p2:
+ // A non-template constructor for class X is a copy constructor if [...]
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
+ if (((TypeQuals & Qualifiers::Const) == (FoundTQs & Qualifiers::Const)) ||
+ (!(TypeQuals & Qualifiers::Const) && (FoundTQs & Qualifiers::Const)))
+ Found.push_back(std::make_pair(
+ const_cast<CXXConstructorDecl *>(Constructor),
+ Qualifiers::fromCVRMask(FoundTQs)));
+ }
+ }
+
+ return cast_or_null<CXXConstructorDecl>(
+ GetBestOverloadCandidateSimple(Found));
+}
+
+CXXConstructorDecl *CXXRecordDecl::getMoveConstructor() const {
+ for (ctor_iterator I = ctor_begin(), E = ctor_end(); I != E; ++I)
+ if (I->isMoveConstructor())
+ return *I;
+
+ return 0;
+}
+
+CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const {
+ ASTContext &Context = getASTContext();
+ QualType Class = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+
+ SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = this->lookup(Name); Op != OpEnd; ++Op) {
+ // C++ [class.copy]p9:
+ // A user-declared copy assignment operator is a non-static non-template
+ // member function of class X with exactly one parameter of type X, X&,
+ // const X&, volatile X& or const volatile X&.
+ const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
+ if (!Method || Method->isStatic() || Method->getPrimaryTemplate())
+ continue;
+
+ const FunctionProtoType *FnType
+ = Method->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Overloaded operator has no prototype.");
+ // Don't assert on this; an invalid decl might have been left in the AST.
+ if (FnType->getNumArgs() != 1 || FnType->isVariadic())
+ continue;
+
+ QualType ArgType = FnType->getArgType(0);
+ Qualifiers Quals;
+ if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()) {
+ ArgType = Ref->getPointeeType();
+ // If we have a const argument and we have a reference to a non-const,
+ // this function does not match.
+ if (ArgIsConst && !ArgType.isConstQualified())
+ continue;
+
+ Quals = ArgType.getQualifiers();
+ } else {
+ // By-value copy-assignment operators are treated like const X&
+ // copy-assignment operators.
+ Quals = Qualifiers::fromCVRMask(Qualifiers::Const);
+ }
+
+ if (!Context.hasSameUnqualifiedType(ArgType, Class))
+ continue;
+
+ // Save this copy-assignment operator. It might be "the one".
+ Found.push_back(std::make_pair(const_cast<CXXMethodDecl *>(Method), Quals));
+ }
+
+ // Use a simplistic form of overload resolution to find the candidate.
+ return GetBestOverloadCandidateSimple(Found);
+}
+
+CXXMethodDecl *CXXRecordDecl::getMoveAssignmentOperator() const {
+ for (method_iterator I = method_begin(), E = method_end(); I != E; ++I)
+ if (I->isMoveAssignmentOperator())
+ return *I;
+
+ return 0;
+}
+
+void CXXRecordDecl::markedVirtualFunctionPure() {
+ // C++ [class.abstract]p2:
+ // A class is abstract if it has at least one pure virtual function.
+ data().Abstract = true;
+}
+
+void CXXRecordDecl::addedMember(Decl *D) {
+ if (!D->isImplicit() &&
+ !isa<FieldDecl>(D) &&
+ !isa<IndirectFieldDecl>(D) &&
+ (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class))
+ data().HasOnlyCMembers = false;
+
+ // Ignore friends and invalid declarations.
+ if (D->getFriendObjectKind() || D->isInvalidDecl())
+ return;
+
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (FunTmpl)
+ D = FunTmpl->getTemplatedDecl();
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual()) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with [...] no virtual functions.
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+
+ // Virtual functions make the class non-empty.
+ // FIXME: Standard ref?
+ data().Empty = false;
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ data().Polymorphic = true;
+
+ // C++0x [class.ctor]p5
+ // A default constructor is trivial [...] if:
+ // -- its class has no virtual functions [...]
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // -- class X has no virtual functions [...]
+ data().HasTrivialCopyConstructor = false;
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // -- class X has no virtual functions [...]
+ data().HasTrivialCopyAssignment = false;
+ data().HasTrivialMoveAssignment = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has no virtual functions
+ data().IsStandardLayout = false;
+ }
+ }
+
+ if (D->isImplicit()) {
+ // Notify that an implicit member was added after the definition
+ // was completed.
+ if (!isBeingDefined())
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXImplicitMember(data().Definition, D);
+
+ // If this is a special member function, note that it was added and then
+ // return early.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (Constructor->isDefaultConstructor()) {
+ data().DeclaredDefaultConstructor = true;
+ if (Constructor->isConstexpr()) {
+ data().HasConstexprDefaultConstructor = true;
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+ } else if (Constructor->isCopyConstructor()) {
+ data().DeclaredCopyConstructor = true;
+ if (Constructor->isConstexpr())
+ data().HasConstexprCopyConstructor = true;
+ } else if (Constructor->isMoveConstructor()) {
+ data().DeclaredMoveConstructor = true;
+ if (Constructor->isConstexpr())
+ data().HasConstexprMoveConstructor = true;
+ } else
+ goto NotASpecialMember;
+ return;
+ } else if (isa<CXXDestructorDecl>(D)) {
+ data().DeclaredDestructor = true;
+ return;
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isCopyAssignmentOperator())
+ data().DeclaredCopyAssignment = true;
+ else if (Method->isMoveAssignmentOperator())
+ data().DeclaredMoveAssignment = true;
+ else
+ goto NotASpecialMember;
+ return;
+ }
+
+NotASpecialMember:;
+ // Any other implicit declarations are handled like normal declarations.
+ }
+
+ // Handle (user-declared) constructors.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ // Note that we have a user-declared constructor.
+ data().UserDeclaredConstructor = true;
+
+ // Technically, "user-provided" is only defined for special member
+ // functions, but the intent of the standard is clearly that it should apply
+ // to all functions.
+ bool UserProvided = Constructor->isUserProvided();
+
+ if (Constructor->isDefaultConstructor()) {
+ data().DeclaredDefaultConstructor = true;
+ if (UserProvided) {
+ // C++0x [class.ctor]p5:
+ // A default constructor is trivial if it is not user-provided [...]
+ data().HasTrivialDefaultConstructor = false;
+ data().UserProvidedDefaultConstructor = true;
+ }
+ if (Constructor->isConstexpr()) {
+ data().HasConstexprDefaultConstructor = true;
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+ }
+
+ // Note when we have a user-declared copy or move constructor, which will
+ // suppress the implicit declaration of those constructors.
+ if (!FunTmpl) {
+ if (Constructor->isCopyConstructor()) {
+ data().UserDeclaredCopyConstructor = true;
+ data().DeclaredCopyConstructor = true;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if it is not
+ // user-provided [...]
+ if (UserProvided)
+ data().HasTrivialCopyConstructor = false;
+
+ if (Constructor->isConstexpr())
+ data().HasConstexprCopyConstructor = true;
+ } else if (Constructor->isMoveConstructor()) {
+ data().UserDeclaredMoveConstructor = true;
+ data().DeclaredMoveConstructor = true;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if it is not
+ // user-provided [...]
+ if (UserProvided)
+ data().HasTrivialMoveConstructor = false;
+
+ if (Constructor->isConstexpr())
+ data().HasConstexprMoveConstructor = true;
+ }
+ }
+ if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor()) {
+ // Record if we see any constexpr constructors which are neither copy
+ // nor move constructors.
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-declared
+ // constructors [...].
+ // C++0x [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-provided
+ // constructors [...].
+ if (!getASTContext().getLangOpts().CPlusPlus0x || UserProvided)
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class [...]
+ // Since the POD bit is meant to be C++03 POD-ness, clear it even if the
+ // type is technically an aggregate in C++0x since it wouldn't be in 03.
+ data().PlainOldData = false;
+
+ return;
+ }
+
+ // Handle (user-declared) destructors.
+ if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ data().DeclaredDestructor = true;
+ data().UserDeclaredDestructor = true;
+ data().HasIrrelevantDestructor = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that has [...] no user-defined
+ // destructor.
+ // This bit is the C++03 POD bit, not the 0x one.
+ data().PlainOldData = false;
+
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if it is not user-provided and if
+ // -- the destructor is not virtual.
+ if (DD->isUserProvided() || DD->isVirtual()) {
+ data().HasTrivialDestructor = false;
+ // C++11 [dcl.constexpr]p1:
+ // The constexpr specifier shall be applied only to [...] the
+ // declaration of a static data member of a literal type.
+ // C++11 [basic.types]p10:
+ // A type is a literal type if it is [...] a class type that [...] has
+ // a trivial destructor.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
+
+ return;
+ }
+
+ // Handle (user-declared) member functions.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isCopyAssignmentOperator()) {
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that [...] has no user-defined
+ // copy assignment operator [...].
+ // This is the C++03 bit only.
+ data().PlainOldData = false;
+
+ // This is a copy assignment operator.
+
+      // Suppress the implicit declaration of a copy assignment operator.
+ data().UserDeclaredCopyAssignment = true;
+ data().DeclaredCopyAssignment = true;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if it is
+ // neither user-provided nor deleted [...]
+ if (Method->isUserProvided())
+ data().HasTrivialCopyAssignment = false;
+
+ return;
+ }
+
+ if (Method->isMoveAssignmentOperator()) {
+      // This is an extension in C++03 mode, but we'll stay consistent by
+      // taking a move assignment operator to induce non-POD-ness.
+ data().PlainOldData = false;
+
+ // This is a move assignment operator.
+ data().UserDeclaredMoveAssignment = true;
+ data().DeclaredMoveAssignment = true;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if it is
+ // neither user-provided nor deleted [...]
+ if (Method->isUserProvided())
+ data().HasTrivialMoveAssignment = false;
+ }
+
+ // Keep the list of conversion functions up-to-date.
+ if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ // We don't record specializations.
+ if (Conversion->getPrimaryTemplate())
+ return;
+
+ // FIXME: We intentionally don't use the decl's access here because it
+ // hasn't been set yet. That's really just a misdesign in Sema.
+
+ if (FunTmpl) {
+ if (FunTmpl->getPreviousDecl())
+ data().Conversions.replace(FunTmpl->getPreviousDecl(),
+ FunTmpl);
+ else
+ data().Conversions.addDecl(FunTmpl);
+ } else {
+ if (Conversion->getPreviousDecl())
+ data().Conversions.replace(Conversion->getPreviousDecl(),
+ Conversion);
+ else
+ data().Conversions.addDecl(Conversion);
+ }
+ }
+
+ return;
+ }
+
+ // Handle non-static data members.
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ // C++ [class.bit]p2:
+ // A declaration for a bit-field that omits the identifier declares an
+ // unnamed bit-field. Unnamed bit-fields are not members and cannot be
+ // initialized.
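+    // For example, 'int : 3;' within a class declares an unnamed bit-field.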
+ if (Field->isUnnamedBitfield())
+ return;
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with [...] no
+ // private or protected non-static data members (clause 11).
+ //
+ // A POD must be an aggregate.
+ if (D->getAccess() == AS_private || D->getAccess() == AS_protected) {
+ data().Aggregate = false;
+ data().PlainOldData = false;
+ }
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has the same access control for all non-static data members,
+ switch (D->getAccess()) {
+ case AS_private: data().HasPrivateFields = true; break;
+ case AS_protected: data().HasProtectedFields = true; break;
+ case AS_public: data().HasPublicFields = true; break;
+ case AS_none: llvm_unreachable("Invalid access specifier");
+    }
+ if ((data().HasPrivateFields + data().HasProtectedFields +
+ data().HasPublicFields) > 1)
+ data().IsStandardLayout = false;
+
+ // Keep track of the presence of mutable fields.
+ if (Field->isMutable())
+ data().HasMutableFields = true;
+
+ // C++0x [class]p9:
+ // A POD struct is a class that is both a trivial class and a
+ // standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types).
+ //
+    // Automatic Reference Counting: the presence of a member of Objective-C
+    // pointer type whose lifetime is not explicitly 'none' makes the class
+    // a non-POD. However, we delay setting PlainOldData to false in this case
+    // so that Sema has a chance to diagnose cases where the same class would
+    // be non-POD with Automatic Reference Counting but a POD without it.
+ // In this case, the class will become a non-POD class when we complete
+ // the definition.
+ ASTContext &Context = getASTContext();
+ QualType T = Context.getBaseElementType(Field->getType());
+ if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
+ if (!Context.getLangOpts().ObjCAutoRefCount ||
+ T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone)
+ setHasObjectMember(true);
+ } else if (!T.isPODType(Context))
+ data().PlainOldData = false;
+
+ if (T->isReferenceType()) {
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // -- has no non-static data members of type [...] reference,
+ data().IsStandardLayout = false;
+ }
+
+ // Record if this field is the first non-literal or volatile field or base.
+ if (!T->isLiteralType() || T.isVolatileQualified())
+ data().HasNonLiteralTypeFieldsOrBases = true;
+
+ if (Field->hasInClassInitializer()) {
+ // C++0x [class]p5:
+ // A default constructor is trivial if [...] no non-static data member
+ // of its class has a brace-or-equal-initializer.
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [dcl.init.aggr]p1:
+ // An aggregate is a [...] class with [...] no
+ // brace-or-equal-initializers for non-static data members.
+ data().Aggregate = false;
+
+ // C++0x [class]p10:
+ // A POD struct is [...] a trivial class.
+ data().PlainOldData = false;
+ }
+
+ if (const RecordType *RecordTy = T->getAs<RecordType>()) {
+ CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (FieldRec->getDefinition()) {
+ // C++0x [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- for all the non-static data members of its class that are of
+ // class type (or array thereof), each such class has a trivial
+ // default constructor.
+ if (!FieldRec->hasTrivialDefaultConstructor())
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // [...]
+ // -- for each non-static data member of X that is of class type (or
+ // an array thereof), the constructor selected to copy/move that
+ // member is trivial;
+ // FIXME: C++0x: We don't correctly model 'selected' constructors.
+ if (!FieldRec->hasTrivialCopyConstructor())
+ data().HasTrivialCopyConstructor = false;
+ if (!FieldRec->hasTrivialMoveConstructor())
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // [...]
+ // -- for each non-static data member of X that is of class type (or
+ // an array thereof), the assignment operator selected to
+ // copy/move that member is trivial;
+ // FIXME: C++0x: We don't correctly model 'selected' operators.
+ if (!FieldRec->hasTrivialCopyAssignment())
+ data().HasTrivialCopyAssignment = false;
+ if (!FieldRec->hasTrivialMoveAssignment())
+ data().HasTrivialMoveAssignment = false;
+
+ if (!FieldRec->hasTrivialDestructor())
+ data().HasTrivialDestructor = false;
+ if (!FieldRec->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+ if (FieldRec->hasObjectMember())
+ setHasObjectMember(true);
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // -- has no non-static data members of type non-standard-layout
+ // class (or array of such types) [...]
+ if (!FieldRec->isStandardLayout())
+ data().IsStandardLayout = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has no base classes of the same type as the first non-static
+ // data member.
+ // We don't want to expend bits in the state of the record decl
+ // tracking whether this is the first non-static data member so we
+ // cheat a bit and use some of the existing state: the empty bit.
+ // Virtual bases and virtual methods make a class non-empty, but they
+ // also make it non-standard-layout so we needn't check here.
+ // A non-empty base class may leave the class standard-layout, but not
+          // if we have arrived here, and have at least one non-static data
+ // member. If IsStandardLayout remains true, then the first non-static
+ // data member must come through here with Empty still true, and Empty
+ // will subsequently be set to false below.
+ if (data().IsStandardLayout && data().Empty) {
+ for (CXXRecordDecl::base_class_const_iterator BI = bases_begin(),
+ BE = bases_end();
+ BI != BE; ++BI) {
+ if (Context.hasSameUnqualifiedType(BI->getType(), T)) {
+ data().IsStandardLayout = false;
+ break;
+ }
+ }
+ }
+
+ // Keep track of the presence of mutable fields.
+ if (FieldRec->hasMutableFields())
+ data().HasMutableFields = true;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // constructor is constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing non-static data
+ // members [...] shall be a constexpr constructor
+ if (!Field->hasInClassInitializer() &&
+ !FieldRec->hasConstexprDefaultConstructor())
+ // The standard requires any in-class initializer to be a constant
+ // expression. We consider this to be a defect.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ if (!FieldRec->hasConstexprCopyConstructor())
+ data().DefaultedCopyConstructorIsConstexpr = false;
+
+ if (FieldRec->hasDeclaredMoveConstructor() ||
+ FieldRec->needsImplicitMoveConstructor())
+ // FIXME: If the implicit move constructor generated for the member's
+ // class would be ill-formed, the implicit move constructor generated
+ // for this class calls the member's copy constructor.
+ data().DefaultedMoveConstructorIsConstexpr &=
+ FieldRec->hasConstexprMoveConstructor();
+ else if (!FieldRec->hasConstexprCopyConstructor())
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
+ } else {
+ // Base element type of field is a non-class type.
+ if (!T->isLiteralType()) {
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ } else if (!Field->hasInClassInitializer())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ }
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- either has no non-static data members in the most derived
+ // class and at most one base class with non-static data members,
+ // or has no base classes with non-static data members, and
+ // At this point we know that we have a non-static data member, so the last
+ // clause holds.
+ if (!data().HasNoNonEmptyBases)
+ data().IsStandardLayout = false;
+
+ // If this is not a zero-length bit-field, then the class is not empty.
+ if (data().Empty) {
+ if (!Field->isBitField() ||
+ (!Field->getBitWidth()->isTypeDependent() &&
+ !Field->getBitWidth()->isValueDependent() &&
+ Field->getBitWidthValue(Context) != 0))
+ data().Empty = false;
+ }
+ }
+
+ // Handle using declarations of conversion functions.
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D))
+ if (Shadow->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName)
+ data().Conversions.addDecl(Shadow, Shadow->getAccess());
+}
+
+bool CXXRecordDecl::isCLike() const {
+ if (getTagKind() == TTK_Class || !TemplateOrInstantiation.isNull())
+ return false;
+ if (!hasDefinition())
+ return true;
+
+ return isPOD() && data().HasOnlyCMembers;
+}
+
+void CXXRecordDecl::getCaptureFields(
+ llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const {
+ Captures.clear();
+ ThisCapture = 0;
+
+ LambdaDefinitionData &Lambda = getLambdaData();
+ RecordDecl::field_iterator Field = field_begin();
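+  // The lambda class's fields were created in the same order as its captures,
+  // so the capture array and the field list can be walked in lockstep.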
+ for (LambdaExpr::Capture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures;
+ C != CEnd; ++C, ++Field) {
+ if (C->capturesThis()) {
+ ThisCapture = *Field;
+ continue;
+ }
+
+ Captures[C->getCapturedVar()] = *Field;
+ }
+}
+
+
+static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
+ QualType T;
+ if (isa<UsingShadowDecl>(Conv))
+ Conv = cast<UsingShadowDecl>(Conv)->getTargetDecl();
+ if (FunctionTemplateDecl *ConvTemp = dyn_cast<FunctionTemplateDecl>(Conv))
+ T = ConvTemp->getTemplatedDecl()->getResultType();
+ else
+ T = cast<CXXConversionDecl>(Conv)->getConversionType();
+ return Context.getCanonicalType(T);
+}
+
+/// Collect the visible conversions of a base class.
+///
+/// \param Base a base class of the class we're considering
+/// \param InVirtual whether this base class is a virtual base (or a base
+/// of a virtual base)
+/// \param Access the access along the inheritance path to this base
+/// \param ParentHiddenTypes the conversions provided by the inheritors
+/// of this base
+/// \param Output the set to which to add conversions from non-virtual bases
+/// \param VOutput the set to which to add conversions from virtual bases
+/// \param HiddenVBaseCs the set of conversions which were hidden in a
+/// virtual base along some inheritance path
+static void CollectVisibleConversions(ASTContext &Context,
+ CXXRecordDecl *Record,
+ bool InVirtual,
+ AccessSpecifier Access,
+ const llvm::SmallPtrSet<CanQualType, 8> &ParentHiddenTypes,
+ UnresolvedSetImpl &Output,
+ UnresolvedSetImpl &VOutput,
+ llvm::SmallPtrSet<NamedDecl*, 8> &HiddenVBaseCs) {
+ // The set of types which have conversions in this class or its
+ // subclasses. As an optimization, we don't copy the derived set
+ // unless it might change.
+ const llvm::SmallPtrSet<CanQualType, 8> *HiddenTypes = &ParentHiddenTypes;
+ llvm::SmallPtrSet<CanQualType, 8> HiddenTypesBuffer;
+
+ // Collect the direct conversions and figure out which conversions
+ // will be hidden in the subclasses.
+ UnresolvedSetImpl &Cs = *Record->getConversionFunctions();
+ if (!Cs.empty()) {
+ HiddenTypesBuffer = ParentHiddenTypes;
+ HiddenTypes = &HiddenTypesBuffer;
+
+ for (UnresolvedSetIterator I = Cs.begin(), E = Cs.end(); I != E; ++I) {
+ bool Hidden =
+ !HiddenTypesBuffer.insert(GetConversionType(Context, I.getDecl()));
+
+ // If this conversion is hidden and we're in a virtual base,
+ // remember that it's hidden along some inheritance path.
+ if (Hidden && InVirtual)
+ HiddenVBaseCs.insert(cast<NamedDecl>(I.getDecl()->getCanonicalDecl()));
+
+ // If this conversion isn't hidden, add it to the appropriate output.
+ else if (!Hidden) {
+ AccessSpecifier IAccess
+ = CXXRecordDecl::MergeAccess(Access, I.getAccess());
+
+ if (InVirtual)
+ VOutput.addDecl(I.getDecl(), IAccess);
+ else
+ Output.addDecl(I.getDecl(), IAccess);
+ }
+ }
+ }
+
+ // Collect information recursively from any base classes.
+ for (CXXRecordDecl::base_class_iterator
+ I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
+ const RecordType *RT = I->getType()->getAs<RecordType>();
+ if (!RT) continue;
+
+ AccessSpecifier BaseAccess
+ = CXXRecordDecl::MergeAccess(Access, I->getAccessSpecifier());
+ bool BaseInVirtual = InVirtual || I->isVirtual();
+
+ CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
+ *HiddenTypes, Output, VOutput, HiddenVBaseCs);
+ }
+}
+
+/// Collect the visible conversions of a class.
+///
+/// This would be extremely straightforward if it weren't for virtual
+/// bases. It might be worth special-casing that, really.
+static void CollectVisibleConversions(ASTContext &Context,
+ CXXRecordDecl *Record,
+ UnresolvedSetImpl &Output) {
+ // The collection of all conversions in virtual bases that we've
+ // found. These will be added to the output as long as they don't
+ // appear in the hidden-conversions set.
+ UnresolvedSet<8> VBaseCs;
+
+ // The set of conversions in virtual bases that we've determined to
+ // be hidden.
+ llvm::SmallPtrSet<NamedDecl*, 8> HiddenVBaseCs;
+
+ // The set of types hidden by classes derived from this one.
+ llvm::SmallPtrSet<CanQualType, 8> HiddenTypes;
+
+ // Go ahead and collect the direct conversions and add them to the
+ // hidden-types set.
+ UnresolvedSetImpl &Cs = *Record->getConversionFunctions();
+ Output.append(Cs.begin(), Cs.end());
+ for (UnresolvedSetIterator I = Cs.begin(), E = Cs.end(); I != E; ++I)
+ HiddenTypes.insert(GetConversionType(Context, I.getDecl()));
+
+ // Recursively collect conversions from base classes.
+ for (CXXRecordDecl::base_class_iterator
+ I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
+ const RecordType *RT = I->getType()->getAs<RecordType>();
+ if (!RT) continue;
+
+ CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
+ I->isVirtual(), I->getAccessSpecifier(),
+ HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
+ }
+
+ // Add any unhidden conversions provided by virtual bases.
+ for (UnresolvedSetIterator I = VBaseCs.begin(), E = VBaseCs.end();
+ I != E; ++I) {
+ if (!HiddenVBaseCs.count(cast<NamedDecl>(I.getDecl()->getCanonicalDecl())))
+ Output.addDecl(I.getDecl(), I.getAccess());
+ }
+}
+
+/// getVisibleConversionFunctions - get all conversion functions visible
+/// in the current class, including conversion function templates.
+const UnresolvedSetImpl *CXXRecordDecl::getVisibleConversionFunctions() {
+ // If root class, all conversions are visible.
+ if (bases_begin() == bases_end())
+ return &data().Conversions;
+ // If visible conversion list is already evaluated, return it.
+ if (data().ComputedVisibleConversions)
+ return &data().VisibleConversions;
+ CollectVisibleConversions(getASTContext(), this, data().VisibleConversions);
+ data().ComputedVisibleConversions = true;
+ return &data().VisibleConversions;
+}
+
+void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
+ // This operation is O(N) but extremely rare. Sema only uses it to
+ // remove UsingShadowDecls in a class that were followed by a direct
+ // declaration, e.g.:
+ // class A : B {
+ // using B::operator int;
+ // operator int();
+ // };
+ // This is uncommon by itself and even more uncommon in conjunction
+ // with sufficiently large numbers of directly-declared conversions
+ // that asymptotic behavior matters.
+
+ UnresolvedSetImpl &Convs = *getConversionFunctions();
+ for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
+ if (Convs[I].getDecl() == ConvDecl) {
+ Convs.erase(I);
+ assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end()
+ && "conversion was found multiple times in unresolved set");
+ return;
+ }
+ }
+
+ llvm_unreachable("conversion not found in set!");
+}
+
+CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const {
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
+ return cast<CXXRecordDecl>(MSInfo->getInstantiatedFrom());
+
+ return 0;
+}
+
+MemberSpecializationInfo *CXXRecordDecl::getMemberSpecializationInfo() const {
+ return TemplateOrInstantiation.dyn_cast<MemberSpecializationInfo *>();
+}
+
+void
+CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrInstantiation.isNull() &&
+ "Previous template or instantiation?");
+ assert(!isa<ClassTemplateSpecializationDecl>(this));
+ TemplateOrInstantiation
+ = new (getASTContext()) MemberSpecializationInfo(RD, TSK);
+}
+
+TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this))
+ return Spec->getSpecializationKind();
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void
+CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
+ Spec->setSpecializationKind(TSK);
+ return;
+ }
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ return;
+ }
+
+ llvm_unreachable("Not a class template or member class specialization");
+}
+
+CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
+ ASTContext &Context = getASTContext();
+ QualType ClassType = Context.getTypeDeclType(this);
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(ClassType));
+
+ DeclContext::lookup_const_iterator I, E;
+ llvm::tie(I, E) = lookup(Name);
+ if (I == E)
+ return 0;
+
+ CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(*I);
+ return Dtor;
+}
+
+void CXXRecordDecl::completeDefinition() {
+ completeDefinition(0);
+}
+
+void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
+ RecordDecl::completeDefinition();
+
+ if (hasObjectMember() && getASTContext().getLangOpts().ObjCAutoRefCount) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, copy assignment
+ // operator, and destructor are non-trivial.
+ struct DefinitionData &Data = data();
+ Data.PlainOldData = false;
+ Data.HasTrivialDefaultConstructor = false;
+ Data.HasTrivialCopyConstructor = false;
+ Data.HasTrivialCopyAssignment = false;
+ Data.HasTrivialDestructor = false;
+ Data.HasIrrelevantDestructor = false;
+ }
+
+ // If the class may be abstract (but hasn't been marked as such), check for
+ // any pure final overriders.
+ if (mayBeAbstract()) {
+ CXXFinalOverriderMap MyFinalOverriders;
+ if (!FinalOverriders) {
+ getFinalOverriders(MyFinalOverriders);
+ FinalOverriders = &MyFinalOverriders;
+ }
+
+ bool Done = false;
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(),
+ MEnd = FinalOverriders->end();
+ M != MEnd && !Done; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd && !Done; ++SO) {
+        assert(SO->second.size() > 0 &&
+               "All virtual functions have overriding virtual functions");
+
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
+ if (SO->second.front().Method->isPure()) {
+ data().Abstract = true;
+ Done = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Set access bits correctly on the directly-declared conversions.
+ for (UnresolvedSetIterator I = data().Conversions.begin(),
+ E = data().Conversions.end();
+ I != E; ++I)
+ data().Conversions.setAccess(I, (*I)->getAccess());
+}
+
+bool CXXRecordDecl::mayBeAbstract() const {
+ if (data().Abstract || isInvalidDecl() || !data().Polymorphic ||
+ isDependentContext())
+ return false;
+
+ for (CXXRecordDecl::base_class_const_iterator B = bases_begin(),
+ BEnd = bases_end();
+ B != BEnd; ++B) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl->isAbstract())
+ return true;
+ }
+
+ return false;
+}
+
+void CXXMethodDecl::anchor() { }
+
+CXXMethodDecl *
+CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isStatic, StorageClass SCAsWritten, bool isInline,
+ bool isConstexpr, SourceLocation EndLocation) {
+ return new (C) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo,
+ isStatic, SCAsWritten, isInline, isConstexpr,
+ EndLocation);
+}
+
+CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXMethodDecl));
+ return new (Mem) CXXMethodDecl(CXXMethod, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ 0, false, SC_None, false, false,
+ SourceLocation());
+}
+
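+/// For illustration only (hypothetical class), the kinds of member
+/// 'operator delete' this predicate distinguishes:
+/// \code
+///   struct X {
+///     void operator delete(void *);              // usual deallocation fn
+///     void operator delete(void *, std::size_t); // usual only if the
+///                                                // one-parameter form is
+///                                                // not declared
+///     void operator delete(void *, void *);      // placement form, not usual
+///   };
+/// \endcode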
+bool CXXMethodDecl::isUsualDeallocationFunction() const {
+ if (getOverloadedOperator() != OO_Delete &&
+ getOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (getPrimaryTemplate())
+ return false;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // If a class T has a member deallocation function named operator delete
+ // with exactly one parameter, then that function is a usual (non-placement)
+ // deallocation function. [...]
+ if (getNumParams() == 1)
+ return true;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // [...] If class T does not declare such an operator delete but does
+ // declare a member deallocation function named operator delete with
+ // exactly two parameters, the second of which has type std::size_t (18.1),
+ // then this function is a usual deallocation function.
+ ASTContext &Context = getASTContext();
+ if (getNumParams() != 2 ||
+ !Context.hasSameUnqualifiedType(getParamDecl(1)->getType(),
+ Context.getSizeType()))
+ return false;
+
+ // This function is a usual deallocation function if there are no
+ // single-parameter deallocation functions of the same kind.
+ for (DeclContext::lookup_const_result R = getDeclContext()->lookup(getDeclName());
+ R.first != R.second; ++R.first) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*R.first))
+ if (FD->getNumParams() == 1)
+ return false;
+ }
+
+ return true;
+}
+
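+/// For illustration only: in a hypothetical class X, each of the following
+/// is a copy assignment operator.
+/// \code
+///   struct X {
+///     X &operator=(const X &);
+///     X &operator=(volatile X &);
+///   };
+/// \endcode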
+bool CXXMethodDecl::isCopyAssignmentOperator() const {
+ // C++0x [class.copy]p17:
+ // A user-declared copy assignment operator X::operator= is a non-static
+ // non-template member function of class X with exactly one parameter of
+ // type X, X&, const X&, volatile X& or const volatile X&.
+ if (/*operator=*/getOverloadedOperator() != OO_Equal ||
+ /*non-static*/ isStatic() ||
+ /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate())
+ return false;
+
+ QualType ParamType = getParamDecl(0)->getType();
+ if (const LValueReferenceType *Ref = ParamType->getAs<LValueReferenceType>())
+ ParamType = Ref->getPointeeType();
+
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ return Context.hasSameUnqualifiedType(ClassType, ParamType);
+}
+
+bool CXXMethodDecl::isMoveAssignmentOperator() const {
+ // C++0x [class.copy]p19:
+ // A user-declared move assignment operator X::operator= is a non-static
+ // non-template member function of class X with exactly one parameter of type
+ // X&&, const X&&, volatile X&&, or const volatile X&&.
+ if (getOverloadedOperator() != OO_Equal || isStatic() ||
+ getPrimaryTemplate() || getDescribedFunctionTemplate())
+ return false;
+
+ QualType ParamType = getParamDecl(0)->getType();
+ if (!isa<RValueReferenceType>(ParamType))
+ return false;
+ ParamType = ParamType->getPointeeType();
+
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ return Context.hasSameUnqualifiedType(ClassType, ParamType);
+}
+
+void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
+ assert(MD->isCanonicalDecl() && "Method is not canonical!");
+ assert(!MD->getParent()->isDependentContext() &&
+ "Can't add an overridden method to a class template!");
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ getASTContext().addOverriddenMethod(this, MD);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_begin(this);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_end(this);
+}
+
+unsigned CXXMethodDecl::size_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_size(this);
+}
+
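+/// For illustration only (hypothetical class):
+/// \code
+///   struct X {
+///     void f();       // 'this' has type 'X *'
+///     void g() const; // 'this' has type 'const X *'
+///   };
+/// \endcode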
+QualType CXXMethodDecl::getThisType(ASTContext &C) const {
+ // C++ 9.3.2p1: The type of this in a member function of a class X is X*.
+ // If the member function is declared const, the type of this is const X*,
+ // if the member function is declared volatile, the type of this is
+ // volatile X*, and if the member function is declared const volatile,
+ // the type of this is const volatile X*.
+
+ assert(isInstance() && "No 'this' for static methods!");
+
+ QualType ClassTy = C.getTypeDeclType(getParent());
+ ClassTy = C.getQualifiedType(ClassTy,
+ Qualifiers::fromCVRMask(getTypeQualifiers()));
+ return C.getPointerType(ClassTy);
+}
+
+bool CXXMethodDecl::hasInlineBody() const {
+ // If this function is a template instantiation, look at the template from
+ // which it was instantiated.
+ const FunctionDecl *CheckFn = getTemplateInstantiationPattern();
+ if (!CheckFn)
+ CheckFn = this;
+
+ const FunctionDecl *fn;
+ return CheckFn->hasBody(fn) && !fn->isOutOfLine();
+}
+
+bool CXXMethodDecl::isLambdaStaticInvoker() const {
+ return getParent()->isLambda() &&
+ getIdentifier() && getIdentifier()->getName() == "__invoke";
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo, bool IsVirtual,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ SourceLocation EllipsisLoc)
+ : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ IndirectFieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
+{
+ VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1);
+ memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *));
+}
+
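+// A CXXCtorInitializer that carries array indices stores the VarDecl*
+// array in trailing storage directly after the object ('this + 1' in the
+// constructor above); Create() allocates the extra space for it.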
+CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices) {
+ void *Mem = Context.Allocate(sizeof(CXXCtorInitializer) +
+ sizeof(VarDecl *) * NumIndices,
+ llvm::alignOf<CXXCtorInitializer>());
+ return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
+ Indices, NumIndices);
+}
+
+TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
+ if (isBaseInitializer())
+ return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
+ else
+ return TypeLoc();
+}
+
+const Type *CXXCtorInitializer::getBaseClass() const {
+ if (isBaseInitializer())
+ return Initializee.get<TypeSourceInfo*>()->getType().getTypePtr();
+ else
+ return 0;
+}
+
+SourceLocation CXXCtorInitializer::getSourceLocation() const {
+ if (isAnyMemberInitializer())
+ return getMemberLocation();
+
+ if (isInClassMemberInitializer())
+ return getAnyMember()->getLocation();
+
+ if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
+ return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ return SourceLocation();
+}
+
+SourceRange CXXCtorInitializer::getSourceRange() const {
+ if (isInClassMemberInitializer()) {
+ FieldDecl *D = getAnyMember();
+ if (Expr *I = D->getInClassInitializer())
+ return I->getSourceRange();
+ return SourceRange();
+ }
+
+ return SourceRange(getSourceLocation(), getRParenLoc());
+}
+
+void CXXConstructorDecl::anchor() { }
+
+CXXConstructorDecl *
+CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConstructorDecl));
+  return new (Mem) CXXConstructorDecl(0, SourceLocation(),
+                                      DeclarationNameInfo(), QualType(), 0,
+                                      false, false, false, false);
+}
+
+CXXConstructorDecl *
+CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isExplicit, bool isInline,
+ bool isImplicitlyDeclared, bool isConstexpr) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXConstructorName &&
+ "Name must refer to a constructor");
+ return new (C) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isExplicit, isInline, isImplicitlyDeclared,
+ isConstexpr);
+}
+
+CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
+ assert(isDelegatingConstructor() && "Not a delegating constructor!");
+ Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E))
+ return Construct->getConstructor();
+
+ return 0;
+}
+
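+/// For illustration only: each of these hypothetical constructors is a
+/// default constructor, since each can be called without an argument.
+/// \code
+///   struct X { X(); };
+///   struct Y { Y(int n = 0); };
+/// \endcode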
+bool CXXConstructorDecl::isDefaultConstructor() const {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class
+ // X that can be called without an argument.
+ return (getNumParams() == 0) ||
+ (getNumParams() > 0 && getParamDecl(0)->hasDefaultArg());
+}
+
+bool
+CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isLValueReferenceType();
+}
+
+bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isRValueReferenceType();
+}
+
+/// \brief Determine whether this is a copy or move constructor.
+bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
+ // C++ [class.copy]p2:
+ // A non-template constructor for class X is a copy constructor
+ // if its first parameter is of type X&, const X&, volatile X& or
+ // const volatile X&, and either there are no other parameters
+ // or else all other parameters have default arguments (8.3.6).
+ // C++0x [class.copy]p3:
+ // A non-template constructor for class X is a move constructor if its
+ // first parameter is of type X&&, const X&&, volatile X&&, or
+ // const volatile X&&, and either there are no other parameters or else
+ // all other parameters have default arguments.
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
+ (getPrimaryTemplate() != 0) ||
+ (getDescribedFunctionTemplate() != 0))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ // Do we have a reference type?
+ const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>();
+ if (!ParamRefType)
+ return false;
+
+ // Is it a reference to our class type?
+ ASTContext &Context = getASTContext();
+
+ CanQualType PointeeType
+ = Context.getCanonicalType(ParamRefType->getPointeeType());
+ CanQualType ClassTy
+ = Context.getCanonicalType(Context.getTagDeclType(getParent()));
+ if (PointeeType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ // FIXME: other qualifiers?
+
+ // We have a copy or move constructor.
+ TypeQuals = PointeeType.getCVRQualifiers();
+ return true;
+}
+
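+/// For illustration only (hypothetical class):
+/// \code
+///   struct X {
+///     X(int);           // converting constructor
+///     explicit X(char); // counted only when AllowExplicit is true
+///   };
+/// \endcode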
+bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
+ // C++ [class.conv.ctor]p1:
+ // A constructor declared without the function-specifier explicit
+ // that can be called with a single parameter specifies a
+ // conversion from the type of its first parameter to the type of
+ // its class. Such a constructor is called a converting
+ // constructor.
+ if (isExplicit() && !AllowExplicit)
+ return false;
+
+ return (getNumParams() == 0 &&
+ getType()->getAs<FunctionProtoType>()->isVariadic()) ||
+ (getNumParams() == 1) ||
+ (getNumParams() > 1 && getParamDecl(1)->hasDefaultArg());
+}
+
+bool CXXConstructorDecl::isSpecializationCopyingObject() const {
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
+ (getPrimaryTemplate() == 0) ||
+ (getDescribedFunctionTemplate() != 0))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ ASTContext &Context = getASTContext();
+ CanQualType ParamType = Context.getCanonicalType(Param->getType());
+
+  // Is it the same as our class type?
+ CanQualType ClassTy
+ = Context.getCanonicalType(Context.getTagDeclType(getParent()));
+ if (ParamType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ return true;
+}
+
+const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
+ // Hack: we store the inherited constructor in the overridden method table
+ method_iterator It = getASTContext().overridden_methods_begin(this);
+ if (It == getASTContext().overridden_methods_end(this))
+ return 0;
+
+ return cast<CXXConstructorDecl>(*It);
+}
+
+void
+CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){
+ // Hack: we store the inherited constructor in the overridden method table
+ assert(getASTContext().overridden_methods_size(this) == 0 &&
+ "Base ctor already set.");
+ getASTContext().addOverriddenMethod(this, BaseCtor);
+}
+
+void CXXDestructorDecl::anchor() { }
+
+CXXDestructorDecl *
+CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXDestructorDecl));
+ return new (Mem) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(),
+ QualType(), 0, false, false);
+}
+
+CXXDestructorDecl *
+CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isImplicitlyDeclared) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXDestructorName &&
+ "Name must refer to a destructor");
+ return new (C) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo, isInline,
+ isImplicitlyDeclared);
+}
+
+void CXXConversionDecl::anchor() { }
+
+CXXConversionDecl *
+CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConversionDecl));
+ return new (Mem) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(),
+ QualType(), 0, false, false, false,
+ SourceLocation());
+}
+
+CXXConversionDecl *
+CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isExplicit,
+ bool isConstexpr, SourceLocation EndLocation) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName &&
+ "Name must refer to a conversion function");
+ return new (C) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isInline, isExplicit, isConstexpr,
+ EndLocation);
+}
+
+bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
+ return isImplicit() && getParent()->isLambda() &&
+ getConversionType()->isBlockPointerType();
+}
+
+void LinkageSpecDecl::anchor() { }
+
+LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ LanguageIDs Lang,
+ SourceLocation RBraceLoc) {
+ return new (C) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, RBraceLoc);
+}
+
+LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LinkageSpecDecl));
+ return new (Mem) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
+ lang_c, SourceLocation());
+}
+
+void UsingDirectiveDecl::anchor() { }
+
+UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ SourceLocation NamespaceLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Used,
+ DeclContext *CommonAncestor) {
+ if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
+ Used = NS->getOriginalNamespace();
+ return new (C) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
+ IdentLoc, Used, CommonAncestor);
+}
+
+UsingDirectiveDecl *
+UsingDirectiveDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDirectiveDecl));
+ return new (Mem) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0, 0);
+}
+
+NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
+ if (NamespaceAliasDecl *NA =
+ dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
+ return NA->getNamespace();
+ return cast_or_null<NamespaceDecl>(NominatedNamespace);
+}
+
+void NamespaceDecl::anchor() { }
+
+NamespaceDecl::NamespaceDecl(DeclContext *DC, bool Inline,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl)
+ : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
+ LocStart(StartLoc), RBraceLoc(), AnonOrFirstNamespaceAndInline(0, Inline)
+{
+ setPreviousDeclaration(PrevDecl);
+
+ if (PrevDecl)
+ AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace());
+}
+
+NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
+ bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl) {
+ return new (C) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
+}
+
+NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceDecl));
+ return new (Mem) NamespaceDecl(0, false, SourceLocation(), SourceLocation(),
+ 0, 0);
+}
+
+void NamespaceAliasDecl::anchor() { }
+
+NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Namespace) {
+ if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
+ Namespace = NS->getOriginalNamespace();
+ return new (C) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias,
+ QualifierLoc, IdentLoc, Namespace);
+}
+
+NamespaceAliasDecl *
+NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceAliasDecl));
+ return new (Mem) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(), 0,
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0);
+}
+
+void UsingShadowDecl::anchor() { }
+
+UsingShadowDecl *
+UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingShadowDecl));
+ return new (Mem) UsingShadowDecl(0, SourceLocation(), 0, 0);
+}
+
+UsingDecl *UsingShadowDecl::getUsingDecl() const {
+ const UsingShadowDecl *Shadow = this;
+ while (const UsingShadowDecl *NextShadow =
+ dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
+ Shadow = NextShadow;
+ return cast<UsingDecl>(Shadow->UsingOrNextShadow);
+}
+
+void UsingDecl::anchor() { }
+
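+// The shadow declarations introduced by a UsingDecl form a singly-linked
+// list threaded through UsingOrNextShadow, with the final link pointing
+// back at the UsingDecl itself (see UsingShadowDecl::getUsingDecl above).
+// addShadowDecl prepends to the list; removeShadowDecl unlinks in O(n).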
+void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
+ "declaration already in set");
+ assert(S->getUsingDecl() == this);
+
+ if (FirstUsingShadow.getPointer())
+ S->UsingOrNextShadow = FirstUsingShadow.getPointer();
+ FirstUsingShadow.setPointer(S);
+}
+
+void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
+ "declaration not in set");
+ assert(S->getUsingDecl() == this);
+
+ // Remove S from the shadow decl chain. This is O(n) but hopefully rare.
+
+ if (FirstUsingShadow.getPointer() == S) {
+ FirstUsingShadow.setPointer(
+ dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow));
+ S->UsingOrNextShadow = this;
+ return;
+ }
+
+ UsingShadowDecl *Prev = FirstUsingShadow.getPointer();
+ while (Prev->UsingOrNextShadow != S)
+ Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow);
+ Prev->UsingOrNextShadow = S->UsingOrNextShadow;
+ S->UsingOrNextShadow = this;
+}
+
+UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool IsTypeNameArg) {
+ return new (C) UsingDecl(DC, UL, QualifierLoc, NameInfo, IsTypeNameArg);
+}
+
+UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDecl));
+ return new (Mem) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
+ DeclarationNameInfo(), false);
+}
+
+void UnresolvedUsingValueDecl::anchor() { }
+
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo) {
+ return new (C) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
+ QualifierLoc, NameInfo);
+}
+
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UnresolvedUsingValueDecl));
+ return new (Mem) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo());
+}
+
+void UnresolvedUsingTypenameDecl::anchor() { }
+
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation TypenameLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TargetNameLoc,
+ DeclarationName TargetName) {
+ return new (C) UnresolvedUsingTypenameDecl(DC, UsingLoc, TypenameLoc,
+ QualifierLoc, TargetNameLoc,
+ TargetName.getAsIdentifierInfo());
+}
+
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(UnresolvedUsingTypenameDecl));
+ return new (Mem) UnresolvedUsingTypenameDecl(0, SourceLocation(),
+ SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(),
+ 0);
+}
+
+void StaticAssertDecl::anchor() { }
+
+StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ StringLiteral *Message,
+ SourceLocation RParenLoc) {
+ return new (C) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
+ RParenLoc);
+}
+
+StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl));
+  return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,
+                                    SourceLocation());
+}
+
+static const char *getAccessName(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none:
+ llvm_unreachable("Invalid access specifier!");
+ case AS_public:
+ return "public";
+ case AS_private:
+ return "private";
+ case AS_protected:
+ return "protected";
+ }
+ llvm_unreachable("Invalid access specifier!");
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
+
+const PartialDiagnostic &clang::operator<<(const PartialDiagnostic &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
diff --git a/clang/lib/AST/DeclFriend.cpp b/clang/lib/AST/DeclFriend.cpp
new file mode 100644
index 0000000..6e3bd8d
--- /dev/null
+++ b/clang/lib/AST/DeclFriend.cpp
@@ -0,0 +1,48 @@
+//===--- DeclFriend.cpp - C++ Friend Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AST classes related to C++ friend
+// declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+void FriendDecl::anchor() { }
+
+FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ FriendUnion Friend,
+ SourceLocation FriendL) {
+#ifndef NDEBUG
+ if (Friend.is<NamedDecl*>()) {
+ NamedDecl *D = Friend.get<NamedDecl*>();
+ assert(isa<FunctionDecl>(D) ||
+ isa<CXXRecordDecl>(D) ||
+ isa<FunctionTemplateDecl>(D) ||
+ isa<ClassTemplateDecl>(D));
+
+ // As a temporary hack, we permit template instantiation to point
+ // to the original declaration when instantiating members.
+ assert(D->getFriendObjectKind() ||
+ (cast<CXXRecordDecl>(DC)->getTemplateSpecializationKind()));
+ }
+#endif
+
+ FriendDecl *FD = new (C) FriendDecl(DC, L, Friend, FriendL);
+ cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
+ return FD;
+}
+
+FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendDecl));
+ return new (Mem) FriendDecl(EmptyShell());
+}
diff --git a/clang/lib/AST/DeclGroup.cpp b/clang/lib/AST/DeclGroup.cpp
new file mode 100644
index 0000000..036acc2
--- /dev/null
+++ b/clang/lib/AST/DeclGroup.cpp
@@ -0,0 +1,32 @@
+//===--- DeclGroup.cpp - Classes for representing groups of Decls -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclGroup and DeclGroupRef classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/Allocator.h"
+using namespace clang;
+
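+// A DeclGroup is allocated with its Decl* array in trailing storage: the
+// array lives immediately after the DeclGroup object itself (note the
+// 'this + 1' destination of the memcpy in the constructor below).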
+DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
+ assert(NumDecls > 1 && "Invalid DeclGroup");
+ unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls;
+ void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
+ new (Mem) DeclGroup(NumDecls, Decls);
+ return static_cast<DeclGroup*>(Mem);
+}
+
+DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) {
+ assert(numdecls > 0);
+ assert(decls);
+ memcpy(this+1, decls, numdecls * sizeof(*decls));
+}
diff --git a/clang/lib/AST/DeclObjC.cpp b/clang/lib/AST/DeclObjC.cpp
new file mode 100644
index 0000000..2370d3c
--- /dev/null
+++ b/clang/lib/AST/DeclObjC.cpp
@@ -0,0 +1,1326 @@
+//===--- DeclObjC.cpp - ObjC Declaration AST Node Implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// ObjCListBase
+//===----------------------------------------------------------------------===//
+
+void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) {
+ List = 0;
+ if (Elts == 0) return; // Setting to an empty list is a noop.
+
+ List = new (Ctx) void*[Elts];
+ NumElts = Elts;
+ memcpy(List, InList, sizeof(void*)*Elts);
+}
+
+void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
+ const SourceLocation *Locs, ASTContext &Ctx) {
+ if (Elts == 0)
+ return;
+
+ Locations = new (Ctx) SourceLocation[Elts];
+ memcpy(Locations, Locs, sizeof(SourceLocation) * Elts);
+ set(InList, Elts, Ctx);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCContainerDecl::anchor() { }
+
+/// getIvarDecl - This method looks up an ivar in this DeclContext.
+///
+ObjCIvarDecl *
+ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const {
+ lookup_const_iterator Ivar, IvarEnd;
+ for (llvm::tie(Ivar, IvarEnd) = lookup(Id); Ivar != IvarEnd; ++Ivar) {
+ if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
+ return ivar;
+ }
+ return 0;
+}
+
+// Get the local instance/class method declared in this interface.
+ObjCMethodDecl *
+ObjCContainerDecl::getMethod(Selector Sel, bool isInstance) const {
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = lookup(Sel); Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isInstanceMethod() == isInstance)
+ return MD;
+ }
+ return 0;
+}
+
+ObjCPropertyDecl *
+ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
+ IdentifierInfo *propertyID) {
+
+ DeclContext::lookup_const_iterator I, E;
+ llvm::tie(I, E) = DC->lookup(propertyID);
+ for ( ; I != E; ++I)
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I))
+ return PD;
+
+ return 0;
+}
+
+/// FindPropertyDeclaration - Finds declaration of the property given its name
+/// in 'PropertyId' and returns it. Returns 0 if the property is not found.
+ObjCPropertyDecl *
+ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const {
+
+ if (ObjCPropertyDecl *PD =
+ ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
+ return PD;
+
+ switch (getKind()) {
+ default:
+ break;
+ case Decl::ObjCProtocol: {
+ const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
+ for (ObjCProtocolDecl::protocol_iterator I = PID->protocol_begin(),
+ E = PID->protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+ break;
+ }
+ case Decl::ObjCInterface: {
+ const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
+ // Look through categories.
+ for (ObjCCategoryDecl *Cat = OID->getCategoryList();
+ Cat; Cat = Cat->getNextClassCategory())
+ if (!Cat->IsClassExtension())
+ if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ // Look through protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ I = OID->all_referenced_protocol_begin(),
+ E = OID->all_referenced_protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ // Finally, check the super class.
+ if (const ObjCInterfaceDecl *superClass = OID->getSuperClass())
+ return superClass->FindPropertyDeclaration(PropertyId);
+ break;
+ }
+ case Decl::ObjCCategory: {
+ const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
+ // Look through protocols.
+ if (!OCD->IsClassExtension())
+ for (ObjCCategoryDecl::protocol_iterator
+ I = OCD->protocol_begin(), E = OCD->protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ break;
+ }
+ }
+ return 0;
+}
+
+void ObjCInterfaceDecl::anchor() { }
+
+/// FindPropertyVisibleInPrimaryClass - Finds declaration of the property
+/// with name 'PropertyId' in the primary class; including those in protocols
+/// (direct or indirect) used by the primary class.
+///
+ObjCPropertyDecl *
+ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
+ IdentifierInfo *PropertyId) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (ObjCPropertyDecl *PD =
+ ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
+ return PD;
+
+ // Look through protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ I = all_referenced_protocol_begin(),
+ E = all_referenced_protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ return 0;
+}
+
+void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
+ ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
+ ASTContext &C)
+{
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (data().AllReferencedProtocols.empty() &&
+ data().ReferencedProtocols.empty()) {
+ data().AllReferencedProtocols.set(ExtList, ExtNum, C);
+ return;
+ }
+
+ // Check for duplicate protocol in class's protocol list.
+  // This is O(n*m), but it is extremely rare and the number of protocols in
+  // a class or its extensions is very small.
+ SmallVector<ObjCProtocolDecl*, 8> ProtocolRefs;
+ for (unsigned i = 0; i < ExtNum; i++) {
+ bool protocolExists = false;
+ ObjCProtocolDecl *ProtoInExtension = ExtList[i];
+ for (all_protocol_iterator
+ p = all_referenced_protocol_begin(),
+ e = all_referenced_protocol_end(); p != e; ++p) {
+ ObjCProtocolDecl *Proto = (*p);
+ if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) {
+ protocolExists = true;
+ break;
+ }
+ }
+    // Do we want to warn on a protocol in an extension class which
+    // already exists in the class? Probably not.
+ if (!protocolExists)
+ ProtocolRefs.push_back(ProtoInExtension);
+ }
+
+ if (ProtocolRefs.empty())
+ return;
+
+ // Merge ProtocolRefs into class's protocol list;
+ for (all_protocol_iterator p = all_referenced_protocol_begin(),
+ e = all_referenced_protocol_end(); p != e; ++p) {
+ ProtocolRefs.push_back(*p);
+ }
+
+ data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
+}
+
+void ObjCInterfaceDecl::allocateDefinitionData() {
+ assert(!hasDefinition() && "ObjC class already has a definition");
+ Data = new (getASTContext()) DefinitionData();
+ Data->Definition = this;
+
+ // Make the type point at the definition, now that we have one.
+ if (TypeForDecl)
+ cast<ObjCInterfaceType>(TypeForDecl)->Decl = this;
+}
+
+void ObjCInterfaceDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD) {
+ if (*RD != this)
+ RD->Data = Data;
+ }
+}
+
+/// getFirstClassExtension - Find first class extension of the given class.
+ObjCCategoryDecl* ObjCInterfaceDecl::getFirstClassExtension() const {
+ for (ObjCCategoryDecl *CDecl = getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
+/// getNextClassExtension - Find the next class extension in the list of
+/// categories.
+const ObjCCategoryDecl* ObjCCategoryDecl::getNextClassExtension() const {
+ for (const ObjCCategoryDecl *CDecl = getNextClassCategory(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
+ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
+ ObjCInterfaceDecl *&clsDeclared) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != NULL) {
+ if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+ for (const ObjCCategoryDecl *CDecl = ClassDecl->getFirstClassExtension();
+ CDecl; CDecl = CDecl->getNextClassExtension()) {
+ if (ObjCIvarDecl *I = CDecl->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+/// lookupInheritedClass - This method returns ObjCInterfaceDecl * of the super
+/// class whose name is passed as argument. If it is not one of the super
+/// classes, it returns NULL.
+ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
+ const IdentifierInfo*ICName) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != NULL) {
+ if (ClassDecl->getIdentifier() == ICName)
+ return ClassDecl;
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+/// lookupMethod - This method returns an instance/class method by looking in
+/// the class, its categories, and its super classes (using a linear search).
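+///
+/// For illustration only (hypothetical interfaces), the search order is:
+/// \code
+///   @interface Base <BaseProto> @end
+///   @interface Sub : Base <SubProto> @end
+///   @interface Sub (Cat) @end
+///   // Sub, SubProto, Sub(Cat) and, unless shallowCategoryLookup is set,
+///   // Cat's protocols, then Base, BaseProto, and so on up the chain.
+/// \endcode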
+ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
+ bool isInstance,
+ bool shallowCategoryLookup) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ const ObjCInterfaceDecl* ClassDecl = this;
+ ObjCMethodDecl *MethodDecl = 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ while (ClassDecl != NULL) {
+ if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // Didn't find one yet - look through protocols.
+ for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end();
+ I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // Didn't find one yet - now look through categories.
+ ObjCCategoryDecl *CatDecl = ClassDecl->getCategoryList();
+ while (CatDecl) {
+ if ((MethodDecl = CatDecl->getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ if (!shallowCategoryLookup) {
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+ }
+ CatDecl = CatDecl->getNextClassCategory();
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
+ const Selector &Sel,
+ bool Instance) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCMethodDecl *Method = 0;
+ if (ObjCImplementationDecl *ImpDecl = getImplementation())
+ Method = Instance ? ImpDecl->getInstanceMethod(Sel)
+ : ImpDecl->getClassMethod(Sel);
+
+ if (!Method && getSuperClass())
+ return getSuperClass()->lookupPrivateMethod(Sel, Instance);
+ return Method;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethodDecl
+//===----------------------------------------------------------------------===//
+
+ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
+ SourceLocation beginLoc,
+ SourceLocation endLoc,
+ Selector SelInfo, QualType T,
+ TypeSourceInfo *ResultTInfo,
+ DeclContext *contextDecl,
+ bool isInstance,
+ bool isVariadic,
+ bool isSynthesized,
+ bool isImplicitlyDeclared,
+ bool isDefined,
+ ImplementationControl impControl,
+ bool HasRelatedResultType) {
+ return new (C) ObjCMethodDecl(beginLoc, endLoc,
+ SelInfo, T, ResultTInfo, contextDecl,
+ isInstance,
+ isVariadic, isSynthesized, isImplicitlyDeclared,
+ isDefined,
+ impControl,
+ HasRelatedResultType);
+}
+
+ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCMethodDecl));
+ return new (Mem) ObjCMethodDecl(SourceLocation(), SourceLocation(),
+ Selector(), QualType(), 0, 0);
+}
+
+void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
+ assert(PrevMethod);
+ getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
+ IsRedeclaration = true;
+ PrevMethod->HasRedeclaration = true;
+}
+
+void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs) {
+ ParamsAndSelLocs = 0;
+ NumParams = Params.size();
+ if (Params.empty() && SelLocs.empty())
+ return;
+
+ unsigned Size = sizeof(ParmVarDecl *) * NumParams +
+ sizeof(SourceLocation) * SelLocs.size();
+ ParamsAndSelLocs = C.Allocate(Size);
+ std::copy(Params.begin(), Params.end(), getParams());
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+}
+
+void ObjCMethodDecl::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+void ObjCMethodDecl::setMethodParams(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs) {
+ assert((!SelLocs.empty() || isImplicit()) &&
+ "No selector locs for non-implicit method");
+ if (isImplicit())
+ return setParamsAndSelLocs(C, Params, ArrayRef<SourceLocation>());
+
+ SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params, EndLoc);
+ if (SelLocsKind != SelLoc_NonStandard)
+ return setParamsAndSelLocs(C, Params, ArrayRef<SourceLocation>());
+
+ setParamsAndSelLocs(C, Params, SelLocs);
+}
+
+/// \brief A definition will return its interface declaration.
+/// An interface declaration will return its definition.
+/// Otherwise it will return itself.
+ObjCMethodDecl *ObjCMethodDecl::getNextRedeclaration() {
+ ASTContext &Ctx = getASTContext();
+ ObjCMethodDecl *Redecl = 0;
+ if (HasRedeclaration)
+ Redecl = const_cast<ObjCMethodDecl*>(Ctx.getObjCMethodRedeclaration(this));
+ if (Redecl)
+ return Redecl;
+
+ Decl *CtxD = cast<Decl>(getDeclContext());
+
+ if (ObjCInterfaceDecl *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) {
+ if (ObjCImplementationDecl *ImplD = Ctx.getObjCImplementation(IFD))
+ Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) {
+ if (ObjCCategoryImplDecl *ImplD = Ctx.getObjCImplementation(CD))
+ Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCImplementationDecl *ImplD =
+ dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
+ Redecl = IFD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCCategoryImplDecl *CImplD =
+ dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
+ Redecl = CatD->getMethod(getSelector(), isInstanceMethod());
+ }
+
+ if (!Redecl && isRedeclaration()) {
+ // This is the last redeclaration, go back to the first method.
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+ }
+
+ return Redecl ? Redecl : this;
+}
+
+ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
+ Decl *CtxD = cast<Decl>(getDeclContext());
+
+ if (ObjCImplementationDecl *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
+ if (ObjCMethodDecl *MD = IFD->getMethod(getSelector(),
+ isInstanceMethod()))
+ return MD;
+
+ } else if (ObjCCategoryImplDecl *CImplD =
+ dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
+ if (ObjCMethodDecl *MD = CatD->getMethod(getSelector(),
+ isInstanceMethod()))
+ return MD;
+ }
+
+ if (isRedeclaration())
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+
+ return this;
+}
+
+ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
+ ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
+ if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
+ return family;
+
+ // Check for an explicit attribute.
+ if (const ObjCMethodFamilyAttr *attr = getAttr<ObjCMethodFamilyAttr>()) {
+ // The unfortunate necessity of mapping between enums here is due
+ // to the attributes framework.
+ switch (attr->getFamily()) {
+ case ObjCMethodFamilyAttr::OMF_None: family = OMF_None; break;
+ case ObjCMethodFamilyAttr::OMF_alloc: family = OMF_alloc; break;
+ case ObjCMethodFamilyAttr::OMF_copy: family = OMF_copy; break;
+ case ObjCMethodFamilyAttr::OMF_init: family = OMF_init; break;
+ case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break;
+ case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break;
+ }
+ Family = static_cast<unsigned>(family);
+ return family;
+ }
+
+ family = getSelector().getMethodFamily();
+ switch (family) {
+ case OMF_None: break;
+
+ // init only has a conventional meaning for an instance method, and
+ // it has to return an object.
+ case OMF_init:
+ if (!isInstanceMethod() || !getResultType()->isObjCObjectPointerType())
+ family = OMF_None;
+ break;
+
+ // alloc/copy/new have a conventional meaning for both class and
+ // instance methods, but they require an object return.
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ if (!getResultType()->isObjCObjectPointerType())
+ family = OMF_None;
+ break;
+
+ // These selectors have a conventional meaning only for instance methods.
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_self:
+ if (!isInstanceMethod())
+ family = OMF_None;
+ break;
+
+ case OMF_performSelector:
+ if (!isInstanceMethod() ||
+ !getResultType()->isObjCIdType())
+ family = OMF_None;
+ else {
+ unsigned noParams = param_size();
+ if (noParams < 1 || noParams > 3)
+ family = OMF_None;
+ else {
+ ObjCMethodDecl::arg_type_iterator it = arg_type_begin();
+ QualType ArgT = (*it);
+ if (!ArgT->isObjCSelType()) {
+ family = OMF_None;
+ break;
+ }
+ while (--noParams) {
+ it++;
+ ArgT = (*it);
+ if (!ArgT->isObjCIdType()) {
+ family = OMF_None;
+ break;
+ }
+ }
+ }
+ }
+ break;
+
+ }
+
+ // Cache the result.
+ Family = static_cast<unsigned>(family);
+ return family;
+}
+
+void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ QualType selfTy;
+ if (isInstanceMethod()) {
+ // There may be no interface context due to error in declaration
+ // of the interface (which has been reported). Recover gracefully.
+ if (OID) {
+ selfTy = Context.getObjCInterfaceType(OID);
+ selfTy = Context.getObjCObjectPointerType(selfTy);
+ } else {
+ selfTy = Context.getObjCIdType();
+ }
+ } else // we have a factory method.
+ selfTy = Context.getObjCClassType();
+
+ bool selfIsPseudoStrong = false;
+ bool selfIsConsumed = false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ if (isInstanceMethod()) {
+ selfIsConsumed = hasAttr<NSConsumesSelfAttr>();
+
+ // 'self' is always __strong. It's actually pseudo-strong except
+ // in init methods (or methods labeled ns_consumes_self), though.
+ Qualifiers qs;
+ qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ selfTy = Context.getQualifiedType(selfTy, qs);
+
+ // In addition, 'self' is const unless this is an init method.
+ if (getMethodFamily() != OMF_init && !selfIsConsumed) {
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+ else {
+ assert(isClassMethod());
+ // 'self' is always const in class methods.
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+
+ ImplicitParamDecl *self
+ = ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("self"), selfTy);
+ setSelfDecl(self);
+
+ if (selfIsConsumed)
+ self->addAttr(new (Context) NSConsumedAttr(SourceLocation(), Context));
+
+ if (selfIsPseudoStrong)
+ self->setARCPseudoStrong(true);
+
+ setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("_cmd"),
+ Context.getObjCSelType()));
+}
+
+ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
+ return IMD->getClassInterface();
+
+ assert(!isa<ObjCProtocolDecl>(getDeclContext()) && "It's a protocol method");
+ llvm_unreachable("unknown method context");
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *PrevDecl,
+ SourceLocation ClassLoc,
+ bool isInternal){
+ ObjCInterfaceDecl *Result = new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc,
+ PrevDecl, isInternal);
+ C.getObjCInterfaceType(Result, PrevDecl);
+ return Result;
+}
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCInterfaceDecl));
+ return new (Mem) ObjCInterfaceDecl(0, SourceLocation(), 0, SourceLocation(),
+ 0, false);
+}
+
+ObjCInterfaceDecl::
+ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
+ SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl,
+ bool isInternal)
+ : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, atLoc),
+ TypeForDecl(0), Data()
+{
+ setPreviousDeclaration(PrevDecl);
+
+ // Copy the 'data' pointer over.
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+
+ setImplicit(isInternal);
+}
+
+void ObjCInterfaceDecl::LoadExternalDefinition() const {
+ assert(data().ExternallyCompleted && "Class is not externally completed");
+ data().ExternallyCompleted = false;
+ getASTContext().getExternalSource()->CompleteType(
+ const_cast<ObjCInterfaceDecl *>(this));
+}
+
+void ObjCInterfaceDecl::setExternallyCompleted() {
+ assert(getASTContext().getExternalSource() &&
+ "Class can't be externally completed without an external source");
+ assert(hasDefinition() &&
+ "Forward declarations can't be externally completed");
+ data().ExternallyCompleted = true;
+}
+
+ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition()) {
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl*>(Def));
+ }
+
+ // FIXME: Should make sure no callers ever do this.
+ return 0;
+}
+
+void ObjCInterfaceDecl::setImplementation(ObjCImplementationDecl *ImplD) {
+ getASTContext().setObjCImplementation(getDefinition(), ImplD);
+}
+
+/// all_declared_ivar_begin - return first ivar declared in this class,
+/// its extensions and its implementation. Lazily build the list on first
+/// access.
+ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().IvarList)
+ return data().IvarList;
+
+ ObjCIvarDecl *curIvar = 0;
+ if (!ivar_empty()) {
+ ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
+ data().IvarList = (*I); ++I;
+ for (curIvar = data().IvarList; I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+
+ for (const ObjCCategoryDecl *CDecl = getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension()) {
+ if (!CDecl->ivar_empty()) {
+ ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
+ E = CDecl->ivar_end();
+ if (!data().IvarList) {
+ data().IvarList = (*I); ++I;
+ curIvar = data().IvarList;
+ }
+ for ( ;I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+ }
+
+ if (ObjCImplementationDecl *ImplDecl = getImplementation()) {
+ if (!ImplDecl->ivar_empty()) {
+ ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
+ E = ImplDecl->ivar_end();
+ if (!data().IvarList) {
+ data().IvarList = (*I); ++I;
+ curIvar = data().IvarList;
+ }
+ for ( ;I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+ }
+ return data().IvarList;
+}
+
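+// Usage sketch (illustrative only): the list built above is a singly linked
+// chain threaded through the ivars, so one walk covers the class, its class
+// extensions, and its @implementation. getNextIvar() is assumed to be the
+// accessor matching the setNextIvar() calls above.
+//
+//   static unsigned countAllDeclaredIvars(ObjCInterfaceDecl *ID) {
+//     unsigned N = 0;
+//     for (ObjCIvarDecl *IV = ID->all_declared_ivar_begin(); IV;
+//          IV = IV->getNextIvar())
+//       ++N;
+//     return N;
+//   }
+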
+/// FindCategoryDeclaration - Finds the category declaration with the name
+/// 'CategoryId' in the list of categories for this class and returns it.
+/// Returns 0 if the category is not found.
+///
+ObjCCategoryDecl *
+ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (Category->getIdentifier() == CategoryId)
+ return Category;
+ return 0;
+}
+
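+// Usage sketch (illustrative only; the helper is hypothetical): categories are
+// looked up by identifier, so a plain name has to be turned into an
+// IdentifierInfo first.
+//
+//   static ObjCCategoryDecl *findCategory(ObjCInterfaceDecl *ID,
+//                                         ASTContext &Ctx,
+//                                         llvm::StringRef Name) {
+//     return ID->FindCategoryDeclaration(&Ctx.Idents.get(Name));
+//   }
+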
+ObjCMethodDecl *
+ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const {
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (ObjCCategoryImplDecl *Impl = Category->getImplementation())
+ if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel))
+ return MD;
+ return 0;
+}
+
+ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (ObjCCategoryImplDecl *Impl = Category->getImplementation())
+ if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel))
+ return MD;
+ return 0;
+}
+
+/// ClassImplementsProtocol - Checks whether the 'lProto' protocol has been
+/// implemented by this class, its super class, or its categories (if
+/// lookupCategory is true).
+bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto,
+ bool lookupCategory,
+ bool RHSIsQualifiedID) {
+ if (!hasDefinition())
+ return false;
+
+ ObjCInterfaceDecl *IDecl = this;
+ // 1st, look up the class.
+ for (ObjCInterfaceDecl::protocol_iterator
+ PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI){
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+    // This is dubious and is added to be compatible with gcc. gcc also
+    // allows assigning a protocol-qualified 'id' type to an LHS object when
+    // the protocol in the qualified LHS is in the list of protocols of the
+    // RHS 'id' object. This, IMO, should be a bug.
+ // FIXME: Treat this as an extension, and flag this as an error when GCC
+ // extensions are not enabled.
+ if (RHSIsQualifiedID &&
+ getASTContext().ProtocolCompatibleWithProtocol(*PI, lProto))
+ return true;
+ }
+
+ // 2nd, look up the category.
+ if (lookupCategory)
+ for (ObjCCategoryDecl *CDecl = IDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory()) {
+ for (ObjCCategoryDecl::protocol_iterator PI = CDecl->protocol_begin(),
+ E = CDecl->protocol_end(); PI != E; ++PI)
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ }
+
+ // 3rd, look up the super class(s)
+ if (IDecl->getSuperClass())
+ return
+ IDecl->getSuperClass()->ClassImplementsProtocol(lProto, lookupCategory,
+ RHSIsQualifiedID);
+
+ return false;
+}
+
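+// Usage sketch (illustrative only, hypothetical helper): a typical conformance
+// query checks the class's own protocols, then its categories, then the
+// superclass chain, which is exactly the order implemented above.
+//
+//   static bool conformsTo(ObjCInterfaceDecl *ID, ObjCProtocolDecl *Proto) {
+//     return ID->ClassImplementsProtocol(Proto, /*lookupCategory=*/true,
+//                                        /*RHSIsQualifiedID=*/false);
+//   }
+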
+//===----------------------------------------------------------------------===//
+// ObjCIvarDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCIvarDecl::anchor() { }
+
+ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ AccessControl ac, Expr *BW,
+ bool synthesized) {
+ if (DC) {
+ // Ivar's can only appear in interfaces, implementations (via synthesized
+ // properties), and class extensions (via direct declaration, or synthesized
+ // properties).
+ //
+ // FIXME: This should really be asserting this:
+ // (isa<ObjCCategoryDecl>(DC) &&
+ // cast<ObjCCategoryDecl>(DC)->IsClassExtension()))
+ // but unfortunately we sometimes place ivars into non-class extension
+    // categories on error. This breaks an AST invariant, and should be
+    // fixed.
+ assert((isa<ObjCInterfaceDecl>(DC) || isa<ObjCImplementationDecl>(DC) ||
+ isa<ObjCCategoryDecl>(DC)) &&
+ "Invalid ivar decl context!");
+ // Once a new ivar is created in any of class/class-extension/implementation
+ // decl contexts, the previously built IvarList must be rebuilt.
+ ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC);
+ if (!ID) {
+ if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC)) {
+ ID = IM->getClassInterface();
+ if (BW)
+ IM->setHasSynthBitfield(true);
+ } else {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
+ ID = CD->getClassInterface();
+ if (BW)
+ CD->setHasSynthBitfield(true);
+ }
+ }
+ ID->setIvarList(0);
+ }
+
+ return new (C) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo,
+ ac, BW, synthesized);
+}
+
+ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCIvarDecl));
+ return new (Mem) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, ObjCIvarDecl::None, 0, false);
+}
+
+const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
+ const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext());
+
+ switch (DC->getKind()) {
+ default:
+ case ObjCCategoryImpl:
+ case ObjCProtocol:
+ llvm_unreachable("invalid ivar container!");
+
+ // Ivars can only appear in class extension categories.
+ case ObjCCategory: {
+ const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
+ assert(CD->IsClassExtension() && "invalid container for ivar!");
+ return CD->getClassInterface();
+ }
+
+ case ObjCImplementation:
+ return cast<ObjCImplementationDecl>(DC)->getClassInterface();
+
+ case ObjCInterface:
+ return cast<ObjCInterfaceDecl>(DC);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCAtDefsFieldDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCAtDefsFieldDecl::anchor() { }
+
+ObjCAtDefsFieldDecl
+*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T, Expr *BW) {
+ return new (C) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
+}
+
+ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCAtDefsFieldDecl));
+ return new (Mem) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCProtocolDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCProtocolDecl::anchor() { }
+
+ObjCProtocolDecl::ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl)
+ : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc), Data()
+{
+ setPreviousDeclaration(PrevDecl);
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl) {
+ ObjCProtocolDecl *Result
+ = new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
+
+ return Result;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCProtocolDecl));
+ return new (Mem) ObjCProtocolDecl(0, 0, SourceLocation(), SourceLocation(),
+ 0);
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
+ ObjCProtocolDecl *PDecl = this;
+
+ if (Name == getIdentifier())
+ return PDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((PDecl = (*I)->lookupProtocolNamed(Name)))
+ return PDecl;
+
+ return NULL;
+}
+
+// lookupMethod - Look up an instance/class method in the protocol and the
+// protocols it inherits.
+ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
+ bool isInstance) const {
+ ObjCMethodDecl *MethodDecl = NULL;
+
+ if ((MethodDecl = getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+ return NULL;
+}
+
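+// Usage sketch (illustrative only, hypothetical helper): the lookup above is
+// depth-first, checking the protocol itself before recursing into each
+// protocol it inherits.
+//
+//   static ObjCMethodDecl *findInstanceMethod(ObjCProtocolDecl *PD,
+//                                             Selector Sel) {
+//     return PD->lookupMethod(Sel, /*isInstance=*/true);
+//   }
+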
+void ObjCProtocolDecl::allocateDefinitionData() {
+ assert(!Data && "Protocol already has a definition!");
+ Data = new (getASTContext()) DefinitionData;
+ Data->Definition = this;
+}
+
+void ObjCProtocolDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD)
+ RD->Data = this->Data;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCategoryDecl::anchor() { }
+
+ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation AtLoc,
+ SourceLocation ClassNameLoc,
+ SourceLocation CategoryNameLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ ObjCCategoryDecl *CatDecl = new (C) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc,
+ CategoryNameLoc, Id,
+ IDecl,
+ IvarLBraceLoc, IvarRBraceLoc);
+ if (IDecl) {
+ // Link this category into its class's category list.
+ CatDecl->NextClassCategory = IDecl->getCategoryList();
+ if (IDecl->hasDefinition()) {
+ IDecl->setCategoryList(CatDecl);
+ if (ASTMutationListener *L = C.getASTMutationListener())
+ L->AddedObjCCategoryToInterface(CatDecl, IDecl);
+ }
+ }
+
+ return CatDecl;
+}
+
+ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryDecl));
+ return new (Mem) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
+ SourceLocation(), 0, 0);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCCategoryDecl*>(this));
+}
+
+void ObjCCategoryDecl::setImplementation(ObjCCategoryImplDecl *ImplD) {
+ getASTContext().setObjCImplementation(this, ImplD);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryImplDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCategoryImplDecl::anchor() { }
+
+ObjCCategoryImplDecl *
+ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *ClassInterface,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
+ return new (C) ObjCCategoryImplDecl(DC, Id, ClassInterface,
+ nameLoc, atStartLoc, CategoryNameLoc);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryImplDecl));
+ return new (Mem) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(),
+ SourceLocation(), SourceLocation());
+}
+
+ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
+ // The class interface might be NULL if we are working with invalid code.
+ if (const ObjCInterfaceDecl *ID = getClassInterface())
+ return ID->FindCategoryDeclaration(getIdentifier());
+ return 0;
+}
+
+
+void ObjCImplDecl::anchor() { }
+
+void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) {
+ // FIXME: The context should be correct before we get here.
+ property->setLexicalDeclContext(this);
+ addDecl(property);
+}
+
+void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) {
+ ASTContext &Ctx = getASTContext();
+
+ if (ObjCImplementationDecl *ImplD
+ = dyn_cast_or_null<ObjCImplementationDecl>(this)) {
+ if (IFace)
+ Ctx.setObjCImplementation(IFace, ImplD);
+
+ } else if (ObjCCategoryImplDecl *ImplD =
+ dyn_cast_or_null<ObjCCategoryImplDecl>(this)) {
+ if (ObjCCategoryDecl *CD = IFace->FindCategoryDeclaration(getIdentifier()))
+ Ctx.setObjCImplementation(CD, ImplD);
+ }
+
+ ClassInterface = IFace;
+}
+
+/// FindPropertyImplIvarDecl - This method looks up the ivar in the list of
+/// properties implemented in this category's @implementation block and
+/// returns the implemented property that uses it.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
+ for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyIvarDecl() &&
+ PID->getPropertyIvarDecl()->getIdentifier() == ivarId)
+ return PID;
+ }
+ return 0;
+}
+
+/// FindPropertyImplDecl - This method looks up a previous ObjCPropertyImplDecl
+/// added to the list of those properties @synthesized/@dynamic in this
+/// category @implementation block.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplDecl(IdentifierInfo *Id) const {
+ for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl()->getIdentifier() == Id)
+ return PID;
+ }
+ return 0;
+}
+
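+// Usage sketch (illustrative only, hypothetical helper): the two finders above
+// answer complementary questions, either "which @synthesize/@dynamic uses this
+// ivar?" or "which one implements this property name?".
+//
+//   static bool ivarBacksSomeProperty(ObjCImplDecl *Impl, ObjCIvarDecl *Ivar) {
+//     return Impl->FindPropertyImplIvarDecl(Ivar->getIdentifier()) != 0;
+//   }
+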
+raw_ostream &clang::operator<<(raw_ostream &OS,
+ const ObjCCategoryImplDecl &CID) {
+ OS << CID.getName();
+ return OS;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCImplementationDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCImplementationDecl::anchor() { }
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
+ ObjCInterfaceDecl *ClassInterface,
+ ObjCInterfaceDecl *SuperDecl,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
+ return new (C) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
+ nameLoc, atStartLoc,
+ IvarLBraceLoc, IvarRBraceLoc);
+}
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCImplementationDecl));
+ return new (Mem) ObjCImplementationDecl(0, 0, 0, SourceLocation(),
+ SourceLocation());
+}
+
+void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
+ CXXCtorInitializer ** initializers,
+ unsigned numInitializers) {
+ if (numInitializers > 0) {
+ NumIvarInitializers = numInitializers;
+ CXXCtorInitializer **ivarInitializers =
+ new (C) CXXCtorInitializer*[NumIvarInitializers];
+ memcpy(ivarInitializers, initializers,
+ numInitializers * sizeof(CXXCtorInitializer*));
+ IvarInitializers = ivarInitializers;
+ }
+}
+
+raw_ostream &clang::operator<<(raw_ostream &OS,
+ const ObjCImplementationDecl &ID) {
+ OS << ID.getName();
+ return OS;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCompatibleAliasDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCompatibleAliasDecl::anchor() { }
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl* AliasedClass) {
+ return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
+}
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCompatibleAliasDecl));
+ return new (Mem) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCPropertyDecl::anchor() { }
+
+ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ TypeSourceInfo *T,
+ PropertyControl propControl) {
+ return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
+}
+
+ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void * Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyDecl));
+ return new (Mem) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
+ SourceLocation(),
+ 0);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyImplDecl
+//===----------------------------------------------------------------------===//
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ SourceLocation L,
+ ObjCPropertyDecl *property,
+ Kind PK,
+ ObjCIvarDecl *ivar,
+ SourceLocation ivarLoc) {
+ return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
+ ivarLoc);
+}
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyImplDecl));
+ return new (Mem) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
+ 0, Dynamic, 0, SourceLocation());
+}
+
+SourceRange ObjCPropertyImplDecl::getSourceRange() const {
+ SourceLocation EndLoc = getLocation();
+ if (IvarLoc.isValid())
+ EndLoc = IvarLoc;
+
+ return SourceRange(AtLoc, EndLoc);
+}
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
new file mode 100644
index 0000000..74e1c1b
--- /dev/null
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -0,0 +1,1072 @@
+//===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl::print and Decl::dump methods, which
+// pretty-print the AST back out to C/Objective-C/C++/Objective-C++ code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Module.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class DeclPrinter : public DeclVisitor<DeclPrinter> {
+ raw_ostream &Out;
+ ASTContext &Context;
+ PrintingPolicy Policy;
+ unsigned Indentation;
+ bool PrintInstantiation;
+
+ raw_ostream& Indent() { return Indent(Indentation); }
+ raw_ostream& Indent(unsigned Indentation);
+ void ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls);
+
+ void Print(AccessSpecifier AS);
+
+ public:
+ DeclPrinter(raw_ostream &Out, ASTContext &Context,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0,
+ bool PrintInstantiation = false)
+ : Out(Out), Context(Context), Policy(Policy), Indentation(Indentation),
+ PrintInstantiation(PrintInstantiation) { }
+
+ void VisitDeclContext(DeclContext *DC, bool Indent = true);
+
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitTypeAliasDecl(TypeAliasDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitLabelDecl(LabelDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitTemplateDecl(const TemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+
+ void PrintTemplateParameters(const TemplateParameterList *Params,
+ const TemplateArgumentList *Args);
+ void prettyPrintAttributes(Decl *D);
+ };
+}
+
+void Decl::print(raw_ostream &Out, unsigned Indentation,
+ bool PrintInstantiation) const {
+ print(Out, getASTContext().getPrintingPolicy(), Indentation, PrintInstantiation);
+}
+
+void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation, bool PrintInstantiation) const {
+ DeclPrinter Printer(Out, getASTContext(), Policy, Indentation, PrintInstantiation);
+ Printer.Visit(const_cast<Decl*>(this));
+}
+
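+// Usage sketch (illustrative only): most clients call the convenience overload
+// above, which picks up the ASTContext's printing policy; a custom policy can
+// be passed to the second overload instead.
+//
+//   void printDecl(const Decl *D) {
+//     D->print(llvm::outs(), /*Indentation=*/0);
+//   }
+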
+static QualType GetBaseType(QualType T) {
+ // FIXME: This should be on the Type class!
+ QualType BaseType = T;
+ while (!BaseType->isSpecifierType()) {
+ if (isa<TypedefType>(BaseType))
+ break;
+ else if (const PointerType* PTy = BaseType->getAs<PointerType>())
+ BaseType = PTy->getPointeeType();
+ else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
+ BaseType = ATy->getElementType();
+ else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
+ BaseType = FTy->getResultType();
+ else if (const VectorType *VTy = BaseType->getAs<VectorType>())
+ BaseType = VTy->getElementType();
+ else
+ llvm_unreachable("Unknown declarator!");
+ }
+ return BaseType;
+}
+
+static QualType getDeclType(Decl* D) {
+ if (TypedefNameDecl* TDD = dyn_cast<TypedefNameDecl>(D))
+ return TDD->getUnderlyingType();
+ if (ValueDecl* VD = dyn_cast<ValueDecl>(D))
+ return VD->getType();
+ return QualType();
+}
+
+void Decl::printGroup(Decl** Begin, unsigned NumDecls,
+ raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation) {
+ if (NumDecls == 1) {
+ (*Begin)->print(Out, Policy, Indentation);
+ return;
+ }
+
+ Decl** End = Begin + NumDecls;
+ TagDecl* TD = dyn_cast<TagDecl>(*Begin);
+ if (TD)
+ ++Begin;
+
+ PrintingPolicy SubPolicy(Policy);
+ if (TD && TD->isCompleteDefinition()) {
+ TD->print(Out, Policy, Indentation);
+ Out << " ";
+ SubPolicy.SuppressTag = true;
+ }
+
+ bool isFirst = true;
+ for ( ; Begin != End; ++Begin) {
+ if (isFirst) {
+ SubPolicy.SuppressSpecifiers = false;
+ isFirst = false;
+ } else {
+      Out << ", ";
+ SubPolicy.SuppressSpecifiers = true;
+ }
+
+ (*Begin)->print(Out, SubPolicy, Indentation);
+ }
+}
+
+void DeclContext::dumpDeclContext() const {
+ // Get the translation unit
+ const DeclContext *DC = this;
+ while (!DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
+ DeclPrinter Printer(llvm::errs(), Ctx, Ctx.getPrintingPolicy(), 0);
+ Printer.VisitDeclContext(const_cast<DeclContext *>(this), /*Indent=*/false);
+}
+
+void Decl::dump() const {
+ print(llvm::errs());
+}
+
+raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
+ for (unsigned i = 0; i != Indentation; ++i)
+ Out << " ";
+ return Out;
+}
+
+void DeclPrinter::prettyPrintAttributes(Decl *D) {
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) {
+ Attr *A = *i;
+ A->printPretty(Out, Context);
+ }
+ }
+}
+
+void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
+ this->Indent();
+ Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation);
+ Out << ";\n";
+ Decls.clear();
+
+}
+
+void DeclPrinter::Print(AccessSpecifier AS) {
+ switch(AS) {
+ case AS_none: llvm_unreachable("No access specifier!");
+ case AS_public: Out << "public"; break;
+ case AS_protected: Out << "protected"; break;
+ case AS_private: Out << "private"; break;
+ }
+}
+
+//----------------------------------------------------------------------------
+// Common C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
+ if (Indent)
+ Indentation += Policy.Indentation;
+
+ SmallVector<Decl*, 2> Decls;
+ for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
+ D != DEnd; ++D) {
+
+ // Don't print ObjCIvarDecls, as they are printed when visiting the
+ // containing ObjCInterfaceDecl.
+ if (isa<ObjCIvarDecl>(*D))
+ continue;
+
+ if (!Policy.Dump) {
+ // Skip over implicit declarations in pretty-printing mode.
+ if (D->isImplicit()) continue;
+ // FIXME: Ugly hack so we don't pretty-print the builtin declaration
+ // of __builtin_va_list or __[u]int128_t. There should be some other way
+ // to check that.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
+ if (IdentifierInfo *II = ND->getIdentifier()) {
+ if (II->isStr("__builtin_va_list") ||
+ II->isStr("__int128_t") || II->isStr("__uint128_t"))
+ continue;
+ }
+ }
+ }
+
+    // The next bit of code handles stuff like "struct {int x;} a,b"; we're
+ // forced to merge the declarations because there's no other way to
+ // refer to the struct in question. This limited merging is safe without
+ // a bunch of other checks because it only merges declarations directly
+ // referring to the tag, not typedefs.
+ //
+ // Check whether the current declaration should be grouped with a previous
+ // unnamed struct.
+ QualType CurDeclType = getDeclType(*D);
+ if (!Decls.empty() && !CurDeclType.isNull()) {
+ QualType BaseType = GetBaseType(CurDeclType);
+ if (!BaseType.isNull() && isa<TagType>(BaseType) &&
+ cast<TagType>(BaseType)->getDecl() == Decls[0]) {
+ Decls.push_back(*D);
+ continue;
+ }
+ }
+
+ // If we have a merged group waiting to be handled, handle it now.
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ // If the current declaration is an unnamed tag type, save it
+ // so we can merge it with the subsequent declaration(s) using it.
+ if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
+ Decls.push_back(*D);
+ continue;
+ }
+
+ if (isa<AccessSpecDecl>(*D)) {
+ Indentation -= Policy.Indentation;
+ this->Indent();
+ Print(D->getAccess());
+ Out << ":\n";
+ Indentation += Policy.Indentation;
+ continue;
+ }
+
+ this->Indent();
+ Visit(*D);
+
+ // FIXME: Need to be able to tell the DeclPrinter when
+ const char *Terminator = 0;
+ if (isa<FunctionDecl>(*D) &&
+ cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
+ Terminator = 0;
+ else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
+ Terminator = 0;
+ else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
+ isa<ObjCImplementationDecl>(*D) ||
+ isa<ObjCInterfaceDecl>(*D) ||
+ isa<ObjCProtocolDecl>(*D) ||
+ isa<ObjCCategoryImplDecl>(*D) ||
+ isa<ObjCCategoryDecl>(*D))
+ Terminator = 0;
+ else if (isa<EnumConstantDecl>(*D)) {
+ DeclContext::decl_iterator Next = D;
+ ++Next;
+ if (Next != DEnd)
+ Terminator = ",";
+ } else
+ Terminator = ";";
+
+ if (Terminator)
+ Out << Terminator;
+ Out << "\n";
+ }
+
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ if (Indent)
+ Indentation -= Policy.Indentation;
+}
+
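+// Illustrative example (approximate output): the grouping logic above keeps
+// declarations that share an unnamed tag together, so a context containing
+// "struct { int x; } a, b;" is printed roughly as
+//
+//   struct {
+//     int x;
+//   } a, b;
+//
+// rather than as separate declarations that could not name the struct.
+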
+void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ VisitDeclContext(D, false);
+}
+
+void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
+ std::string S = D->getNameAsString();
+ D->getUnderlyingType().getAsStringInternal(S, Policy);
+ if (!Policy.SuppressSpecifiers) {
+ Out << "typedef ";
+
+ if (D->isModulePrivate())
+ Out << "__module_private__ ";
+ }
+ Out << S;
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ Out << "using " << *D << " = " << D->getUnderlyingType().getAsString(Policy);
+}
+
+void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << "enum ";
+ if (D->isScoped()) {
+ if (D->isScopedUsingClassTag())
+ Out << "class ";
+ else
+ Out << "struct ";
+ }
+ Out << *D;
+
+ if (D->isFixed()) {
+ std::string Underlying;
+ D->getIntegerType().getAsStringInternal(Underlying, Policy);
+ Out << " : " << Underlying;
+ }
+
+ if (D->isCompleteDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << D->getKindName();
+ if (D->getIdentifier())
+ Out << ' ' << *D;
+
+ if (D->isCompleteDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ Out << *D;
+ if (Expr *Init = D->getInitExpr()) {
+ Out << " = ";
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+}
+
+void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
+ if (!Policy.SuppressSpecifiers) {
+ switch (D->getStorageClassAsWritten()) {
+ case SC_None: break;
+ case SC_Extern: Out << "extern "; break;
+ case SC_Static: Out << "static "; break;
+ case SC_PrivateExtern: Out << "__private_extern__ "; break;
+ case SC_Auto: case SC_Register: case SC_OpenCLWorkGroupLocal:
+ llvm_unreachable("invalid for functions");
+ }
+
+ if (D->isInlineSpecified()) Out << "inline ";
+ if (D->isVirtualAsWritten()) Out << "virtual ";
+ if (D->isModulePrivate()) Out << "__module_private__ ";
+ }
+
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressSpecifiers = false;
+ std::string Proto = D->getNameInfo().getAsString();
+
+ QualType Ty = D->getType();
+ while (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ Proto = '(' + Proto + ')';
+ Ty = PT->getInnerType();
+ }
+
+ if (isa<FunctionType>(Ty)) {
+ const FunctionType *AFT = Ty->getAs<FunctionType>();
+ const FunctionProtoType *FT = 0;
+ if (D->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ Proto += "(";
+ if (FT) {
+ llvm::raw_string_ostream POut(Proto);
+ DeclPrinter ParamPrinter(POut, Context, SubPolicy, Indentation);
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ }
+
+ if (FT->isVariadic()) {
+ if (D->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ } else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) {
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i)
+ Proto += ", ";
+ Proto += D->getParamDecl(i)->getNameAsString();
+ }
+ }
+
+ Proto += ")";
+
+ if (FT && FT->getTypeQuals()) {
+ unsigned TypeQuals = FT->getTypeQuals();
+ if (TypeQuals & Qualifiers::Const)
+ Proto += " const";
+ if (TypeQuals & Qualifiers::Volatile)
+ Proto += " volatile";
+ if (TypeQuals & Qualifiers::Restrict)
+ Proto += " restrict";
+ }
+
+ if (FT && FT->hasDynamicExceptionSpec()) {
+ Proto += " throw(";
+ if (FT->getExceptionSpecType() == EST_MSAny)
+ Proto += "...";
+ else
+ for (unsigned I = 0, N = FT->getNumExceptions(); I != N; ++I) {
+ if (I)
+ Proto += ", ";
+
+ std::string ExceptionType;
+ FT->getExceptionType(I).getAsStringInternal(ExceptionType, SubPolicy);
+ Proto += ExceptionType;
+ }
+ Proto += ")";
+ } else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
+ Proto += " noexcept";
+ if (FT->getExceptionSpecType() == EST_ComputedNoexcept) {
+ Proto += "(";
+ llvm::raw_string_ostream EOut(Proto);
+ FT->getNoexceptExpr()->printPretty(EOut, Context, 0, SubPolicy,
+ Indentation);
+ EOut.flush();
+ Proto += EOut.str();
+ Proto += ")";
+ }
+ }
+
+ if (CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D)) {
+ bool HasInitializerList = false;
+ for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(),
+ E = CDecl->init_end();
+ B != E; ++B) {
+ CXXCtorInitializer * BMInitializer = (*B);
+ if (BMInitializer->isInClassMemberInitializer())
+ continue;
+
+ if (!HasInitializerList) {
+ Proto += " : ";
+ Out << Proto;
+ Proto.clear();
+ HasInitializerList = true;
+ } else
+ Out << ", ";
+
+ if (BMInitializer->isAnyMemberInitializer()) {
+ FieldDecl *FD = BMInitializer->getAnyMember();
+ Out << *FD;
+ } else {
+ Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(Policy);
+ }
+
+ Out << "(";
+ if (!BMInitializer->getInit()) {
+ // Nothing to print
+ } else {
+ Expr *Init = BMInitializer->getInit();
+ if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init))
+ Init = Tmp->getSubExpr();
+
+ Init = Init->IgnoreParens();
+
+ Expr *SimpleInit = 0;
+ Expr **Args = 0;
+ unsigned NumArgs = 0;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ } else if (CXXConstructExpr *Construct
+ = dyn_cast<CXXConstructExpr>(Init)) {
+ Args = Construct->getArgs();
+ NumArgs = Construct->getNumArgs();
+ } else
+ SimpleInit = Init;
+
+ if (SimpleInit)
+ SimpleInit->printPretty(Out, Context, 0, Policy, Indentation);
+ else {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (isa<CXXDefaultArgExpr>(Args[I]))
+ break;
+
+ if (I)
+ Out << ", ";
+ Args[I]->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+ }
+ }
+ Out << ")";
+ }
+ }
+ else
+ AFT->getResultType().getAsStringInternal(Proto, Policy);
+ } else {
+ Ty.getAsStringInternal(Proto, Policy);
+ }
+
+ Out << Proto;
+ prettyPrintAttributes(D);
+
+ if (D->isPure())
+ Out << " = 0";
+ else if (D->isDeletedAsWritten())
+ Out << " = delete";
+ else if (D->doesThisDeclarationHaveABody()) {
+ if (!D->hasPrototype() && D->getNumParams()) {
+ // This is a K&R function definition, so we need to print the
+ // parameters.
+ Out << '\n';
+ DeclPrinter ParamPrinter(Out, Context, SubPolicy, Indentation);
+ Indentation += Policy.Indentation;
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ Indent();
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ Out << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ } else
+ Out << ' ';
+
+ D->getBody()->printPretty(Out, Context, 0, SubPolicy, Indentation);
+ Out << '\n';
+ }
+}
+
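+// Illustrative example (approximate output): for a K&R-style definition with
+// no prototype, the branch above prints the parameter declarations on their
+// own indented lines between the declarator and the body, roughly:
+//
+//   int add(a, b)
+//     int a;
+//     int b;
+//   {
+//     return a + b;
+//   }
+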
+void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isMutable())
+ Out << "mutable ";
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+
+ std::string Name = D->getNameAsString();
+ D->getType().getAsStringInternal(Name, Policy);
+ Out << Name;
+
+ if (D->isBitField()) {
+ Out << " : ";
+ D->getBitWidth()->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+
+ Expr *Init = D->getInClassInitializer();
+ if (!Policy.SuppressInitializers && Init) {
+ Out << " = ";
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
+ Out << *D << ":";
+}
+
+
+void DeclPrinter::VisitVarDecl(VarDecl *D) {
+ StorageClass SCAsWritten = D->getStorageClassAsWritten();
+ if (!Policy.SuppressSpecifiers && SCAsWritten != SC_None)
+ Out << VarDecl::getStorageClassSpecifierString(SCAsWritten) << " ";
+
+ if (!Policy.SuppressSpecifiers && D->isThreadSpecified())
+ Out << "__thread ";
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+
+ std::string Name = D->getNameAsString();
+ QualType T = D->getType();
+ if (ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D))
+ T = Parm->getOriginalType();
+ T.getAsStringInternal(Name, Policy);
+ Out << Name;
+ Expr *Init = D->getInit();
+ if (!Policy.SuppressInitializers && Init) {
+ bool ImplicitInit = false;
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ ImplicitInit = D->getInitStyle() == VarDecl::CallInit &&
+ Construct->getNumArgs() == 0 && !Construct->isListInitialization();
+ if (!ImplicitInit) {
+ if (D->getInitStyle() == VarDecl::CallInit)
+ Out << "(";
+ else if (D->getInitStyle() == VarDecl::CInit) {
+ Out << " = ";
+ }
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ if (D->getInitStyle() == VarDecl::CallInit)
+ Out << ")";
+ }
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+}
+
+void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ Out << "__asm (";
+ D->getAsmString()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ")";
+}
+
+void DeclPrinter::VisitImportDecl(ImportDecl *D) {
+ Out << "@__experimental_modules_import " << D->getImportedModule()->getFullModuleName()
+ << ";\n";
+}
+
+void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ Out << "static_assert(";
+ D->getAssertExpr()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ", ";
+ D->getMessage()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ")";
+}
+
+//----------------------------------------------------------------------------
+// C++ declarations
+//----------------------------------------------------------------------------
+void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
+ Out << "namespace " << *D << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+}
+
+void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ Out << "using namespace ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << *D->getNominatedNamespaceAsWritten();
+}
+
+void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ Out << "namespace " << *D << " = ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << *D->getAliasedNamespace();
+}
+
+void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << D->getKindName();
+ if (D->getIdentifier())
+ Out << ' ' << *D;
+
+ if (D->isCompleteDefinition()) {
+ // Print the base classes
+ if (D->getNumBases()) {
+ Out << " : ";
+ for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(),
+ BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) {
+ if (Base != D->bases_begin())
+ Out << ", ";
+
+ if (Base->isVirtual())
+ Out << "virtual ";
+
+ AccessSpecifier AS = Base->getAccessSpecifierAsWritten();
+ if (AS != AS_none)
+ Print(AS);
+ Out << " " << Base->getType().getAsString(Policy);
+
+ if (Base->isPackExpansion())
+ Out << "...";
+ }
+ }
+
+ // Print the class definition
+ // FIXME: Doesn't print access specifiers, e.g., "public:"
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ const char *l;
+ if (D->getLanguage() == LinkageSpecDecl::lang_c)
+ l = "C";
+ else {
+ assert(D->getLanguage() == LinkageSpecDecl::lang_cxx &&
+ "unknown language in linkage specification");
+ l = "C++";
+ }
+
+ Out << "extern \"" << l << "\" ";
+ if (D->hasBraces()) {
+ Out << "{\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ } else
+ Visit(*D->decls_begin());
+}
+
+void DeclPrinter::PrintTemplateParameters(
+ const TemplateParameterList *Params, const TemplateArgumentList *Args = 0) {
+ assert(Params);
+ assert(!Args || Params->size() == Args->size());
+
+ Out << "template <";
+
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ if (i != 0)
+ Out << ", ";
+
+ const Decl *Param = Params->getParam(i);
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(Param)) {
+
+ if (TTP->wasDeclaredWithTypename())
+ Out << "typename ";
+ else
+ Out << "class ";
+
+ if (TTP->isParameterPack())
+ Out << "... ";
+
+ Out << *TTP;
+
+ if (Args) {
+ Out << " = ";
+ Args->get(i).print(Policy, Out);
+ } else if (TTP->hasDefaultArgument()) {
+ Out << " = ";
+ Out << TTP->getDefaultArgument().getAsString(Policy);
+ };
+ } else if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Out << NTTP->getType().getAsString(Policy);
+
+ if (NTTP->isParameterPack() && !isa<PackExpansionType>(NTTP->getType()))
+ Out << "...";
+
+ if (IdentifierInfo *Name = NTTP->getIdentifier()) {
+ Out << ' ';
+ Out << Name->getName();
+ }
+
+ if (Args) {
+ Out << " = ";
+ Args->get(i).print(Policy, Out);
+ } else if (NTTP->hasDefaultArgument()) {
+ Out << " = ";
+ NTTP->getDefaultArgument()->printPretty(Out, Context, 0, Policy,
+ Indentation);
+ }
+ } else if (const TemplateTemplateParmDecl *TTPD =
+ dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ VisitTemplateDecl(TTPD);
+ // FIXME: print the default argument, if present.
+ }
+ }
+
+ Out << "> ";
+}
+
+void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
+ PrintTemplateParameters(D->getTemplateParameters());
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ Out << "class ";
+ if (TTP->isParameterPack())
+ Out << "...";
+ Out << D->getName();
+ } else {
+ Visit(D->getTemplatedDecl());
+ }
+}
+
+void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ if (PrintInstantiation) {
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
+ I != E; ++I) {
+ PrintTemplateParameters(Params, (*I)->getTemplateSpecializationArgs());
+ Visit(*I);
+ }
+ }
+
+ return VisitRedeclarableTemplateDecl(D);
+}
+
+void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ if (PrintInstantiation) {
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (ClassTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
+ I != E; ++I) {
+ PrintTemplateParameters(Params, &(*I)->getTemplateArgs());
+ Visit(*I);
+ Out << '\n';
+ }
+ }
+
+ return VisitRedeclarableTemplateDecl(D);
+}
+
+//----------------------------------------------------------------------------
+// Objective-C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
+ if (OMD->isInstanceMethod())
+ Out << "- ";
+ else
+ Out << "+ ";
+ if (!OMD->getResultType().isNull())
+ Out << '(' << OMD->getResultType().getAsString(Policy) << ")";
+
+ std::string name = OMD->getSelector().getAsString();
+ std::string::size_type pos, lastPos = 0;
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ // FIXME: selector is missing here!
+ pos = name.find_first_of(':', lastPos);
+ Out << " " << name.substr(lastPos, pos - lastPos);
+ Out << ":(" << (*PI)->getType().getAsString(Policy) << ')' << **PI;
+ lastPos = pos + 1;
+ }
+
+ if (OMD->param_begin() == OMD->param_end())
+ Out << " " << name;
+
+ if (OMD->isVariadic())
+ Out << ", ...";
+
+ if (OMD->getBody()) {
+ Out << ' ';
+ OMD->getBody()->printPretty(Out, Context, 0, Policy);
+ Out << '\n';
+ }
+}
+
+void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (SID)
+ Out << "@implementation " << I << " : " << *SID;
+ else
+ Out << "@implementation " << I;
+ Out << "\n";
+ VisitDeclContext(OID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (!OID->isThisDeclarationADefinition()) {
+ Out << "@class " << I << ";";
+ return;
+ }
+
+ if (SID)
+ Out << "@interface " << I << " : " << *SID;
+ else
+ Out << "@interface " << I;
+
+ // Protocols?
+ const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols();
+ if (!Protocols.empty()) {
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ Out << (I == Protocols.begin() ? '<' : ',') << **I;
+ }
+
+ if (!Protocols.empty())
+ Out << "> ";
+
+ if (OID->ivar_size() > 0) {
+ Out << "{\n";
+ Indentation += Policy.Indentation;
+ for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
+ E = OID->ivar_end(); I != E; ++I) {
+ Indent() << (*I)->getType().getAsString(Policy) << ' ' << **I << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ Out << "}\n";
+ }
+
+ VisitDeclContext(OID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
+ if (!PID->isThisDeclarationADefinition()) {
+ Out << "@protocol " << PID->getIdentifier() << ";\n";
+ return;
+ }
+
+ Out << "@protocol " << *PID << '\n';
+ VisitDeclContext(PID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
+ Out << "@implementation " << *PID->getClassInterface() << '(' << *PID <<")\n";
+
+ VisitDeclContext(PID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
+ Out << "@interface " << *PID->getClassInterface() << '(' << *PID << ")\n";
+ VisitDeclContext(PID, false);
+ Out << "@end";
+
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) {
+ Out << "@compatibility_alias " << *AID
+ << ' ' << *AID->getClassInterface() << ";\n";
+}
+
+/// VisitObjCPropertyDecl - print a property declaration.
+///
+void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
+ if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required)
+ Out << "@required\n";
+ else if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ Out << "@optional\n";
+
+ Out << "@property";
+ if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
+ bool first = true;
+ Out << " (";
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readonly) {
+ Out << (first ? ' ' : ',') << "readonly";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ Out << (first ? ' ' : ',') << "getter = "
+ << PDecl->getGetterName().getAsString();
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ Out << (first ? ' ' : ',') << "setter = "
+ << PDecl->getSetterName().getAsString();
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
+ Out << (first ? ' ' : ',') << "assign";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readwrite) {
+ Out << (first ? ' ' : ',') << "readwrite";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
+ Out << (first ? ' ' : ',') << "retain";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
+ Out << (first ? ' ' : ',') << "strong";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
+ Out << (first ? ' ' : ',') << "copy";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ Out << (first ? ' ' : ',') << "nonatomic";
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_atomic) {
+ Out << (first ? ' ' : ',') << "atomic";
+ first = false;
+ }
+
+ (void) first; // Silence dead store warning due to idiomatic code.
+ Out << " )";
+ }
+ Out << ' ' << PDecl->getType().getAsString(Policy) << ' ' << *PDecl;
+}
+
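+// Illustrative example (approximate output): a property declared as
+// "@property (nonatomic, retain) NSString *name;" is printed by the visitor
+// above roughly as
+//
+//   @property ( retain,nonatomic ) NSString * name
+//
+// with the attribute order following the flag checks above, not the source.
+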
+void DeclPrinter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *PID) {
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
+ Out << "@synthesize ";
+ else
+ Out << "@dynamic ";
+ Out << *PID->getPropertyDecl();
+ if (PID->getPropertyIvarDecl())
+ Out << '=' << *PID->getPropertyIvarDecl();
+}
+
+void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
+ Out << "using ";
+ D->getQualifier()->print(Out, Policy);
+ Out << *D;
+}
+
+void
+DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
+ Out << "using typename ";
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getDeclName();
+}
+
+void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ Out << "using ";
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getDeclName();
+}
+
+void DeclPrinter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ // ignore
+}
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
new file mode 100644
index 0000000..4590195
--- /dev/null
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -0,0 +1,872 @@
+//===--- DeclTemplate.cpp - Template Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes for templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TemplateParameterList Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ NamedDecl **Params, unsigned NumParams,
+ SourceLocation RAngleLoc)
+ : TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
+ NumParams(NumParams) {
+ for (unsigned Idx = 0; Idx < NumParams; ++Idx)
+ begin()[Idx] = Params[Idx];
+}
+
+TemplateParameterList *
+TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc, NamedDecl **Params,
+ unsigned NumParams, SourceLocation RAngleLoc) {
+ unsigned Size = sizeof(TemplateParameterList)
+ + sizeof(NamedDecl *) * NumParams;
+ unsigned Align = llvm::AlignOf<TemplateParameterList>::Alignment;
+ void *Mem = C.Allocate(Size, Align);
+ return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
+ NumParams, RAngleLoc);
+}
+
+unsigned TemplateParameterList::getMinRequiredArguments() const {
+ unsigned NumRequiredArgs = 0;
+ for (iterator P = const_cast<TemplateParameterList *>(this)->begin(),
+ PEnd = const_cast<TemplateParameterList *>(this)->end();
+ P != PEnd; ++P) {
+ if ((*P)->isTemplateParameterPack()) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ if (NTTP->isExpandedParameterPack()) {
+ NumRequiredArgs += NTTP->getNumExpansionTypes();
+ continue;
+ }
+
+ break;
+ }
+
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (TTP->hasDefaultArgument())
+ break;
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->hasDefaultArgument())
+ break;
+ } else if (cast<TemplateTemplateParmDecl>(*P)->hasDefaultArgument())
+ break;
+
+ ++NumRequiredArgs;
+ }
+
+ return NumRequiredArgs;
+}
+
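+// Illustrative example: for "template<typename T, typename U = int,
+// typename ...Ts> class X;" the walk above stops at the first default
+// argument (and at an unexpanded parameter pack), so the minimum number of
+// required template arguments is 1.
+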
+unsigned TemplateParameterList::getDepth() const {
+ if (size() == 0)
+ return 0;
+
+ const NamedDecl *FirstParm = getParam(0);
+ if (const TemplateTypeParmDecl *TTP
+ = dyn_cast<TemplateTypeParmDecl>(FirstParm))
+ return TTP->getDepth();
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(FirstParm))
+ return NTTP->getDepth();
+ else
+ return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth();
+}
+
+static void AdoptTemplateParameterList(TemplateParameterList *Params,
+ DeclContext *Owner) {
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ (*P)->setDeclContext(Owner);
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(*P))
+ AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// RedeclarableTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() {
+ if (!Common) {
+ // Walk the previous-declaration chain until we either find a declaration
+ // with a common pointer or we run out of previous declarations.
+ llvm::SmallVector<RedeclarableTemplateDecl *, 2> PrevDecls;
+ for (RedeclarableTemplateDecl *Prev = getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl()) {
+ if (Prev->Common) {
+ Common = Prev->Common;
+ break;
+ }
+
+ PrevDecls.push_back(Prev);
+ }
+
+ // If we never found a common pointer, allocate one now.
+ if (!Common) {
+ // FIXME: If any of the declarations is from an AST file, we probably
+ // need an update record to add the common data.
+
+ Common = newCommon(getASTContext());
+ }
+
+ // Update any previous declarations we saw with the common pointer.
+ for (unsigned I = 0, N = PrevDecls.size(); I != N; ++I)
+ PrevDecls[I]->Common = Common;
+ }
+
+ return Common;
+}
+
+template <class EntryType>
+typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType*
+RedeclarableTemplateDecl::findSpecializationImpl(
+ llvm::FoldingSet<EntryType> &Specs,
+ const TemplateArgument *Args, unsigned NumArgs,
+ void *&InsertPos) {
+ typedef SpecEntryTraits<EntryType> SETraits;
+ llvm::FoldingSetNodeID ID;
+ EntryType::Profile(ID, Args, NumArgs, getASTContext());
+ EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos);
+ return Entry ? SETraits::getMostRecentDecl(Entry) : 0;
+}
+
+/// \brief Generate the injected template arguments for the given template
+/// parameter list, e.g., for the injected-class-name of a class template.
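+/// For example, for 'template<typename T, int N, typename ...Ts>' this
+/// produces the argument list <T, N, Ts...>, with each parameter pack turned
+/// into a pack expansion argument.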
+static void GenerateInjectedTemplateArgs(ASTContext &Context,
+ TemplateParameterList *Params,
+ TemplateArgument *Args) {
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ TemplateArgument Arg;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ QualType ArgType = Context.getTypeDeclType(TTP);
+ if (TTP->isParameterPack())
+ ArgType = Context.getPackExpansionType(ArgType,
+ llvm::Optional<unsigned>());
+
+ Arg = TemplateArgument(ArgType);
+ } else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false,
+ NTTP->getType().getNonLValueExprType(Context),
+ Expr::getValueKindForType(NTTP->getType()),
+ NTTP->getLocation());
+
+ if (NTTP->isParameterPack())
+ E = new (Context) PackExpansionExpr(Context.DependentTy, E,
+ NTTP->getLocation(),
+ llvm::Optional<unsigned>());
+ Arg = TemplateArgument(E);
+ } else {
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param);
+ if (TTP->isParameterPack())
+ Arg = TemplateArgument(TemplateName(TTP), llvm::Optional<unsigned>());
+ else
+ Arg = TemplateArgument(TemplateName(TTP));
+ }
+
+ if ((*Param)->isTemplateParameterPack())
+ Arg = TemplateArgument::CreatePackCopy(Context, &Arg, 1);
+
+ *Args++ = Arg;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
+}
+
+FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionTemplateDecl));
+ return new (Mem) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
+}
+
+RedeclarableTemplateDecl::CommonBase *
+FunctionTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+FunctionDecl *
+FunctionTemplateDecl::findSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs, void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos);
+}
+
+void FunctionTemplateDecl::addSpecialization(
+ FunctionTemplateSpecializationInfo *Info, void *InsertPos) {
+ if (InsertPos)
+ getSpecializations().InsertNode(Info, InsertPos);
+ else
+ getSpecializations().GetOrInsertNode(Info);
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, Info->Function);
+}
+
+std::pair<const TemplateArgument *, unsigned>
+FunctionTemplateDecl::getInjectedTemplateArgs() {
+ TemplateParameterList *Params = getTemplateParameters();
+ Common *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedArgs) {
+ CommonPtr->InjectedArgs
+ = new (getASTContext()) TemplateArgument [Params->size()];
+ GenerateInjectedTemplateArgs(getASTContext(), Params,
+ CommonPtr->InjectedArgs);
+ }
+
+ return std::make_pair(CommonPtr->InjectedArgs, Params->size());
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl,
+ ClassTemplateDecl *PrevDecl) {
+ AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl);
+ New->setPreviousDeclaration(PrevDecl);
+ return New;
+}
+
+ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ClassTemplateDecl));
+ return new (Mem) ClassTemplateDecl(EmptyShell());
+}
+
+void ClassTemplateDecl::LoadLazySpecializations() {
+ Common *CommonPtr = getCommonPtr();
+ if (CommonPtr->LazySpecializations) {
+ ASTContext &Context = getASTContext();
+ uint32_t *Specs = CommonPtr->LazySpecializations;
+ CommonPtr->LazySpecializations = 0;
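+ // The first entry of the lazy-specialization array is the number of
+ // specializations; the remaining entries are external declaration IDs to
+ // load from the external AST source.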
+ for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ (void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
+ }
+}
+
+llvm::FoldingSet<ClassTemplateSpecializationDecl> &
+ClassTemplateDecl::getSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->Specializations;
+}
+
+llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
+ClassTemplateDecl::getPartialSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->PartialSpecializations;
+}
+
+RedeclarableTemplateDecl::CommonBase *
+ClassTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateDecl::findSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs, void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos);
+}
+
+void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D,
+ void *InsertPos) {
+ if (InsertPos)
+ getSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplateSpecializationDecl *Existing
+ = getSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs,
+ void *&InsertPos) {
+ return findSpecializationImpl(getPartialSpecializations(), Args, NumArgs,
+ InsertPos);
+}
+
+void ClassTemplateDecl::AddPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *D,
+ void *InsertPos) {
+ if (InsertPos)
+ getPartialSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplatePartialSpecializationDecl *Existing
+ = getPartialSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
+void ClassTemplateDecl::getPartialSpecializations(
+ SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
+ llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs
+ = getPartialSpecializations();
+ PS.clear();
+ PS.resize(PartialSpecs.size());
+ for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
+ P != PEnd; ++P) {
+ assert(!PS[P->getSequenceNumber()]);
+ PS[P->getSequenceNumber()] = P->getMostRecentDecl();
+ }
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecialization(QualType T) {
+ ASTContext &Context = getASTContext();
+ typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ partial_spec_iterator;
+ for (partial_spec_iterator P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (Context.hasSameType(P->getInjectedSpecializationType(), T))
+ return P->getMostRecentDecl();
+ }
+
+ return 0;
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
+ ClassTemplatePartialSpecializationDecl *D) {
+ Decl *DCanon = D->getCanonicalDecl();
+ for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
+ return P->getMostRecentDecl();
+ }
+
+ return 0;
+}
+
+QualType
+ClassTemplateDecl::getInjectedClassNameSpecialization() {
+ Common *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedClassNameType.isNull())
+ return CommonPtr->InjectedClassNameType;
+
+ // C++0x [temp.dep.type]p2:
+ // The template argument list of a primary template is a template argument
+ // list in which the nth template argument has the value of the nth template
+ // parameter of the class template. If the nth template parameter is a
+ // template parameter pack (14.5.3), the nth template argument is a pack
+ // expansion (14.5.3) whose pattern is the name of the template parameter
+ // pack.
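+ //
+ // For example, for 'template<typename T, typename ...Ts> struct X', the
+ // injected-class-name specialization is 'X<T, Ts...>'.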
+ ASTContext &Context = getASTContext();
+ TemplateParameterList *Params = getTemplateParameters();
+ SmallVector<TemplateArgument, 16> TemplateArgs;
+ TemplateArgs.resize(Params->size());
+ GenerateInjectedTemplateArgs(getASTContext(), Params, TemplateArgs.data());
+ CommonPtr->InjectedClassNameType
+ = Context.getTemplateSpecializationType(TemplateName(this),
+ &TemplateArgs[0],
+ TemplateArgs.size());
+ return CommonPtr->InjectedClassNameType;
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateTypeParm Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation KeyLoc, SourceLocation NameLoc,
+ unsigned D, unsigned P, IdentifierInfo *Id,
+ bool Typename, bool ParameterPack) {
+ TemplateTypeParmDecl *TTPDecl =
+ new (C) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
+ QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
+ TTPDecl->TypeForDecl = TTPType.getTypePtr();
+ return TTPDecl;
+}
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTypeParmDecl));
+ return new (Mem) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
+ 0, false);
+}
+
+SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument()
+ ? DefaultArgument->getTypeLoc().getBeginLoc()
+ : SourceLocation();
+}
+
+SourceRange TemplateTypeParmDecl::getSourceRange() const {
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ return SourceRange(getLocStart(),
+ DefaultArgument->getTypeLoc().getEndLoc());
+ else
+ return TypeDecl::getSourceRange();
+}
+
+unsigned TemplateTypeParmDecl::getDepth() const {
+ return TypeForDecl->getAs<TemplateTypeParmType>()->getDepth();
+}
+
+unsigned TemplateTypeParmDecl::getIndex() const {
+ return TypeForDecl->getAs<TemplateTypeParmType>()->getIndex();
+}
+
+bool TemplateTypeParmDecl::isParameterPack() const {
+ return TypeForDecl->getAs<TemplateTypeParmType>()->isParameterPack();
+}
+
+//===----------------------------------------------------------------------===//
+// NonTypeTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id,
+ QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos)
+ : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc),
+ TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false),
+ ParameterPack(true), ExpandedParameterPack(true),
+ NumExpandedTypes(NumExpandedTypes)
+{
+ if (ExpandedTypes && ExpandedTInfos) {
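+ // Expanded types and their TypeSourceInfos are stored interleaved in the
+ // trailing storage: [type0, info0, type1, info1, ...].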
+ void **TypesAndInfos = reinterpret_cast<void **>(this + 1);
+ for (unsigned I = 0; I != NumExpandedTypes; ++I) {
+ TypesAndInfos[2*I] = ExpandedTypes[I].getAsOpaquePtr();
+ TypesAndInfos[2*I + 1] = ExpandedTInfos[I];
+ }
+ }
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ unsigned D, unsigned P, IdentifierInfo *Id,
+ QualType T, bool ParameterPack,
+ TypeSourceInfo *TInfo) {
+ return new (C) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
+ T, ParameterPack, TInfo);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos) {
+ unsigned Size = sizeof(NonTypeTemplateParmDecl)
+ + NumExpandedTypes * 2 * sizeof(void*);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc,
+ D, P, Id, T, TInfo,
+ ExpandedTypes, NumExpandedTypes,
+ ExpandedTInfos);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NonTypeTemplateParmDecl));
+ return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), false, 0);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpandedTypes) {
+ unsigned Size = sizeof(NonTypeTemplateParmDecl)
+ + NumExpandedTypes * 2 * sizeof(void*);
+
+ void *Mem = AllocateDeserializedDecl(C, ID, Size);
+ return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), 0, 0, NumExpandedTypes,
+ 0);
+}
+
+SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ return SourceRange(getOuterLocStart(),
+ getDefaultArgument()->getSourceRange().getEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument()
+ ? getDefaultArgument()->getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TemplateTemplateParmDecl::anchor() { }
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ bool ParameterPack, IdentifierInfo *Id,
+ TemplateParameterList *Params) {
+ return new (C) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
+ Params);
+}
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl));
+ return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
+ 0, 0);
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentList Implementation
+//===----------------------------------------------------------------------===//
+TemplateArgumentList *
+TemplateArgumentList::CreateCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ std::size_t Size = sizeof(TemplateArgumentList)
+ + NumArgs * sizeof(TemplateArgument);
+ void *Mem = Context.Allocate(Size);
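+ // The copied arguments live in trailing storage immediately after the
+ // TemplateArgumentList object itself.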
+ TemplateArgument *StoredArgs
+ = reinterpret_cast<TemplateArgument *>(
+ static_cast<TemplateArgumentList *>(Mem) + 1);
+ std::uninitialized_copy(Args, Args + NumArgs, StoredArgs);
+ return new (Mem) TemplateArgumentList(StoredArgs, NumArgs, true);
+}
+
+FunctionTemplateSpecializationInfo *
+FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD,
+ FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI) {
+ const ASTTemplateArgumentListInfo *ArgsAsWritten = 0;
+ if (TemplateArgsAsWritten)
+ ArgsAsWritten = ASTTemplateArgumentListInfo::Create(C,
+ *TemplateArgsAsWritten);
+
+ return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK,
+ TemplateArgs,
+ ArgsAsWritten,
+ POI);
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void TemplateDecl::anchor() { }
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+ClassTemplateSpecializationDecl::
+ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl)
+ : CXXRecordDecl(DK, TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate->getIdentifier(),
+ PrevDecl),
+ SpecializedTemplate(SpecializedTemplate),
+ ExplicitInfo(0),
+ TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
+ SpecializationKind(TSK_Undeclared) {
+}
+
+ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(Kind DK)
+ : CXXRecordDecl(DK, TTK_Struct, 0, SourceLocation(), SourceLocation(), 0, 0),
+ ExplicitInfo(0),
+ SpecializationKind(TSK_Undeclared) {
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl) {
+ ClassTemplateSpecializationDecl *Result
+ = new (Context) ClassTemplateSpecializationDecl(Context,
+ ClassTemplateSpecialization,
+ TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate,
+ Args, NumArgs,
+ PrevDecl);
+ Context.getTypeDeclType(Result, PrevDecl);
+ return Result;
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassTemplateSpecializationDecl));
+ return new (Mem) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+}
+
+void
+ClassTemplateSpecializationDecl::getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(S, Policy, Qualified);
+
+ const TemplateArgumentList &TemplateArgs = getTemplateArgs();
+ S += TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ Policy);
+}
+
+ClassTemplateDecl *
+ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return PartialSpec->PartialSpecialization->getSpecializedTemplate();
+ return SpecializedTemplate.get<ClassTemplateDecl*>();
+}
+
+SourceRange
+ClassTemplateSpecializationDecl::getSourceRange() const {
+ if (ExplicitInfo) {
+ SourceLocation Begin = getExternLoc();
+ if (Begin.isInvalid())
+ Begin = getTemplateKeywordLoc();
+ SourceLocation End = getRBraceLoc();
+ if (End.isInvalid())
+ End = getTypeAsWritten()->getTypeLoc().getEndLoc();
+ return SourceRange(Begin, End);
+ }
+ else {
+ // No explicit info available.
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ inst_from = getInstantiatedFrom();
+ if (inst_from.isNull())
+ return getSpecializedTemplate()->getSourceRange();
+ if (ClassTemplateDecl *ctd = inst_from.dyn_cast<ClassTemplateDecl*>())
+ return ctd->getSourceRange();
+ return inst_from.get<ClassTemplatePartialSpecializationDecl*>()
+ ->getSourceRange();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplatePartialSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+void ClassTemplatePartialSpecializationDecl::anchor() { }
+
+ClassTemplatePartialSpecializationDecl::
+ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ TemplateArgumentLoc *ArgInfos,
+ unsigned NumArgInfos,
+ ClassTemplatePartialSpecializationDecl *PrevDecl,
+ unsigned SequenceNumber)
+ : ClassTemplateSpecializationDecl(Context,
+ ClassTemplatePartialSpecialization,
+ TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate,
+ Args, NumArgs, PrevDecl),
+ TemplateParams(Params), ArgsAsWritten(ArgInfos),
+ NumArgsAsWritten(NumArgInfos), SequenceNumber(SequenceNumber),
+ InstantiatedFromMember(0, false)
+{
+ AdoptTemplateParameterList(Params, this);
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::
+Create(ASTContext &Context, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const TemplateArgumentListInfo &ArgInfos,
+ QualType CanonInjectedType,
+ ClassTemplatePartialSpecializationDecl *PrevDecl,
+ unsigned SequenceNumber) {
+ unsigned N = ArgInfos.size();
+ TemplateArgumentLoc *ClonedArgs = new (Context) TemplateArgumentLoc[N];
+ for (unsigned I = 0; I != N; ++I)
+ ClonedArgs[I] = ArgInfos[I];
+
+ ClassTemplatePartialSpecializationDecl *Result
+ = new (Context) ClassTemplatePartialSpecializationDecl(Context, TK, DC,
+ StartLoc, IdLoc,
+ Params,
+ SpecializedTemplate,
+ Args, NumArgs,
+ ClonedArgs, N,
+ PrevDecl,
+ SequenceNumber);
+ Result->setSpecializationKind(TSK_ExplicitSpecialization);
+
+ Context.getInjectedClassNameType(Result, CanonInjectedType);
+ return Result;
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassTemplatePartialSpecializationDecl));
+ return new (Mem) ClassTemplatePartialSpecializationDecl();
+}
+
+//===----------------------------------------------------------------------===//
+// FriendTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FriendTemplateDecl::anchor() { }
+
+FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
+ DeclContext *DC,
+ SourceLocation L,
+ unsigned NParams,
+ TemplateParameterList **Params,
+ FriendUnion Friend,
+ SourceLocation FLoc) {
+ FriendTemplateDecl *Result
+ = new (Context) FriendTemplateDecl(DC, L, NParams, Params, Friend, FLoc);
+ return Result;
+}
+
+FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendTemplateDecl));
+ return new (Mem) FriendTemplateDecl(EmptyShell());
+}
+
+//===----------------------------------------------------------------------===//
+// TypeAliasTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ AdoptTemplateParameterList(Params, DC);
+ return new (C) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
+}
+
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasTemplateDecl));
+ return new (Mem) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
+}
+
+void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+RedeclarableTemplateDecl::CommonBase *
+TypeAliasTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+//===----------------------------------------------------------------------===//
+// ClassScopeFunctionSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassScopeFunctionSpecializationDecl::anchor() { }
+
+ClassScopeFunctionSpecializationDecl *
+ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassScopeFunctionSpecializationDecl));
+ return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0);
+}
diff --git a/clang/lib/AST/DeclarationName.cpp b/clang/lib/AST/DeclarationName.cpp
new file mode 100644
index 0000000..64924ad
--- /dev/null
+++ b/clang/lib/AST/DeclarationName.cpp
@@ -0,0 +1,627 @@
+//===-- DeclarationName.cpp - Declaration names implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeclarationName and DeclarationNameTable
+// classes.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace clang {
+/// CXXSpecialName - Records the type associated with one of the
+/// "special" kinds of declaration names in C++, e.g., constructors,
+/// destructors, and conversion functions.
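+/// For example, the destructor name '~X' records the class type 'X' here.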
+class CXXSpecialName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ /// Type - The type associated with this declaration name.
+ QualType Type;
+
+ /// FETokenInfo - Extra information associated with this declaration
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(ExtraKindOrNumArgs);
+ ID.AddPointer(Type.getAsOpaquePtr());
+ }
+};
+
+/// CXXOperatorIdName - Contains extra information for the name of an
+/// overloaded operator in C++, such as "operator+".
+class CXXOperatorIdName : public DeclarationNameExtra {
+public:
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+};
+
+/// CXXLiteralOperatorName - Contains the actual identifier that makes up the
+/// name.
+///
+/// This identifier is stored here rather than directly in DeclarationName so as
+/// to allow Objective-C selectors, which are about a million times more common,
+/// to consume minimal memory.
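+/// For example, 'operator "" _km' stores the identifier '_km' here.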
+class CXXLiteralOperatorIdName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ IdentifierInfo *ID;
+
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &FSID) {
+ FSID.AddPointer(ID);
+ }
+};
+
+static int compareInt(unsigned A, unsigned B) {
+ return (A < B ? -1 : (A > B ? 1 : 0));
+}
+
+int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
+ if (LHS.getNameKind() != RHS.getNameKind())
+ return (LHS.getNameKind() < RHS.getNameKind() ? -1 : 1);
+
+ switch (LHS.getNameKind()) {
+ case DeclarationName::Identifier: {
+ IdentifierInfo *LII = LHS.getAsIdentifierInfo();
+ IdentifierInfo *RII = RHS.getAsIdentifierInfo();
+ if (!LII) return RII ? -1 : 0;
+ if (!RII) return 1;
+
+ return LII->getName().compare(RII->getName());
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector: {
+ Selector LHSSelector = LHS.getObjCSelector();
+ Selector RHSSelector = RHS.getObjCSelector();
+ unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
+ for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
+ switch (LHSSelector.getNameForSlot(I).compare(
+ RHSSelector.getNameForSlot(I))) {
+ case -1: return -1;
+ case 1: return 1;
+ default: break;
+ }
+ }
+
+ return compareInt(LN, RN);
+ }
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (QualTypeOrdering()(LHS.getCXXNameType(), RHS.getCXXNameType()))
+ return -1;
+ if (QualTypeOrdering()(RHS.getCXXNameType(), LHS.getCXXNameType()))
+ return 1;
+ return 0;
+
+ case DeclarationName::CXXOperatorName:
+ return compareInt(LHS.getCXXOverloadedOperator(),
+ RHS.getCXXOverloadedOperator());
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return LHS.getCXXLiteralIdentifier()->getName().compare(
+ RHS.getCXXLiteralIdentifier()->getName());
+
+ case DeclarationName::CXXUsingDirective:
+ return 0;
+ }
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
+}
+
+} // end namespace clang
+
+DeclarationName::DeclarationName(Selector Sel) {
+ if (!Sel.getAsOpaquePtr()) {
+ Ptr = 0;
+ return;
+ }
+
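+ // Selectors are stored with a discriminator in the low pointer bits: zero-
+ // and one-argument selectors reuse the IdentifierInfo pointer directly,
+ // while multi-argument selectors point at a MultiKeywordSelector.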
+ switch (Sel.getNumArgs()) {
+ case 0:
+ Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ Ptr |= StoredObjCZeroArgSelector;
+ break;
+
+ case 1:
+ Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ Ptr |= StoredObjCOneArgSelector;
+ break;
+
+ default:
+ Ptr = Sel.InfoPtr & ~Selector::ArgFlags;
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned MultiKeywordSelector");
+ Ptr |= StoredDeclarationNameExtra;
+ break;
+ }
+}
+
+DeclarationName::NameKind DeclarationName::getNameKind() const {
+ switch (getStoredNameKind()) {
+ case StoredIdentifier: return Identifier;
+ case StoredObjCZeroArgSelector: return ObjCZeroArgSelector;
+ case StoredObjCOneArgSelector: return ObjCOneArgSelector;
+
+ case StoredDeclarationNameExtra:
+ switch (getExtra()->ExtraKindOrNumArgs) {
+ case DeclarationNameExtra::CXXConstructor:
+ return CXXConstructorName;
+
+ case DeclarationNameExtra::CXXDestructor:
+ return CXXDestructorName;
+
+ case DeclarationNameExtra::CXXConversionFunction:
+ return CXXConversionFunctionName;
+
+ case DeclarationNameExtra::CXXLiteralOperator:
+ return CXXLiteralOperatorName;
+
+ case DeclarationNameExtra::CXXUsingDirective:
+ return CXXUsingDirective;
+
+ default:
+ // Check if we have one of the CXXOperator* enumeration values.
+ if (getExtra()->ExtraKindOrNumArgs <
+ DeclarationNameExtra::CXXUsingDirective)
+ return CXXOperatorName;
+
+ return ObjCMultiArgSelector;
+ }
+ }
+
+ // Can't actually get here.
+ llvm_unreachable("This should be unreachable!");
+}
+
+bool DeclarationName::isDependentName() const {
+ QualType T = getCXXNameType();
+ return !T.isNull() && T->isDependentType();
+}
+
+std::string DeclarationName::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ printName(OS);
+ return OS.str();
+}
+
+void DeclarationName::printName(raw_ostream &OS) const {
+ switch (getNameKind()) {
+ case Identifier:
+ if (const IdentifierInfo *II = getAsIdentifierInfo())
+ OS << II->getName();
+ return;
+
+ case ObjCZeroArgSelector:
+ case ObjCOneArgSelector:
+ case ObjCMultiArgSelector:
+ OS << getObjCSelector().getAsString();
+ return;
+
+ case CXXConstructorName: {
+ QualType ClassType = getCXXNameType();
+ if (const RecordType *ClassRec = ClassType->getAs<RecordType>())
+ OS << *ClassRec->getDecl();
+ else
+ OS << ClassType.getAsString();
+ return;
+ }
+
+ case CXXDestructorName: {
+ OS << '~';
+ QualType Type = getCXXNameType();
+ if (const RecordType *Rec = Type->getAs<RecordType>())
+ OS << *Rec->getDecl();
+ else
+ OS << Type.getAsString();
+ return;
+ }
+
+ case CXXOperatorName: {
+ static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
+ 0,
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+ const char *OpName = OperatorNames[getCXXOverloadedOperator()];
+ assert(OpName && "not an overloaded operator");
+
+ OS << "operator";
+ if (OpName[0] >= 'a' && OpName[0] <= 'z')
+ OS << ' ';
+ OS << OpName;
+ return;
+ }
+
+ case CXXLiteralOperatorName:
+ OS << "operator \"\" " << getCXXLiteralIdentifier()->getName();
+ return;
+
+ case CXXConversionFunctionName: {
+ OS << "operator ";
+ QualType Type = getCXXNameType();
+ if (const RecordType *Rec = Type->getAs<RecordType>())
+ OS << *Rec->getDecl();
+ else
+ OS << Type.getAsString();
+ return;
+ }
+ case CXXUsingDirective:
+ OS << "<using-directive>";
+ return;
+ }
+
+ llvm_unreachable("Unexpected declaration name kind");
+}
+
+QualType DeclarationName::getCXXNameType() const {
+ if (CXXSpecialName *CXXName = getAsCXXSpecialName())
+ return CXXName->Type;
+ else
+ return QualType();
+}
+
+OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
+ if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
+ unsigned value
+ = CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction;
+ return static_cast<OverloadedOperatorKind>(value);
+ } else {
+ return OO_None;
+ }
+}
+
+IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const {
+ if (CXXLiteralOperatorIdName *CXXLit = getAsCXXLiteralOperatorIdName())
+ return CXXLit->ID;
+ else
+ return 0;
+}
+
+Selector DeclarationName::getObjCSelector() const {
+ switch (getNameKind()) {
+ case ObjCZeroArgSelector:
+ return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 0);
+
+ case ObjCOneArgSelector:
+ return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 1);
+
+ case ObjCMultiArgSelector:
+ return Selector(reinterpret_cast<MultiKeywordSelector *>(Ptr & ~PtrMask));
+
+ default:
+ break;
+ }
+
+ return Selector();
+}
+
+void *DeclarationName::getFETokenInfoAsVoid() const {
+ switch (getNameKind()) {
+ case Identifier:
+ return getAsIdentifierInfo()->getFETokenInfo<void>();
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ return getAsCXXSpecialName()->FETokenInfo;
+
+ case CXXOperatorName:
+ return getAsCXXOperatorIdName()->FETokenInfo;
+
+ case CXXLiteralOperatorName:
+ return getAsCXXLiteralOperatorIdName()->FETokenInfo;
+
+ default:
+ llvm_unreachable("Declaration name has no FETokenInfo");
+ }
+}
+
+void DeclarationName::setFETokenInfo(void *T) {
+ switch (getNameKind()) {
+ case Identifier:
+ getAsIdentifierInfo()->setFETokenInfo(T);
+ break;
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ getAsCXXSpecialName()->FETokenInfo = T;
+ break;
+
+ case CXXOperatorName:
+ getAsCXXOperatorIdName()->FETokenInfo = T;
+ break;
+
+ case CXXLiteralOperatorName:
+ getAsCXXLiteralOperatorIdName()->FETokenInfo = T;
+ break;
+
+ default:
+ llvm_unreachable("Declaration name has no FETokenInfo");
+ }
+}
+
+DeclarationName DeclarationName::getUsingDirectiveName() {
+ // Single instance of DeclarationNameExtra for using-directive
+ static const DeclarationNameExtra UDirExtra =
+ { DeclarationNameExtra::CXXUsingDirective };
+
+ uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra);
+ Ptr |= StoredDeclarationNameExtra;
+
+ return DeclarationName(Ptr);
+}
+
+void DeclarationName::dump() const {
+ printName(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
+ CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
+ CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
+
+ // Initialize the overloaded operator names.
+ CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
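+ // Each operator's ExtraKindOrNumArgs is its OverloadedOperatorKind offset by
+ // CXXConversionFunction; getCXXOverloadedOperator() undoes this offset.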
+ for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
+ CXXOperatorNames[Op].ExtraKindOrNumArgs
+ = Op + DeclarationNameExtra::CXXConversionFunction;
+ CXXOperatorNames[Op].FETokenInfo = 0;
+ }
+}
+
+DeclarationNameTable::~DeclarationNameTable() {
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames =
+ static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
+
+ delete SpecialNames;
+ delete LiteralNames;
+}
+
+DeclarationName
+DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
+ CanQualType Ty) {
+ assert(Kind >= DeclarationName::CXXConstructorName &&
+ Kind <= DeclarationName::CXXConversionFunctionName &&
+ "Kind must be a C++ special name kind");
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames
+ = static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+
+ DeclarationNameExtra::ExtraKind EKind;
+ switch (Kind) {
+ case DeclarationName::CXXConstructorName:
+ EKind = DeclarationNameExtra::CXXConstructor;
+ assert(!Ty.hasQualifiers() && "Constructor type must be unqualified");
+ break;
+ case DeclarationName::CXXDestructorName:
+ EKind = DeclarationNameExtra::CXXDestructor;
+ assert(!Ty.hasQualifiers() && "Destructor type must be unqualified");
+ break;
+ case DeclarationName::CXXConversionFunctionName:
+ EKind = DeclarationNameExtra::CXXConversionFunction;
+ break;
+ default:
+ return DeclarationName();
+ }
+
+ // Unique this special name via its profile so that there is exactly one
+ // name per (kind, type) pair.
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(EKind);
+ ID.AddPointer(Ty.getAsOpaquePtr());
+
+ void *InsertPos = 0;
+ if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
+
+ CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
+ SpecialName->ExtraKindOrNumArgs = EKind;
+ SpecialName->Type = Ty;
+ SpecialName->FETokenInfo = 0;
+
+ SpecialNames->InsertNode(SpecialName, InsertPos);
+ return DeclarationName(SpecialName);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
+ return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(II);
+
+ void *InsertPos = 0;
+ if (CXXLiteralOperatorIdName *Name =
+ LiteralNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName (Name);
+
+ CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
+ LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
+ LiteralName->ID = II;
+ LiteralName->FETokenInfo = 0;
+
+ LiteralNames->InsertNode(LiteralName, InsertPos);
+ return DeclarationName(LiteralName);
+}
+
+unsigned
+llvm::DenseMapInfo<clang::DeclarationName>::
+getHashValue(clang::DeclarationName N) {
+ return DenseMapInfo<void*>::getHashValue(N.getAsOpaquePtr());
+}
+
+DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ NamedType.TInfo = 0;
+ break;
+ case DeclarationName::CXXOperatorName:
+ CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
+ CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ CXXLiteralOperatorName.OpNameLoc = SourceLocation().getRawEncoding();
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ // FIXME: ?
+ break;
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->containsUnexpandedParameterPack();
+
+ return Name.getCXXNameType()->containsUnexpandedParameterPack();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
+bool DeclarationNameInfo::isInstantiationDependent() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->isInstantiationDependentType();
+
+ return Name.getCXXNameType()->isInstantiationDependentType();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
+std::string DeclarationNameInfo::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ printName(OS);
+ return OS.str();
+}
+
+void DeclarationNameInfo::printName(raw_ostream &OS) const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ Name.printName(OS);
+ return;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) {
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName)
+ OS << '~';
+ else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
+ OS << "operator ";
+ OS << TInfo->getType().getAsString();
+ }
+ else
+ Name.printName(OS);
+ return;
+ }
+ llvm_unreachable("Unexpected declaration name kind");
+}
+
+SourceLocation DeclarationNameInfo::getEndLoc() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ return NameLoc;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned raw = LocInfo.CXXOperatorName.EndOpNameLoc;
+ return SourceLocation::getFromRawEncoding(raw);
+ }
+
+ case DeclarationName::CXXLiteralOperatorName: {
+ unsigned raw = LocInfo.CXXLiteralOperatorName.OpNameLoc;
+ return SourceLocation::getFromRawEncoding(raw);
+ }
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getTypeLoc().getEndLoc();
+ else
+ return NameLoc;
+
+ // DNInfo work in progress: FIXME.
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ return NameLoc;
+ }
+ llvm_unreachable("Unexpected declaration name kind");
+}
diff --git a/clang/lib/AST/DumpXML.cpp b/clang/lib/AST/DumpXML.cpp
new file mode 100644
index 0000000..4c7cd8a
--- /dev/null
+++ b/clang/lib/AST/DumpXML.cpp
@@ -0,0 +1,1040 @@
+//===--- DumpXML.cpp - Detailed XML dumping ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Decl::dumpXML() method, a debugging tool to
+// print a detailed graph of an AST in an unspecified XML format.
+//
+// There is no guarantee of stability for this format.
+//
+//===----------------------------------------------------------------------===//
+
+// Only pay for this in code size in assertions-enabled builds.
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+
+#ifndef NDEBUG
+
+namespace {
+
+enum NodeState {
+ NS_Attrs, NS_LazyChildren, NS_Children
+};
+
+struct Node {
+ StringRef Name;
+ NodeState State;
+ Node(StringRef name) : Name(name), State(NS_Attrs) {}
+
+ bool isDoneWithAttrs() const { return State != NS_Attrs; }
+};
+
+template <class Impl> struct XMLDeclVisitor {
+#define DISPATCH(NAME, CLASS) \
+ static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(D))
+
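+ // Each declaration is visited in three phases: attributes, then "header"
+ // children, then the declaration as a lexical context (see the FieldDecl
+ // walkthrough in the Declarations section below).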
+ void dispatch(Decl *D) {
+ switch (D->getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: \
+ DISPATCH(dispatch##DERIVED##DeclAttrs, DERIVED##Decl); \
+ static_cast<Impl*>(this)->completeAttrs(); \
+ DISPATCH(dispatch##DERIVED##DeclChildren, DERIVED##Decl); \
+ DISPATCH(dispatch##DERIVED##DeclAsContext, DERIVED##Decl); \
+ break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ }
+
+#define DECL(DERIVED, BASE) \
+ void dispatch##DERIVED##DeclAttrs(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##Attrs, BASE); \
+ DISPATCH(visit##DERIVED##DeclAttrs, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclAttrs(DERIVED##Decl *D) {} \
+ void dispatch##DERIVED##DeclChildren(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##Children, BASE); \
+ DISPATCH(visit##DERIVED##DeclChildren, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclChildren(DERIVED##Decl *D) {} \
+ void dispatch##DERIVED##DeclAsContext(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##AsContext, BASE); \
+ DISPATCH(visit##DERIVED##DeclAsContext, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclAsContext(DERIVED##Decl *D) {}
+#include "clang/AST/DeclNodes.inc"
+
+ void dispatchDeclAttrs(Decl *D) {
+ DISPATCH(visitDeclAttrs, Decl);
+ }
+ void visitDeclAttrs(Decl *D) {}
+
+ void dispatchDeclChildren(Decl *D) {
+ DISPATCH(visitDeclChildren, Decl);
+ }
+ void visitDeclChildren(Decl *D) {}
+
+ void dispatchDeclAsContext(Decl *D) {
+ DISPATCH(visitDeclAsContext, Decl);
+ }
+ void visitDeclAsContext(Decl *D) {}
+
+#undef DISPATCH
+};
+
+template <class Impl> struct XMLTypeVisitor {
+#define DISPATCH(NAME, CLASS) \
+ static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(T))
+
+ void dispatch(Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(DERIVED, BASE) \
+ case Type::DERIVED: \
+ DISPATCH(dispatch##DERIVED##TypeAttrs, DERIVED##Type); \
+ static_cast<Impl*>(this)->completeAttrs(); \
+ DISPATCH(dispatch##DERIVED##TypeChildren, DERIVED##Type); \
+ break;
+#define ABSTRACT_TYPE(DERIVED, BASE)
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+#define TYPE(DERIVED, BASE) \
+ void dispatch##DERIVED##TypeAttrs(DERIVED##Type *T) { \
+ DISPATCH(dispatch##BASE##Attrs, BASE); \
+ DISPATCH(visit##DERIVED##TypeAttrs, DERIVED##Type); \
+ } \
+ void visit##DERIVED##TypeAttrs(DERIVED##Type *T) {} \
+ void dispatch##DERIVED##TypeChildren(DERIVED##Type *T) { \
+ DISPATCH(dispatch##BASE##Children, BASE); \
+ DISPATCH(visit##DERIVED##TypeChildren, DERIVED##Type); \
+ } \
+ void visit##DERIVED##TypeChildren(DERIVED##Type *T) {}
+#include "clang/AST/TypeNodes.def"
+
+ void dispatchTypeAttrs(Type *T) {
+ DISPATCH(visitTypeAttrs, Type);
+ }
+ void visitTypeAttrs(Type *T) {}
+
+ void dispatchTypeChildren(Type *T) {
+ DISPATCH(visitTypeChildren, Type);
+ }
+ void visitTypeChildren(Type *T) {}
+
+#undef DISPATCH
+};
+
+static StringRef getTypeKindName(Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(DERIVED, BASE) case Type::DERIVED: return #DERIVED "Type";
+#define ABSTRACT_TYPE(DERIVED, BASE)
+#include "clang/AST/TypeNodes.def"
+ }
+
+ llvm_unreachable("unknown type kind!");
+}
+
+struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
+ public XMLTypeVisitor<XMLDumper> {
+ raw_ostream &out;
+ ASTContext &Context;
+ SmallVector<Node, 16> Stack;
+ unsigned Indent;
+ explicit XMLDumper(raw_ostream &OS, ASTContext &context)
+ : out(OS), Context(context), Indent(0) {}
+
+ void indent() {
+ for (unsigned I = Indent; I; --I)
+ out << ' ';
+ }
+
+ /// Push a new node on the stack.
+ void push(StringRef name) {
+ if (!Stack.empty()) {
+ assert(Stack.back().isDoneWithAttrs());
+ if (Stack.back().State == NS_LazyChildren) {
+ Stack.back().State = NS_Children;
+ out << ">\n";
+ }
+ Indent++;
+ indent();
+ }
+ Stack.push_back(Node(name));
+ out << '<' << name;
+ }
+
+ /// Set the given attribute to the given value.
+ void set(StringRef attr, StringRef value) {
+ assert(!Stack.empty() && !Stack.back().isDoneWithAttrs());
+ out << ' ' << attr << '=' << '"' << value << '"'; // TODO: quotation
+ }
+
+ /// Finish attributes.
+ void completeAttrs() {
+ assert(!Stack.empty() && !Stack.back().isDoneWithAttrs());
+ Stack.back().State = NS_LazyChildren;
+ }
+
+ /// Pop a node.
+ void pop() {
+ assert(!Stack.empty() && Stack.back().isDoneWithAttrs());
+ if (Stack.back().State == NS_LazyChildren) {
+ out << "/>\n";
+ } else {
+ indent();
+ out << "</" << Stack.back().Name << ">\n";
+ }
+ if (Stack.size() > 1) Indent--;
+ Stack.pop_back();
+ }
+
+ //---- General utilities -------------------------------------------//
+
+ void setPointer(StringRef prop, const void *p) {
+ SmallString<10> buffer;
+ llvm::raw_svector_ostream os(buffer);
+ os << p;
+ os.flush();
+ set(prop, buffer);
+ }
+
+ void setPointer(void *p) {
+ setPointer("ptr", p);
+ }
+
+ void setInteger(StringRef prop, const llvm::APSInt &v) {
+ set(prop, v.toString(10));
+ }
+
+ void setInteger(StringRef prop, unsigned n) {
+ SmallString<10> buffer;
+ llvm::raw_svector_ostream os(buffer);
+ os << n;
+ os.flush();
+ set(prop, buffer);
+ }
+
+ void setFlag(StringRef prop, bool flag) {
+ if (flag) set(prop, "true");
+ }
+
+ void setName(DeclarationName Name) {
+ if (!Name)
+ return set("name", "");
+
+ // Common case.
+ if (Name.isIdentifier())
+ return set("name", Name.getAsIdentifierInfo()->getName());
+
+ set("name", Name.getAsString());
+ }
+
+ class TemporaryContainer {
+ XMLDumper &Dumper;
+ public:
+ TemporaryContainer(XMLDumper &dumper, StringRef name)
+ : Dumper(dumper) {
+ Dumper.push(name);
+ Dumper.completeAttrs();
+ }
+
+ ~TemporaryContainer() {
+ Dumper.pop();
+ }
+ };
+
+ void visitTemplateParameters(TemplateParameterList *L) {
+ push("template_parameters");
+ completeAttrs();
+ for (TemplateParameterList::iterator
+ I = L->begin(), E = L->end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+ }
+
+ void visitTemplateArguments(const TemplateArgumentList &L) {
+ push("template_arguments");
+ completeAttrs();
+ for (unsigned I = 0, E = L.size(); I != E; ++I)
+ dispatch(L[I]);
+ pop();
+ }
+
+ /// Visits a reference to the given declaration.
+ void visitDeclRef(Decl *D) {
+ push(D->getDeclKindName());
+ setPointer("ref", D);
+ completeAttrs();
+ pop();
+ }
+ void visitDeclRef(StringRef Name, Decl *D) {
+ TemporaryContainer C(*this, Name);
+ if (D) visitDeclRef(D);
+ }
+
+ void dispatch(const TemplateArgument &A) {
+ switch (A.getKind()) {
+ case TemplateArgument::Null: {
+ TemporaryContainer C(*this, "null");
+ break;
+ }
+ case TemplateArgument::Type: {
+ dispatch(A.getAsType());
+ break;
+ }
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ // FIXME: Implement!
+ break;
+
+ case TemplateArgument::Declaration: {
+ if (Decl *D = A.getAsDecl())
+ visitDeclRef(D);
+ break;
+ }
+ case TemplateArgument::Integral: {
+ push("integer");
+ setInteger("value", *A.getAsIntegral());
+ completeAttrs();
+ pop();
+ break;
+ }
+ case TemplateArgument::Expression: {
+ dispatch(A.getAsExpr());
+ break;
+ }
+ case TemplateArgument::Pack: {
+ for (TemplateArgument::pack_iterator P = A.pack_begin(),
+ PEnd = A.pack_end();
+ P != PEnd; ++P)
+ dispatch(*P);
+ break;
+ }
+ }
+ }
+
+ void dispatch(const TemplateArgumentLoc &A) {
+ dispatch(A.getArgument());
+ }
+
+ //---- Declarations ------------------------------------------------//
+ // Calls are made in this order:
+ // # Enter a new node.
+ // push("FieldDecl")
+ //
+ // # In this phase, attributes are set on the node.
+ // visitDeclAttrs(D)
+ // visitNamedDeclAttrs(D)
+ // ...
+ // visitFieldDeclAttrs(D)
+ //
+ // # No more attributes after this point.
+ // completeAttrs()
+ //
+ // # Create "header" child nodes, i.e. those which logically
+ // # belong to the declaration itself.
+ // visitDeclChildren(D)
+ // visitNamedDeclChildren(D)
+ // ...
+ // visitFieldDeclChildren(D)
+ //
+ // # Create nodes for the lexical children.
+ // visitDeclAsContext(D)
+ // visitNamedDeclAsContext(D)
+ // ...
+ // visitFieldDeclAsContext(D)
+ //
+ // # Finish the node.
+ // pop();
+ void dispatch(Decl *D) {
+ push(D->getDeclKindName());
+ XMLDeclVisitor<XMLDumper>::dispatch(D);
+ pop();
+ }
+ void visitDeclAttrs(Decl *D) {
+ setPointer(D);
+ }
+
+ /// Visit all the lexical decls in the given context.
+ void visitDeclContext(DeclContext *DC) {
+ for (DeclContext::decl_iterator
+ I = DC->decls_begin(), E = DC->decls_end(); I != E; ++I)
+ dispatch(*I);
+
+ // FIXME: point out visible declarations not in lexical context?
+ }
+
+ /// Set the "access" attribute on the current node according to the
+ /// given specifier.
+ void setAccess(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_public: return set("access", "public");
+ case AS_protected: return set("access", "protected");
+ case AS_private: return set("access", "private");
+ case AS_none: llvm_unreachable("explicit forbidden access");
+ }
+ }
+
+ template <class T> void visitRedeclarableAttrs(T *D) {
+ if (T *Prev = D->getPreviousDecl())
+ setPointer("previous", Prev);
+ }
+
+
+ // TranslationUnitDecl
+ void visitTranslationUnitDeclAsContext(TranslationUnitDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // LinkageSpecDecl
+ void visitLinkageSpecDeclAttrs(LinkageSpecDecl *D) {
+ StringRef lang = "";
+ switch (D->getLanguage()) {
+ case LinkageSpecDecl::lang_c: lang = "C"; break;
+ case LinkageSpecDecl::lang_cxx: lang = "C++"; break;
+ }
+ set("lang", lang);
+ }
+ void visitLinkageSpecDeclAsContext(LinkageSpecDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // NamespaceDecl
+ void visitNamespaceDeclAttrs(NamespaceDecl *D) {
+ setFlag("inline", D->isInline());
+ if (!D->isOriginalNamespace())
+ setPointer("original", D->getOriginalNamespace());
+ }
+ void visitNamespaceDeclAsContext(NamespaceDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // NamedDecl
+ void visitNamedDeclAttrs(NamedDecl *D) {
+ setName(D->getDeclName());
+ }
+
+ // ValueDecl
+ void visitValueDeclChildren(ValueDecl *D) {
+ dispatch(D->getType());
+ }
+
+ // DeclaratorDecl
+ void visitDeclaratorDeclChildren(DeclaratorDecl *D) {
+ //dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // VarDecl
+ void visitVarDeclAttrs(VarDecl *D) {
+ visitRedeclarableAttrs(D);
+ if (D->getStorageClass() != SC_None)
+ set("storage",
+ VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
+ StringRef initStyle = "";
+ switch (D->getInitStyle()) {
+ case VarDecl::CInit: initStyle = "c"; break;
+ case VarDecl::CallInit: initStyle = "call"; break;
+ case VarDecl::ListInit: initStyle = "list"; break;
+ }
+ set("initstyle", initStyle);
+ setFlag("nrvo", D->isNRVOVariable());
+ // TODO: instantiation, etc.
+ }
+ void visitVarDeclChildren(VarDecl *D) {
+ if (D->hasInit()) dispatch(D->getInit());
+ }
+
+ // ParmVarDecl?
+
+ // FunctionDecl
+ void visitFunctionDeclAttrs(FunctionDecl *D) {
+ visitRedeclarableAttrs(D);
+ setFlag("pure", D->isPure());
+ setFlag("trivial", D->isTrivial());
+ setFlag("returnzero", D->hasImplicitReturnZero());
+ setFlag("prototype", D->hasWrittenPrototype());
+ setFlag("deleted", D->isDeletedAsWritten());
+ if (D->getStorageClass() != SC_None)
+ set("storage",
+ VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
+ setFlag("inline", D->isInlineSpecified());
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>())
+ set("asmlabel", ALA->getLabel());
+ // TODO: instantiation, etc.
+ }
+ void visitFunctionDeclChildren(FunctionDecl *D) {
+ for (FunctionDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ for (llvm::ArrayRef<NamedDecl*>::iterator
+ I = D->getDeclsInPrototypeScope().begin(), E = D->getDeclsInPrototypeScope().end();
+ I != E; ++I)
+ dispatch(*I);
+ if (D->doesThisDeclarationHaveABody())
+ dispatch(D->getBody());
+ }
+
+ // CXXMethodDecl ?
+ // CXXConstructorDecl ?
+ // CXXDestructorDecl ?
+ // CXXConversionDecl ?
+
+ void dispatch(CXXCtorInitializer *Init) {
+ // TODO
+ }
+
+ // FieldDecl
+ void visitFieldDeclAttrs(FieldDecl *D) {
+ setFlag("mutable", D->isMutable());
+ }
+ void visitFieldDeclChildren(FieldDecl *D) {
+ if (D->isBitField()) {
+ TemporaryContainer C(*this, "bitwidth");
+ dispatch(D->getBitWidth());
+ }
+ // TODO: C++0x member initializer
+ }
+
+ // EnumConstantDecl
+ void visitEnumConstantDeclChildren(EnumConstantDecl *D) {
+ // value in any case?
+ if (D->getInitExpr()) dispatch(D->getInitExpr());
+ }
+
+ // IndirectFieldDecl
+ void visitIndirectFieldDeclChildren(IndirectFieldDecl *D) {
+ for (IndirectFieldDecl::chain_iterator
+ I = D->chain_begin(), E = D->chain_end(); I != E; ++I) {
+ NamedDecl *VD = const_cast<NamedDecl*>(*I);
+ push(isa<VarDecl>(VD) ? "variable" : "field");
+ setPointer("ptr", VD);
+ completeAttrs();
+ pop();
+ }
+ }
+
+ // TypeDecl
+ void visitTypeDeclAttrs(TypeDecl *D) {
+ setPointer("typeptr", D->getTypeForDecl());
+ }
+
+ // TypedefDecl
+ void visitTypedefDeclAttrs(TypedefDecl *D) {
+ visitRedeclarableAttrs<TypedefNameDecl>(D);
+ }
+ void visitTypedefDeclChildren(TypedefDecl *D) {
+ dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // TypeAliasDecl
+ void visitTypeAliasDeclAttrs(TypeAliasDecl *D) {
+ visitRedeclarableAttrs<TypedefNameDecl>(D);
+ }
+ void visitTypeAliasDeclChildren(TypeAliasDecl *D) {
+ dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // TagDecl
+ void visitTagDeclAttrs(TagDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitTagDeclAsContext(TagDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // EnumDecl
+ void visitEnumDeclAttrs(EnumDecl *D) {
+ setFlag("scoped", D->isScoped());
+ setFlag("fixed", D->isFixed());
+ }
+ void visitEnumDeclChildren(EnumDecl *D) {
+ {
+ TemporaryContainer C(*this, "promotion_type");
+ dispatch(D->getPromotionType());
+ }
+ {
+ TemporaryContainer C(*this, "integer_type");
+ dispatch(D->getIntegerType());
+ }
+ }
+
+ // RecordDecl ?
+
+ void visitCXXRecordDeclChildren(CXXRecordDecl *D) {
+ if (!D->isThisDeclarationADefinition()) return;
+
+ for (CXXRecordDecl::base_class_iterator
+ I = D->bases_begin(), E = D->bases_end(); I != E; ++I) {
+ push("base");
+ setAccess(I->getAccessSpecifier());
+ completeAttrs();
+ dispatch(I->getTypeSourceInfo()->getTypeLoc());
+ pop();
+ }
+ }
+
+ // ClassTemplateSpecializationDecl ?
+
+ // FileScopeAsmDecl ?
+
+ // BlockDecl
+ void visitBlockDeclAttrs(BlockDecl *D) {
+ setFlag("variadic", D->isVariadic());
+ }
+ void visitBlockDeclChildren(BlockDecl *D) {
+ for (FunctionDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ dispatch(D->getBody());
+ }
+
+ // AccessSpecDecl
+ void visitAccessSpecDeclAttrs(AccessSpecDecl *D) {
+ setAccess(D->getAccess());
+ }
+
+ // TemplateDecl
+ void visitTemplateDeclChildren(TemplateDecl *D) {
+ visitTemplateParameters(D->getTemplateParameters());
+ if (D->getTemplatedDecl())
+ dispatch(D->getTemplatedDecl());
+ }
+
+ // FunctionTemplateDecl
+ void visitFunctionTemplateDeclAttrs(FunctionTemplateDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitFunctionTemplateDeclChildren(FunctionTemplateDecl *D) {
+ // Mention all the specializations which don't have explicit
+ // declarations elsewhere.
+ for (FunctionTemplateDecl::spec_iterator
+ I = D->spec_begin(), E = D->spec_end(); I != E; ++I) {
+ FunctionTemplateSpecializationInfo *Info
+ = I->getTemplateSpecializationInfo();
+
+ bool Unknown = false;
+ switch (Info->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation: Unknown = false; break;
+ case TSK_Undeclared: Unknown = true; break;
+
+ // These will be covered at their respective sites.
+ case TSK_ExplicitSpecialization: continue;
+ case TSK_ExplicitInstantiationDeclaration: continue;
+ case TSK_ExplicitInstantiationDefinition: continue;
+ }
+
+ TemporaryContainer C(*this,
+ Unknown ? "uninstantiated" : "instantiation");
+ visitTemplateArguments(*Info->TemplateArguments);
+ dispatch(Info->Function);
+ }
+ }
+
+  // ClassTemplateDecl
+ void visitClassTemplateDeclAttrs(ClassTemplateDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitClassTemplateDeclChildren(ClassTemplateDecl *D) {
+ // Mention all the specializations which don't have explicit
+ // declarations elsewhere.
+ for (ClassTemplateDecl::spec_iterator
+ I = D->spec_begin(), E = D->spec_end(); I != E; ++I) {
+
+ bool Unknown = false;
+ switch (I->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation: Unknown = false; break;
+ case TSK_Undeclared: Unknown = true; break;
+
+ // These will be covered at their respective sites.
+ case TSK_ExplicitSpecialization: continue;
+ case TSK_ExplicitInstantiationDeclaration: continue;
+ case TSK_ExplicitInstantiationDefinition: continue;
+ }
+
+ TemporaryContainer C(*this,
+ Unknown ? "uninstantiated" : "instantiation");
+ visitTemplateArguments(I->getTemplateArgs());
+ dispatch(*I);
+ }
+ }
+
+ // TemplateTypeParmDecl
+ void visitTemplateTypeParmDeclAttrs(TemplateTypeParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitTemplateTypeParmDeclChildren(TemplateTypeParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgumentInfo()->getTypeLoc());
+ // parameter pack?
+ }
+
+ // NonTypeTemplateParmDecl
+ void visitNonTypeTemplateParmDeclAttrs(NonTypeTemplateParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitNonTypeTemplateParmDeclChildren(NonTypeTemplateParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgument());
+ // parameter pack?
+ }
+
+ // TemplateTemplateParmDecl
+ void visitTemplateTemplateParmDeclAttrs(TemplateTemplateParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitTemplateTemplateParmDeclChildren(TemplateTemplateParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgument());
+ // parameter pack?
+ }
+
+ // FriendDecl
+ void visitFriendDeclChildren(FriendDecl *D) {
+ if (TypeSourceInfo *T = D->getFriendType())
+ dispatch(T->getTypeLoc());
+ else
+ dispatch(D->getFriendDecl());
+ }
+
+ // UsingDirectiveDecl ?
+ // UsingDecl ?
+ // UsingShadowDecl ?
+ // NamespaceAliasDecl ?
+ // UnresolvedUsingValueDecl ?
+ // UnresolvedUsingTypenameDecl ?
+ // StaticAssertDecl ?
+
+ // ObjCImplDecl
+ void visitObjCImplDeclChildren(ObjCImplDecl *D) {
+ visitDeclRef(D->getClassInterface());
+ }
+ void visitObjCImplDeclAsContext(ObjCImplDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCInterfaceDecl
+ void visitCategoryList(ObjCCategoryDecl *D) {
+ if (!D) return;
+
+ TemporaryContainer C(*this, "categories");
+ for (; D; D = D->getNextClassCategory())
+ visitDeclRef(D);
+ }
+ void visitObjCInterfaceDeclAttrs(ObjCInterfaceDecl *D) {
+ setPointer("typeptr", D->getTypeForDecl());
+ setFlag("forward_decl", !D->isThisDeclarationADefinition());
+ setFlag("implicit_interface", D->isImplicitInterfaceDecl());
+ }
+ void visitObjCInterfaceDeclChildren(ObjCInterfaceDecl *D) {
+ visitDeclRef("super", D->getSuperClass());
+ visitDeclRef("implementation", D->getImplementation());
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ visitCategoryList(D->getCategoryList());
+ }
+ void visitObjCInterfaceDeclAsContext(ObjCInterfaceDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCCategoryDecl
+ void visitObjCCategoryDeclAttrs(ObjCCategoryDecl *D) {
+ setFlag("extension", D->IsClassExtension());
+ setFlag("synth_bitfield", D->hasSynthBitfield());
+ }
+ void visitObjCCategoryDeclChildren(ObjCCategoryDecl *D) {
+ visitDeclRef("interface", D->getClassInterface());
+ visitDeclRef("implementation", D->getImplementation());
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCCategoryDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ }
+ void visitObjCCategoryDeclAsContext(ObjCCategoryDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCCategoryImplDecl
+ void visitObjCCategoryImplDeclAttrs(ObjCCategoryImplDecl *D) {
+ set("identifier", D->getName());
+ }
+ void visitObjCCategoryImplDeclChildren(ObjCCategoryImplDecl *D) {
+ visitDeclRef(D->getCategoryDecl());
+ }
+
+ // ObjCImplementationDecl
+ void visitObjCImplementationDeclAttrs(ObjCImplementationDecl *D) {
+ setFlag("synth_bitfield", D->hasSynthBitfield());
+ set("identifier", D->getName());
+ }
+ void visitObjCImplementationDeclChildren(ObjCImplementationDecl *D) {
+ visitDeclRef("super", D->getSuperClass());
+ if (D->init_begin() != D->init_end()) {
+ TemporaryContainer C(*this, "initializers");
+ for (ObjCImplementationDecl::init_iterator
+ I = D->init_begin(), E = D->init_end(); I != E; ++I)
+ dispatch(*I);
+ }
+ }
+
+ // ObjCProtocolDecl
+ void visitObjCProtocolDeclChildren(ObjCProtocolDecl *D) {
+ if (!D->isThisDeclarationADefinition())
+ return;
+
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ }
+ void visitObjCProtocolDeclAsContext(ObjCProtocolDecl *D) {
+ if (!D->isThisDeclarationADefinition())
+ return;
+
+ visitDeclContext(D);
+ }
+
+ // ObjCMethodDecl
+ void visitObjCMethodDeclAttrs(ObjCMethodDecl *D) {
+ // decl qualifier?
+ // implementation control?
+
+ setFlag("instance", D->isInstanceMethod());
+ setFlag("variadic", D->isVariadic());
+ setFlag("synthesized", D->isSynthesized());
+ setFlag("defined", D->isDefined());
+ setFlag("related_result_type", D->hasRelatedResultType());
+ }
+ void visitObjCMethodDeclChildren(ObjCMethodDecl *D) {
+ dispatch(D->getResultType());
+ for (ObjCMethodDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ if (D->isThisDeclarationADefinition())
+ dispatch(D->getBody());
+ }
+
+ // ObjCIvarDecl
+ void setAccessControl(StringRef prop, ObjCIvarDecl::AccessControl AC) {
+ switch (AC) {
+ case ObjCIvarDecl::None: return set(prop, "none");
+ case ObjCIvarDecl::Private: return set(prop, "private");
+ case ObjCIvarDecl::Protected: return set(prop, "protected");
+ case ObjCIvarDecl::Public: return set(prop, "public");
+ case ObjCIvarDecl::Package: return set(prop, "package");
+ }
+ }
+ void visitObjCIvarDeclAttrs(ObjCIvarDecl *D) {
+ setFlag("synthesize", D->getSynthesize());
+ setAccessControl("access", D->getAccessControl());
+ }
+
+ // ObjCCompatibleAliasDecl
+ void visitObjCCompatibleAliasDeclChildren(ObjCCompatibleAliasDecl *D) {
+ visitDeclRef(D->getClassInterface());
+ }
+
+ // FIXME: ObjCPropertyDecl
+ // FIXME: ObjCPropertyImplDecl
+
+ //---- Types -----------------------------------------------------//
+ void dispatch(TypeLoc TL) {
+ dispatch(TL.getType()); // for now
+ }
+
+ void dispatch(QualType T) {
+ if (T.hasLocalQualifiers()) {
+ push("QualType");
+ Qualifiers Qs = T.getLocalQualifiers();
+ setFlag("const", Qs.hasConst());
+ setFlag("volatile", Qs.hasVolatile());
+ setFlag("restrict", Qs.hasRestrict());
+ if (Qs.hasAddressSpace()) setInteger("addrspace", Qs.getAddressSpace());
+ if (Qs.hasObjCGCAttr()) {
+ switch (Qs.getObjCGCAttr()) {
+ case Qualifiers::Weak: set("gc", "weak"); break;
+ case Qualifiers::Strong: set("gc", "strong"); break;
+ case Qualifiers::GCNone: llvm_unreachable("explicit none");
+ }
+ }
+
+ completeAttrs();
+ dispatch(QualType(T.getTypePtr(), 0));
+ pop();
+ return;
+ }
+
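+    // No local qualifiers: emit a node named after the type's kind and let the
+    // type visitor fill in its attributes and children.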
+ Type *Ty = const_cast<Type*>(T.getTypePtr());
+ push(getTypeKindName(Ty));
+    XMLTypeVisitor<XMLDumper>::dispatch(Ty);
+ pop();
+ }
+
+ void setCallingConv(CallingConv CC) {
+ switch (CC) {
+ case CC_Default: return;
+ case CC_C: return set("cc", "cdecl");
+ case CC_X86FastCall: return set("cc", "x86_fastcall");
+ case CC_X86StdCall: return set("cc", "x86_stdcall");
+ case CC_X86ThisCall: return set("cc", "x86_thiscall");
+ case CC_X86Pascal: return set("cc", "x86_pascal");
+ case CC_AAPCS: return set("cc", "aapcs");
+ case CC_AAPCS_VFP: return set("cc", "aapcs_vfp");
+ }
+ }
+
+ void visitTypeAttrs(Type *D) {
+ setPointer(D);
+ setFlag("dependent", D->isDependentType());
+ setFlag("variably_modified", D->isVariablyModifiedType());
+
+ setPointer("canonical", D->getCanonicalTypeInternal().getAsOpaquePtr());
+ }
+
+ void visitPointerTypeChildren(PointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitReferenceTypeChildren(ReferenceType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitObjCObjectPointerTypeChildren(ObjCObjectPointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitBlockPointerTypeChildren(BlockPointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+
+ // Types that just wrap declarations.
+ void visitTagTypeChildren(TagType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitTypedefTypeChildren(TypedefType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitObjCInterfaceTypeChildren(ObjCInterfaceType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitUnresolvedUsingTypeChildren(UnresolvedUsingType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitInjectedClassNameTypeChildren(InjectedClassNameType *T) {
+ visitDeclRef(T->getDecl());
+ }
+
+ void visitFunctionTypeAttrs(FunctionType *T) {
+ setFlag("noreturn", T->getNoReturnAttr());
+ setCallingConv(T->getCallConv());
+ if (T->getHasRegParm()) setInteger("regparm", T->getRegParmType());
+ }
+ void visitFunctionTypeChildren(FunctionType *T) {
+ dispatch(T->getResultType());
+ }
+
+ void visitFunctionProtoTypeAttrs(FunctionProtoType *T) {
+ setFlag("const", T->getTypeQuals() & Qualifiers::Const);
+ setFlag("volatile", T->getTypeQuals() & Qualifiers::Volatile);
+ setFlag("restrict", T->getTypeQuals() & Qualifiers::Restrict);
+ }
+ void visitFunctionProtoTypeChildren(FunctionProtoType *T) {
+ push("parameters");
+ setFlag("variadic", T->isVariadic());
+ completeAttrs();
+ for (FunctionProtoType::arg_type_iterator
+ I = T->arg_type_begin(), E = T->arg_type_end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+
+ if (T->hasDynamicExceptionSpec()) {
+ push("exception_specifiers");
+ setFlag("any", T->getExceptionSpecType() == EST_MSAny);
+ completeAttrs();
+ for (FunctionProtoType::exception_iterator
+ I = T->exception_begin(), E = T->exception_end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+ }
+ // FIXME: noexcept specifier
+ }
+
+ void visitTemplateSpecializationTypeChildren(TemplateSpecializationType *T) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ visitDeclRef(RT->getDecl());
+
+ // TODO: TemplateName
+
+ push("template_arguments");
+ completeAttrs();
+ for (unsigned I = 0, E = T->getNumArgs(); I != E; ++I)
+ dispatch(T->getArg(I));
+ pop();
+ }
+
+ //---- Statements ------------------------------------------------//
+ void dispatch(Stmt *S) {
+ // FIXME: this is not really XML at all
+ push("Stmt");
+ out << ">\n";
+ Stack.back().State = NS_Children; // explicitly become non-lazy
+ S->dump(out, Context.getSourceManager());
+ out << '\n';
+ pop();
+ }
+};
+}
+
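+// Debug entry points; e.g. call 'D->dumpXML(llvm::errs())' on a Decl* from a
+// debugger or from ad-hoc instrumentation.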
+void Decl::dumpXML() const {
+ dumpXML(llvm::errs());
+}
+
+void Decl::dumpXML(raw_ostream &out) const {
+ XMLDumper(out, getASTContext()).dispatch(const_cast<Decl*>(this));
+}
+
+#else /* ifndef NDEBUG */
+
+void Decl::dumpXML() const {}
+void Decl::dumpXML(raw_ostream &out) const {}
+
+#endif
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
new file mode 100644
index 0000000..fcde542
--- /dev/null
+++ b/clang/lib/AST/Expr.cpp
@@ -0,0 +1,3588 @@
+//===--- Expr.cpp - Expression AST Node Implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstring>
+using namespace clang;
+
+/// isKnownToHaveBooleanValue - Return true if this is an integer expression
+/// that is known to return 0 or 1.  This happens for _Bool/bool expressions,
+/// but also for int expressions that are produced by things like comparisons
+/// in C.
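+/// For example, '(a == b) | (c < d)' is known to yield 0 or 1, while a plain
+/// 'int' variable is not.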
+bool Expr::isKnownToHaveBooleanValue() const {
+ const Expr *E = IgnoreParens();
+
+ // If this value has _Bool type, it is obvious 0/1.
+ if (E->getType()->isBooleanType()) return true;
+ // If this is a non-scalar-integer type, we don't care enough to try.
+ if (!E->getType()->isIntegralOrEnumerationType()) return false;
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ case UO_Plus:
+ return UO->getSubExpr()->isKnownToHaveBooleanValue();
+ default:
+ return false;
+ }
+ }
+
+ // Only look through implicit casts. If the user writes
+ // '(int) (a && b)' treat it as an arbitrary int.
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E))
+ return CE->getSubExpr()->isKnownToHaveBooleanValue();
+
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ default: return false;
+ case BO_LT: // Relational operators.
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ: // Equality operators.
+ case BO_NE:
+ case BO_LAnd: // AND operator.
+ case BO_LOr: // Logical OR operator.
+ return true;
+
+ case BO_And: // Bitwise AND operator.
+ case BO_Xor: // Bitwise XOR operator.
+ case BO_Or: // Bitwise OR operator.
+ // Handle things like (x==2)|(y==12).
+ return BO->getLHS()->isKnownToHaveBooleanValue() &&
+ BO->getRHS()->isKnownToHaveBooleanValue();
+
+ case BO_Comma:
+ case BO_Assign:
+ return BO->getRHS()->isKnownToHaveBooleanValue();
+ }
+ }
+
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
+ return CO->getTrueExpr()->isKnownToHaveBooleanValue() &&
+ CO->getFalseExpr()->isKnownToHaveBooleanValue();
+
+ return false;
+}
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getExprLoc().
+//
+// See also Stmt.cpp:{getLocStart(),getLocEnd()}.
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getExprLoc.
+ template <class E, class T>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const E*>(expr)->getExprLoc();
+ }
+
+ /// This implementation is used when a class doesn't provide
+ /// a custom implementation of getExprLoc. Overload resolution
+ /// should pick it over the implementation above because it's
+ /// more specialized according to function template partial ordering.
+ template <class E>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (Expr::*v)() const) {
+ return static_cast<const E*>(expr)->getLocStart();
+ }
+}
+
+SourceLocation Expr::getExprLoc() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: llvm_unreachable(#type " is not an Expr"); break;
+#define EXPR(type, base) \
+ case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+//===----------------------------------------------------------------------===//
+// Primary Expressions.
+//===----------------------------------------------------------------------===//
+
+/// \brief Compute the type-, value-, and instantiation-dependence of a
+/// declaration reference based on the declaration being referenced.
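+///
+/// For example, a reference to a non-type template parameter of type 'int' is
+/// value-dependent but not type-dependent, while a reference to a declaration
+/// whose type is itself dependent is both.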
+static void computeDeclRefDependence(ASTContext &Ctx, NamedDecl *D, QualType T,
+ bool &TypeDependent,
+ bool &ValueDependent,
+ bool &InstantiationDependent) {
+ TypeDependent = false;
+ ValueDependent = false;
+ InstantiationDependent = false;
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+
+ // (TD) - an identifier that was declared with dependent type
+ // (VD) - a name declared with a dependent type,
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ } else if (T->isInstantiationDependentType()) {
+ InstantiationDependent = true;
+ }
+
+ // (TD) - a conversion-function-id that specifies a dependent type
+ if (D->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName) {
+ QualType T = D->getDeclName().getCXXNameType();
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ if (T->isInstantiationDependentType())
+ InstantiationDependent = true;
+ }
+
+ // (VD) - the name of a non-type template parameter,
+ if (isa<NonTypeTemplateParmDecl>(D)) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ // (VD) - a constant with integral or enumeration type and is
+ // initialized with an expression that is value-dependent.
+ // (VD) - a constant with literal type and is initialized with an
+ // expression that is value-dependent [C++11].
+ // (VD) - FIXME: Missing from the standard:
+ // - an entity with reference type and is initialized with an
+ // expression that is value-dependent [C++11]
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if ((Ctx.getLangOpts().CPlusPlus0x ?
+ Var->getType()->isLiteralType() :
+ Var->getType()->isIntegralOrEnumerationType()) &&
+ (Var->getType().getCVRQualifiers() == Qualifiers::Const ||
+ Var->getType()->isReferenceType())) {
+ if (const Expr *Init = Var->getAnyInitializer())
+ if (Init->isValueDependent()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (Var->isStaticDataMember() &&
+ Var->getDeclContext()->isDependentContext()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+
+ return;
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+}
+
+void DeclRefExpr::computeDependence(ASTContext &Ctx) {
+ bool TypeDependent = false;
+ bool ValueDependent = false;
+ bool InstantiationDependent = false;
+ computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
+ ValueDependent, InstantiationDependent);
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+ if (!TypeDependent && !ValueDependent &&
+ hasExplicitTemplateArgs() &&
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ getTemplateArgs(),
+ getNumTemplateArgs(),
+ InstantiationDependent)) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+
+ ExprBits.TypeDependent = TypeDependent;
+ ExprBits.ValueDependent = ValueDependent;
+ ExprBits.InstantiationDependent = InstantiationDependent;
+
+ // Is the declaration a parameter pack?
+ if (getDecl()->isParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+}
+
+DeclRefExpr::DeclRefExpr(ASTContext &Ctx,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D, bool RefersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs,
+ QualType T, ExprValueKind VK)
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
+ D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
+ DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
+ if (QualifierLoc)
+ getInternalQualifierLoc() = QualifierLoc;
+ DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
+ if (FoundD)
+ getInternalFoundDecl() = FoundD;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo
+ = (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
+ DeclRefExprBits.RefersToEnclosingLocal = RefersToEnclosingLocal;
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+ DeclRefExprBits.HadMultipleCandidates = 0;
+
+ computeDependence(Ctx);
+}
+
+DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool RefersToEnclosingLocal,
+ SourceLocation NameLoc,
+ QualType T,
+ ExprValueKind VK,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return Create(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingLocal,
+ DeclarationNameInfo(D->getDeclName(), NameLoc),
+ T, VK, FoundD, TemplateArgs);
+}
+
+DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool RefersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
+ QualType T,
+ ExprValueKind VK,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+  // Filter out cases where the found Decl is the same as the value referenced.
+ if (D == FoundD)
+ FoundD = 0;
+
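+  // The optional qualifier, found declaration, and template keyword/argument
+  // info live in trailing storage allocated along with the DeclRefExpr itself,
+  // so account for them when computing the allocation size.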
+ std::size_t Size = sizeof(DeclRefExpr);
+ if (QualifierLoc != 0)
+ Size += sizeof(NestedNameSpecifierLoc);
+ if (FoundD)
+ Size += sizeof(NamedDecl *);
+ if (TemplateArgs)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
+ else if (TemplateKWLoc.isValid())
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingLocal,
+ NameInfo, FoundD, TemplateArgs, T, VK);
+}
+
+DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context,
+ bool HasQualifier,
+ bool HasFoundDecl,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t Size = sizeof(DeclRefExpr);
+ if (HasQualifier)
+ Size += sizeof(NestedNameSpecifierLoc);
+ if (HasFoundDecl)
+ Size += sizeof(NamedDecl *);
+ if (HasTemplateKWAndArgsInfo)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(EmptyShell());
+}
+
+SourceRange DeclRefExpr::getSourceRange() const {
+ SourceRange R = getNameInfo().getSourceRange();
+ if (hasQualifier())
+ R.setBegin(getQualifierLoc().getBeginLoc());
+ if (hasExplicitTemplateArgs())
+ R.setEnd(getRAngleLoc());
+ return R;
+}
+SourceLocation DeclRefExpr::getLocStart() const {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return getNameInfo().getLocStart();
+}
+SourceLocation DeclRefExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getNameInfo().getLocEnd();
+}
+
+// FIXME: Maybe this should use DeclPrinter with a special "print predefined
+// expr" policy instead.
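+//
+// For a member function such as 'void N::S::f(int) const', __PRETTY_FUNCTION__
+// produces the full qualified signature (including template arguments for
+// instantiations), whereas __FUNCTION__ is simply "f".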
+std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
+ ASTContext &Context = CurrentDecl->getASTContext();
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
+ if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual)
+ return FD->getNameAsString();
+
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isVirtual() && IT != PrettyFunctionNoVirtual)
+ Out << "virtual ";
+ if (MD->isStatic())
+ Out << "static ";
+ }
+
+ PrintingPolicy Policy(Context.getLangOpts());
+ std::string Proto = FD->getQualifiedNameAsString(Policy);
+ llvm::raw_string_ostream POut(Proto);
+
+ const FunctionDecl *Decl = FD;
+ if (const FunctionDecl* Pattern = FD->getTemplateInstantiationPattern())
+ Decl = Pattern;
+ const FunctionType *AFT = Decl->getType()->getAs<FunctionType>();
+ const FunctionProtoType *FT = 0;
+ if (FD->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ POut << "(";
+ if (FT) {
+ for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ std::string Param;
+ Decl->getParamDecl(i)->getType().getAsStringInternal(Param, Policy);
+ POut << Param;
+ }
+
+ if (FT->isVariadic()) {
+ if (FD->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ }
+ POut << ")";
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ Qualifiers ThisQuals = Qualifiers::fromCVRMask(MD->getTypeQualifiers());
+ if (ThisQuals.hasConst())
+ POut << " const";
+ if (ThisQuals.hasVolatile())
+ POut << " volatile";
+ RefQualifierKind Ref = MD->getRefQualifier();
+ if (Ref == RQ_LValue)
+ POut << " &";
+ else if (Ref == RQ_RValue)
+ POut << " &&";
+ }
+
+ typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy;
+ SpecsTy Specs;
+ const DeclContext *Ctx = FD->getDeclContext();
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
+ if (Spec && !Spec->isExplicitSpecialization())
+ Specs.push_back(Spec);
+ Ctx = Ctx->getParent();
+ }
+
+ std::string TemplateParams;
+ llvm::raw_string_ostream TOut(TemplateParams);
+ for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend();
+ I != E; ++I) {
+ const TemplateParameterList *Params
+ = (*I)->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &Args = (*I)->getTemplateArgs();
+ assert(Params->size() == Args.size());
+ for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args.get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ FunctionTemplateSpecializationInfo *FSI
+ = FD->getTemplateSpecializationInfo();
+ if (FSI && !FSI->isExplicitSpecialization()) {
+ const TemplateParameterList* Params
+ = FSI->getTemplate()->getTemplateParameters();
+ const TemplateArgumentList* Args = FSI->TemplateArguments;
+ assert(Params->size() == Args->size());
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args->get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ TOut.flush();
+ if (!TemplateParams.empty()) {
+ // remove the trailing comma and space
+ TemplateParams.resize(TemplateParams.size() - 2);
+ POut << " [" << TemplateParams << "]";
+ }
+
+ POut.flush();
+
+ if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
+ AFT->getResultType().getAsStringInternal(Proto, Policy);
+
+ Out << Proto;
+
+ Out.flush();
+ return Name.str().str();
+ }
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurrentDecl)) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ Out << (MD->isInstanceMethod() ? '-' : '+');
+ Out << '[';
+
+ // For incorrect code, there might not be an ObjCInterfaceDecl. Do
+ // a null check to avoid a crash.
+ if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
+ Out << *ID;
+
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(MD->getDeclContext()))
+ Out << '(' << *CID << ')';
+
+ Out << ' ';
+ Out << MD->getSelector().getAsString();
+ Out << ']';
+
+ Out.flush();
+ return Name.str().str();
+ }
+ if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
+ // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
+ return "top level";
+ }
+ return "";
+}
+
+void APNumericStorage::setIntValue(ASTContext &C, const llvm::APInt &Val) {
+ if (hasAllocation())
+ C.Deallocate(pVal);
+
+ BitWidth = Val.getBitWidth();
+ unsigned NumWords = Val.getNumWords();
+ const uint64_t* Words = Val.getRawData();
+ if (NumWords > 1) {
+ pVal = new (C) uint64_t[NumWords];
+ std::copy(Words, Words + NumWords, pVal);
+ } else if (NumWords == 1)
+ VAL = Words[0];
+ else
+ VAL = 0;
+}
+
+IntegerLiteral *
+IntegerLiteral::Create(ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l) {
+ return new (C) IntegerLiteral(C, V, type, l);
+}
+
+IntegerLiteral *
+IntegerLiteral::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) IntegerLiteral(Empty);
+}
+
+FloatingLiteral *
+FloatingLiteral::Create(ASTContext &C, const llvm::APFloat &V,
+ bool isexact, QualType Type, SourceLocation L) {
+ return new (C) FloatingLiteral(C, V, isexact, Type, L);
+}
+
+FloatingLiteral *
+FloatingLiteral::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) FloatingLiteral(C, Empty);
+}
+
+/// getValueAsApproximateDouble - This returns the value as an inaccurate
+/// double. Note that this may cause loss of precision, but is useful for
+/// debugging dumps, etc.
+double FloatingLiteral::getValueAsApproximateDouble() const {
+ llvm::APFloat V = getValue();
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+int StringLiteral::mapCharByteWidth(TargetInfo const &target, StringKind k) {
+ int CharByteWidth = 0;
+ switch(k) {
+ case Ascii:
+ case UTF8:
+ CharByteWidth = target.getCharWidth();
+ break;
+ case Wide:
+ CharByteWidth = target.getWCharWidth();
+ break;
+ case UTF16:
+ CharByteWidth = target.getChar16Width();
+ break;
+ case UTF32:
+ CharByteWidth = target.getChar32Width();
+ break;
+ }
+ assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
+ CharByteWidth /= 8;
+ assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
+ && "character byte widths supported are 1, 2, and 4 only");
+ return CharByteWidth;
+}
+
+StringLiteral *StringLiteral::Create(ASTContext &C, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumStrs) {
+ // Allocate enough space for the StringLiteral plus an array of locations for
+ // any concatenated string tokens.
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignOf<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(Ty);
+
+ // OPTIMIZE: could allocate this appended to the StringLiteral.
+ SL->setString(C,Str,Kind,Pascal);
+
+ SL->TokLocs[0] = Loc[0];
+ SL->NumConcatenated = NumStrs;
+
+ if (NumStrs != 1)
+ memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
+ return SL;
+}
+
+StringLiteral *StringLiteral::CreateEmpty(ASTContext &C, unsigned NumStrs) {
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignOf<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(QualType());
+ SL->CharByteWidth = 0;
+ SL->Length = 0;
+ SL->NumConcatenated = NumStrs;
+ return SL;
+}
+
+void StringLiteral::setString(ASTContext &C, StringRef Str,
+ StringKind Kind, bool IsPascal) {
+  // FIXME: we assume that the string data comes from a target that uses the
+  // same code unit size and endianness for the type of string.
+ this->Kind = Kind;
+ this->IsPascal = IsPascal;
+
+ CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
+ assert((Str.size()%CharByteWidth == 0)
+ && "size of data must be multiple of CharByteWidth");
+ Length = Str.size()/CharByteWidth;
+
+ switch(CharByteWidth) {
+ case 1: {
+ char *AStrData = new (C) char[Length];
+ std::memcpy(AStrData,Str.data(),Str.size());
+ StrData.asChar = AStrData;
+ break;
+ }
+ case 2: {
+ uint16_t *AStrData = new (C) uint16_t[Length];
+ std::memcpy(AStrData,Str.data(),Str.size());
+ StrData.asUInt16 = AStrData;
+ break;
+ }
+ case 4: {
+ uint32_t *AStrData = new (C) uint32_t[Length];
+ std::memcpy(AStrData,Str.data(),Str.size());
+ StrData.asUInt32 = AStrData;
+ break;
+ }
+ default:
+ assert(false && "unsupported CharByteWidth");
+ }
+}
+
+/// getLocationOfByte - Return a source location that points to the specified
+/// byte of this string literal.
+///
+/// Strings are amazingly complex. They can be formed from multiple tokens and
+/// can have escape sequences in them in addition to the usual trigraph and
+/// escaped newline business. This routine handles this complexity.
+///
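+/// For example, in the concatenation "ab" "\n" "cd", byte 3 refers to the 'c'
+/// in the third token, even though the escape sequence makes the second
+/// token's spelling longer than its single byte of string data.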
+SourceLocation StringLiteral::
+getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features, const TargetInfo &Target) const {
+ assert(Kind == StringLiteral::Ascii && "This only works for ASCII strings");
+
+ // Loop over all of the tokens in this string until we find the one that
+ // contains the byte we're looking for.
+ unsigned TokNo = 0;
+ while (1) {
+ assert(TokNo < getNumConcatenated() && "Invalid byte number!");
+ SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
+
+ // Get the spelling of the string so that we can get the data that makes up
+ // the string literal, not the identifier for the macro it is potentially
+ // expanded through.
+ SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);
+
+ // Re-lex the token to get its length and original spelling.
+ std::pair<FileID, unsigned> LocInfo =SM.getDecomposedLoc(StrTokSpellingLoc);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return StrTokSpellingLoc;
+
+ const char *StrData = Buffer.data()+LocInfo.second;
+
+ // Create a langops struct and enable trigraphs. This is sufficient for
+ // relexing tokens.
+ LangOptions LangOpts;
+ LangOpts.Trigraphs = true;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(StrTokSpellingLoc, Features, Buffer.begin(), StrData,
+ Buffer.end());
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+
+ // Use the StringLiteralParser to compute the length of the string in bytes.
+ StringLiteralParser SLP(&TheTok, 1, SM, Features, Target);
+ unsigned TokNumBytes = SLP.GetStringLength();
+
+ // If the byte is in this token, return the location of the byte.
+ if (ByteNo < TokNumBytes ||
+ (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
+ unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
+
+ // Now that we know the offset of the token in the spelling, use the
+ // preprocessor to get the offset in the original source.
+ return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
+ }
+
+ // Move to the next string token.
+ ++TokNo;
+ ByteNo -= TokNumBytes;
+ }
+}
+
+
+
+/// getOpcodeStr - Turn an Opcode enum value into the string it
+/// corresponds to, e.g. "&" or "++".
+const char *UnaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case UO_PostInc: return "++";
+ case UO_PostDec: return "--";
+ case UO_PreInc: return "++";
+ case UO_PreDec: return "--";
+ case UO_AddrOf: return "&";
+ case UO_Deref: return "*";
+ case UO_Plus: return "+";
+ case UO_Minus: return "-";
+ case UO_Not: return "~";
+ case UO_LNot: return "!";
+ case UO_Real: return "__real";
+ case UO_Imag: return "__imag";
+ case UO_Extension: return "__extension__";
+ }
+ llvm_unreachable("Unknown unary operator");
+}
+
+UnaryOperatorKind
+UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) {
+ switch (OO) {
+ default: llvm_unreachable("No unary operator for overloaded function");
+ case OO_PlusPlus: return Postfix ? UO_PostInc : UO_PreInc;
+ case OO_MinusMinus: return Postfix ? UO_PostDec : UO_PreDec;
+ case OO_Amp: return UO_AddrOf;
+ case OO_Star: return UO_Deref;
+ case OO_Plus: return UO_Plus;
+ case OO_Minus: return UO_Minus;
+ case OO_Tilde: return UO_Not;
+ case OO_Exclaim: return UO_LNot;
+ }
+}
+
+OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
+ switch (Opc) {
+ case UO_PostInc: case UO_PreInc: return OO_PlusPlus;
+ case UO_PostDec: case UO_PreDec: return OO_MinusMinus;
+ case UO_AddrOf: return OO_Amp;
+ case UO_Deref: return OO_Star;
+ case UO_Plus: return OO_Plus;
+ case UO_Minus: return OO_Minus;
+ case UO_Not: return OO_Tilde;
+ case UO_LNot: return OO_Exclaim;
+ default: return OO_None;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Postfix Operators.
+//===----------------------------------------------------------------------===//
+
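+// A CallExpr's SubExprs array is laid out as [callee][pre-args][args]; the
+// pre-argument slots (NumPreArgs of them, starting at PREARGS_START) are used
+// by subclasses that need extra leading operands.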
+CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
+ Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ SourceLocation rparenloc)
+ : Expr(SC, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->isInstantiationDependent(),
+ fn->containsUnexpandedParameterPack()),
+ NumArgs(numargs) {
+
+ SubExprs = new (C) Stmt*[numargs+PREARGS_START+NumPreArgs];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != numargs; ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START+NumPreArgs] = args[i];
+ }
+
+ CallExprBits.NumPreArgs = NumPreArgs;
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
+ QualType t, ExprValueKind VK, SourceLocation rparenloc)
+ : Expr(CallExprClass, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->isInstantiationDependent(),
+ fn->containsUnexpandedParameterPack()),
+ NumArgs(numargs) {
+
+ SubExprs = new (C) Stmt*[numargs+PREARGS_START];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != numargs; ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START] = args[i];
+ }
+
+ CallExprBits.NumPreArgs = 0;
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(ASTContext &C, StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty), SubExprs(0), NumArgs(0) {
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START];
+ CallExprBits.NumPreArgs = 0;
+}
+
+CallExpr::CallExpr(ASTContext &C, StmtClass SC, unsigned NumPreArgs,
+ EmptyShell Empty)
+ : Expr(SC, Empty), SubExprs(0), NumArgs(0) {
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs];
+ CallExprBits.NumPreArgs = NumPreArgs;
+}
+
+Decl *CallExpr::getCalleeDecl() {
+ Expr *CEE = getCallee()->IgnoreParenImpCasts();
+
+ while (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ CEE = NTTP->getReplacement()->IgnoreParenCasts();
+ }
+
+ // If we're calling a dereference, look at the pointer instead.
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (BO->isPtrMemOp())
+ CEE = BO->getRHS()->IgnoreParenCasts();
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ if (UO->getOpcode() == UO_Deref)
+ CEE = UO->getSubExpr()->IgnoreParenCasts();
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
+ return DRE->getDecl();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
+ return ME->getMemberDecl();
+
+ return 0;
+}
+
+FunctionDecl *CallExpr::getDirectCallee() {
+ return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
+}
+
+/// setNumArgs - This changes the number of arguments present in this call.
+/// Any orphaned expressions are simply dropped (their storage is owned by the
+/// ASTContext), and any new operands are set to null.
+void CallExpr::setNumArgs(ASTContext& C, unsigned NumArgs) {
+ // No change, just return.
+ if (NumArgs == getNumArgs()) return;
+
+  // If shrinking the number of arguments, just drop the extras and forget them.
+ if (NumArgs < getNumArgs()) {
+ this->NumArgs = NumArgs;
+ return;
+ }
+
+  // Otherwise, we are growing the number of arguments.  Allocate a new, bigger argument array.
+ unsigned NumPreArgs = getNumPreArgs();
+ Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
+ // Copy over args.
+ for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
+ NewSubExprs[i] = SubExprs[i];
+ // Null out new args.
+ for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
+ i != NumArgs+PREARGS_START+NumPreArgs; ++i)
+ NewSubExprs[i] = 0;
+
+ if (SubExprs) C.Deallocate(SubExprs);
+ SubExprs = NewSubExprs;
+ this->NumArgs = NumArgs;
+}
+
+/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
+/// not, return 0.
+unsigned CallExpr::isBuiltinCall() const {
+  // All simple function calls (e.g. func()) are implicitly cast to pointer to
+  // function. As a result, we try to obtain the DeclRefExpr from the
+  // ImplicitCastExpr.
+ const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
+ if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
+ return 0;
+
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DRE)
+ return 0;
+
+ const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (!FDecl)
+ return 0;
+
+ if (!FDecl->getIdentifier())
+ return 0;
+
+ return FDecl->getBuiltinID();
+}
+
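+/// getCallReturnType - Return the type produced by calling this callee,
+/// looking through function pointers, block pointers, and the bound-member
+/// placeholder type.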
+QualType CallExpr::getCallReturnType() const {
+ QualType CalleeType = getCallee()->getType();
+ if (const PointerType *FnTypePtr = CalleeType->getAs<PointerType>())
+ CalleeType = FnTypePtr->getPointeeType();
+ else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>())
+ CalleeType = BPT->getPointeeType();
+ else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember))
+ // This should never be overloaded and so should never return null.
+ CalleeType = Expr::findBoundMemberType(getCallee());
+
+ const FunctionType *FnType = CalleeType->castAs<FunctionType>();
+ return FnType->getResultType();
+}
+
+SourceRange CallExpr::getSourceRange() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange();
+
+ SourceLocation begin = getCallee()->getLocStart();
+ if (begin.isInvalid() && getNumArgs() > 0)
+ begin = getArg(0)->getLocStart();
+ SourceLocation end = getRParenLoc();
+ if (end.isInvalid() && getNumArgs() > 0)
+ end = getArg(getNumArgs() - 1)->getLocEnd();
+ return SourceRange(begin, end);
+}
+SourceLocation CallExpr::getLocStart() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange().getBegin();
+
+ SourceLocation begin = getCallee()->getLocStart();
+ if (begin.isInvalid() && getNumArgs() > 0)
+ begin = getArg(0)->getLocStart();
+ return begin;
+}
+SourceLocation CallExpr::getLocEnd() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange().getEnd();
+
+ SourceLocation end = getRParenLoc();
+ if (end.isInvalid() && getNumArgs() > 0)
+ end = getArg(getNumArgs() - 1)->getLocEnd();
+ return end;
+}
+
+OffsetOfExpr *OffsetOfExpr::Create(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc,
+ TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
+ Expr** exprsPtr, unsigned numExprs,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
+ sizeof(OffsetOfNode) * numComps +
+ sizeof(Expr*) * numExprs);
+
+ return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, compsPtr, numComps,
+ exprsPtr, numExprs, RParenLoc);
+}
+
+OffsetOfExpr *OffsetOfExpr::CreateEmpty(ASTContext &C,
+ unsigned numComps, unsigned numExprs) {
+ void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
+ sizeof(OffsetOfNode) * numComps +
+ sizeof(Expr*) * numExprs);
+ return new (Mem) OffsetOfExpr(numComps, numExprs);
+}
+
+OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc, TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
+ Expr** exprsPtr, unsigned numExprs,
+ SourceLocation RParenLoc)
+ : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/tsi->getType()->isDependentType(),
+ tsi->getType()->isInstantiationDependentType(),
+ tsi->getType()->containsUnexpandedParameterPack()),
+ OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
+ NumComps(numComps), NumExprs(numExprs)
+{
+ for(unsigned i = 0; i < numComps; ++i) {
+ setComponent(i, compsPtr[i]);
+ }
+
+ for(unsigned i = 0; i < numExprs; ++i) {
+ if (exprsPtr[i]->isTypeDependent() || exprsPtr[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprsPtr[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ setIndexExpr(i, exprsPtr[i]);
+ }
+}
+
+IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const {
+ assert(getKind() == Field || getKind() == Identifier);
+ if (getKind() == Field)
+ return getField()->getIdentifier();
+
+ return reinterpret_cast<IdentifierInfo *> (Data & ~(uintptr_t)Mask);
+}
+
+MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *memberdecl,
+ DeclAccessPair founddecl,
+ DeclarationNameInfo nameinfo,
+ const TemplateArgumentListInfo *targs,
+ QualType ty,
+ ExprValueKind vk,
+ ExprObjectKind ok) {
+ std::size_t Size = sizeof(MemberExpr);
+
+ bool hasQualOrFound = (QualifierLoc ||
+ founddecl.getDecl() != memberdecl ||
+ founddecl.getAccess() != memberdecl->getAccess());
+ if (hasQualOrFound)
+ Size += sizeof(MemberNameQualifier);
+
+ if (targs)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(targs->size());
+ else if (TemplateKWLoc.isValid())
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
+ MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo,
+ ty, vk, ok);
+
+ if (hasQualOrFound) {
+ // FIXME: Wrong. We should be looking at the member declaration we found.
+ if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ E->setValueDependent(true);
+ E->setTypeDependent(true);
+ E->setInstantiationDependent(true);
+ }
+ else if (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
+ E->setInstantiationDependent(true);
+
+ E->HasQualifierOrFoundDecl = true;
+
+ MemberNameQualifier *NQ = E->getMemberQualifier();
+ NQ->QualifierLoc = QualifierLoc;
+ NQ->FoundDecl = founddecl;
+ }
+
+ E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());
+
+ if (targs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *targs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ E->setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+
+ return E;
+}
+
+SourceRange MemberExpr::getSourceRange() const {
+ return SourceRange(getLocStart(), getLocEnd());
+}
+SourceLocation MemberExpr::getLocStart() const {
+ if (isImplicitAccess()) {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return MemberLoc;
+ }
+
+ // FIXME: We don't want this to happen. Rather, we should be able to
+ // detect all kinds of implicit accesses more cleanly.
+ SourceLocation BaseStartLoc = getBase()->getLocStart();
+ if (BaseStartLoc.isValid())
+ return BaseStartLoc;
+ return MemberLoc;
+}
+SourceLocation MemberExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getMemberNameInfo().getEndLoc();
+}
+
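+/// CheckCastConsistency - Sanity-check (via assertions) that this cast's kind
+/// is consistent with its operand and result types and with the presence or
+/// absence of a base path.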
+void CastExpr::CheckCastConsistency() const {
+ switch (getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerived:
+ case CK_BaseToDerivedMemberPointer:
+ assert(!path_empty() && "Cast kind should have a base path!");
+ break;
+
+ case CK_CPointerToObjCPointerCast:
+ assert(getType()->isObjCObjectPointerType());
+ assert(getSubExpr()->getType()->isPointerType());
+ goto CheckNoBasePath;
+
+ case CK_BlockPointerToObjCPointerCast:
+ assert(getType()->isObjCObjectPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_ReinterpretMemberPointer:
+ assert(getType()->isMemberPointerType());
+ assert(getSubExpr()->getType()->isMemberPointerType());
+ goto CheckNoBasePath;
+
+ case CK_BitCast:
+ // Arbitrary casts to C pointer types count as bitcasts.
+ // Otherwise, we should only have block and ObjC pointer casts
+ // here if they stay within the type kind.
+ if (!getType()->isPointerType()) {
+ assert(getType()->isObjCObjectPointerType() ==
+ getSubExpr()->getType()->isObjCObjectPointerType());
+ assert(getType()->isBlockPointerType() ==
+ getSubExpr()->getType()->isBlockPointerType());
+ }
+ goto CheckNoBasePath;
+
+ case CK_AnyPointerToBlockPointerCast:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isAnyPointerType() &&
+ !getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_CopyAndAutoreleaseBlockObject:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ // These should not have an inheritance path.
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_NullToPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ assert(!getType()->isBooleanType() && "unheralded conversion to bool");
+ goto CheckNoBasePath;
+
+ case CK_Dependent:
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_MemberPointerToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean:
+ case CK_LValueBitCast: // -> bool&
+ case CK_UserDefinedConversion: // operator bool()
+ CheckNoBasePath:
+ assert(path_empty() && "Cast kind should not have a base path!");
+ break;
+ }
+}
+
+const char *CastExpr::getCastKindName() const {
+ switch (getCastKind()) {
+ case CK_Dependent:
+ return "Dependent";
+ case CK_BitCast:
+ return "BitCast";
+ case CK_LValueBitCast:
+ return "LValueBitCast";
+ case CK_LValueToRValue:
+ return "LValueToRValue";
+ case CK_NoOp:
+ return "NoOp";
+ case CK_BaseToDerived:
+ return "BaseToDerived";
+ case CK_DerivedToBase:
+ return "DerivedToBase";
+ case CK_UncheckedDerivedToBase:
+ return "UncheckedDerivedToBase";
+ case CK_Dynamic:
+ return "Dynamic";
+ case CK_ToUnion:
+ return "ToUnion";
+ case CK_ArrayToPointerDecay:
+ return "ArrayToPointerDecay";
+ case CK_FunctionToPointerDecay:
+ return "FunctionToPointerDecay";
+ case CK_NullToMemberPointer:
+ return "NullToMemberPointer";
+ case CK_NullToPointer:
+ return "NullToPointer";
+ case CK_BaseToDerivedMemberPointer:
+ return "BaseToDerivedMemberPointer";
+ case CK_DerivedToBaseMemberPointer:
+ return "DerivedToBaseMemberPointer";
+ case CK_ReinterpretMemberPointer:
+ return "ReinterpretMemberPointer";
+ case CK_UserDefinedConversion:
+ return "UserDefinedConversion";
+ case CK_ConstructorConversion:
+ return "ConstructorConversion";
+ case CK_IntegralToPointer:
+ return "IntegralToPointer";
+ case CK_PointerToIntegral:
+ return "PointerToIntegral";
+ case CK_PointerToBoolean:
+ return "PointerToBoolean";
+ case CK_ToVoid:
+ return "ToVoid";
+ case CK_VectorSplat:
+ return "VectorSplat";
+ case CK_IntegralCast:
+ return "IntegralCast";
+ case CK_IntegralToBoolean:
+ return "IntegralToBoolean";
+ case CK_IntegralToFloating:
+ return "IntegralToFloating";
+ case CK_FloatingToIntegral:
+ return "FloatingToIntegral";
+ case CK_FloatingCast:
+ return "FloatingCast";
+ case CK_FloatingToBoolean:
+ return "FloatingToBoolean";
+ case CK_MemberPointerToBoolean:
+ return "MemberPointerToBoolean";
+ case CK_CPointerToObjCPointerCast:
+ return "CPointerToObjCPointerCast";
+ case CK_BlockPointerToObjCPointerCast:
+ return "BlockPointerToObjCPointerCast";
+ case CK_AnyPointerToBlockPointerCast:
+ return "AnyPointerToBlockPointerCast";
+ case CK_ObjCObjectLValueCast:
+ return "ObjCObjectLValueCast";
+ case CK_FloatingRealToComplex:
+ return "FloatingRealToComplex";
+ case CK_FloatingComplexToReal:
+ return "FloatingComplexToReal";
+ case CK_FloatingComplexToBoolean:
+ return "FloatingComplexToBoolean";
+ case CK_FloatingComplexCast:
+ return "FloatingComplexCast";
+ case CK_FloatingComplexToIntegralComplex:
+ return "FloatingComplexToIntegralComplex";
+ case CK_IntegralRealToComplex:
+ return "IntegralRealToComplex";
+ case CK_IntegralComplexToReal:
+ return "IntegralComplexToReal";
+ case CK_IntegralComplexToBoolean:
+ return "IntegralComplexToBoolean";
+ case CK_IntegralComplexCast:
+ return "IntegralComplexCast";
+ case CK_IntegralComplexToFloatingComplex:
+ return "IntegralComplexToFloatingComplex";
+ case CK_ARCConsumeObject:
+ return "ARCConsumeObject";
+ case CK_ARCProduceObject:
+ return "ARCProduceObject";
+ case CK_ARCReclaimReturnedObject:
+ return "ARCReclaimReturnedObject";
+ case CK_ARCExtendBlockObject:
+ return "ARCCExtendBlockObject";
+ case CK_AtomicToNonAtomic:
+ return "AtomicToNonAtomic";
+ case CK_NonAtomicToAtomic:
+ return "NonAtomicToAtomic";
+ case CK_CopyAndAutoreleaseBlockObject:
+ return "CopyAndAutoreleaseBlockObject";
+ }
+
+ llvm_unreachable("Unhandled cast kind!");
+}
+
+Expr *CastExpr::getSubExprAsWritten() {
+ Expr *SubExpr = 0;
+ CastExpr *E = this;
+ do {
+ SubExpr = E->getSubExpr();
+
+ // Skip through reference binding to temporary.
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(SubExpr))
+ SubExpr = Materialize->GetTemporaryExpr();
+
+ // Skip any temporary bindings; they're implicit.
+ if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
+ SubExpr = Binder->getSubExpr();
+
+ // Conversions by constructor and conversion functions have a
+ // subexpression describing the call; strip it off.
+ if (E->getCastKind() == CK_ConstructorConversion)
+ SubExpr = cast<CXXConstructExpr>(SubExpr)->getArg(0);
+ else if (E->getCastKind() == CK_UserDefinedConversion)
+ SubExpr = cast<CXXMemberCallExpr>(SubExpr)->getImplicitObjectArgument();
+
+ // If the subexpression we're left with is an implicit cast, look
+ // through that, too.
+ } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));
+
+ return SubExpr;
+}
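+
+// Illustrative sketch (not from the original source): for a conversion such as
+//   bool b = obj;   // invokes a user-defined operator bool()
+// the cast's sub-expression is the CXXMemberCallExpr for the conversion call;
+// getSubExprAsWritten() strips that call, along with any temporary bindings
+// and nested implicit casts, and returns 'obj' as the user wrote it.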
+
+CXXBaseSpecifier **CastExpr::path_buffer() {
+ switch (getStmtClass()) {
+#define ABSTRACT_STMT(x)
+#define CASTEXPR(Type, Base) \
+ case Stmt::Type##Class: \
+ return reinterpret_cast<CXXBaseSpecifier**>(static_cast<Type*>(this)+1);
+#define STMT(Type, Base)
+#include "clang/AST/StmtNodes.inc"
+ default:
+ llvm_unreachable("non-cast expressions not possible here");
+ }
+}
+
+void CastExpr::setCastPath(const CXXCastPath &Path) {
+ assert(Path.size() == path_size());
+ memcpy(path_buffer(), Path.data(), Path.size() * sizeof(CXXBaseSpecifier*));
+}
+
+ImplicitCastExpr *ImplicitCastExpr::Create(ASTContext &C, QualType T,
+ CastKind Kind, Expr *Operand,
+ const CXXCastPath *BasePath,
+ ExprValueKind VK) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer =
+ C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ ImplicitCastExpr *E =
+ new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
+}
+
+
+CStyleCastExpr *CStyleCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK, CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L, SourceLocation R) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer =
+ C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ CStyleCastExpr *E =
+ new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CStyleCastExpr *CStyleCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
+}
+
+/// getOpcodeStr - Turn an Opcode enum value into the punctuation string it
+/// corresponds to, e.g. "<<=".
+const char *BinaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case BO_PtrMemD: return ".*";
+ case BO_PtrMemI: return "->*";
+ case BO_Mul: return "*";
+ case BO_Div: return "/";
+ case BO_Rem: return "%";
+ case BO_Add: return "+";
+ case BO_Sub: return "-";
+ case BO_Shl: return "<<";
+ case BO_Shr: return ">>";
+ case BO_LT: return "<";
+ case BO_GT: return ">";
+ case BO_LE: return "<=";
+ case BO_GE: return ">=";
+ case BO_EQ: return "==";
+ case BO_NE: return "!=";
+ case BO_And: return "&";
+ case BO_Xor: return "^";
+ case BO_Or: return "|";
+ case BO_LAnd: return "&&";
+ case BO_LOr: return "||";
+ case BO_Assign: return "=";
+ case BO_MulAssign: return "*=";
+ case BO_DivAssign: return "/=";
+ case BO_RemAssign: return "%=";
+ case BO_AddAssign: return "+=";
+ case BO_SubAssign: return "-=";
+ case BO_ShlAssign: return "<<=";
+ case BO_ShrAssign: return ">>=";
+ case BO_AndAssign: return "&=";
+ case BO_XorAssign: return "^=";
+ case BO_OrAssign: return "|=";
+ case BO_Comma: return ",";
+ }
+
+ llvm_unreachable("Invalid OpCode!");
+}
+
+BinaryOperatorKind
+BinaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO) {
+ switch (OO) {
+ default: llvm_unreachable("Not an overloadable binary operator");
+ case OO_Plus: return BO_Add;
+ case OO_Minus: return BO_Sub;
+ case OO_Star: return BO_Mul;
+ case OO_Slash: return BO_Div;
+ case OO_Percent: return BO_Rem;
+ case OO_Caret: return BO_Xor;
+ case OO_Amp: return BO_And;
+ case OO_Pipe: return BO_Or;
+ case OO_Equal: return BO_Assign;
+ case OO_Less: return BO_LT;
+ case OO_Greater: return BO_GT;
+ case OO_PlusEqual: return BO_AddAssign;
+ case OO_MinusEqual: return BO_SubAssign;
+ case OO_StarEqual: return BO_MulAssign;
+ case OO_SlashEqual: return BO_DivAssign;
+ case OO_PercentEqual: return BO_RemAssign;
+ case OO_CaretEqual: return BO_XorAssign;
+ case OO_AmpEqual: return BO_AndAssign;
+ case OO_PipeEqual: return BO_OrAssign;
+ case OO_LessLess: return BO_Shl;
+ case OO_GreaterGreater: return BO_Shr;
+ case OO_LessLessEqual: return BO_ShlAssign;
+ case OO_GreaterGreaterEqual: return BO_ShrAssign;
+ case OO_EqualEqual: return BO_EQ;
+ case OO_ExclaimEqual: return BO_NE;
+ case OO_LessEqual: return BO_LE;
+ case OO_GreaterEqual: return BO_GE;
+ case OO_AmpAmp: return BO_LAnd;
+ case OO_PipePipe: return BO_LOr;
+ case OO_Comma: return BO_Comma;
+ case OO_ArrowStar: return BO_PtrMemI;
+ }
+}
+
+OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
+ static const OverloadedOperatorKind OverOps[] = {
+    /* .* Cannot be overloaded */ OO_None, OO_ArrowStar,
+ OO_Star, OO_Slash, OO_Percent,
+ OO_Plus, OO_Minus,
+ OO_LessLess, OO_GreaterGreater,
+ OO_Less, OO_Greater, OO_LessEqual, OO_GreaterEqual,
+ OO_EqualEqual, OO_ExclaimEqual,
+ OO_Amp,
+ OO_Caret,
+ OO_Pipe,
+ OO_AmpAmp,
+ OO_PipePipe,
+ OO_Equal, OO_StarEqual,
+ OO_SlashEqual, OO_PercentEqual,
+ OO_PlusEqual, OO_MinusEqual,
+ OO_LessLessEqual, OO_GreaterGreaterEqual,
+ OO_AmpEqual, OO_CaretEqual,
+ OO_PipeEqual,
+ OO_Comma
+ };
+ return OverOps[Opc];
+}
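+
+// Note (not from the original source): OverOps is indexed directly by the
+// BinaryOperatorKind value, so its entries must stay in one-to-one
+// correspondence with the BO_* enumerator order (BO_PtrMemD through BO_Comma).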
+
+InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
+ Expr **initExprs, unsigned numInits,
+ SourceLocation rbraceloc)
+ : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ InitExprs(C, numInits),
+ LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0)
+{
+ sawArrayRangeDesignator(false);
+ setInitializesStdInitializerList(false);
+ for (unsigned I = 0; I != numInits; ++I) {
+ if (initExprs[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (initExprs[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (initExprs[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (initExprs[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
+
+ InitExprs.insert(C, InitExprs.end(), initExprs, initExprs+numInits);
+}
+
+void InitListExpr::reserveInits(ASTContext &C, unsigned NumInits) {
+ if (NumInits > InitExprs.size())
+ InitExprs.reserve(C, NumInits);
+}
+
+void InitListExpr::resizeInits(ASTContext &C, unsigned NumInits) {
+ InitExprs.resize(C, NumInits, 0);
+}
+
+Expr *InitListExpr::updateInit(ASTContext &C, unsigned Init, Expr *expr) {
+ if (Init >= InitExprs.size()) {
+ InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, 0);
+ InitExprs.back() = expr;
+ return 0;
+ }
+
+ Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
+ InitExprs[Init] = expr;
+ return Result;
+}
+
+void InitListExpr::setArrayFiller(Expr *filler) {
+ assert(!hasArrayFiller() && "Filler already set!");
+ ArrayFillerOrUnionFieldInit = filler;
+ // Fill out any "holes" in the array due to designated initializers.
+ Expr **inits = getInits();
+ for (unsigned i = 0, e = getNumInits(); i != e; ++i)
+ if (inits[i] == 0)
+ inits[i] = filler;
+}
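+
+// Illustrative example (not from the original source): with a designated
+// initialization such as
+//   int a[4] = { [2] = 7 };
+// the semantic init list can contain null "holes" for skipped elements; the
+// filler (typically an ImplicitValueInitExpr) is patched into those null slots
+// here so every recorded element has an initializer expression.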
+
+bool InitListExpr::isStringLiteralInit() const {
+ if (getNumInits() != 1)
+ return false;
+ const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(getType());
+ if (!CAT || !CAT->getElementType()->isIntegerType())
+ return false;
+ const Expr *Init = getInit(0)->IgnoreParenImpCasts();
+ return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
+}
+
+SourceRange InitListExpr::getSourceRange() const {
+ if (SyntacticForm)
+ return SyntacticForm->getSourceRange();
+ SourceLocation Beg = LBraceLoc, End = RBraceLoc;
+ if (Beg.isInvalid()) {
+ // Find the first non-null initializer.
+ for (InitExprsTy::const_iterator I = InitExprs.begin(),
+ E = InitExprs.end();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ Beg = S->getLocStart();
+ break;
+ }
+ }
+ }
+ if (End.isInvalid()) {
+ // Find the first non-null initializer from the end.
+ for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(),
+ E = InitExprs.rend();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ End = S->getSourceRange().getEnd();
+ break;
+ }
+ }
+ }
+ return SourceRange(Beg, End);
+}
+
+/// getFunctionType - Return the underlying function type for this block.
+///
+const FunctionProtoType *BlockExpr::getFunctionType() const {
+ // The block pointer is never sugared, but the function type might be.
+ return cast<BlockPointerType>(getType())
+ ->getPointeeType()->castAs<FunctionProtoType>();
+}
+
+SourceLocation BlockExpr::getCaretLocation() const {
+ return TheBlock->getCaretLocation();
+}
+const Stmt *BlockExpr::getBody() const {
+ return TheBlock->getBody();
+}
+Stmt *BlockExpr::getBody() {
+ return TheBlock->getBody();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Generic Expression Routines
+//===----------------------------------------------------------------------===//
+
+/// isUnusedResultAWarning - Return true if this immediate expression should
+/// be warned about if the result is unused. If so, fill in Loc and Ranges
+/// with the location to warn on and the source range[s] to report with the
+/// warning.
+bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
+ SourceRange &R2, ASTContext &Ctx) const {
+ // Don't warn if the expr is type dependent. The type could end up
+ // instantiating to void.
+ if (isTypeDependent())
+ return false;
+
+ switch (getStmtClass()) {
+ default:
+ if (getType()->isVoidType())
+ return false;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()->
+ isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ case GenericSelectionExprClass:
+ return cast<GenericSelectionExpr>(this)->getResultExpr()->
+ isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ case UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(this);
+
+ switch (UO->getOpcode()) {
+ default: break;
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec: // ++/--
+ return false; // Not a warning.
+ case UO_Deref:
+ // Dereferencing a volatile pointer is a side-effect.
+ if (Ctx.getCanonicalType(getType()).isVolatileQualified())
+ return false;
+ break;
+ case UO_Real:
+ case UO_Imag:
+ // accessing a piece of a volatile complex is a side-effect.
+ if (Ctx.getCanonicalType(UO->getSubExpr()->getType())
+ .isVolatileQualified())
+ return false;
+ break;
+ case UO_Extension:
+ return UO->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
+ Loc = UO->getOperatorLoc();
+ R1 = UO->getSubExpr()->getSourceRange();
+ return true;
+ }
+ case BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(this);
+ switch (BO->getOpcode()) {
+ default:
+ break;
+ // Consider the RHS of comma for side effects. LHS was checked by
+ // Sema::CheckCommaOperands.
+ case BO_Comma:
+ // ((foo = <blah>), 0) is an idiom for hiding the result (and
+ // lvalue-ness) of an assignment written in a macro.
+ if (IntegerLiteral *IE =
+ dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
+ if (IE->getValue() == 0)
+ return false;
+ return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ // Consider '||', '&&' to have side effects if the LHS or RHS does.
+ case BO_LAnd:
+ case BO_LOr:
+ if (!BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
+ !BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ return false;
+ break;
+ }
+ if (BO->isAssignmentOp())
+ return false;
+ Loc = BO->getOperatorLoc();
+ R1 = BO->getLHS()->getSourceRange();
+ R2 = BO->getRHS()->getSourceRange();
+ return true;
+ }
+ case CompoundAssignOperatorClass:
+ case VAArgExprClass:
+ case AtomicExprClass:
+ return false;
+
+ case ConditionalOperatorClass: {
+ // If only one of the LHS or RHS is a warning, the operator might
+ // be being used for control flow. Only warn if both the LHS and
+ // RHS are warnings.
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
+ if (!Exp->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ return false;
+ if (!Exp->getLHS())
+ return true;
+ return Exp->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
+
+ case MemberExprClass:
+ // If the base pointer or element is to a volatile pointer/field, accessing
+ // it is a side effect.
+ if (Ctx.getCanonicalType(getType()).isVolatileQualified())
+ return false;
+ Loc = cast<MemberExpr>(this)->getMemberLoc();
+ R1 = SourceRange(Loc, Loc);
+ R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
+ return true;
+
+ case ArraySubscriptExprClass:
+ // If the base pointer or element is to a volatile pointer/field, accessing
+ // it is a side effect.
+ if (Ctx.getCanonicalType(getType()).isVolatileQualified())
+ return false;
+ Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
+ R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
+ R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
+ return true;
+
+ case CXXOperatorCallExprClass: {
+    // We warn about operator== and operator!= even for user-defined operator
+    // overloads, as there is no reasonable way to define these such that they
+ // have non-trivial, desirable side-effects. See the -Wunused-comparison
+ // warning: these operators are commonly typo'ed, and so warning on them
+ // provides additional value as well. If this list is updated,
+ // DiagnoseUnusedComparison should be as well.
+ const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
+ if (Op->getOperator() == OO_EqualEqual ||
+ Op->getOperator() == OO_ExclaimEqual) {
+ Loc = Op->getOperatorLoc();
+ R1 = Op->getSourceRange();
+ return true;
+ }
+
+ // Fallthrough for generic call handling.
+ }
+ case CallExprClass:
+ case CXXMemberCallExprClass:
+ case UserDefinedLiteralClass: {
+ // If this is a direct call, get the callee.
+ const CallExpr *CE = cast<CallExpr>(this);
+ if (const Decl *FD = CE->getCalleeDecl()) {
+ // If the callee has attribute pure, const, or warn_unused_result, warn
+ // about it. void foo() { strlen("bar"); } should warn.
+ //
+ // Note: If new cases are added here, DiagnoseUnusedExprResult should be
+ // updated to match for QoI.
+ if (FD->getAttr<WarnUnusedResultAttr>() ||
+ FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
+ Loc = CE->getCallee()->getLocStart();
+ R1 = CE->getCallee()->getSourceRange();
+
+ if (unsigned NumArgs = CE->getNumArgs())
+ R2 = SourceRange(CE->getArg(0)->getLocStart(),
+ CE->getArg(NumArgs-1)->getLocEnd());
+ return true;
+ }
+ }
+ return false;
+ }
+
+ case CXXTemporaryObjectExprClass:
+ case CXXConstructExprClass:
+ return false;
+
+ case ObjCMessageExprClass: {
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
+ if (Ctx.getLangOpts().ObjCAutoRefCount &&
+ ME->isInstanceMessage() &&
+ !ME->getType()->isVoidType() &&
+ ME->getSelector().getIdentifierInfoForSlot(0) &&
+ ME->getSelector().getIdentifierInfoForSlot(0)
+ ->getName().startswith("init")) {
+ Loc = getExprLoc();
+ R1 = ME->getSourceRange();
+ return true;
+ }
+
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+ Loc = getExprLoc();
+ return true;
+ }
+ return false;
+ }
+
+ case ObjCPropertyRefExprClass:
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+
+ case PseudoObjectExprClass: {
+ const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+
+ // Only complain about things that have the form of a getter.
+ if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
+ isa<BinaryOperator>(PO->getSyntacticForm()))
+ return false;
+
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+
+ case StmtExprClass: {
+ // Statement exprs don't logically have side effects themselves, but are
+ // sometimes used in macros in ways that give them a type that is unused.
+ // For example ({ blah; foo(); }) will end up with a type if foo has a type.
+    // However, if the result of the stmt expr is dead, we don't want to emit a
+ // warning.
+ const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
+ if (!CS->body_empty()) {
+ if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
+ return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
+ if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
+ return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
+
+ if (getType()->isVoidType())
+ return false;
+ Loc = cast<StmtExpr>(this)->getLParenLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+ case CStyleCastExprClass:
+ // If this is an explicit cast to void, allow it. People do this when they
+ // think they know what they're doing :).
+ if (getType()->isVoidType())
+ return false;
+ Loc = cast<CStyleCastExpr>(this)->getLParenLoc();
+ R1 = cast<CStyleCastExpr>(this)->getSubExpr()->getSourceRange();
+ return true;
+ case CXXFunctionalCastExprClass: {
+ if (getType()->isVoidType())
+ return false;
+ const CastExpr *CE = cast<CastExpr>(this);
+
+ // If this is a cast to void or a constructor conversion, check the operand.
+ // Otherwise, the result of the cast is unused.
+ if (CE->getCastKind() == CK_ToVoid ||
+ CE->getCastKind() == CK_ConstructorConversion)
+ return (cast<CastExpr>(this)->getSubExpr()
+ ->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ Loc = cast<CXXFunctionalCastExpr>(this)->getTypeBeginLoc();
+ R1 = cast<CXXFunctionalCastExpr>(this)->getSubExpr()->getSourceRange();
+ return true;
+ }
+
+ case ImplicitCastExprClass:
+ // Check the operand, since implicit casts are inserted by Sema
+ return (cast<ImplicitCastExpr>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+
+ case CXXDefaultArgExprClass:
+ return (cast<CXXDefaultArgExpr>(this)
+ ->getExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+
+ case CXXNewExprClass:
+ // FIXME: In theory, there might be new expressions that don't have side
+ // effects (e.g. a placement new with an uninitialized POD).
+ case CXXDeleteExprClass:
+ return false;
+ case CXXBindTemporaryExprClass:
+ return (cast<CXXBindTemporaryExpr>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ case ExprWithCleanupsClass:
+ return (cast<ExprWithCleanups>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ }
+}
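+
+// Illustrative examples (not from the original source): statements such as
+// 'x == 1;' or a call to a function marked pure, const or warn_unused_result
+// report true here, while 'x = 1;', '++i;' and '(void)f();' do not.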
+
+/// isOBJCGCCandidate - Check if an expression is Objective-C gc'able.
+/// Returns true if it is; false otherwise.
+bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
+ const Expr *E = IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case ObjCIvarRefExprClass:
+ return true;
+ case Expr::UnaryOperatorClass:
+ return cast<UnaryOperator>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case ImplicitCastExprClass:
+ return cast<ImplicitCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr()
+ ->isOBJCGCCandidate(Ctx);
+ case CStyleCastExprClass:
+ return cast<CStyleCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case DeclRefExprClass: {
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasGlobalStorage())
+ return true;
+ QualType T = VD->getType();
+      // Dereferencing a pointer variable is always a gc'able candidate,
+      // unless the pointer is __weak.
+ return T->isPointerType() &&
+ (Ctx.getObjCGCAttrKind(T) != Qualifiers::Weak);
+ }
+ return false;
+ }
+ case MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(E);
+ return M->getBase()->isOBJCGCCandidate(Ctx);
+ }
+ case ArraySubscriptExprClass:
+ return cast<ArraySubscriptExpr>(E)->getBase()->isOBJCGCCandidate(Ctx);
+ }
+}
+
+bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
+ if (isTypeDependent())
+ return false;
+ return ClassifyLValue(Ctx) == Expr::LV_MemberFunction;
+}
+
+QualType Expr::findBoundMemberType(const Expr *expr) {
+ assert(expr->hasPlaceholderType(BuiltinType::BoundMember));
+
+ // Bound member expressions are always one of these possibilities:
+ // x->m x.m x->*y x.*y
+ // (possibly parenthesized)
+
+ expr = expr->IgnoreParens();
+ if (const MemberExpr *mem = dyn_cast<MemberExpr>(expr)) {
+ assert(isa<CXXMethodDecl>(mem->getMemberDecl()));
+ return mem->getMemberDecl()->getType();
+ }
+
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(expr)) {
+ QualType type = op->getRHS()->getType()->castAs<MemberPointerType>()
+ ->getPointeeType();
+ assert(type->isFunctionType());
+ return type;
+ }
+
+ assert(isa<UnresolvedMemberExpr>(expr));
+ return QualType();
+}
+
+Expr* Expr::IgnoreParens() {
+ Expr* E = this;
+ while (true) {
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
+/// or CastExprs or ImplicitCastExprs, returning their operand.
+Expr *Expr::IgnoreParenCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
+
+/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
+/// casts. This is intended purely as a temporary workaround for code
+/// that hasn't yet been rewritten to do the right thing about those
+/// casts, and may disappear along with the last internal use.
+Expr *Expr::IgnoreParenLValueCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ } else if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ if (P->getCastKind() == CK_LValueToRValue) {
+ E = P->getSubExpr();
+ continue;
+ }
+ } else if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ } else if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ } else if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ } else if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ break;
+ }
+ return E;
+}
+
+Expr *Expr::IgnoreParenImpCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
+
+Expr *Expr::IgnoreConversionOperator() {
+ if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
+ if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
+ return MCE->getImplicitObjectArgument();
+ }
+ return this;
+}
+
+/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
+/// value (including ptr->int casts of the same size). Strip off any
+/// ParenExpr or CastExprs, returning their operand.
+Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+      // We ignore integer <-> integer casts of the same width, ptr <-> ptr
+      // casts, and ptr <-> int casts of the same width. We also ignore all
+      // identity casts.
+ Expr *SE = P->getSubExpr();
+
+ if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
+ E = SE;
+ continue;
+ }
+
+ if ((E->getType()->isPointerType() ||
+ E->getType()->isIntegralType(Ctx)) &&
+ (SE->getType()->isPointerType() ||
+ SE->getType()->isIntegralType(Ctx)) &&
+ Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
+ E = SE;
+ continue;
+ }
+ }
+
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+
+ return E;
+ }
+}
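+
+// Illustrative examples (not from the original source, assuming a typical
+// target where int is narrower than long long): '(unsigned)i' around an 'int'
+// and a 'char*' -> 'void*' cast are both stripped (same-width integral or
+// pointer representations), while a widening cast from 'int' to 'long long'
+// is kept.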
+
+bool Expr::isDefaultArgument() const {
+ const Expr *E = this;
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExprAsWritten();
+
+ return isa<CXXDefaultArgExpr>(E);
+}
+
+/// \brief Skip over any no-op casts, temporary-binding expressions,
+/// materialized temporaries, and parentheses.
+static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ while (const CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = BE->getSubExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ return E->IgnoreParens();
+}
+
+/// isTemporaryObject - Determines if this expression produces a
+/// temporary of the given class type.
+bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
+ if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
+ return false;
+
+ const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);
+
+ // Temporaries are by definition pr-values of class type.
+ if (!E->Classify(C).isPRValue()) {
+ // In this context, property reference is a message call and is pr-value.
+ if (!isa<ObjCPropertyRefExpr>(E))
+ return false;
+ }
+
+ // Black-list a few cases which yield pr-values of class type that don't
+ // refer to temporaries of that type:
+
+ // - implicit derived-to-base conversions
+ if (isa<ImplicitCastExpr>(E)) {
+ switch (cast<ImplicitCastExpr>(E)->getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ return false;
+ default:
+ break;
+ }
+ }
+
+ // - member expressions (all)
+ if (isa<MemberExpr>(E))
+ return false;
+
+ // - opaque values (all)
+ if (isa<OpaqueValueExpr>(E))
+ return false;
+
+ return true;
+}
+
+bool Expr::isImplicitCXXThis() const {
+ const Expr *E = this;
+
+ // Strip away parentheses and casts we don't care about.
+ while (true) {
+ if (const ParenExpr *Paren = dyn_cast<ParenExpr>(E)) {
+ E = Paren->getSubExpr();
+ continue;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp ||
+ ICE->getCastKind() == CK_LValueToRValue ||
+ ICE->getCastKind() == CK_DerivedToBase ||
+ ICE->getCastKind() == CK_UncheckedDerivedToBase) {
+ E = ICE->getSubExpr();
+ continue;
+ }
+ }
+
+ if (const UnaryOperator* UnOp = dyn_cast<UnaryOperator>(E)) {
+ if (UnOp->getOpcode() == UO_Extension) {
+ E = UnOp->getSubExpr();
+ continue;
+ }
+ }
+
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = M->GetTemporaryExpr();
+ continue;
+ }
+
+ break;
+ }
+
+ if (const CXXThisExpr *This = dyn_cast<CXXThisExpr>(E))
+ return This->isImplicit();
+
+ return false;
+}
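+
+// Illustrative example (not from the original source): inside a member
+// function, the base of an implicit member access such as 'x' (meaning
+// 'this->x') is an implicit CXXThisExpr, so this returns true for that base;
+// an explicitly written 'this' yields false.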
+
+/// hasAnyTypeDependentArguments - Determines if any of the expressions
+/// in Exprs is type-dependent.
+bool Expr::hasAnyTypeDependentArguments(llvm::ArrayRef<Expr *> Exprs) {
+ for (unsigned I = 0; I < Exprs.size(); ++I)
+ if (Exprs[I]->isTypeDependent())
+ return true;
+
+ return false;
+}
+
+bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
+  // This function is attempting to determine whether an expression is an
+  // initializer which can be evaluated at compile-time. isEvaluatable handles
+  // most of the cases, but it can't deal with some initializer-specific
+ // expressions, and it can't deal with aggregates; we deal with those here,
+ // and fall back to isEvaluatable for the other cases.
+
+ // If we ever capture reference-binding directly in the AST, we can
+ // kill the second parameter.
+
+ if (IsForRef) {
+ EvalResult Result;
+ return EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects;
+ }
+
+ switch (getStmtClass()) {
+ default: break;
+ case IntegerLiteralClass:
+ case FloatingLiteralClass:
+ case StringLiteralClass:
+ case ObjCStringLiteralClass:
+ case ObjCEncodeExprClass:
+ return true;
+ case CXXTemporaryObjectExprClass:
+ case CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
+
+ // Only if it's
+ if (CE->getConstructor()->isTrivial()) {
+ // 1) an application of the trivial default constructor or
+ if (!CE->getNumArgs()) return true;
+
+ // 2) an elidable trivial copy construction of an operand which is
+ // itself a constant initializer. Note that we consider the
+ // operand on its own, *not* as a reference binding.
+ if (CE->isElidable() &&
+ CE->getArg(0)->isConstantInitializer(Ctx, false))
+ return true;
+ }
+
+ // 3) a foldable constexpr constructor.
+ break;
+ }
+ case CompoundLiteralExprClass: {
+ // This handles gcc's extension that allows global initializers like
+ // "struct x {int x;} x = (struct x) {};".
+ // FIXME: This accepts other cases it shouldn't!
+ const Expr *Exp = cast<CompoundLiteralExpr>(this)->getInitializer();
+ return Exp->isConstantInitializer(Ctx, false);
+ }
+ case InitListExprClass: {
+ // FIXME: This doesn't deal with fields with reference types correctly.
+ // FIXME: This incorrectly allows pointers cast to integers to be assigned
+ // to bitfields.
+ const InitListExpr *Exp = cast<InitListExpr>(this);
+ unsigned numInits = Exp->getNumInits();
+ for (unsigned i = 0; i < numInits; i++) {
+ if (!Exp->getInit(i)->isConstantInitializer(Ctx, false))
+ return false;
+ }
+ return true;
+ }
+ case ImplicitValueInitExprClass:
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()
+ ->isConstantInitializer(Ctx, IsForRef);
+ case GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(this)->isResultDependent())
+ return false;
+ return cast<GenericSelectionExpr>(this)->getResultExpr()
+ ->isConstantInitializer(Ctx, IsForRef);
+ case ChooseExprClass:
+ return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)
+ ->isConstantInitializer(Ctx, IsForRef);
+ case UnaryOperatorClass: {
+ const UnaryOperator* Exp = cast<UnaryOperator>(this);
+ if (Exp->getOpcode() == UO_Extension)
+ return Exp->getSubExpr()->isConstantInitializer(Ctx, false);
+ break;
+ }
+ case CXXFunctionalCastExprClass:
+ case CXXStaticCastExprClass:
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass: {
+ const CastExpr *CE = cast<CastExpr>(this);
+
+ // If we're promoting an integer to an _Atomic type then this is constant
+ // if the integer is constant. We also need to check the converse in case
+ // someone does something like:
+ //
+ // int a = (_Atomic(int))42;
+ //
+ // I doubt anyone would write code like this directly, but it's quite
+ // possible as the result of macro expansions.
+ if (CE->getCastKind() == CK_NonAtomicToAtomic ||
+ CE->getCastKind() == CK_AtomicToNonAtomic)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ // Handle bitcasts of vector constants.
+ if (getType()->isVectorType() && CE->getCastKind() == CK_BitCast)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ // Handle misc casts we want to ignore.
+ // FIXME: Is it really safe to ignore all these?
+ if (CE->getCastKind() == CK_NoOp ||
+ CE->getCastKind() == CK_LValueToRValue ||
+ CE->getCastKind() == CK_ToUnion ||
+ CE->getCastKind() == CK_ConstructorConversion)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ break;
+ }
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
+ ->isConstantInitializer(Ctx, false);
+ }
+ return isEvaluatable(Ctx);
+}
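+
+// Illustrative examples (not from the original source): initializers such as
+//   int a[] = { 1, 2, 3 };             // InitListExpr of literals
+//   struct P p = (struct P){ 0, 0 };   // GCC compound-literal extension
+// are accepted here, while an initializer that calls a function which cannot
+// be folded falls through to isEvaluatable() and is rejected.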
+
+namespace {
+ /// \brief Look for a call to a non-trivial function within an expression.
+ class NonTrivialCallFinder : public EvaluatedExprVisitor<NonTrivialCallFinder>
+ {
+ typedef EvaluatedExprVisitor<NonTrivialCallFinder> Inherited;
+
+ bool NonTrivial;
+
+ public:
+ explicit NonTrivialCallFinder(ASTContext &Context)
+ : Inherited(Context), NonTrivial(false) { }
+
+ bool hasNonTrivialCall() const { return NonTrivial; }
+
+ void VisitCallExpr(CallExpr *E) {
+ if (CXXMethodDecl *Method
+ = dyn_cast_or_null<CXXMethodDecl>(E->getCalleeDecl())) {
+ if (Method->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->getConstructor()->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ if (E->getTemporary()->getDestructor()->isTrivial()) {
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+ };
+}
+
+bool Expr::hasNonTrivialCall(ASTContext &Ctx) {
+ NonTrivialCallFinder Finder(Ctx);
+ Finder.Visit(this);
+ return Finder.hasNonTrivialCall();
+}
+
+/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null
+/// pointer constant or not, as well as the specific kind of constant detected.
+/// Null pointer constants can be integer constant expressions with the
+/// value zero, casts of zero to void*, nullptr (C++0X), or __null
+/// (a GNU extension).
+Expr::NullPointerConstantKind
+Expr::isNullPointerConstant(ASTContext &Ctx,
+ NullPointerConstantValueDependence NPC) const {
+ if (isValueDependent()) {
+ switch (NPC) {
+ case NPC_NeverValueDependent:
+ llvm_unreachable("Unexpected value dependent expression!");
+ case NPC_ValueDependentIsNull:
+ if (isTypeDependent() || getType()->isIntegralType(Ctx))
+ return NPCK_ZeroInteger;
+ else
+ return NPCK_NotNull;
+
+ case NPC_ValueDependentIsNotNull:
+ return NPCK_NotNull;
+ }
+ }
+
+ // Strip off a cast to void*, if it exists. Except in C++.
+ if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // Check that it is a cast to void*.
+ if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
+ QualType Pointee = PT->getPointeeType();
+ if (!Pointee.hasQualifiers() &&
+ Pointee->isVoidType() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ }
+ }
+ } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(this)) {
+ // Ignore the ImplicitCastExpr type entirely.
+ return ICE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) {
+ // Accept ((void*)0) as a null pointer constant, as many other
+ // implementations do.
+ return PE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const GenericSelectionExpr *GE =
+ dyn_cast<GenericSelectionExpr>(this)) {
+ return GE->getResultExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const CXXDefaultArgExpr *DefaultArg
+ = dyn_cast<CXXDefaultArgExpr>(this)) {
+ // See through default argument expressions
+ return DefaultArg->getExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (isa<GNUNullExpr>(this)) {
+ // The GNU __null extension is always a null pointer constant.
+ return NPCK_GNUNull;
+ } else if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(this)) {
+ return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(this)) {
+ if (const Expr *Source = OVE->getSourceExpr())
+ return Source->isNullPointerConstant(Ctx, NPC);
+ }
+
+ // C++0x nullptr_t is always a null pointer constant.
+ if (getType()->isNullPtrType())
+ return NPCK_CXX0X_nullptr;
+
+ if (const RecordType *UT = getType()->getAsUnionType())
+ if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
+ const Expr *InitExpr = CLE->getInitializer();
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
+ return ILE->getInit(0)->isNullPointerConstant(Ctx, NPC);
+ }
+  // This expression must have an integer type.
+ if (!getType()->isIntegerType() ||
+ (Ctx.getLangOpts().CPlusPlus && getType()->isEnumeralType()))
+ return NPCK_NotNull;
+
+ // If we have an integer constant expression, we need to *evaluate* it and
+ // test for the value 0. Don't use the C++11 constant expression semantics
+ // for this, for now; once the dust settles on core issue 903, we might only
+ // allow a literal 0 here in C++11 mode.
+ if (Ctx.getLangOpts().CPlusPlus0x) {
+ if (!isCXX98IntegralConstantExpr(Ctx))
+ return NPCK_NotNull;
+ } else {
+ if (!isIntegerConstantExpr(Ctx))
+ return NPCK_NotNull;
+ }
+
+ return (EvaluateKnownConstInt(Ctx) == 0) ? NPCK_ZeroInteger : NPCK_NotNull;
+}
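+
+// Illustrative examples (not from the original source):
+//   0           -> NPCK_ZeroInteger     (void*)0  -> NPCK_ZeroInteger (in C)
+//   nullptr     -> NPCK_CXX0X_nullptr   __null    -> NPCK_GNUNull
+//   1, (int*)0  -> NPCK_NotNull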
+
+/// \brief If this expression is an l-value for an Objective C
+/// property, find the underlying property reference expression.
+const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
+ const Expr *E = this;
+ while (true) {
+ assert((E->getValueKind() == VK_LValue &&
+ E->getObjectKind() == OK_ObjCProperty) &&
+ "expression is not a property reference");
+ E = E->IgnoreParenCasts();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ E = BO->getRHS();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return cast<ObjCPropertyRefExpr>(E);
+}
+
+FieldDecl *Expr::getBitField() {
+ Expr *E = this->IgnoreParens();
+
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_LValueToRValue ||
+ (ICE->getValueKind() != VK_RValue && ICE->getCastKind() == CK_NoOp))
+ E = ICE->getSubExpr()->IgnoreParens();
+ else
+ break;
+ }
+
+ if (MemberExpr *MemRef = dyn_cast<MemberExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(MemRef->getMemberDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) {
+ if (BinOp->isAssignmentOp() && BinOp->getLHS())
+ return BinOp->getLHS()->getBitField();
+
+ if (BinOp->getOpcode() == BO_Comma && BinOp->getRHS())
+ return BinOp->getRHS()->getBitField();
+ }
+
+ return 0;
+}
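+
+// Illustrative example (not from the original source): given
+//   struct S { int b : 3; } s;
+// the expressions 's.b', 's.b = 1' and '(x, s.b)' all yield the FieldDecl for
+// 'b' here, while a reference to a non-bit-field member returns null.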
+
+bool Expr::refersToVectorElement() const {
+ const Expr *E = this->IgnoreParens();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getValueKind() != VK_RValue &&
+ ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr()->IgnoreParens();
+ else
+ break;
+ }
+
+ if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E))
+ return ASE->getBase()->getType()->isVectorType();
+
+ if (isa<ExtVectorElementExpr>(E))
+ return true;
+
+ return false;
+}
+
+/// isArrow - Return true if the base expression is a pointer to vector,
+/// return false if the base expression is a vector.
+bool ExtVectorElementExpr::isArrow() const {
+ return getBase()->getType()->isPointerType();
+}
+
+unsigned ExtVectorElementExpr::getNumElements() const {
+ if (const VectorType *VT = getType()->getAs<VectorType>())
+ return VT->getNumElements();
+ return 1;
+}
+
+/// containsDuplicateElements - Return true if any element access is repeated.
+bool ExtVectorElementExpr::containsDuplicateElements() const {
+ // FIXME: Refactor this code to an accessor on the AST node which returns the
+ // "type" of component access, and share with code below and in Sema.
+ StringRef Comp = Accessor->getName();
+
+ // Halving swizzles do not contain duplicate elements.
+ if (Comp == "hi" || Comp == "lo" || Comp == "even" || Comp == "odd")
+ return false;
+
+ // Advance past s-char prefix on hex swizzles.
+ if (Comp[0] == 's' || Comp[0] == 'S')
+ Comp = Comp.substr(1);
+
+ for (unsigned i = 0, e = Comp.size(); i != e; ++i)
+ if (Comp.substr(i + 1).find(Comp[i]) != StringRef::npos)
+ return true;
+
+ return false;
+}
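+
+// Illustrative examples (not from the original source): "xyzw" and the halving
+// swizzle "hi" contain no duplicates, "xyx" repeats 'x', and for a hex swizzle
+// written as "s012" the leading 's' is skipped before the check.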
+
+/// getEncodedElementAccess - Encode the elements accessed by this swizzle as a
+/// list of source-vector element indices.
+void ExtVectorElementExpr::getEncodedElementAccess(
+ SmallVectorImpl<unsigned> &Elts) const {
+ StringRef Comp = Accessor->getName();
+ if (Comp[0] == 's' || Comp[0] == 'S')
+ Comp = Comp.substr(1);
+
+ bool isHi = Comp == "hi";
+ bool isLo = Comp == "lo";
+ bool isEven = Comp == "even";
+ bool isOdd = Comp == "odd";
+
+ for (unsigned i = 0, e = getNumElements(); i != e; ++i) {
+ uint64_t Index;
+
+ if (isHi)
+ Index = e + i;
+ else if (isLo)
+ Index = i;
+ else if (isEven)
+ Index = 2 * i;
+ else if (isOdd)
+ Index = 2 * i + 1;
+ else
+ Index = ExtVectorType::getAccessorIdx(Comp[i]);
+
+ Elts.push_back(Index);
+ }
+}
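+
+// Illustrative example (not from the original source): for a four-element
+// ext_vector 'v',
+//   v.lo   -> {0, 1}     v.hi  -> {2, 3}
+//   v.even -> {0, 2}     v.odd -> {1, 3}
+//   v.xw   -> {0, 3}     (via ExtVectorType::getAccessorIdx)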
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ bool IsInstanceSuper,
+ QualType SuperType,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
+ : Sel.getAsOpaquePtr())),
+ Kind(IsInstanceSuper? SuperInstance : SuperClass),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ SuperLoc(SuperLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+{
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(SuperType.getAsOpaquePtr());
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
+ : Sel.getAsOpaquePtr())),
+ Kind(Class),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+{
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
+ Receiver->isTypeDependent(),
+ Receiver->isInstantiationDependent(),
+ Receiver->containsUnexpandedParameterPack()),
+ SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
+ : Sel.getAsOpaquePtr())),
+ Kind(Instance),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+{
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK) {
+ setNumArgs(Args.size());
+ Expr **MyArgs = getArgs();
+ for (unsigned I = 0; I != Args.size(); ++I) {
+ if (Args[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ MyArgs[I] = Args[I];
+ }
+
+ SelLocsKind = SelLocsK;
+ if (!isImplicit()) {
+ if (SelLocsK == SelLoc_NonStandard)
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ }
+}
+
+ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ bool IsInstanceSuper,
+ QualType SuperType,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
+ SuperType, Sel, SelLocs, SelLocsK,
+ Method, Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
+ SelLocs, SelLocsK, Method, Args, RBracLoc,
+ isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
+ SelLocs, SelLocsK, Method, Args, RBracLoc,
+ isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(ASTContext &Context,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
+ return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(ASTContext &C,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBraceLoc,
+ ArrayRef<SourceLocation> SelLocs,
+ Selector Sel,
+ SelectorLocationsKind &SelLocsK) {
+ SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
+ unsigned NumStoredSelLocs = (SelLocsK == SelLoc_NonStandard) ? SelLocs.size()
+ : 0;
+ return alloc(C, Args.size(), NumStoredSelLocs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(ASTContext &C,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
+ NumArgs * sizeof(Expr *) + NumStoredSelLocs * sizeof(SourceLocation);
+ return (ObjCMessageExpr *)C.Allocate(Size,
+ llvm::AlignOf<ObjCMessageExpr>::Alignment);
+}
+
+void ObjCMessageExpr::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+SourceRange ObjCMessageExpr::getReceiverRange() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getSourceRange();
+
+ case Class:
+ return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
+
+ case SuperInstance:
+ case SuperClass:
+ return getSuperLoc();
+ }
+
+ llvm_unreachable("Invalid ReceiverKind!");
+}
+
+Selector ObjCMessageExpr::getSelector() const {
+ if (HasMethod)
+ return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
+ ->getSelector();
+ return Selector(SelectorOrMethod);
+}
+
+ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ if (const ObjCObjectPointerType *Ptr
+ = getInstanceReceiver()->getType()->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+ break;
+
+ case Class:
+ if (const ObjCObjectType *Ty
+ = getClassReceiver()->getAs<ObjCObjectType>())
+ return Ty->getInterface();
+ break;
+
+ case SuperInstance:
+ if (const ObjCObjectPointerType *Ptr
+ = getSuperType()->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+ break;
+
+ case SuperClass:
+ if (const ObjCObjectType *Iface
+ = getSuperType()->getAs<ObjCObjectType>())
+ return Iface->getInterface();
+ break;
+ }
+
+ return 0;
+}
+
+StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
+ switch (getBridgeKind()) {
+ case OBC_Bridge:
+ return "__bridge";
+ case OBC_BridgeTransfer:
+ return "__bridge_transfer";
+ case OBC_BridgeRetained:
+ return "__bridge_retained";
+ }
+
+ llvm_unreachable("Invalid BridgeKind!");
+}
+
+bool ChooseExpr::isConditionTrue(const ASTContext &C) const {
+ return getCond()->EvaluateKnownConstInt(C) != 0;
+}
+
+ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
+ QualType Type, SourceLocation BLoc,
+ SourceLocation RP)
+ : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
+ Type->isDependentType(), Type->isDependentType(),
+ Type->isInstantiationDependentType(),
+ Type->containsUnexpandedParameterPack()),
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(nexpr)
+{
+ SubExprs = new (C) Stmt*[nexpr];
+ for (unsigned i = 0; i < nexpr; i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
+void ShuffleVectorExpr::setExprs(ASTContext &C, Expr ** Exprs,
+ unsigned NumExprs) {
+ if (SubExprs) C.Deallocate(SubExprs);
+
+ SubExprs = new (C) Stmt* [NumExprs];
+ this->NumExprs = NumExprs;
+ memcpy(SubExprs, Exprs, sizeof(Expr *) * NumExprs);
+}
+
+GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ TypeSourceInfo **AssocTypes, Expr **AssocExprs,
+ unsigned NumAssocs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex)
+ : Expr(GenericSelectionExprClass,
+ AssocExprs[ResultIndex]->getType(),
+ AssocExprs[ResultIndex]->getValueKind(),
+ AssocExprs[ResultIndex]->getObjectKind(),
+ AssocExprs[ResultIndex]->isTypeDependent(),
+ AssocExprs[ResultIndex]->isValueDependent(),
+ AssocExprs[ResultIndex]->isInstantiationDependent(),
+ ContainsUnexpandedParameterPack),
+ AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
+ SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
+ ResultIndex(ResultIndex), GenericLoc(GenericLoc), DefaultLoc(DefaultLoc),
+ RParenLoc(RParenLoc) {
+ SubExprs[CONTROLLING] = ControllingExpr;
+ std::copy(AssocTypes, AssocTypes+NumAssocs, this->AssocTypes);
+ std::copy(AssocExprs, AssocExprs+NumAssocs, SubExprs+END_EXPR);
+}
+
+GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ TypeSourceInfo **AssocTypes, Expr **AssocExprs,
+ unsigned NumAssocs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack)
+ : Expr(GenericSelectionExprClass,
+ Context.DependentTy,
+ VK_RValue,
+ OK_Ordinary,
+ /*isTypeDependent=*/true,
+ /*isValueDependent=*/true,
+ /*isInstantiationDependent=*/true,
+ ContainsUnexpandedParameterPack),
+ AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
+ SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
+ ResultIndex(-1U), GenericLoc(GenericLoc), DefaultLoc(DefaultLoc),
+ RParenLoc(RParenLoc) {
+ SubExprs[CONTROLLING] = ControllingExpr;
+ std::copy(AssocTypes, AssocTypes+NumAssocs, this->AssocTypes);
+ std::copy(AssocExprs, AssocExprs+NumAssocs, SubExprs+END_EXPR);
+}
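+
+// Illustrative C11 source form for this node:
+//   _Generic(x, int: "int", float: "float", default: "other")
+// The second constructor above covers the result-dependent case, where the
+// controlling expression is dependent and no association can be chosen yet.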
+
+//===----------------------------------------------------------------------===//
+// DesignatedInitExpr
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ if (Field.NameOrField & 0x01)
+ return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
+ else
+ return getField()->getIdentifier();
+}
+
+DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
+ unsigned NumDesignators,
+ const Designator *Designators,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax,
+ Expr **IndexExprs,
+ unsigned NumIndexExprs,
+ Expr *Init)
+ : Expr(DesignatedInitExprClass, Ty,
+ Init->getValueKind(), Init->getObjectKind(),
+ Init->isTypeDependent(), Init->isValueDependent(),
+ Init->isInstantiationDependent(),
+ Init->containsUnexpandedParameterPack()),
+ EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
+ NumDesignators(NumDesignators), NumSubExprs(NumIndexExprs + 1) {
+ this->Designators = new (C) Designator[NumDesignators];
+
+ // Record the initializer itself.
+ child_range Child = children();
+ *Child++ = Init;
+
+ // Copy the designators and their subexpressions, computing
+ // value-dependence along the way.
+ unsigned IndexIdx = 0;
+ for (unsigned I = 0; I != NumDesignators; ++I) {
+ this->Designators[I] = Designators[I];
+
+ if (this->Designators[I].isArrayDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Index = IndexExprs[IndexIdx];
+ if (Index->isTypeDependent() || Index->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Index->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ // Propagate unexpanded parameter packs.
+ if (Index->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ // Copy the index expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ } else if (this->Designators[I].isArrayRangeDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Start = IndexExprs[IndexIdx];
+ Expr *End = IndexExprs[IndexIdx + 1];
+ if (Start->isTypeDependent() || Start->isValueDependent() ||
+ End->isTypeDependent() || End->isValueDependent()) {
+ ExprBits.ValueDependent = true;
+ ExprBits.InstantiationDependent = true;
+ } else if (Start->isInstantiationDependent() ||
+ End->isInstantiationDependent()) {
+ ExprBits.InstantiationDependent = true;
+ }
+
+ // Propagate unexpanded parameter packs.
+ if (Start->containsUnexpandedParameterPack() ||
+ End->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ // Copy the start/end expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ *Child++ = IndexExprs[IndexIdx++];
+ }
+ }
+
+ assert(IndexIdx == NumIndexExprs && "Wrong number of index expressions");
+}
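+
+// Illustrative source forms for the designator kinds handled above
+// ('Point' is an arbitrary struct):
+//   struct Point p = { .x = 1 };        // field designator
+//   int a[10] = { [2] = 3 };            // array designator
+//   int b[10] = { [4 ... 6] = 7 };      // GNU array-range designator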
+
+DesignatedInitExpr *
+DesignatedInitExpr::Create(ASTContext &C, Designator *Designators,
+ unsigned NumDesignators,
+ Expr **IndexExprs, unsigned NumIndexExprs,
+ SourceLocation ColonOrEqualLoc,
+ bool UsesColonSyntax, Expr *Init) {
+ void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
+ sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
+ ColonOrEqualLoc, UsesColonSyntax,
+ IndexExprs, NumIndexExprs, Init);
+}
+
+DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(ASTContext &C,
+ unsigned NumIndexExprs) {
+ void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
+ sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
+}
+
+void DesignatedInitExpr::setDesignators(ASTContext &C,
+ const Designator *Desigs,
+ unsigned NumDesigs) {
+ Designators = new (C) Designator[NumDesigs];
+ NumDesignators = NumDesigs;
+ for (unsigned I = 0; I != NumDesigs; ++I)
+ Designators[I] = Desigs[I];
+}
+
+SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
+ DesignatedInitExpr *DIE = const_cast<DesignatedInitExpr*>(this);
+ if (size() == 1)
+ return DIE->getDesignator(0)->getSourceRange();
+ return SourceRange(DIE->getDesignator(0)->getStartLocation(),
+ DIE->getDesignator(size()-1)->getEndLocation());
+}
+
+SourceRange DesignatedInitExpr::getSourceRange() const {
+ SourceLocation StartLoc;
+ Designator &First =
+ *const_cast<DesignatedInitExpr*>(this)->designators_begin();
+ if (First.isFieldDesignator()) {
+ if (GNUSyntax)
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
+ else
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
+ } else
+ StartLoc =
+ SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
+ return SourceRange(StartLoc, getInit()->getSourceRange().getEnd());
+}
+
+Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) {
+ assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+}
+
+Expr *DesignatedInitExpr::getArrayRangeStart(const Designator& D) {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+}
+
+Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator& D) {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
+}
+
+/// \brief Replaces the designator at index @p Idx with the series
+/// of designators in [First, Last).
+void DesignatedInitExpr::ExpandDesignator(ASTContext &C, unsigned Idx,
+ const Designator *First,
+ const Designator *Last) {
+ unsigned NumNewDesignators = Last - First;
+  if (NumNewDesignators == 0) {
+    // An empty replacement range removes the designator at Idx: shift the
+    // remaining designators down by one and shrink the count.
+    std::copy(Designators + Idx + 1, Designators + NumDesignators,
+              Designators + Idx);
+    --NumDesignators;
+    return;
+ } else if (NumNewDesignators == 1) {
+ Designators[Idx] = *First;
+ return;
+ }
+
+ Designator *NewDesignators
+ = new (C) Designator[NumDesignators - 1 + NumNewDesignators];
+ std::copy(Designators, Designators + Idx, NewDesignators);
+ std::copy(First, Last, NewDesignators + Idx);
+ std::copy(Designators + Idx + 1, Designators + NumDesignators,
+ NewDesignators + Idx + NumNewDesignators);
+ Designators = NewDesignators;
+ NumDesignators = NumDesignators - 1 + NumNewDesignators;
+}
+
+ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
+ Expr **exprs, unsigned nexprs,
+ SourceLocation rparenloc)
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
+ Exprs = new (C) Stmt*[nexprs];
+ for (unsigned i = 0; i != nexprs; ++i) {
+ if (exprs[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (exprs[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprs[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (exprs[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ Exprs[i] = exprs[i];
+ }
+}
+
+const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
+ e = ewc->getSubExpr();
+ if (const MaterializeTemporaryExpr *m = dyn_cast<MaterializeTemporaryExpr>(e))
+ e = m->GetTemporaryExpr();
+ e = cast<CXXConstructExpr>(e)->getArg(0);
+ while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ e = ice->getSubExpr();
+ return cast<OpaqueValueExpr>(e);
+}
+
+PseudoObjectExpr *PseudoObjectExpr::Create(ASTContext &Context, EmptyShell sh,
+ unsigned numSemanticExprs) {
+ void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) +
+ (1 + numSemanticExprs) * sizeof(Expr*),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs)
+ : Expr(PseudoObjectExprClass, shell) {
+ PseudoObjectExprBits.NumSubExprs = numSemanticExprs + 1;
+}
+
+PseudoObjectExpr *PseudoObjectExpr::Create(ASTContext &C, Expr *syntax,
+ ArrayRef<Expr*> semantics,
+ unsigned resultIndex) {
+ assert(syntax && "no syntactic expression!");
+ assert(semantics.size() && "no semantic expressions!");
+
+ QualType type;
+ ExprValueKind VK;
+ if (resultIndex == NoResult) {
+ type = C.VoidTy;
+ VK = VK_RValue;
+ } else {
+ assert(resultIndex < semantics.size());
+ type = semantics[resultIndex]->getType();
+ VK = semantics[resultIndex]->getValueKind();
+ assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
+ }
+
+ void *buffer = C.Allocate(sizeof(PseudoObjectExpr) +
+ (1 + semantics.size()) * sizeof(Expr*),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
+ resultIndex);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
+ Expr *syntax, ArrayRef<Expr*> semantics,
+ unsigned resultIndex)
+ : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
+ /*filled in at end of ctor*/ false, false, false, false) {
+ PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
+ PseudoObjectExprBits.ResultIndex = resultIndex + 1;
+
+ for (unsigned i = 0, e = semantics.size() + 1; i != e; ++i) {
+ Expr *E = (i == 0 ? syntax : semantics[i-1]);
+ getSubExprsBuffer()[i] = E;
+
+ if (E->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (E->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (E->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (E->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ if (isa<OpaqueValueExpr>(E))
+ assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != 0 &&
+ "opaque-value semantic expressions for pseudo-object "
+ "operations must have sources");
+ }
+}
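+
+// Illustrative example: the Objective-C property assignment
+//   obj.count = 3;
+// keeps "obj.count = 3" as the syntactic expression, while the semantic
+// expressions hold the opaque value for 'obj' and the setter message send
+// that actually performs the store ('obj' is an arbitrary name).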
+
+//===----------------------------------------------------------------------===//
+// ExprIterator.
+//===----------------------------------------------------------------------===//
+
+Expr* ExprIterator::operator[](size_t idx) { return cast<Expr>(I[idx]); }
+Expr* ExprIterator::operator*() const { return cast<Expr>(*I); }
+Expr* ExprIterator::operator->() const { return cast<Expr>(*I); }
+const Expr* ConstExprIterator::operator[](size_t idx) const {
+ return cast<Expr>(I[idx]);
+}
+const Expr* ConstExprIterator::operator*() const { return cast<Expr>(*I); }
+const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); }
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+// UnaryExprOrTypeTraitExpr
+Stmt::child_range UnaryExprOrTypeTraitExpr::children() {
+  // If the operand is a type and that type is a VLA type (and not a typedef),
+  // the size expression of the VLA needs to be treated as an executable
+  // expression. Why isn't this weirdness documented better in StmtIterator?
+ if (isArgumentType()) {
+ if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
+ getArgumentType().getTypePtr()))
+ return child_range(child_iterator(T), child_iterator());
+ return child_range();
+ }
+ return child_range(&Argument.Ex, &Argument.Ex + 1);
+}
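+
+// Illustrative example of the VLA case above:
+//   void f(int n) { int vla[n]; (void)sizeof(vla); }
+// Here sizeof(vla) must evaluate 'n' at run time, so the VLA's size
+// expression is exposed as a child of this expression.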
+
+// ObjCMessageExpr
+Stmt::child_range ObjCMessageExpr::children() {
+ Stmt **begin;
+ if (getReceiverKind() == Instance)
+ begin = reinterpret_cast<Stmt **>(this + 1);
+ else
+ begin = reinterpret_cast<Stmt **>(getArgs());
+ return child_range(begin,
+ reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
+}
+
+ObjCArrayLiteral::ObjCArrayLiteral(llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl *Method,
+ SourceRange SR)
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method)
+{
+ Expr **SaveElements = getElements();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Elements[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Elements[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SaveElements[I] = Elements[I];
+ }
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::Create(ASTContext &C,
+ llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR) {
+ void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
+ + Elements.size() * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
+}
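+
+// Illustrative source form:
+//   NSArray *array = @[ first, second ];
+// ArrayWithObjectsMethod records the method used to build the literal
+// (typically +arrayWithObjects:count:).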
+
+ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(ASTContext &C,
+ unsigned NumElements) {
+ void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
+ + NumElements * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
+}
+
+ObjCDictionaryLiteral::ObjCDictionaryLiteral(
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR)
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
+ DictWithObjectsMethod(method)
+{
+ KeyValuePair *KeyValues = getKeyValues();
+ ExpansionData *Expansions = getExpansionData();
+ for (unsigned I = 0; I < NumElements; I++) {
+ if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
+ VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (VK[I].Key->isInstantiationDependent() ||
+ VK[I].Value->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (VK[I].EllipsisLoc.isInvalid() &&
+ (VK[I].Key->containsUnexpandedParameterPack() ||
+ VK[I].Value->containsUnexpandedParameterPack()))
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ KeyValues[I].Key = VK[I].Key;
+ KeyValues[I].Value = VK[I].Value;
+ if (Expansions) {
+ Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
+ if (VK[I].NumExpansions)
+ Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
+ else
+ Expansions[I].NumExpansionsPlusOne = 0;
+ }
+ }
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::Create(ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * VK.size();
+
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::CreateEmpty(ASTContext &C, unsigned NumElements,
+ bool HasPackExpansions) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * NumElements;
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * NumElements + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(EmptyShell(), NumElements,
+ HasPackExpansions);
+}
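+
+// Illustrative source form:
+//   NSDictionary *dict = @{ key : value };
+// DictWithObjectsMethod records the method used to build the literal
+// (typically +dictionaryWithObjects:forKeys:count:).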
+
+ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(ASTContext &C,
+ Expr *base,
+ Expr *key, QualType T,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod,
+ SourceLocation RB) {
+ void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
+ return new (Mem) ObjCSubscriptRefExpr(base, key, T, VK_LValue,
+ OK_ObjCSubscript,
+ getMethod, setMethod, RB);
+}
+
+AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
+ QualType t, AtomicOp op, SourceLocation RP)
+ : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumSubExprs(nexpr), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
+{
+ assert(nexpr == getNumSubExprs(op) && "wrong number of subexpressions");
+ for (unsigned i = 0; i < nexpr; i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
+unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
+ switch (Op) {
+ case AO__c11_atomic_init:
+ case AO__c11_atomic_load:
+ case AO__atomic_load_n:
+ return 2;
+
+ case AO__c11_atomic_store:
+ case AO__c11_atomic_exchange:
+ case AO__atomic_load:
+ case AO__atomic_store:
+ case AO__atomic_store_n:
+ case AO__atomic_exchange_n:
+ case AO__c11_atomic_fetch_add:
+ case AO__c11_atomic_fetch_sub:
+ case AO__c11_atomic_fetch_and:
+ case AO__c11_atomic_fetch_or:
+ case AO__c11_atomic_fetch_xor:
+ case AO__atomic_fetch_add:
+ case AO__atomic_fetch_sub:
+ case AO__atomic_fetch_and:
+ case AO__atomic_fetch_or:
+ case AO__atomic_fetch_xor:
+ case AO__atomic_fetch_nand:
+ case AO__atomic_add_fetch:
+ case AO__atomic_sub_fetch:
+ case AO__atomic_and_fetch:
+ case AO__atomic_or_fetch:
+ case AO__atomic_xor_fetch:
+ case AO__atomic_nand_fetch:
+ return 3;
+
+ case AO__atomic_exchange:
+ return 4;
+
+ case AO__c11_atomic_compare_exchange_strong:
+ case AO__c11_atomic_compare_exchange_weak:
+ return 5;
+
+ case AO__atomic_compare_exchange:
+ case AO__atomic_compare_exchange_n:
+ return 6;
+ }
+ llvm_unreachable("unknown atomic op");
+}
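+
+// For example, __c11_atomic_load(ptr, order) has two subexpressions, while
+// __c11_atomic_compare_exchange_strong(ptr, expected, desired, success_order,
+// failure_order) has five, matching the counts returned above.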
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
new file mode 100644
index 0000000..8cf519c
--- /dev/null
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -0,0 +1,1335 @@
+//===--- ExprCXX.cpp - (C++) Expression AST Node Implementation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Expr declared in ExprCXX.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+using namespace clang;
+
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+QualType CXXTypeidExpr::getTypeOperand() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
+ return Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType()
+ .getUnqualifiedType();
+}
+
+QualType CXXUuidofExpr::getTypeOperand() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ return Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType()
+ .getUnqualifiedType();
+}
+
+// CXXScalarValueInitExpr
+SourceRange CXXScalarValueInitExpr::getSourceRange() const {
+ SourceLocation Start = RParenLoc;
+ if (TypeInfo)
+ Start = TypeInfo->getTypeLoc().getBeginLoc();
+ return SourceRange(Start, RParenLoc);
+}
+
+// CXXNewExpr
+CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
+ FunctionDecl *operatorDelete,
+ bool usualArrayDeleteWantsSize,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ SourceRange typeIdParens, Expr *arraySize,
+ InitializationStyle initializationStyle,
+ Expr *initializer, QualType ty,
+ TypeSourceInfo *allocatedTypeInfo,
+ SourceLocation startLoc, SourceRange directInitRange)
+ : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
+ ty->isDependentType(), ty->isDependentType(),
+ ty->isInstantiationDependentType(),
+ ty->containsUnexpandedParameterPack()),
+ SubExprs(0), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
+ AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
+ StartLoc(startLoc), DirectInitRange(directInitRange),
+ GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
+ assert((initializer != 0 || initializationStyle == NoInit) &&
+ "Only NoInit can have no initializer.");
+ StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
+ AllocateArgsArray(C, arraySize != 0, numPlaceArgs, initializer != 0);
+ unsigned i = 0;
+ if (Array) {
+ if (arraySize->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
+ if (arraySize->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = arraySize;
+ }
+
+ if (initializer) {
+ if (initializer->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
+ if (initializer->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = initializer;
+ }
+
+ for (unsigned j = 0; j < NumPlacementArgs; ++j) {
+ if (placementArgs[j]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (placementArgs[j]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = placementArgs[j];
+ }
+}
+
+void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
+ unsigned numPlaceArgs, bool hasInitializer){
+ assert(SubExprs == 0 && "SubExprs already allocated");
+ Array = isArray;
+ NumPlacementArgs = numPlaceArgs;
+
+ unsigned TotalSize = Array + hasInitializer + NumPlacementArgs;
+ SubExprs = new (C) Stmt*[TotalSize];
+}
+
+bool CXXNewExpr::shouldNullCheckAllocation(ASTContext &Ctx) const {
+ return getOperatorNew()->getType()->
+ castAs<FunctionProtoType>()->isNothrow(Ctx);
+}
+
+SourceLocation CXXNewExpr::getEndLoc() const {
+ switch (getInitializationStyle()) {
+ case NoInit:
+ return AllocatedTypeInfo->getTypeLoc().getEndLoc();
+ case CallInit:
+ return DirectInitRange.getEnd();
+ case ListInit:
+ return getInitializer()->getSourceRange().getEnd();
+ }
+ llvm_unreachable("bogus initialization style");
+}
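+
+// The three initialization styles, for illustration:
+//   new int;      // NoInit   - range ends at the allocated type
+//   new int(42);  // CallInit - range ends at the ')' of the direct-init
+//   new int{42};  // ListInit - range ends at the '}' of the braced-init-list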
+
+// CXXDeleteExpr
+QualType CXXDeleteExpr::getDestroyedType() const {
+ const Expr *Arg = getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+ // The type-to-delete may not be a pointer if it's a dependent type.
+ const QualType ArgType = Arg->getType();
+
+ if (ArgType->isDependentType() && !ArgType->isPointerType())
+ return QualType();
+
+ return ArgType->getAs<PointerType>()->getPointeeType();
+}
+
+// CXXPseudoDestructorExpr
+PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
+ : Type(Info)
+{
+ Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
+}
+
+CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(ASTContext &Context,
+ Expr *Base, bool isArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType,
+ SourceLocation ColonColonLoc, SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage DestroyedType)
+ : Expr(CXXPseudoDestructorExprClass,
+ Context.getPointerType(Context.getFunctionType(Context.VoidTy, 0, 0,
+ FunctionProtoType::ExtProtoInfo())),
+ VK_RValue, OK_Ordinary,
+ /*isTypeDependent=*/(Base->isTypeDependent() ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
+ /*isValueDependent=*/Base->isValueDependent(),
+ (Base->isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
+ (ScopeType &&
+ ScopeType->getType()->isInstantiationDependentType()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->isInstantiationDependentType())),
+ // ContainsUnexpandedParameterPack
+ (Base->containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ (ScopeType &&
+ ScopeType->getType()->containsUnexpandedParameterPack()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->containsUnexpandedParameterPack()))),
+ Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
+ DestroyedType(DestroyedType) { }
+
+QualType CXXPseudoDestructorExpr::getDestroyedType() const {
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ return TInfo->getType();
+
+ return QualType();
+}
+
+SourceRange CXXPseudoDestructorExpr::getSourceRange() const {
+ SourceLocation End = DestroyedType.getLocation();
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ End = TInfo->getTypeLoc().getLocalSourceRange().getEnd();
+ return SourceRange(Base->getLocStart(), End);
+}
+
+// UnresolvedLookupExpr
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::Create(ASTContext &C,
+ CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool ADL,
+ const TemplateArgumentListInfo *Args,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+{
+ assert(Args || TemplateKWLoc.isValid());
+ unsigned num_args = Args ? Args->size() : 0;
+ void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
+ ASTTemplateKWAndArgsInfo::sizeFor(num_args));
+ return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
+ TemplateKWLoc, NameInfo,
+ ADL, /*Overload*/ true, Args,
+ Begin, End, /*StdIsAssociated=*/false);
+}
+
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedLookupExpr);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedLookupExpr>());
+ UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End,
+ bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack)
+ : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
+ KnownDependent,
+ (KnownInstantiationDependent ||
+ NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (KnownContainsUnexpandedParameterPack ||
+ NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ NameInfo(NameInfo), QualifierLoc(QualifierLoc),
+ Results(0), NumResults(End - Begin),
+ HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid())
+{
+ NumResults = End - Begin;
+ if (NumResults) {
+ // Determine whether this expression is type-dependent.
+ for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
+ if ((*I)->getDeclContext()->isDependentContext() ||
+ isa<UnresolvedUsingValueDecl>(*I)) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ }
+
+ Results = static_cast<DeclAccessPair *>(
+ C.Allocate(sizeof(DeclAccessPair) * NumResults,
+ llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, &*Begin.getIterator(),
+ NumResults * sizeof(DeclAccessPair));
+ }
+
+ // If we have explicit template arguments, check for dependent
+ // template arguments and whether they contain any unexpanded pack
+ // expansions.
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+
+ if (Dependent) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ if (InstantiationDependent)
+ ExprBits.InstantiationDependent = true;
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+
+ if (isTypeDependent())
+ setType(C.DependentTy);
+}
+
+void OverloadExpr::initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(Results == 0 && "Results already initialized!");
+ NumResults = End - Begin;
+ if (NumResults) {
+ Results = static_cast<DeclAccessPair *>(
+                 C.Allocate(sizeof(DeclAccessPair) * NumResults,
+                            llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, &*Begin.getIterator(),
+ NumResults * sizeof(DeclAccessPair));
+ }
+}
+
+CXXRecordDecl *OverloadExpr::getNamingClass() const {
+ if (isa<UnresolvedLookupExpr>(this))
+ return cast<UnresolvedLookupExpr>(this)->getNamingClass();
+ else
+ return cast<UnresolvedMemberExpr>(this)->getNamingClass();
+}
+
+// DependentScopeDeclRefExpr
+DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args)
+ : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
+ true, true,
+ (NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ QualifierLoc(QualifierLoc), NameInfo(NameInfo),
+ HasTemplateKWAndArgsInfo(Args != 0 || TemplateKWLoc.isValid())
+{
+ if (Args) {
+ bool Dependent = true;
+ bool InstantiationDependent = true;
+ bool ContainsUnexpandedParameterPack
+ = ExprBits.ContainsUnexpandedParameterPack;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *Args,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+}
+
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::Create(ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args) {
+ std::size_t size = sizeof(DependentScopeDeclRefExpr);
+ if (Args)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(Args->size());
+ else if (TemplateKWLoc.isValid())
+ size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+ void *Mem = C.Allocate(size);
+ return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
+ TemplateKWLoc, NameInfo, Args);
+}
+
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(DependentScopeDeclRefExpr);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size);
+ DependentScopeDeclRefExpr *E
+ = new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
+ SourceLocation(),
+ DeclarationNameInfo(), 0);
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+SourceRange CXXConstructExpr::getSourceRange() const {
+ if (isa<CXXTemporaryObjectExpr>(this))
+ return cast<CXXTemporaryObjectExpr>(this)->getSourceRange();
+
+ if (ParenRange.isValid())
+ return SourceRange(Loc, ParenRange.getEnd());
+
+ SourceLocation End = Loc;
+ for (unsigned I = getNumArgs(); I > 0; --I) {
+ const Expr *Arg = getArg(I-1);
+ if (!Arg->isDefaultArgument()) {
+ SourceLocation NewEnd = Arg->getLocEnd();
+ if (NewEnd.isValid()) {
+ End = NewEnd;
+ break;
+ }
+ }
+ }
+
+ return SourceRange(Loc, End);
+}
+
+SourceRange CXXOperatorCallExpr::getSourceRange() const {
+ OverloadedOperatorKind Kind = getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (getNumArgs() == 1)
+ // Prefix operator
+ return SourceRange(getOperatorLoc(),
+ getArg(0)->getSourceRange().getEnd());
+ else
+ // Postfix operator
+ return SourceRange(getArg(0)->getSourceRange().getBegin(),
+ getOperatorLoc());
+ } else if (Kind == OO_Arrow) {
+ return getArg(0)->getSourceRange();
+ } else if (Kind == OO_Call) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ } else if (Kind == OO_Subscript) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ } else if (getNumArgs() == 1) {
+ return SourceRange(getOperatorLoc(), getArg(0)->getSourceRange().getEnd());
+ } else if (getNumArgs() == 2) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(),
+ getArg(1)->getSourceRange().getEnd());
+ } else {
+ return SourceRange();
+ }
+}
+
+Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
+ if (const MemberExpr *MemExpr =
+ dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ return MemExpr->getBase();
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return 0;
+}
+
+CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
+ if (const MemberExpr *MemExpr =
+ dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ return cast<CXXMethodDecl>(MemExpr->getMemberDecl());
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return 0;
+}
+
+
+CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() {
+ Expr* ThisArg = getImplicitObjectArgument();
+ if (!ThisArg)
+ return 0;
+
+ if (ThisArg->getType()->isAnyPointerType())
+ return ThisArg->getType()->getPointeeType()->getAsCXXRecordDecl();
+
+ return ThisArg->getType()->getAsCXXRecordDecl();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Named casts
+//===----------------------------------------------------------------------===//
+
+/// getCastName - Get the name of the C++ cast being used, e.g.,
+/// "static_cast", "dynamic_cast", "reinterpret_cast", or
+/// "const_cast". The returned pointer must not be freed.
+const char *CXXNamedCastExpr::getCastName() const {
+ switch (getStmtClass()) {
+ case CXXStaticCastExprClass: return "static_cast";
+ case CXXDynamicCastExprClass: return "dynamic_cast";
+ case CXXReinterpretCastExprClass: return "reinterpret_cast";
+ case CXXConstCastExprClass: return "const_cast";
+ default: return "<invalid cast>";
+ }
+}
+
+CXXStaticCastExpr *CXXStaticCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(sizeof(CXXStaticCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXStaticCastExpr *E =
+ new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(CXXStaticCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize);
+}
+
+CXXDynamicCastExpr *CXXDynamicCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(sizeof(CXXDynamicCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXDynamicCastExpr *E =
+ new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(CXXDynamicCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize);
+}
+
+/// isAlwaysNull - Return whether the result of the dynamic_cast is proven
+/// to always be null. For example:
+///
+/// struct A { };
+/// struct B final : A { };
+/// struct C { };
+///
+/// C *f(B* b) { return dynamic_cast<C*>(b); }
+bool CXXDynamicCastExpr::isAlwaysNull() const
+{
+ QualType SrcType = getSubExpr()->getType();
+ QualType DestType = getType();
+
+ if (const PointerType *SrcPTy = SrcType->getAs<PointerType>()) {
+ SrcType = SrcPTy->getPointeeType();
+ DestType = DestType->castAs<PointerType>()->getPointeeType();
+ }
+
+ const CXXRecordDecl *SrcRD =
+ cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
+
+ if (!SrcRD->hasAttr<FinalAttr>())
+ return false;
+
+ const CXXRecordDecl *DestRD =
+ cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
+
+ return !DestRD->isDerivedFrom(SrcRD);
+}
+
+CXXReinterpretCastExpr *
+CXXReinterpretCastExpr::Create(ASTContext &C, QualType T, ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer =
+ C.Allocate(sizeof(CXXReinterpretCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXReinterpretCastExpr *E =
+ new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXReinterpretCastExpr *
+CXXReinterpretCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
+ void *Buffer = C.Allocate(sizeof(CXXReinterpretCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
+}
+
+CXXConstCastExpr *CXXConstCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK, Expr *Op,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ return new (C) CXXConstCastExpr(T, VK, Op, WrittenTy, L, RParenLoc);
+}
+
+CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(ASTContext &C) {
+ return new (C) CXXConstCastExpr(EmptyShell());
+}
+
+CXXFunctionalCastExpr *
+CXXFunctionalCastExpr::Create(ASTContext &C, QualType T, ExprValueKind VK,
+ TypeSourceInfo *Written, SourceLocation L,
+ CastKind K, Expr *Op, const CXXCastPath *BasePath,
+ SourceLocation R) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXFunctionalCastExpr *E =
+ new (Buffer) CXXFunctionalCastExpr(T, VK, Written, L, K, Op, PathSize, R);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXFunctionalCastExpr *
+CXXFunctionalCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
+ void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
+}
+
+UserDefinedLiteral::LiteralOperatorKind
+UserDefinedLiteral::getLiteralOperatorKind() const {
+ if (getNumArgs() == 0)
+ return LOK_Template;
+ if (getNumArgs() == 2)
+ return LOK_String;
+
+ assert(getNumArgs() == 1 && "unexpected #args in literal operator call");
+ QualType ParamTy =
+ cast<FunctionDecl>(getCalleeDecl())->getParamDecl(0)->getType();
+ if (ParamTy->isPointerType())
+ return LOK_Raw;
+ if (ParamTy->isAnyCharacterType())
+ return LOK_Character;
+ if (ParamTy->isIntegerType())
+ return LOK_Integer;
+ if (ParamTy->isFloatingType())
+ return LOK_Floating;
+
+ llvm_unreachable("unknown kind of literal operator");
+}
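+
+// Illustrative operator forms for each kind ('Foo' and '_f' are arbitrary):
+//   Foo operator"" _f(const char *);                // LOK_Raw
+//   Foo operator"" _f(const char *, std::size_t);   // LOK_String
+//   Foo operator"" _f(char);                        // LOK_Character
+//   Foo operator"" _f(unsigned long long);          // LOK_Integer
+//   Foo operator"" _f(long double);                 // LOK_Floating
+//   template<char...> Foo operator"" _f();          // LOK_Template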
+
+Expr *UserDefinedLiteral::getCookedLiteral() {
+#ifndef NDEBUG
+ LiteralOperatorKind LOK = getLiteralOperatorKind();
+ assert(LOK != LOK_Template && LOK != LOK_Raw && "not a cooked literal");
+#endif
+ return getArg(0);
+}
+
+const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
+ return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
+}
+
+CXXDefaultArgExpr *
+CXXDefaultArgExpr::Create(ASTContext &C, SourceLocation Loc,
+ ParmVarDecl *Param, Expr *SubExpr) {
+ void *Mem = C.Allocate(sizeof(CXXDefaultArgExpr) + sizeof(Stmt *));
+ return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
+ SubExpr);
+}
+
+CXXTemporary *CXXTemporary::Create(ASTContext &C,
+ const CXXDestructorDecl *Destructor) {
+ return new (C) CXXTemporary(Destructor);
+}
+
+CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(ASTContext &C,
+ CXXTemporary *Temp,
+ Expr* SubExpr) {
+ assert((SubExpr->getType()->isRecordType() ||
+ SubExpr->getType()->isArrayType()) &&
+ "Expression bound to a temporary must have record or array type!");
+
+ return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
+}
+
+CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(ASTContext &C,
+ CXXConstructorDecl *Cons,
+ TypeSourceInfo *Type,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceRange parenRange,
+ bool HadMultipleCandidates,
+ bool ZeroInitialization)
+ : CXXConstructExpr(C, CXXTemporaryObjectExprClass,
+ Type->getType().getNonReferenceType(),
+ Type->getTypeLoc().getBeginLoc(),
+ Cons, false, Args, NumArgs,
+ HadMultipleCandidates, /*FIXME*/false, ZeroInitialization,
+ CXXConstructExpr::CK_Complete, parenRange),
+ Type(Type) {
+}
+
+SourceRange CXXTemporaryObjectExpr::getSourceRange() const {
+ return SourceRange(Type->getTypeLoc().getBeginLoc(),
+ getParenRange().getEnd());
+}
+
+CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *D, bool Elidable,
+ Expr **Args, unsigned NumArgs,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange) {
+ return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
+ Elidable, Args, NumArgs,
+ HadMultipleCandidates, ListInitialization,
+ ZeroInitialization, ConstructKind,
+ ParenRange);
+}
+
+CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *D, bool elidable,
+ Expr **args, unsigned numargs,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange)
+ : Expr(SC, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(),
+ T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ Constructor(D), Loc(Loc), ParenRange(ParenRange), NumArgs(numargs),
+ Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates),
+ ListInitialization(ListInitialization),
+ ZeroInitialization(ZeroInitialization),
+ ConstructKind(ConstructKind), Args(0)
+{
+ if (NumArgs) {
+ Args = new (C) Stmt*[NumArgs];
+
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ assert(args[i] && "NULL argument in CXXConstructExpr");
+
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ Args[i] = args[i];
+ }
+ }
+}
+
+LambdaExpr::Capture::Capture(SourceLocation Loc, bool Implicit,
+ LambdaCaptureKind Kind, VarDecl *Var,
+ SourceLocation EllipsisLoc)
+ : VarAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc)
+{
+ unsigned Bits = 0;
+ if (Implicit)
+ Bits |= Capture_Implicit;
+
+ switch (Kind) {
+ case LCK_This:
+ assert(Var == 0 && "'this' capture cannot have a variable!");
+ break;
+
+ case LCK_ByCopy:
+ Bits |= Capture_ByCopy;
+ // Fall through
+ case LCK_ByRef:
+ assert(Var && "capture must have a variable!");
+ break;
+ }
+ VarAndBits.setInt(Bits);
+}
+
+LambdaCaptureKind LambdaExpr::Capture::getCaptureKind() const {
+ if (capturesThis())
+ return LCK_This;
+
+ return (VarAndBits.getInt() & Capture_ByCopy)? LCK_ByCopy : LCK_ByRef;
+}
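+
+// Capture kinds as written in source, for illustration:
+//   [this] { ... }   // LCK_This
+//   [x]    { ... }   // LCK_ByCopy
+//   [&x]   { ... }   // LCK_ByRef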
+
+LambdaExpr::LambdaExpr(QualType T,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace)
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(), T->isDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ IntroducerRange(IntroducerRange),
+ NumCaptures(Captures.size()),
+ CaptureDefault(CaptureDefault),
+ ExplicitParams(ExplicitParams),
+ ExplicitResultType(ExplicitResultType),
+ ClosingBrace(ClosingBrace)
+{
+ assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
+ CXXRecordDecl *Class = getLambdaClass();
+ CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
+
+ // FIXME: Propagate "has unexpanded parameter pack" bit.
+
+ // Copy captures.
+ ASTContext &Context = Class->getASTContext();
+ Data.NumCaptures = NumCaptures;
+ Data.NumExplicitCaptures = 0;
+ Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures);
+ Capture *ToCapture = Data.Captures;
+ for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
+ if (Captures[I].isExplicit())
+ ++Data.NumExplicitCaptures;
+
+ *ToCapture++ = Captures[I];
+ }
+
+ // Copy initialization expressions for the non-static data members.
+ Stmt **Stored = getStoredStmts();
+ for (unsigned I = 0, N = CaptureInits.size(); I != N; ++I)
+ *Stored++ = CaptureInits[I];
+
+ // Copy the body of the lambda.
+ *Stored++ = getCallOperator()->getBody();
+
+ // Copy the array index variables, if any.
+ HasArrayIndexVars = !ArrayIndexVars.empty();
+ if (HasArrayIndexVars) {
+ assert(ArrayIndexStarts.size() == NumCaptures);
+ memcpy(getArrayIndexVars(), ArrayIndexVars.data(),
+ sizeof(VarDecl *) * ArrayIndexVars.size());
+ memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(),
+ sizeof(unsigned) * Captures.size());
+ getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size();
+ }
+}
+
+LambdaExpr *LambdaExpr::Create(ASTContext &Context,
+ CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace) {
+ // Determine the type of the expression (i.e., the type of the
+ // function object we're creating).
+ QualType T = Context.getTypeDeclType(Class);
+
+ unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
+ if (!ArrayIndexVars.empty())
+ Size += sizeof(VarDecl *) * ArrayIndexVars.size()
+ + sizeof(unsigned) * (Captures.size() + 1);
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) LambdaExpr(T, IntroducerRange, CaptureDefault,
+ Captures, ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars, ArrayIndexStarts,
+ ClosingBrace);
+}
+
+LambdaExpr *LambdaExpr::CreateDeserialized(ASTContext &C, unsigned NumCaptures,
+ unsigned NumArrayIndexVars) {
+ unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1);
+  if (NumArrayIndexVars)
+    Size += sizeof(VarDecl *) * NumArrayIndexVars
+          + sizeof(unsigned) * (NumCaptures + 1);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
+ return getLambdaClass()->getLambdaData().Captures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
+ return capture_begin() + NumCaptures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const {
+ return capture_begin();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const {
+ struct CXXRecordDecl::LambdaDefinitionData &Data
+ = getLambdaClass()->getLambdaData();
+ return Data.Captures + Data.NumExplicitCaptures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_begin() const {
+ return explicit_capture_end();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_end() const {
+ return capture_end();
+}
+
+ArrayRef<VarDecl *>
+LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const {
+ assert(HasArrayIndexVars && "No array index-var data?");
+
+ unsigned Index = Iter - capture_init_begin();
+ assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
+ "Capture index out-of-range");
+ VarDecl **IndexVars = getArrayIndexVars();
+ unsigned *IndexStarts = getArrayIndexStarts();
+ return ArrayRef<VarDecl *>(IndexVars + IndexStarts[Index],
+ IndexVars + IndexStarts[Index + 1]);
+}
+
+CXXRecordDecl *LambdaExpr::getLambdaClass() const {
+ return getType()->getAsCXXRecordDecl();
+}
+
+CXXMethodDecl *LambdaExpr::getCallOperator() const {
+ CXXRecordDecl *Record = getLambdaClass();
+ DeclarationName Name
+ = Record->getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_result Calls = Record->lookup(Name);
+ assert(Calls.first != Calls.second && "Missing lambda call operator!");
+ CXXMethodDecl *Result = cast<CXXMethodDecl>(*Calls.first++);
+  assert(Calls.first == Calls.second && "More than one lambda call operator?");
+ return Result;
+}
+
+CompoundStmt *LambdaExpr::getBody() const {
+ if (!getStoredStmts()[NumCaptures])
+ getStoredStmts()[NumCaptures] = getCallOperator()->getBody();
+
+ return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+}
+
+bool LambdaExpr::isMutable() const {
+ return (getCallOperator()->getTypeQualifiers() & Qualifiers::Const) == 0;
+}
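+
+// For illustration: a lambda's call operator is const unless 'mutable' is
+// written, so
+//   [x]() mutable { ++x; }
+// yields a non-const operator() and isMutable() returns true.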
+
+ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
+ ArrayRef<CleanupObject> objects)
+ : Expr(ExprWithCleanupsClass, subexpr->getType(),
+ subexpr->getValueKind(), subexpr->getObjectKind(),
+ subexpr->isTypeDependent(), subexpr->isValueDependent(),
+ subexpr->isInstantiationDependent(),
+ subexpr->containsUnexpandedParameterPack()),
+ SubExpr(subexpr) {
+ ExprWithCleanupsBits.NumObjects = objects.size();
+ for (unsigned i = 0, e = objects.size(); i != e; ++i)
+ getObjectsBuffer()[i] = objects[i];
+}
+
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, Expr *subexpr,
+ ArrayRef<CleanupObject> objects) {
+ size_t size = sizeof(ExprWithCleanups)
+ + objects.size() * sizeof(CleanupObject);
+ void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(subexpr, objects);
+}
+
+ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
+ : Expr(ExprWithCleanupsClass, empty) {
+ ExprWithCleanupsBits.NumObjects = numObjects;
+}
+
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, EmptyShell empty,
+ unsigned numObjects) {
+ size_t size = sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject);
+ void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(empty, numObjects);
+}
+
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc)
+ : Expr(CXXUnresolvedConstructExprClass,
+ Type->getType().getNonReferenceType(),
+ (Type->getType()->isLValueReferenceType() ? VK_LValue
+ :Type->getType()->isRValueReferenceType()? VK_XValue
+ :VK_RValue),
+ OK_Ordinary,
+ Type->getType()->isDependentType(), true, true,
+ Type->getType()->containsUnexpandedParameterPack()),
+ Type(Type),
+ LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc),
+ NumArgs(NumArgs) {
+ Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ StoredArgs[I] = Args[I];
+ }
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::Create(ASTContext &C,
+ TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc,
+ Args, NumArgs, RParenLoc);
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::CreateEmpty(ASTContext &C, unsigned NumArgs) {
+ Stmt::EmptyShell Empty;
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
+}
+
+SourceRange CXXUnresolvedConstructExpr::getSourceRange() const {
+ return SourceRange(Type->getTypeLoc().getBeginLoc(), RParenLoc);
+}
+
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
+ VK_LValue, OK_Ordinary, true, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid()),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) {
+ if (TemplateArgs) {
+ bool Dependent = true;
+ bool InstantiationDependent = true;
+ bool ContainsUnexpandedParameterPack = false;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+}
+
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
+ VK_LValue, OK_Ordinary, true, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->
+ containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasTemplateKWAndArgsInfo(false),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) { }
+
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::Create(ASTContext &C,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ if (!TemplateArgs && !TemplateKWLoc.isValid())
+ return new (C) CXXDependentScopeMemberExpr(C, Base, BaseType,
+ IsArrow, OperatorLoc,
+ QualifierLoc,
+ FirstQualifierFoundInScope,
+ MemberNameInfo);
+
+ unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr)
+ + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
+ IsArrow, OperatorLoc,
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierFoundInScope,
+ MemberNameInfo, TemplateArgs);
+}
+
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ if (!HasTemplateKWAndArgsInfo)
+ return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(),
+ NestedNameSpecifierLoc(), 0,
+ DeclarationNameInfo());
+
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
+ ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ CXXDependentScopeMemberExpr *E
+ = new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0,
+ DeclarationNameInfo(), 0);
+ E->HasTemplateKWAndArgsInfo = true;
+ return E;
+}
+
+bool CXXDependentScopeMemberExpr::isImplicitAccess() const {
+ if (Base == 0)
+ return true;
+
+ return cast<Expr>(Base)->isImplicitCXXThis();
+}
+
+static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
+ UnresolvedSetIterator end) {
+ do {
+ NamedDecl *decl = *begin;
+ if (isa<UnresolvedUsingValueDecl>(decl))
+ return false;
+ if (isa<UsingShadowDecl>(decl))
+ decl = cast<UsingShadowDecl>(decl)->getUnderlyingDecl();
+
+ // Unresolved member expressions should only contain methods and
+ // method templates.
+ assert(isa<CXXMethodDecl>(decl) || isa<FunctionTemplateDecl>(decl));
+
+ if (isa<FunctionTemplateDecl>(decl))
+ decl = cast<FunctionTemplateDecl>(decl)->getTemplatedDecl();
+ if (cast<CXXMethodDecl>(decl)->isStatic())
+ return false;
+ } while (++begin != end);
+
+ return true;
+}
+
+UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C,
+ bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+ : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc,
+ MemberNameInfo, TemplateArgs, Begin, End,
+ // Dependent
+ ((Base && Base->isTypeDependent()) ||
+ BaseType->isDependentType()),
+ ((Base && Base->isInstantiationDependent()) ||
+ BaseType->isInstantiationDependentType()),
+ // Contains unexpanded parameter pack
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ BaseType->containsUnexpandedParameterPack())),
+ IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing),
+ Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {
+
+ // Check whether all of the members are non-static member functions,
+  // and if so, give this expression the bound-member type instead of the
+  // overload type.
+ if (hasOnlyNonStaticMemberFunctions(Begin, End))
+ setType(C.BoundMemberTy);
+}
+
+bool UnresolvedMemberExpr::isImplicitAccess() const {
+ if (Base == 0)
+ return true;
+
+ return cast<Expr>(Base)->isImplicitCXXThis();
+}
+
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::Create(ASTContext &C,
+ bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ std::size_t size = sizeof(UnresolvedMemberExpr);
+ if (TemplateArgs)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
+ else if (TemplateKWLoc.isValid())
+ size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
+ return new (Mem) UnresolvedMemberExpr(C,
+ HasUnresolvedUsing, Base, BaseType,
+ IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc,
+ MemberNameInfo, TemplateArgs, Begin, End);
+}
+
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedMemberExpr);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
+ UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
+ // Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.
+
+ // If there was a nested name specifier, it names the naming class.
+ // It can't be dependent: after all, we were actually able to do the
+ // lookup.
+ CXXRecordDecl *Record = 0;
+ if (getQualifier()) {
+ const Type *T = getQualifier()->getAsType();
+ assert(T && "qualifier in member expression does not name type");
+ Record = T->getAsCXXRecordDecl();
+ assert(Record && "qualifier in member expression does not name record");
+ }
+ // Otherwise the naming class must have been the base class.
+ else {
+ QualType BaseType = getBaseType().getNonReferenceType();
+ if (isArrow()) {
+ const PointerType *PT = BaseType->getAs<PointerType>();
+ assert(PT && "base of arrow member access is not pointer");
+ BaseType = PT->getPointeeType();
+ }
+
+ Record = BaseType->getAsCXXRecordDecl();
+ assert(Record && "base of member expression does not name record");
+ }
+
+ return Record;
+}
+
+SubstNonTypeTemplateParmPackExpr::
+SubstNonTypeTemplateParmPackExpr(QualType T,
+ NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc,
+ const TemplateArgument &ArgPack)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
+ true, true, true, true),
+ Param(Param), Arguments(ArgPack.pack_begin()),
+ NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }
+
+TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
+ return TemplateArgument(Arguments, NumArguments);
+}
+
+TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value)
+ : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(Loc), RParenLoc(RParenLoc)
+{
+ TypeTraitExprBits.Kind = Kind;
+ TypeTraitExprBits.Value = Value;
+ TypeTraitExprBits.NumArgs = Args.size();
+
+ TypeSourceInfo **ToArgs = getTypeSourceInfos();
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType())
+ setValueDependent(true);
+ if (Args[I]->getType()->isInstantiationDependentType())
+ setInstantiationDependent(true);
+ if (Args[I]->getType()->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack(true);
+
+ ToArgs[I] = Args[I];
+ }
+}
+
+TypeTraitExpr *TypeTraitExpr::Create(ASTContext &C, QualType T,
+ SourceLocation Loc,
+ TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value) {
+ unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size();
+ void *Mem = C.Allocate(Size);
+ return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
+}
+
+TypeTraitExpr *TypeTraitExpr::CreateDeserialized(ASTContext &C,
+ unsigned NumArgs) {
+ unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs;
+ void *Mem = C.Allocate(Size);
+ return new (Mem) TypeTraitExpr(EmptyShell());
+}
+
+void ArrayTypeTraitExpr::anchor() { }
diff --git a/clang/lib/AST/ExprClassification.cpp b/clang/lib/AST/ExprClassification.cpp
new file mode 100644
index 0000000..b091e19
--- /dev/null
+++ b/clang/lib/AST/ExprClassification.cpp
@@ -0,0 +1,644 @@
+//===--- ExprClassification.cpp - Expression AST Node Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Expr::classify.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ErrorHandling.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+typedef Expr::Classification Cl;
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const Expr *trueExpr,
+ const Expr *falseExpr);
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc);
+
+static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
+ const Expr *E,
+ ExprValueKind Kind) {
+ switch (Kind) {
+ case VK_RValue:
+ return Lang.CPlusPlus && E->getType()->isRecordType() ?
+ Cl::CL_ClassTemporary : Cl::CL_PRValue;
+ case VK_LValue:
+ return Cl::CL_LValue;
+ case VK_XValue:
+ return Cl::CL_XValue;
+ }
+ llvm_unreachable("Invalid value category of implicit cast.");
+}
+
+Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
+ assert(!TR->isReferenceType() && "Expressions can't have reference type.");
+
+ Cl::Kinds kind = ClassifyInternal(Ctx, this);
+ // C99 6.3.2.1: An lvalue is an expression with an object type or an
+ // incomplete type other than void.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // Thus, no functions.
+ if (TR->isFunctionType() || TR == Ctx.OverloadTy)
+ kind = Cl::CL_Function;
+ // No void either, but qualified void is OK because it is "other than void".
+ // Void "lvalues" are classified as addressable void values, which are void
+ // expressions whose address can be taken.
+ else if (TR->isVoidType() && !TR.hasQualifiers())
+ kind = (kind == Cl::CL_LValue ? Cl::CL_AddressableVoid : Cl::CL_Void);
+ }
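+  // For example, in C a function designator such as 'f' is classified as
+  // CL_Function here, and a void-typed expression such as '(void)0' as
+  // CL_Void, even though ClassifyInternal reported a plain prvalue.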
+
+ // Enable this assertion for testing.
+ switch (kind) {
+ case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break;
+ case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break;
+ case Cl::CL_Function:
+ case Cl::CL_Void:
+ case Cl::CL_AddressableVoid:
+ case Cl::CL_DuplicateVectorComponents:
+ case Cl::CL_MemberFunction:
+ case Cl::CL_SubObjCPropertySetting:
+ case Cl::CL_ClassTemporary:
+ case Cl::CL_ObjCMessageRValue:
+ case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
+ }
+
+ Cl::ModifiableType modifiable = Cl::CM_Untested;
+ if (Loc)
+ modifiable = IsModifiable(Ctx, this, kind, *Loc);
+ return Classification(kind, modifiable);
+}
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
+ // This function takes the first stab at classifying expressions.
+ const LangOptions &Lang = Ctx.getLangOpts();
+
+ switch (E->getStmtClass()) {
+ case Stmt::NoStmtClass:
+#define ABSTRACT_STMT(Kind)
+#define STMT(Kind, Base) case Expr::Kind##Class:
+#define EXPR(Kind, Base)
+#include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("cannot classify a statement");
+
+ // First come the expressions that are always lvalues, unconditionally.
+ case Expr::ObjCIsaExprClass:
+ // C++ [expr.prim.general]p1: A string literal is an lvalue.
+ case Expr::StringLiteralClass:
+ // @encode is equivalent to its string
+ case Expr::ObjCEncodeExprClass:
+ // __func__ and friends are too.
+ case Expr::PredefinedExprClass:
+ // Property references are lvalues
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
+ case Expr::CXXTypeidExprClass:
+ // Unresolved lookups get classified as lvalues.
+ // FIXME: Is this wise? Should they get their own kind?
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ // ObjC instance variables are lvalues
+ // FIXME: ObjC++0x might have different rules
+ case Expr::ObjCIvarRefExprClass:
+ return Cl::CL_LValue;
+
+ // C99 6.5.2.5p5 says that compound literals are lvalues.
+ // In C++, they're class temporaries.
+ case Expr::CompoundLiteralExprClass:
+ return Ctx.getLangOpts().CPlusPlus? Cl::CL_ClassTemporary
+ : Cl::CL_LValue;
+
+ // Expressions that are prvalues.
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnaryExprOrTypeTraitExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::GNUNullExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::BlockExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::SizeOfPackExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::AtomicExprClass:
+ return Cl::CL_PRValue;
+
+ // Next come the complicated cases.
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return ClassifyInternal(Ctx,
+ cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+
+ // C++ [expr.sub]p1: The result is an lvalue of type "T".
+ // However, subscripting vector types is more like member access.
+ case Expr::ArraySubscriptExprClass:
+ if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
+ return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
+ return Cl::CL_LValue;
+
+ // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
+ // function or variable and a prvalue otherwise.
+ case Expr::DeclRefExprClass:
+ if (E->getType() == Ctx.UnknownAnyTy)
+ return isa<FunctionDecl>(cast<DeclRefExpr>(E)->getDecl())
+ ? Cl::CL_PRValue : Cl::CL_LValue;
+ return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
+
+ // Member access is complex.
+ case Expr::MemberExprClass:
+ return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E));
+
+ case Expr::UnaryOperatorClass:
+ switch (cast<UnaryOperator>(E)->getOpcode()) {
+ // C++ [expr.unary.op]p1: The unary * operator performs indirection:
+ // [...] the result is an lvalue referring to the object or function
+ // to which the expression points.
+ case UO_Deref:
+ return Cl::CL_LValue;
+
+ // GNU extensions, simply look through them.
+ case UO_Extension:
+ return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+
+ // Treat _Real and _Imag basically as if they were member
+ // expressions: l-value only if the operand is a true l-value.
+ case UO_Real:
+ case UO_Imag: {
+ const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ Cl::Kinds K = ClassifyInternal(Ctx, Op);
+ if (K != Cl::CL_LValue) return K;
+
+ if (isa<ObjCPropertyRefExpr>(Op))
+ return Cl::CL_SubObjCPropertySetting;
+ return Cl::CL_LValue;
+ }
+
+ // C++ [expr.pre.incr]p1: The result is the updated operand; it is an
+ // lvalue, [...]
+ // Not so in C.
+ case UO_PreInc:
+ case UO_PreDec:
+ return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue;
+
+ default:
+ return Cl::CL_PRValue;
+ }
+
+ case Expr::OpaqueValueExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // Pseudo-object expressions can produce l-values with reference magic.
+ case Expr::PseudoObjectExprClass:
+ return ClassifyExprValueKind(Lang, E,
+ cast<PseudoObjectExpr>(E)->getValueKind());
+
+ // Implicit casts are lvalues if they're lvalue casts. Other than that, we
+ // only specifically record class temporaries.
+ case Expr::ImplicitCastExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // C++ [expr.prim.general]p4: The presence of parentheses does not affect
+ // whether the expression is an lvalue.
+ case Expr::ParenExprClass:
+ return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
+
+ // C11 6.5.1.1p4: [A generic selection] is an lvalue, a function designator,
+ // or a void expression if its result expression is, respectively, an
+ // lvalue, a function designator, or a void expression.
+ case Expr::GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(E)->isResultDependent())
+ return Cl::CL_PRValue;
+ return ClassifyInternal(Ctx,cast<GenericSelectionExpr>(E)->getResultExpr());
+
+ case Expr::BinaryOperatorClass:
+ case Expr::CompoundAssignOperatorClass:
+ // C doesn't have any binary expressions that are lvalues.
+ if (Lang.CPlusPlus)
+ return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E));
+ return Cl::CL_PRValue;
+
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::UserDefinedLiteralClass:
+ case Expr::CUDAKernelCallExprClass:
+ return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType());
+
+ // __builtin_choose_expr is equivalent to the chosen expression.
+ case Expr::ChooseExprClass:
+ return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr(Ctx));
+
+ // Extended vector element access is an lvalue unless there are duplicates
+ // in the shuffle expression.
+ case Expr::ExtVectorElementExprClass:
+ return cast<ExtVectorElementExpr>(E)->containsDuplicateElements() ?
+ Cl::CL_DuplicateVectorComponents : Cl::CL_LValue;
+
+ // Simply look at the actual default argument.
+ case Expr::CXXDefaultArgExprClass:
+ return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
+
+ // Same idea for temporary binding.
+ case Expr::CXXBindTemporaryExprClass:
+ return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+
+ // And the cleanups guard.
+ case Expr::ExprWithCleanupsClass:
+ return ClassifyInternal(Ctx, cast<ExprWithCleanups>(E)->getSubExpr());
+
+ // Casts depend completely on the target type. All casts work the same.
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ // Only in C++ can casts be interesting at all.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
+
+ case Expr::CXXUnresolvedConstructExprClass:
+ return ClassifyUnnamed(Ctx,
+ cast<CXXUnresolvedConstructExpr>(E)->getTypeAsWritten());
+
+ case Expr::BinaryConditionalOperatorClass: {
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ // Once again, only C++ is interesting.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const ConditionalOperator *co = cast<ConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ // ObjC message sends are effectively function calls, if the target function
+ // is known.
+ case Expr::ObjCMessageExprClass:
+ if (const ObjCMethodDecl *Method =
+ cast<ObjCMessageExpr>(E)->getMethodDecl()) {
+ Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getResultType());
+ return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind;
+ }
+ return Cl::CL_PRValue;
+
+ // Some C++ expressions are always class temporaries.
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::LambdaExprClass:
+ return Cl::CL_ClassTemporary;
+
+ case Expr::VAArgExprClass:
+ return ClassifyUnnamed(Ctx, E->getType());
+
+ case Expr::DesignatedInitExprClass:
+ return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit());
+
+ case Expr::StmtExprClass: {
+ const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
+ if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
+ return ClassifyUnnamed(Ctx, LastExpr->getType());
+ return Cl::CL_PRValue;
+ }
+
+ case Expr::CXXUuidofExprClass:
+ return Cl::CL_LValue;
+
+ case Expr::PackExpansionExprClass:
+ return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
+
+ case Expr::MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference()
+ ? Cl::CL_LValue
+ : Cl::CL_XValue;
+
+ case Expr::InitListExprClass:
+ // An init list can be an lvalue if it is bound to a reference and
+ // contains only one element. In that case, we look at that element
+ // for an exact classification. Init list creation takes care of the
+ // value kind for us, so we only need to fine-tune.
+ if (E->isRValue())
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only 1-element init lists can be glvalues.");
+ return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0));
+ }
+
+ llvm_unreachable("unhandled expression kind in classification");
+}
+
+/// ClassifyDecl - Return the classification of an expression referencing the
+/// given declaration.
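+/// For example, an expression naming a function is CL_LValue in C++ but a
+/// prvalue in C (ClassifyImpl later remaps it to CL_Function); naming a
+/// non-static member function yields CL_MemberFunction.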
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
+ // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a
+ // function, variable, or data member and a prvalue otherwise.
+ // In C, functions are not lvalues.
+ // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an
+ // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to
+ // special-case this.
+
+ if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ return Cl::CL_MemberFunction;
+
+ bool islvalue;
+ if (const NonTypeTemplateParmDecl *NTTParm =
+ dyn_cast<NonTypeTemplateParmDecl>(D))
+ islvalue = NTTParm->getType()->isReferenceType();
+ else
+ islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
+ isa<IndirectFieldDecl>(D) ||
+ (Ctx.getLangOpts().CPlusPlus &&
+ (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)));
+
+ return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
+}
+
+/// ClassifyUnnamed - Return the classification of an expression yielding an
+/// unnamed value of the given type. This applies in particular to function
+/// calls and casts.
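+/// For example, given 'int &f(); int &&g(); int h();', the calls f(), g()
+/// and h() classify as CL_LValue, CL_XValue and CL_PRValue respectively.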
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
+ // In C, function calls are always rvalues.
+ if (!Ctx.getLangOpts().CPlusPlus) return Cl::CL_PRValue;
+
+ // C++ [expr.call]p10: A function call is an lvalue if the result type is an
+ // lvalue reference type or an rvalue reference to function type, an xvalue
+ // if the result type is an rvalue reference to object type, and a prvalue
+ // otherwise.
+ if (T->isLValueReferenceType())
+ return Cl::CL_LValue;
+ const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
+ if (!RV) // Could still be a class temporary, though.
+ return T->isRecordType() ? Cl::CL_ClassTemporary : Cl::CL_PRValue;
+
+ return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
+}
+
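+/// ClassifyMemberExpr - Classify a member access. For example, given
+/// 'struct S { int x; static int y; void f(); } s;', both 's.x' and 's.y'
+/// are CL_LValue, while 's.f' is CL_MemberFunction.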
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
+ if (E->getType() == Ctx.UnknownAnyTy)
+ return (isa<FunctionDecl>(E->getMemberDecl())
+ ? Cl::CL_PRValue : Cl::CL_LValue);
+
+ // Handle C first, it's easier.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // C99 6.5.2.3p3
+ // For dot access, the expression is an lvalue if the first part is. For
+ // arrow access, it always is an lvalue.
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ // ObjC property accesses are not lvalues, but get special treatment.
+ Expr *Base = E->getBase()->IgnoreParens();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, Base);
+ }
+
+ NamedDecl *Member = E->getMemberDecl();
+ // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
+ // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
+ // E1.E2 is an lvalue.
+ if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (Value->getType()->isReferenceType())
+ return Cl::CL_LValue;
+
+ // Otherwise, one of the following rules applies.
+ // -- If E2 is a static member [...] then E1.E2 is an lvalue.
+ if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
+ return Cl::CL_LValue;
+
+ // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then
+ // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue;
+ // otherwise, it is a prvalue.
+ if (isa<FieldDecl>(Member)) {
+ // *E1 is an lvalue
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ Expr *Base = E->getBase()->IgnoreParenImpCasts();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, E->getBase());
+ }
+
+ // -- If E2 is a [...] member function, [...]
+ // -- If it refers to a static member function [...], then E1.E2 is an
+ // lvalue; [...]
+ // -- Otherwise [...] E1.E2 is a prvalue.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
+
+ // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
+ // So is everything else we haven't handled yet.
+ return Cl::CL_PRValue;
+}
+
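+/// ClassifyBinaryOp - Classify a C++ binary expression. For example,
+/// 'a = b' and 'a += b' are CL_LValue (unless the LHS is an ObjC property),
+/// '(a, b)' takes the category of 'b', and 'obj.*pmf' with pmf a pointer to
+/// member function is CL_MemberFunction.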
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
+ assert(Ctx.getLangOpts().CPlusPlus &&
+ "This is only relevant for C++.");
+ // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
+ // Except we override this for writes to ObjC properties.
+ if (E->isAssignmentOp())
+ return (E->getLHS()->getObjectKind() == OK_ObjCProperty
+ ? Cl::CL_PRValue : Cl::CL_LValue);
+
+ // C++ [expr.comma]p1: the result is of the same value category as its right
+ // operand, [...].
+ if (E->getOpcode() == BO_Comma)
+ return ClassifyInternal(Ctx, E->getRHS());
+
+ // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand
+ // is a pointer to a data member is of the same value category as its first
+ // operand.
+ if (E->getOpcode() == BO_PtrMemD)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
+ ? Cl::CL_MemberFunction
+ : ClassifyInternal(Ctx, E->getLHS());
+
+ // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
+ // second operand is a pointer to data member and a prvalue otherwise.
+ if (E->getOpcode() == BO_PtrMemI)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
+ ? Cl::CL_MemberFunction
+ : Cl::CL_LValue;
+
+ // All other binary operations are prvalues.
+ return Cl::CL_PRValue;
+}
+
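+/// ClassifyConditional - Classify a C++ conditional by its branches. For
+/// example, with 'int a, b;', 'c ? a : b' is CL_LValue because both branches
+/// are lvalues of the same kind; if the branches' categories differ, the
+/// result is CL_PRValue.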
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True,
+ const Expr *False) {
+ assert(Ctx.getLangOpts().CPlusPlus &&
+ "This is only relevant for C++.");
+
+ // C++ [expr.cond]p2
+ // If either the second or the third operand has type (cv) void, [...]
+ // the result [...] is a prvalue.
+ if (True->getType()->isVoidType() || False->getType()->isVoidType())
+ return Cl::CL_PRValue;
+
+ // Note that at this point, we have already performed all conversions
+ // according to [expr.cond]p3.
+ // C++ [expr.cond]p4: If the second and third operands are glvalues of the
+ // same value category [...], the result is of that [...] value category.
+ // C++ [expr.cond]p5: Otherwise, the result is a prvalue.
+ Cl::Kinds LCl = ClassifyInternal(Ctx, True),
+ RCl = ClassifyInternal(Ctx, False);
+ return LCl == RCl ? LCl : Cl::CL_PRValue;
+}
+
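+/// IsModifiable - Determine whether the classified expression can be
+/// assigned to. For example, 'const int c;' makes 'c' CM_ConstQualified, an
+/// array name yields CM_ArrayType, and the GCC cast-as-lvalue extension
+/// '(int)x = 1' is reported as CM_LValueCast.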
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc) {
+ // As a general rule, we only care about lvalues. But there are some rvalues
+ // for which we want to generate special results.
+ if (Kind == Cl::CL_PRValue) {
+ // For the sake of better diagnostics, we want to specifically recognize
+ // use of the GCC cast-as-lvalue extension.
+ if (const ExplicitCastExpr *CE =
+ dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
+ if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) {
+ Loc = CE->getExprLoc();
+ return Cl::CM_LValueCast;
+ }
+ }
+ }
+ if (Kind != Cl::CL_LValue)
+ return Cl::CM_RValue;
+
+ // This is the lvalue case.
+ // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
+ if (Ctx.getLangOpts().CPlusPlus && E->getType()->isFunctionType())
+ return Cl::CM_Function;
+
+ // Assignment to a property in ObjC is an implicit setter access. But a
+ // setter might not exist.
+ if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (Expr->isImplicitProperty() && Expr->getImplicitPropertySetter() == 0)
+ return Cl::CM_NoSetterProperty;
+ }
+
+ CanQualType CT = Ctx.getCanonicalType(E->getType());
+ // Const stuff is obviously not modifiable.
+ if (CT.isConstQualified())
+ return Cl::CM_ConstQualified;
+
+ // Arrays are not modifiable, only their elements are.
+ if (CT->isArrayType())
+ return Cl::CM_ArrayType;
+ // Incomplete types are not modifiable.
+ if (CT->isIncompleteType())
+ return Cl::CM_IncompleteType;
+
+ // Records with any const fields (recursively) are not modifiable.
+ if (const RecordType *R = CT->getAs<RecordType>()) {
+ assert((E->getObjectKind() == OK_ObjCProperty ||
+ !Ctx.getLangOpts().CPlusPlus) &&
+ "C++ struct assignment should be resolved by the "
+ "copy assignment operator.");
+ if (R->hasConstFields())
+ return Cl::CM_ConstQualified;
+ }
+
+ return Cl::CM_Modifiable;
+}
+
+Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
+ Classification VC = Classify(Ctx);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: return LV_Valid;
+ case Cl::CL_XValue: return LV_InvalidExpression;
+ case Cl::CL_Function: return LV_NotObjectType;
+ case Cl::CL_Void: return LV_InvalidExpression;
+ case Cl::CL_AddressableVoid: return LV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return LV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return LV_ClassTemporary;
+ case Cl::CL_ObjCMessageRValue: return LV_InvalidMessageExpression;
+ case Cl::CL_PRValue: return LV_InvalidExpression;
+ }
+ llvm_unreachable("Unhandled kind");
+}
+
+Expr::isModifiableLvalueResult
+Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
+ SourceLocation dummy;
+ Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: break;
+ case Cl::CL_XValue: return MLV_InvalidExpression;
+ case Cl::CL_Function: return MLV_NotObjectType;
+ case Cl::CL_Void: return MLV_InvalidExpression;
+ case Cl::CL_AddressableVoid: return MLV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return MLV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
+ case Cl::CL_ObjCMessageRValue: return MLV_InvalidMessageExpression;
+ case Cl::CL_PRValue:
+ return VC.getModifiable() == Cl::CM_LValueCast ?
+ MLV_LValueCast : MLV_InvalidExpression;
+ }
+ assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind");
+ switch (VC.getModifiable()) {
+ case Cl::CM_Untested: llvm_unreachable("Did not test modifiability");
+ case Cl::CM_Modifiable: return MLV_Valid;
+ case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match");
+ case Cl::CM_Function: return MLV_NotObjectType;
+ case Cl::CM_LValueCast:
+ llvm_unreachable("CM_LValueCast and CL_LValue don't match");
+ case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
+ case Cl::CM_ConstQualified: return MLV_ConstQualified;
+ case Cl::CM_ArrayType: return MLV_ArrayType;
+ case Cl::CM_IncompleteType: return MLV_IncompleteType;
+ }
+ llvm_unreachable("Unhandled modifiable type");
+}
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
new file mode 100644
index 0000000..66a88b0
--- /dev/null
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -0,0 +1,6926 @@
+//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr constant evaluator.
+//
+// Constant expression evaluation produces four main results:
+//
+// * A success/failure flag indicating whether constant folding was successful.
+// This is the 'bool' return value used by most of the code in this file. A
+// 'false' return value indicates that constant folding has failed, and any
+// appropriate diagnostic has already been produced.
+//
+// * An evaluated result, valid only if constant folding has not failed.
+//
+// * A flag indicating if evaluation encountered (unevaluated) side-effects.
+// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
+// where it is possible to determine the evaluated result regardless.
+//
+// * A set of notes indicating why the evaluation was not a constant expression
+// (under the C++11 rules only, at the moment), or, if folding failed too,
+// why the expression could not be folded.
+//
+// If we are checking for a potential constant expression, failure to constant
+// fold a potential constant sub-expression will be indicated by a 'false'
+// return value (the expression could not be folded) and no diagnostic (the
+// expression is not necessarily non-constant).
+//
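+// A rough usage sketch, via the entry points declared in Expr.h (for
+// instance Expr::EvaluateAsRValue, which fills an Expr::EvalResult):
+//
+//   Expr::EvalResult Result;
+//   if (E->EvaluateAsRValue(Result, Ctx))
+//     ... use Result.Val, the folded APValue ...
+//   else
+//     ... the notes described above end up in Result.Diag, if provided ...
+//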
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include <cstring>
+#include <functional>
+
+using namespace clang;
+using llvm::APSInt;
+using llvm::APFloat;
+
+static bool IsGlobalLValue(APValue::LValueBase B);
+
+namespace {
+ struct LValue;
+ struct CallStackFrame;
+ struct EvalInfo;
+
+ static QualType getType(APValue::LValueBase B) {
+ if (!B) return QualType();
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>())
+ return D->getType();
+ return B.get<const Expr*>()->getType();
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field or base class.
+ static
+ APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) {
+ APValue::BaseOrMemberType Value;
+ Value.setFromOpaqueValue(E.BaseOrMember);
+ return Value;
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field declaration.
+ static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
+ return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// base class declaration.
+ static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
+ return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Determine whether this LValue path entry for a base class names a virtual
+ /// base class.
+ static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
+ return getAsBaseOrMember(E).getInt();
+ }
+
+ /// Find the path length and type of the most-derived subobject in the given
+ /// path, and find the size of the containing array, if any.
+ static
+ unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
+ ArrayRef<APValue::LValuePathEntry> Path,
+ uint64_t &ArraySize, QualType &Type) {
+ unsigned MostDerivedLength = 0;
+ Type = Base;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (Type->isArrayType()) {
+ const ConstantArrayType *CAT =
+ cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+ Type = CAT->getElementType();
+ ArraySize = CAT->getSize().getZExtValue();
+ MostDerivedLength = I + 1;
+ } else if (Type->isAnyComplexType()) {
+ const ComplexType *CT = Type->castAs<ComplexType>();
+ Type = CT->getElementType();
+ ArraySize = 2;
+ MostDerivedLength = I + 1;
+ } else if (const FieldDecl *FD = getAsField(Path[I])) {
+ Type = FD->getType();
+ ArraySize = 0;
+ MostDerivedLength = I + 1;
+ } else {
+ // Path[I] describes a base class.
+ ArraySize = 0;
+ }
+ }
+ return MostDerivedLength;
+ }
+
+ // The order of this enum is important for diagnostics.
+ enum CheckSubobjectKind {
+ CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
+ CSK_This, CSK_Real, CSK_Imag
+ };
+
+ /// A path from a glvalue to a subobject of that glvalue.
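+  /// For example, for 'struct S { int a[3]; } s;', the subobject 's.a[1]'
+  /// is described by the path {field 'a', array index 1} relative to the
+  /// base lvalue 's'.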
+ struct SubobjectDesignator {
+ /// True if the subobject was named in a manner not supported by C++11. Such
+ /// lvalues can still be folded, but they are not core constant expressions
+ /// and we cannot perform lvalue-to-rvalue conversions on them.
+ bool Invalid : 1;
+
+ /// Is this a pointer one past the end of an object?
+ bool IsOnePastTheEnd : 1;
+
+ /// The length of the path to the most-derived object of which this is a
+ /// subobject.
+ unsigned MostDerivedPathLength : 30;
+
+ /// The size of the array of which the most-derived object is an element, or
+ /// 0 if the most-derived object is not an array element.
+ uint64_t MostDerivedArraySize;
+
+ /// The type of the most derived object referred to by this address.
+ QualType MostDerivedType;
+
+ typedef APValue::LValuePathEntry PathEntry;
+
+ /// The entries on the path from the glvalue to the designated subobject.
+ SmallVector<PathEntry, 8> Entries;
+
+ SubobjectDesignator() : Invalid(true) {}
+
+ explicit SubobjectDesignator(QualType T)
+ : Invalid(false), IsOnePastTheEnd(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0), MostDerivedType(T) {}
+
+ SubobjectDesignator(ASTContext &Ctx, const APValue &V)
+ : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0) {
+ if (!Invalid) {
+ IsOnePastTheEnd = V.isLValueOnePastTheEnd();
+ ArrayRef<PathEntry> VEntries = V.getLValuePath();
+ Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
+ if (V.getLValueBase())
+ MostDerivedPathLength =
+ findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
+ V.getLValuePath(), MostDerivedArraySize,
+ MostDerivedType);
+ }
+ }
+
+ void setInvalid() {
+ Invalid = true;
+ Entries.clear();
+ }
+
+ /// Determine whether this is a one-past-the-end pointer.
+ bool isOnePastTheEnd() const {
+ if (IsOnePastTheEnd)
+ return true;
+ if (MostDerivedArraySize &&
+ Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
+ return true;
+ return false;
+ }
+
+ /// Check that this refers to a valid subobject.
+ bool isValidSubobject() const {
+ if (Invalid)
+ return false;
+ return !isOnePastTheEnd();
+ }
+ /// Check that this refers to a valid subobject, and if not, produce a
+ /// relevant diagnostic and set the designator as invalid.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+
+ /// Update this designator to refer to the first element within this array.
+ void addArrayUnchecked(const ConstantArrayType *CAT) {
+ PathEntry Entry;
+ Entry.ArrayIndex = 0;
+ Entries.push_back(Entry);
+
+ // This is a most-derived object.
+ MostDerivedType = CAT->getElementType();
+ MostDerivedArraySize = CAT->getSize().getZExtValue();
+ MostDerivedPathLength = Entries.size();
+ }
+ /// Update this designator to refer to the given base or member of this
+ /// object.
+ void addDeclUnchecked(const Decl *D, bool Virtual = false) {
+ PathEntry Entry;
+ APValue::BaseOrMemberType Value(D, Virtual);
+ Entry.BaseOrMember = Value.getOpaqueValue();
+ Entries.push_back(Entry);
+
+ // If this isn't a base class, it's a new most-derived object.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ MostDerivedType = FD->getType();
+ MostDerivedArraySize = 0;
+ MostDerivedPathLength = Entries.size();
+ }
+ }
+ /// Update this designator to refer to the given complex component.
+ void addComplexUnchecked(QualType EltTy, bool Imag) {
+ PathEntry Entry;
+ Entry.ArrayIndex = Imag;
+ Entries.push_back(Entry);
+
+ // This is technically a most-derived object, though in practice this
+ // is unlikely to matter.
+ MostDerivedType = EltTy;
+ MostDerivedArraySize = 2;
+ MostDerivedPathLength = Entries.size();
+ }
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+ /// Add N to the address of this subobject.
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (Invalid) return;
+ if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize) {
+ Entries.back().ArrayIndex += N;
+ if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+ diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
+ setInvalid();
+ }
+ return;
+ }
+ // [expr.add]p4: For the purposes of these operators, a pointer to a
+ // nonarray object behaves the same as a pointer to the first element of
+ // an array of length one with the type of the object as its element type.
+ if (IsOnePastTheEnd && N == (uint64_t)-1)
+ IsOnePastTheEnd = false;
+ else if (!IsOnePastTheEnd && N == 1)
+ IsOnePastTheEnd = true;
+ else if (N != 0) {
+ diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+ setInvalid();
+ }
+ }
+ };
+
+ /// A stack frame in the constexpr call stack.
+ struct CallStackFrame {
+ EvalInfo &Info;
+
+ /// Parent - The caller of this stack frame.
+ CallStackFrame *Caller;
+
+ /// CallLoc - The location of the call expression for this call.
+ SourceLocation CallLoc;
+
+ /// Callee - The function which was called.
+ const FunctionDecl *Callee;
+
+ /// Index - The call index of this call.
+ unsigned Index;
+
+ /// This - The binding for the this pointer in this call, if any.
+ const LValue *This;
+
+ /// ParmBindings - Parameter bindings for this function call, indexed by
+ /// parameters' function scope indices.
+ const APValue *Arguments;
+
+ typedef llvm::DenseMap<const Expr*, APValue> MapTy;
+ typedef MapTy::const_iterator temp_iterator;
+ /// Temporaries - Temporary lvalues materialized within this stack frame.
+ MapTy Temporaries;
+
+ CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ const APValue *Arguments);
+ ~CallStackFrame();
+ };
+
+ /// A partial diagnostic which we might know in advance that we are not going
+ /// to emit.
+ class OptionalDiagnostic {
+ PartialDiagnostic *Diag;
+
+ public:
+ explicit OptionalDiagnostic(PartialDiagnostic *Diag = 0) : Diag(Diag) {}
+
+ template<typename T>
+ OptionalDiagnostic &operator<<(const T &v) {
+ if (Diag)
+ *Diag << v;
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APSInt &I) {
+ if (Diag) {
+ llvm::SmallVector<char, 32> Buffer;
+ I.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APFloat &F) {
+ if (Diag) {
+ llvm::SmallVector<char, 32> Buffer;
+ F.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+ };
+
+ /// EvalInfo - This is a private struct used by the evaluator to capture
+ /// information about a subexpression as it is folded. It retains information
+ /// about the AST context, but also maintains information about the folded
+ /// expression.
+ ///
+ /// If an expression could be evaluated, it is still possible it is not a C
+ /// "integer constant expression" or constant expression. If not, this struct
+ /// captures information about how and why not.
+ ///
+ /// One bit of information passed *into* the request for constant folding
+ /// indicates whether the subexpression is "evaluated" or not according to C
+ /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+ /// evaluate the expression regardless of what the RHS is, but C only allows
+ /// certain things in certain situations.
+ struct EvalInfo {
+ ASTContext &Ctx;
+
+ /// EvalStatus - Contains information about the evaluation.
+ Expr::EvalStatus &EvalStatus;
+
+ /// CurrentCall - The top of the constexpr call stack.
+ CallStackFrame *CurrentCall;
+
+ /// CallStackDepth - The number of calls in the call stack right now.
+ unsigned CallStackDepth;
+
+ /// NextCallIndex - The next call index to assign.
+ unsigned NextCallIndex;
+
+ typedef llvm::DenseMap<const OpaqueValueExpr*, APValue> MapTy;
+ /// OpaqueValues - Values used as the common expression in a
+ /// BinaryConditionalOperator.
+ MapTy OpaqueValues;
+
+ /// BottomFrame - The frame in which evaluation started. This must be
+ /// initialized after CurrentCall and CallStackDepth.
+ CallStackFrame BottomFrame;
+
+ /// EvaluatingDecl - This is the declaration whose initializer is being
+ /// evaluated, if any.
+ const VarDecl *EvaluatingDecl;
+
+ /// EvaluatingDeclValue - This is the value being constructed for the
+ /// declaration whose initializer is being evaluated, if any.
+ APValue *EvaluatingDeclValue;
+
+ /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+ /// notes attached to it will also be stored, otherwise they will not be.
+ bool HasActiveDiagnostic;
+
+ /// CheckingPotentialConstantExpression - Are we checking whether the
+ /// expression is a potential constant expression? If so, some diagnostics
+ /// are suppressed.
+ bool CheckingPotentialConstantExpression;
+
+ EvalInfo(const ASTContext &C, Expr::EvalStatus &S)
+ : Ctx(const_cast<ASTContext&>(C)), EvalStatus(S), CurrentCall(0),
+ CallStackDepth(0), NextCallIndex(1),
+ BottomFrame(*this, SourceLocation(), 0, 0, 0),
+ EvaluatingDecl(0), EvaluatingDeclValue(0), HasActiveDiagnostic(false),
+ CheckingPotentialConstantExpression(false) {}
+
+ const APValue *getOpaqueValue(const OpaqueValueExpr *e) const {
+ MapTy::const_iterator i = OpaqueValues.find(e);
+ if (i == OpaqueValues.end()) return 0;
+ return &i->second;
+ }
+
+ void setEvaluatingDecl(const VarDecl *VD, APValue &Value) {
+ EvaluatingDecl = VD;
+ EvaluatingDeclValue = &Value;
+ }
+
+ const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+
+ bool CheckCallLimit(SourceLocation Loc) {
+ // Don't perform any constexpr calls (other than the call we're checking)
+ // when checking a potential constant expression.
+ if (CheckingPotentialConstantExpression && CallStackDepth > 1)
+ return false;
+ if (NextCallIndex == 0) {
+ // NextCallIndex has wrapped around.
+ Diag(Loc, diag::note_constexpr_call_limit_exceeded);
+ return false;
+ }
+ if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
+ return true;
+ Diag(Loc, diag::note_constexpr_depth_limit_exceeded)
+ << getLangOpts().ConstexprCallDepth;
+ return false;
+ }
+
+ CallStackFrame *getCallFrame(unsigned CallIndex) {
+ assert(CallIndex && "no call index in getCallFrame");
+ // We will eventually hit BottomFrame, which has Index 1, so Frame can't
+ // be null in this loop.
+ CallStackFrame *Frame = CurrentCall;
+ while (Frame->Index > CallIndex)
+ Frame = Frame->Caller;
+ return (Frame->Index == CallIndex) ? Frame : 0;
+ }
+
+ private:
+ /// Add a diagnostic to the diagnostics list.
+ PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId) {
+ PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
+ EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
+ return EvalStatus.Diag->back().second;
+ }
+
+ /// Add notes containing a call stack to the current point of evaluation.
+ void addCallStack(unsigned Limit);
+
+ public:
+ /// Diagnose that the evaluation cannot be folded.
+ OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // If we have a prior diagnostic, it will be noting that the expression
+ // isn't a constant expression. This diagnostic is more important.
+ // FIXME: We might want to show both diagnostics to the user.
+ if (EvalStatus.Diag) {
+ unsigned CallStackNotes = CallStackDepth - 1;
+ unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
+ if (Limit)
+ CallStackNotes = std::min(CallStackNotes, Limit + 1);
+ if (CheckingPotentialConstantExpression)
+ CallStackNotes = 0;
+
+ HasActiveDiagnostic = true;
+ EvalStatus.Diag->clear();
+ EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
+ addDiag(Loc, DiagId);
+ if (!CheckingPotentialConstantExpression)
+ addCallStack(Limit);
+ return OptionalDiagnostic(&(*EvalStatus.Diag)[0].second);
+ }
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ if (EvalStatus.Diag)
+ return Diag(E->getExprLoc(), DiagId, ExtraNotes);
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ /// Diagnose that the evaluation does not produce a C++11 core constant
+ /// expression.
+ template<typename LocArg>
+ OptionalDiagnostic CCEDiag(LocArg Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // Don't override a previous diagnostic.
+ if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+ return Diag(Loc, DiagId, ExtraNotes);
+ }
+
+ /// Add a note to a prior diagnostic.
+ OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId) {
+ if (!HasActiveDiagnostic)
+ return OptionalDiagnostic();
+ return OptionalDiagnostic(&addDiag(Loc, DiagId));
+ }
+
+ /// Add a stack of notes to a prior diagnostic.
+ void addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
+ if (HasActiveDiagnostic) {
+ EvalStatus.Diag->insert(EvalStatus.Diag->end(),
+ Diags.begin(), Diags.end());
+ }
+ }
+
+ /// Should we continue evaluation as much as possible after encountering a
+ /// construct which can't be folded?
+ bool keepEvaluatingAfterFailure() {
+ return CheckingPotentialConstantExpression &&
+ EvalStatus.Diag && EvalStatus.Diag->empty();
+ }
+ };
+
+ /// Object used to treat all foldable expressions as constant expressions.
+ struct FoldConstant {
+ bool Enabled;
+
+ explicit FoldConstant(EvalInfo &Info)
+ : Enabled(Info.EvalStatus.Diag && Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects) {
+ }
+ // Treat the value we've computed since this object was created as constant.
+ void Fold(EvalInfo &Info) {
+ if (Enabled && !Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects)
+ Info.EvalStatus.Diag->clear();
+ }
+ };
+
+ /// RAII object used to suppress diagnostics and side-effects from a
+ /// speculative evaluation.
+ class SpeculativeEvaluationRAII {
+ EvalInfo &Info;
+ Expr::EvalStatus Old;
+
+ public:
+ SpeculativeEvaluationRAII(EvalInfo &Info,
+ llvm::SmallVectorImpl<PartialDiagnosticAt>
+ *NewDiag = 0)
+ : Info(Info), Old(Info.EvalStatus) {
+ Info.EvalStatus.Diag = NewDiag;
+ }
+ ~SpeculativeEvaluationRAII() {
+ Info.EvalStatus = Old;
+ }
+ };
+}
+
+bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Invalid)
+ return false;
+ if (isOnePastTheEnd()) {
+ Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
+ << CSK;
+ setInvalid();
+ return false;
+ }
+ return true;
+}
+
+void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
+ const Expr *E, uint64_t N) {
+ if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize)
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*array*/ 0
+ << static_cast<unsigned>(MostDerivedArraySize);
+ else
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*non-array*/ 1;
+ setInvalid();
+}
+
+CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ const APValue *Arguments)
+ : Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
+ Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
+ Info.CurrentCall = this;
+ ++Info.CallStackDepth;
+}
+
+CallStackFrame::~CallStackFrame() {
+ assert(Info.CurrentCall == this && "calls retired out of order");
+ --Info.CallStackDepth;
+ Info.CurrentCall = Caller;
+}
+
+/// Produce a string describing the given constexpr call.
+static void describeCall(CallStackFrame *Frame, llvm::raw_ostream &Out) {
+ unsigned ArgIndex = 0;
+ bool IsMemberCall = isa<CXXMethodDecl>(Frame->Callee) &&
+ !isa<CXXConstructorDecl>(Frame->Callee) &&
+ cast<CXXMethodDecl>(Frame->Callee)->isInstance();
+
+ if (!IsMemberCall)
+ Out << *Frame->Callee << '(';
+
+ for (FunctionDecl::param_const_iterator I = Frame->Callee->param_begin(),
+ E = Frame->Callee->param_end(); I != E; ++I, ++ArgIndex) {
+ if (ArgIndex > (unsigned)IsMemberCall)
+ Out << ", ";
+
+ const ParmVarDecl *Param = *I;
+ const APValue &Arg = Frame->Arguments[ArgIndex];
+ Arg.printPretty(Out, Frame->Info.Ctx, Param->getType());
+
+ if (ArgIndex == 0 && IsMemberCall)
+ Out << "->" << *Frame->Callee << '(';
+ }
+
+ Out << ')';
+}
+
+void EvalInfo::addCallStack(unsigned Limit) {
+ // Determine which calls to skip, if any.
+ unsigned ActiveCalls = CallStackDepth - 1;
+ unsigned SkipStart = ActiveCalls, SkipEnd = SkipStart;
+ if (Limit && Limit < ActiveCalls) {
+ SkipStart = Limit / 2 + Limit % 2;
+ SkipEnd = ActiveCalls - Limit / 2;
+ }
+
+ // Walk the call stack and add the diagnostics.
+ unsigned CallIdx = 0;
+ for (CallStackFrame *Frame = CurrentCall; Frame != &BottomFrame;
+ Frame = Frame->Caller, ++CallIdx) {
+ // Skip this call?
+ if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
+ if (CallIdx == SkipStart) {
+ // Note that we're skipping calls.
+ addDiag(Frame->CallLoc, diag::note_constexpr_calls_suppressed)
+ << unsigned(ActiveCalls - Limit);
+ }
+ continue;
+ }
+
+ llvm::SmallVector<char, 128> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ describeCall(Frame, Out);
+ addDiag(Frame->CallLoc, diag::note_constexpr_call_here) << Out.str();
+ }
+}
+
+namespace {
+ struct ComplexValue {
+ private:
+ bool IsInt;
+
+ public:
+ APSInt IntReal, IntImag;
+ APFloat FloatReal, FloatImag;
+
+ ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
+
+ void makeComplexFloat() { IsInt = false; }
+ bool isComplexFloat() const { return !IsInt; }
+ APFloat &getComplexFloatReal() { return FloatReal; }
+ APFloat &getComplexFloatImag() { return FloatImag; }
+
+ void makeComplexInt() { IsInt = true; }
+ bool isComplexInt() const { return IsInt; }
+ APSInt &getComplexIntReal() { return IntReal; }
+ APSInt &getComplexIntImag() { return IntImag; }
+
+ void moveInto(APValue &v) const {
+ if (isComplexFloat())
+ v = APValue(FloatReal, FloatImag);
+ else
+ v = APValue(IntReal, IntImag);
+ }
+ void setFrom(const APValue &v) {
+ assert(v.isComplexFloat() || v.isComplexInt());
+ if (v.isComplexFloat()) {
+ makeComplexFloat();
+ FloatReal = v.getComplexFloatReal();
+ FloatImag = v.getComplexFloatImag();
+ } else {
+ makeComplexInt();
+ IntReal = v.getComplexIntReal();
+ IntImag = v.getComplexIntImag();
+ }
+ }
+ };
+
+ struct LValue {
+ APValue::LValueBase Base;
+ CharUnits Offset;
+ unsigned CallIndex;
+ SubobjectDesignator Designator;
+
+ const APValue::LValueBase getLValueBase() const { return Base; }
+ CharUnits &getLValueOffset() { return Offset; }
+ const CharUnits &getLValueOffset() const { return Offset; }
+ unsigned getLValueCallIndex() const { return CallIndex; }
+ SubobjectDesignator &getLValueDesignator() { return Designator; }
+ const SubobjectDesignator &getLValueDesignator() const { return Designator;}
+
+ void moveInto(APValue &V) const {
+ if (Designator.Invalid)
+ V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
+ else
+ V = APValue(Base, Offset, Designator.Entries,
+ Designator.IsOnePastTheEnd, CallIndex);
+ }
+ void setFrom(ASTContext &Ctx, const APValue &V) {
+ assert(V.isLValue());
+ Base = V.getLValueBase();
+ Offset = V.getLValueOffset();
+ CallIndex = V.getLValueCallIndex();
+ Designator = SubobjectDesignator(Ctx, V);
+ }
+
+ void set(APValue::LValueBase B, unsigned I = 0) {
+ Base = B;
+ Offset = CharUnits::Zero();
+ CallIndex = I;
+ Designator = SubobjectDesignator(getType(B));
+ }
+
+ // Check that this LValue is not based on a null pointer. If it is, produce
+ // a diagnostic and mark the designator as invalid.
+ bool checkNullPointer(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Designator.Invalid)
+ return false;
+ if (!Base) {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject)
+ << CSK;
+ Designator.setInvalid();
+ return false;
+ }
+ return true;
+ }
+
+ // Check this LValue refers to an object. If not, set the designator to be
+ // invalid and emit a diagnostic.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+ // Outside C++11, do not build a designator referring to a subobject of
+ // any object: we won't use such a designator for anything.
+ if (!Info.getLangOpts().CPlusPlus0x)
+ Designator.setInvalid();
+ return checkNullPointer(Info, E, CSK) &&
+ Designator.checkSubobject(Info, E, CSK);
+ }
+
+ void addDecl(EvalInfo &Info, const Expr *E,
+ const Decl *D, bool Virtual = false) {
+ if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
+ Designator.addDeclUnchecked(D, Virtual);
+ }
+ void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
+ if (checkSubobject(Info, E, CSK_ArrayToPointer))
+ Designator.addArrayUnchecked(CAT);
+ }
+ void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
+ if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
+ Designator.addComplexUnchecked(EltTy, Imag);
+ }
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (checkNullPointer(Info, E, CSK_ArrayIndex))
+ Designator.adjustIndex(Info, E, N);
+ }
+ };
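+
+// For illustration: for an lvalue such as arr[1].x, where
+//   struct S { int x; };
+//   constexpr S arr[2] = { {1}, {2} };
+// Base is the declaration of 'arr', Offset is the byte distance of arr[1].x
+// from the start of 'arr', and the Designator records the subobject path
+// {array index 1, field 'x'} consulted by the checks above.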
+
+ struct MemberPtr {
+ MemberPtr() {}
+ explicit MemberPtr(const ValueDecl *Decl) :
+ DeclAndIsDerivedMember(Decl, false), Path() {}
+
+ /// The member or (direct or indirect) field referred to by this member
+ /// pointer, or 0 if this is a null member pointer.
+ const ValueDecl *getDecl() const {
+ return DeclAndIsDerivedMember.getPointer();
+ }
+ /// Is this actually a member of some type derived from the relevant class?
+ bool isDerivedMember() const {
+ return DeclAndIsDerivedMember.getInt();
+ }
+ /// Get the class which the declaration actually lives in.
+ const CXXRecordDecl *getContainingRecord() const {
+ return cast<CXXRecordDecl>(
+ DeclAndIsDerivedMember.getPointer()->getDeclContext());
+ }
+
+ void moveInto(APValue &V) const {
+ V = APValue(getDecl(), isDerivedMember(), Path);
+ }
+ void setFrom(const APValue &V) {
+ assert(V.isMemberPointer());
+ DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
+ DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
+ Path.clear();
+ ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
+ Path.insert(Path.end(), P.begin(), P.end());
+ }
+
+ /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
+ /// whether the member is a member of some class derived from the class type
+ /// of the member pointer.
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
+ /// Path - The path of base/derived classes from the member declaration's
+ /// class (exclusive) to the class type of the member pointer (inclusive).
+ SmallVector<const CXXRecordDecl*, 4> Path;
+
+ /// Perform a cast towards the class of the Decl (either up or down the
+ /// hierarchy).
+ bool castBack(const CXXRecordDecl *Class) {
+ assert(!Path.empty());
+ const CXXRecordDecl *Expected;
+ if (Path.size() >= 2)
+ Expected = Path[Path.size() - 2];
+ else
+ Expected = getContainingRecord();
+ if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
+ // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
+ // if B does not contain the original member and is not a base or
+ // derived class of the class containing the original member, the result
+ // of the cast is undefined.
+ // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
+ // (D::*). We consider that to be a language defect.
+ return false;
+ }
+ Path.pop_back();
+ return true;
+ }
+ /// Perform a base-to-derived member pointer cast.
+ bool castToDerived(const CXXRecordDecl *Derived) {
+ if (!getDecl())
+ return true;
+ if (!isDerivedMember()) {
+ Path.push_back(Derived);
+ return true;
+ }
+ if (!castBack(Derived))
+ return false;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(false);
+ return true;
+ }
+ /// Perform a derived-to-base member pointer cast.
+ bool castToBase(const CXXRecordDecl *Base) {
+ if (!getDecl())
+ return true;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(true);
+ if (isDerivedMember()) {
+ Path.push_back(Base);
+ return true;
+ }
+ return castBack(Base);
+ }
+ };
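+
+// For illustration of the member pointer casts handled above:
+//   struct B { int b; };
+//   struct D : B { int d; };
+//   constexpr int D::*pd = &B::b;                        // castToDerived
+//   constexpr int B::*pb = static_cast<int B::*>(&D::d); // castToBase
+// In the first case Path gains an entry for D; in the second, the flag in
+// DeclAndIsDerivedMember records that 'd' lives in a class derived from B.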
+
+ /// Compare two member pointers, which are assumed to be of the same type.
+ static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
+ if (!LHS.getDecl() || !RHS.getDecl())
+ return !LHS.getDecl() && !RHS.getDecl();
+ if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+ return false;
+ return LHS.Path == RHS.Path;
+ }
+
+ /// Kinds of constant expression checking, for diagnostics.
+ enum CheckConstantExpressionKind {
+ CCEK_Constant, ///< A normal constant.
+ CCEK_ReturnValue, ///< A constexpr function return value.
+ CCEK_MemberInit ///< A constexpr constructor mem-initializer.
+ };
+}
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
+ const LValue &This, const Expr *E,
+ CheckConstantExpressionKind CCEK = CCEK_Constant,
+ bool AllowNonLiteralTypes = false);
+static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info);
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info);
+static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
+static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
+
+//===----------------------------------------------------------------------===//
+// Misc utilities
+//===----------------------------------------------------------------------===//
+
+/// Should this call expression be treated as a string literal?
+static bool IsStringLiteralCall(const CallExpr *E) {
+ unsigned Builtin = E->isBuiltinCall();
+ return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+}
+
+static bool IsGlobalLValue(APValue::LValueBase B) {
+ // C++11 [expr.const]p3 An address constant expression is a prvalue core
+ // constant expression of pointer type that evaluates to...
+
+ // ... a null pointer value, or a prvalue core constant expression of type
+ // std::nullptr_t.
+ if (!B) return true;
+
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ // ... the address of an object with static storage duration,
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->hasGlobalStorage();
+ // ... the address of a function,
+ return isa<FunctionDecl>(D);
+ }
+
+ const Expr *E = B.get<const Expr*>();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case Expr::CompoundLiteralExprClass: {
+ const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ return CLE->isFileScope() && CLE->isLValue();
+ }
+ // A string literal has static storage duration.
+ case Expr::StringLiteralClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
+ return true;
+ case Expr::CallExprClass:
+ return IsStringLiteralCall(cast<CallExpr>(E));
+ // For GCC compatibility, &&label has static storage duration.
+ case Expr::AddrLabelExprClass:
+ return true;
+ // A Block literal expression may be used as the initialization value for
+ // Block variables at global or local static scope.
+ case Expr::BlockExprClass:
+ return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+ case Expr::ImplicitValueInitExprClass:
+ // FIXME:
+ // We can never form an lvalue with an implicit value initialization as its
+ // base through expression evaluation, so these only appear in one case: the
+ // implicit variable declaration we invent when checking whether a constexpr
+ // constructor can produce a constant expression. We must assume that such
+ // an expression might be a global lvalue.
+ return true;
+ }
+}
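+
+// For illustration of the cases above:
+//   int g;  void foo();
+//   constexpr int *p = &g;            // OK: object with static storage duration
+//   constexpr void (*fp)() = &foo;    // OK: the address of a function
+//   void h() { int l; constexpr int *q = &l; }  // rejected: not a global lvalue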
+
+static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
+ assert(Base && "no location for a null lvalue");
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ if (VD)
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ else
+ Info.Note(Base.dyn_cast<const Expr*>()->getExprLoc(),
+ diag::note_constexpr_temporary_here);
+}
+
+/// Check that this reference or pointer core constant expression is a valid
+/// value for an address or reference constant expression. Return true if we
+/// can fold this expression, whether or not it's a constant expression.
+static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, const LValue &LVal) {
+ bool IsReferenceType = Type->isReferenceType();
+
+ APValue::LValueBase Base = LVal.getLValueBase();
+ const SubobjectDesignator &Designator = LVal.getLValueDesignator();
+
+ // Check that the object is a global. Note that the fake 'this' object we
+ // manufacture when checking potential constant expressions is conservatively
+ // assumed to be global here.
+ if (!IsGlobalLValue(Base)) {
+ if (Info.getLangOpts().CPlusPlus0x) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_non_global, 1)
+ << IsReferenceType << !Designator.Entries.empty()
+ << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ } else {
+ Info.Diag(Loc);
+ }
+ // Don't allow references to temporaries to escape.
+ return false;
+ }
+ assert((Info.CheckingPotentialConstantExpression ||
+ LVal.getLValueCallIndex() == 0) &&
+ "have call index for global lvalue");
+
+ // Allow address constant expressions to be past-the-end pointers. This is
+ // an extension: the standard requires them to point to an object.
+ if (!IsReferenceType)
+ return true;
+
+ // A reference constant expression must refer to an object.
+ if (!Base) {
+ // FIXME: diagnostic
+ Info.CCEDiag(Loc);
+ return true;
+ }
+
+ // Does this refer one past the end of some object?
+ if (Designator.isOnePastTheEnd()) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_past_end, 1)
+ << !Designator.Entries.empty() << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ }
+
+ return true;
+}
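+
+// For illustration: given
+//   constexpr int arr[2] = {1, 2};
+// the one-past-the-end pointer 'arr + 2' is accepted as an address constant
+// (the extension noted above), while a reference such as
+//   constexpr const int &r = arr[2];
+// is diagnosed with note_constexpr_past_end, and any lvalue based on an
+// automatic variable fails the IsGlobalLValue check above.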
+
+/// Check that this core constant expression is of literal type, and if not,
+/// produce an appropriate diagnostic.
+static bool CheckLiteralType(EvalInfo &Info, const Expr *E) {
+ if (!E->isRValue() || E->getType()->isLiteralType())
+ return true;
+
+ // Prvalue constant expressions must be of literal types.
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.Diag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
+/// Check that this core constant expression value is a valid value for a
+/// constant expression. If not, report an appropriate diagnostic. Does not
+/// check that the expression is of literal type.
+static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
+ QualType Type, const APValue &Value) {
+ // Core issue 1454: For a literal constant expression of array or class type,
+ // each subobject of its value shall have been initialized by a constant
+ // expression.
+ if (Value.isArray()) {
+ QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
+ for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayInitializedElt(I)))
+ return false;
+ }
+ if (!Value.hasArrayFiller())
+ return true;
+ return CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayFiller());
+ }
+ if (Value.isUnion() && Value.getUnionField()) {
+ return CheckConstantExpression(Info, DiagLoc,
+ Value.getUnionField()->getType(),
+ Value.getUnionValue());
+ }
+ if (Value.isStruct()) {
+ RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ unsigned BaseIndex = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++BaseIndex) {
+ if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
+ Value.getStructBase(BaseIndex)))
+ return false;
+ }
+ }
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, (*I)->getType(),
+ Value.getStructField((*I)->getFieldIndex())))
+ return false;
+ }
+ }
+
+ if (Value.isLValue()) {
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Value);
+ return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal);
+ }
+
+ // Everything else is fine.
+ return true;
+}
+
+const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
+ return LVal.Base.dyn_cast<const ValueDecl*>();
+}
+
+static bool IsLiteralLValue(const LValue &Value) {
+ return Value.Base.dyn_cast<const Expr*>() && !Value.CallIndex;
+}
+
+static bool IsWeakLValue(const LValue &Value) {
+ const ValueDecl *Decl = GetLValueBaseDecl(Value);
+ return Decl && Decl->isWeak();
+}
+
+static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
+ // A null base expression indicates a null pointer. These are always
+ // evaluatable; they are false exactly when the offset is zero.
+ if (!Value.getLValueBase()) {
+ Result = !Value.getLValueOffset().isZero();
+ return true;
+ }
+
+ // We have a non-null base. These are generally known to be true, but if it's
+ // a weak declaration it can be null at runtime.
+ Result = true;
+ const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
+ return !Decl || !Decl->isWeak();
+}
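+
+// For illustration: converting the address of an ordinary object to bool
+// folds to true, but a weak symbol might still resolve to null at link time:
+//   extern int strong;
+//   extern int weak_sym __attribute__((weak));
+//   constexpr bool a = &strong;    // folds to true
+//   constexpr bool b = &weak_sym;  // not folded: 'weak_sym' is weak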
+
+static bool HandleConversionToBool(const APValue &Val, bool &Result) {
+ switch (Val.getKind()) {
+ case APValue::Uninitialized:
+ return false;
+ case APValue::Int:
+ Result = Val.getInt().getBoolValue();
+ return true;
+ case APValue::Float:
+ Result = !Val.getFloat().isZero();
+ return true;
+ case APValue::ComplexInt:
+ Result = Val.getComplexIntReal().getBoolValue() ||
+ Val.getComplexIntImag().getBoolValue();
+ return true;
+ case APValue::ComplexFloat:
+ Result = !Val.getComplexFloatReal().isZero() ||
+ !Val.getComplexFloatImag().isZero();
+ return true;
+ case APValue::LValue:
+ return EvalPointerValueAsBool(Val, Result);
+ case APValue::MemberPointer:
+ Result = Val.getMemberPointerDecl();
+ return true;
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ case APValue::AddrLabelDiff:
+ return false;
+ }
+
+ llvm_unreachable("unknown APValue kind");
+}
+
+static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
+ APValue Val;
+ if (!Evaluate(Val, Info, E))
+ return false;
+ return HandleConversionToBool(Val, Result);
+}
+
+template<typename T>
+static bool HandleOverflow(EvalInfo &Info, const Expr *E,
+ const T &SrcValue, QualType DestType) {
+ Info.Diag(E, diag::note_constexpr_overflow)
+ << SrcValue << DestType;
+ return false;
+}
+
+static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APFloat &Value,
+ QualType DestType, APSInt &Result) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
+ // Determine whether we are converting to unsigned or signed.
+ bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
+
+ Result = APSInt(DestWidth, !DestSigned);
+ bool ignored;
+ if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
+ & APFloat::opInvalidOp)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
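+
+// For illustration: a conversion whose result cannot be represented in the
+// destination type is not a constant expression, e.g.
+//   constexpr int i = (int)1e40;   // rejected via note_constexpr_overflow
+// whereas (int)3.9 folds to 3, since rmTowardZero truncates toward zero.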
+
+static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, QualType DestType,
+ APFloat &Result) {
+ APFloat Value = Result;
+ bool ignored;
+ if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
+ APFloat::rmNearestTiesToEven, &ignored)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
+ QualType DestType, QualType SrcType,
+ APSInt &Value) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
+ APSInt Result = Value;
+ // Figure out if this is a truncate, extend or noop cast.
+ // If the input is signed, do a sign extend, noop, or truncate.
+ Result = Result.extOrTrunc(DestWidth);
+ Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
+ return Result;
+}
+
+static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APSInt &Value,
+ QualType DestType, APFloat &Result) {
+ Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
+ if (Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
+ llvm::APInt &Res) {
+ APValue SVal;
+ if (!Evaluate(SVal, Info, E))
+ return false;
+ if (SVal.isInt()) {
+ Res = SVal.getInt();
+ return true;
+ }
+ if (SVal.isFloat()) {
+ Res = SVal.getFloat().bitcastToAPInt();
+ return true;
+ }
+ if (SVal.isVector()) {
+ QualType VecTy = E->getType();
+ unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
+ QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ Res = llvm::APInt::getNullValue(VecSize);
+ for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
+ APValue &Elt = SVal.getVectorElt(i);
+ llvm::APInt EltAsInt;
+ if (Elt.isInt()) {
+ EltAsInt = Elt.getInt();
+ } else if (Elt.isFloat()) {
+ EltAsInt = Elt.getFloat().bitcastToAPInt();
+ } else {
+ // Don't try to handle vectors of anything other than int or float
+ // (not sure if it's possible to hit this case).
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ unsigned BaseEltSize = EltAsInt.getBitWidth();
+ if (BigEndian)
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
+ else
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
+ }
+ return true;
+ }
+ // Give up if the input isn't an int, float, or vector. For example, we
+ // reject "(v4i16)(intptr_t)&a".
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
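+
+// For illustration (GNU vector extension): this is what lets a bitcast like
+//   typedef float v4sf __attribute__((vector_size(16)));
+//   typedef int   v4si __attribute__((vector_size(16)));
+//   v4si bits = (v4si)(v4sf){1.0f, 0.0f, 0.0f, 0.0f};
+// be folded: each source lane is bitcast to an integer and packed into one
+// wide APInt, honoring the target's endianness.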
+
+/// Cast an lvalue referring to a base subobject to a derived class, by
+/// truncating the lvalue's path to the given length.
+static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
+ const RecordDecl *TruncatedType,
+ unsigned TruncatedElements) {
+ SubobjectDesignator &D = Result.Designator;
+
+ // Check we actually point to a derived class object.
+ if (TruncatedElements == D.Entries.size())
+ return true;
+ assert(TruncatedElements >= D.MostDerivedPathLength &&
+ "not casting to a derived class");
+ if (!Result.checkSubobject(Info, E, CSK_Derived))
+ return false;
+
+ // Truncate the path to the subobject, and remove any derived-to-base offsets.
+ const RecordDecl *RD = TruncatedType;
+ for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+ const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
+ if (isVirtualBaseClass(D.Entries[I]))
+ Result.Offset -= Layout.getVBaseClassOffset(Base);
+ else
+ Result.Offset -= Layout.getBaseClassOffset(Base);
+ RD = Base;
+ }
+ D.Entries.resize(TruncatedElements);
+ return true;
+}
+
+static void HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ const ASTRecordLayout *RL = 0) {
+ if (!RL) RL = &Info.Ctx.getASTRecordLayout(Derived);
+ Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
+ Obj.addDecl(Info, E, Base, /*Virtual*/ false);
+}
+
+static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *DerivedDecl,
+ const CXXBaseSpecifier *Base) {
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+
+ if (!Base->isVirtual()) {
+ HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
+ return true;
+ }
+
+ SubobjectDesignator &D = Obj.Designator;
+ if (D.Invalid)
+ return false;
+
+ // Extract most-derived object and corresponding type.
+ DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl();
+ if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength))
+ return false;
+
+ // Find the virtual base class.
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
+ Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
+ Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
+ return true;
+}
+
+/// Update LVal to refer to the given field, which must be a member of the type
+/// currently described by LVal.
+static void HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
+ const FieldDecl *FD,
+ const ASTRecordLayout *RL = 0) {
+ if (!RL)
+ RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
+
+ unsigned I = FD->getFieldIndex();
+ LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
+ LVal.addDecl(Info, E, FD);
+}
+
+/// Update LVal to refer to the given indirect field.
+static void HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
+ LValue &LVal,
+ const IndirectFieldDecl *IFD) {
+ for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
+ CE = IFD->chain_end(); C != CE; ++C)
+ HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C));
+}
+
+/// Get the size of the given type in char units.
+static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, CharUnits &Size) {
+ // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
+ // extension.
+ if (Type->isVoidType() || Type->isFunctionType()) {
+ Size = CharUnits::One();
+ return true;
+ }
+
+ if (!Type->isConstantSizeType()) {
+ // sizeof(vla) is not a constant expression: C99 6.5.3.4p2.
+ // FIXME: Better diagnostic.
+ Info.Diag(Loc);
+ return false;
+ }
+
+ Size = Info.Ctx.getTypeSizeInChars(Type);
+ return true;
+}
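+
+// For illustration of the cases above:
+//   constexpr unsigned a = sizeof(void);   // folds to 1 (GNU extension)
+// while sizeof applied to a variable length array reaches the
+// !isConstantSizeType() branch and is not a constant expression.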
+
+/// Update a pointer value to model pointer arithmetic.
+/// \param Info - Information about the ongoing evaluation.
+/// \param E - The expression being evaluated, for diagnostic purposes.
+/// \param LVal - The pointer value to be updated.
+/// \param EltTy - The pointee type represented by LVal.
+/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
+static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ int64_t Adjustment) {
+ CharUnits SizeOfPointee;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
+ return false;
+
+ // Compute the new offset in the appropriate width.
+ LVal.Offset += Adjustment * SizeOfPointee;
+ LVal.adjustIndex(Info, E, Adjustment);
+ return true;
+}
+
+/// Update an lvalue to refer to a component of a complex number.
+/// \param Info - Information about the ongoing evaluation.
+/// \param LVal - The lvalue to be updated.
+/// \param EltTy - The complex number's component type.
+/// \param Imag - False for the real component, true for the imaginary.
+static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ bool Imag) {
+ if (Imag) {
+ CharUnits SizeOfComponent;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent))
+ return false;
+ LVal.Offset += SizeOfComponent;
+ }
+ LVal.addComplex(Info, E, EltTy, Imag);
+ return true;
+}
+
+/// Try to evaluate the initializer for a variable declaration.
+static bool EvaluateVarDeclInit(EvalInfo &Info, const Expr *E,
+ const VarDecl *VD,
+ CallStackFrame *Frame, APValue &Result) {
+ // If this is a parameter to an active constexpr function call, perform
+ // argument substitution.
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ // Assume arguments of a potential constant expression are unknown
+ // constant expressions.
+ if (Info.CheckingPotentialConstantExpression)
+ return false;
+ if (!Frame || !Frame->Arguments) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = Frame->Arguments[PVD->getFunctionScopeIndex()];
+ return true;
+ }
+
+ // Dig out the initializer, and use the declaration which it's attached to.
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (!Init || Init->isValueDependent()) {
+ // If we're checking a potential constant expression, the variable could be
+ // initialized later.
+ if (!Info.CheckingPotentialConstantExpression)
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // If we're currently evaluating the initializer of this declaration, use that
+ // in-flight value.
+ if (Info.EvaluatingDecl == VD) {
+ Result = *Info.EvaluatingDeclValue;
+ return !Result.isUninit();
+ }
+
+ // Never evaluate the initializer of a weak variable. We can't be sure that
+ // this is the definition which will be used.
+ if (VD->isWeak()) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // Check that we can fold the initializer. In C++, we will have already done
+ // this in the cases where it matters for conformance.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!VD->evaluateValue(Notes)) {
+ Info.Diag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ return false;
+ } else if (!VD->checkInitIsICE()) {
+ Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ }
+
+ Result = *VD->getEvaluatedValue();
+ return true;
+}
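+
+// For illustration: in
+//   constexpr int square(int n) { return n * n; }
+//   constexpr int k = square(3);
+// reading 'n' inside square takes the ParmVarDecl branch above and pulls the
+// value 3 out of the active CallStackFrame's Arguments, while a later read of
+// the constexpr global 'k' goes through VD->evaluateValue().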
+
+static bool IsConstNonVolatile(QualType T) {
+ Qualifiers Quals = T.getQualifiers();
+ return Quals.hasConst() && !Quals.hasVolatile();
+}
+
+/// Get the base index of the given base class within an APValue representing
+/// the given derived class.
+static unsigned getBaseIndex(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base) {
+ Base = Base->getCanonicalDecl();
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
+ E = Derived->bases_end(); I != E; ++I, ++Index) {
+ if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
+ return Index;
+ }
+
+ llvm_unreachable("base class missing from derived class's bases list");
+}
+
+/// Extract the value of a character from a string literal. CharType is used to
+/// determine the expected signedness of the result -- a string literal used to
+/// initialize an array of 'signed char' or 'unsigned char' might contain chars
+/// of the wrong signedness.
+static APSInt ExtractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
+ uint64_t Index, QualType CharType) {
+ // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ const StringLiteral *S = dyn_cast<StringLiteral>(Lit);
+ assert(S && "unexpected string literal expression kind");
+ assert(CharType->isIntegerType() && "unexpected character type");
+
+ APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ CharType->isUnsignedIntegerType());
+ if (Index < S->getLength())
+ Value = S->getCodeUnit(Index);
+ return Value;
+}
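+
+// For illustration: this is what lets
+//   constexpr char c = "hi"[1];   // 'i'
+// be folded: the string literal is kept as an lvalue and individual
+// characters are extracted on demand, rather than materializing one APValue
+// per element of the array.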
+
+/// Extract the designated sub-object of an rvalue.
+static bool ExtractSubobject(EvalInfo &Info, const Expr *E,
+ APValue &Obj, QualType ObjType,
+ const SubobjectDesignator &Sub, QualType SubType) {
+ if (Sub.Invalid)
+ // A diagnostic will have already been produced.
+ return false;
+ if (Sub.isOnePastTheEnd()) {
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ if (Sub.Entries.empty())
+ return true;
+ if (Info.CheckingPotentialConstantExpression && Obj.isUninit())
+ // This object might be initialized later.
+ return false;
+
+ APValue *O = &Obj;
+ // Walk the designator's path to find the subobject.
+ for (unsigned I = 0, N = Sub.Entries.size(); I != N; ++I) {
+ if (ObjType->isArrayType()) {
+ // Next subobject is an array element.
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
+ assert(CAT && "vla in literal type?");
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (CAT->getSize().ule(Index)) {
+ // Note, it should not be possible to form a pointer with a valid
+ // designator which points more than one past the end of the array.
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ // An array object is represented as either an Array APValue or as an
+ // LValue which refers to a string literal.
+ if (O->isLValue()) {
+ assert(I == N - 1 && "extracting subobject of character?");
+ assert(!O->hasLValuePath() || O->getLValuePath().empty());
+ Obj = APValue(ExtractStringLiteralCharacter(
+ Info, O->getLValueBase().get<const Expr*>(), Index, SubType));
+ return true;
+ } else if (O->getArrayInitializedElts() > Index)
+ O = &O->getArrayInitializedElt(Index);
+ else
+ O = &O->getArrayFiller();
+ ObjType = CAT->getElementType();
+ } else if (ObjType->isAnyComplexType()) {
+ // Next subobject is a complex number.
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (Index > 1) {
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ assert(I == N - 1 && "extracting subobject of scalar?");
+ if (O->isComplexInt()) {
+ Obj = APValue(Index ? O->getComplexIntImag()
+ : O->getComplexIntReal());
+ } else {
+ assert(O->isComplexFloat());
+ Obj = APValue(Index ? O->getComplexFloatImag()
+ : O->getComplexFloatReal());
+ }
+ return true;
+ } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
+ if (Field->isMutable()) {
+ Info.Diag(E, diag::note_constexpr_ltor_mutable, 1)
+ << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
+ // Next subobject is a class, struct or union field.
+ RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ const FieldDecl *UnionField = O->getUnionField();
+ if (!UnionField ||
+ UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
+ Info.Diag(E, diag::note_constexpr_read_inactive_union_member)
+ << Field << !UnionField << UnionField;
+ return false;
+ }
+ O = &O->getUnionValue();
+ } else
+ O = &O->getStructField(Field->getFieldIndex());
+ ObjType = Field->getType();
+
+ if (ObjType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ // FIXME: Include a description of the path to the volatile subobject.
+ Info.Diag(E, diag::note_constexpr_ltor_volatile_obj, 1)
+ << 2 << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+ }
+ } else {
+ // Next subobject is a base class.
+ const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
+ const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
+ O = &O->getStructBase(getBaseIndex(Derived, Base));
+ ObjType = Info.Ctx.getRecordType(Base);
+ }
+
+ if (O->isUninit()) {
+ if (!Info.CheckingPotentialConstantExpression)
+ Info.Diag(E, diag::note_constexpr_read_uninit);
+ return false;
+ }
+ }
+
+ // This may look super-stupid, but it serves an important purpose: if we just
+ // swapped Obj and *O, we'd create an object which had itself as a subobject.
+ // To avoid the leak, we ensure that Tmp ends up owning the original complete
+ // object, which is destroyed by Tmp's destructor.
+ APValue Tmp;
+ O->swap(Tmp);
+ Obj.swap(Tmp);
+ return true;
+}
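+
+// For illustration of two rejections above:
+//   union U { int a; float f; };
+//   constexpr U u = {1};
+//   constexpr int x = u.a;    // OK: 'a' is the active member
+//   constexpr float y = u.f;  // rejected: read of inactive union member
+// Similarly, reading a 'mutable' field of an otherwise constant object is
+// diagnosed with note_constexpr_ltor_mutable.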
+
+/// Find the position where two subobject designators diverge, or equivalently
+/// the length of the common initial subsequence.
+static unsigned FindDesignatorMismatch(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B,
+ bool &WasArrayIndex) {
+ unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size());
+ for (/**/; I != N; ++I) {
+ if (!ObjType.isNull() &&
+ (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
+ // Next subobject is an array element.
+ if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) {
+ WasArrayIndex = true;
+ return I;
+ }
+ if (ObjType->isAnyComplexType())
+ ObjType = ObjType->castAs<ComplexType>()->getElementType();
+ else
+ ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
+ } else {
+ if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) {
+ WasArrayIndex = false;
+ return I;
+ }
+ if (const FieldDecl *FD = getAsField(A.Entries[I]))
+ // Next subobject is a field.
+ ObjType = FD->getType();
+ else
+ // Next subobject is a base class.
+ ObjType = QualType();
+ }
+ }
+ WasArrayIndex = false;
+ return I;
+}
+
+/// Determine whether the given subobject designators refer to elements of the
+/// same array object.
+static bool AreElementsOfSameArray(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B) {
+ if (A.Entries.size() != B.Entries.size())
+ return false;
+
+ bool IsArray = A.MostDerivedArraySize != 0;
+ if (IsArray && A.MostDerivedPathLength != A.Entries.size())
+ // A is a subobject of the array element.
+ return false;
+
+ // If A (and B) designates an array element, the last entry will be the array
+ // index. That doesn't have to match. Otherwise, we're in the 'implicit array
+ // of length 1' case, and the entire path must match.
+ bool WasArrayIndex;
+ unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
+ return CommonLength >= A.Entries.size() - IsArray;
+}
+
+/// HandleLValueToRValueConversion - Perform an lvalue-to-rvalue conversion on
+/// the given lvalue. This can also be used for 'lvalue-to-lvalue' conversions
+/// for looking up the glvalue referred to by an entity of reference type.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param Conv - The expression for which we are performing the conversion.
+/// Used for diagnostics.
+/// \param Type - The type we expect this conversion to produce, before
+/// stripping cv-qualifiers in the case of a non-class type.
+/// \param LVal - The glvalue on which we are attempting to perform this action.
+/// \param RVal - The produced value will be placed here.
+static bool HandleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
+ QualType Type,
+ const LValue &LVal, APValue &RVal) {
+ if (LVal.Designator.Invalid)
+ // A diagnostic will have already been produced.
+ return false;
+
+ const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+
+ if (!LVal.Base) {
+ // FIXME: Indirection through a null pointer deserves a specific diagnostic.
+ Info.Diag(Conv, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ CallStackFrame *Frame = 0;
+ if (LVal.CallIndex) {
+ Frame = Info.getCallFrame(LVal.CallIndex);
+ if (!Frame) {
+ Info.Diag(Conv, diag::note_constexpr_lifetime_ended, 1) << !Base;
+ NoteLValueLocation(Info, LVal.Base);
+ return false;
+ }
+ }
+
+ // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
+ // is not a constant expression (even if the object is non-volatile). We also
+ // apply this rule to C++98, in order to conform to the expected 'volatile'
+ // semantics.
+ if (Type.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus)
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_type) << Type;
+ else
+ Info.Diag(Conv);
+ return false;
+ }
+
+ if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
+ // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
+ // In C++11, constexpr, non-volatile variables initialized with constant
+ // expressions are constant expressions too. Inside constexpr functions,
+ // parameters are constant expressions even if they're non-const.
+ // In C, such things can also be folded, although they are not ICEs.
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD) {
+ if (const VarDecl *VDef = VD->getDefinition(Info.Ctx))
+ VD = VDef;
+ }
+ if (!VD || VD->isInvalidDecl()) {
+ Info.Diag(Conv);
+ return false;
+ }
+
+ // DR1313: If the object is volatile-qualified but the glvalue was not,
+ // behavior is undefined so the result is not a constant expression.
+ QualType VT = VD->getType();
+ if (VT.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_obj, 1) << 1 << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+
+ if (!isa<ParmVarDecl>(VD)) {
+ if (VD->isConstexpr()) {
+ // OK, we can read this variable.
+ } else if (VT->isIntegralOrEnumerationType()) {
+ if (!VT.isConstQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_non_const_int, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+ } else if (VT->isFloatingType() && VT.isConstQualified()) {
+ // We support folding of const floating-point types, in order to make
+ // static const data members of such types (supported as an extension)
+ // more useful.
+ if (Info.getLangOpts().CPlusPlus0x) {
+ Info.CCEDiag(Conv, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Conv);
+ }
+ } else {
+ // FIXME: Allow folding of values of any literal type in all languages.
+ if (Info.getLangOpts().CPlusPlus0x) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+ }
+
+ if (!EvaluateVarDeclInit(Info, Conv, VD, Frame, RVal))
+ return false;
+
+ if (isa<ParmVarDecl>(VD) || !VD->getAnyInitializer()->isLValue())
+ return ExtractSubobject(Info, Conv, RVal, VT, LVal.Designator, Type);
+
+ // The declaration was initialized by an lvalue, with no lvalue-to-rvalue
+ // conversion. This happens when the declaration and the lvalue should be
+ // considered synonymous, for instance when initializing an array of char
+ // from a string literal. Continue as if the initializer lvalue was the
+ // value we were originally given.
+ assert(RVal.getLValueOffset().isZero() &&
+ "offset for lvalue init of non-reference");
+ Base = RVal.getLValueBase().get<const Expr*>();
+
+ if (unsigned CallIndex = RVal.getLValueCallIndex()) {
+ Frame = Info.getCallFrame(CallIndex);
+ if (!Frame) {
+ Info.Diag(Conv, diag::note_constexpr_lifetime_ended, 1) << !Base;
+ NoteLValueLocation(Info, RVal.getLValueBase());
+ return false;
+ }
+ } else {
+ Frame = 0;
+ }
+ }
+
+ // Volatile temporary objects cannot be read in constant expressions.
+ if (Base->getType().isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_obj, 1) << 0;
+ Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+
+ if (Frame) {
+ // If this is a temporary expression with a nontrivial initializer, grab the
+ // value from the relevant stack frame.
+ RVal = Frame->Temporaries[Base];
+ } else if (const CompoundLiteralExpr *CLE
+ = dyn_cast<CompoundLiteralExpr>(Base)) {
+ // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
+ // initializer until now for such expressions. Such an expression can't be
+ // an ICE in C, so this only matters for fold.
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ if (!Evaluate(RVal, Info, CLE->getInitializer()))
+ return false;
+ } else if (isa<StringLiteral>(Base)) {
+ // We represent a string literal array as an lvalue pointing at the
+ // corresponding expression, rather than building an array of chars.
+ // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ RVal = APValue(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
+ } else {
+ Info.Diag(Conv, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ return ExtractSubobject(Info, Conv, RVal, Base->getType(), LVal.Designator,
+ Type);
+}
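+
+// For illustration of the variable rules above:
+//   const int N = 5;
+//   constexpr int M = N;   // OK: const, non-volatile integer with constant init
+//   int R = 7;
+//   constexpr int Q = R;   // rejected: 'R' is neither const nor constexpr
+// Const floating-point variables sit in between: reads of them are folded as
+// an extension (the CCEDiag branch above) even though C++11 does not treat
+// them as core constant expressions.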
+
+/// Build an lvalue for the object argument of a member function call.
+static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
+ LValue &This) {
+ if (Object->getType()->isPointerType())
+ return EvaluatePointer(Object, This, Info);
+
+ if (Object->isGLValue())
+ return EvaluateLValue(Object, This, Info);
+
+ if (Object->getType()->isLiteralType())
+ return EvaluateTemporary(Object, This, Info);
+
+ return false;
+}
+
+/// HandleMemberPointerAccess - Evaluate a member access operation and build an
+/// lvalue referring to the result.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param BO - The member pointer access operation.
+/// \param LV - Filled in with a reference to the resulting object.
+/// \param IncludeMember - Specifies whether the member itself is included in
+/// the resulting LValue subobject designator. This is not possible when
+/// creating a bound member function.
+/// \return The field or method declaration to which the member pointer refers,
+/// or 0 if evaluation fails.
+static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
+ const BinaryOperator *BO,
+ LValue &LV,
+ bool IncludeMember = true) {
+ assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
+
+ bool EvalObjOK = EvaluateObjectArgument(Info, BO->getLHS(), LV);
+ if (!EvalObjOK && !Info.keepEvaluatingAfterFailure())
+ return 0;
+
+ MemberPtr MemPtr;
+ if (!EvaluateMemberPointer(BO->getRHS(), MemPtr, Info))
+ return 0;
+
+ // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
+ // member value, the behavior is undefined.
+ if (!MemPtr.getDecl())
+ return 0;
+
+ if (!EvalObjOK)
+ return 0;
+
+ if (MemPtr.isDerivedMember()) {
+ // This is a member of some derived class. Truncate LV appropriately.
+ // The end of the derived-to-base path for the base object must match the
+ // derived-to-base path for the member pointer.
+ if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
+ LV.Designator.Entries.size())
+ return 0;
+ unsigned PathLengthToMember =
+ LV.Designator.Entries.size() - MemPtr.Path.size();
+ for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *LVDecl = getAsBaseClass(
+ LV.Designator.Entries[PathLengthToMember + I]);
+ const CXXRecordDecl *MPDecl = MemPtr.Path[I];
+ if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl())
+ return 0;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ if (!CastToDerivedClass(Info, BO, LV, MemPtr.getContainingRecord(),
+ PathLengthToMember))
+ return 0;
+ } else if (!MemPtr.Path.empty()) {
+ // Extend the LValue path with the member pointer's path.
+ LV.Designator.Entries.reserve(LV.Designator.Entries.size() +
+ MemPtr.Path.size() + IncludeMember);
+
+ // Walk down to the appropriate base class.
+ QualType LVType = BO->getLHS()->getType();
+ if (const PointerType *PT = LVType->getAs<PointerType>())
+ LVType = PT->getPointeeType();
+ const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
+ assert(RD && "member pointer access on non-class-type expression");
+ // The first class in the path is that of the lvalue.
+ for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
+ HandleLValueDirectBase(Info, BO, LV, RD, Base);
+ RD = Base;
+ }
+ // Finally cast to the class containing the member.
+ HandleLValueDirectBase(Info, BO, LV, RD, MemPtr.getContainingRecord());
+ }
+
+ // Add the member. Note that we cannot build bound member functions here.
+ if (IncludeMember) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl()))
+ HandleLValueMember(Info, BO, LV, FD);
+ else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(MemPtr.getDecl()))
+ HandleLValueIndirectMember(Info, BO, LV, IFD);
+ else
+ llvm_unreachable("can't construct reference to bound member function");
+ }
+
+ return MemPtr.getDecl();
+}
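+
+// For illustration: the .* and ->* operators evaluated here allow, e.g.
+//   struct S { int x; };
+//   constexpr S s = {42};
+//   constexpr int S::*pm = &S::x;
+//   constexpr int v = s.*pm;   // v == 42
+// whereas applying a null member pointer is rejected, per the comment above.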
+
+/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
+/// the provided lvalue, which currently refers to the base object.
+static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
+ LValue &Result) {
+ SubobjectDesignator &D = Result.Designator;
+ if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived))
+ return false;
+
+ QualType TargetQT = E->getType();
+ if (const PointerType *PT = TargetQT->getAs<PointerType>())
+ TargetQT = PT->getPointeeType();
+
+ // Check this cast lands within the final derived-to-base subobject path.
+ if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Check the type of the final cast. We don't need to check the path,
+ // since a cast can only be formed if the path is unique.
+ unsigned NewEntriesSize = D.Entries.size() - E->path_size();
+ const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
+ const CXXRecordDecl *FinalType;
+ if (NewEntriesSize == D.MostDerivedPathLength)
+ FinalType = D.MostDerivedType->getAsCXXRecordDecl();
+ else
+ FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]);
+ if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize);
+}
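+
+// For illustration: a base-to-derived cast folds when the lvalue really does
+// refer to a base subobject of the target type, e.g.
+//   struct B { int b; constexpr B(int b) : b(b) {} };
+//   struct D : B { int d; constexpr D() : B(1), d(2) {} };
+//   constexpr D d;
+//   constexpr const B &rb = d;
+//   constexpr int x = static_cast<const D&>(rb).d;   // x == 2
+// Casting to a class not on the lvalue's derivation path is diagnosed with
+// note_constexpr_invalid_downcast.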
+
+namespace {
+enum EvalStmtResult {
+ /// Evaluation failed.
+ ESR_Failed,
+ /// Hit a 'return' statement.
+ ESR_Returned,
+ /// Evaluation succeeded.
+ ESR_Succeeded
+};
+}
+
+// Evaluate a statement.
+static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
+ const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default:
+ return ESR_Failed;
+
+ case Stmt::NullStmtClass:
+ case Stmt::DeclStmtClass:
+ return ESR_Succeeded;
+
+ case Stmt::ReturnStmtClass: {
+ const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
+ if (!Evaluate(Result, Info, RetExpr))
+ return ESR_Failed;
+ return ESR_Returned;
+ }
+
+ case Stmt::CompoundStmtClass: {
+ const CompoundStmt *CS = cast<CompoundStmt>(S);
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end(); BI != BE; ++BI) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
+ if (ESR != ESR_Succeeded)
+ return ESR;
+ }
+ return ESR_Succeeded;
+ }
+ }
+}
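+
+// For illustration: a C++11 constexpr function body is, apart from null and
+// declaration statements, a single return statement, e.g.
+//   constexpr int twice(int n) { return 2 * n; }
+// so the statement walk above only needs the Null/Decl/Return/Compound cases;
+// anything else yields ESR_Failed.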
+
+/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
+/// default constructor. If so, we'll fold it whether or not it's marked as
+/// constexpr. If it is marked as constexpr, we will never implicitly define it,
+/// so we need special handling.
+static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
+ const CXXConstructorDecl *CD,
+ bool IsValueInitialization) {
+ if (!CD->isTrivial() || !CD->isDefaultConstructor())
+ return false;
+
+ // Value-initialization does not call a trivial default constructor, so such a
+ // call is a core constant expression whether or not the constructor is
+ // constexpr.
+ if (!CD->isConstexpr() && !IsValueInitialization) {
+ if (Info.getLangOpts().CPlusPlus0x) {
+ // FIXME: If DiagDecl is an implicitly-declared special member function,
+ // we should be much more explicit about why it's not constexpr.
+ Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
+ << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
+ Info.Note(CD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ }
+ return true;
+}
+
+/// CheckConstexprFunction - Check that a function can be called in a constant
+/// expression.
+static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Declaration,
+ const FunctionDecl *Definition) {
+ // Potential constant expressions can contain calls to declared, but not yet
+ // defined, constexpr functions.
+ if (Info.CheckingPotentialConstantExpression && !Definition &&
+ Declaration->isConstexpr())
+ return false;
+
+ // Can we evaluate this function call?
+ if (Definition && Definition->isConstexpr() && !Definition->isInvalidDecl())
+ return true;
+
+ if (Info.getLangOpts().CPlusPlus0x) {
+ const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
+ // FIXME: If DiagDecl is an implicitly-declared special member function, we
+ // should be much more explicit about why it's not constexpr.
+ Info.Diag(CallLoc, diag::note_constexpr_invalid_function, 1)
+ << DiagDecl->isConstexpr() << isa<CXXConstructorDecl>(DiagDecl)
+ << DiagDecl;
+ Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+}
+
+namespace {
+typedef SmallVector<APValue, 8> ArgVector;
+}
+
+/// EvaluateArgs - Evaluate the arguments to a function call.
+static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
+ EvalInfo &Info) {
+ bool Success = true;
+ for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+ return Success;
+}
+
+/// Evaluate a function call.
+static bool HandleFunctionCall(SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ ArrayRef<const Expr*> Args, const Stmt *Body,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
+ return EvaluateStmt(Result, Info, Body) == ESR_Returned;
+}
+
+/// Evaluate a constructor call.
+static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
+ ArrayRef<const Expr*> Args,
+ const CXXConstructorDecl *Definition,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ const CXXRecordDecl *RD = Definition->getParent();
+ if (RD->getNumVBases()) {
+ Info.Diag(CallLoc, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data());
+
+ // If it's a delegating constructor, just delegate.
+ if (Definition->isDelegatingConstructor()) {
+ CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
+ return EvaluateInPlace(Result, Info, This, (*I)->getInit());
+ }
+
+ // For a trivial copy or move constructor, perform an APValue copy. This is
+ // essential for unions, where the operations performed by the constructor
+ // cannot be represented by ctor-initializers.
+ if (Definition->isDefaulted() &&
+ ((Definition->isCopyConstructor() && Definition->isTrivial()) ||
+ (Definition->isMoveConstructor() && Definition->isTrivial()))) {
+ LValue RHS;
+ RHS.setFrom(Info.Ctx, ArgValues[0]);
+ return HandleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
+ RHS, Result);
+ }
+
+ // Reserve space for the struct members.
+ if (!RD->isUnion() && Result.isUninit())
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ bool Success = true;
+ unsigned BasesSeen = 0;
+#ifndef NDEBUG
+ CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
+#endif
+ for (CXXConstructorDecl::init_const_iterator I = Definition->init_begin(),
+ E = Definition->init_end(); I != E; ++I) {
+ LValue Subobject = This;
+ APValue *Value = &Result;
+
+ // Determine the subobject to initialize.
+ if ((*I)->isBaseInitializer()) {
+ QualType BaseType((*I)->getBaseClass(), 0);
+#ifndef NDEBUG
+ // Non-virtual base classes are initialized in the order in the class
+ // definition. We have already checked for virtual base classes.
+ assert(!BaseIt->isVirtual() && "virtual base for literal type");
+ assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
+ "base class initializers not in expected order");
+ ++BaseIt;
+#endif
+ HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
+ BaseType->getAsCXXRecordDecl(), &Layout);
+ Value = &Result.getStructBase(BasesSeen++);
+ } else if (FieldDecl *FD = (*I)->getMember()) {
+ HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout);
+ if (RD->isUnion()) {
+ Result = APValue(FD);
+ Value = &Result.getUnionValue();
+ } else {
+ Value = &Result.getStructField(FD->getFieldIndex());
+ }
+ } else if (IndirectFieldDecl *IFD = (*I)->getIndirectMember()) {
+ // Walk the indirect field decl's chain to find the object to initialize,
+ // and make sure we've initialized every step along it.
+ for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
+ CE = IFD->chain_end();
+ C != CE; ++C) {
+ FieldDecl *FD = cast<FieldDecl>(*C);
+ CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
+ // Switch the union field if it differs. This happens if we had
+ // preceding zero-initialization, and we're now initializing a union
+ // subobject other than the first.
+ // FIXME: In this case, the values of the other subobjects are
+ // specified, since zero-initialization sets all padding bits to zero.
+ if (Value->isUninit() ||
+ (Value->isUnion() && Value->getUnionField() != FD)) {
+ if (CD->isUnion())
+ *Value = APValue(FD);
+ else
+ *Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
+ std::distance(CD->field_begin(), CD->field_end()));
+ }
+ HandleLValueMember(Info, (*I)->getInit(), Subobject, FD);
+ if (CD->isUnion())
+ Value = &Value->getUnionValue();
+ else
+ Value = &Value->getStructField(FD->getFieldIndex());
+ }
+ } else {
+ llvm_unreachable("unknown base initializer kind");
+ }
+
+ if (!EvaluateInPlace(*Value, Info, Subobject, (*I)->getInit(),
+ (*I)->isBaseInitializer()
+ ? CCEK_Constant : CCEK_MemberInit)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
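+
+// For illustration of the trivial-copy shortcut above:
+//   union U { int i; float f; constexpr U(int v) : i(v) {} };
+//   constexpr U u1(3);
+//   constexpr U u2 = u1;   // whole-value APValue copy keeps 'i' active
+// Without that shortcut there would be no ctor-initializers describing which
+// union member to copy.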
+
+namespace {
+class HasSideEffect
+ : public ConstStmtVisitor<HasSideEffect, bool> {
+ const ASTContext &Ctx;
+public:
+
+ HasSideEffect(const ASTContext &C) : Ctx(C) {}
+
+ // Unhandled nodes conservatively default to having side effects.
+ bool VisitStmt(const Stmt *S) {
+ return true;
+ }
+
+ bool VisitParenExpr(const ParenExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
+ return Visit(E->getResultExpr());
+ }
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return false;
+ }
+ bool VisitObjCIvarRefExpr(const ObjCIvarRefExpr *E) {
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return false;
+ }
+
+ // We don't want to evaluate BlockExprs multiple times, as they generate
+ // a ton of code.
+ bool VisitBlockExpr(const BlockExpr *E) { return true; }
+ bool VisitPredefinedExpr(const PredefinedExpr *E) { return false; }
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E)
+ { return Visit(E->getInitializer()); }
+ bool VisitMemberExpr(const MemberExpr *E) { return Visit(E->getBase()); }
+ bool VisitIntegerLiteral(const IntegerLiteral *E) { return false; }
+ bool VisitFloatingLiteral(const FloatingLiteral *E) { return false; }
+ bool VisitStringLiteral(const StringLiteral *E) { return false; }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) { return false; }
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E)
+ { return false; }
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E)
+ { return Visit(E->getLHS()) || Visit(E->getRHS()); }
+ bool VisitChooseExpr(const ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Ctx)); }
+ bool VisitCastExpr(const CastExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitBinAssign(const BinaryOperator *E) { return true; }
+ bool VisitCompoundAssignOperator(const BinaryOperator *E) { return true; }
+ bool VisitBinaryOperator(const BinaryOperator *E)
+ { return Visit(E->getLHS()) || Visit(E->getRHS()); }
+ bool VisitUnaryPreInc(const UnaryOperator *E) { return true; }
+ bool VisitUnaryPostInc(const UnaryOperator *E) { return true; }
+ bool VisitUnaryPreDec(const UnaryOperator *E) { return true; }
+ bool VisitUnaryPostDec(const UnaryOperator *E) { return true; }
+ bool VisitUnaryDeref(const UnaryOperator *E) {
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return Visit(E->getSubExpr());
+ }
+ bool VisitUnaryOperator(const UnaryOperator *E) { return Visit(E->getSubExpr()); }
+
+ // Has side effects if any element does.
+ bool VisitInitListExpr(const InitListExpr *E) {
+ for (unsigned i = 0, e = E->getNumInits(); i != e; ++i)
+ if (Visit(E->getInit(i))) return true;
+ if (const Expr *filler = E->getArrayFiller())
+ return Visit(filler);
+ return false;
+ }
+
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *) { return false; }
+};
+
+class OpaqueValueEvaluation {
+ EvalInfo &info;
+ OpaqueValueExpr *opaqueValue;
+
+public:
+ OpaqueValueEvaluation(EvalInfo &info, OpaqueValueExpr *opaqueValue,
+ Expr *value)
+ : info(info), opaqueValue(opaqueValue) {
+
+ // If evaluation fails, fail immediately.
+ if (!Evaluate(info.OpaqueValues[opaqueValue], info, value)) {
+ this->opaqueValue = 0;
+ return;
+ }
+ }
+
+ bool hasError() const { return opaqueValue == 0; }
+
+ ~OpaqueValueEvaluation() {
+ // FIXME: For a recursive constexpr call, an outer stack frame might have
+ // been using this opaque value too, and will now have to re-evaluate the
+ // source expression.
+ if (opaqueValue) info.OpaqueValues.erase(opaqueValue);
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Generic Evaluation
+//===----------------------------------------------------------------------===//
+namespace {
+
+// FIXME: RetTy is always bool. Remove it.
+template <class Derived, typename RetTy=bool>
+class ExprEvaluatorBase
+ : public ConstStmtVisitor<Derived, RetTy> {
+private:
+ RetTy DerivedSuccess(const APValue &V, const Expr *E) {
+ return static_cast<Derived*>(this)->Success(V, E);
+ }
+ RetTy DerivedZeroInitialization(const Expr *E) {
+ return static_cast<Derived*>(this)->ZeroInitialization(E);
+ }
+
+ // Check whether a conditional operator with a non-constant condition is a
+ // potential constant expression. If neither arm is a potential constant
+ // expression, then the conditional operator is not either.
+ template<typename ConditionalOperator>
+ void CheckPotentialConstantConditional(const ConditionalOperator *E) {
+ assert(Info.CheckingPotentialConstantExpression);
+
+ // Speculatively evaluate both arms.
+ {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diag;
+ SpeculativeEvaluationRAII Speculate(Info, &Diag);
+
+ StmtVisitorTy::Visit(E->getFalseExpr());
+ if (Diag.empty())
+ return;
+
+ Diag.clear();
+ StmtVisitorTy::Visit(E->getTrueExpr());
+ if (Diag.empty())
+ return;
+ }
+
+ Error(E, diag::note_constexpr_conditional_never_const);
+ }
+
+
+ template<typename ConditionalOperator>
+ bool HandleConditionalOperator(const ConditionalOperator *E) {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
+ if (Info.CheckingPotentialConstantExpression)
+ CheckPotentialConstantConditional(E);
+ return false;
+ }
+
+ Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
+ return StmtVisitorTy::Visit(EvalExpr);
+ }
+
+protected:
+ EvalInfo &Info;
+ typedef ConstStmtVisitor<Derived, RetTy> StmtVisitorTy;
+ typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ RetTy ZeroInitialization(const Expr *E) { return Error(E); }
+
+public:
+ ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
+
+ EvalInfo &getEvalInfo() { return Info; }
+
+ /// Report an evaluation error. This should only be called when an error is
+ /// first discovered. When propagating an error, just return false.
+ bool Error(const Expr *E, diag::kind D) {
+ Info.Diag(E, D);
+ return false;
+ }
+ bool Error(const Expr *E) {
+ return Error(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+
+ RetTy VisitStmt(const Stmt *) {
+ llvm_unreachable("Expression evaluator should not be called on stmts");
+ }
+ RetTy VisitExpr(const Expr *E) {
+ return Error(E);
+ }
+
+ RetTy VisitParenExpr(const ParenExpr *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ RetTy VisitUnaryExtension(const UnaryOperator *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ RetTy VisitUnaryPlus(const UnaryOperator *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ RetTy VisitChooseExpr(const ChooseExpr *E)
+ { return StmtVisitorTy::Visit(E->getChosenSubExpr(Info.Ctx)); }
+ RetTy VisitGenericSelectionExpr(const GenericSelectionExpr *E)
+ { return StmtVisitorTy::Visit(E->getResultExpr()); }
+ RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
+ { return StmtVisitorTy::Visit(E->getReplacement()); }
+ RetTy VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
+ { return StmtVisitorTy::Visit(E->getExpr()); }
+ // We cannot create any objects for which cleanups are required, so there is
+ // nothing to do here; all cleanups must come from unevaluated subexpressions.
+ RetTy VisitExprWithCleanups(const ExprWithCleanups *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+
+ RetTy VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+ RetTy VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+
+ RetTy VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+
+ case BO_Comma:
+ VisitIgnoredValue(E->getLHS());
+ return StmtVisitorTy::Visit(E->getRHS());
+
+ case BO_PtrMemD:
+ case BO_PtrMemI: {
+ LValue Obj;
+ if (!HandleMemberPointerAccess(Info, E, Obj))
+ return false;
+ APValue Result;
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), Obj, Result))
+ return false;
+ return DerivedSuccess(Result, E);
+ }
+ }
+ }
+
+ RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
+ // Cache the value of the common expression.
+ OpaqueValueEvaluation opaque(Info, E->getOpaqueValue(), E->getCommon());
+ if (opaque.hasError())
+ return false;
+
+ return HandleConditionalOperator(E);
+ }
+
+ RetTy VisitConditionalOperator(const ConditionalOperator *E) {
+ bool IsBcpCall = false;
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // the result is a constant expression if it can be folded without
+ // side-effects. This is an important GNU extension. See GCC PR38377
+ // for discussion.
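+    // For instance (illustrative only), the common idiom
+    //   char buf[__builtin_constant_p(n) ? n : 64];
+    // is folded to a constant bound even though 'n' alone need not be a
+    // constant expression, provided the fold produces no side effects.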
+ if (const CallExpr *CallCE =
+ dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ IsBcpCall = true;
+
+ // Always assume __builtin_constant_p(...) ? ... : ... is a potential
+ // constant expression; we can't check whether it's potentially foldable.
+ if (Info.CheckingPotentialConstantExpression && IsBcpCall)
+ return false;
+
+ FoldConstant Fold(Info);
+
+ if (!HandleConditionalOperator(E))
+ return false;
+
+ if (IsBcpCall)
+ Fold.Fold(Info);
+
+ return true;
+ }
+
+ RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ const APValue *Value = Info.getOpaqueValue(E);
+ if (!Value) {
+ const Expr *Source = E->getSourceExpr();
+ if (!Source)
+ return Error(E);
+ if (Source == E) { // sanity checking.
+ assert(0 && "OpaqueValueExpr recursively refers to itself");
+ return Error(E);
+ }
+ return StmtVisitorTy::Visit(Source);
+ }
+ return DerivedSuccess(*Value, E);
+ }
+
+ RetTy VisitCallExpr(const CallExpr *E) {
+ const Expr *Callee = E->getCallee()->IgnoreParens();
+ QualType CalleeType = Callee->getType();
+
+ const FunctionDecl *FD = 0;
+ LValue *This = 0, ThisVal;
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ bool HasQualifier = false;
+
+ // Extract function decl and 'this' pointer from the callee.
+ if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
+ const ValueDecl *Member = 0;
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
+        // Explicit bound member calls, such as x.f() or p->g().
+ if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
+ return false;
+ Member = ME->getMemberDecl();
+ This = &ThisVal;
+ HasQualifier = ME->hasQualifier();
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
+ // Indirect bound member calls ('.*' or '->*').
+ Member = HandleMemberPointerAccess(Info, BE, ThisVal, false);
+ if (!Member) return false;
+ This = &ThisVal;
+ } else
+ return Error(Callee);
+
+ FD = dyn_cast<FunctionDecl>(Member);
+ if (!FD)
+ return Error(Callee);
+ } else if (CalleeType->isFunctionPointerType()) {
+ LValue Call;
+ if (!EvaluatePointer(Callee, Call, Info))
+ return false;
+
+ if (!Call.getLValueOffset().isZero())
+ return Error(Callee);
+ FD = dyn_cast_or_null<FunctionDecl>(
+ Call.getLValueBase().dyn_cast<const ValueDecl*>());
+ if (!FD)
+ return Error(Callee);
+
+ // Overloaded operator calls to member functions are represented as normal
+ // calls with '*this' as the first argument.
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && !MD->isStatic()) {
+ // FIXME: When selecting an implicit conversion for an overloaded
+ // operator delete, we sometimes try to evaluate calls to conversion
+ // operators without a 'this' parameter!
+ if (Args.empty())
+ return Error(E);
+
+ if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
+ return false;
+ This = &ThisVal;
+ Args = Args.slice(1);
+ }
+
+ // Don't call function pointers which have been cast to some other type.
+ if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType()))
+ return Error(E);
+ } else
+ return Error(E);
+
+ if (This && !This->checkSubobject(Info, E, CSK_This))
+ return false;
+
+ // DR1358 allows virtual constexpr functions in some cases. Don't allow
+ // calls to such functions in constant expressions.
+ if (This && !HasQualifier &&
+ isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual())
+ return Error(E, diag::note_constexpr_virtual_call);
+
+ const FunctionDecl *Definition = 0;
+ Stmt *Body = FD->getBody(Definition);
+ APValue Result;
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) ||
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body,
+ Info, Result))
+ return false;
+
+ return DerivedSuccess(Result, E);
+ }
+
+ RetTy VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ return StmtVisitorTy::Visit(E->getInitializer());
+ }
+ RetTy VisitInitListExpr(const InitListExpr *E) {
+ if (E->getNumInits() == 0)
+ return DerivedZeroInitialization(E);
+ if (E->getNumInits() == 1)
+ return StmtVisitorTy::Visit(E->getInit(0));
+ return Error(E);
+ }
+ RetTy VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+ RetTy VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+ RetTy VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+
+ /// A member expression where the object is a prvalue is itself a prvalue.
+ RetTy VisitMemberExpr(const MemberExpr *E) {
+ assert(!E->isArrow() && "missing call to bound member function?");
+
+ APValue Val;
+ if (!Evaluate(Val, Info, E->getBase()))
+ return false;
+
+ QualType BaseTy = E->getBase()->getType();
+
+ const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
+ if (!FD) return Error(E);
+ assert(!FD->getType()->isReferenceType() && "prvalue reference?");
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+
+ SubobjectDesignator Designator(BaseTy);
+ Designator.addDeclUnchecked(FD);
+
+ return ExtractSubobject(Info, E, Val, BaseTy, Designator, E->getType()) &&
+ DerivedSuccess(Val, E);
+ }
+
+ RetTy VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return StmtVisitorTy::Visit(E->getSubExpr());
+
+ case CK_LValueToRValue: {
+ LValue LVal;
+ if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+ return false;
+ APValue RVal;
+ // Note, we use the subexpression's type in order to retain cv-qualifiers.
+ if (!HandleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+ LVal, RVal))
+ return false;
+ return DerivedSuccess(RVal, E);
+ }
+ }
+
+ return Error(E);
+ }
+
+  /// Visit an expression which is evaluated, but whose value is ignored.
+ void VisitIgnoredValue(const Expr *E) {
+ APValue Scratch;
+ if (!Evaluate(Scratch, Info, E))
+ Info.EvalStatus.HasSideEffects = true;
+ }
+};
+
+}
+
+//===----------------------------------------------------------------------===//
+// Common base class for lvalue and temporary evaluation.
+//===----------------------------------------------------------------------===//
+namespace {
+template<class Derived>
+class LValueExprEvaluatorBase
+ : public ExprEvaluatorBase<Derived, bool> {
+protected:
+ LValue &Result;
+ typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
+ typedef ExprEvaluatorBase<Derived, bool> ExprEvaluatorBaseTy;
+
+ bool Success(APValue::LValueBase B) {
+ Result.set(B);
+ return true;
+ }
+
+public:
+ LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
+ ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(this->Info.Ctx, V);
+ return true;
+ }
+
+ bool VisitMemberExpr(const MemberExpr *E) {
+ // Handle non-static data members.
+ QualType BaseTy;
+ if (E->isArrow()) {
+ if (!EvaluatePointer(E->getBase(), Result, this->Info))
+ return false;
+ BaseTy = E->getBase()->getType()->getAs<PointerType>()->getPointeeType();
+ } else if (E->getBase()->isRValue()) {
+ assert(E->getBase()->getType()->isRecordType());
+ if (!EvaluateTemporary(E->getBase(), Result, this->Info))
+ return false;
+ BaseTy = E->getBase()->getType();
+ } else {
+ if (!this->Visit(E->getBase()))
+ return false;
+ BaseTy = E->getBase()->getType();
+ }
+
+ const ValueDecl *MD = E->getMemberDecl();
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ (void)BaseTy;
+ HandleLValueMember(this->Info, E, Result, FD);
+ } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
+ HandleLValueIndirectMember(this->Info, E, Result, IFD);
+ } else
+ return this->Error(E);
+
+ if (MD->getType()->isReferenceType()) {
+ APValue RefValue;
+ if (!HandleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
+ RefValue))
+ return false;
+ return Success(RefValue, E);
+ }
+ return true;
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return HandleMemberPointerAccess(this->Info, E, Result);
+ }
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ if (!this->Visit(E->getSubExpr()))
+ return false;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ QualType Type = E->getSubExpr()->getType();
+
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ if (!HandleLValueBase(this->Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
+ return false;
+ Type = (*PathI)->getType();
+ }
+
+ return true;
+ }
+ }
+ }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// LValue Evaluation
+//
+// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
+// function designators (in C), decl references to void objects (in C), and
+// temporaries (if building with -Wno-address-of-temporary).
+//
+// LValue evaluation produces values comprising a base expression of one of the
+// following types:
+// - Declarations
+// * VarDecl
+// * FunctionDecl
+// - Literals
+// * CompoundLiteralExpr in C
+// * StringLiteral
+// * CXXTypeidExpr
+// * PredefinedExpr
+// * ObjCStringLiteralExpr
+// * ObjCEncodeExpr
+// * AddrLabelExpr
+// * BlockExpr
+// * CallExpr for a MakeStringConstant builtin
+// - Locals and temporaries
+// * Any Expr, with a CallIndex indicating the function in which the temporary
+// was evaluated.
+// plus an offset in bytes.
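+//
+// For example (an illustrative sketch), given
+//   constexpr int arr[4] = {1, 2, 3, 4};
+//   constexpr const int *p = &arr[2];
+// the lvalue 'arr[2]' is represented as the VarDecl 'arr' (its base) plus an
+// offset of 2 * sizeof(int) bytes, with a subobject designator recording the
+// array index.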
+//===----------------------------------------------------------------------===//
+namespace {
+class LValueExprEvaluator
+ : public LValueExprEvaluatorBase<LValueExprEvaluator> {
+public:
+ LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ bool VisitVarDecl(const Expr *E, const VarDecl *VD);
+
+ bool VisitDeclRefExpr(const DeclRefExpr *E);
+ bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+ bool VisitMemberExpr(const MemberExpr *E);
+ bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
+ bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
+ bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
+ bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ bool VisitUnaryDeref(const UnaryOperator *E);
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_LValueBitCast:
+ this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ if (!Visit(E->getSubExpr()))
+ return false;
+ Result.Designator.setInvalid();
+ return true;
+
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ return HandleBaseToDerivedCast(Info, E, Result);
+ }
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression as an lvalue. This can be legitimately called on
+/// expressions which are not glvalues, in a few cases:
+/// * function designators in C,
+/// * "extern void" objects,
+/// * temporaries, if building with -Wno-address-of-temporary.
+static bool EvaluateLValue(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert((E->isGLValue() || E->getType()->isFunctionType() ||
+ E->getType()->isVoidType() || isa<CXXTemporaryObjectExpr>(E)) &&
+ "can't evaluate expression as an lvalue");
+ return LValueExprEvaluator(Info, Result).Visit(E);
+}
+
+bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ return Success(FD);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ return VisitVarDecl(E, VD);
+ return Error(E);
+}
+
+bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
+ if (!VD->getType()->isReferenceType()) {
+ if (isa<ParmVarDecl>(VD)) {
+ Result.set(VD, Info.CurrentCall->Index);
+ return true;
+ }
+ return Success(VD);
+ }
+
+ APValue V;
+ if (!EvaluateVarDeclInit(Info, E, VD, Info.CurrentCall, V))
+ return false;
+ return Success(V, E);
+}
+
+bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ if (E->GetTemporaryExpr()->isRValue()) {
+ if (E->getType()->isRecordType())
+ return EvaluateTemporary(E->GetTemporaryExpr(), Result, Info);
+
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->Temporaries[E], Info,
+ Result, E->GetTemporaryExpr());
+ }
+
+ // Materialization of an lvalue temporary occurs when we need to force a copy
+ // (for instance, if it's a bitfield).
+ // FIXME: The AST should contain an lvalue-to-rvalue node for such cases.
+ if (!Visit(E->GetTemporaryExpr()))
+ return false;
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), Result,
+ Info.CurrentCall->Temporaries[E]))
+ return false;
+ Result.set(E, Info.CurrentCall->Index);
+ return true;
+}
+
+bool
+LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ // Defer visiting the literal until the lvalue-to-rvalue conversion. We can
+ // only see this when folding in C, so there's no standard to follow here.
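+  // For instance (illustrative), a compound literal such as
+  //   (int[]){1, 2, 3}
+  // is recorded as the lvalue base itself; its initializer is only consulted
+  // if and when the value is actually read.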
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ if (E->isTypeOperand())
+ return Success(E);
+ CXXRecordDecl *RD = E->getExprOperand()->getType()->getAsCXXRecordDecl();
+ if (RD && RD->isPolymorphic()) {
+ Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
+ return false;
+ }
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
+ // Handle static data members.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return VisitVarDecl(E, VD);
+ }
+
+ // Handle static member functions.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
+ if (MD->isStatic()) {
+ VisitIgnoredValue(E->getBase());
+ return Success(MD);
+ }
+ }
+
+ // Handle non-static data members.
+ return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
+}
+
+bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // FIXME: Deal with vectors as array subscript bases.
+ if (E->getBase()->getType()->isVectorType())
+ return Error(E);
+
+ if (!EvaluatePointer(E->getBase(), Result, Info))
+ return false;
+
+ APSInt Index;
+ if (!EvaluateInteger(E->getIdx(), Index, Info))
+ return false;
+ int64_t IndexValue
+ = Index.isSigned() ? Index.getSExtValue()
+ : static_cast<int64_t>(Index.getZExtValue());
+
+ return HandleLValueArrayAdjustment(Info, E, Result, E->getType(), IndexValue);
+}
+
+bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
+ return EvaluatePointer(E->getSubExpr(), Result, Info);
+}
+
+bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ // __real is a no-op on scalar lvalues.
+ if (E->getSubExpr()->getType()->isAnyComplexType())
+ HandleLValueComplexElement(Info, E, Result, E->getType(), false);
+ return true;
+}
+
+bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ assert(E->getSubExpr()->getType()->isAnyComplexType() &&
+ "lvalue __imag__ on scalar?");
+ if (!Visit(E->getSubExpr()))
+ return false;
+ HandleLValueComplexElement(Info, E, Result, E->getType(), true);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PointerExprEvaluator
+ : public ExprEvaluatorBase<PointerExprEvaluator, bool> {
+ LValue &Result;
+
+ bool Success(const Expr *E) {
+ Result.set(E);
+ return true;
+ }
+public:
+
+ PointerExprEvaluator(EvalInfo &info, LValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(Info.Ctx, V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((Expr*)0);
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+ bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
+ { return Success(E); }
+ bool VisitObjCNumericLiteral(const ObjCNumericLiteral *E)
+ { return Success(E); }
+ bool VisitAddrLabelExpr(const AddrLabelExpr *E)
+ { return Success(E); }
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBlockExpr(const BlockExpr *E) {
+ if (!E->getBlockDecl()->hasCaptures())
+ return Success(E);
+ return Error(E);
+ }
+ bool VisitCXXThisExpr(const CXXThisExpr *E) {
+ if (!Info.CurrentCall->This)
+ return Error(E);
+ Result = *Info.CurrentCall->This;
+ return true;
+ }
+
+ // FIXME: Missing: @protocol, @selector
+};
+} // end anonymous namespace
+
+static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+ return PointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() != BO_Add &&
+ E->getOpcode() != BO_Sub)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ const Expr *PExp = E->getLHS();
+ const Expr *IExp = E->getRHS();
+ if (IExp->getType()->isPointerType())
+ std::swap(PExp, IExp);
+
+ bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
+ if (!EvalPtrOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ llvm::APSInt Offset;
+ if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
+ return false;
+ int64_t AdditionalOffset
+ = Offset.isSigned() ? Offset.getSExtValue()
+ : static_cast<int64_t>(Offset.getZExtValue());
+ if (E->getOpcode() == BO_Sub)
+ AdditionalOffset = -AdditionalOffset;
+
+ QualType Pointee = PExp->getType()->getAs<PointerType>()->getPointeeType();
+ return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
+ AdditionalOffset);
+}
+
+bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EvaluateLValue(E->getSubExpr(), Result, Info);
+}
+
+bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_BitCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ if (!Visit(SubExpr))
+ return false;
+ // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
+ // permitted in constant expressions in C++11. Bitcasts from cv void* are
+ // also static_casts, but we disallow them as a resolution to DR1312.
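+    // As an illustration (hypothetical): '(void*)&n' keeps its constant
+    // address, while casting that void* back to 'int*' invalidates the
+    // designator and is diagnosed as an invalid constant-expression cast.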
+ if (!E->getType()->isVoidPointerType()) {
+ Result.Designator.setInvalid();
+ if (SubExpr->getType()->isVoidPointerType())
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << SubExpr->getType();
+ else
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ }
+ return true;
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ if (!EvaluatePointer(E->getSubExpr(), Result, Info))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ QualType Type =
+ E->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
+ return false;
+ Type = (*PathI)->getType();
+ }
+
+ return true;
+ }
+
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+ return HandleBaseToDerivedCast(Info, E, Result);
+
+ case CK_NullToPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_IntegralToPointer: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
+ APValue Value;
+ if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
+ break;
+
+ if (Value.isInt()) {
+ unsigned Size = Info.Ctx.getTypeSize(E->getType());
+ uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
+ Result.Base = (Expr*)0;
+ Result.Offset = CharUnits::fromQuantity(N);
+ Result.CallIndex = 0;
+ Result.Designator.setInvalid();
+ return true;
+ } else {
+ // Cast is of an lvalue, no need to change value.
+ Result.setFrom(Info.Ctx, Value);
+ return true;
+ }
+ }
+ case CK_ArrayToPointerDecay:
+ if (SubExpr->isGLValue()) {
+ if (!EvaluateLValue(SubExpr, Result, Info))
+ return false;
+ } else {
+ Result.set(SubExpr, Info.CurrentCall->Index);
+ if (!EvaluateInPlace(Info.CurrentCall->Temporaries[SubExpr],
+ Info, Result, SubExpr))
+ return false;
+ }
+ // The result is a pointer to the first element of the array.
+ if (const ConstantArrayType *CAT
+ = Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
+ Result.addArray(Info, E, CAT);
+ else
+ Result.Designator.setInvalid();
+ return true;
+
+ case CK_FunctionToPointerDecay:
+ return EvaluateLValue(SubExpr, Result, Info);
+ }
+
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+}
+
+bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (IsStringLiteralCall(E))
+ return Success(E);
+
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Member Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MemberPointerExprEvaluator
+ : public ExprEvaluatorBase<MemberPointerExprEvaluator, bool> {
+ MemberPtr &Result;
+
+ bool Success(const ValueDecl *D) {
+ Result = MemberPtr(D);
+ return true;
+ }
+public:
+
+ MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
+ : ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((const ValueDecl*)0);
+ }
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isMemberPointerType());
+ return MemberPointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_NullToMemberPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_BaseToDerivedMemberPointer: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (E->path_empty())
+ return true;
+ // Base-to-derived member pointer casts store the path in derived-to-base
+ // order, so iterate backwards. The CXXBaseSpecifier also provides us with
+ // the wrong end of the derived->base arc, so stagger the path by one class.
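+    // For example (hypothetical): converting 'int A::*' to 'int C::*',
+    // where C derives from B and B derives from A, must apply castToDerived
+    // for B and then for C; the final class comes from the destination
+    // member pointer type rather than from the path.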
+ typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
+ for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
+ PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToDerived(Derived))
+ return Error(E);
+ }
+ const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
+ if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
+ return Error(E);
+ return true;
+ }
+
+ case CK_DerivedToBaseMemberPointer:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToBase(Base))
+ return Error(E);
+ }
+ return true;
+ }
+}
+
+bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
+ // member can be formed.
+ return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
+}
+
+//===----------------------------------------------------------------------===//
+// Record Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class RecordExprEvaluator
+ : public ExprEvaluatorBase<RecordExprEvaluator, bool> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ };
+}
+
+/// Perform zero-initialization on an object of non-union class type.
+/// C++11 [dcl.init]p5:
+/// To zero-initialize an object or reference of type T means:
+/// [...]
+/// -- if T is a (possibly cv-qualified) non-union class type,
+/// each non-static data member and each base-class subobject is
+/// zero-initialized
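+/// For instance (an illustrative case), zero-initializing
+///   struct Derived : Base { int n; double d; };
+/// recurses into the Base subobject and zero-initializes 'n' and 'd'.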
+static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
+ const RecordDecl *RD,
+ const LValue &This, APValue &Result) {
+ assert(!RD->isUnion() && "Expected non-union class type");
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (CD) {
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++Index) {
+ const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+ LValue Subobject = This;
+ HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout);
+ if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
+ Result.getStructBase(Index)))
+ return false;
+ }
+ }
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
+ I != End; ++I) {
+ // -- if T is a reference type, no initialization is performed.
+ if ((*I)->getType()->isReferenceType())
+ continue;
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, E, Subobject, *I, &Layout);
+
+ ImplicitValueInitExpr VIE((*I)->getType());
+ if (!EvaluateInPlace(
+ Result.getStructField((*I)->getFieldIndex()), Info, Subobject, &VIE))
+ return false;
+ }
+
+ return true;
+}
+
+bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
+ // object's first non-static named data member is zero-initialized
+ RecordDecl::field_iterator I = RD->field_begin();
+ if (I == RD->field_end()) {
+ Result = APValue((const FieldDecl*)0);
+ return true;
+ }
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, E, Subobject, *I);
+ Result = APValue(*I);
+ ImplicitValueInitExpr VIE((*I)->getType());
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
+ }
+
+ if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
+ Info.Diag(E, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ return HandleClassZeroInitialization(Info, E, RD, This, Result);
+}
+
+bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return Visit(E->getSubExpr());
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ APValue DerivedObject;
+ if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
+ return false;
+ if (!DerivedObject.isStruct())
+ return Error(E->getSubExpr());
+
+ // Derived-to-base rvalue conversion: just slice off the derived part.
+ APValue *Value = &DerivedObject;
+ const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ Value = &Value->getStructBase(getBaseIndex(RD, Base));
+ RD = Base;
+ }
+ Result = *Value;
+ return true;
+ }
+ }
+}
+
+bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ // Cannot constant-evaluate std::initializer_list inits.
+ if (E->initializesStdInitializerList())
+ return false;
+
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (RD->isUnion()) {
+ const FieldDecl *Field = E->getInitializedFieldInUnion();
+ Result = APValue(Field);
+ if (!Field)
+ return true;
+
+ // If the initializer list for a union does not contain any elements, the
+ // first element of the union is value-initialized.
+ ImplicitValueInitExpr VIE(Field->getType());
+ const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout);
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
+ }
+
+ assert((!isa<CXXRecordDecl>(RD) || !cast<CXXRecordDecl>(RD)->getNumBases()) &&
+ "initializer list for class with base classes");
+ Result = APValue(APValue::UninitStruct(), 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+ unsigned ElementNo = 0;
+ bool Success = true;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
+ // Anonymous bit-fields are not considered members of the class for
+ // purposes of aggregate initialization.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ LValue Subobject = This;
+
+ bool HaveInit = ElementNo < E->getNumInits();
+
+ // FIXME: Diagnostics here should point to the end of the initializer
+ // list, not the start.
+ HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E, Subobject,
+ *Field, &Layout);
+
+ // Perform an implicit value-initialization for members beyond the end of
+ // the initializer list.
+ ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
+
+ if (!EvaluateInPlace(
+ Result.getStructField((*Field)->getFieldIndex()),
+ Info, Subobject, HaveInit ? E->getInit(ElementNo++) : &VIE)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
+
+bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const CXXConstructorDecl *FD = E->getConstructor();
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ // If we've already performed zero-initialization, we're already done.
+ if (!Result.isUninit())
+ return true;
+
+ if (ZeroInit)
+ return ZeroInitialization(E);
+
+ const CXXRecordDecl *RD = FD->getParent();
+ if (RD->isUnion())
+ Result = APValue((FieldDecl*)0);
+ else
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ return true;
+ }
+
+ const FunctionDecl *Definition = 0;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // Avoid materializing a temporary for an elidable copy/move constructor.
+ if (E->isElidable() && !ZeroInit)
+ if (const MaterializeTemporaryExpr *ME
+ = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
+ return Visit(ME->GetTemporaryExpr());
+
+ if (ZeroInit && !ZeroInitialization(E))
+ return false;
+
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), This, Args,
+ cast<CXXConstructorDecl>(Definition), Info,
+ Result);
+}
+
+static bool EvaluateRecord(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType() &&
+ "can't evaluate expression as a record rvalue");
+ return RecordExprEvaluator(Info, This, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Temporary Evaluation
+//
+// Temporaries are represented in the AST as rvalues, but generally behave like
+// lvalues. The full-object of which the temporary is a subobject is implicitly
+// materialized so that a reference can bind to it.
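+//
+// For example (an illustrative case), in
+//   struct S { int n; constexpr int get() const { return n; } };
+//   constexpr int k = S{5}.get();
+// the prvalue 'S{5}' is materialized as a temporary so that the implicit
+// object argument of 'get' has an object to refer to.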
+//===----------------------------------------------------------------------===//
+namespace {
+class TemporaryExprEvaluator
+ : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
+public:
+ TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ /// Visit an expression which constructs the value of this temporary.
+ bool VisitConstructExpr(const Expr *E) {
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->Temporaries[E], Info, Result, E);
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return VisitConstructExpr(E->getSubExpr());
+ }
+ }
+ bool VisitInitListExpr(const InitListExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCallExpr(const CallExpr *E) {
+ return VisitConstructExpr(E);
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression of record type as a temporary.
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType());
+ return TemporaryExprEvaluator(Info, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Vector Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VectorExprEvaluator
+ : public ExprEvaluatorBase<VectorExprEvaluator, bool> {
+ APValue &Result;
+ public:
+
+ VectorExprEvaluator(EvalInfo &info, APValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const ArrayRef<APValue> &V, const Expr *E) {
+ assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
+ // FIXME: remove this APValue copy.
+ Result = APValue(V.data(), V.size());
+ return true;
+ }
+ bool Success(const APValue &V, const Expr *E) {
+ assert(V.isVector());
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+ // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
+ // binary comparisons, binary and/or/xor,
+ // shufflevector, ExtVectorElementExpr
+ };
+} // end anonymous namespace
+
+static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
+  assert(E->isRValue() && E->getType()->isVectorType() &&
+         "not a vector rvalue");
+ return VectorExprEvaluator(Info, Result).Visit(E);
+}
+
+bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const VectorType *VTy = E->getType()->castAs<VectorType>();
+ unsigned NElts = VTy->getNumElements();
+
+ const Expr *SE = E->getSubExpr();
+ QualType SETy = SE->getType();
+
+ switch (E->getCastKind()) {
+ case CK_VectorSplat: {
+ APValue Val = APValue();
+ if (SETy->isIntegerType()) {
+ APSInt IntResult;
+ if (!EvaluateInteger(SE, IntResult, Info))
+ return false;
+ Val = APValue(IntResult);
+ } else if (SETy->isRealFloatingType()) {
+ APFloat F(0.0);
+ if (!EvaluateFloat(SE, F, Info))
+ return false;
+ Val = APValue(F);
+ } else {
+ return Error(E);
+ }
+
+ // Splat and create vector APValue.
+ SmallVector<APValue, 4> Elts(NElts, Val);
+ return Success(Elts, E);
+ }
+ case CK_BitCast: {
+ // Evaluate the operand into an APInt we can extract from.
+ llvm::APInt SValInt;
+ if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
+ return false;
+ // Extract the elements
+ QualType EltTy = VTy->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ SmallVector<APValue, 4> Elts;
+ if (EltTy->isRealFloatingType()) {
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
+ bool isIEESem = &Sem != &APFloat::PPCDoubleDouble;
+ unsigned FloatEltSize = EltSize;
+ if (&Sem == &APFloat::x87DoubleExtended)
+ FloatEltSize = 80;
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
+ Elts.push_back(APValue(APFloat(Elt, isIEESem)));
+ }
+ } else if (EltTy->isIntegerType()) {
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
+ Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
+ }
+ } else {
+ return Error(E);
+ }
+ return Success(Elts, E);
+ }
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ }
+}
+
+bool
+VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const VectorType *VT = E->getType()->castAs<VectorType>();
+ unsigned NumInits = E->getNumInits();
+ unsigned NumElements = VT->getNumElements();
+
+ QualType EltTy = VT->getElementType();
+ SmallVector<APValue, 4> Elements;
+
+ // The number of initializers can be less than the number of
+ // vector elements. For OpenCL, this can be due to nested vector
+ // initialization. For GCC compatibility, missing trailing elements
+ // should be initialized with zeroes.
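+  // For instance (hypothetical), with
+  //   typedef int v4si __attribute__((vector_size(16)));
+  //   v4si v = {1, 2};
+  // the two missing trailing elements are filled with zeroes.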
+ unsigned CountInits = 0, CountElts = 0;
+ while (CountElts < NumElements) {
+ // Handle nested vector initialization.
+ if (CountInits < NumInits
+ && E->getInit(CountInits)->getType()->isExtVectorType()) {
+ APValue v;
+ if (!EvaluateVector(E->getInit(CountInits), v, Info))
+ return Error(E);
+ unsigned vlen = v.getVectorLength();
+ for (unsigned j = 0; j < vlen; j++)
+ Elements.push_back(v.getVectorElt(j));
+ CountElts += vlen;
+ } else if (EltTy->isIntegerType()) {
+ llvm::APSInt sInt(32);
+ if (CountInits < NumInits) {
+ if (!EvaluateInteger(E->getInit(CountInits), sInt, Info))
+ return false;
+ } else // trailing integer zero.
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ Elements.push_back(APValue(sInt));
+ CountElts++;
+ } else {
+ llvm::APFloat f(0.0);
+ if (CountInits < NumInits) {
+ if (!EvaluateFloat(E->getInit(CountInits), f, Info))
+ return false;
+ } else // trailing float zero.
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ Elements.push_back(APValue(f));
+ CountElts++;
+ }
+ CountInits++;
+ }
+ return Success(Elements, E);
+}
+
+bool
+VectorExprEvaluator::ZeroInitialization(const Expr *E) {
+ const VectorType *VT = E->getType()->getAs<VectorType>();
+ QualType EltTy = VT->getElementType();
+ APValue ZeroElement;
+ if (EltTy->isIntegerType())
+ ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy));
+ else
+ ZeroElement =
+ APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
+
+ SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
+ return Success(Elements, E);
+}
+
+bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Array Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class ArrayExprEvaluator
+ : public ExprEvaluatorBase<ArrayExprEvaluator, bool> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ assert((V.isArray() || V.isLValue()) &&
+ "expected array or string literal");
+ Result = V;
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ Result = APValue(APValue::UninitArray(), 0,
+ CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller()) return true;
+
+ // Zero-initialize all elements.
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ };
+} // end anonymous namespace
+
+static bool EvaluateArray(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
+ return ArrayExprEvaluator(Info, This, Result).Visit(E);
+}
+
+bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
+ // an appropriately-typed string literal enclosed in braces.
+ if (E->isStringLiteralInit()) {
+ LValue LV;
+ if (!EvaluateLValue(E->getInit(0), LV, Info))
+ return false;
+ APValue Val;
+ LV.moveInto(Val);
+ return Success(Val, E);
+ }
+
+ bool Success = true;
+
+ Result = APValue(APValue::UninitArray(), E->getNumInits(),
+ CAT->getSize().getZExtValue());
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ unsigned Index = 0;
+ for (InitListExpr::const_iterator I = E->begin(), End = E->end();
+ I != End; ++I, ++Index) {
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
+ Info, Subobject, cast<Expr>(*I)) ||
+ !HandleLValueArrayAdjustment(Info, cast<Expr>(*I), Subobject,
+ CAT->getElementType(), 1)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ if (!Result.hasArrayFiller()) return Success;
+ assert(E->hasArrayFiller() && "no array filler for incomplete init list");
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10] = {};
+ return EvaluateInPlace(Result.getArrayFiller(), Info,
+ Subobject, E->getArrayFiller()) && Success;
+}
+
+bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ bool HadZeroInit = !Result.isUninit();
+ if (!HadZeroInit)
+ Result = APValue(APValue::UninitArray(), 0, CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller())
+ return true;
+
+ const CXXConstructorDecl *FD = E->getConstructor();
+
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ if (HadZeroInit)
+ return true;
+
+ if (ZeroInit) {
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ const CXXRecordDecl *RD = FD->getParent();
+ if (RD->isUnion())
+ Result.getArrayFiller() = APValue((FieldDecl*)0);
+ else
+ Result.getArrayFiller() =
+ APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ return true;
+ }
+
+ const FunctionDecl *Definition = 0;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10];
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+
+ if (ZeroInit && !HadZeroInit) {
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ if (!EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE))
+ return false;
+ }
+
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
+ cast<CXXConstructorDecl>(Definition),
+ Info, Result.getArrayFiller());
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Evaluation
+//
+// As a GNU extension, we support casting pointers to sufficiently-wide integer
+// types and back in constant folding. Integer values are thus represented
+// either as an integer-valued APValue, or as an lvalue-valued APValue.
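+//
+// For example (an illustrative case), given a file-scope 'int x;',
+// folding '(unsigned long)&x + 4' yields an lvalue-valued APValue whose base
+// is 'x' and whose offset is 4 bytes, much as if it were a 'char*' value.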
+//===----------------------------------------------------------------------===//
+
+namespace {
+class IntExprEvaluator
+ : public ExprEvaluatorBase<IntExprEvaluator, bool> {
+ APValue &Result;
+public:
+ IntExprEvaluator(EvalInfo &info, APValue &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(SI);
+ return true;
+ }
+ bool Success(const llvm::APSInt &SI, const Expr *E) {
+ return Success(SI, E, Result);
+ }
+
+ bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(APSInt(I));
+ Result.getInt().setIsUnsigned(
+ E->getType()->isUnsignedIntegerOrEnumerationType());
+ return true;
+ }
+ bool Success(const llvm::APInt &I, const Expr *E) {
+ return Success(I, E, Result);
+ }
+
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
+ return true;
+ }
+ bool Success(uint64_t Value, const Expr *E) {
+ return Success(Value, E, Result);
+ }
+
+ bool Success(CharUnits Size, const Expr *E) {
+ return Success(Size.getQuantity(), E);
+ }
+
+ bool Success(const APValue &V, const Expr *E) {
+ if (V.isLValue() || V.isAddrLabelDiff()) {
+ Result = V;
+ return true;
+ }
+ return Success(V.getInt(), E);
+ }
+
+ bool ZeroInitialization(const Expr *E) { return Success(0, E); }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool CheckReferencedDecl(const Expr *E, const Decl *D);
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (CheckReferencedDecl(E, E->getDecl()))
+ return true;
+
+ return ExprEvaluatorBaseTy::VisitDeclRefExpr(E);
+ }
+ bool VisitMemberExpr(const MemberExpr *E) {
+ if (CheckReferencedDecl(E, E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return true;
+ }
+
+ return ExprEvaluatorBaseTy::VisitMemberExpr(E);
+ }
+
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitOffsetOfExpr(const OffsetOfExpr *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+
+ bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ // Note, GNU defines __null as an integer, not a pointer.
+ bool VisitGNUNullExpr(const GNUNullExpr *E) {
+ return ZeroInitialization(E);
+ }
+
+ bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
+
+private:
+ CharUnits GetAlignOfExpr(const Expr *E);
+ CharUnits GetAlignOfType(QualType T);
+ static QualType GetObjectType(APValue::LValueBase B);
+ bool TryEvaluateBuiltinObjectSize(const CallExpr *E);
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
+/// produce either the integer value or a pointer.
+///
+/// GCC has a heinous extension which folds casts between pointer types and
+/// pointer-sized integral types. We support this by allowing the evaluation of
+/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
+/// Some simple arithmetic on such values is supported (they are treated much
+/// like char*).
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
+ return IntExprEvaluator(Info, Result).Visit(E);
+}
+
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
+ APValue Val;
+ if (!EvaluateIntegerOrLValue(E, Val, Info))
+ return false;
+ if (!Val.isInt()) {
+ // FIXME: It would be better to produce the diagnostic for casting
+ // a pointer to an integer.
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = Val.getInt();
+ return true;
+}
+
+/// Check whether the given declaration can be directly converted to an integral
+/// rvalue. If not, no diagnostic is produced; there are other things we can
+/// try.
+bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
+ // Enums are integer constant exprs.
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
+    // Check for signedness/width mismatches between E's type and ECD's value.
+ bool SameSign = (ECD->getInitVal().isSigned()
+ == E->getType()->isSignedIntegerOrEnumerationType());
+ bool SameWidth = (ECD->getInitVal().getBitWidth()
+ == Info.Ctx.getIntWidth(E->getType()));
+ if (SameSign && SameWidth)
+ return Success(ECD->getInitVal(), E);
+ else {
+ // Get rid of mismatch (otherwise Success assertions will fail)
+ // by computing a new value matching the type of E.
+ llvm::APSInt Val = ECD->getInitVal();
+ if (!SameSign)
+ Val.setIsSigned(!ECD->getInitVal().isSigned());
+ if (!SameWidth)
+ Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
+ return Success(Val, E);
+ }
+ }
+ return false;
+}
+
+/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
+/// as GCC.
+static int EvaluateBuiltinClassifyType(const CallExpr *E) {
+ // The following enum mimics the values returned by GCC.
+  // FIXME: Does GCC distinguish between lvalue and rvalue references here?
+ enum gcc_type_class {
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class,
+ lang_type_class
+ };
+
+  // If no argument was supplied, default to "no_type_class". This isn't
+  // ideal; however, it is what gcc does.
+ if (E->getNumArgs() == 0)
+ return no_type_class;
+
+ QualType ArgTy = E->getArg(0)->getType();
+ if (ArgTy->isVoidType())
+ return void_type_class;
+ else if (ArgTy->isEnumeralType())
+ return enumeral_type_class;
+ else if (ArgTy->isBooleanType())
+ return boolean_type_class;
+ else if (ArgTy->isCharType())
+ return string_type_class; // gcc doesn't appear to use char_type_class
+ else if (ArgTy->isIntegerType())
+ return integer_type_class;
+ else if (ArgTy->isPointerType())
+ return pointer_type_class;
+ else if (ArgTy->isReferenceType())
+ return reference_type_class;
+ else if (ArgTy->isRealType())
+ return real_type_class;
+ else if (ArgTy->isComplexType())
+ return complex_type_class;
+ else if (ArgTy->isFunctionType())
+ return function_type_class;
+ else if (ArgTy->isStructureOrClassType())
+ return record_type_class;
+ else if (ArgTy->isUnionType())
+ return union_type_class;
+ else if (ArgTy->isArrayType())
+ return array_type_class;
+ else // FIXME: offset_type_class, method_type_class, & lang_type_class?
+ llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+}
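+
+// A rough sketch of the mapping above, using the GCC-compatible enum values
+// (integer_type_class == 1, pointer_type_class == 5, real_type_class == 8):
+//
+//   __builtin_classify_type(42)        // folds to 1
+//   __builtin_classify_type((void*)0)  // folds to 5
+//   __builtin_classify_type(3.14)      // folds to 8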
+
+/// EvaluateBuiltinConstantPForLValue - Determine the result of
+/// __builtin_constant_p when applied to the given lvalue.
+///
+/// An lvalue is only "constant" if it is a pointer or reference to the first
+/// character of a string literal.
+template<typename LValue>
+static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
+ const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
+ return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
+}
+
+/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
+/// GCC as we can manage.
+static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
+ QualType ArgType = Arg->getType();
+
+ // __builtin_constant_p always has one operand. The rules which gcc follows
+ // are not precisely documented, but are as follows:
+ //
+ // - If the operand is of integral, floating, complex or enumeration type,
+ // and can be folded to a known value of that type, it returns 1.
+  //  - If the operand can be folded to a pointer to the first character
+ // of a string literal (or such a pointer cast to an integral type), it
+ // returns 1.
+ //
+ // Otherwise, it returns 0.
+ //
+ // FIXME: GCC also intends to return 1 for literals of aggregate types, but
+ // its support for this does not currently work.
+ if (ArgType->isIntegralOrEnumerationType()) {
+ Expr::EvalResult Result;
+ if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
+ return false;
+
+ APValue &V = Result.Val;
+ if (V.getKind() == APValue::Int)
+ return true;
+
+ return EvaluateBuiltinConstantPForLValue(V);
+ } else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
+ return Arg->isEvaluatable(Ctx);
+ } else if (ArgType->isPointerType() || Arg->isGLValue()) {
+ LValue LV;
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status);
+ if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
+ : EvaluatePointer(Arg, LV, Info)) &&
+ !Status.HasSideEffects)
+ return EvaluateBuiltinConstantPForLValue(LV);
+ }
+
+ // Anything else isn't considered to be sufficiently constant.
+ return false;
+}
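+
+// A rough sketch of how the rules above play out (illustrative only; 'n'
+// stands for any non-constant variable):
+//
+//   __builtin_constant_p(3 + 4)  // 1: integral operand that folds to a value
+//   __builtin_constant_p("abc")  // 1: pointer to the first character of a
+//                                //    string literal
+//   __builtin_constant_p(n)      // 0: not foldable to a constant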
+
+/// Retrieves the "underlying object type" of the given expression,
+/// as used by __builtin_object_size.
+QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->getType();
+ } else if (const Expr *E = B.get<const Expr*>()) {
+ if (isa<CompoundLiteralExpr>(E))
+ return E->getType();
+ }
+
+ return QualType();
+}
+
+bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
+ // TODO: Perhaps we should let LLVM lower this?
+ LValue Base;
+ if (!EvaluatePointer(E->getArg(0), Base, Info))
+ return false;
+
+ // If we can prove the base is null, lower to zero now.
+ if (!Base.getLValueBase()) return Success(0, E);
+
+ QualType T = GetObjectType(Base.getLValueBase());
+ if (T.isNull() ||
+ T->isIncompleteType() ||
+ T->isFunctionType() ||
+ T->isVariablyModifiedType() ||
+ T->isDependentType())
+ return Error(E);
+
+ CharUnits Size = Info.Ctx.getTypeSizeInChars(T);
+ CharUnits Offset = Base.getLValueOffset();
+
+ if (!Offset.isNegative() && Offset <= Size)
+ Size -= Offset;
+ else
+ Size = CharUnits::Zero();
+ return Success(Size, E);
+}
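+
+// Illustrative sketch, assuming a global 'char buf[10];':
+//
+//   __builtin_object_size(&buf[4], 0)
+//
+// evaluates its pointer argument to base 'buf' with offset 4; the underlying
+// object type is char[10] (size 10), so the call folds to 10 - 4 == 6.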
+
+bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (unsigned BuiltinOp = E->isBuiltinCall()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ case Builtin::BI__builtin_object_size: {
+ if (TryEvaluateBuiltinObjectSize(E))
+ return true;
+
+ // If evaluating the argument has side-effects we can't determine
+ // the size of the object and lower it to unknown now.
+ if (E->getArg(0)->HasSideEffects(Info.Ctx)) {
+ if (E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue() <= 1)
+ return Success(-1ULL, E);
+ return Success(0, E);
+ }
+
+ return Error(E);
+ }
+
+ case Builtin::BI__builtin_classify_type:
+ return Success(EvaluateBuiltinClassifyType(E), E);
+
+ case Builtin::BI__builtin_constant_p:
+ return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
+
+ case Builtin::BI__builtin_eh_return_data_regno: {
+ int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
+ Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand);
+ return Success(Operand, E);
+ }
+
+ case Builtin::BI__builtin_expect:
+ return Visit(E->getArg(0));
+
+ case Builtin::BIstrlen:
+ // A call to strlen is not a constant expression.
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.CCEDiag(E, diag::note_constexpr_invalid_function)
+ << /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'";
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ // Fall through.
+ case Builtin::BI__builtin_strlen:
+ // As an extension, we support strlen() and __builtin_strlen() as constant
+ // expressions when the argument is a string literal.
+ if (const StringLiteral *S
+ = dyn_cast<StringLiteral>(E->getArg(0)->IgnoreParenImpCasts())) {
+ // The string literal may have embedded null characters. Find the first
+ // one and truncate there.
+ StringRef Str = S->getString();
+ StringRef::size_type Pos = Str.find(0);
+ if (Pos != StringRef::npos)
+ Str = Str.substr(0, Pos);
+
+ return Success(Str.size(), E);
+ }
+
+ return Error(E);
+
+ case Builtin::BI__atomic_always_lock_free:
+ case Builtin::BI__atomic_is_lock_free:
+ case Builtin::BI__c11_atomic_is_lock_free: {
+ APSInt SizeVal;
+ if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
+ return false;
+
+ // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
+ // of two less than the maximum inline atomic width, we know it is
+ // lock-free. If the size isn't a power of two, or greater than the
+ // maximum alignment where we promote atomics, we know it is not lock-free
+ // (at least not in the sense of atomic_is_lock_free). Otherwise,
+ // the answer can only be determined at runtime; for example, 16-byte
+ // atomics have lock-free implementations on some, but not all,
+ // x86-64 processors.
+
+ // Check power-of-two.
+ CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One() ||
+ E->getArg(1)->isNullPointerConstant(Info.Ctx,
+ Expr::NPC_NeverValueDependent))
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ return Success(1, E);
+
+ QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+ castAs<PointerType>()->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
+ }
+ }
+
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+ Success(0, E) : Error(E);
+ }
+ }
+}
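+
+// Two illustrative foldings from the builtins handled above (a sketch; the
+// atomic case additionally assumes the target's maximum inline atomic width
+// is at least 32 bits):
+//
+//   __builtin_strlen("ab\0cd")    // folds to 2, truncating at the first
+//                                 // embedded null character
+//   __c11_atomic_is_lock_free(4)  // folds to 1: the size is a power of two
+//                                 // within the inline atomic width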
+
+static bool HasSameBase(const LValue &A, const LValue &B) {
+ if (!A.getLValueBase())
+ return !B.getLValueBase();
+ if (!B.getLValueBase())
+ return false;
+
+ if (A.getLValueBase().getOpaqueValue() !=
+ B.getLValueBase().getOpaqueValue()) {
+ const Decl *ADecl = GetLValueBaseDecl(A);
+ if (!ADecl)
+ return false;
+ const Decl *BDecl = GetLValueBaseDecl(B);
+ if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
+ return false;
+ }
+
+ return IsGlobalLValue(A.getLValueBase()) ||
+ A.getLValueCallIndex() == B.getLValueCallIndex();
+}
+
+/// Perform the given integer operation, which is known to need at most BitWidth
+/// bits, and check for overflow in the original type (if that type was not an
+/// unsigned type).
+template<typename Operation>
+static APSInt CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &LHS, const APSInt &RHS,
+ unsigned BitWidth, Operation Op) {
+ if (LHS.isUnsigned())
+ return Op(LHS, RHS);
+
+ APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
+ APSInt Result = Value.trunc(LHS.getBitWidth());
+ if (Result.extend(BitWidth) != Value)
+ HandleOverflow(Info, E, Value, E->getType());
+ return Result;
+}
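+
+// Sketch of the widening trick above for signed 32-bit addition: both
+// operands are extended to 33 bits, added exactly, and truncated back to 32
+// bits; if re-extending the truncated result does not reproduce the exact
+// 33-bit value, the addition overflowed. For example, INT_MAX + 1 is caught
+// this way and reported through HandleOverflow.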
+
+namespace {
+
+/// \brief Data recursive integer evaluator of certain binary operators.
+///
+/// We use a data recursive algorithm for binary operators so that we are able
+/// to handle extreme cases of chained binary operators without causing stack
+/// overflow.
+class DataRecursiveIntBinOpEvaluator {
+ struct EvalResult {
+ APValue Val;
+ bool Failed;
+
+ EvalResult() : Failed(false) { }
+
+ void swap(EvalResult &RHS) {
+ Val.swap(RHS.Val);
+ Failed = RHS.Failed;
+ RHS.Failed = false;
+ }
+ };
+
+ struct Job {
+ const Expr *E;
+    EvalResult LHSResult; // meaningful only for binary operator expressions.
+ enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
+
+ Job() : StoredInfo(0) { }
+ void startSpeculativeEval(EvalInfo &Info) {
+ OldEvalStatus = Info.EvalStatus;
+ Info.EvalStatus.Diag = 0;
+ StoredInfo = &Info;
+ }
+ ~Job() {
+ if (StoredInfo) {
+ StoredInfo->EvalStatus = OldEvalStatus;
+ }
+ }
+ private:
+ EvalInfo *StoredInfo; // non-null if status changed.
+ Expr::EvalStatus OldEvalStatus;
+ };
+
+ SmallVector<Job, 16> Queue;
+
+ IntExprEvaluator &IntEval;
+ EvalInfo &Info;
+ APValue &FinalResult;
+
+public:
+ DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
+ : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
+
+  /// \brief True if \p E is a binary operator that we are going to handle
+ /// data recursively.
+ /// We handle binary operators that are comma, logical, or that have operands
+ /// with integral or enumeration type.
+ static bool shouldEnqueue(const BinaryOperator *E) {
+ return E->getOpcode() == BO_Comma ||
+ E->isLogicalOp() ||
+ (E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+ }
+
+ bool Traverse(const BinaryOperator *E) {
+ enqueue(E);
+ EvalResult PrevResult;
+ while (!Queue.empty())
+ process(PrevResult);
+
+ if (PrevResult.Failed) return false;
+
+ FinalResult.swap(PrevResult.Val);
+ return true;
+ }
+
+private:
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Error(const Expr *E) {
+ return IntEval.Error(E);
+ }
+ bool Error(const Expr *E, diag::kind D) {
+ return IntEval.Error(E, D);
+ }
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+  /// \brief Returns true if visiting the RHS is necessary, false otherwise.
+ bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags);
+
+ bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result);
+
+ void EvaluateExpr(const Expr *E, EvalResult &Result) {
+ Result.Failed = !Evaluate(Result.Val, Info, E);
+ if (Result.Failed)
+ Result.Val = APValue();
+ }
+
+ void process(EvalResult &Result);
+
+ void enqueue(const Expr *E) {
+ E = E->IgnoreParens();
+ Queue.resize(Queue.size()+1);
+ Queue.back().E = E;
+ Queue.back().Kind = Job::AnyExprKind;
+ }
+};
+
+}
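+
+// Illustrative motivation (a sketch): a machine-generated initializer such as
+//
+//   int k = 1 + 1 + 1 + /* ...many thousands of terms... */ + 1;
+//
+// would exhaust the compiler's own stack if evaluated by plain recursion over
+// the nested BinaryOperators; the explicit Queue above walks the chain
+// iteratively instead.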
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags) {
+ if (E->getOpcode() == BO_Comma) {
+ // Ignore LHS but note if we could not evaluate it.
+ if (LHSResult.Failed)
+ Info.EvalStatus.HasSideEffects = true;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult;
+ if (HandleConversionToBool(LHSResult.Val, lhsResult)) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (lhsResult == (E->getOpcode() == BO_LOr)) {
+ Success(lhsResult, E, LHSResult.Val);
+ return false; // Ignore RHS
+ }
+ } else {
+ // Since we weren't able to evaluate the left hand side, it
+ // must have had side effects.
+ Info.EvalStatus.HasSideEffects = true;
+
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+      // Don't ignore the RHS; just suppress diagnostics from this arm.
+ SuppressRHSDiags = true;
+ }
+
+ return true;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed && !Info.keepEvaluatingAfterFailure())
+ return false; // Ignore RHS;
+
+ return true;
+}
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result) {
+ if (E->getOpcode() == BO_Comma) {
+ if (RHSResult.Failed)
+ return false;
+ Result = RHSResult.Val;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult, rhsResult;
+ bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
+ bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
+
+ if (LHSIsOK) {
+ if (RHSIsOK) {
+ if (E->getOpcode() == BO_LOr)
+ return Success(lhsResult || rhsResult, E, Result);
+ else
+ return Success(lhsResult && rhsResult, E, Result);
+ }
+ } else {
+ if (RHSIsOK) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (rhsResult == (E->getOpcode() == BO_LOr))
+ return Success(rhsResult, E, Result);
+ }
+ }
+
+ return false;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed || RHSResult.Failed)
+ return false;
+
+ const APValue &LHSVal = LHSResult.Val;
+ const APValue &RHSVal = RHSResult.Val;
+
+ // Handle cases like (unsigned long)&a + 4.
+ if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
+ Result = LHSVal;
+ CharUnits AdditionalOffset = CharUnits::fromQuantity(
+ RHSVal.getInt().getZExtValue());
+ if (E->getOpcode() == BO_Add)
+ Result.getLValueOffset() += AdditionalOffset;
+ else
+ Result.getLValueOffset() -= AdditionalOffset;
+ return true;
+ }
+
+ // Handle cases like 4 + (unsigned long)&a
+ if (E->getOpcode() == BO_Add &&
+ RHSVal.isLValue() && LHSVal.isInt()) {
+ Result = RHSVal;
+ Result.getLValueOffset() += CharUnits::fromQuantity(
+ LHSVal.getInt().getZExtValue());
+ return true;
+ }
+
+ if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
+ // Handle (intptr_t)&&A - (intptr_t)&&B.
+ if (!LHSVal.getLValueOffset().isZero() ||
+ !RHSVal.getLValueOffset().isZero())
+ return false;
+ const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+
+ // All the following cases expect both operands to be an integer
+ if (!LHSVal.isInt() || !RHSVal.isInt())
+ return Error(E);
+
+ const APSInt &LHS = LHSVal.getInt();
+ APSInt RHS = RHSVal.getInt();
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+ case BO_Mul:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() * 2,
+ std::multiplies<APSInt>()), E,
+ Result);
+ case BO_Add:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() + 1,
+ std::plus<APSInt>()), E, Result);
+ case BO_Sub:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() + 1,
+ std::minus<APSInt>()), E, Result);
+ case BO_And: return Success(LHS & RHS, E, Result);
+ case BO_Xor: return Success(LHS ^ RHS, E, Result);
+ case BO_Or: return Success(LHS | RHS, E, Result);
+ case BO_Div:
+ case BO_Rem:
+ if (RHS == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+ // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. The latter is
+ // not actually undefined behavior in C++11 due to a language defect.
+ if (RHS.isNegative() && RHS.isAllOnesValue() &&
+ LHS.isSigned() && LHS.isMinSignedValue())
+ HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType());
+ return Success(E->getOpcode() == BO_Rem ? LHS % RHS : LHS / RHS, E,
+ Result);
+ case BO_Shl: {
+ // During constant-folding, a negative shift is an opposite shift. Such
+ // a shift is not a constant expression.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_right;
+ }
+
+ shift_left:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of
+ // the shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS) {
+ CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+ } else if (LHS.isSigned()) {
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ if (LHS.isNegative())
+ CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
+ else if (LHS.countLeadingZeros() < SA)
+ CCEDiag(E, diag::note_constexpr_lshift_discards);
+ }
+
+ return Success(LHS << SA, E, Result);
+ }
+ case BO_Shr: {
+ // During constant-folding, a negative shift is an opposite shift. Such a
+ // shift is not a constant expression.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_left;
+ }
+
+ shift_right:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
+ // shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS)
+ CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+
+ return Success(LHS >> SA, E, Result);
+ }
+
+ case BO_LT: return Success(LHS < RHS, E, Result);
+ case BO_GT: return Success(LHS > RHS, E, Result);
+ case BO_LE: return Success(LHS <= RHS, E, Result);
+ case BO_GE: return Success(LHS >= RHS, E, Result);
+ case BO_EQ: return Success(LHS == RHS, E, Result);
+ case BO_NE: return Success(LHS != RHS, E, Result);
+ }
+}
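+
+// A few illustrative cases for the diagnostics above (sketch only, assuming
+// 32-bit int):
+//
+//   1 / 0         // note_expr_divide_by_zero; not a constant expression
+//   1 << -1       // negative shift: folded as the opposite shift, with a
+//                 // note_constexpr_negative_shift diagnostic
+//   1 << 40       // note_constexpr_large_shift: width >= bit width of int
+//   INT_MIN / -1  // overflow, reported through HandleOverflow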
+
+void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
+ Job &job = Queue.back();
+
+ switch (job.Kind) {
+ case Job::AnyExprKind: {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
+ if (shouldEnqueue(Bop)) {
+ job.Kind = Job::BinOpKind;
+ enqueue(Bop->getLHS());
+ return;
+ }
+ }
+
+ EvaluateExpr(job.E, Result);
+ Queue.pop_back();
+ return;
+ }
+
+ case Job::BinOpKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ bool SuppressRHSDiags = false;
+ if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
+ Queue.pop_back();
+ return;
+ }
+ if (SuppressRHSDiags)
+ job.startSpeculativeEval(Info);
+ job.LHSResult.swap(Result);
+ job.Kind = Job::BinOpVisitedLHSKind;
+ enqueue(Bop->getRHS());
+ return;
+ }
+
+ case Job::BinOpVisitedLHSKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ EvalResult RHS;
+ RHS.swap(Result);
+ Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
+ Queue.pop_back();
+ return;
+ }
+ }
+
+ llvm_unreachable("Invalid Job::Kind!");
+}
+
+bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isAssignmentOp())
+ return Error(E);
+
+ if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
+ return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
+
+ QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
+
+ if (LHSTy->isAnyComplexType()) {
+ assert(RHSTy->isAnyComplexType() && "Invalid comparison");
+ ComplexValue LHS, RHS;
+
+ bool LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ if (LHS.isComplexFloat()) {
+ APFloat::cmpResult CR_r =
+ LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
+ APFloat::cmpResult CR_i =
+ LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
+
+ if (E->getOpcode() == BO_EQ)
+ return Success((CR_r == APFloat::cmpEqual &&
+ CR_i == APFloat::cmpEqual), E);
+ else {
+ assert(E->getOpcode() == BO_NE &&
+ "Invalid complex comparison.");
+ return Success(((CR_r == APFloat::cmpGreaterThan ||
+ CR_r == APFloat::cmpLessThan ||
+ CR_r == APFloat::cmpUnordered) ||
+ (CR_i == APFloat::cmpGreaterThan ||
+ CR_i == APFloat::cmpLessThan ||
+ CR_i == APFloat::cmpUnordered)), E);
+ }
+ } else {
+ if (E->getOpcode() == BO_EQ)
+ return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
+ LHS.getComplexIntImag() == RHS.getComplexIntImag()), E);
+ else {
+ assert(E->getOpcode() == BO_NE &&
+ "Invalid compex comparison.");
+ return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() ||
+ LHS.getComplexIntImag() != RHS.getComplexIntImag()), E);
+ }
+ }
+ }
+
+ if (LHSTy->isRealFloatingType() &&
+ RHSTy->isRealFloatingType()) {
+ APFloat RHS(0.0), LHS(0.0);
+
+ bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
+ return false;
+
+ APFloat::cmpResult CR = LHS.compare(RHS);
+
+ switch (E->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid binary operator!");
+ case BO_LT:
+ return Success(CR == APFloat::cmpLessThan, E);
+ case BO_GT:
+ return Success(CR == APFloat::cmpGreaterThan, E);
+ case BO_LE:
+ return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E);
+ case BO_GE:
+ return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual,
+ E);
+ case BO_EQ:
+ return Success(CR == APFloat::cmpEqual, E);
+ case BO_NE:
+ return Success(CR == APFloat::cmpGreaterThan
+ || CR == APFloat::cmpLessThan
+ || CR == APFloat::cmpUnordered, E);
+ }
+ }
+
+ if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
+ if (E->getOpcode() == BO_Sub || E->isComparisonOp()) {
+ LValue LHSValue, RHSValue;
+
+ bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
+      if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // Reject differing bases from the normal codepath; we special-case
+ // comparisons to null.
+ if (!HasSameBase(LHSValue, RHSValue)) {
+ if (E->getOpcode() == BO_Sub) {
+ // Handle &&A - &&B.
+ if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
+ return false;
+ const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
+        const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+ // Inequalities and subtractions between unrelated pointers have
+ // unspecified or undefined behavior.
+ if (!E->isEqualityOp())
+ return Error(E);
+ // A constant address may compare equal to the address of a symbol.
+ // The one exception is that address of an object cannot compare equal
+ // to a null pointer constant.
+ if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
+ (!RHSValue.Base && !RHSValue.Offset.isZero()))
+ return Error(E);
+ // It's implementation-defined whether distinct literals will have
+ // distinct addresses. In clang, the result of such a comparison is
+ // unspecified, so it is not a constant expression. However, we do know
+ // that the address of a literal will be non-null.
+ if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
+ LHSValue.Base && RHSValue.Base)
+ return Error(E);
+ // We can't tell whether weak symbols will end up pointing to the same
+ // object.
+ if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
+ return Error(E);
+ // Pointers with different bases cannot represent the same object.
+ // (Note that clang defaults to -fmerge-all-constants, which can
+ // lead to inconsistent results for comparisons involving the address
+ // of a constant; this generally doesn't matter in practice.)
+ return Success(E->getOpcode() == BO_NE, E);
+ }
+
+ const CharUnits &LHSOffset = LHSValue.getLValueOffset();
+ const CharUnits &RHSOffset = RHSValue.getLValueOffset();
+
+ SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
+ SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
+
+ if (E->getOpcode() == BO_Sub) {
+ // C++11 [expr.add]p6:
+ // Unless both pointers point to elements of the same array object, or
+ // one past the last element of the array object, the behavior is
+ // undefined.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ !AreElementsOfSameArray(getType(LHSValue.Base),
+ LHSDesignator, RHSDesignator))
+ CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
+
+ QualType Type = E->getLHS()->getType();
+ QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
+
+ CharUnits ElementSize;
+ if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
+ return false;
+
+ // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
+ // and produce incorrect results when it overflows. Such behavior
+ // appears to be non-conforming, but is common, so perhaps we should
+ // assume the standard intended for such cases to be undefined behavior
+ // and check for them.
+
+ // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
+ // overflow in the final conversion to ptrdiff_t.
+ APSInt LHS(
+ llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
+ APSInt RHS(
+ llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
+ APSInt ElemSize(
+ llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false);
+ APSInt TrueResult = (LHS - RHS) / ElemSize;
+ APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
+
+ if (Result.extend(65) != TrueResult)
+ HandleOverflow(Info, E, TrueResult, E->getType());
+ return Success(Result, E);
+ }
+
+ // C++11 [expr.rel]p3:
+ // Pointers to void (after pointer conversions) can be compared, with a
+ // result defined as follows: If both pointers represent the same
+ // address or are both the null pointer value, the result is true if the
+ // operator is <= or >= and false otherwise; otherwise the result is
+ // unspecified.
+ // We interpret this as applying to pointers to *cv* void.
+ if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset &&
+ E->isRelationalOp())
+ CCEDiag(E, diag::note_constexpr_void_comparison);
+
+ // C++11 [expr.rel]p2:
+ // - If two pointers point to non-static data members of the same object,
+      //    or to subobjects or array elements of such members, recursively, the
+ // pointer to the later declared member compares greater provided the
+ // two members have the same access control and provided their class is
+ // not a union.
+ // [...]
+ // - Otherwise pointer comparisons are unspecified.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ E->isRelationalOp()) {
+ bool WasArrayIndex;
+ unsigned Mismatch =
+ FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator,
+ RHSDesignator, WasArrayIndex);
+ // At the point where the designators diverge, the comparison has a
+ // specified value if:
+ // - we are comparing array indices
+ // - we are comparing fields of a union, or fields with the same access
+ // Otherwise, the result is unspecified and thus the comparison is not a
+ // constant expression.
+ if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
+ Mismatch < RHSDesignator.Entries.size()) {
+ const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
+ const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
+ if (!LF && !RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
+ else if (!LF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(LHSDesignator.Entries[Mismatch])
+ << RF->getParent() << RF;
+ else if (!RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(RHSDesignator.Entries[Mismatch])
+ << LF->getParent() << LF;
+ else if (!LF->getParent()->isUnion() &&
+ LF->getAccess() != RF->getAccess())
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access)
+ << LF << LF->getAccess() << RF << RF->getAccess()
+ << LF->getParent();
+ }
+ }
+
+ // The comparison here must be unsigned, and performed with the same
+ // width as the pointer.
+ unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
+ uint64_t CompareLHS = LHSOffset.getQuantity();
+ uint64_t CompareRHS = RHSOffset.getQuantity();
+ assert(PtrSize <= 64 && "Unexpected pointer width");
+ uint64_t Mask = ~0ULL >> (64 - PtrSize);
+ CompareLHS &= Mask;
+ CompareRHS &= Mask;
+
+ // If there is a base and this is a relational operator, we can only
+ // compare pointers within the object in question; otherwise, the result
+ // depends on where the object is located in memory.
+ if (!LHSValue.Base.isNull() && E->isRelationalOp()) {
+ QualType BaseTy = getType(LHSValue.Base);
+ if (BaseTy->isIncompleteType())
+ return Error(E);
+ CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
+ uint64_t OffsetLimit = Size.getQuantity();
+ if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
+ return Error(E);
+ }
+
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("missing comparison operator");
+ case BO_LT: return Success(CompareLHS < CompareRHS, E);
+ case BO_GT: return Success(CompareLHS > CompareRHS, E);
+ case BO_LE: return Success(CompareLHS <= CompareRHS, E);
+ case BO_GE: return Success(CompareLHS >= CompareRHS, E);
+ case BO_EQ: return Success(CompareLHS == CompareRHS, E);
+ case BO_NE: return Success(CompareLHS != CompareRHS, E);
+ }
+ }
+ }
+
+ if (LHSTy->isMemberPointerType()) {
+ assert(E->isEqualityOp() && "unexpected member pointer operation");
+ assert(RHSTy->isMemberPointerType() && "invalid comparison");
+
+ MemberPtr LHSValue, RHSValue;
+
+ bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
+    if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // C++11 [expr.eq]p2:
+ // If both operands are null, they compare equal. Otherwise if only one is
+ // null, they compare unequal.
+ if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
+ bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
+ // Otherwise if either is a pointer to a virtual member function, the
+ // result is unspecified.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+
+ // Otherwise they compare equal if and only if they would refer to the
+ // same member of the same most derived object or the same subobject if
+ // they were dereferenced with a hypothetical object of the associated
+ // class type.
+ bool Equal = LHSValue == RHSValue;
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
+ if (LHSTy->isNullPtrType()) {
+ assert(E->isComparisonOp() && "unexpected nullptr operation");
+ assert(RHSTy->isNullPtrType() && "missing pointer conversion");
+ // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
+    // are compared, the result is true if the operator is <=, >= or ==, and
+ // false otherwise.
+ BinaryOperator::Opcode Opcode = E->getOpcode();
+ return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E);
+ }
+
+ assert((!LHSTy->isIntegralOrEnumerationType() ||
+ !RHSTy->isIntegralOrEnumerationType()) &&
+ "DataRecursiveIntBinOpEvaluator should have handled integral types");
+ // We can't continue from here for non-integral types.
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+}
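+
+// Illustrative sketch of the pointer cases above, assuming a global
+// 'int a[8];' on a target where int is 4 bytes:
+//
+//   &a[6] - &a[2]  // same base, same array: the 16-byte offset difference
+//                  // divided by sizeof(int) folds to 4
+//   &a[1] < &a[5]  // same base: folds to true via the unsigned offset
+//                  // comparison
+//
+// whereas a relational comparison of pointers into two unrelated objects has
+// no constant value and is rejected as described above.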
+
+CharUnits IntExprEvaluator::GetAlignOfType(QualType T) {
+ // C++ [expr.alignof]p3: "When alignof is applied to a reference type, the
+ // result shall be the alignment of the referenced type."
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>())
+ T = Ref->getPointeeType();
+
+ // __alignof is defined to return the preferred alignment.
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+}
+
+CharUnits IntExprEvaluator::GetAlignOfExpr(const Expr *E) {
+ E = E->IgnoreParens();
+
+ // alignof decl is always accepted, even if it doesn't make sense: we default
+ // to 1 in those cases.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return Info.Ctx.getDeclAlign(DRE->getDecl(),
+ /*RefAsPointee*/true);
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
+ /*RefAsPointee*/true);
+
+ return GetAlignOfType(E->getType());
+}
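+
+// Per the reference-type rule above, __alignof__(double&) folds to the same
+// value as __alignof__(double): it is the referenced type's (preferred)
+// alignment that is reported.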
+
+
+/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step,
+/// producing a result of the expression's type.
+bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ switch(E->getKind()) {
+ case UETT_AlignOf: {
+ if (E->isArgumentType())
+ return Success(GetAlignOfType(E->getArgumentType()), E);
+ else
+ return Success(GetAlignOfExpr(E->getArgumentExpr()), E);
+ }
+
+ case UETT_VecStep: {
+ QualType Ty = E->getTypeOfArgument();
+
+ if (Ty->isVectorType()) {
+ unsigned n = Ty->getAs<VectorType>()->getNumElements();
+
+ // The vec_step built-in functions that take a 3-component
+ // vector return 4. (OpenCL 1.1 spec 6.11.12)
+ if (n == 3)
+ n = 4;
+
+ return Success(n, E);
+ } else
+ return Success(1, E);
+ }
+
+ case UETT_SizeOf: {
+ QualType SrcTy = E->getTypeOfArgument();
+ // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
+ // the result is the size of the referenced type."
+ if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
+ SrcTy = Ref->getPointeeType();
+
+ CharUnits Sizeof;
+ if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
+ return false;
+ return Success(Sizeof, E);
+ }
+ }
+
+ llvm_unreachable("unknown expr/type trait");
+}
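+
+// Example of the OpenCL vec_step rule above: vec_step(float3) folds to 4,
+// vec_step(float2) folds to 2, and vec_step of any non-vector type folds
+// to 1.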
+
+bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
+ CharUnits Result;
+ unsigned n = OOE->getNumComponents();
+ if (n == 0)
+ return Error(OOE);
+ QualType CurrentType = OOE->getTypeSourceInfo()->getType();
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = OOE->getComponent(i);
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array: {
+ const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex());
+ APSInt IdxResult;
+ if (!EvaluateInteger(Idx, IdxResult, Info))
+ return false;
+ const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType);
+ if (!AT)
+ return Error(OOE);
+ CurrentType = AT->getElementType();
+ CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType);
+ Result += IdxResult.getSExtValue() * ElementSize;
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return Error(OOE);
+ RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+ unsigned i = MemberDecl->getFieldIndex();
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+ Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
+ CurrentType = MemberDecl->getType().getNonReferenceType();
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfExpr::OffsetOfNode::Base: {
+ CXXBaseSpecifier *BaseSpec = ON.getBase();
+ if (BaseSpec->isVirtual())
+ return Error(OOE);
+
+ // Find the layout of the class whose base we are looking into.
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return Error(OOE);
+ RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+
+ // Find the base class itself.
+ CurrentType = BaseSpec->getType();
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ if (!BaseRT)
+ return Error(OOE);
+
+ // Add the offset to the base.
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ break;
+ }
+ }
+ }
+ return Success(Result, OOE);
+}
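+
+// Illustrative walk of the component loop above, assuming
+// 'struct S { char c; int i[4]; };' on a typical ABI where int is 4 bytes
+// and 4-byte aligned:
+//
+//   __builtin_offsetof(struct S, i[2])
+//
+// visits a Field node (offset of 'i' is 4 after padding) and then an Array
+// node (2 * sizeof(int) == 8), folding to 12 in total.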
+
+bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ // Address, indirect, pre/post inc/dec, etc are not valid constant exprs.
+ // See C99 6.6p3.
+ return Error(E);
+ case UO_Extension:
+ // FIXME: Should extension allow i-c-e extension expressions in its scope?
+ // If so, we could clear the diagnostic ID.
+ return Visit(E->getSubExpr());
+ case UO_Plus:
+ // The result is just the value.
+ return Visit(E->getSubExpr());
+ case UO_Minus: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ const APSInt &Value = Result.getInt();
+ if (Value.isSigned() && Value.isMinSignedValue())
+ HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType());
+ return Success(-Value, E);
+ }
+ case UO_Not: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ return Success(~Result.getInt(), E);
+ }
+ case UO_LNot: {
+ bool bres;
+ if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
+ }
+}
+
+/// HandleCast - This is used to evaluate implicit or explicit casts where the
+/// result type is integer.
+bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ QualType DestType = E->getType();
+ QualType SrcType = SubExpr->getType();
+
+ switch (E->getCastKind()) {
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralToFloating:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ llvm_unreachable("invalid cast kind for integral value");
+
+ case CK_BitCast:
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return Error(E);
+
+ case CK_UserDefinedConversion:
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_MemberPointerToBoolean:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info))
+ return false;
+ return Success(BoolResult, E);
+ }
+
+ case CK_IntegralCast: {
+ if (!Visit(SubExpr))
+ return false;
+
+ if (!Result.isInt()) {
+ // Allow casts of address-of-label differences if they are no-ops
+ // or narrowing. (The narrowing case isn't actually guaranteed to
+ // be constant-evaluatable except in some narrow cases which are hard
+ // to detect here. We let it through on the assumption the user knows
+ // what they are doing.)
+ if (Result.isAddrLabelDiff())
+ return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
+ // Only allow casts of lvalues if they are lossless.
+ return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
+ }
+
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
+ Result.getInt()), E);
+ }
+
+ case CK_PointerToIntegral: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
+ LValue LV;
+ if (!EvaluatePointer(SubExpr, LV, Info))
+ return false;
+
+ if (LV.getLValueBase()) {
+ // Only allow based lvalue casts if they are lossless.
+ // FIXME: Allow a larger integer size than the pointer size, and allow
+ // narrowing back down to pointer width in subsequent integral casts.
+ // FIXME: Check integer type's active bits, not its type size.
+ if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
+ return Error(E);
+
+ LV.Designator.setInvalid();
+ LV.moveInto(Result);
+ return true;
+ }
+
+ APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(),
+ SrcType);
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
+ }
+
+ case CK_IntegralComplexToReal: {
+ ComplexValue C;
+ if (!EvaluateComplex(SubExpr, C, Info))
+ return false;
+ return Success(C.getComplexIntReal(), E);
+ }
+
+ case CK_FloatingToIntegral: {
+ APFloat F(0.0);
+ if (!EvaluateFloat(SubExpr, F, Info))
+ return false;
+
+ APSInt Value;
+ if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value))
+ return false;
+ return Success(Value, E);
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in integral value");
+}
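+
+// Sketch of the CK_PointerToIntegral handling above, assuming a global
+// 'extern int x;' on an LP64 target:
+//
+//   (long)&x  // folded to an lvalue-based value (with a CCE note), since
+//             // 'long' is exactly pointer-sized
+//   (int)&x   // rejected: the destination type is narrower than a pointer,
+//             // so the cast would be lossy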
+
+bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
+ return Success(LV.getComplexIntReal(), E);
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isComplexIntegerType()) {
+ ComplexValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
+ return Success(LV.getComplexIntImag(), E);
+ }
+
+ VisitIgnoredValue(E->getSubExpr());
+ return Success(0, E);
+}
+
+bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
+ return Success(E->getPackLength(), E);
+}
+
+bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Success(E->getValue(), E);
+}
+
+//===----------------------------------------------------------------------===//
+// Float Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FloatExprEvaluator
+ : public ExprEvaluatorBase<FloatExprEvaluator, bool> {
+ APFloat &Result;
+public:
+ FloatExprEvaluator(EvalInfo &info, APFloat &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const APValue &V, const Expr *e) {
+ Result = V.getFloat();
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
+ return true;
+ }
+
+ bool VisitCallExpr(const CallExpr *E);
+
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitFloatingLiteral(const FloatingLiteral *E);
+ bool VisitCastExpr(const CastExpr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRealFloatingType());
+ return FloatExprEvaluator(Info, Result).Visit(E);
+}
+
+static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
+ QualType ResultTy,
+ const Expr *Arg,
+ bool SNaN,
+ llvm::APFloat &Result) {
+ const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
+ if (!S) return false;
+
+ const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
+
+ llvm::APInt fill;
+
+ // Treat empty strings as if they were zero.
+ if (S->getString().empty())
+ fill = llvm::APInt(32, 0);
+ else if (S->getString().getAsInteger(0, fill))
+ return false;
+
+ if (SNaN)
+ Result = llvm::APFloat::getSNaN(Sem, false, &fill);
+ else
+ Result = llvm::APFloat::getQNaN(Sem, false, &fill);
+ return true;
+}
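+
+// Sketch of the payload handling above: the string argument is parsed with
+// getAsInteger(0, fill), so any base prefix works. For example (illustrative):
+//
+//   __builtin_nan("")     // quiet NaN with a zero payload (empty-string case)
+//   __builtin_nan("0x7")  // quiet NaN with payload bits 0x7
+//   __builtin_nans("1")   // signaling NaN with payload 1
+//
+// A non-numeric string such as __builtin_nan("abc") is not folded here.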
+
+bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->isBuiltinCall()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ case Builtin::BI__builtin_huge_val:
+ case Builtin::BI__builtin_huge_valf:
+ case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_inf:
+ case Builtin::BI__builtin_inff:
+ case Builtin::BI__builtin_infl: {
+ const llvm::fltSemantics &Sem =
+ Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getInf(Sem);
+ return true;
+ }
+
+ case Builtin::BI__builtin_nans:
+ case Builtin::BI__builtin_nansf:
+ case Builtin::BI__builtin_nansl:
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ true, Result))
+ return Error(E);
+ return true;
+
+ case Builtin::BI__builtin_nan:
+ case Builtin::BI__builtin_nanf:
+ case Builtin::BI__builtin_nanl:
+ // If this is __builtin_nan() turn this into a nan, otherwise we
+ // can't constant fold it.
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ false, Result))
+ return Error(E);
+ return true;
+
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl:
+ if (!EvaluateFloat(E->getArg(0), Result, Info))
+ return false;
+
+ if (Result.isNegative())
+ Result.changeSign();
+ return true;
+
+ case Builtin::BI__builtin_copysign:
+ case Builtin::BI__builtin_copysignf:
+ case Builtin::BI__builtin_copysignl: {
+ APFloat RHS(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), RHS, Info))
+ return false;
+ Result.copySign(RHS);
+ return true;
+ }
+ }
+}
+
+bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatReal;
+ return true;
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatImag;
+ return true;
+ }
+
+ VisitIgnoredValue(E->getSubExpr());
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getZero(Sem);
+ return true;
+}
+
+bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case UO_Plus:
+ return EvaluateFloat(E->getSubExpr(), Result, Info);
+ case UO_Minus:
+ if (!EvaluateFloat(E->getSubExpr(), Result, Info))
+ return false;
+ Result.changeSign();
+ return true;
+ }
+}
+
+bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ APFloat RHS(0.0);
+ bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+ if (!EvaluateFloat(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case BO_Mul:
+ Result.multiply(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Add:
+ Result.add(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Sub:
+ Result.subtract(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Div:
+ Result.divide(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ }
+
+ if (Result.isInfinity() || Result.isNaN())
+ CCEDiag(E, diag::note_constexpr_float_arithmetic) << Result.isNaN();
+ return true;
+}
+
+bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
+ Result = E->getValue();
+ return true;
+}
+
+bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_IntegralToFloating: {
+ APSInt IntResult;
+ return EvaluateInteger(SubExpr, IntResult, Info) &&
+ HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
+ E->getType(), Result);
+ }
+
+ case CK_FloatingCast: {
+ if (!Visit(SubExpr))
+ return false;
+ return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(),
+ Result);
+ }
+
+ case CK_FloatingComplexToReal: {
+ ComplexValue V;
+ if (!EvaluateComplex(SubExpr, V, Info))
+ return false;
+ Result = V.getComplexFloatReal();
+ return true;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Complex Evaluation (for float and integer)
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ComplexExprEvaluator
+ : public ExprEvaluatorBase<ComplexExprEvaluator, bool> {
+ ComplexValue &Result;
+
+public:
+ ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *e) {
+ Result.setFrom(V);
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isAnyComplexType());
+ return ComplexExprEvaluator(Info, Result).Visit(E);
+}
+
+bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ if (ElemTy->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
+ Result.FloatReal = Zero;
+ Result.FloatImag = Zero;
+ } else {
+ Result.makeComplexInt();
+ APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy);
+ Result.IntReal = Zero;
+ Result.IntImag = Zero;
+ }
+ return true;
+}
+
+bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ if (SubExpr->getType()->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat &Imag = Result.FloatImag;
+ if (!EvaluateFloat(SubExpr, Imag, Info))
+ return false;
+
+ Result.FloatReal = APFloat(Imag.getSemantics());
+ return true;
+ } else {
+ assert(SubExpr->getType()->isIntegerType() &&
+ "Unexpected imaginary literal.");
+
+ Result.makeComplexInt();
+ APSInt &Imag = Result.IntImag;
+ if (!EvaluateInteger(SubExpr, Imag, Info))
+ return false;
+
+ Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
+
+ switch (E->getCastKind()) {
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_UserDefinedConversion:
+ return Error(E);
+
+ case CK_FloatingRealToComplex: {
+ APFloat &Real = Result.FloatReal;
+ if (!EvaluateFloat(E->getSubExpr(), Real, Info))
+ return false;
+
+ Result.makeComplexFloat();
+ Result.FloatImag = APFloat(Real.getSemantics());
+ return true;
+ }
+
+ case CK_FloatingComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
+ HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
+ }
+
+ case CK_FloatingComplexToIntegralComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexInt();
+ return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
+ To, Result.IntReal) &&
+ HandleFloatToIntCast(Info, E, From, Result.FloatImag,
+ To, Result.IntImag);
+ }
+
+ case CK_IntegralRealToComplex: {
+ APSInt &Real = Result.IntReal;
+ if (!EvaluateInteger(E->getSubExpr(), Real, Info))
+ return false;
+
+ Result.makeComplexInt();
+ Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
+ return true;
+ }
+
+ case CK_IntegralComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
+ Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
+ return true;
+ }
+
+ case CK_IntegralComplexToFloatingComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexFloat();
+ return HandleIntToFloatCast(Info, E, From, Result.IntReal,
+ To, Result.FloatReal) &&
+ HandleIntToFloatCast(Info, E, From, Result.IntImag,
+ To, Result.FloatImag);
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ bool LHSOK = Visit(E->getLHS());
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ ComplexValue RHS;
+ if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ assert(Result.isComplexFloat() == RHS.isComplexFloat() &&
+ "Invalid operands to binary operator.");
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case BO_Add:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() += RHS.getComplexIntReal();
+ Result.getComplexIntImag() += RHS.getComplexIntImag();
+ }
+ break;
+ case BO_Sub:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() -= RHS.getComplexIntReal();
+ Result.getComplexIntImag() -= RHS.getComplexIntImag();
+ }
+ break;
+ case BO_Mul:
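+    // Complex multiplication follows (a+bi)(c+di) = (ac - bd) + (ad + bc)i;
+    // the LHS is copied first so the in-place updates below do not clobber
+    // its components.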
+ if (Result.isComplexFloat()) {
+ ComplexValue LHS = Result;
+ APFloat &LHS_r = LHS.getComplexFloatReal();
+ APFloat &LHS_i = LHS.getComplexFloatImag();
+ APFloat &RHS_r = RHS.getComplexFloatReal();
+ APFloat &RHS_i = RHS.getComplexFloatImag();
+
+ APFloat Tmp = LHS_r;
+ Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatReal() = Tmp;
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatReal().subtract(Tmp, APFloat::rmNearestTiesToEven);
+
+ Tmp = LHS_r;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag() = Tmp;
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().add(Tmp, APFloat::rmNearestTiesToEven);
+ } else {
+ ComplexValue LHS = Result;
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
+ LHS.getComplexIntImag() * RHS.getComplexIntImag());
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
+ LHS.getComplexIntImag() * RHS.getComplexIntReal());
+ }
+ break;
+ case BO_Div:
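+    // Complex division uses the textbook formula
+    //   (a+bi)/(c+di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d),
+    // matching the multiply/divide sequence below.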
+ if (Result.isComplexFloat()) {
+ ComplexValue LHS = Result;
+ APFloat &LHS_r = LHS.getComplexFloatReal();
+ APFloat &LHS_i = LHS.getComplexFloatImag();
+ APFloat &RHS_r = RHS.getComplexFloatReal();
+ APFloat &RHS_i = RHS.getComplexFloatImag();
+ APFloat &Res_r = Result.getComplexFloatReal();
+ APFloat &Res_i = Result.getComplexFloatImag();
+
+ APFloat Den = RHS_r;
+ Den.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ APFloat Tmp = RHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Den.add(Tmp, APFloat::rmNearestTiesToEven);
+
+ Res_r = LHS_r;
+ Res_r.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Res_r.add(Tmp, APFloat::rmNearestTiesToEven);
+ Res_r.divide(Den, APFloat::rmNearestTiesToEven);
+
+ Res_i = LHS_i;
+ Res_i.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Tmp = LHS_r;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Res_i.subtract(Tmp, APFloat::rmNearestTiesToEven);
+ Res_i.divide(Den, APFloat::rmNearestTiesToEven);
+ } else {
+ if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+
+ ComplexValue LHS = Result;
+ APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ RHS.getComplexIntImag() * RHS.getComplexIntImag();
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
+ LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
+ }
+ break;
+ }
+
+ return true;
+}
+
+bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ // Get the operand value into 'Result'.
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+ case UO_Extension:
+ return true;
+ case UO_Plus:
+ // The result is always just the subexpr.
+ return true;
+ case UO_Minus:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().changeSign();
+ Result.getComplexFloatImag().changeSign();
+ }
+ else {
+ Result.getComplexIntReal() = -Result.getComplexIntReal();
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ }
+ return true;
+ case UO_Not:
+ if (Result.isComplexFloat())
+ Result.getComplexFloatImag().changeSign();
+ else
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ if (E->getNumInits() == 2) {
+ if (E->getType()->isComplexType()) {
+ Result.makeComplexFloat();
+ if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info))
+ return false;
+ if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info))
+ return false;
+ } else {
+ Result.makeComplexInt();
+ if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info))
+ return false;
+ if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info))
+ return false;
+ }
+ return true;
+ }
+ return ExprEvaluatorBaseTy::VisitInitListExpr(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Void expression evaluation, primarily for a cast to void on the LHS of a
+// comma operator
+//===----------------------------------------------------------------------===//
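+//
+// For instance, in a (hypothetical) 'constexpr int n = ((void)0, 42);' the
+// '(void)0' operand is routed to this evaluator.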
+
+namespace {
+class VoidExprEvaluator
+ : public ExprEvaluatorBase<VoidExprEvaluator, bool> {
+public:
+ VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
+
+ bool Success(const APValue &V, const Expr *e) { return true; }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_ToVoid:
+ VisitIgnoredValue(E->getSubExpr());
+ return true;
+ }
+ }
+};
+} // end anonymous namespace
+
+static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isVoidType());
+ return VoidExprEvaluator(Info).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Top level Expr::EvaluateAsRValue method.
+//===----------------------------------------------------------------------===//
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
+ // In C, function designators are not lvalues, but we evaluate them as if they
+ // are.
+ if (E->isGLValue() || E->getType()->isFunctionType()) {
+ LValue LV;
+ if (!EvaluateLValue(E, LV, Info))
+ return false;
+ LV.moveInto(Result);
+ } else if (E->getType()->isVectorType()) {
+ if (!EvaluateVector(E, Result, Info))
+ return false;
+ } else if (E->getType()->isIntegralOrEnumerationType()) {
+ if (!IntExprEvaluator(Info, Result).Visit(E))
+ return false;
+ } else if (E->getType()->hasPointerRepresentation()) {
+ LValue LV;
+ if (!EvaluatePointer(E, LV, Info))
+ return false;
+ LV.moveInto(Result);
+ } else if (E->getType()->isRealFloatingType()) {
+ llvm::APFloat F(0.0);
+ if (!EvaluateFloat(E, F, Info))
+ return false;
+ Result = APValue(F);
+ } else if (E->getType()->isAnyComplexType()) {
+ ComplexValue C;
+ if (!EvaluateComplex(E, C, Info))
+ return false;
+ C.moveInto(Result);
+ } else if (E->getType()->isMemberPointerType()) {
+ MemberPtr P;
+ if (!EvaluateMemberPointer(E, P, Info))
+ return false;
+ P.moveInto(Result);
+ return true;
+ } else if (E->getType()->isArrayType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ if (!EvaluateArray(E, LV, Info.CurrentCall->Temporaries[E], Info))
+ return false;
+ Result = Info.CurrentCall->Temporaries[E];
+ } else if (E->getType()->isRecordType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ if (!EvaluateRecord(E, LV, Info.CurrentCall->Temporaries[E], Info))
+ return false;
+ Result = Info.CurrentCall->Temporaries[E];
+ } else if (E->getType()->isVoidType()) {
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.CCEDiag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ if (!EvaluateVoid(E, Info))
+ return false;
+ } else if (Info.getLangOpts().CPlusPlus0x) {
+ Info.Diag(E, diag::note_constexpr_nonliteral) << E->getType();
+ return false;
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ return true;
+}
+
+/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
+/// cases, the in-place evaluation is essential, since later initializers for
+/// an object can indirectly refer to subobjects which were initialized earlier.
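+/// A (hypothetical) C example with that shape:
+///   struct S { int a; int *p; };
+///   struct S s = { 1, &s.a };  // second initializer names an earlier subobject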
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
+ const Expr *E, CheckConstantExpressionKind CCEK,
+ bool AllowNonLiteralTypes) {
+ if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E))
+ return false;
+
+ if (E->isRValue()) {
+ // Evaluate arrays and record types in-place, so that later initializers can
+ // refer to earlier-initialized members of the object.
+ if (E->getType()->isArrayType())
+ return EvaluateArray(E, This, Result, Info);
+ else if (E->getType()->isRecordType())
+ return EvaluateRecord(E, This, Result, Info);
+ }
+
+ // For any other type, in-place evaluation is unimportant.
+ return Evaluate(Result, Info, E);
+}
+
+/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
+/// lvalue-to-rvalue cast if it is an lvalue.
+static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+ if (!CheckLiteralType(Info, E))
+ return false;
+
+ if (!::Evaluate(Result, Info, E))
+ return false;
+
+ if (E->isGLValue()) {
+ LValue LV;
+ LV.setFrom(Info.Ctx, Result);
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
+ return false;
+ }
+
+ // Check this core constant expression is a constant expression.
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
+}
+
+/// EvaluateAsRValue - Return true if this is a constant which we can fold using
+/// any crazy technique (that has nothing to do with language standards) that
+/// we want to. If this function returns true, it returns the folded constant
+/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
+/// will be applied to the result.
+bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
+ // Fast-path evaluations of integer literals, since we sometimes see files
+ // containing vast quantities of these.
+ if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(this)) {
+ Result.Val = APValue(APSInt(L->getValue(),
+ L->getType()->isUnsignedIntegerType()));
+ return true;
+ }
+
+ // FIXME: Evaluating values of large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus0x)
+ return false;
+
+ EvalInfo Info(Ctx, Result);
+ return ::EvaluateAsRValue(Info, this, Result.Val);
+}
+
+bool Expr::EvaluateAsBooleanCondition(bool &Result,
+ const ASTContext &Ctx) const {
+ EvalResult Scratch;
+ return EvaluateAsRValue(Scratch, Ctx) &&
+ HandleConversionToBool(Scratch.Val, Result);
+}
+
+bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects) const {
+ if (!getType()->isIntegralOrEnumerationType())
+ return false;
+
+ EvalResult ExprResult;
+ if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
+ (!AllowSideEffects && ExprResult.HasSideEffects))
+ return false;
+
+ Result = ExprResult.Val.getInt();
+ return true;
+}
+
+bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
+ EvalInfo Info(Ctx, Result);
+
+ LValue LV;
+ if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
+ !CheckLValueConstantExpression(Info, getExprLoc(),
+ Ctx.getLValueReferenceType(getType()), LV))
+ return false;
+
+ LV.moveInto(Result.Val);
+ return true;
+}
+
+bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
+ const VarDecl *VD,
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ // FIXME: Evaluating initializers for large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus0x)
+ return false;
+
+ Expr::EvalStatus EStatus;
+ EStatus.Diag = &Notes;
+
+ EvalInfo InitInfo(Ctx, EStatus);
+ InitInfo.setEvaluatingDecl(VD, Value);
+
+ LValue LVal;
+ LVal.set(VD);
+
+ // C++11 [basic.start.init]p2:
+ // Variables with static storage duration or thread storage duration shall be
+ // zero-initialized before any other initialization takes place.
+ // This behavior is not present in C.
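+  // For example, given a (hypothetical)
+  //   struct S { int a, b; }; constexpr S g = {1};
+  // 'g.b' is zero-initialized here before the braced initializer is evaluated
+  // in place below.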
+ if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType()) {
+ ImplicitValueInitExpr VIE(VD->getType());
+ if (!EvaluateInPlace(Value, InitInfo, LVal, &VIE, CCEK_Constant,
+ /*AllowNonLiteralTypes=*/true))
+ return false;
+ }
+
+ if (!EvaluateInPlace(Value, InitInfo, LVal, this, CCEK_Constant,
+ /*AllowNonLiteralTypes=*/true) ||
+ EStatus.HasSideEffects)
+ return false;
+
+ return CheckConstantExpression(InitInfo, VD->getLocation(), VD->getType(),
+ Value);
+}
+
+/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
+/// constant folded, but discard the result.
+bool Expr::isEvaluatable(const ASTContext &Ctx) const {
+ EvalResult Result;
+ return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects;
+}
+
+bool Expr::HasSideEffects(const ASTContext &Ctx) const {
+ return HasSideEffect(Ctx).Visit(this);
+}
+
+APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
+ EvalResult EvalResult;
+ bool Result = EvaluateAsRValue(EvalResult, Ctx);
+ (void)Result;
+ assert(Result && "Could not evaluate expression");
+ assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
+
+ return EvalResult.Val.getInt();
+}
+
+bool Expr::EvalResult::isGlobalLValue() const {
+  assert(Val.isLValue());
+  return IsGlobalLValue(Val.getLValueBase());
+}
+
+/// isIntegerConstantExpr - this recursive routine will test if an expression is
+/// an integer constant expression.
+
+/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
+/// comma, etc
+///
+/// FIXME: Handle offsetof. Two things to do: Handle GCC's __builtin_offsetof
+/// to support gcc 4.0+ and handle the idiom GCC recognizes with a null pointer
+/// cast+dereference.
+
+// CheckICE - This function does the fundamental ICE checking: the returned
+// ICEDiag contains a Val of 0, 1, or 2, and a possibly null SourceLocation.
+// Note that to reduce code duplication, this helper does no evaluation
+// itself; the caller checks whether the expression is evaluatable, and
+// in the rare cases where CheckICE actually cares about the evaluated
+// value, it calls into Evaluate.
+//
+// Meanings of Val:
+// 0: This expression is an ICE.
+// 1: This expression is not an ICE, but if it isn't evaluated, it's
+// a legal subexpression for an ICE. This return value is used to handle
+// the comma operator in C99 mode.
+// 2: This expression is not an ICE, and is not a legal subexpression for one.
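+//
+// For instance, in C99 mode a (hypothetical) '(1, 2)' yields 1 because it is
+// legal only while it remains unevaluated, while a non-builtin call such as
+// 'f()' yields 2.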
+
+namespace {
+
+struct ICEDiag {
+ unsigned Val;
+ SourceLocation Loc;
+
+ public:
+ ICEDiag(unsigned v, SourceLocation l) : Val(v), Loc(l) {}
+ ICEDiag() : Val(0) {}
+};
+
+}
+
+static ICEDiag NoDiag() { return ICEDiag(); }
+
+static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
+ Expr::EvalResult EVResult;
+ if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+ return NoDiag();
+}
+
+static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
+ assert(!E->isValueDependent() && "Should not see value dependent exprs!");
+ if (!E->getType()->isIntegralOrEnumerationType()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+
+ switch (E->getStmtClass()) {
+#define ABSTRACT_STMT(Node)
+#define STMT(Node, Base) case Expr::Node##Class:
+#define EXPR(Node, Base)
+#include "clang/AST/StmtNodes.inc"
+ case Expr::PredefinedExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::StringLiteralClass:
+ case Expr::ArraySubscriptExprClass:
+ case Expr::MemberExprClass:
+ case Expr::CompoundAssignOperatorClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::StmtExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::UserDefinedLiteralClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXBindTemporaryExprClass:
+ case Expr::ExprWithCleanupsClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXUnresolvedConstructExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::BlockExprClass:
+ case Expr::NoStmtClass:
+ case Expr::OpaqueValueExprClass:
+ case Expr::PackExpansionExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::MaterializeTemporaryExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::AtomicExprClass:
+ case Expr::InitListExprClass:
+ case Expr::LambdaExprClass:
+ return ICEDiag(2, E->getLocStart());
+
+ case Expr::SizeOfPackExprClass:
+ case Expr::GNUNullExprClass:
+ // GCC considers the GNU __null value to be an integral constant expression.
+ return NoDiag();
+
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return
+ CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+
+ case Expr::ParenExprClass:
+ return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
+ case Expr::GenericSelectionExprClass:
+ return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::CXXNoexceptExprClass:
+ return NoDiag();
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass: {
+ // C99 6.6/3 allows function calls within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an operand of (pointer to) function type.
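+    // For instance, a (hypothetical) 'int a[sizeof(g(0))];' is fine because
+    // the call is never evaluated, whereas 'int a[g(0)];' is not an ICE.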
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (CE->isBuiltinCall())
+ return CheckEvalInICE(E, Ctx);
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::DeclRefExprClass: {
+ if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
+ return NoDiag();
+ const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl());
+ if (Ctx.getLangOpts().CPlusPlus &&
+ D && IsConstNonVolatile(D->getType())) {
+ // Parameter variables are never constants. Without this check,
+ // getAnyInitializer() can find a default argument, which leads
+ // to chaos.
+ if (isa<ParmVarDecl>(D))
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
+ // C++ 7.1.5.1p2
+ // A variable of non-volatile const-qualified integral or enumeration
+ // type initialized by an ICE can be used in ICEs.
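+      // For example, a (hypothetical) 'const int N = 10; char buf[N];' is
+      // accepted because N's initializer is itself an ICE.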
+ if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
+ if (!Dcl->getType()->isIntegralOrEnumerationType())
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
+ const VarDecl *VD;
+ // Look for a declaration of this variable that has an initializer, and
+ // check whether it is an ICE.
+ if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
+ return NoDiag();
+ else
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+ }
+ }
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *Exp = cast<UnaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec:
+ case UO_AddrOf:
+ case UO_Deref:
+ // C99 6.6/3 allows increment and decrement within unevaluated
+ // subexpressions of constant expressions, but they can never be ICEs
+ // because an ICE cannot contain an lvalue operand.
+ return ICEDiag(2, E->getLocStart());
+ case UO_Extension:
+ case UO_LNot:
+ case UO_Plus:
+ case UO_Minus:
+ case UO_Not:
+ case UO_Real:
+ case UO_Imag:
+ return CheckICE(Exp->getSubExpr(), Ctx);
+ }
+
+    // All unary opcodes are handled above and return, so this is not reached.
+ }
+ case Expr::OffsetOfExprClass: {
+ // Note that per C99, offsetof must be an ICE. And AFAIK, using
+ // EvaluateAsRValue matches the proposed gcc behavior for cases like
+ // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
+ // compliance: we should warn earlier for offsetof expressions with
+ // array subscripts that aren't ICEs, and if the array subscripts
+ // are ICEs, the value of the offsetof must be an integer constant.
+ return CheckEvalInICE(E, Ctx);
+ }
+ case Expr::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
+ if ((Exp->getKind() == UETT_SizeOf) &&
+ Exp->getTypeOfArgument()->isVariableArrayType())
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *Exp = cast<BinaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ // C99 6.6/3 allows assignments within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an lvalue operand.
+ return ICEDiag(2, E->getLocStart());
+
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_Comma: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (Exp->getOpcode() == BO_Div ||
+ Exp->getOpcode() == BO_Rem) {
+ // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
+ // we don't evaluate one.
+ if (LHSResult.Val == 0 && RHSResult.Val == 0) {
+ llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
+ if (REval == 0)
+ return ICEDiag(1, E->getLocStart());
+ if (REval.isSigned() && REval.isAllOnesValue()) {
+ llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
+ if (LEval.isMinSignedValue())
+ return ICEDiag(1, E->getLocStart());
+ }
+ }
+ }
+ if (Exp->getOpcode() == BO_Comma) {
+ if (Ctx.getLangOpts().C99) {
+ // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
+ // if it isn't evaluated.
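+        // For example, a (hypothetical) 'int a[1 ? 2 : (3, 4)];' is accepted
+        // here because the comma operand is never evaluated.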
+ if (LHSResult.Val == 0 && RHSResult.Val == 0)
+ return ICEDiag(1, E->getLocStart());
+ } else {
+ // In both C89 and C++, commas in ICEs are illegal.
+ return ICEDiag(2, E->getLocStart());
+ }
+ }
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ case BO_LAnd:
+ case BO_LOr: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (LHSResult.Val == 0 && RHSResult.Val == 1) {
+ // Rare case where the RHS has a comma "side-effect"; we need
+ // to actually check the condition to see whether the side
+ // with the comma is evaluated.
+ if ((Exp->getOpcode() == BO_LAnd) !=
+ (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
+ return RHSResult;
+ return NoDiag();
+ }
+
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ }
+ }
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass: {
+ const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
+ if (isa<ExplicitCastExpr>(E)) {
+ if (const FloatingLiteral *FL
+ = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
+ unsigned DestWidth = Ctx.getIntWidth(E->getType());
+ bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ APSInt IgnoredVal(DestWidth, !DestSigned);
+ bool Ignored;
+ // If the value does not fit in the destination type, the behavior is
+ // undefined, so we are not required to treat it as a constant
+ // expression.
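+        // For example, a (hypothetical) '(char)1.0e9' does not fit and need
+        // not be treated as an ICE, while '(int)3.0' is fine.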
+ if (FL->getValue().convertToInteger(IgnoredVal,
+ llvm::APFloat::rmTowardZero,
+ &Ignored) & APFloat::opInvalidOp)
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ }
+ switch (cast<CastExpr>(E)->getCastKind()) {
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_IntegralToBoolean:
+ case CK_IntegralCast:
+ return CheckICE(SubExpr, Ctx);
+ default:
+ return ICEDiag(2, E->getLocStart());
+ }
+ }
+ case Expr::BinaryConditionalOperatorClass: {
+ const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
+ ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
+ if (CommonResult.Val == 2) return CommonResult;
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ if (FalseResult.Val == 2) return FalseResult;
+ if (CommonResult.Val == 1) return CommonResult;
+ if (FalseResult.Val == 1 &&
+ Exp->getCommon()->EvaluateKnownConstInt(Ctx) == 0) return NoDiag();
+ return FalseResult;
+ }
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // then only the true side is actually considered in an integer constant
+ // expression, and it is fully evaluated. This is an important GNU
+ // extension. See GCC PR38377 for discussion.
+ if (const CallExpr *CallCE
+ = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ return CheckEvalInICE(E, Ctx);
+ ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
+ if (CondResult.Val == 2)
+ return CondResult;
+
+ ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+
+ if (TrueResult.Val == 2)
+ return TrueResult;
+ if (FalseResult.Val == 2)
+ return FalseResult;
+ if (CondResult.Val == 1)
+ return CondResult;
+ if (TrueResult.Val == 0 && FalseResult.Val == 0)
+ return NoDiag();
+    // Rare case where the diagnostics depend on which side is evaluated.
+ // Note that if we get here, CondResult is 0, and at least one of
+ // TrueResult and FalseResult is non-zero.
+ if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0) {
+ return FalseResult;
+ }
+ return TrueResult;
+ }
+ case Expr::CXXDefaultArgExprClass:
+ return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
+ case Expr::ChooseExprClass: {
+ return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(Ctx), Ctx);
+ }
+ }
+
+ llvm_unreachable("Invalid StmtClass!");
+}
+
+/// Evaluate an expression as a C++11 integral constant expression.
+static bool EvaluateCPlusPlus11IntegralConstantExpr(ASTContext &Ctx,
+ const Expr *E,
+ llvm::APSInt *Value,
+ SourceLocation *Loc) {
+ if (!E->getType()->isIntegralOrEnumerationType()) {
+ if (Loc) *Loc = E->getExprLoc();
+ return false;
+ }
+
+ APValue Result;
+ if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
+ return false;
+
+ assert(Result.isInt() && "pointer cast to int is not an ICE");
+ if (Value) *Value = Result.getInt();
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(ASTContext &Ctx, SourceLocation *Loc) const {
+ if (Ctx.getLangOpts().CPlusPlus0x)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, 0, Loc);
+
+ ICEDiag d = CheckICE(this, Ctx);
+ if (d.Val != 0) {
+ if (Loc) *Loc = d.Loc;
+ return false;
+ }
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, ASTContext &Ctx,
+ SourceLocation *Loc, bool isEvaluated) const {
+ if (Ctx.getLangOpts().CPlusPlus0x)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
+
+ if (!isIntegerConstantExpr(Ctx, Loc))
+ return false;
+ if (!EvaluateAsInt(Value, Ctx))
+ llvm_unreachable("ICE cannot be evaluated!");
+ return true;
+}
+
+bool Expr::isCXX98IntegralConstantExpr(ASTContext &Ctx) const {
+ return CheckICE(this, Ctx).Val == 0;
+}
+
+bool Expr::isCXX11ConstantExpr(ASTContext &Ctx, APValue *Result,
+ SourceLocation *Loc) const {
+ // We support this checking in C++98 mode in order to diagnose compatibility
+ // issues.
+ assert(Ctx.getLangOpts().CPlusPlus);
+
+ // Build evaluation settings.
+ Expr::EvalStatus Status;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diags;
+ Status.Diag = &Diags;
+ EvalInfo Info(Ctx, Status);
+
+ APValue Scratch;
+ bool IsConstExpr = ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch);
+
+ if (!Diags.empty()) {
+ IsConstExpr = false;
+ if (Loc) *Loc = Diags[0].first;
+ } else if (!IsConstExpr) {
+ // FIXME: This shouldn't happen.
+ if (Loc) *Loc = getExprLoc();
+ }
+
+ return IsConstExpr;
+}
+
+bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
+ llvm::SmallVectorImpl<
+ PartialDiagnosticAt> &Diags) {
+ // FIXME: It would be useful to check constexpr function templates, but at the
+ // moment the constant expression evaluator cannot cope with the non-rigorous
+ // ASTs which we build for dependent expressions.
+ if (FD->isDependentContext())
+ return true;
+
+ Expr::EvalStatus Status;
+ Status.Diag = &Diags;
+
+ EvalInfo Info(FD->getASTContext(), Status);
+ Info.CheckingPotentialConstantExpression = true;
+
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : 0;
+
+ // FIXME: Fabricate an arbitrary expression on the stack and pretend that it
+ // is a temporary being used as the 'this' pointer.
+ LValue This;
+ ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
+ This.set(&VIE, Info.CurrentCall->Index);
+
+ ArrayRef<const Expr*> Args;
+
+ SourceLocation Loc = FD->getLocation();
+
+ APValue Scratch;
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ HandleConstructorCall(Loc, This, Args, CD, Info, Scratch);
+ else
+ HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : 0,
+ Args, FD->getBody(), Info, Scratch);
+
+ return Diags.empty();
+}
diff --git a/clang/lib/AST/ExternalASTSource.cpp b/clang/lib/AST/ExternalASTSource.cpp
new file mode 100644
index 0000000..6b9fe26
--- /dev/null
+++ b/clang/lib/AST/ExternalASTSource.cpp
@@ -0,0 +1,62 @@
+//===- ExternalASTSource.cpp - Abstract External AST Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the default implementation of the ExternalASTSource
+// interface, which enables construction of AST nodes from some external
+// source.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/DeclarationName.h"
+
+using namespace clang;
+
+ExternalASTSource::~ExternalASTSource() { }
+
+void ExternalASTSource::PrintStats() { }
+
+Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) {
+ return 0;
+}
+
+Selector ExternalASTSource::GetExternalSelector(uint32_t ID) {
+ return Selector();
+}
+
+uint32_t ExternalASTSource::GetNumExternalSelectors() {
+ return 0;
+}
+
+Stmt *ExternalASTSource::GetExternalDeclStmt(uint64_t Offset) {
+ return 0;
+}
+
+CXXBaseSpecifier *
+ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ return 0;
+}
+
+DeclContextLookupResult
+ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ return DeclContext::lookup_result();
+}
+
+void ExternalASTSource::completeVisibleDeclsMap(const DeclContext *DC) {
+}
+
+ExternalLoadResult
+ExternalASTSource::FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result) {
+ return ELR_AlreadyLoaded;
+}
+
+void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { }
diff --git a/clang/lib/AST/InheritViz.cpp b/clang/lib/AST/InheritViz.cpp
new file mode 100644
index 0000000..b70520f
--- /dev/null
+++ b/clang/lib/AST/InheritViz.cpp
@@ -0,0 +1,168 @@
+//===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CXXRecordDecl::viewInheritance, which
+// generates a GraphViz DOT file that depicts the class inheritance
+// diagram and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <set>
+
+using namespace llvm;
+
+namespace clang {
+
+/// InheritanceHierarchyWriter - Helper class that writes out a
+/// GraphViz file that diagrams the inheritance hierarchy starting at
+/// a given C++ class type. Note that we do not use LLVM's
+/// GraphWriter, because the interface does not permit us to properly
+/// differentiate between uses of types as virtual bases
+/// vs. non-virtual bases.
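+/// In the emitted DOT, the edge for a (hypothetical) 'struct B : virtual A'
+/// base is drawn with style="dashed", while non-virtual bases get plain edges.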
+class InheritanceHierarchyWriter {
+ ASTContext& Context;
+ raw_ostream &Out;
+ std::map<QualType, int, QualTypeOrdering> DirectBaseCount;
+ std::set<QualType, QualTypeOrdering> KnownVirtualBases;
+
+public:
+ InheritanceHierarchyWriter(ASTContext& Context, raw_ostream& Out)
+ : Context(Context), Out(Out) { }
+
+ void WriteGraph(QualType Type) {
+ Out << "digraph \"" << DOT::EscapeString(Type.getAsString()) << "\" {\n";
+ WriteNode(Type, false);
+ Out << "}\n";
+ }
+
+protected:
+  /// WriteNode - Write out the description of the node in the inheritance
+  /// diagram, which may be a base class or the root node.
+ void WriteNode(QualType Type, bool FromVirtual);
+
+ /// WriteNodeReference - Write out a reference to the given node,
+ /// using a unique identifier for each direct base and for the
+ /// (only) virtual base.
+ raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual);
+};
+
+void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ if (FromVirtual) {
+ if (KnownVirtualBases.find(CanonType) != KnownVirtualBases.end())
+ return;
+
+ // We haven't seen this virtual base before, so display it and
+ // its bases.
+ KnownVirtualBases.insert(CanonType);
+ }
+
+ // Declare the node itself.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+
+ // Give the node a label based on the name of the class.
+ std::string TypeName = Type.getAsString();
+ Out << " [ shape=\"box\", label=\"" << DOT::EscapeString(TypeName);
+
+ // If the name of the class was a typedef or something different
+ // from the "real" class name, show the real class name in
+ // parentheses so we don't confuse ourselves.
+ if (TypeName != CanonType.getAsString()) {
+ Out << "\\n(" << CanonType.getAsString() << ")";
+ }
+
+ // Finished describing the node.
+ Out << " \"];\n";
+
+ // Display the base classes.
+ const CXXRecordDecl *Decl
+ = static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl());
+ for (CXXRecordDecl::base_class_const_iterator Base = Decl->bases_begin();
+ Base != Decl->bases_end(); ++Base) {
+ QualType CanonBaseType = Context.getCanonicalType(Base->getType());
+
+ // If this is not virtual inheritance, bump the direct base
+ // count for the type.
+ if (!Base->isVirtual())
+ ++DirectBaseCount[CanonBaseType];
+
+ // Write out the node (if we need to).
+ WriteNode(Base->getType(), Base->isVirtual());
+
+ // Write out the edge.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+ Out << " -> ";
+ WriteNodeReference(Base->getType(), Base->isVirtual());
+
+ // Write out edge attributes to show the kind of inheritance.
+ if (Base->isVirtual()) {
+ Out << " [ style=\"dashed\" ]";
+ }
+ Out << ";";
+ }
+}
+
+/// WriteNodeReference - Write out a reference to the given node,
+/// using a unique identifier for each direct base and for the
+/// (only) virtual base.
+raw_ostream&
+InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
+ bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ Out << "Class_" << CanonType.getAsOpaquePtr();
+ if (!FromVirtual)
+ Out << "_" << DirectBaseCount[CanonType];
+ return Out;
+}
+
+/// viewInheritance - Display the inheritance hierarchy of this C++
+/// class using GraphViz.
+void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
+ QualType Self = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ std::string ErrMsg;
+ sys::Path Filename = sys::Path::GetTemporaryDirectory(&ErrMsg);
+ if (Filename.isEmpty()) {
+ llvm::errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+ Filename.appendComponent(Self.getAsString() + ".dot");
+  if (Filename.makeUnique(true, &ErrMsg)) {
+ llvm::errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+
+ llvm::errs() << "Writing '" << Filename.c_str() << "'... ";
+
+ llvm::raw_fd_ostream O(Filename.c_str(), ErrMsg);
+
+ if (ErrMsg.empty()) {
+ InheritanceHierarchyWriter Writer(Context, O);
+ Writer.WriteGraph(Self);
+ llvm::errs() << " done. \n";
+
+ O.close();
+
+ // Display the graph
+ DisplayGraph(Filename);
+ } else {
+ llvm::errs() << "error opening file for writing!\n";
+ }
+}
+
+}
diff --git a/clang/lib/AST/ItaniumCXXABI.cpp b/clang/lib/AST/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..0027dbf
--- /dev/null
+++ b/clang/lib/AST/ItaniumCXXABI.cpp
@@ -0,0 +1,73 @@
+//===------- ItaniumCXXABI.cpp - AST support for the Itanium C++ ABI ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ AST support targeting the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM C++ ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CXXABI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+namespace {
+class ItaniumCXXABI : public CXXABI {
+protected:
+ ASTContext &Context;
+public:
+ ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+
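+  // In the Itanium ABI a pointer to member function is a {ptr, adj} pair (two
+  // pointer-sized words), while a pointer to a data member is a single
+  // offset, hence the 2 vs. 1 below.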
+ unsigned getMemberPointerSize(const MemberPointerType *MPT) const {
+ QualType Pointee = MPT->getPointeeType();
+ if (Pointee->isFunctionType()) return 2;
+ return 1;
+ }
+
+ CallingConv getDefaultMethodCallConv() const {
+ return CC_C;
+ }
+
+ // We cheat and just check that the class has a vtable pointer, and that it's
+ // only big enough to have a vtable pointer and nothing more (or less).
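+  // A (hypothetical) 'struct N { virtual void f(); };' qualifies: its
+  // non-virtual size is exactly one vtable pointer.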
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+
+ // Check that the class has a vtable pointer.
+ if (!RD->isDynamicClass())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize;
+ }
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(ASTContext &Ctx) : ItaniumCXXABI(Ctx) { }
+};
+}
+
+CXXABI *clang::CreateItaniumCXXABI(ASTContext &Ctx) {
+ return new ItaniumCXXABI(Ctx);
+}
+
+CXXABI *clang::CreateARMCXXABI(ASTContext &Ctx) {
+ return new ARMCXXABI(Ctx);
+}
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
new file mode 100644
index 0000000..0d405f1
--- /dev/null
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -0,0 +1,3587 @@
+//===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+namespace {
+
+/// \brief Retrieve the declaration context that should be used when mangling
+/// the given declaration.
+static const DeclContext *getEffectiveDeclContext(const Decl *D) {
+ // The ABI assumes that lambda closure types that occur within
+ // default arguments live in the context of the function. However, due to
+ // the way in which Clang parses and creates function declarations, this is
+ // not the case: the lambda closure type ends up living in the context
+ // where the function itself resides, because the function declaration itself
+ // had not yet been created. Fix the context here.
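+  // For example, given a (hypothetical) 'void f(int x = [] { return 0; }());',
+  // the closure type is mangled as if it were local to 'f' rather than to the
+  // enclosing namespace.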
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (RD->isLambda())
+ if (ParmVarDecl *ContextParam
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ return D->getDeclContext();
+}
+
+static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+}
+
+static const CXXRecordDecl *GetLocalClassDecl(const NamedDecl *ND) {
+ const DeclContext *DC = dyn_cast<DeclContext>(ND);
+ if (!DC)
+ DC = getEffectiveDeclContext(ND);
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
+ const DeclContext *Parent = getEffectiveDeclContext(cast<Decl>(DC));
+ if (isa<FunctionDecl>(Parent))
+ return dyn_cast<CXXRecordDecl>(DC);
+ DC = Parent;
+ }
+ return 0;
+}
+
+static const FunctionDecl *getStructor(const FunctionDecl *fn) {
+ if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate())
+ return ftd->getTemplatedDecl();
+
+ return fn;
+}
+
+static const NamedDecl *getStructor(const NamedDecl *decl) {
+ const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
+ return (fn ? getStructor(fn) : decl);
+}
+
+static const unsigned UnknownArity = ~0U;
+
+class ItaniumMangleContext : public MangleContext {
+ llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
+ unsigned Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+
+public:
+ explicit ItaniumMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags)
+ : MangleContext(Context, Diags) { }
+
+ uint64_t getAnonymousStructId(const TagDecl *TD) {
+ std::pair<llvm::DenseMap<const TagDecl *,
+ uint64_t>::iterator, bool> Result =
+ AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
+ return Result.first->second;
+ }
+
+ void startNewFunction() {
+ MangleContext::startNewFunction();
+ mangleInitDiscriminator();
+ }
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ bool shouldMangleDeclName(const NamedDecl *D);
+ void mangleName(const NamedDecl *D, raw_ostream &);
+ void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &);
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &);
+ void mangleReferenceTemporary(const VarDecl *D,
+ raw_ostream &);
+ void mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &);
+ void mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &);
+ void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &);
+ void mangleCXXRTTI(QualType T, raw_ostream &);
+ void mangleCXXRTTIName(QualType T, raw_ostream &);
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &);
+
+ void mangleItaniumGuardVariable(const VarDecl *D, raw_ostream &);
+
+ void mangleInitDiscriminator() {
+ Discriminator = 0;
+ }
+
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ // Lambda closure types with external linkage (indicated by a
+ // non-zero lambda mangling number) have their own numbering scheme, so
+ // they do not need a discriminator.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND))
+ if (RD->isLambda() && RD->getLambdaManglingNumber() > 0)
+ return false;
+
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator;
+ if (discriminator == 1)
+ return false;
+ disc = discriminator-2;
+ return true;
+ }
+ /// @}
+};
+
+/// CXXNameMangler - Manage the mangling of a single name.
+class CXXNameMangler {
+ ItaniumMangleContext &Context;
+ raw_ostream &Out;
+
+ /// The "structor" is the top-level declaration being mangled, if
+ /// that's not a template specialization; otherwise it's the pattern
+ /// for that specialization.
+ const NamedDecl *Structor;
+ unsigned StructorType;
+
+  /// SeqID - The next substitution sequence number.
+ unsigned SeqID;
+
+ class FunctionTypeDepthState {
+ unsigned Bits;
+
+ enum { InResultTypeMask = 1 };
+
+ public:
+ FunctionTypeDepthState() : Bits(0) {}
+
+ /// The number of function types we're inside.
+ unsigned getDepth() const {
+ return Bits >> 1;
+ }
+
+ /// True if we're in the return type of the innermost function type.
+ bool isInResultType() const {
+ return Bits & InResultTypeMask;
+ }
+
+ FunctionTypeDepthState push() {
+ FunctionTypeDepthState tmp = *this;
+ Bits = (Bits & ~InResultTypeMask) + 2;
+ return tmp;
+ }
+
+ void enterResultType() {
+ Bits |= InResultTypeMask;
+ }
+
+ void leaveResultType() {
+ Bits &= ~InResultTypeMask;
+ }
+
+ void pop(FunctionTypeDepthState saved) {
+ assert(getDepth() == saved.getDepth() + 1);
+ Bits = saved.Bits;
+ }
+
+ } FunctionTypeDepth;
+
+ llvm::DenseMap<uintptr_t, unsigned> Substitutions;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ CXXNameMangler(ItaniumMangleContext &C, raw_ostream &Out_,
+ const NamedDecl *D = 0)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(0),
+ SeqID(0) {
+ // These can't be mangled without a ctor type or dtor type.
+ assert(!D || (!isa<CXXDestructorDecl>(D) &&
+ !isa<CXXConstructorDecl>(D)));
+ }
+ CXXNameMangler(ItaniumMangleContext &C, raw_ostream &Out_,
+ const CXXConstructorDecl *D, CXXCtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+ CXXNameMangler(ItaniumMangleContext &C, raw_ostream &Out_,
+ const CXXDestructorDecl *D, CXXDtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+
+#if MANGLE_CHECKER
+ ~CXXNameMangler() {
+ if (Out.str()[0] == '\01')
+ return;
+
+ int status = 0;
+ char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
+ assert(status == 0 && "Could not demangle mangled name!");
+ free(result);
+ }
+#endif
+ raw_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D, StringRef Prefix = "_Z");
+ void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
+ void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleName(const NamedDecl *ND);
+ void mangleType(QualType T);
+ void mangleNameOrStandardSubstitution(const NamedDecl *ND);
+
+private:
+ bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(QualType T);
+ bool mangleSubstitution(TemplateName Template);
+ bool mangleSubstitution(uintptr_t Ptr);
+
+ void mangleExistingSubstitution(QualType type);
+ void mangleExistingSubstitution(TemplateName name);
+
+ bool mangleStandardSubstitution(const NamedDecl *ND);
+
+ void addSubstitution(const NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+
+ addSubstitution(reinterpret_cast<uintptr_t>(ND));
+ }
+ void addSubstitution(QualType T);
+ void addSubstitution(TemplateName Template);
+ void addSubstitution(uintptr_t Ptr);
+
+ void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ bool recursive = false);
+ void mangleUnresolvedName(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned KnownArity = UnknownArity);
+
+ void mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ unsigned KnownArity);
+ void mangleUnscopedName(const NamedDecl *ND);
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(TemplateName);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const NamedDecl *ND);
+ void mangleLambda(const CXXRecordDecl *Lambda);
+ void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ bool NoFunction=false);
+ void mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void manglePrefix(NestedNameSpecifier *qualifier);
+ void manglePrefix(const DeclContext *DC, bool NoFunction=false);
+ void manglePrefix(QualType type);
+ void mangleTemplatePrefix(const TemplateDecl *ND);
+ void mangleTemplatePrefix(TemplateName Template);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleQualifiers(Qualifiers Quals);
+ void mangleRefQualifier(RefQualifierKind RefQualifier);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(TemplateName);
+ void mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType);
+ void mangleNeonVectorType(const VectorType *T);
+
+ void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+ void mangleMemberExpr(const Expr *base, bool isArrow,
+ NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned knownArity);
+ void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgs(const ASTTemplateArgumentListInfo &TemplateArgs);
+ void mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL);
+ void mangleTemplateArg(const NamedDecl *P, TemplateArgument A);
+ void mangleUnresolvedTemplateArgs(const TemplateArgument *args,
+ unsigned numArgs);
+
+ void mangleTemplateParameter(unsigned Index);
+
+ void mangleFunctionParam(const ParmVarDecl *parm);
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = getEffectiveDeclContext(D);
+ !DC->isTranslationUnit(); DC = getEffectiveParentContext(DC)) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool ItaniumMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+  // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+  // (always), as does mangling a C++ member function or a function whose
+  // name is not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return false;
+
+ // Variables at global scope with non-internal linkage are not mangled
+ if (!FD) {
+ const DeclContext *DC = getEffectiveDeclContext(D);
+ // Check for extern variable declared locally.
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+ if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
+ return false;
+ }
+
+ // Class members are always mangled.
+ if (getEffectiveDeclContext(D)->isRecord())
+ return true;
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void CXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+
+ // Adding the prefix can cause problems when one file has a "foo" and
+ // another has a "\01foo". That is known to happen on ELF with the
+ // tricks normally used for producing aliases (PR9177). Fortunately the
+ // llvm mangler on ELF is a nop, so we can just avoid adding the \01
+ // marker. We also avoid adding the marker if this is an alias for an
+ // LLVM intrinsic.
+ StringRef UserLabelPrefix =
+ getASTContext().getTargetInfo().getUserLabelPrefix();
+ if (!UserLabelPrefix.empty() && !ALA->getLabel().startswith("llvm."))
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+ Out << Prefix;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleName(VD);
+ else
+ mangleName(cast<FieldDecl>(D));
+}
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // Whether the mangling of a function type includes the return type depends on
+ // the context and the nature of the function. The rules for deciding whether
+ // the return type is included are:
+ //
+ // 1. Template functions (names or types) have return types encoded, with
+ // the exceptions listed below.
+ // 2. Function types not appearing as part of a function name mangling,
+ // e.g. parameters, pointer types, etc., have return type encoded, with the
+ // exceptions listed below.
+ // 3. Non-template function names do not have return types encoded.
+ //
+ // The exceptions mentioned in (1) and (2) above, for which the return type is
+ // never included, are
+ // 1. Constructors.
+ // 2. Destructors.
+ // 3. Conversion operator functions, e.g. operator int.
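+  //
+  // For illustration (hypothetical declarations; manglings follow the rules
+  // above): a plain function `int f(int)` encodes as _Z1fi with no return
+  // type, while the specialization id<int> of `template <class T> T id(T)`
+  // encodes as _Z2idIiET_S0_, which does include the return type T_.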
+ bool MangleReturnType = false;
+ if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
+ if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
+ isa<CXXConversionDecl>(FD)))
+ MangleReturnType = true;
+
+ // Mangle the type of the primary template.
+ FD = PrimaryTemplate->getTemplatedDecl();
+ }
+
+ mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
+ MangleReturnType);
+}
+
+static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
+ while (isa<LinkageSpecDecl>(DC)) {
+ DC = getEffectiveParentContext(DC);
+ }
+
+ return DC;
+}
+
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
+ ->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
+// isStdNamespace - Return whether a given decl context is a toplevel 'std'
+// namespace.
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace())
+ return false;
+
+ return isStd(cast<NamespaceDecl>(DC));
+}
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+ // Check if we have a function template.
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ TemplateArgs = FD->getTemplateSpecializationArgs();
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ return 0;
+}
+
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name>
+ //
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ // FIXME: This is a hack; extern variables declared locally should have
+ // a proper semantic declaration context!
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage() && !isLambda(ND))
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+ else if (GetLocalClassDecl(ND)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ DC = IgnoreLinkageSpecDecls(DC);
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ return;
+ }
+
+ mangleUnscopedName(ND);
+ return;
+ }
+
+ if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ mangleNestedName(ND, DC);
+}
+void CXXNameMangler::mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+ } else {
+ mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+ }
+}
+
+void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
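+  //
+  // For example (illustrative): a name declared directly in namespace std,
+  // such as std::exception, is emitted as "St9exception".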
+
+ if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
+ Out << "St";
+
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ mangleUnscopedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleUnscopedTemplateName(TD);
+
+ if (mangleSubstitution(Template))
+ return;
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Not a dependent template name?");
+ if (const IdentifierInfo *Id = Dependent->getIdentifier())
+ mangleSourceName(Id);
+ else
+ mangleOperatorName(Dependent->getOperator(), UnknownArity);
+
+ addSubstitution(Template);
+}
+
+void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
+ // ABI:
+ // Floating-point literals are encoded using a fixed-length
+ // lowercase hexadecimal string corresponding to the internal
+ // representation (IEEE on Itanium), high-order bytes first,
+ // without leading zeroes. For example: "Lf bf800000 E" is -1.0f
+ // on Itanium.
+ // The 'without leading zeroes' thing seems to be an editorial
+ // mistake; see the discussion on cxx-abi-dev beginning on
+ // 2012-01-16.
+
+  // Our requirements here are just barely weird enough to justify
+ // using a custom algorithm instead of post-processing APInt::toString().
+
+ llvm::APInt valueBits = f.bitcastToAPInt();
+ unsigned numCharacters = (valueBits.getBitWidth() + 3) / 4;
+ assert(numCharacters != 0);
+
+ // Allocate a buffer of the right number of characters.
+ llvm::SmallVector<char, 20> buffer;
+ buffer.set_size(numCharacters);
+
+ // Fill the buffer left-to-right.
+ for (unsigned stringIndex = 0; stringIndex != numCharacters; ++stringIndex) {
+ // The bit-index of the next hex digit.
+ unsigned digitBitIndex = 4 * (numCharacters - stringIndex - 1);
+
+    // Project out 4 bits starting at 'digitBitIndex'.
+ llvm::integerPart hexDigit
+ = valueBits.getRawData()[digitBitIndex / llvm::integerPartWidth];
+ hexDigit >>= (digitBitIndex % llvm::integerPartWidth);
+ hexDigit &= 0xF;
+
+ // Map that over to a lowercase hex digit.
+ static const char charForHex[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ buffer[stringIndex] = charForHex[hexDigit];
+ }
+
+ Out.write(buffer.data(), numCharacters);
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, true);
+ } else
+ Value.print(Out, Value.isSigned());
+}
+
+void CXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [n] <non-negative decimal integer>
+ if (Number < 0) {
+ Out << 'n';
+ Number = -Number;
+ }
+
+ Out << Number;
+}
+
+void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
+ // <call-offset> ::= h <nv-offset> _
+ // ::= v <v-offset> _
+ // <nv-offset> ::= <offset number> # non-virtual base override
+ // <v-offset> ::= <offset number> _ <virtual offset number>
+ // # virtual base override, with vcall offset
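+  //
+  // For example (illustrative): a non-virtual thunk adjusting 'this' by
+  // -16 bytes emits the call-offset "hn16_", as in thunk names of the form
+  // _ZThn16_<base encoding>.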
+ if (!Virtual) {
+ Out << 'h';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ return;
+ }
+
+ Out << 'v';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ mangleNumber(Virtual);
+ Out << '_';
+}
+
+void CXXNameMangler::manglePrefix(QualType type) {
+ if (const TemplateSpecializationType *TST =
+ type->getAs<TemplateSpecializationType>()) {
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ } else if (const DependentTemplateSpecializationType *DTST
+ = type->getAs<DependentTemplateSpecializationType>()) {
+ TemplateName Template
+ = getASTContext().getDependentTemplateName(DTST->getQualifier(),
+ DTST->getIdentifier());
+ mangleTemplatePrefix(Template);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
+ } else {
+ // We use the QualType mangle type variant here because it handles
+ // substitutions.
+ mangleType(type);
+ }
+}
+
+/// Mangle everything prior to the base-unresolved-name in an unresolved-name.
+///
+/// \param firstQualifierLookup - the entity found by unqualified lookup
+/// for the first name in the qualifier, if this is for a member expression
+/// \param recursive - true if this is being called recursively,
+/// i.e. if there is more prefix "to the right".
+void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ bool recursive) {
+
+ // x, ::x
+ // <unresolved-name> ::= [gs] <base-unresolved-name>
+
+ // T::x / decltype(p)::x
+ // <unresolved-name> ::= sr <unresolved-type> <base-unresolved-name>
+
+  // T::N::x / decltype(p)::N::x
+ // <unresolved-name> ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+
+ // A::x, N::y, A<T>::z; "gs" means leading "::"
+ // <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+
+ switch (qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ Out << "gs";
+
+ // We want an 'sr' unless this is the entire NNS.
+ if (recursive)
+ Out << "sr";
+
+ // We never want an 'E' here.
+ return;
+
+ case NestedNameSpecifier::Namespace:
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ else
+ Out << "sr";
+ mangleSourceName(qualifier->getAsNamespace()->getIdentifier());
+ break;
+ case NestedNameSpecifier::NamespaceAlias:
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ else
+ Out << "sr";
+ mangleSourceName(qualifier->getAsNamespaceAlias()->getIdentifier());
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *type = qualifier->getAsType();
+
+ // We only want to use an unresolved-type encoding if this is one of:
+ // - a decltype
+ // - a template type parameter
+ // - a template template parameter with arguments
+ // In all of these cases, we should have no prefix.
+ if (qualifier->getPrefix()) {
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ } else {
+ // Otherwise, all the cases want this.
+ Out << "sr";
+ }
+
+ // Only certain other types are valid as prefixes; enumerate them.
+ switch (type->getTypeClass()) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::Paren:
+ case Type::Elaborated:
+ case Type::Attributed:
+ case Type::Auto:
+ case Type::PackExpansion:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Atomic:
+ llvm_unreachable("type is illegal as a nested name specifier");
+
+ case Type::SubstTemplateTypeParmPack:
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(decltype(T::foo(U())) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ // <unresolved-type> ::= <template-param>
+ // ::= <decltype>
+ // ::= <template-template-param> <template-args>
+ // (this last is not official yet)
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::TemplateTypeParm:
+ case Type::UnaryTransform:
+ case Type::SubstTemplateTypeParm:
+ unresolvedType:
+ assert(!qualifier->getPrefix());
+
+ // We only get here recursively if we're followed by identifiers.
+ if (recursive) Out << 'N';
+
+ // This seems to do everything we want. It's not really
+ // sanctioned for a substituted template parameter, though.
+ mangleType(QualType(type, 0));
+
+ // We never want to print 'E' directly after an unresolved-type,
+ // so we return directly.
+ return;
+
+ case Type::Typedef:
+ mangleSourceName(cast<TypedefType>(type)->getDecl()->getIdentifier());
+ break;
+
+ case Type::UnresolvedUsing:
+ mangleSourceName(cast<UnresolvedUsingType>(type)->getDecl()
+ ->getIdentifier());
+ break;
+
+ case Type::Record:
+ mangleSourceName(cast<RecordType>(type)->getDecl()->getIdentifier());
+ break;
+
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *tst
+ = cast<TemplateSpecializationType>(type);
+ TemplateName name = tst->getTemplateName();
+ switch (name.getKind()) {
+ case TemplateName::Template:
+ case TemplateName::QualifiedTemplate: {
+ TemplateDecl *temp = name.getAsTemplateDecl();
+
+ // If the base is a template template parameter, this is an
+ // unresolved type.
+ assert(temp && "no template for template specialization type");
+ if (isa<TemplateTemplateParmDecl>(temp)) goto unresolvedType;
+
+ mangleSourceName(temp->getIdentifier());
+ break;
+ }
+
+ case TemplateName::OverloadedTemplate:
+ case TemplateName::DependentTemplate:
+ llvm_unreachable("invalid base for a template specialization type");
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = name.getAsSubstTemplateTemplateParm();
+ mangleExistingSubstitution(subst->getReplacement());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class U> class T...> class A {
+ // template <class U...> void foo(decltype(T<U>::foo) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
+
+ mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
+ break;
+ }
+
+ case Type::InjectedClassName:
+ mangleSourceName(cast<InjectedClassNameType>(type)->getDecl()
+ ->getIdentifier());
+ break;
+
+ case Type::DependentName:
+ mangleSourceName(cast<DependentNameType>(type)->getIdentifier());
+ break;
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *tst
+ = cast<DependentTemplateSpecializationType>(type);
+ mangleSourceName(tst->getIdentifier());
+ mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
+ break;
+ }
+ }
+ break;
+ }
+
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes.
+ if (qualifier->getPrefix()) {
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ } else if (firstQualifierLookup) {
+
+ // Try to make a proper qualifier out of the lookup result, and
+ // then just recurse on that.
+ NestedNameSpecifier *newQualifier;
+ if (TypeDecl *typeDecl = dyn_cast<TypeDecl>(firstQualifierLookup)) {
+ QualType type = getASTContext().getTypeDeclType(typeDecl);
+
+ // Pretend we had a different nested name specifier.
+ newQualifier = NestedNameSpecifier::Create(getASTContext(),
+ /*prefix*/ 0,
+ /*template*/ false,
+ type.getTypePtr());
+ } else if (NamespaceDecl *nspace =
+ dyn_cast<NamespaceDecl>(firstQualifierLookup)) {
+ newQualifier = NestedNameSpecifier::Create(getASTContext(),
+ /*prefix*/ 0,
+ nspace);
+ } else if (NamespaceAliasDecl *alias =
+ dyn_cast<NamespaceAliasDecl>(firstQualifierLookup)) {
+ newQualifier = NestedNameSpecifier::Create(getASTContext(),
+ /*prefix*/ 0,
+ alias);
+ } else {
+ // No sensible mangling to do here.
+ newQualifier = 0;
+ }
+
+ if (newQualifier)
+ return mangleUnresolvedPrefix(newQualifier, /*lookup*/ 0, recursive);
+
+ } else {
+ Out << "sr";
+ }
+
+ mangleSourceName(qualifier->getAsIdentifier());
+ break;
+ }
+
+ // If this was the innermost part of the NNS, and we fell out to
+ // here, append an 'E'.
+ if (!recursive)
+ Out << 'E';
+}
+
+/// Mangle an unresolved-name, which is generally used for names which
+/// weren't resolved to specific entities.
+void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned knownArity) {
+ if (qualifier) mangleUnresolvedPrefix(qualifier, firstQualifierLookup);
+ mangleUnqualifiedName(0, name, knownArity);
+}
+
+static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
+ assert(RD->isAnonymousStructOrUnion() &&
+ "Expected anonymous struct or union!");
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ if (FD->getIdentifier())
+ return FD;
+
+ if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
+ if (const FieldDecl *NamedDataMember =
+ FindFirstNamedDataMember(RT->getDecl()))
+ return NamedDataMember;
+ }
+ }
+
+ // We didn't find a named data member.
+ return 0;
+}
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ // We must avoid conflicts between internally- and externally-
+ // linked variable and function declaration names in the same TU:
+ // void test() { extern void foo(); }
+ // static void foo();
+ // This naming convention is the same as that followed by GCC,
+ // though it shouldn't actually matter.
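+      //
+      // For example (illustrative): a file-scope `static int x;` is
+      // mangled as _ZL1x.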
+ if (ND && ND->getLinkage() == InternalLinkage &&
+ getEffectiveDeclContext(ND)->isFileContext())
+ Out << 'L';
+
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ // This is how gcc mangles these names.
+ Out << "12_GLOBAL__N_1";
+ break;
+ }
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const RecordDecl *RD =
+ cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 5.1.2:
+ //
+ // For the purposes of mangling, the name of an anonymous union is
+ // considered to be the name of the first named data member found by a
+ // pre-order, depth-first, declaration-order walk of the data members of
+ // the anonymous union. If there is no such data member (i.e., if all of
+ // the data members in the union are unnamed), then there is no way for
+ // a program to refer to the anonymous union, and there is therefore no
+ // need to mangle its name.
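+      //
+      // For example (illustrative): given `static union { int k; double d; };`
+      // the union object is mangled using the name of its first named data
+      // member, 'k'.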
+ const FieldDecl *FD = FindFirstNamedDataMember(RD);
+
+ // It's actually possible for various reasons for us to get here
+ // with an empty anonymous struct / union. Fortunately, it
+ // doesn't really matter what name we generate.
+ if (!FD) break;
+ assert(FD->getIdentifier() && "Data member name isn't an identifier!");
+
+ mangleSourceName(FD->getIdentifier());
+ break;
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // <unnamed-type-name> ::= <closure-type-name>
+ //
+ // <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
+ // <lambda-sig> ::= <parameter-type>+ # Parameter types or 'v' for 'void'.
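+    //
+    // For example (illustrative): the first closure type with signature
+    // (int) in a given context is encoded as "UliE_", the second as
+    // "UliE0_".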
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+ if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ mangleLambda(Record);
+ break;
+ }
+ }
+
+ // Get a unique id for the anonymous struct.
+ uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+
+ // Mangle it as a source name in the form
+ // [n] $_<id>
+ // where n is the length of the string.
+ SmallString<8> Str;
+ Str += "$_";
+ Str += llvm::utostr(AnonStructId);
+
+ Out << Str.size();
+ Out << Str.str();
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the type
+ // we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the type we
+ // were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Name.getCXXNameType());
+ break;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned Arity;
+ if (ND) {
+ Arity = cast<FunctionDecl>(ND)->getNumParams();
+
+ // If we have a C++ member function, we need to include the 'this' pointer.
+ // FIXME: This does not make sense for operators that are static, but their
+ // names stay the same regardless of the arity (operator new for instance).
+ if (isa<CXXMethodDecl>(ND))
+ Arity++;
+ } else
+ Arity = KnownArity;
+
+ mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
+ break;
+ }
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: This mangling is not yet official.
+ Out << "li";
+ mangleSourceName(Name.getCXXLiteralIdentifier());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
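+  //
+  // For example (illustrative): the identifier "foo" encodes as "3foo".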
+ Out << II->getLength() << II->getName();
+}
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+ const DeclContext *DC,
+ bool NoFunction) {
+ // <nested-name>
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+ // <template-args> E
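+  //
+  // For example (illustrative, hypothetical declaration): `void S::f() const`
+  // is mangled as _ZNK1S1fEv.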
+
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
+ mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ mangleRefQualifier(Method->getRefQualifier());
+ }
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ }
+ else {
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ Out << 'E';
+}
+void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+ Out << 'N';
+
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <local-name> := Z <function encoding> E d [ <parameter number> ]
+ // _ <entity name>
+ // <discriminator> := _ <non-negative number>
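+  //
+  // For example (illustrative): a local `static int s;` inside `void f()`
+  // is mangled as _ZZ1fvE1s.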
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+ if (isa<ObjCMethodDecl>(DC) && isa<FunctionDecl>(ND)) {
+    // Don't add ObjC method name mangling to locally declared functions.
+ mangleUnqualifiedName(ND);
+ return;
+ }
+
+ Out << 'Z';
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(MD);
+ } else if (const CXXRecordDecl *RD = GetLocalClassDecl(ND)) {
+ mangleFunctionEncoding(cast<FunctionDecl>(getEffectiveDeclContext(RD)));
+ Out << 'E';
+
+ // The parameter number is omitted for the last parameter, 0 for the
+ // second-to-last parameter, 1 for the third-to-last parameter, etc. The
+ // <entity name> will of course contain a <closure-type-name>: Its
+ // numbering will be local to the particular argument in which it appears
+ // -- other default arguments do not affect its encoding.
+ bool SkipDiscriminator = false;
+ if (RD->isLambda()) {
+ if (const ParmVarDecl *Parm
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) {
+ if (const FunctionDecl *Func
+ = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
+ Out << 'd';
+ unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
+ if (Num > 1)
+ mangleNumber(Num - 2);
+ Out << '_';
+ SkipDiscriminator = true;
+ }
+ }
+ }
+
+ // Mangle the name relative to the closest enclosing function.
+ if (ND == RD) // equality ok because RD derived from ND above
+ mangleUnqualifiedName(ND);
+ else
+ mangleNestedName(ND, DC, true /*NoFunction*/);
+
+ if (!SkipDiscriminator) {
+ unsigned disc;
+ if (Context.getNextDiscriminator(RD, disc)) {
+ if (disc < 10)
+ Out << '_' << disc;
+ else
+ Out << "__" << disc << '_';
+ }
+ }
+
+ return;
+ }
+ else
+ mangleFunctionEncoding(cast<FunctionDecl>(DC));
+
+ Out << 'E';
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
+ // If the context of a closure type is an initializer for a class member
+ // (static or nonstatic), it is encoded in a qualified name with a final
+ // <prefix> of the form:
+ //
+ // <data-member-prefix> := <member source-name> M
+ //
+ // Technically, the data-member-prefix is part of the <prefix>. However,
+ // since a closure type will always be mangled with a prefix, it's easier
+ // to emit that last part of the prefix here.
+ if (Decl *Context = Lambda->getLambdaContextDecl()) {
+ if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ Context->getDeclContext()->isRecord()) {
+ if (const IdentifierInfo *Name
+ = cast<NamedDecl>(Context)->getIdentifier()) {
+ mangleSourceName(Name);
+ Out << 'M';
+ }
+ }
+ }
+
+ Out << "Ul";
+ DeclarationName Name
+ = getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ const FunctionProtoType *Proto
+ = cast<CXXMethodDecl>(*Lambda->lookup(Name).first)->getType()->
+ getAs<FunctionProtoType>();
+ mangleBareFunctionType(Proto, /*MangleReturnType=*/false);
+ Out << "E";
+
+ // The number is omitted for the first closure type with a given
+ // <lambda-sig> in a given context; it is n-2 for the nth closure type
+ // (in lexical order) with that same <lambda-sig> and context.
+ //
+ // The AST keeps track of the number for us.
+ unsigned Number = Lambda->getLambdaManglingNumber();
+ assert(Number > 0 && "Lambda should be mangled as an unnamed class");
+ if (Number > 1)
+ mangleNumber(Number - 2);
+ Out << '_';
+}
+
+void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
+ switch (qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // nothing
+ return;
+
+ case NestedNameSpecifier::Namespace:
+ mangleName(qualifier->getAsNamespace());
+ return;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ mangleName(qualifier->getAsNamespaceAlias()->getNamespace());
+ return;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ manglePrefix(QualType(qualifier->getAsType(), 0));
+ return;
+
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes, but that
+ // should end up in mangleUnresolvedPrefix instead.
+ assert(qualifier->getPrefix());
+ manglePrefix(qualifier->getPrefix());
+
+ mangleSourceName(qualifier->getAsIdentifier());
+ return;
+ }
+
+ llvm_unreachable("unexpected nested name specifier");
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+
+ DC = IgnoreLinkageSpecDecls(DC);
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
+ manglePrefix(getEffectiveParentContext(DC), NoFunction);
+ SmallString<64> Name;
+ llvm::raw_svector_ostream NameStream(Name);
+ Context.mangleBlock(Block, NameStream);
+ NameStream.flush();
+ Out << Name.size() << Name;
+ return;
+ }
+
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (mangleSubstitution(ND))
+ return;
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ }
+  else if (NoFunction && (isa<FunctionDecl>(ND) || isa<ObjCMethodDecl>(ND)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ mangleObjCMethodName(Method);
+ else {
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplatePrefix(TD);
+
+ if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
+ manglePrefix(Qualified->getQualifier());
+
+ if (OverloadedTemplateStorage *Overloaded
+ = Template.getAsOverloadedTemplate()) {
+ mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(),
+ UnknownArity);
+ return;
+ }
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Unknown template name kind?");
+ manglePrefix(Dependent->getQualifier());
+ mangleUnscopedTemplateName(Template);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ // <template-template-param> ::= <template-param>
+ // <substitution>
+
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ manglePrefix(getEffectiveDeclContext(ND));
+ mangleUnqualifiedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = 0;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedPrefix(Dependent->getQualifier(), 0);
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ // Substituted template parameters are mangled as the substituted
+ // template. This will check for the substitution twice, which is
+ // fine, but we have to return early so that we don't try to *add*
+ // the substitution twice.
+ SubstTemplateTemplateParmStorage *subst
+ = TN.getAsSubstTemplateTemplateParm();
+ mangleType(subst->getReplacement());
+ return;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class> class T...> class A {
+ // template <template <class> class U...> void foo(B<T,U> x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
+
+ addSubstitution(TN);
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # + (binary or unknown)
+ case OO_Plus:
+ Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # - (binary or unknown)
+ case OO_Minus:
+ Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # & (binary or unknown)
+ case OO_Amp:
+ Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # * (binary or unknown)
+ case OO_Star:
+ // Use binary when unknown.
+ Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+
+ // ::= qu # ?
+ // The conditional operator can't be overloaded, but we still handle it when
+ // mangling expressions.
+ case OO_Conditional: Out << "qu"; break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Not an overloaded operator");
+ }
+}
+
+void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
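+  //
+  // For example (illustrative): the pointee qualifiers in
+  // `void f(const volatile int *)` emit "VK", giving _Z1fPVKi.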
+ if (Quals.hasRestrict())
+ Out << 'r';
+ if (Quals.hasVolatile())
+ Out << 'V';
+ if (Quals.hasConst())
+ Out << 'K';
+
+ if (Quals.hasAddressSpace()) {
+ // Extension:
+ //
+ // <type> ::= U <address-space-number>
+ //
+ // where <address-space-number> is a source name consisting of 'AS'
+ // followed by the address space <number>.
+ SmallString<64> ASString;
+ ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
+ Out << 'U' << ASString.size() << ASString;
+ }
+
+ StringRef LifetimeName;
+ switch (Quals.getObjCLifetime()) {
+ // Objective-C ARC Extension:
+ //
+ // <type> ::= U "__strong"
+ // <type> ::= U "__weak"
+ // <type> ::= U "__autoreleasing"
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ LifetimeName = "__weak";
+ break;
+
+ case Qualifiers::OCL_Strong:
+ LifetimeName = "__strong";
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ LifetimeName = "__autoreleasing";
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ // The __unsafe_unretained qualifier is *not* mangled, so that
+ // __unsafe_unretained types in ARC produce the same manglings as the
+ // equivalent (but, naturally, unqualified) types in non-ARC, providing
+ // better ABI compatibility.
+ //
+ // It's safe to do this because unqualified 'id' won't show up
+ // in any type signatures that need to be mangled.
+ break;
+ }
+ if (!LifetimeName.empty())
+ Out << 'U' << LifetimeName.size() << LifetimeName;
+}
+
+void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
+ // <ref-qualifier> ::= R # lvalue reference
+  //                  ::= O                   # rvalue reference
+ // Proposal to Itanium C++ ABI list on 1/26/11
+ switch (RefQualifier) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ Out << 'R';
+ break;
+
+ case RQ_RValue:
+ Out << 'O';
+ break;
+ }
+}
+
+void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+void CXXNameMangler::mangleType(QualType T) {
+ // If our type is instantiation-dependent but not dependent, we mangle
+ // it as it was written in the source, removing any top-level sugar.
+ // Otherwise, use the canonical type.
+ //
+ // FIXME: This is an approximation of the instantiation-dependent name
+ // mangling rules, since we should really be using the type as written and
+ // augmented via semantic analysis (i.e., with implicit conversions and
+ // default template arguments) for any instantiation-dependent type.
+ // Unfortunately, that requires several changes to our AST:
+ // - Instantiation-dependent TemplateSpecializationTypes will need to be
+ // uniqued, so that we can handle substitutions properly
+ // - Default template arguments will need to be represented in the
+ // TemplateSpecializationType, since they need to be mangled even though
+ // they aren't written.
+ // - Conversions on non-type template arguments need to be expressed, since
+ // they can affect the mangling of sizeof/alignof.
+ if (!T->isInstantiationDependentType() || T->isDependentType())
+ T = T.getCanonicalType();
+ else {
+ // Desugar any types that are purely sugar.
+ do {
+ // Don't desugar through template specialization types that aren't
+ // type aliases. We need to mangle the template arguments as written.
+ if (const TemplateSpecializationType *TST
+ = dyn_cast<TemplateSpecializationType>(T))
+ if (!TST->isTypeAlias())
+ break;
+
+ QualType Desugared
+ = T.getSingleStepDesugaredType(Context.getASTContext());
+ if (Desugared == T)
+ break;
+
+ T = Desugared;
+ } while (true);
+ }
+ SplitQualType split = T.split();
+ Qualifiers quals = split.Quals;
+ const Type *ty = split.Ty;
+
+ bool isSubstitutable = quals || !isa<BuiltinType>(T);
+ if (isSubstitutable && mangleSubstitution(T))
+ return;
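+  // For example (illustrative): in _Z1fP1SS0_ for `void f(S *, S *)`, the
+  // second parameter type is emitted as the substitution S0_ rather than
+  // being mangled again.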
+
+ // If we're mangling a qualified array type, push the qualifiers to
+ // the element type.
+ if (quals && isa<ArrayType>(T)) {
+ ty = Context.getASTContext().getAsArrayType(T);
+ quals = Qualifiers();
+
+ // Note that we don't update T: we want to add the
+ // substitution at the original type.
+ }
+
+ if (quals) {
+ mangleQualifiers(quals);
+ // Recurse: even if the qualified type isn't yet substitutable,
+ // the unqualified type might be.
+ mangleType(QualType(ty, 0));
+ } else {
+ switch (ty->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
+#define TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ mangleType(static_cast<const CLASS##Type*>(ty)); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Add the substitution.
+ if (isSubstitutable)
+ addSubstitution(T);
+}
+
+void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
+ if (!mangleStandardSubstitution(ND))
+ mangleName(ND);
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+  //                 ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // ::= Di # char32_t
+ // ::= Ds # char16_t
+ // ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
+ // ::= u <source-name> # vendor extended type
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'v'; break;
+ case BuiltinType::Bool: Out << 'b'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+ case BuiltinType::UChar: Out << 'h'; break;
+ case BuiltinType::UShort: Out << 't'; break;
+ case BuiltinType::UInt: Out << 'j'; break;
+ case BuiltinType::ULong: Out << 'm'; break;
+ case BuiltinType::ULongLong: Out << 'y'; break;
+ case BuiltinType::UInt128: Out << 'o'; break;
+ case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: Out << 'w'; break;
+ case BuiltinType::Char16: Out << "Ds"; break;
+ case BuiltinType::Char32: Out << "Di"; break;
+ case BuiltinType::Short: Out << 's'; break;
+ case BuiltinType::Int: Out << 'i'; break;
+ case BuiltinType::Long: Out << 'l'; break;
+ case BuiltinType::LongLong: Out << 'x'; break;
+ case BuiltinType::Int128: Out << 'n'; break;
+ case BuiltinType::Half: Out << "Dh"; break;
+ case BuiltinType::Float: Out << 'f'; break;
+ case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::LongDouble: Out << 'e'; break;
+ case BuiltinType::NullPtr: Out << "Dn"; break;
+
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("mangling a placeholder type");
+ case BuiltinType::ObjCId: Out << "11objc_object"; break;
+ case BuiltinType::ObjCClass: Out << "10objc_class"; break;
+ case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
+ }
+}
+
+// <type> ::= <function-type>
+// <function-type> ::= F [Y] <bare-function-type> E
+void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+ Out << 'F';
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+ Out << 'E';
+}
+void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType) {
+ // We should never be mangling something without a prototype.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // Record that we're in a function type. See mangleFunctionParam
+ // for details on what we're trying to achieve here.
+ FunctionTypeDepthState saved = FunctionTypeDepth.push();
+
+ // <bare-function-type> ::= <signature type>+
+ if (MangleReturnType) {
+ FunctionTypeDepth.enterResultType();
+ mangleType(Proto->getResultType());
+ FunctionTypeDepth.leaveResultType();
+ }
+
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ // <builtin-type> ::= v # void
+ Out << 'v';
+
+ FunctionTypeDepth.pop(saved);
+ return;
+ }
+
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(Context.getASTContext().getSignatureParameterType(*Arg));
+
+ FunctionTypeDepth.pop(saved);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const TagType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= A <positive dimension number> _ <element type>
+// ::= A [<dimension expression>] _ <element type>
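+//
+// For example (illustrative): `int[10]` encodes as "A10_i", so a function
+// `void f(int (&)[10])` mangles as _Z1fRA10_i.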
+void CXXNameMangler::mangleType(const ConstantArrayType *T) {
+ Out << 'A' << T->getSize() << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const VariableArrayType *T) {
+ Out << 'A';
+  // Decayed VLA types (with no size expression) are just skipped here.
+ if (T->getSizeExpr())
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ Out << "A_";
+ mangleType(T->getElementType());
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= M <class type> <member type>
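+//
+// For example (illustrative): `void (S::*)() const` encodes as "M1SKFvvE",
+// so `void g(void (S::*)() const)` mangles as _Z1gM1SKFvvE.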
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
+ mangleRefQualifier(FPT->getRefQualifier());
+ mangleType(FPT);
+
+ // Itanium C++ ABI 5.1.8:
+ //
+ // The type of a non-static member function is considered to be different,
+ // for the purposes of substitution, from the type of a namespace-scope or
+ // static member function whose type appears similar. The types of two
+ // non-static member functions are considered to be different, for the
+ // purposes of substitution, if the functions are members of different
+ // classes. In other words, for the purposes of substitution, the class of
+ // which the function is a member is considered part of the type of
+ // function.
+
+ // We increment the SeqID here to emulate adding an entry to the
+ // substitution table. We can't actually add it because we don't want this
+ // particular function type to be substituted.
+ ++SeqID;
+ } else
+ mangleType(PointeeType);
+}
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ mangleTemplateParameter(T->getIndex());
+}
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(T(*)(U) x...);
+ // };
+ Out << "_SUBSTPACK_";
+}
+
+// <type> ::= P <type> # pointer-to
+void CXXNameMangler::mangleType(const PointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= R <type> # reference-to
+void CXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'R';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= O <type> # rvalue reference-to (C++0x)
+void CXXNameMangler::mangleType(const RValueReferenceType *T) {
+ Out << 'O';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= C <type> # complex pair (C 2000)
+void CXXNameMangler::mangleType(const ComplexType *T) {
+ Out << 'C';
+ mangleType(T->getElementType());
+}
+
+// ARM's ABI for Neon vector types specifies that they should be mangled as
+// if they are structs (to match ARM's initial implementation). The
+// vector type must be one of the special types predefined by ARM.
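+//
+// For example (illustrative): the ARM Neon type int32x4_t (a 128-bit vector
+// of int32_t) is mangled as "17__simd128_int32_t".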
+void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
+ const char *EltName = 0;
+ if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "poly8_t"; break;
+ case BuiltinType::Short: EltName = "poly16_t"; break;
+ default: llvm_unreachable("unexpected Neon polynomial vector element type");
+ }
+ } else {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "int8_t"; break;
+ case BuiltinType::UChar: EltName = "uint8_t"; break;
+ case BuiltinType::Short: EltName = "int16_t"; break;
+ case BuiltinType::UShort: EltName = "uint16_t"; break;
+ case BuiltinType::Int: EltName = "int32_t"; break;
+ case BuiltinType::UInt: EltName = "uint32_t"; break;
+ case BuiltinType::LongLong: EltName = "int64_t"; break;
+ case BuiltinType::ULongLong: EltName = "uint64_t"; break;
+ case BuiltinType::Float: EltName = "float32_t"; break;
+ default: llvm_unreachable("unexpected Neon vector element type");
+ }
+ }
+ const char *BaseName = 0;
+ unsigned BitSize = (T->getNumElements() *
+ getASTContext().getTypeSize(EltType));
+ if (BitSize == 64)
+ BaseName = "__simd64_";
+ else {
+ assert(BitSize == 128 && "Neon vector type not 64 or 128 bits");
+ BaseName = "__simd128_";
+ }
+ Out << strlen(BaseName) + strlen(EltName);
+ Out << BaseName << EltName;
+}
+
+// GNU extension: vector types
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _
+// <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
+void CXXNameMangler::mangleType(const VectorType *T) {
+ if ((T->getVectorKind() == VectorType::NeonVector ||
+ T->getVectorKind() == VectorType::NeonPolyVector)) {
+ mangleNeonVectorType(T);
+ return;
+ }
+ Out << "Dv" << T->getNumElements() << '_';
+ if (T->getVectorKind() == VectorType::AltiVecPixel)
+ Out << 'p';
+ else if (T->getVectorKind() == VectorType::AltiVecBool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
+}
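Roughly, for the two paths above (hypothetical typedefs; the Neon spelling assumes the usual arm_neon.h types): a GNU vector uses the Dv form, while the predefined ARM Neon types come out as length-prefixed struct-like names built from __simd64_/__simd128_ plus the element name:

    typedef int v4si __attribute__((vector_size(16)));
    void f(v4si);        // _Z1fDv4_i                (Dv <lanes> _ <element>)
    void g(int32x4_t);   // _Z1g17__simd128_int32_t  (length-prefixed source name)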
+void CXXNameMangler::mangleType(const ExtVectorType *T) {
+ mangleType(static_cast<const VectorType*>(T));
+}
+void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const PackExpansionType *T) {
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(T->getPattern());
+}
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void CXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "U13block_pointer";
+ mangleType(T->getPointeeType());
+}
+
+void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ // Mangle injected class name types as if the user had written the
+ // specialization out fully. It may not actually be possible to see
+ // this mangling, though.
+ mangleType(T->getInjectedSpecializationType());
+}
+
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
+ mangleName(TD, T->getArgs(), T->getNumArgs());
+ } else {
+ if (mangleSubstitution(QualType(T, 0)))
+ return;
+
+ mangleTemplatePrefix(T->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ addSubstitution(QualType(T, 0));
+ }
+}
+
+void CXXNameMangler::mangleType(const DependentNameType *T) {
+ // Typename types are always nested
+ Out << 'N';
+ manglePrefix(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
+ // Dependently-scoped template types are nested if they have a prefix.
+ Out << 'N';
+
+ // TODO: avoid making this TemplateName.
+ TemplateName Prefix =
+ getASTContext().getDependentTemplateName(T->getQualifier(),
+ T->getIdentifier());
+ mangleTemplatePrefix(Prefix);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const TypeOfType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const TypeOfExprType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const DecltypeType *T) {
+ Expr *E = T->getUnderlyingExpr();
+
+ // type ::= Dt <expression> E # decltype of an id-expression
+ // # or class member access
+ // ::= DT <expression> E # decltype of an expression
+
+ // This purports to be an exhaustive list of id-expressions and
+ // class member accesses. Note that we do not ignore parentheses;
+ // parentheses change the semantics of decltype for these
+ // expressions (and cause the mangler to use the other form).
+ if (isa<DeclRefExpr>(E) ||
+ isa<MemberExpr>(E) ||
+ isa<UnresolvedLookupExpr>(E) ||
+ isa<DependentScopeDeclRefExpr>(E) ||
+ isa<CXXDependentScopeMemberExpr>(E) ||
+ isa<UnresolvedMemberExpr>(E))
+ Out << "Dt";
+ else
+ Out << "DT";
+ mangleExpression(E);
+ Out << 'E';
+}
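A small sketch of the Dt/DT split: decltype of a bare id-expression uses Dt, while adding parentheses switches to DT, since the parentheses change the meaning of decltype:

    template <class T> auto f(T x) -> decltype(x);    // return type mangles as Dtfp_E
    template <class T> auto g(T x) -> decltype((x));  // return type mangles as DTfp_E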
+
+void CXXNameMangler::mangleType(const UnaryTransformType *T) {
+ // If this is dependent, we need to record that. If not, we simply
+ // mangle it as the underlying type since they are equivalent.
+ if (T->isDependentType()) {
+ Out << 'U';
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ Out << "3eut";
+ break;
+ }
+ }
+
+ mangleType(T->getUnderlyingType());
+}
+
+void CXXNameMangler::mangleType(const AutoType *T) {
+ QualType D = T->getDeducedType();
+ // <builtin-type> ::= Da # dependent auto
+ if (D.isNull())
+ Out << "Da";
+ else
+ mangleType(D);
+}
+
+void CXXNameMangler::mangleType(const AtomicType *T) {
+ // <type> ::= U <source-name> <type> # vendor extended type qualifier
+ // (Until there's a standardized mangling...)
+ Out << "U7_Atomic";
+ mangleType(T->getValueType());
+}
+
+void CXXNameMangler::mangleIntegerLiteral(QualType T,
+ const llvm::APSInt &Value) {
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+ Out << 'L';
+
+ mangleType(T);
+ if (T->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Value.getBoolValue() ? '1' : '0');
+ } else {
+ mangleNumber(Value);
+ }
+ Out << 'E';
+
+}
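Integer template arguments, for instance, flow through this routine. Assuming hypothetical class templates A and C:

    template <int N>  struct A {};
    template <bool B> struct C {};
    void f(A<5>);     // _Z1f1AILi5EE   (L, the type 'i', the value 5, E)
    void g(C<true>);  // _Z1g1CILb1EE   (booleans are encoded as 0/1)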
+
+/// Mangles a member expression.
+void CXXNameMangler::mangleMemberExpr(const Expr *base,
+ bool isArrow,
+ NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName member,
+ unsigned arity) {
+ // <expression> ::= dt <expression> <unresolved-name>
+ // ::= pt <expression> <unresolved-name>
+ if (base) {
+ if (base->isImplicitCXXThis()) {
+ // Note: GCC mangles member expressions to the implicit 'this' as
+ // *this., whereas we represent them as this->. The Itanium C++ ABI
+ // does not specify anything here, so we follow GCC.
+ Out << "dtdefpT";
+ } else {
+ Out << (isArrow ? "pt" : "dt");
+ mangleExpression(base);
+ }
+ }
+ mangleUnresolvedName(qualifier, firstQualifierLookup, member, arity);
+}
+
+/// Look at the callee of the given call expression and determine if
+/// it's a parenthesized id-expression which would have triggered ADL
+/// otherwise.
+static bool isParenthesizedADLCallee(const CallExpr *call) {
+ const Expr *callee = call->getCallee();
+ const Expr *fn = callee->IgnoreParens();
+
+ // Must be parenthesized. IgnoreParens() skips __extension__ nodes,
+ // too, but for those to appear in the callee, it would have to be
+ // parenthesized.
+ if (callee == fn) return false;
+
+ // Must be an unresolved lookup.
+ const UnresolvedLookupExpr *lookup = dyn_cast<UnresolvedLookupExpr>(fn);
+ if (!lookup) return false;
+
+ assert(!lookup->requiresADL());
+
+ // Must be an unqualified lookup.
+ if (lookup->getQualifier()) return false;
+
+ // Must not have found a class member. Note that if one is a class
+ // member, they're all class members.
+ if (lookup->getNumDecls() > 0 &&
+ (*lookup->decls_begin())->isCXXClassMember())
+ return false;
+
+ // Otherwise, ADL would have been triggered.
+ return true;
+}
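A sketch of the resulting distinction (it assumes some overload of f is visible, so the callee is a dependent unresolved lookup): an ordinary dependent call mangles its callee under cl, while a parenthesized callee, which would otherwise have triggered ADL, uses cp:

    template <class T> auto g(T x) -> decltype(f(x));    // roughly DTcl1ffp_EE
    template <class T> auto h(T x) -> decltype((f)(x));  // roughly DTcp1ffp_EE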
+
+void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
+ // <expression> ::= <unary operator-name> <expression>
+ // ::= <binary operator-name> <expression> <expression>
+ // ::= <trinary operator-name> <expression> <expression> <expression>
+ // ::= cv <type> expression # conversion with one argument
+ // ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+ // ::= st <type> # sizeof (a type)
+ // ::= at <type> # alignof (a type)
+ // ::= <template-param>
+ // ::= <function-param>
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
+ // ::= ds <expression> <expression> # expr.*expr
+ // ::= sZ <template-param> # size of a parameter pack
+ // ::= sZ <function-param> # size of a function parameter pack
+ // ::= <expr-primary>
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+  //                ::= L <type> <value float> E  # floating literal
+ // ::= L <mangled-name> E # external name
+ // ::= fpT # 'this' expression
+ QualType ImplicitlyConvertedToType;
+
+recurse:
+ switch (E->getStmtClass()) {
+ case Expr::NoStmtClass:
+#define ABSTRACT_STMT(Type)
+#define EXPR(Type, Base)
+#define STMT(Type, Base) \
+ case Expr::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::LambdaExprClass:
+ llvm_unreachable("unexpected statement kind");
+
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::GenericSelectionExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::AtomicExprClass:
+ {
+ // As bad as this diagnostic is, it's better than crashing.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ // Even gcc-4.5 doesn't mangle this.
+ case Expr::BinaryConditionalOperatorClass: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "?: operator with omitted middle operand cannot be mangled");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ // These are used for internal purposes and cannot be meaningfully mangled.
+ case Expr::OpaqueValueExprClass:
+ llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");
+
+ case Expr::InitListExprClass: {
+ // Proposal by Jason Merrill, 2012-01-03
+ Out << "il";
+ const InitListExpr *InitList = cast<InitListExpr>(E);
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+ Out << "E";
+ break;
+ }
+
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
+ break;
+
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ Arity);
+ break;
+
+ case Expr::UserDefinedLiteralClass:
+ // We follow g++'s approach of mangling a UDL as a call to the literal
+ // operator.
+ case Expr::CXXMemberCallExprClass: // fallthrough
+ case Expr::CallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+
+ // <expression> ::= cp <simple-id> <expression>* E
+ // We use this mangling only when the call would use ADL except
+ // for being parenthesized. Per discussion with David
+ // Vandervoorde, 2011.04.25.
+ if (isParenthesizedADLCallee(CE)) {
+ Out << "cp";
+ // The callee here is a parenthesized UnresolvedLookupExpr with
+ // no qualifier and should always get mangled as a <simple-id>
+ // anyway.
+
+ // <expression> ::= cl <expression>* E
+ } else {
+ Out << "cl";
+ }
+
+ mangleExpression(CE->getCallee(), CE->getNumArgs());
+ for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
+ mangleExpression(CE->getArg(I));
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CXXNewExprClass: {
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ // Proposal by Jason Merrill, 2012-01-03
+ if (New->getInitializationStyle() == CXXNewExpr::ListInit)
+ Out << "il";
+ else
+ Out << "pi";
+ const Expr *Init = New->getInitializer();
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ // Directly inline the initializers.
+ for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
+ E = CCE->arg_end();
+ I != E; ++I)
+ mangleExpression(*I);
+ } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) {
+ for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i)
+ mangleExpression(PLE->getExpr(i));
+ } else if (New->getInitializationStyle() == CXXNewExpr::ListInit &&
+ isa<InitListExpr>(Init)) {
+ // Only take InitListExprs apart for list-initialization.
+ const InitListExpr *InitList = cast<InitListExpr>(Init);
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+ } else
+ mangleExpression(Init);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), 0, ME->getMemberDecl()->getDeclName(),
+ Arity);
+ break;
+ }
+
+ case Expr::UnresolvedMemberExprClass: {
+ const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), 0, ME->getMemberName(),
+ Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXDependentScopeMemberExprClass: {
+ const CXXDependentScopeMemberExpr *ME
+ = cast<CXXDependentScopeMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getFirstQualifierFoundInScope(),
+ ME->getMember(), Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::UnresolvedLookupExprClass: {
+ const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
+ mangleUnresolvedName(ULE->getQualifier(), 0, ULE->getName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
+ if (ULE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ULE->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXUnresolvedConstructExprClass: {
+ const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
+ unsigned N = CE->arg_size();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
+ unsigned N = CE->getNumArgs();
+
+ // Proposal by Jason Merrill, 2012-01-03
+ if (CE->isListInitialization())
+ Out << "tl";
+ else
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXScalarValueInitExprClass:
+    Out << "cv";
+    mangleType(E->getType());
+    Out << "_E";
+ break;
+
+ case Expr::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
+
+ if (!SAE->isInstantiationDependent()) {
+ // Itanium C++ ABI:
+ // If the operand of a sizeof or alignof operator is not
+ // instantiation-dependent it is encoded as an integer literal
+ // reflecting the result of the operator.
+ //
+ // If the result of the operator is implicitly converted to a known
+ // integer type, that type is used for the literal; otherwise, the type
+ // of std::size_t or std::ptrdiff_t is used.
+ QualType T = (ImplicitlyConvertedToType.isNull() ||
+ !ImplicitlyConvertedToType->isIntegerType())? SAE->getType()
+ : ImplicitlyConvertedToType;
+ llvm::APSInt V = SAE->EvaluateKnownConstInt(Context.getASTContext());
+ mangleIntegerLiteral(T, V);
+ break;
+ }
+
+ switch(SAE->getKind()) {
+ case UETT_SizeOf:
+ Out << 's';
+ break;
+ case UETT_AlignOf:
+ Out << 'a';
+ break;
+ case UETT_VecStep:
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle vec_step expression");
+ Diags.Report(DiagID);
+ return;
+ }
+ if (SAE->isArgumentType()) {
+ Out << 't';
+ mangleType(SAE->getArgumentType());
+ } else {
+ Out << 'z';
+ mangleExpression(SAE->getArgumentExpr());
+ }
+ break;
+ }
+
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand());
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(E);
+ mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
+ /*Arity=*/1);
+ mangleExpression(UO->getSubExpr());
+ break;
+ }
+
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+ // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(E);
+ if (BO->getOpcode() == BO_PtrMemD)
+ Out << "ds";
+ else
+ mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
+ /*Arity=*/2);
+ mangleExpression(BO->getLHS());
+ mangleExpression(BO->getRHS());
+ break;
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ mangleOperatorName(OO_Conditional, /*Arity=*/3);
+ mangleExpression(CO->getCond());
+ mangleExpression(CO->getLHS(), Arity);
+ mangleExpression(CO->getRHS(), Arity);
+ break;
+ }
+
+ case Expr::ImplicitCastExprClass: {
+ ImplicitlyConvertedToType = E->getType();
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto recurse;
+ }
+
+ case Expr::ObjCBridgedCastExprClass: {
+ // Mangle ownership casts as a vendor extended operator __bridge,
+ // __bridge_transfer, or __bridge_retain.
+ StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName();
+ Out << "v1U" << Kind.size() << Kind;
+ }
+ // Fall through to mangle the cast itself.
+
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::CXXFunctionalCastExprClass: {
+ const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+ Out << "cv";
+ mangleType(ECE->getType());
+ mangleExpression(ECE->getSubExpr());
+ break;
+ }
+
+ case Expr::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
+ unsigned NumArgs = CE->getNumArgs();
+ mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+ // Mangle the arguments.
+ for (unsigned i = 0; i != NumArgs; ++i)
+ mangleExpression(CE->getArg(i));
+ break;
+ }
+
+ case Expr::ParenExprClass:
+ mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::DeclRefExprClass: {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+
+ case Decl::ParmVar:
+ mangleFunctionParam(cast<ParmVarDecl>(D));
+ break;
+
+ case Decl::EnumConstant: {
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
+ case Decl::NonTypeTemplateParm: {
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getIndex());
+ break;
+ }
+
+ }
+
+ break;
+ }
+
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ // FIXME: not clear how to mangle this!
+ // template <unsigned N...> class A {
+ // template <class U...> void foo(U (&x)[N]...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ case Expr::DependentScopeDeclRefExprClass: {
+ const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
+ mangleUnresolvedName(DRE->getQualifier(), 0, DRE->getDeclName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
+ if (DRE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(DRE->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXBindTemporaryExprClass:
+ mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::ExprWithCleanupsClass:
+ mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::FloatingLiteralClass: {
+ const FloatingLiteral *FL = cast<FloatingLiteral>(E);
+ Out << 'L';
+ mangleType(FL->getType());
+ mangleFloat(FL->getValue());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CharacterLiteralClass:
+ Out << 'L';
+ mangleType(E->getType());
+ Out << cast<CharacterLiteral>(E)->getValue();
+ Out << 'E';
+ break;
+
+  // FIXME: __objc_yes/__objc_no are mangled the same as true/false
+ case Expr::ObjCBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::CXXBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
+ break;
+ }
+
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+ // Proposal from David Vandevoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << "0_";
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::StringLiteralClass: {
+ // Revised proposal from David Vandervoorde, 2010.07.15.
+ Out << 'L';
+ assert(isa<ConstantArrayType>(E->getType()));
+ mangleType(E->getType());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+ // Proposal from David Vandervoorde, 2010.06.30, as
+ // modified by ABI list discussion.
+ Out << "LDnE";
+ break;
+ }
+
+ case Expr::PackExpansionExprClass:
+ Out << "sp";
+ mangleExpression(cast<PackExpansionExpr>(E)->getPattern());
+ break;
+
+ case Expr::SizeOfPackExprClass: {
+ Out << "sZ";
+ const NamedDecl *Pack = cast<SizeOfPackExpr>(E)->getPack();
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack))
+ mangleTemplateParameter(TTP->getIndex());
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Pack))
+ mangleTemplateParameter(NTTP->getIndex());
+ else if (const TemplateTemplateParmDecl *TempTP
+ = dyn_cast<TemplateTemplateParmDecl>(Pack))
+ mangleTemplateParameter(TempTP->getIndex());
+ else
+ mangleFunctionParam(cast<ParmVarDecl>(Pack));
+ break;
+ }
+
+ case Expr::MaterializeTemporaryExprClass: {
+ mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr());
+ break;
+ }
+
+ case Expr::CXXThisExprClass:
+ Out << "fpT";
+ break;
+ }
+}
+
+/// Mangle an expression which refers to a parameter variable.
+///
+/// <expression> ::= <function-param>
+/// <function-param> ::= fp <top-level CV-qualifiers> _ # L == 0, I == 0
+/// <function-param> ::= fp <top-level CV-qualifiers>
+/// <parameter-2 non-negative number> _ # L == 0, I > 0
+/// <function-param> ::= fL <L-1 non-negative number>
+/// p <top-level CV-qualifiers> _ # L > 0, I == 0
+/// <function-param> ::= fL <L-1 non-negative number>
+/// p <top-level CV-qualifiers>
+/// <I-1 non-negative number> _ # L > 0, I > 0
+///
+/// L is the nesting depth of the parameter, defined as 1 if the
+/// parameter comes from the innermost function prototype scope
+/// enclosing the current context, 2 if from the next enclosing
+/// function prototype scope, and so on, with one special case: if
+/// we've processed the full parameter clause for the innermost
+/// function type, then L is one less. This definition conveniently
+/// makes it irrelevant whether a function's result type was written
+/// trailing or leading, but is otherwise overly complicated; the
+/// numbering was first designed without considering references to
+/// parameters in locations other than return types, and then the
+/// mangling had to be generalized without changing the existing
+/// manglings.
+///
+/// I is the zero-based index of the parameter within its parameter
+/// declaration clause. Note that the original ABI document describes
+/// this using 1-based ordinals.
+void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
+ unsigned parmDepth = parm->getFunctionScopeDepth();
+ unsigned parmIndex = parm->getFunctionScopeIndex();
+
+ // Compute 'L'.
+ // parmDepth does not include the declaring function prototype.
+ // FunctionTypeDepth does account for that.
+ assert(parmDepth < FunctionTypeDepth.getDepth());
+ unsigned nestingDepth = FunctionTypeDepth.getDepth() - parmDepth;
+ if (FunctionTypeDepth.isInResultType())
+ nestingDepth--;
+
+ if (nestingDepth == 0) {
+ Out << "fp";
+ } else {
+ Out << "fL" << (nestingDepth - 1) << 'p';
+ }
+
+ // Top-level qualifiers. We don't have to worry about arrays here,
+ // because parameters declared as arrays should already have been
+  // transformed to have pointer type. FIXME: apparently these don't
+ // get mangled if used as an rvalue of a known non-class type?
+ assert(!parm->getType()->isArrayType()
+ && "parameter's type is still an array type?");
+ mangleQualifiers(parm->getType().getQualifiers());
+
+ // Parameter index.
+ if (parmIndex != 0) {
+ Out << (parmIndex - 1);
+ }
+ Out << '_';
+}
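Concretely, per the encoding above: the first parameter of the innermost prototype is fp_, later ones fp0_, fp1_, ...; a parameter one prototype level further out becomes fL0p_, fL0p0_, and so on. The familiar trailing-return-type case looks like this (sketch):

    template <class T, class U>
    auto add(T t, U u) -> decltype(t + u);
    // The return type mangles as DTplfp_fp0_E:
    //   fp_ refers to the first parameter (t), fp0_ to the second (u).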
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ // ::= C3 # complete object allocating constructor
+ //
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_CompleteAllocating:
+ Out << "C3";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ }
+}
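For a hypothetical class Foo with a user-provided constructor and virtual destructor, the resulting symbols are:

    _ZN3FooC1Ev   // complete-object constructor Foo::Foo()
    _ZN3FooC2Ev   // base-object constructor Foo::Foo()
    _ZN3FooD1Ev   // complete-object destructor Foo::~Foo()
    _ZN3FooD0Ev   // deleting destructor Foo::~Foo()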
+
+void CXXNameMangler::mangleTemplateArgs(
+ const ASTTemplateArgumentListInfo &TemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = TemplateArgs.NumTemplateArgs; i != e; ++i)
+ mangleTemplateArg(0, TemplateArgs.getTemplateArgs()[i].getArgument());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
+ NumTemplateArgs);
+
+ mangleUnresolvedTemplateArgs(TemplateArgs, NumTemplateArgs);
+}
+
+void CXXNameMangler::mangleUnresolvedTemplateArgs(const TemplateArgument *args,
+ unsigned numArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != numArgs; ++i)
+ mangleTemplateArg(0, args[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = AL.size(); i != e; ++i)
+ mangleTemplateArg(PL.getParam(i), AL[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
+ TemplateArgument A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= J <template-arg>* E # argument pack
+  //                  ::= sp <expression>         # pack expansion of an expression (C++0x)
+ if (!A.isInstantiationDependent() || A.isDependent())
+ A = Context.getASTContext().getCanonicalTemplateArgument(A);
+
+ switch (A.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Cannot mangle NULL template argument");
+
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Template:
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
+ break;
+ case TemplateArgument::TemplateExpansion:
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(A.getAsTemplateOrTemplatePattern());
+ break;
+ case TemplateArgument::Expression: {
+ // It's possible to end up with a DeclRefExpr here in certain
+ // dependent cases, in which case we should mangle as a
+ // declaration.
+ const Expr *E = A.getAsExpr()->IgnoreParens();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *D = DRE->getDecl();
+ if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
+ Out << "L";
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+ }
+ }
+
+ Out << 'X';
+ mangleExpression(E);
+ Out << 'E';
+ break;
+ }
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
+ break;
+ case TemplateArgument::Declaration: {
+ assert(P && "Missing template parameter for declaration argument");
+ // <expr-primary> ::= L <mangled-name> E # external name
+ // <expr-primary> ::= L <type> 0 E
+    // Clang produces ASTs where pointer-to-member-function expressions and
+    // pointer-to-function expressions are represented as a declaration, not an
+    // expression. We compensate for that here to produce the correct mangling.
+ const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
+
+ // Handle NULL pointer arguments.
+ if (!A.getAsDecl()) {
+ Out << "L";
+ mangleType(Parameter->getType());
+ Out << "0E";
+ break;
+ }
+
+
+ NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
+ bool compensateMangling = !Parameter->getType()->isReferenceType();
+ if (compensateMangling) {
+ Out << 'X';
+ mangleOperatorName(OO_Amp, 1);
+ }
+
+ Out << 'L';
+ // References to external entities use the mangled name; if the name would
+    // not normally be mangled, then mangle it as unqualified.
+ //
+ // FIXME: The ABI specifies that external names here should have _Z, but
+ // gcc leaves this off.
+ if (compensateMangling)
+ mangle(D, "_Z");
+ else
+ mangle(D, "Z");
+ Out << 'E';
+
+ if (compensateMangling)
+ Out << 'E';
+
+ break;
+ }
+
+ case TemplateArgument::Pack: {
+ // Note: proposal by Mike Herrick on 12/20/10
+ Out << 'J';
+ for (TemplateArgument::pack_iterator PA = A.pack_begin(),
+ PAEnd = A.pack_end();
+ PA != PAEnd; ++PA)
+ mangleTemplateArg(P, *PA);
+ Out << 'E';
+ }
+ }
+}
+
+void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
+ if (Index == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (Index - 1) << '_';
+}
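So template parameters are numbered T_, T0_, T1_, and so on. For example (sketch, hypothetical f):

    template <class A, class B> void f(A, B);
    // f<int, char> mangles as _Z1fIicEvT_T0_
    //   T_ is the first template parameter (A), T0_ the second (B).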
+
+void CXXNameMangler::mangleExistingSubstitution(QualType type) {
+ bool result = mangleSubstitution(type);
+ assert(result && "no existing substitution for type");
+ (void) result;
+}
+
+void CXXNameMangler::mangleExistingSubstitution(TemplateName tname) {
+ bool result = mangleSubstitution(tname);
+ assert(result && "no existing substitution for template name");
+ (void) result;
+}
+
+// <substitution> ::= S <seq-id> _
+// ::= S_
+bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
+ // Try one of the standard substitutions first.
+ if (mangleStandardSubstitution(ND))
+ return true;
+
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
+}
+
+/// \brief Determine whether the given type has any qualifiers that are
+/// relevant for substitutions.
+static bool hasMangledSubstitutionQualifiers(QualType T) {
+ Qualifiers Qs = T.getQualifiers();
+ return Qs.getCVRQualifiers() || Qs.hasAddressSpace();
+}
+
+bool CXXNameMangler::mangleSubstitution(QualType T) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return mangleSubstitution(RT->getDecl());
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+
+ return mangleSubstitution(TypePtr);
+}
+
+bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ return mangleSubstitution(
+ reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
+ llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
+ if (I == Substitutions.end())
+ return false;
+
+ unsigned SeqID = I->second;
+ if (SeqID == 0)
+ Out << "S_";
+ else {
+ SeqID--;
+
+ // <seq-id> is encoded in base-36, using digits and upper case letters.
+ char Buffer[10];
+ char *BufferPtr = llvm::array_endof(Buffer);
+
+ if (SeqID == 0) *--BufferPtr = '0';
+
+ while (SeqID) {
+ assert(BufferPtr > Buffer && "Buffer overflow!");
+
+ char c = static_cast<char>(SeqID % 36);
+
+ *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
+ SeqID /= 36;
+ }
+
+ Out << 'S'
+ << StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr)
+ << '_';
+ }
+
+ return true;
+}
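Reading the encoding above: the first table entry is referenced as S_, the second as S0_, then S1_ ... S9_, SA_ ... SZ_, S10_, and so on (base-36). The classic example, with a hypothetical struct A:

    struct A {};
    void f(A*, A*);   // _Z1fP1AS0_   (S_ = A, S0_ = A*, reused for the second parameter)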
+
+static bool isCharType(QualType T) {
+ if (T.isNull())
+ return false;
+
+ return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ T->isSpecificBuiltinType(BuiltinType::Char_U);
+}
+
+/// isCharSpecialization - Returns whether a given type is a template
+/// specialization of a given name with a single argument of type char.
+static bool isCharSpecialization(QualType T, const char *Name) {
+ if (T.isNull())
+ return false;
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!SD)
+ return false;
+
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 1)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ return SD->getIdentifier()->getName() == Name;
+}
+
+template <std::size_t StrLen>
+static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD,
+ const char (&Str)[StrLen]) {
+ if (!SD->getIdentifier()->isStr(Str))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 2)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ return true;
+}
+
+bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
+ // <substitution> ::= St # ::std::
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (isStd(NS)) {
+ Out << "St";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
+ if (!isStdNamespace(getEffectiveDeclContext(TD)))
+ return false;
+
+ // <substitution> ::= Sa # ::std::allocator
+ if (TD->getIdentifier()->isStr("allocator")) {
+ Out << "Sa";
+ return true;
+ }
+
+    // <substitution> ::= Sb # ::std::basic_string
+ if (TD->getIdentifier()->isStr("basic_string")) {
+ Out << "Sb";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ return false;
+
+ // <substitution> ::= Ss # ::std::basic_string<char,
+ // ::std::char_traits<char>,
+ // ::std::allocator<char> >
+ if (SD->getIdentifier()->isStr("basic_string")) {
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+
+ if (TemplateArgs.size() != 3)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
+ return false;
+
+ Out << "Ss";
+ return true;
+ }
+
+ // <substitution> ::= Si # ::std::basic_istream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_istream")) {
+ Out << "Si";
+ return true;
+ }
+
+ // <substitution> ::= So # ::std::basic_ostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_ostream")) {
+ Out << "So";
+ return true;
+ }
+
+ // <substitution> ::= Sd # ::std::basic_iostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_iostream")) {
+ Out << "Sd";
+ return true;
+ }
+ }
+ return false;
+}
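In practice these abbreviations appear as, for example (assuming the usual library definitions of the std types):

    void f(std::string);          // _Z1fSs    (Ss = basic_string<char, char_traits<char>, allocator<char>>)
    void g(std::istream&);        // _Z1gRSi   (Si = basic_istream<char, char_traits<char>>)
    void h(std::allocator<int>);  // _Z1hSaIiE (Sa = std::allocator)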
+
+void CXXNameMangler::addSubstitution(QualType T) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ addSubstitution(RT->getDecl());
+ return;
+ }
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ addSubstitution(TypePtr);
+}
+
+void CXXNameMangler::addSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return addSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
+ assert(!Substitutions.count(Ptr) && "Substitution already exists!");
+ Substitutions[Ptr] = SeqID++;
+}
+
+//
+
+/// \brief Mangles the name of the declaration D and emits that name to the
+/// given output stream.
+///
+/// The declaration must be a variable or a function that actually requires a
+/// mangled name; callers are expected to have checked shouldMangleDeclName()
+/// first. When no mangling is required, the caller should instead just
+/// emit the identifier of the declaration (\c D->getIdentifier()) as its
+/// name.
+void ItaniumMangleContext::mangleName(const NamedDecl *D,
+ raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ CXXNameMangler Mangler(*this, Out, D);
+ return Mangler.mangle(D);
+}
+
+void ItaniumMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &Out) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // # first call-offset is 'this' adjustment
+ // # second call-offset is result adjustment
+
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Use mangleCXXDtor for destructor decls!");
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZT";
+ if (!Thunk.Return.isEmpty())
+ Mangler.getStream() << 'c';
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset);
+
+ // Mangle the return pointer adjustment if there is one.
+ if (!Thunk.Return.isEmpty())
+ Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(MD);
+}
+
+void
+ItaniumMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &Out) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ CXXNameMangler Mangler(*this, Out, DD, Type);
+ Mangler.getStream() << "_ZT";
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
+ ThisAdjustment.VCallOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(DD);
+}
+
+/// mangleGuardVariable - Returns the mangled name for a guard variable
+/// for the passed in VarDecl.
+void ItaniumMangleContext::mangleItaniumGuardVariable(const VarDecl *D,
+ raw_ostream &Out) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGV";
+ Mangler.mangleName(D);
+}
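For instance, the guard for a function-local static that needs dynamic initialization (hypothetical f and Widget) would be:

    // void f() { static Widget w; ... }
    //   object symbol: _ZZ1fvE1w    guard symbol: _ZGVZ1fvE1w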
+
+void ItaniumMangleContext::mangleReferenceTemporary(const VarDecl *D,
+ raw_ostream &Out) {
+ // We match the GCC mangling here.
+ // <special-name> ::= GR <object name>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGR";
+ Mangler.mangleName(D);
+}
+
+void ItaniumMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &Out) {
+ // <special-name> ::= TV <type> # virtual table
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTV";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void ItaniumMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &Out) {
+ // <special-name> ::= TT <type> # VTT structure
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTT";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void ItaniumMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &Out) {
+ // <special-name> ::= TC <type> <offset number> _ <base type>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTC";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+ Mangler.getStream() << Offset;
+ Mangler.getStream() << '_';
+ Mangler.mangleNameOrStandardSubstitution(Type);
+}
+
+void ItaniumMangleContext::mangleCXXRTTI(QualType Ty,
+ raw_ostream &Out) {
+ // <special-name> ::= TI <type> # typeinfo structure
+ assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTI";
+ Mangler.mangleType(Ty);
+}
+
+void ItaniumMangleContext::mangleCXXRTTIName(QualType Ty,
+ raw_ostream &Out) {
+ // <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTS";
+ Mangler.mangleType(Ty);
+}
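Putting the special names together for a hypothetical polymorphic class Foo:

    _ZTV3Foo   // vtable for Foo
    _ZTT3Foo   // VTT for Foo
    _ZTI3Foo   // typeinfo for Foo
    _ZTS3Foo   // typeinfo name for Foo (the string "3Foo")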
+
+MangleContext *clang::createItaniumMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags) {
+ return new ItaniumMangleContext(Context, Diags);
+}
diff --git a/clang/lib/AST/LambdaMangleContext.cpp b/clang/lib/AST/LambdaMangleContext.cpp
new file mode 100644
index 0000000..f5272a7
--- /dev/null
+++ b/clang/lib/AST/LambdaMangleContext.cpp
@@ -0,0 +1,30 @@
+//===--- LambdaMangleContext.cpp - Context for mangling lambdas -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LambdaMangleContext class, which keeps track of
+// the Itanium C++ ABI mangling numbers for lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/LambdaMangleContext.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+
+unsigned LambdaMangleContext::getManglingNumber(CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ ASTContext &Context = CallOperator->getASTContext();
+
+ QualType Key = Context.getFunctionType(Context.VoidTy,
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ FunctionProtoType::ExtProtoInfo());
+ Key = Context.getCanonicalType(Key);
+ return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
+}
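Because the key drops the return type, numbering is per parameter-list signature within a context. A sketch (assuming these lambdas end up needing mangling numbers, e.g. as default member initializers):

    struct S {
      int a = [](int v) { return v; }(0);      // signature (int): number 1
      int b = [](int v) { return v + 1; }(0);  // same signature:  number 2
      int c = [] { return 0; }();              // signature ():    numbering restarts at 1
    };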
diff --git a/clang/lib/AST/Makefile b/clang/lib/AST/Makefile
new file mode 100644
index 0000000..65383c5
--- /dev/null
+++ b/clang/lib/AST/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/AST/Makefile ------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST library for the C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+LIBRARYNAME := clangAST
+
+include $(CLANG_LEVEL)/Makefile
+
diff --git a/clang/lib/AST/Mangle.cpp b/clang/lib/AST/Mangle.cpp
new file mode 100644
index 0000000..73c9f57
--- /dev/null
+++ b/clang/lib/AST/Mangle.cpp
@@ -0,0 +1,142 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements generic name mangling support for blocks and Objective-C.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
+// much to be desired. Come up with a better mangling scheme.
+
+namespace {
+
+static void mangleFunctionBlock(MangleContext &Context,
+ StringRef Outer,
+ const BlockDecl *BD,
+ raw_ostream &Out) {
+ Out << "__" << Outer << "_block_invoke_" << Context.getBlockId(BD, true);
+}
+
+static void checkMangleDC(const DeclContext *DC, const BlockDecl *BD) {
+#ifndef NDEBUG
+ const DeclContext *ExpectedDC = BD->getDeclContext();
+ while (isa<BlockDecl>(ExpectedDC) || isa<EnumDecl>(ExpectedDC))
+ ExpectedDC = ExpectedDC->getParent();
+ // In-class initializers for non-static data members are lexically defined
+ // within the class, but are mangled as if they were specified as constructor
+ // member initializers.
+ if (isa<CXXRecordDecl>(ExpectedDC) && DC != ExpectedDC)
+ DC = DC->getParent();
+ assert(DC == ExpectedDC && "Given decl context did not match expected!");
+#endif
+}
+
+}
+
+void MangleContext::anchor() { }
+
+void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
+ raw_ostream &Out) {
+ Out << "__block_global_" << getBlockId(BD, false);
+}
+
+void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
+ CXXCtorType CT, const BlockDecl *BD,
+ raw_ostream &ResStream) {
+ checkMangleDC(CD, BD);
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXCtor(CD, CT, Out);
+ Out.flush();
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
+ CXXDtorType DT, const BlockDecl *BD,
+ raw_ostream &ResStream) {
+ checkMangleDC(DD, BD);
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXDtor(DD, DT, Out);
+ Out.flush();
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
+ raw_ostream &Out) {
+ assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC));
+ checkMangleDC(DC, BD);
+
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Stream(Buffer);
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(Method, Stream);
+ } else {
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (IdentifierInfo *II = ND->getIdentifier())
+ Stream << II->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ mangleName(ND, Stream);
+ }
+ }
+ Stream.flush();
+ mangleFunctionBlock(*this, Buffer, BD, Out);
+}
+
+void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
+ raw_ostream &Out) {
+ SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << *CID << ')';
+ OS << ' ' << MD->getSelector().getAsString() << ']';
+
+ Out << OS.str().size() << OS.str();
+}
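For example (hypothetical class Foo and category Cat), the emitted names are length-prefixed:

    -[Foo bar:]       =>  11-[Foo bar:]
    +[Foo(Cat) baz]   =>  15+[Foo(Cat) baz]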
+
+void MangleContext::mangleBlock(const BlockDecl *BD,
+ raw_ostream &Out) {
+ const DeclContext *DC = BD->getDeclContext();
+ while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
+ DC = DC->getParent();
+ if (DC->isFunctionOrMethod())
+ mangleBlock(DC, BD, Out);
+ else
+ mangleGlobalBlock(BD, Out);
+}
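Roughly, then (block IDs are whatever getBlockId assigns, shown here as N): a block at global scope becomes __block_global_N, and a block inside a function foo becomes __foo_block_invoke_N, with the outer name replaced by the full mangled or Objective-C method name when the enclosing context requires it:

    void (^g)(void) = ^{};              // __block_global_N
    void foo(void) { ^{ /*...*/ }(); }  // __foo_block_invoke_N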
diff --git a/clang/lib/AST/MicrosoftCXXABI.cpp b/clang/lib/AST/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..f33d6fe
--- /dev/null
+++ b/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -0,0 +1,71 @@
+//===------- MicrosoftCXXABI.cpp - AST support for the Microsoft C++ ABI --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ AST support targeting the Microsoft Visual C++
+// ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CXXABI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+namespace {
+class MicrosoftCXXABI : public CXXABI {
+ ASTContext &Context;
+public:
+ MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+
+ unsigned getMemberPointerSize(const MemberPointerType *MPT) const;
+
+ CallingConv getDefaultMethodCallConv() const {
+ if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
+ return CC_X86ThisCall;
+ else
+ return CC_C;
+ }
+
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // FIXME: Audit the corners
+ if (!RD->isDynamicClass())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // In the Microsoft ABI, classes can have one or two vtable pointers.
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize ||
+ Layout.getNonVirtualSize() == PointerSize * 2;
+ }
+};
+}
+
+unsigned MicrosoftCXXABI::getMemberPointerSize(const MemberPointerType *MPT) const {
+ QualType Pointee = MPT->getPointeeType();
+ CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ if (RD->getNumVBases() > 0) {
+ if (Pointee->isFunctionType())
+ return 3;
+ else
+ return 2;
+ } else if (RD->getNumBases() > 1 && Pointee->isFunctionType())
+ return 2;
+ return 1;
+}
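A sketch of what the returned slot counts correspond to (A and B are hypothetical bases; callers scale the count by the pointer width):

    struct A {};  struct B {};
    struct S0 { void f(); };            // &S0::f -> 1 slot
    struct S1 : A, B { void f(); };     // &S1::f -> 2 slots (multiple inheritance, function)
    struct S2 : virtual A { void f(); int d; };
    // &S2::f -> 3 slots (virtual bases), &S2::d -> 2 slots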
+
+CXXABI *clang::CreateMicrosoftCXXABI(ASTContext &Ctx) {
+ return new MicrosoftCXXABI(Ctx);
+}
+
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
new file mode 100644
index 0000000..ba9856a
--- /dev/null
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -0,0 +1,1191 @@
+//===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ name mangling targeting the Microsoft Visual C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
+
+using namespace clang;
+
+namespace {
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MangleContext &Context;
+ raw_ostream &Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_)
+ : Context(C), Out(Out_) { }
+
+ void mangle(const NamedDecl *D, StringRef Prefix = "?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleNumber(int64_t Number);
+ void mangleType(QualType T);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(const IdentifierInfo *II);
+ void manglePostfix(const DeclContext *DC, bool NoFunction=false);
+ void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(const FunctionType *T, const FunctionDecl *D,
+ bool IsStructor, bool IsInstMethod);
+ void mangleType(const ArrayType *T, bool IsGlobal);
+ void mangleExtraDimensions(QualType T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(const FunctionType *T, bool IsInstMethod = false);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+};
+
+/// MicrosoftMangleContext - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContext : public MangleContext {
+public:
+ MicrosoftMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags) : MangleContext(Context, Diags) { }
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, raw_ostream &Out);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &);
+ virtual void mangleCXXRTTI(QualType T, raw_ostream &);
+ virtual void mangleCXXRTTIName(QualType T, raw_ostream &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &);
+ virtual void mangleReferenceTemporary(const clang::VarDecl *,
+ raw_ostream &);
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+ // (always) as does passing a C++ member function and a function
+ // whose name is not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return false;
+
+ // Variables at global scope with internal linkage are not mangled.
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
+ StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+  // name with leading underscores or leading/trailing at signs. So, emit an
+ // asm marker at the start so we get the name right.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= ? <name> <type-encoding>
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ // TODO: Fields? Can MSVC even mangle them?
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+
+ bool InStructor = false, InInstMethod = false;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD) {
+ if (MD->isInstance())
+ InInstMethod = true;
+ if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
+ InStructor = true;
+ }
+
+ // First, the function class.
+ mangleFunctionClass(FD);
+
+ mangleType(FT, FD, InStructor, InInstMethod);
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> A # pointers, references, arrays
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ mangleType(Ty);
+ Out << 'A';
+ } else if (Ty->isArrayType()) {
+ // Global arrays are funny, too.
+ mangleType(cast<ArrayType>(Ty.getTypePtr()), true);
+ Out << 'A';
+ } else {
+ mangleType(Ty.getLocalUnqualifiedType());
+ mangleQualifiers(Ty.getLocalQualifiers(), false);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
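+  // For example, a member N::C::x comes out of the calls below as 'x@C@N@',
+  // after which the terminating '@' is appended.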
+ const DeclContext *DC = ND->getDeclContext();
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ manglePostfix(DC);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [?] <decimal digit> # <= 9
+ // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
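+  // For example, under this scheme 1 encodes as '0', 10 as '9', 11 as 'L@',
+  // 16 as 'BA@', and -3 as '?2'.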
+ if (Number < 0) {
+ Out << '?';
+ Number = -Number;
+ }
+ if (Number >= 1 && Number <= 10) {
+ Out << Number-1;
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[16];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ while (Number) {
+ *--CurPtr = 'A' + (Number % 16);
+ Number /= 16;
+ }
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
+
+void
+MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // When VC encounters an anonymous type with no tag and no typedef,
+ // it literally emits '<unnamed-tag>'.
+ Out << "<unnamed-tag>";
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+
+ case DeclarationName::CXXConstructorName:
+ Out << "?0";
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ Out << "?1";
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
+ llvm_unreachable("Don't know how to mangle literal operators yet!");
+
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
+ bool NoFunction) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <template-postfix> <template-args> [<postfix>]
+ // ::= <template-param>
+ // ::= <substitution> [<postfix>]
+
+ if (!DC) return;
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ Context.mangleBlock(BD, Out);
+ Out << '@';
+ return manglePostfix(DC->getParent(), NoFunction);
+ }
+
+ if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePostfix(DC->getParent(), NoFunction);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional:
+ llvm_unreachable("Don't know how to mangle ?:");
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Not an overloaded operator");
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source name> ::= <identifier> @
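+  // e.g. the identifier 'foo' is emitted as 'foo@'.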
+ Out << II->getName() << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
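+  // Only the simple near cases are produced below; for illustration, an
+  // unqualified non-member type gets 'A', const gets 'B', volatile gets 'C',
+  // const volatile gets 'D', and the member cases use 'Q'-'T' instead.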
+ if (!IsMember) {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'A';
+ else
+ Out << 'B';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'C';
+ else
+ Out << 'D';
+ }
+ } else {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'Q';
+ else
+ Out << 'R';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'S';
+ else
+ Out << 'T';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void MicrosoftCXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = getASTContext().getCanonicalType(T);
+
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (Quals) {
+ // We have to mangle these now, while we still have enough information.
+ // <pointer-cvr-qualifiers> ::= P # pointer
+ // ::= Q # const pointer
+ // ::= R # volatile pointer
+ // ::= S # const volatile pointer
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ if (!Quals.hasVolatile())
+ Out << 'Q';
+ else {
+ if (!Quals.hasConst())
+ Out << 'R';
+ else
+ Out << 'S';
+ }
+ } else
+      // Just emit qualifiers like normal.
+      // NB: When we mangle a pointer/reference type and the pointee type has
+      // no qualifiers, that absence of qualifiers is still mangled explicitly
+      // (as 'A').
+ mangleQualifiers(Quals, false);
+ } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ Out << 'P';
+ }
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+return;
+#define TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'X'; break;
+ case BuiltinType::SChar: Out << 'C'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
+ case BuiltinType::UChar: Out << 'E'; break;
+ case BuiltinType::Short: Out << 'F'; break;
+ case BuiltinType::UShort: Out << 'G'; break;
+ case BuiltinType::Int: Out << 'H'; break;
+ case BuiltinType::UInt: Out << 'I'; break;
+ case BuiltinType::Long: Out << 'J'; break;
+ case BuiltinType::ULong: Out << 'K'; break;
+ case BuiltinType::Float: Out << 'M'; break;
+ case BuiltinType::Double: Out << 'N'; break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble: Out << 'O'; break;
+ case BuiltinType::LongLong: Out << "_J"; break;
+ case BuiltinType::ULongLong: Out << "_K"; break;
+ case BuiltinType::Int128: Out << "_L"; break;
+ case BuiltinType::UInt128: Out << "_M"; break;
+ case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: Out << "_W"; break;
+
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("placeholder types shouldn't get to name mangling");
+
+ case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
+ case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
+ case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Half:
+ case BuiltinType::NullPtr:
+ assert(0 && "Don't know how to mangle this type yet");
+ }
+}
+
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+ // I'll probably have mangleType(MemberPointerType) call the mangleType()
+ // method directly.
+ mangleType(T, NULL, false, false);
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool IsStructor,
+ bool IsInstMethod) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
+ if (IsInstMethod)
+ mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+
+ mangleCallingConvention(T, IsInstMethod);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor)
+ Out << '@';
+ else
+ mangleType(Proto->getResultType());
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
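+  // For example, an empty (void) parameter list mangles to 'X', '(int)' to
+  // 'H@', and a variadic parameter list ends in 'Z' instead of '@'.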
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ if (D) {
+ // If we got a decl, use the "types-as-written" to make sure arrays
+ // get mangled right.
+ for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
+ ParmEnd = D->param_end();
+ Parm != ParmEnd; ++Parm)
+ mangleType((*Parm)->getTypeSourceInfo()->getType());
+ } else {
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= G # private: thunk near
+ // ::= H # private: thunk far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= O # protected: thunk near
+ // ::= P # protected: thunk far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // ::= W # public: thunk near
+ // ::= X # public: thunk far
+ // ::= Y # global near
+ // ::= Z # global far
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ default:
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else
+ Out << 'Y';
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T,
+ bool IsInstMethod) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
+ CallingConv CC = T->getCallConv();
+ if (CC == CC_Default)
+ CC = IsInstMethod ? getASTContext().getDefaultMethodCallConv() : CC_C;
+ switch (CC) {
+ default:
+ llvm_unreachable("Unsupported CC for mangling");
+ case CC_Default:
+ case CC_C: Out << 'A'; break;
+ case CC_X86Pascal: Out << 'C'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ llvm_unreachable("Don't know how to mangle UnresolvedUsingTypes yet!");
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W <size> <name>
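+// For example, a global 'class Foo' is mangled as 'VFoo@@': 'V' for the class,
+// 'Foo@' for its name, and the terminating '@' added by mangleName().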
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
+ switch (T->getDecl()->getTagKind()) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << 'W';
+ Out << getASTContext().getTypeSizeInChars(
+ cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
+ break;
+ }
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as global
+// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as param
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ if (IsGlobal)
+ Out << 'P';
+ else
+ Out << 'Q';
+ mangleExtraDimensions(T->getElementType());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
+ SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ llvm_unreachable("Don't know how to mangle VLAs!");
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ llvm_unreachable("Don't know how to mangle dependent-sized arrays!");
+ } else if (ElementTy->isIncompleteArrayType()) continue;
+ else break;
+ }
+ mangleQualifiers(ElementTy.getQualifiers(), false);
+ // If there are any additional dimensions, mangle them now.
+ if (Dimensions.size() > 0) {
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
+ mangleNumber(Dimensions.size());
+ for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
+ mangleNumber(Dimensions[Dim].getLimitedValue());
+ }
+ }
+ mangleType(ElementTy.getLocalUnqualifiedType());
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ Out << '8';
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(FPT, NULL, false, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType());
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ llvm_unreachable("Don't know how to mangle TemplateTypeParmTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const SubstTemplateTypeParmPackType *T) {
+ llvm_unreachable(
+ "Don't know how to mangle SubstTemplateTypeParmPackTypes yet!");
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
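+// For illustration, a plain 'int *' comes out as 'PAH': 'P' is emitted for the
+// pointer by mangleType(QualType) above, 'A' for the unqualified pointee, and
+// 'H' for int.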
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+ QualType PointeeTy = T->getPointeeType();
+ if (PointeeTy->isArrayType()) {
+ // Pointers to arrays are mangled like arrays.
+ mangleExtraDimensions(T->getPointeeType());
+ } else if (PointeeTy->isFunctionType()) {
+ // Function pointers are special.
+ Out << '6';
+ mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
+ NULL, false, false);
+ } else {
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+ }
+}
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A <cvr-qualifiers> <type>
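+// e.g. 'int &' mangles as 'AAH': 'A' for the reference, 'A' for the
+// unqualified referent, and 'H' for int.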
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'A';
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+}
+
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
+ llvm_unreachable("Don't know how to mangle RValueReferenceTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
+ llvm_unreachable("Don't know how to mangle ComplexTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
+ llvm_unreachable("Don't know how to mangle VectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
+ llvm_unreachable("Don't know how to mangle ExtVectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ llvm_unreachable(
+ "Don't know how to mangle DependentSizedExtVectorTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ // ObjC interfaces have structs underlying them.
+ Out << 'U';
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "_E";
+ mangleType(T->getPointeeType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ llvm_unreachable("Don't know how to mangle InjectedClassNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ llvm_unreachable("Don't know how to mangle TemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
+ llvm_unreachable("Don't know how to mangle DependentNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T) {
+ llvm_unreachable(
+ "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T) {
+ llvm_unreachable("Don't know how to mangle PackExpansionTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
+ llvm_unreachable("Don't know how to mangle TypeOfTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
+ llvm_unreachable("Don't know how to mangle TypeOfExprTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
+ llvm_unreachable("Don't know how to mangle DecltypeTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T) {
+ llvm_unreachable("Don't know how to mangle UnaryTransformationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AutoType *T) {
+ llvm_unreachable("Don't know how to mangle AutoTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AtomicType *T) {
+ llvm_unreachable("Don't know how to mangle AtomicTypes yet!");
+}
+
+void MicrosoftMangleContext::mangleName(const NamedDecl *D,
+ raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ return Mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle thunks!");
+}
+void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle destructor thunks!");
+}
+void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle virtual tables!");
+}
+void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &) {
+ llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
+}
+void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &) {
+ llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
+}
+void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle RTTI!");
+}
+void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle RTTI names!");
+}
+void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ raw_ostream & Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out);
+ mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ raw_ostream & Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out);
+ mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleReferenceTemporary(const clang::VarDecl *,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle reference temporaries!");
+}
+
+MangleContext *clang::createMicrosoftMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags) {
+ return new MicrosoftMangleContext(Context, Diags);
+}
diff --git a/clang/lib/AST/NSAPI.cpp b/clang/lib/AST/NSAPI.cpp
new file mode 100644
index 0000000..f5ea2c5
--- /dev/null
+++ b/clang/lib/AST/NSAPI.cpp
@@ -0,0 +1,312 @@
+//===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+NSAPI::NSAPI(ASTContext &ctx)
+ : Ctx(ctx), ClassIds() {
+}
+
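+// Class name identifiers are looked up lazily and cached in ClassIds the first
+// time they are requested.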
+IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
+ static const char *ClassName[NumClassIds] = {
+ "NSObject",
+ "NSString",
+ "NSArray",
+ "NSMutableArray",
+ "NSDictionary",
+ "NSMutableDictionary",
+ "NSNumber"
+ };
+
+ if (!ClassIds[K])
+ return (ClassIds[K] = &Ctx.Idents.get(ClassName[K]));
+
+ return ClassIds[K];
+}
+
+Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
+ if (NSStringSelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSStr_stringWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString"));
+ break;
+ case NSStr_initWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString"));
+ break;
+ }
+ return (NSStringSelectors[MK] = Sel);
+ }
+
+ return NSStringSelectors[MK];
+}
+
+Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
+ if (NSArraySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSArr_array:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("array"));
+ break;
+ case NSArr_arrayWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithArray"));
+ break;
+ case NSArr_arrayWithObject:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObject"));
+ break;
+ case NSArr_arrayWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects"));
+ break;
+ case NSArr_arrayWithObjectsCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("arrayWithObjects"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSArr_initWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithArray"));
+ break;
+ case NSArr_initWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithObjects"));
+ break;
+ case NSArr_objectAtIndex:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex"));
+ break;
+ case NSMutableArr_replaceObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSArraySelectors[MK] = Sel);
+ }
+
+ return NSArraySelectors[MK];
+}
+
+llvm::Optional<NSAPI::NSArrayMethodKind>
+NSAPI::getNSArrayMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSArrayMethods; ++i) {
+ NSArrayMethodKind MK = NSArrayMethodKind(i);
+ if (Sel == getNSArraySelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSArrayMethodKind>();
+}
+
+Selector NSAPI::getNSDictionarySelector(
+ NSDictionaryMethodKind MK) const {
+ if (NSDictionarySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSDict_dictionary:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("dictionary"));
+ break;
+ case NSDict_dictionaryWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithDictionary"));
+ break;
+ case NSDict_dictionaryWithObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeys: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeysCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(3, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithObjectsAndKeys"));
+ break;
+ case NSDict_initWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithDictionary"));
+ break;
+ case NSDict_initWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithObjectsAndKeys"));
+ break;
+ case NSDict_objectForKey:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey"));
+ break;
+ case NSMutableDict_setObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSDictionarySelectors[MK] = Sel);
+ }
+
+ return NSDictionarySelectors[MK];
+}
+
+llvm::Optional<NSAPI::NSDictionaryMethodKind>
+NSAPI::getNSDictionaryMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) {
+ NSDictionaryMethodKind MK = NSDictionaryMethodKind(i);
+ if (Sel == getNSDictionarySelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSDictionaryMethodKind>();
+}
+
+Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ bool Instance) const {
+ static const char *ClassSelectorName[NumNSNumberLiteralMethods] = {
+ "numberWithChar",
+ "numberWithUnsignedChar",
+ "numberWithShort",
+ "numberWithUnsignedShort",
+ "numberWithInt",
+ "numberWithUnsignedInt",
+ "numberWithLong",
+ "numberWithUnsignedLong",
+ "numberWithLongLong",
+ "numberWithUnsignedLongLong",
+ "numberWithFloat",
+ "numberWithDouble",
+ "numberWithBool",
+ "numberWithInteger",
+ "numberWithUnsignedInteger"
+ };
+ static const char *InstanceSelectorName[NumNSNumberLiteralMethods] = {
+ "initWithChar",
+ "initWithUnsignedChar",
+ "initWithShort",
+ "initWithUnsignedShort",
+ "initWithInt",
+ "initWithUnsignedInt",
+ "initWithLong",
+ "initWithUnsignedLong",
+ "initWithLongLong",
+ "initWithUnsignedLongLong",
+ "initWithFloat",
+ "initWithDouble",
+ "initWithBool",
+ "initWithInteger",
+ "initWithUnsignedInteger"
+ };
+
+ Selector *Sels;
+ const char **Names;
+ if (Instance) {
+ Sels = NSNumberInstanceSelectors;
+ Names = InstanceSelectorName;
+ } else {
+ Sels = NSNumberClassSelectors;
+ Names = ClassSelectorName;
+ }
+
+ if (Sels[MK].isNull())
+ Sels[MK] = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(Names[MK]));
+ return Sels[MK];
+}
+
+llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
+ for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) {
+ NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i);
+ if (isNSNumberLiteralSelector(MK, Sel))
+ return MK;
+ }
+
+ return llvm::Optional<NSNumberLiteralMethodKind>();
+}
+
+llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberFactoryMethodKind(QualType T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (!BT)
+ return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
+
+ switch (BT->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return NSAPI::NSNumberWithChar;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return NSAPI::NSNumberWithUnsignedChar;
+ case BuiltinType::Short:
+ return NSAPI::NSNumberWithShort;
+ case BuiltinType::UShort:
+ return NSAPI::NSNumberWithUnsignedShort;
+ case BuiltinType::Int:
+ return NSAPI::NSNumberWithInt;
+ case BuiltinType::UInt:
+ return NSAPI::NSNumberWithUnsignedInt;
+ case BuiltinType::Long:
+ return NSAPI::NSNumberWithLong;
+ case BuiltinType::ULong:
+ return NSAPI::NSNumberWithUnsignedLong;
+ case BuiltinType::LongLong:
+ return NSAPI::NSNumberWithLongLong;
+ case BuiltinType::ULongLong:
+ return NSAPI::NSNumberWithUnsignedLongLong;
+ case BuiltinType::Float:
+ return NSAPI::NSNumberWithFloat;
+ case BuiltinType::Double:
+ return NSAPI::NSNumberWithDouble;
+ case BuiltinType::Bool:
+ return NSAPI::NSNumberWithBool;
+
+ case BuiltinType::Void:
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::LongDouble:
+ case BuiltinType::UInt128:
+ case BuiltinType::NullPtr:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::BoundMember:
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::Half:
+ case BuiltinType::PseudoObject:
+ break;
+ }
+
+ return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
+}
diff --git a/clang/lib/AST/NestedNameSpecifier.cpp b/clang/lib/AST/NestedNameSpecifier.cpp
new file mode 100644
index 0000000..dbf267b
--- /dev/null
+++ b/clang/lib/AST/NestedNameSpecifier.cpp
@@ -0,0 +1,633 @@
+//===--- NestedNameSpecifier.cpp - C++ nested name specifiers -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace clang;
+
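+/// \brief Look up the given nested-name-specifier in the context's uniquing
+/// FoldingSet, creating and inserting a new copy if it is not already present.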
+NestedNameSpecifier *
+NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
+ const NestedNameSpecifier &Mockup) {
+ llvm::FoldingSetNodeID ID;
+ Mockup.Profile(ID);
+
+ void *InsertPos = 0;
+ NestedNameSpecifier *NNS
+ = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
+ if (!NNS) {
+ NNS = new (Context, 4) NestedNameSpecifier(Mockup);
+ Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
+ }
+
+ return NNS;
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
+
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredIdentifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, NamespaceDecl *NS) {
+ assert(NS && "Namespace cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredNamespaceOrAlias);
+ Mockup.Specifier = NS;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ NamespaceAliasDecl *Alias) {
+ assert(Alias && "Namespace alias cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredNamespaceOrAlias);
+ Mockup.Specifier = Alias;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ bool Template, const Type *T) {
+ assert(T && "Type cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(Template? StoredTypeSpecWithTemplate : StoredTypeSpec);
+ Mockup.Specifier = const_cast<Type*>(T);
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(0);
+ Mockup.Prefix.setInt(StoredIdentifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
+ if (!Context.GlobalNestedNameSpecifier)
+ Context.GlobalNestedNameSpecifier = new (Context, 4) NestedNameSpecifier();
+ return Context.GlobalNestedNameSpecifier;
+}
+
+NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
+ if (Specifier == 0)
+ return Global;
+
+ switch (Prefix.getInt()) {
+ case StoredIdentifier:
+ return Identifier;
+
+ case StoredNamespaceOrAlias:
+ return isa<NamespaceDecl>(static_cast<NamedDecl *>(Specifier))? Namespace
+ : NamespaceAlias;
+
+ case StoredTypeSpec:
+ return TypeSpec;
+
+ case StoredTypeSpecWithTemplate:
+ return TypeSpecWithTemplate;
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Retrieve the namespace stored in this nested name
+/// specifier.
+NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
+ if (Prefix.getInt() == StoredNamespaceOrAlias)
+ return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return 0;
+}
+
+/// \brief Retrieve the namespace alias stored in this nested name
+/// specifier.
+NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
+ if (Prefix.getInt() == StoredNamespaceOrAlias)
+ return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return 0;
+}
+
+
+/// \brief Whether this nested name specifier refers to a dependent
+/// type or not.
+bool NestedNameSpecifier::isDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isDependentType();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Whether this nested name specifier is instantiation-dependent,
+/// i.e., whether it involves a template parameter in any way.
+bool NestedNameSpecifier::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isInstantiationDependentType();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Identifier:
+ return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->containsUnexpandedParameterPack();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Print this nested name specifier to the given output
+/// stream.
+void
+NestedNameSpecifier::print(raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ if (getPrefix())
+ getPrefix()->print(OS, Policy);
+
+ switch (getKind()) {
+ case Identifier:
+ OS << getAsIdentifier()->getName();
+ break;
+
+ case Namespace:
+ if (getAsNamespace()->isAnonymousNamespace())
+ return;
+
+ OS << getAsNamespace()->getName();
+ break;
+
+ case NamespaceAlias:
+ OS << getAsNamespaceAlias()->getName();
+ break;
+
+ case Global:
+ break;
+
+ case TypeSpecWithTemplate:
+ OS << "template ";
+ // Fall through to print the type.
+
+ case TypeSpec: {
+ std::string TypeStr;
+ const Type *T = getAsType();
+
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressScope = true;
+
+ // Nested-name-specifiers are intended to contain minimally-qualified
+ // types. An actual ElaboratedType will not occur, since we'll store
+ // just the type that is referred to in the nested-name-specifier (e.g.,
+ // a TypedefType, TagType, etc.). However, when we are dealing with
+ // dependent template-id types (e.g., Outer<T>::template Inner<U>),
+ // the type requires its own nested-name-specifier for uniqueness, so we
+ // suppress that nested-name-specifier during printing.
+ assert(!isa<ElaboratedType>(T) &&
+ "Elaborated type in nested-name-specifier");
+ if (const TemplateSpecializationType *SpecType
+ = dyn_cast<TemplateSpecializationType>(T)) {
+ // Print the template name without its corresponding
+ // nested-name-specifier.
+ SpecType->getTemplateName().print(OS, InnerPolicy, true);
+
+ // Print the template argument list.
+ TypeStr = TemplateSpecializationType::PrintTemplateArgumentList(
+ SpecType->getArgs(),
+ SpecType->getNumArgs(),
+ InnerPolicy);
+ } else {
+ // Print the type normally
+ TypeStr = QualType(T, 0).getAsString(InnerPolicy);
+ }
+ OS << TypeStr;
+ break;
+ }
+ }
+
+ OS << "::";
+}
+
+void NestedNameSpecifier::dump(const LangOptions &LO) {
+ print(llvm::errs(), PrintingPolicy(LO));
+}
+
+unsigned
+NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
+ assert(Qualifier && "Expected a non-NULL qualifier");
+
+ // Location of the trailing '::'.
+ unsigned Length = sizeof(unsigned);
+
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // Nothing more to add.
+ break;
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // The location of the identifier or namespace name.
+ Length += sizeof(unsigned);
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec:
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ Length += sizeof(void *);
+ break;
+ }
+
+ return Length;
+}
+
+unsigned
+NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
+ unsigned Length = 0;
+ for (; Qualifier; Qualifier = Qualifier->getPrefix())
+ Length += getLocalDataLength(Qualifier);
+ return Length;
+}
+
+namespace {
+ /// \brief Load a (possibly unaligned) source location from a given address
+ /// and offset.
+ SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
+ unsigned Raw;
+ memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(unsigned));
+ return SourceLocation::getFromRawEncoding(Raw);
+ }
+
+ /// \brief Load a (possibly unaligned) pointer from a given address and
+ /// offset.
+ void *LoadPointer(void *Data, unsigned Offset) {
+ void *Result;
+ memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*));
+ return Result;
+ }
+}
+
+SourceRange NestedNameSpecifierLoc::getSourceRange() const {
+ if (!Qualifier)
+ return SourceRange();
+
+ NestedNameSpecifierLoc First = *this;
+ while (NestedNameSpecifierLoc Prefix = First.getPrefix())
+ First = Prefix;
+
+ return SourceRange(First.getLocalSourceRange().getBegin(),
+ getLocalSourceRange().getEnd());
+}
+
+SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+ if (!Qualifier)
+ return SourceRange();
+
+ unsigned Offset = getDataLength(Qualifier->getPrefix());
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ return LoadSourceLocation(Data, Offset);
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ return SourceRange(LoadSourceLocation(Data, Offset),
+ LoadSourceLocation(Data, Offset + sizeof(unsigned)));
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec: {
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ void *TypeData = LoadPointer(Data, Offset);
+ TypeLoc TL(Qualifier->getAsType(), TypeData);
+ return SourceRange(TL.getBeginLoc(),
+ LoadSourceLocation(Data, Offset + sizeof(void*)));
+ }
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
+ assert((Qualifier->getKind() == NestedNameSpecifier::TypeSpec ||
+ Qualifier->getKind() == NestedNameSpecifier::TypeSpecWithTemplate) &&
+ "Nested-name-specifier location is not a type");
+
+ // The "void*" that points at the TypeLoc data.
+ unsigned Offset = getDataLength(Qualifier->getPrefix());
+ void *TypeData = LoadPointer(Data, Offset);
+ return TypeLoc(Qualifier->getAsType(), TypeData);
+}
+
+namespace {
+ void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
+ unsigned &BufferCapacity) {
+ if (BufferSize + (End - Start) > BufferCapacity) {
+ // Reallocate the buffer.
+ unsigned NewCapacity
+ = std::max((unsigned)(BufferCapacity? BufferCapacity * 2
+ : sizeof(void*) * 2),
+ (unsigned)(BufferSize + (End - Start)));
+ char *NewBuffer = static_cast<char *>(malloc(NewCapacity));
+ memcpy(NewBuffer, Buffer, BufferSize);
+
+ if (BufferCapacity)
+ free(Buffer);
+ Buffer = NewBuffer;
+ BufferCapacity = NewCapacity;
+ }
+
+ memcpy(Buffer + BufferSize, Start, End - Start);
+ BufferSize += End-Start;
+ }
+
+ /// \brief Save a source location to the given buffer.
+ void SaveSourceLocation(SourceLocation Loc, char *&Buffer,
+ unsigned &BufferSize, unsigned &BufferCapacity) {
+ unsigned Raw = Loc.getRawEncoding();
+ Append(reinterpret_cast<char *>(&Raw),
+ reinterpret_cast<char *>(&Raw) + sizeof(unsigned),
+ Buffer, BufferSize, BufferCapacity);
+ }
+
+ /// \brief Save a pointer to the given buffer.
+ void SavePointer(void *Ptr, char *&Buffer, unsigned &BufferSize,
+ unsigned &BufferCapacity) {
+ Append(reinterpret_cast<char *>(&Ptr),
+ reinterpret_cast<char *>(&Ptr) + sizeof(void *),
+ Buffer, BufferSize, BufferCapacity);
+ }
+}
+
+NestedNameSpecifierLocBuilder::
+NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other)
+ : Representation(Other.Representation), Buffer(0),
+ BufferSize(0), BufferCapacity(0)
+{
+ if (!Other.Buffer)
+ return;
+
+ if (Other.BufferCapacity == 0) {
+ // Shallow copy is okay.
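+    // (A buffer capacity of zero means the buffer is owned by the ASTContext,
+    // so sharing the pointer is safe.)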
+ Buffer = Other.Buffer;
+ BufferSize = Other.BufferSize;
+ return;
+ }
+
+ // Deep copy
+ BufferSize = Other.BufferSize;
+ BufferCapacity = Other.BufferSize;
+ Buffer = static_cast<char *>(malloc(BufferCapacity));
+ memcpy(Buffer, Other.Buffer, BufferSize);
+}
+
+NestedNameSpecifierLocBuilder &
+NestedNameSpecifierLocBuilder::
+operator=(const NestedNameSpecifierLocBuilder &Other) {
+ Representation = Other.Representation;
+
+ if (Buffer && Other.Buffer && BufferCapacity >= Other.BufferSize) {
+ // Re-use our storage.
+ BufferSize = Other.BufferSize;
+ memcpy(Buffer, Other.Buffer, BufferSize);
+ return *this;
+ }
+
+ // Free our storage, if we have any.
+ if (BufferCapacity) {
+ free(Buffer);
+ BufferCapacity = 0;
+ }
+
+ if (!Other.Buffer) {
+ // Empty.
+ Buffer = 0;
+ BufferSize = 0;
+ return *this;
+ }
+
+ if (Other.BufferCapacity == 0) {
+ // Shallow copy is okay.
+ Buffer = Other.Buffer;
+ BufferSize = Other.BufferSize;
+ return *this;
+ }
+
+ // Deep copy.
+ BufferSize = Other.BufferSize;
+ BufferCapacity = BufferSize;
+ Buffer = static_cast<char *>(malloc(BufferSize));
+ memcpy(Buffer, Other.Buffer, BufferSize);
+ return *this;
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ SourceLocation TemplateKWLoc,
+ TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ TemplateKWLoc.isValid(),
+ TL.getTypePtr());
+
+ // Push source-location info into the buffer.
+ SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ Identifier);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ Namespace);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation, Alias);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(AliasLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
+ SourceLocation ColonColonLoc) {
+ assert(!Representation && "Already have a nested-name-specifier!?");
+ Representation = NestedNameSpecifier::GlobalSpecifier(Context);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
+ NestedNameSpecifier *Qualifier,
+ SourceRange R) {
+ Representation = Qualifier;
+
+ // Construct bogus (but well-formed) source information for the
+ // nested-name-specifier.
+ BufferSize = 0;
+ SmallVector<NestedNameSpecifier *, 4> Stack;
+ for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
+ Stack.push_back(NNS);
+ while (!Stack.empty()) {
+ NestedNameSpecifier *NNS = Stack.back();
+ Stack.pop_back();
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ TypeSourceInfo *TSInfo
+ = Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
+ R.getBegin());
+ SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
+ BufferCapacity);
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ break;
+ }
+
+ // Save the location of the '::'.
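+    // The final '::' (emitted for the full qualifier) gets the end of the
+    // range; every earlier '::' reuses the begin location.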
+ SaveSourceLocation(Stack.empty()? R.getEnd() : R.getBegin(),
+ Buffer, BufferSize, BufferCapacity);
+ }
+}
+
+void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
+ if (BufferCapacity)
+ free(Buffer);
+
+ if (!Other) {
+    Representation = 0;
+    BufferSize = 0;
+    BufferCapacity = 0;
+ return;
+ }
+
+ // Rather than copying the data (which is wasteful), "adopt" the
+ // pointer (which points into the ASTContext) but set the capacity to zero to
+ // indicate that we don't own it.
+ Representation = Other.getNestedNameSpecifier();
+ Buffer = static_cast<char *>(Other.getOpaqueData());
+ BufferSize = Other.getDataLength();
+ BufferCapacity = 0;
+}
+
+NestedNameSpecifierLoc
+NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const {
+ if (!Representation)
+ return NestedNameSpecifierLoc();
+
+ // If we adopted our data pointer from elsewhere in the AST context, there's
+ // no need to copy the memory.
+ if (BufferCapacity == 0)
+ return NestedNameSpecifierLoc(Representation, Buffer);
+
+ // FIXME: After copying the source-location information, should we free
+ // our (temporary) buffer and adopt the ASTContext-allocated memory?
+ // Doing so would optimize repeated calls to getWithLocInContext().
+ void *Mem = Context.Allocate(BufferSize, llvm::alignOf<void *>());
+ memcpy(Mem, Buffer, BufferSize);
+ return NestedNameSpecifierLoc(Representation, Mem);
+}
+
diff --git a/clang/lib/AST/ParentMap.cpp b/clang/lib/AST/ParentMap.cpp
new file mode 100644
index 0000000..64016d9
--- /dev/null
+++ b/clang/lib/AST/ParentMap.cpp
@@ -0,0 +1,130 @@
+//===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParentMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
+
+static void BuildParentMap(MapTy& M, Stmt* S) {
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (*I) {
+ M[*I] = S;
+ BuildParentMap(M, *I);
+ }
+
+ // Also include the source expr tree of an OpaqueValueExpr in the map.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S))
+ BuildParentMap(M, OVE->getSourceExpr());
+}
+
+ParentMap::ParentMap(Stmt* S) : Impl(0) {
+ if (S) {
+ MapTy *M = new MapTy();
+ BuildParentMap(*M, S);
+ Impl = M;
+ }
+}
+
+ParentMap::~ParentMap() {
+ delete (MapTy*) Impl;
+}
+
+void ParentMap::addStmt(Stmt* S) {
+ if (S) {
+ BuildParentMap(*(MapTy*) Impl, S);
+ }
+}
+
+Stmt* ParentMap::getParent(Stmt* S) const {
+ MapTy* M = (MapTy*) Impl;
+ MapTy::iterator I = M->find(S);
+ return I == M->end() ? 0 : I->second;
+}
+
+Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
+ do { S = getParent(S); } while (S && isa<ParenExpr>(S));
+ return S;
+}
+
+Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ }
+ while (S && (isa<ParenExpr>(S) || isa<CastExpr>(S)));
+
+ return S;
+}
+
+Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S);
+
+ return S;
+}
+
+Stmt *ParentMap::getOuterParenParent(Stmt *S) const {
+ Stmt *Paren = 0;
+ while (isa<ParenExpr>(S)) {
+ Paren = S;
+ S = getParent(S);
+  }
+ return Paren;
+}
+
+bool ParentMap::isConsumedExpr(Expr* E) const {
+ Stmt *P = getParent(E);
+ Stmt *DirectChild = E;
+
+ // Ignore parents that are parentheses or casts.
+ while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P))) {
+ DirectChild = P;
+ P = getParent(P);
+ }
+
+ if (!P)
+ return false;
+
+ switch (P->getStmtClass()) {
+ default:
+ return isa<Expr>(P);
+ case Stmt::DeclStmtClass:
+ return true;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator *BE = cast<BinaryOperator>(P);
+ // If it is a comma, only the right side is consumed.
+ // If it isn't a comma, both sides are consumed.
+      return BE->getOpcode() != BO_Comma || DirectChild == BE->getRHS();
+ }
+ case Stmt::ForStmtClass:
+ return DirectChild == cast<ForStmt>(P)->getCond();
+ case Stmt::WhileStmtClass:
+ return DirectChild == cast<WhileStmt>(P)->getCond();
+ case Stmt::DoStmtClass:
+ return DirectChild == cast<DoStmt>(P)->getCond();
+ case Stmt::IfStmtClass:
+ return DirectChild == cast<IfStmt>(P)->getCond();
+ case Stmt::IndirectGotoStmtClass:
+ return DirectChild == cast<IndirectGotoStmt>(P)->getTarget();
+ case Stmt::SwitchStmtClass:
+ return DirectChild == cast<SwitchStmt>(P)->getCond();
+ case Stmt::ReturnStmtClass:
+ return true;
+ }
+}
+
diff --git a/clang/lib/AST/RecordLayout.cpp b/clang/lib/AST/RecordLayout.cpp
new file mode 100644
index 0000000..0114eba
--- /dev/null
+++ b/clang/lib/AST/RecordLayout.cpp
@@ -0,0 +1,89 @@
+//===-- RecordLayout.cpp - Layout information for a struct/union -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecordLayout interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+void ASTRecordLayout::Destroy(ASTContext &Ctx) {
+ if (FieldOffsets)
+ Ctx.Deallocate(FieldOffsets);
+ if (CXXInfo) {
+    CXXInfo->~CXXRecordLayoutInfo();
+    Ctx.Deallocate(CXXInfo);
+ }
+ this->~ASTRecordLayout();
+ Ctx.Deallocate(this);
+}
+
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
+ CharUnits alignment, CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount)
+ : Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
+ FieldCount(fieldcount), CXXInfo(0) {
+ if (FieldCount > 0) {
+ FieldOffsets = new (Ctx) uint64_t[FieldCount];
+ memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
+ }
+}
+
+// Constructor for C++ records.
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
+ CharUnits size, CharUnits alignment,
+ CharUnits vfptroffset, CharUnits vbptroffset,
+ CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount,
+ CharUnits nonvirtualsize,
+ CharUnits nonvirtualalign,
+ CharUnits SizeOfLargestEmptySubobject,
+ const CXXRecordDecl *PrimaryBase,
+ bool IsPrimaryBaseVirtual,
+ const BaseOffsetsMapTy& BaseOffsets,
+ const BaseOffsetsMapTy& VBaseOffsets)
+ : Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
+ FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
+{
+ if (FieldCount > 0) {
+ FieldOffsets = new (Ctx) uint64_t[FieldCount];
+ memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
+ }
+
+ CXXInfo->PrimaryBase.setPointer(PrimaryBase);
+ CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
+ CXXInfo->NonVirtualSize = nonvirtualsize;
+ CXXInfo->NonVirtualAlign = nonvirtualalign;
+ CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
+ CXXInfo->BaseOffsets = BaseOffsets;
+ CXXInfo->VBaseOffsets = VBaseOffsets;
+ CXXInfo->VFPtrOffset = vfptroffset;
+ CXXInfo->VBPtrOffset = vbptroffset;
+
+#ifndef NDEBUG
+ if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) {
+ if (isPrimaryBaseVirtual()) {
+      // The Microsoft ABI doesn't have primary virtual bases.
+ if (Ctx.getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ assert(getVBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary virtual base must be at offset 0!");
+ }
+ } else {
+ assert(getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base must be at offset 0!");
+ }
+ }
+#endif
+}
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
new file mode 100644
index 0000000..c2d9294
--- /dev/null
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -0,0 +1,2488 @@
+//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/Support/Format.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+
+using namespace clang;
+
+namespace {
+
+/// BaseSubobjectInfo - Represents a single base subobject in a complete class.
+/// For a class hierarchy like
+///
+/// class A { };
+/// class B : A { };
+/// class C : A, B { };
+///
+/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo
+/// instances, one for B and two for A.
+///
+/// If a base is virtual, it will only have one BaseSubobjectInfo allocated.
+struct BaseSubobjectInfo {
+ /// Class - The class for this base info.
+ const CXXRecordDecl *Class;
+
+ /// IsVirtual - Whether the BaseInfo represents a virtual base or not.
+ bool IsVirtual;
+
+ /// Bases - Information about the base subobjects.
+ SmallVector<BaseSubobjectInfo*, 4> Bases;
+
+ /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base
+ /// of this base info (if one exists).
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo;
+
+  /// Derived - For a virtual base, the BaseSubobjectInfo of the class that
+  /// has claimed this base as its primary virtual base (null if unclaimed).
+ const BaseSubobjectInfo *Derived;
+};
+
+/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
+/// offsets while laying out a C++ class.
+class EmptySubobjectMap {
+ const ASTContext &Context;
+ uint64_t CharWidth;
+
+ /// Class - The class whose empty entries we're keeping track of.
+ const CXXRecordDecl *Class;
+
+ /// EmptyClassOffsets - A map from offsets to empty record decls.
+ typedef SmallVector<const CXXRecordDecl *, 1> ClassVectorTy;
+ typedef llvm::DenseMap<CharUnits, ClassVectorTy> EmptyClassOffsetsMapTy;
+ EmptyClassOffsetsMapTy EmptyClassOffsets;
+
+ /// MaxEmptyClassOffset - The highest offset known to contain an empty
+ /// base subobject.
+ CharUnits MaxEmptyClassOffset;
+
+ /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
+ /// member subobject that is empty.
+ void ComputeEmptySubobjectSizes();
+
+ void AddSubobjectAtOffset(const CXXRecordDecl *RD, CharUnits Offset);
+
+ void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ CharUnits Offset, bool PlacingEmptyBase);
+
+ void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);
+
+ /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
+  /// subobjects at or beyond the given offset.
+ bool AnyEmptySubobjectsBeyondOffset(CharUnits Offset) const {
+ return Offset <= MaxEmptyClassOffset;
+ }
+
+ CharUnits
+ getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const {
+ uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
+ assert(FieldOffset % CharWidth == 0 &&
+ "Field offset not at char boundary!");
+
+ return Context.toCharUnitsFromBits(FieldOffset);
+ }
+
+protected:
+ bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const;
+
+ bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) const;
+ bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const;
+
+public:
+ /// This holds the size of the largest empty subobject (either a base
+ /// or a member). Will be zero if the record being built doesn't contain
+ /// any empty classes.
+ CharUnits SizeOfLargestEmptySubobject;
+
+ EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class)
+ : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) {
+ ComputeEmptySubobjectSizes();
+ }
+
+ /// CanPlaceBaseAtOffset - Return whether the given base class can be placed
+ /// at the given offset.
+ /// Returns false if placing the record will result in two components
+ /// (direct or indirect) of the same type having the same offset.
+ bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
+ /// offset.
+ bool CanPlaceFieldAtOffset(const FieldDecl *FD, CharUnits Offset);
+};
+
+void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
+ // Check the bases.
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits EmptySize;
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ if (BaseDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
+ }
+
+ // Check the fields.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ const RecordType *RT =
+ Context.getBaseElementType(FD->getType())->getAs<RecordType>();
+
+ // We only care about record types.
+ if (!RT)
+ continue;
+
+ CharUnits EmptySize;
+ const CXXRecordDecl *MemberDecl = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
+ if (MemberDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
+ }
+}
+
+bool
+EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const {
+ // We only need to check empty bases.
+ if (!RD->isEmpty())
+ return true;
+
+ EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset);
+ if (I == EmptyClassOffsets.end())
+ return true;
+
+ const ClassVectorTy& Classes = I->second;
+ if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
+ return true;
+
+ // There is already an empty class of the same type at this offset.
+ return false;
+}
+
+void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) {
+ // We only care about empty bases.
+ if (!RD->isEmpty())
+ return;
+
+  // If we have empty structures inside a union, we can assign them all
+  // the same offset. Just avoid pushing them twice into the list.
+ ClassVectorTy& Classes = EmptyClassOffsets[Offset];
+ if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end())
+ return;
+
+ Classes.push_back(RD);
+
+ // Update the empty class offset.
+ if (Offset > MaxEmptyClassOffset)
+ MaxEmptyClassOffset = Offset;
+}
+
+bool
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(Info->Class, Offset))
+ return false;
+
+ // Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ BaseSubobjectInfo* Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+
+ if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
+ return false;
+ }
+
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
+
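+    // Only check the primary virtual base if this base info is the one
+    // that has claimed it.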
+ if (Info == PrimaryVirtualBaseInfo->Derived) {
+ if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ CharUnits Offset,
+ bool PlacingEmptyBase) {
+ if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
+ // We know that the only empty subobjects that can conflict with empty
+    // subobjects of non-empty bases are empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty base
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ return;
+ }
+
+ AddSubobjectAtOffset(Info->Class, Offset);
+
+ // Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ BaseSubobjectInfo* Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
+ }
+
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
+
+ if (Info == PrimaryVirtualBaseInfo->Derived)
+ UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
+ PlacingEmptyBase);
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ }
+}
+
+bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // If we know this class doesn't have any empty subobjects we don't need to
+ // bother checking.
+ if (SizeOfLargestEmptySubobject.isZero())
+ return true;
+
+ if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ return false;
+
+ // We are able to place the base at this offset. Make sure to update the
+ // empty base subobject map.
+ UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(RD, Offset))
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
+ return false;
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);
+ }
+
+ // If we have an array type we need to look at every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ CharUnits ElementOffset = Offset;
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
+ return true;
+
+ if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
+ return false;
+
+ ElementOffset += Layout.getSize();
+ }
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
+ CharUnits Offset) {
+ if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
+ return false;
+
+ // We are able to place the member variable at this offset.
+ // Make sure to update the empty base subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset);
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at offset
+ // zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (Offset >= SizeOfLargestEmptySubobject)
+ return;
+
+ AddSubobjectAtOffset(RD, Offset);
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ }
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
+ CharUnits Offset) {
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ return;
+ }
+
+ // If we have an array type we need to update every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ CharUnits ElementOffset = Offset;
+
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (ElementOffset >= SizeOfLargestEmptySubobject)
+ return;
+
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ ElementOffset += Layout.getSize();
+ }
+ }
+}
+
+class RecordLayoutBuilder {
+protected:
+ // FIXME: Remove this and make the appropriate fields public.
+ friend class clang::ASTContext;
+
+ const ASTContext &Context;
+
+ EmptySubobjectMap *EmptySubobjects;
+
+ /// Size - The current size of the record layout.
+ uint64_t Size;
+
+ /// Alignment - The current alignment of the record layout.
+ CharUnits Alignment;
+
+ /// \brief The alignment if attribute packed is not used.
+ CharUnits UnpackedAlignment;
+
+ SmallVector<uint64_t, 16> FieldOffsets;
+
+ /// \brief Whether the external AST source has provided a layout for this
+ /// record.
+ unsigned ExternalLayout : 1;
+
+ /// \brief Whether we need to infer alignment, even when we have an
+ /// externally-provided layout.
+ unsigned InferAlignment : 1;
+
+ /// Packed - Whether the record is packed or not.
+ unsigned Packed : 1;
+
+ unsigned IsUnion : 1;
+
+ unsigned IsMac68kAlign : 1;
+
+ unsigned IsMsStruct : 1;
+
+ /// UnfilledBitsInLastByte - If the last field laid out was a bitfield,
+ /// this contains the number of bits in the last byte that can be used for
+ /// an adjacent bitfield if necessary.
+ unsigned char UnfilledBitsInLastByte;
+
+ /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
+ /// #pragma pack.
+ CharUnits MaxFieldAlignment;
+
+ /// DataSize - The data size of the record being laid out.
+ uint64_t DataSize;
+
+ CharUnits NonVirtualSize;
+ CharUnits NonVirtualAlignment;
+
+ FieldDecl *ZeroLengthBitfield;
+
+ /// PrimaryBase - the primary base class (if one exists) of the class
+ /// we're laying out.
+ const CXXRecordDecl *PrimaryBase;
+
+ /// PrimaryBaseIsVirtual - Whether the primary base of the class we're laying
+ /// out is virtual.
+ bool PrimaryBaseIsVirtual;
+
+ /// VFPtrOffset - Virtual function table offset. Only for MS layout.
+ CharUnits VFPtrOffset;
+
+ /// VBPtrOffset - Virtual base table offset. Only for MS layout.
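+  /// An offset of -1 means no vb-table pointer is present.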
+ CharUnits VBPtrOffset;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
+
+ /// Bases - base classes and their offsets in the record.
+ BaseOffsetsMapTy Bases;
+
+ // VBases - virtual base classes and their offsets in the record.
+ BaseOffsetsMapTy VBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
+ /// inheritance graph order. Used for determining the primary base class.
+ const CXXRecordDecl *FirstNearlyEmptyVBase;
+
+ /// VisitedVirtualBases - A set of all the visited virtual bases, used to
+ /// avoid visiting virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// \brief Externally-provided size.
+ uint64_t ExternalSize;
+
+ /// \brief Externally-provided alignment.
+ uint64_t ExternalAlign;
+
+ /// \brief Externally-provided field offsets.
+ llvm::DenseMap<const FieldDecl *, uint64_t> ExternalFieldOffsets;
+
+ /// \brief Externally-provided direct, non-virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalBaseOffsets;
+
+ /// \brief Externally-provided virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalVirtualBaseOffsets;
+
+ RecordLayoutBuilder(const ASTContext &Context,
+ EmptySubobjectMap *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
+ Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
+ ExternalLayout(false), InferAlignment(false),
+ Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
+ UnfilledBitsInLastByte(0), MaxFieldAlignment(CharUnits::Zero()),
+ DataSize(0), NonVirtualSize(CharUnits::Zero()),
+ NonVirtualAlignment(CharUnits::One()),
+ ZeroLengthBitfield(0), PrimaryBase(0),
+ PrimaryBaseIsVirtual(false),
+ VFPtrOffset(CharUnits::fromQuantity(-1)),
+ VBPtrOffset(CharUnits::fromQuantity(-1)),
+ FirstNearlyEmptyVBase(0) { }
+
+ /// Reset this RecordLayoutBuilder to a fresh state, using the given
+ /// alignment as the initial alignment. This is used for the
+ /// correct layout of vb-table pointers in MSVC.
+ void resetWithTargetAlignment(CharUnits TargetAlignment) {
+ const ASTContext &Context = this->Context;
+ EmptySubobjectMap *EmptySubobjects = this->EmptySubobjects;
+ this->~RecordLayoutBuilder();
+ new (this) RecordLayoutBuilder(Context, EmptySubobjects);
+ Alignment = UnpackedAlignment = TargetAlignment;
+ }
+
+ void Layout(const RecordDecl *D);
+ void Layout(const CXXRecordDecl *D);
+ void Layout(const ObjCInterfaceDecl *D);
+
+ void LayoutFields(const RecordDecl *D);
+ void LayoutField(const FieldDecl *D);
+ void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
+ bool FieldPacked, const FieldDecl *D);
+ void LayoutBitField(const FieldDecl *D);
+
+ bool isMicrosoftCXXABI() const {
+ return Context.getTargetInfo().getCXXABI() == CXXABI_Microsoft;
+ }
+
+ void MSLayoutVirtualBases(const CXXRecordDecl *RD);
+
+ /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
+ llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
+ BaseSubobjectInfoMapTy;
+
+ /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
+ /// of the class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy VirtualBaseInfo;
+
+ /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
+ /// class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy NonVirtualBaseInfo;
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
+ /// bases of the given class.
+ void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
+ /// single class and all of its base classes.
+ BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived);
+
+ /// DeterminePrimaryBase - Determine the primary base of the given class.
+ void DeterminePrimaryBase(const CXXRecordDecl *RD);
+
+ void SelectPrimaryVBase(const CXXRecordDecl *RD);
+
+ void EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign);
+
+ /// LayoutNonVirtualBases - Determines the primary base class (if any) and
+  /// lays it out. Will then proceed to lay out all non-virtual base classes.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD);
+
+ /// LayoutNonVirtualBase - Lays out a single non-virtual base.
+ void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);
+
+ void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ bool needsVFTable(const CXXRecordDecl *RD) const;
+ bool hasNewVirtualFunction(const CXXRecordDecl *RD) const;
+ bool isPossiblePrimaryBase(const CXXRecordDecl *Base) const;
+
+ /// LayoutVirtualBases - Lays out all the virtual bases.
+ void LayoutVirtualBases(const CXXRecordDecl *RD,
+ const CXXRecordDecl *MostDerivedClass);
+
+ /// LayoutVirtualBase - Lays out a single virtual base.
+ void LayoutVirtualBase(const BaseSubobjectInfo *Base);
+
+ /// LayoutBase - Will lay out a base and return the offset where it was
+ /// placed, in chars.
+ CharUnits LayoutBase(const BaseSubobjectInfo *Base);
+
+ /// InitializeLayout - Initialize record layout for the given record decl.
+ void InitializeLayout(const Decl *D);
+
+ /// FinishLayout - Finalize record layout. Adjust record size based on the
+ /// alignment.
+ void FinishLayout(const NamedDecl *D);
+
+ void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
+ void UpdateAlignment(CharUnits NewAlignment) {
+ UpdateAlignment(NewAlignment, NewAlignment);
+ }
+
+ /// \brief Retrieve the externally-supplied field offset for the given
+ /// field.
+ ///
+ /// \param Field The field whose offset is being queried.
+ /// \param ComputedOffset The offset that we've computed for this field.
+ uint64_t updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset);
+
+ void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset, unsigned UnpackedAlign,
+ bool isPacked, const FieldDecl *D);
+
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
+
+ CharUnits getSize() const {
+ assert(Size % Context.getCharWidth() == 0);
+ return Context.toCharUnitsFromBits(Size);
+ }
+ uint64_t getSizeInBits() const { return Size; }
+
+ void setSize(CharUnits NewSize) { Size = Context.toBits(NewSize); }
+ void setSize(uint64_t NewSize) { Size = NewSize; }
+
+ CharUnits getAligment() const { return Alignment; }
+
+ CharUnits getDataSize() const {
+ assert(DataSize % Context.getCharWidth() == 0);
+ return Context.toCharUnitsFromBits(DataSize);
+ }
+ uint64_t getDataSizeInBits() const { return DataSize; }
+
+ void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
+ void setDataSize(uint64_t NewSize) { DataSize = NewSize; }
+
+ RecordLayoutBuilder(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+ void operator=(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+public:
+ static const CXXMethodDecl *ComputeKeyFunction(const CXXRecordDecl *RD);
+};
+} // end anonymous namespace
+
+void
+RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a nearly empty virtual base.
+ if (I->isVirtual() && Context.isNearlyEmpty(Base)) {
+ // If it's not an indirect primary base, then we've found our primary
+ // base.
+ if (!IndirectPrimaryBases.count(Base)) {
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = true;
+ return;
+ }
+
+ // Is this the first nearly empty virtual base?
+ if (!FirstNearlyEmptyVBase)
+ FirstNearlyEmptyVBase = Base;
+ }
+
+ SelectPrimaryVBase(Base);
+ if (PrimaryBase)
+ return;
+ }
+}
+
+/// DeterminePrimaryBase - Determine the primary base of the given class.
+void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
+ // If the class isn't dynamic, it won't have a primary base.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Compute all the primary virtual bases for all of our direct and
+ // indirect bases, and record all their primary virtual base classes.
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+
+ // If the record has a dynamic base class, attempt to choose a primary base
+ // class. It is the first (in direct base class order) non-virtual dynamic
+ // base class, if one exists.
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ // Ignore virtual bases.
+ if (i->isVirtual())
+ continue;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ if (isPossiblePrimaryBase(Base)) {
+ // We found it.
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = false;
+ return;
+ }
+ }
+
+ // The Microsoft ABI doesn't have primary virtual bases.
+ if (isMicrosoftCXXABI()) {
+ assert(!PrimaryBase && "Should not get here with a primary base!");
+ return;
+ }
+
+ // Under the Itanium ABI, if there is no non-virtual primary base class,
+ // try to compute the primary virtual base. The primary virtual base is
+ // the first nearly empty virtual base that is not an indirect primary
+ // virtual base class, if one exists.
+ if (RD->getNumVBases() != 0) {
+ SelectPrimaryVBase(RD);
+ if (PrimaryBase)
+ return;
+ }
+
+ // Otherwise, it is the first indirect primary base class, if one exists.
+ if (FirstNearlyEmptyVBase) {
+ PrimaryBase = FirstNearlyEmptyVBase;
+ PrimaryBaseIsVirtual = true;
+ return;
+ }
+
+ assert(!PrimaryBase && "Should not get here with a primary base!");
+}
+
+BaseSubobjectInfo *
+RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived) {
+ BaseSubobjectInfo *Info;
+
+ if (IsVirtual) {
+ // Check if we already have info about this virtual base.
+ BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
+ if (InfoSlot) {
+ assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
+ return InfoSlot;
+ }
+
+ // We don't, create it.
+ InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ Info = InfoSlot;
+ } else {
+ Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ }
+
+ Info->Class = RD;
+ Info->IsVirtual = IsVirtual;
+ Info->Derived = 0;
+ Info->PrimaryVirtualBaseInfo = 0;
+
+ const CXXRecordDecl *PrimaryVirtualBase = 0;
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = 0;
+
+ // Check if this base has a primary virtual base.
+ if (RD->getNumVBases()) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual()) {
+ // This base does have a primary virtual base.
+ PrimaryVirtualBase = Layout.getPrimaryBase();
+ assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");
+
+ // Now check if we have base subobject info about this primary base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+
+ if (PrimaryVirtualBaseInfo) {
+ if (PrimaryVirtualBaseInfo->Derived) {
+ // We did have info about this primary base, and it turns out that it
+ // has already been claimed as a primary virtual base for another
+ // base.
+ PrimaryVirtualBase = 0;
+ } else {
+ // We can claim this base as our primary base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+ }
+ }
+ }
+
+ // Now go through all direct bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
+ }
+
+ if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
+ // Traversing the bases must have created the base info for our primary
+ // virtual base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo &&
+ "Did not create a primary virtual base!");
+
+ // Claim the primary virtual base as our primary virtual base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+
+ return Info;
+}
+
+void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Compute the base subobject info for this base.
+ BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);
+
+ if (IsVirtual) {
+ // ComputeBaseInfo has already added this base for us.
+ assert(VirtualBaseInfo.count(BaseDecl) &&
+ "Did not add virtual base!");
+ } else {
+ // Add the base info to the map of non-virtual bases.
+ assert(!NonVirtualBaseInfo.count(BaseDecl) &&
+ "Non-virtual base already exists!");
+ NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
+ }
+ }
+}
+
+void
+RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to pointer alignment.
+ setSize(getSize().RoundUpToAlignment(BaseAlign));
+ setDataSize(getSize());
+
+ // Update the alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+}
+
+void
+RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
+ // Then, determine the primary base class.
+ DeterminePrimaryBase(RD);
+
+ // Compute base subobject info.
+ ComputeBaseSubobjectInfo(RD);
+
+ // If we have a primary base class, lay it out.
+ if (PrimaryBase) {
+ if (PrimaryBaseIsVirtual) {
+ // If the primary virtual base was a primary virtual base of some other
+ // base class we'll have to steal it.
+ BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
+ PrimaryBaseInfo->Derived = 0;
+
+ // We have a virtual primary base, insert it as an indirect primary base.
+ IndirectPrimaryBases.insert(PrimaryBase);
+
+ assert(!VisitedVirtualBases.count(PrimaryBase) &&
+ "vbase already visited!");
+ VisitedVirtualBases.insert(PrimaryBase);
+
+ LayoutVirtualBase(PrimaryBaseInfo);
+ } else {
+ BaseSubobjectInfo *PrimaryBaseInfo =
+ NonVirtualBaseInfo.lookup(PrimaryBase);
+ assert(PrimaryBaseInfo &&
+ "Did not find base info for non-virtual primary base!");
+
+ LayoutNonVirtualBase(PrimaryBaseInfo);
+ }
+
+ // If this class needs a vtable/vf-table and didn't get one from a
+ // primary base, add it in now.
+ } else if (needsVFTable(RD)) {
+ assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ EnsureVTablePointerAlignment(PtrAlign);
+ if (isMicrosoftCXXABI())
+ VFPtrOffset = getSize();
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
+ }
+
+ bool HasDirectVirtualBases = false;
+ bool HasNonVirtualBaseWithVBTable = false;
+
+ // Now lay out the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ // Ignore virtual bases, but remember that we saw one.
+ if (I->isVirtual()) {
+ HasDirectVirtualBases = true;
+ continue;
+ }
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Remember if this base has virtual bases itself.
+ if (BaseDecl->getNumVBases())
+ HasNonVirtualBaseWithVBTable = true;
+
+ // Skip the primary base, because we've already laid it out. The
+ // !PrimaryBaseIsVirtual check is required because we might have a
+ // non-virtual base of the same type as a primary virtual base.
+ if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
+ continue;
+
+ // Lay out the base.
+ BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find base info for non-virtual base!");
+
+ LayoutNonVirtualBase(BaseInfo);
+ }
+
+ // In the MS ABI, add the vb-table pointer if we need one, which is
+ // whenever we have a virtual base and we can't re-use a vb-table
+ // pointer from a non-virtual base.
+ if (isMicrosoftCXXABI() &&
+ HasDirectVirtualBases && !HasNonVirtualBaseWithVBTable) {
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+
+ // MSVC potentially over-aligns the vb-table pointer by giving it
+ // the max alignment of all the non-virtual objects in the class.
+ // This is completely unnecessary, but we're not here to pass
+ // judgment.
+ //
+ // Note that we've only laid out the non-virtual bases, so on the
+ // first pass Alignment won't be set correctly here, but if the
+ // vb-table doesn't end up aligned correctly we'll come through
+ // and redo the layout from scratch with the right alignment.
+ //
+ // TODO: Instead of doing this, just lay out the fields as if the
+ // vb-table were at offset zero, then retroactively bump the field
+ // offsets up.
+ PtrAlign = std::max(PtrAlign, Alignment);
+
+ EnsureVTablePointerAlignment(PtrAlign);
+ VBPtrOffset = getSize();
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
+ }
+}
+
+void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
+ // Layout the base.
+ CharUnits Offset = LayoutBase(Base);
+
+ // Add its base class offset.
+ assert(!Bases.count(Base->Class) && "base offset already exists!");
+ Bases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
+}
+
+void
+RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // This base isn't interesting, it has no virtual bases.
+ if (!Info->Class->getNumVBases())
+ return;
+
+ // First, check if we have a virtual primary base to add offsets for.
+ if (Info->PrimaryVirtualBaseInfo) {
+ assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
+ "Primary virtual base is not virtual!");
+ if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
+ // Add the offset.
+ assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
+ "primary vbase offset already exists!");
+ VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
+ Offset));
+
+ // Traverse the primary virtual base.
+ AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
+ }
+ }
+
+ // Now go through all direct non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ const BaseSubobjectInfo *Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
+ }
+}
+
+/// needsVFTable - Return true if this class needs a vtable or vf-table
+/// when laid out as a base class. These are treated the same because
+/// they're both always laid out at offset zero.
+///
+/// This function assumes that the class has no primary base.
+bool RecordLayoutBuilder::needsVFTable(const CXXRecordDecl *RD) const {
+ assert(!PrimaryBase);
+
+ // In the Itanium ABI, every dynamic class needs a vtable: even if
+ // this class has no virtual functions as a base class (i.e. it's
+ // non-polymorphic or only has virtual functions from virtual
+  // bases), it still needs a vtable to locate its virtual bases.
+ if (!isMicrosoftCXXABI())
+ return RD->isDynamicClass();
+
+ // In the MS ABI, we need a vfptr if the class has virtual functions
+ // other than those declared by its virtual bases. The AST doesn't
+ // tell us that directly, and checking manually for virtual
+ // functions that aren't overrides is expensive, but there are
+ // some important shortcuts:
+
+ // - Non-polymorphic classes have no virtual functions at all.
+ if (!RD->isPolymorphic()) return false;
+
+ // - Polymorphic classes with no virtual bases must either declare
+ // virtual functions directly or inherit them, but in the latter
+ // case we would have a primary base.
+ if (RD->getNumVBases() == 0) return true;
+
+ return hasNewVirtualFunction(RD);
+}
+
+/// hasNewVirtualFunction - Does the given polymorphic class declare a
+/// virtual function that does not override a method from any of its
+/// base classes?
+bool
+RecordLayoutBuilder::hasNewVirtualFunction(const CXXRecordDecl *RD) const {
+ assert(RD->isPolymorphic());
+ if (!RD->getNumBases())
+ return true;
+
+ for (CXXRecordDecl::method_iterator method = RD->method_begin();
+ method != RD->method_end();
+ ++method) {
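+    // A virtual method that overrides nothing introduces a new vtable slot.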
+ if (method->isVirtual() && !method->size_overridden_methods()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// isPossiblePrimaryBase - Is the given base class an acceptable
+/// primary base class?
+bool
+RecordLayoutBuilder::isPossiblePrimaryBase(const CXXRecordDecl *Base) const {
+ // In the Itanium ABI, a class can be a primary base class if it has
+ // a vtable for any reason.
+ if (!isMicrosoftCXXABI())
+ return Base->isDynamicClass();
+
+ // In the MS ABI, a class can only be a primary base class if it
+ // provides a vf-table at a static offset. That means it has to be
+ // non-virtual base. The existence of a separate vb-table means
+ // that it's possible to get virtual functions only from a virtual
+ // base, which we have to guard against.
+
+ // First off, it has to have virtual functions.
+ if (!Base->isPolymorphic()) return false;
+
+ // If it has no virtual bases, then everything is at a static offset.
+ if (!Base->getNumVBases()) return true;
+
+ // Okay, just ask the base class's layout.
+ return (Context.getASTRecordLayout(Base).getVFPtrOffset()
+ != CharUnits::fromQuantity(-1));
+}
+
+void
+RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+ const CXXRecordDecl *MostDerivedClass) {
+ const CXXRecordDecl *PrimaryBase;
+ bool PrimaryBaseIsVirtual;
+
+ if (MostDerivedClass == RD) {
+ PrimaryBase = this->PrimaryBase;
+ PrimaryBaseIsVirtual = this->PrimaryBaseIsVirtual;
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ PrimaryBase = Layout.getPrimaryBase();
+ PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+ }
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ if (I->isVirtual()) {
+ if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
+ bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
+
+ // Only lay out the virtual base if it's not an indirect primary base.
+ if (!IndirectPrimaryBase) {
+ // Only visit virtual bases once.
+ if (!VisitedVirtualBases.insert(BaseDecl))
+ continue;
+
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+ LayoutVirtualBase(BaseInfo);
+ }
+ }
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ LayoutVirtualBases(BaseDecl, MostDerivedClass);
+ }
+}
+
+void RecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD) {
+
+ if (!RD->getNumVBases())
+ return;
+
+ // This is substantially simplified because there are no virtual
+ // primary bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+
+ LayoutVirtualBase(BaseInfo);
+ }
+}
+
+void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
+ assert(!Base->Derived && "Trying to lay out a primary virtual base!");
+
+ // Layout the base.
+ CharUnits Offset = LayoutBase(Base);
+
+ // Add its base class offset.
+ assert(!VBases.count(Base->Class) && "vbase offset already exists!");
+ VBases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
+}
+
+CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
+
+ CharUnits Offset;
+
+ // Query the external layout to see if it provides an offset.
+ bool HasExternalLayout = false;
+ if (ExternalLayout) {
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits>::iterator Known;
+ if (Base->IsVirtual) {
+ Known = ExternalVirtualBaseOffsets.find(Base->Class);
+ if (Known != ExternalVirtualBaseOffsets.end()) {
+ Offset = Known->second;
+ HasExternalLayout = true;
+ }
+ } else {
+ Known = ExternalBaseOffsets.find(Base->Class);
+ if (Known != ExternalBaseOffsets.end()) {
+ Offset = Known->second;
+ HasExternalLayout = true;
+ }
+ }
+ }
+
+ // If we have an empty base class, try to place it at offset 0.
+ if (Base->Class->isEmpty() &&
+ (!HasExternalLayout || Offset == CharUnits::Zero()) &&
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
+ setSize(std::max(getSize(), Layout.getSize()));
+
+ return CharUnits::Zero();
+ }
+
+ CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlign();
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ if (!HasExternalLayout) {
+ // Round up the current record size to the base's alignment boundary.
+ Offset = getDataSize().RoundUpToAlignment(BaseAlign);
+
+ // Try to place the base.
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
+ Offset += BaseAlign;
+ } else {
+ bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
+ (void)Allowed;
+ assert(Allowed && "Base subobject externally placed at overlapping offset");
+ }
+
+ if (!Base->Class->isEmpty()) {
+ // Update the data size.
+ setDataSize(Offset + Layout.getNonVirtualSize());
+
+ setSize(std::max(getSize(), getDataSize()));
+ } else
+ setSize(std::max(getSize(), Offset + Layout.getSize()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+
+ return Offset;
+}
+
+void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ IsUnion = RD->isUnion();
+
+ Packed = D->hasAttr<PackedAttr>();
+
+ IsMsStruct = D->hasAttr<MsStructAttr>();
+
+ // Honor the default struct packing maximum alignment flag.
+ if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) {
+ MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
+ }
+
+ // mac68k alignment supersedes maximum field alignment and attribute aligned,
+ // and forces all structures to have 2-byte alignment. The IBM docs on it
+ // allude to additional (more complicated) semantics, especially with regard
+ // to bit-fields, but gcc appears not to follow that.
+ if (D->hasAttr<AlignMac68kAttr>()) {
+ IsMac68kAlign = true;
+ MaxFieldAlignment = CharUnits::fromQuantity(2);
+ Alignment = CharUnits::fromQuantity(2);
+ } else {
+ if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
+ MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
+
+ if (unsigned MaxAlign = D->getMaxAlignment())
+ UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
+ }
+
+ // If there is an external AST source, ask it for the various offsets.
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ if (ExternalASTSource *External = Context.getExternalSource()) {
+ ExternalLayout = External->layoutRecordType(RD,
+ ExternalSize,
+ ExternalAlign,
+ ExternalFieldOffsets,
+ ExternalBaseOffsets,
+ ExternalVirtualBaseOffsets);
+
+ // Update based on external alignment.
+ if (ExternalLayout) {
+ if (ExternalAlign > 0) {
+ Alignment = Context.toCharUnitsFromBits(ExternalAlign);
+ UnpackedAlignment = Alignment;
+ } else {
+ // The external source didn't have alignment information; infer it.
+ InferAlignment = true;
+ }
+ }
+ }
+}
+
+void RecordLayoutBuilder::Layout(const RecordDecl *D) {
+ InitializeLayout(D);
+ LayoutFields(D);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout(D);
+}
+
+void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
+ InitializeLayout(RD);
+
+ // Lay out the vtable and the non-virtual bases.
+ LayoutNonVirtualBases(RD);
+
+ LayoutFields(RD);
+
+ NonVirtualSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.getTargetInfo().getCharAlign()));
+ NonVirtualAlignment = Alignment;
+
+ if (isMicrosoftCXXABI() &&
+ NonVirtualSize != NonVirtualSize.RoundUpToAlignment(Alignment)) {
+ CharUnits AlignMember =
+ NonVirtualSize.RoundUpToAlignment(Alignment) - NonVirtualSize;
+
+ setSize(getSize() + AlignMember);
+ setDataSize(getSize());
+
+ NonVirtualSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.getTargetInfo().getCharAlign()));
+
+ MSLayoutVirtualBases(RD);
+
+ } else {
+ // Lay out the virtual bases and add the primary virtual base offsets.
+ LayoutVirtualBases(RD, RD);
+ }
+
+ // Finally, round the size of the total struct up to the alignment
+ // of the struct itself.
+ FinishLayout(RD);
+
+#ifndef NDEBUG
+ // Check that we have base offsets for all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ assert(Bases.count(BaseDecl) && "Did not find base offset!");
+ }
+
+ // And all virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ assert(VBases.count(BaseDecl) && "Did not find base offset!");
+ }
+#endif
+}
+
+void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
+ if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
+ const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD);
+
+ UpdateAlignment(SL.getAlignment());
+
+ // We start laying out ivars not at the end of the superclass
+ // structure, but at the next byte following the last field.
+ setSize(SL.getDataSize());
+ setDataSize(getSize());
+ }
+
+ InitializeLayout(D);
+ // Layout each ivar sequentially.
+ for (const ObjCIvarDecl *IVD = D->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar())
+ LayoutField(IVD);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout(D);
+}
+
+void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ // Layout each field, for now, just sequentially, respecting alignment. In
+ // the future, this will need to be tweakable by targets.
+ const FieldDecl *LastFD = 0;
+ ZeroLengthBitfield = 0;
+ unsigned RemainingInAlignment = 0;
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field) {
+ if (IsMsStruct) {
+ FieldDecl *FD = (*Field);
+ if (Context.ZeroBitfieldFollowsBitfield(FD, LastFD))
+ ZeroLengthBitfield = FD;
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ else if (Context.ZeroBitfieldFollowsNonBitfield(FD, LastFD))
+ continue;
+      // FIXME: streamline these conditions into a single, simpler check.
+ else if (Context.BitfieldFollowsBitfield(FD, LastFD) ||
+ Context.BitfieldFollowsNonBitfield(FD, LastFD) ||
+ Context.NonBitfieldFollowsBitfield(FD, LastFD)) {
+ // 1) Adjacent bit fields are packed into the same 1-, 2-, or
+ // 4-byte allocation unit if the integral types are the same
+ // size and if the next bit field fits into the current
+ // allocation unit without crossing the boundary imposed by the
+ // common alignment requirements of the bit fields.
+ // 2) Establish a new alignment for a bitfield following
+ // a non-bitfield if size of their types differ.
+ // 3) Establish a new alignment for a non-bitfield following
+ // a bitfield if size of their types differ.
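+        //
+        // Illustrative (hypothetical fields, assuming 1-byte char and
+        // 4-byte int): given 'char a : 4; char b : 4; int c : 8;',
+        // 'a' and 'b' share one allocation unit, while 'c' starts a new
+        // unit aligned for int because the underlying type sizes differ.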
+ std::pair<uint64_t, unsigned> FieldInfo =
+ Context.getTypeInfo(FD->getType());
+ uint64_t TypeSize = FieldInfo.first;
+ unsigned FieldAlign = FieldInfo.second;
+ // This check is needed for 'long long' in -m32 mode.
+ if (TypeSize > FieldAlign &&
+ (Context.hasSameType(FD->getType(),
+ Context.UnsignedLongLongTy)
+ ||Context.hasSameType(FD->getType(),
+ Context.LongLongTy)))
+ FieldAlign = TypeSize;
+ FieldInfo = Context.getTypeInfo(LastFD->getType());
+ uint64_t TypeSizeLastFD = FieldInfo.first;
+ unsigned FieldAlignLastFD = FieldInfo.second;
+ // This check is needed for 'long long' in -m32 mode.
+ if (TypeSizeLastFD > FieldAlignLastFD &&
+ (Context.hasSameType(LastFD->getType(),
+ Context.UnsignedLongLongTy)
+ || Context.hasSameType(LastFD->getType(),
+ Context.LongLongTy)))
+ FieldAlignLastFD = TypeSizeLastFD;
+
+ if (TypeSizeLastFD != TypeSize) {
+ if (RemainingInAlignment &&
+ LastFD && LastFD->isBitField() &&
+ LastFD->getBitWidthValue(Context)) {
+ // If previous field was a bitfield with some remaining unfilled
+ // bits, pad the field so current field starts on its type boundary.
+ uint64_t FieldOffset =
+ getDataSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t NewSizeInBits = RemainingInAlignment + FieldOffset;
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ RemainingInAlignment = 0;
+ }
+
+ uint64_t UnpaddedFieldOffset =
+ getDataSizeInBits() - UnfilledBitsInLastByte;
+ FieldAlign = std::max(FieldAlign, FieldAlignLastFD);
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero()) {
+ unsigned MaxFieldAlignmentInBits =
+ Context.toBits(MaxFieldAlignment);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ }
+
+ uint64_t NewSizeInBits =
+ llvm::RoundUpToAlignment(UnpaddedFieldOffset, FieldAlign);
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastByte = getDataSizeInBits() - NewSizeInBits;
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ }
+ if (FD->isBitField()) {
+ uint64_t FieldSize = FD->getBitWidthValue(Context);
+ assert (FieldSize > 0 && "LayoutFields - ms_struct layout");
+ if (RemainingInAlignment < FieldSize)
+ RemainingInAlignment = TypeSize - FieldSize;
+ else
+ RemainingInAlignment -= FieldSize;
+ }
+ }
+ else if (FD->isBitField()) {
+ uint64_t FieldSize = FD->getBitWidthValue(Context);
+ std::pair<uint64_t, unsigned> FieldInfo =
+ Context.getTypeInfo(FD->getType());
+ uint64_t TypeSize = FieldInfo.first;
+ RemainingInAlignment = TypeSize - FieldSize;
+ }
+ LastFD = FD;
+ }
+ else if (!Context.getTargetInfo().useBitFieldTypeAlignment() &&
+ Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
+ FieldDecl *FD = (*Field);
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ ZeroLengthBitfield = FD;
+ }
+ LayoutField(*Field);
+ }
+ if (IsMsStruct && RemainingInAlignment &&
+ LastFD && LastFD->isBitField() && LastFD->getBitWidthValue(Context)) {
+ // If we ended a bitfield before the full length of the type then
+ // pad the struct out to the full length of the last type.
+ uint64_t FieldOffset =
+ getDataSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t NewSizeInBits = RemainingInAlignment + FieldOffset;
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ }
+}
+
+void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
+ uint64_t TypeSize,
+ bool FieldPacked,
+ const FieldDecl *D) {
+ assert(Context.getLangOpts().CPlusPlus &&
+ "Can only have wide bit-fields in C++!");
+
+ // Itanium C++ ABI 2.4:
+ // If sizeof(T)*8 < n, let T' be the largest integral POD type with
+ // sizeof(T')*8 <= n.
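+  //
+  // Illustrative (hypothetical declaration, assuming 8-bit char and
+  // 16/32-bit short/int): for 'char c : 20;', T' is unsigned short, so the
+  // field starts at the next 2-byte boundary and occupies 20 bits.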
+
+ QualType IntegralPODTypes[] = {
+ Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy,
+ Context.UnsignedLongTy, Context.UnsignedLongLongTy
+ };
+
+ QualType Type;
+ for (unsigned I = 0, E = llvm::array_lengthof(IntegralPODTypes);
+ I != E; ++I) {
+ uint64_t Size = Context.getTypeSize(IntegralPODTypes[I]);
+
+ if (Size > FieldSize)
+ break;
+
+ Type = IntegralPODTypes[I];
+ }
+ assert(!Type.isNull() && "Did not find a type!");
+
+ CharUnits TypeAlign = Context.getTypeAlignInChars(Type);
+
+ // We're not going to use any of the unfilled bits in the last byte.
+ UnfilledBitsInLastByte = 0;
+
+ uint64_t FieldOffset;
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastByte;
+
+ if (IsUnion) {
+ setDataSize(std::max(getDataSizeInBits(), FieldSize));
+ FieldOffset = 0;
+ } else {
+ // The bitfield is allocated starting at the next offset aligned
+ // appropriately for T', with length n bits.
+ FieldOffset = llvm::RoundUpToAlignment(getDataSizeInBits(),
+ Context.toBits(TypeAlign));
+
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastByte = getDataSizeInBits() - NewSizeInBits;
+ }
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(FieldOffset);
+
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, FieldOffset,
+ Context.toBits(TypeAlign), FieldPacked, D);
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(TypeAlign);
+}
+
+void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
+ bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t FieldOffset = IsUnion ? 0 : UnpaddedFieldOffset;
+ uint64_t FieldSize = D->getBitWidthValue(Context);
+
+ std::pair<uint64_t, unsigned> FieldInfo = Context.getTypeInfo(D->getType());
+ uint64_t TypeSize = FieldInfo.first;
+ unsigned FieldAlign = FieldInfo.second;
+
+ // This check is needed for 'long long' in -m32 mode.
+ if (IsMsStruct && (TypeSize > FieldAlign) &&
+ (Context.hasSameType(D->getType(),
+ Context.UnsignedLongLongTy)
+ || Context.hasSameType(D->getType(), Context.LongLongTy)))
+ FieldAlign = TypeSize;
+
+ if (ZeroLengthBitfield) {
+ std::pair<uint64_t, unsigned> FieldInfo;
+ unsigned ZeroLengthBitfieldAlignment;
+ if (IsMsStruct) {
+ // If a zero-length bitfield is inserted after a bitfield,
+ // and the alignment of the zero-length bitfield is
+      // greater than that of the member that follows it (`bar'), then
+      // `bar' will be aligned as the type of the zero-length bitfield.
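+      // Illustrative (hypothetical fields): given
+      // 'char a : 4; int : 0; char bar;', 'bar' would be aligned as an int
+      // rather than as a char (exact offsets are target-dependent).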
+ if (ZeroLengthBitfield != D) {
+ FieldInfo = Context.getTypeInfo(ZeroLengthBitfield->getType());
+ ZeroLengthBitfieldAlignment = FieldInfo.second;
+ // Ignore alignment of subsequent zero-length bitfields.
+ if ((ZeroLengthBitfieldAlignment > FieldAlign) || (FieldSize == 0))
+ FieldAlign = ZeroLengthBitfieldAlignment;
+ if (FieldSize)
+ ZeroLengthBitfield = 0;
+ }
+ } else {
+ // The alignment of a zero-length bitfield affects the alignment
+ // of the next member. The alignment is the max of the zero
+ // length bitfield's alignment and a target specific fixed value.
+ unsigned ZeroLengthBitfieldBoundary =
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary();
+ if (ZeroLengthBitfieldBoundary > FieldAlign)
+ FieldAlign = ZeroLengthBitfieldBoundary;
+ }
+ }
+
+ if (FieldSize > TypeSize) {
+ LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
+ return;
+ }
+
+  // The alignment the field would have if it were not packed. This is used
+  // to check whether the packed attribute was unnecessary (-Wpacked).
+ unsigned UnpackedFieldAlign = FieldAlign;
+ uint64_t UnpackedFieldOffset = FieldOffset;
+ if (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield)
+ UnpackedFieldAlign = 1;
+
+ if (FieldPacked ||
+ (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield))
+ FieldAlign = 1;
+ FieldAlign = std::max(FieldAlign, D->getMaxAlignment());
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment());
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero() && FieldSize != 0) {
+ unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
+ }
+
+ // Check if we need to add padding to give the field the correct alignment.
+ if (FieldSize == 0 ||
+ (MaxFieldAlignment.isZero() &&
+ (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize))
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+
+ if (FieldSize == 0 ||
+ (MaxFieldAlignment.isZero() &&
+ (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
+
+ // Padding members don't affect overall alignment, unless zero length bitfield
+ // alignment is enabled.
+ if (!D->getIdentifier() && !Context.getTargetInfo().useZeroLengthBitfieldAlignment())
+ FieldAlign = UnpackedFieldAlign = 1;
+
+ if (!IsMsStruct)
+ ZeroLengthBitfield = 0;
+
+ if (ExternalLayout)
+ FieldOffset = updateExternalFieldOffset(D, FieldOffset);
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(FieldOffset);
+
+ if (!ExternalLayout)
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
+ UnpackedFieldAlign, FieldPacked, D);
+
+ // Update DataSize to include the last byte containing (part of) the bitfield.
+ if (IsUnion) {
+ // FIXME: I think FieldSize should be TypeSize here.
+ setDataSize(std::max(getDataSizeInBits(), FieldSize));
+ } else {
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastByte = getDataSizeInBits() - NewSizeInBits;
+ }
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(Context.toCharUnitsFromBits(FieldAlign),
+ Context.toCharUnitsFromBits(UnpackedFieldAlign));
+}
+
+void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
+ if (D->isBitField()) {
+ LayoutBitField(D);
+ return;
+ }
+
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastByte;
+
+ // Reset the unfilled bits.
+ UnfilledBitsInLastByte = 0;
+
+ bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ CharUnits FieldOffset =
+ IsUnion ? CharUnits::Zero() : getDataSize();
+ CharUnits FieldSize;
+ CharUnits FieldAlign;
+
+ if (D->getType()->isIncompleteArrayType()) {
+ // This is a flexible array member; we can't directly
+ // query getTypeInfo about these, so we figure it out here.
+ // Flexible array members don't have any size, but they
+ // have to be aligned appropriately for their element type.
+ FieldSize = CharUnits::Zero();
+ const ArrayType* ATy = Context.getAsArrayType(D->getType());
+ FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
+ } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
+ unsigned AS = RT->getPointeeType().getAddressSpace();
+ FieldSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
+ FieldAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
+ } else {
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
+ FieldSize = FieldInfo.first;
+ FieldAlign = FieldInfo.second;
+
+ if (ZeroLengthBitfield) {
+ CharUnits ZeroLengthBitfieldBoundary =
+ Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary());
+ if (ZeroLengthBitfieldBoundary == CharUnits::Zero()) {
+ // If a zero-length bitfield is inserted after a bitfield,
+ // and the alignment of the zero-length bitfield is
+        // greater than that of the member that follows it (`bar'), then
+        // `bar' will be aligned as the type of the zero-length bitfield.
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(ZeroLengthBitfield->getType());
+ CharUnits ZeroLengthBitfieldAlignment = FieldInfo.second;
+ if (ZeroLengthBitfieldAlignment > FieldAlign)
+ FieldAlign = ZeroLengthBitfieldAlignment;
+ } else if (ZeroLengthBitfieldBoundary > FieldAlign) {
+ // Align 'bar' based on a fixed alignment specified by the target.
+ assert(Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+ "ZeroLengthBitfieldBoundary should only be used in conjunction"
+ " with useZeroLengthBitfieldAlignment.");
+ FieldAlign = ZeroLengthBitfieldBoundary;
+ }
+ ZeroLengthBitfield = 0;
+ }
+
+ if (Context.getLangOpts().MSBitfields || IsMsStruct) {
+ // If MS bitfield layout is required, figure out what type is being
+ // laid out and align the field to the width of that type.
+
+ // Resolve all typedefs down to their base type and round up the field
+ // alignment if necessary.
+ QualType T = Context.getBaseElementType(D->getType());
+ if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
+ CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
+ if (TypeSize > FieldAlign)
+ FieldAlign = TypeSize;
+ }
+ }
+ }
+
+  // The alignment the field would have if it were not packed. This is used
+  // to check whether the packed attribute was unnecessary (-Wpacked).
+ CharUnits UnpackedFieldAlign = FieldAlign;
+ CharUnits UnpackedFieldOffset = FieldOffset;
+
+ if (FieldPacked)
+ FieldAlign = CharUnits::One();
+ CharUnits MaxAlignmentInChars =
+ Context.toCharUnitsFromBits(D->getMaxAlignment());
+ FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero()) {
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to the field's alignment boundary.
+ FieldOffset = FieldOffset.RoundUpToAlignment(FieldAlign);
+ UnpackedFieldOffset =
+ UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign);
+
+ if (ExternalLayout) {
+ FieldOffset = Context.toCharUnitsFromBits(
+ updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
+
+ if (!IsUnion && EmptySubobjects) {
+ // Record the fact that we're placing a field at this offset.
+ bool Allowed = EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset);
+ (void)Allowed;
+ assert(Allowed && "Externally-placed field cannot be placed here");
+ }
+ } else {
+ if (!IsUnion && EmptySubobjects) {
+ // Check if we can place the field at this offset.
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
+ // We couldn't place the field at the offset. Try again at a new offset.
+ FieldOffset += FieldAlign;
+ }
+ }
+ }
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(Context.toBits(FieldOffset));
+
+ if (!ExternalLayout)
+ CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
+ Context.toBits(UnpackedFieldOffset),
+ Context.toBits(UnpackedFieldAlign), FieldPacked, D);
+
+ // Reserve space for this field.
+ uint64_t FieldSizeInBits = Context.toBits(FieldSize);
+ if (IsUnion)
+ setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
+ else
+ setDataSize(FieldOffset + FieldSize);
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(FieldAlign, UnpackedFieldAlign);
+}
+
+void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
+ if (ExternalLayout) {
+ setSize(ExternalSize);
+ return;
+ }
+
+ // In C++, records cannot be of size 0.
+ if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+      // For compatibility with gcc, a class (POD or non-POD) that is not
+      // empty but has size 0 (for example, one whose only fields are
+      // zero-length arrays) keeps its size of 0; only truly empty classes
+      // are given size 1.
+ if (RD->isEmpty())
+ setSize(CharUnits::One());
+ }
+ else
+ setSize(CharUnits::One());
+ }
+
+ // MSVC doesn't round up to the alignment of the record with virtual bases.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (isMicrosoftCXXABI() && RD->getNumVBases())
+ return;
+ }
+
+ // Finally, round the size of the record up to the alignment of the
+ // record itself.
+ uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t UnpackedSizeInBits =
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.toBits(UnpackedAlignment));
+ CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits);
+ setSize(llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment)));
+
+ unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ // Warn if padding was introduced to the struct/class/union.
+ if (getSizeInBits() > UnpaddedSize) {
+ unsigned PadSize = getSizeInBits() - UnpaddedSize;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ Diag(RD->getLocation(), diag::warn_padded_struct_size)
+ << Context.getTypeDeclType(RD)
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (Packed && UnpackedAlignment > CharUnits::One() &&
+ getSize() == UnpackedSize)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << Context.getTypeDeclType(RD);
+ }
+}
+
+void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
+ CharUnits UnpackedNewAlignment) {
+ // The alignment is not modified when using 'mac68k' alignment or when
+ // we have an externally-supplied layout that also provides overall alignment.
+ if (IsMac68kAlign || (ExternalLayout && !InferAlignment))
+ return;
+
+ if (NewAlignment > Alignment) {
+    assert(llvm::isPowerOf2_32(NewAlignment.getQuantity()) &&
+           "Alignment not a power of 2");
+ Alignment = NewAlignment;
+ }
+
+ if (UnpackedNewAlignment > UnpackedAlignment) {
+    assert(llvm::isPowerOf2_32(UnpackedNewAlignment.getQuantity()) &&
+           "Alignment not a power of 2");
+ UnpackedAlignment = UnpackedNewAlignment;
+ }
+}
+
+uint64_t
+RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset) {
+ assert(ExternalFieldOffsets.find(Field) != ExternalFieldOffsets.end() &&
+ "Field does not have an external offset");
+
+ uint64_t ExternalFieldOffset = ExternalFieldOffsets[Field];
+
+ if (InferAlignment && ExternalFieldOffset < ComputedOffset) {
+ // The externally-supplied field offset is before the field offset we
+ // computed. Assume that the structure is packed.
+ Alignment = CharUnits::fromQuantity(1);
+ InferAlignment = false;
+ }
+
+ // Use the externally-supplied field offset.
+ return ExternalFieldOffset;
+}
+
+void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
+ uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset,
+ unsigned UnpackedAlign,
+ bool isPacked,
+ const FieldDecl *D) {
+  // We let Objective-C ivars through without warning; Objective-C interfaces
+  // are generally not used for padding tricks.
+ if (isa<ObjCIvarDecl>(D))
+ return;
+
+ // Don't warn about structs created without a SourceLocation. This can
+ // be done by clients of the AST, such as codegen.
+ if (D->getLocation().isInvalid())
+ return;
+
+ unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
+
+ // Warn if padding was introduced to the struct/class.
+ if (!IsUnion && Offset > UnpaddedOffset) {
+ unsigned PadSize = Offset - UnpaddedOffset;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ if (D->getIdentifier())
+ Diag(D->getLocation(), diag::warn_padded_struct_field)
+ << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not
+ << D->getIdentifier();
+ else
+ Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
+ << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (isPacked && UnpackedAlign > CharBitNum && Offset == UnpackedOffset)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << D->getIdentifier();
+}
+
+const CXXMethodDecl *
+RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
+ // If a class isn't polymorphic it doesn't have a key function.
+ if (!RD->isPolymorphic())
+ return 0;
+
+ // A class that is not externally visible doesn't have a key function. (Or
+ // at least, there's no point to assigning a key function to such a class;
+ // this doesn't affect the ABI.)
+ if (RD->getLinkage() != ExternalLinkage)
+ return 0;
+
+  // Template instantiations don't have key functions; see Itanium C++ ABI 5.2.6.
+ // Same behavior as GCC.
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return 0;
+
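+  // Illustrative (hypothetical class): for
+  // 'struct S { virtual void f(); virtual void g() {} };', the loop below
+  // selects S::f as the key function, since S::g has an inline body.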
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ if (MD->isPure())
+ continue;
+
+ // Ignore implicit member functions, they are always marked as inline, but
+ // they don't have a body until they're defined.
+ if (MD->isImplicit())
+ continue;
+
+ if (MD->isInlineSpecified())
+ continue;
+
+ if (MD->hasInlineBody())
+ continue;
+
+ // We found it.
+ return MD;
+ }
+
+ return 0;
+}
+
+DiagnosticBuilder
+RecordLayoutBuilder::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Context.getDiagnostics().Report(Loc, DiagID);
+}
+
+/// getASTRecordLayout - Get or compute information about the layout of the
+/// specified record (struct/union/class), which indicates its size and field
+/// position information.
+const ASTRecordLayout &
+ASTContext::getASTRecordLayout(const RecordDecl *D) const {
+ // These asserts test different things. A record has a definition
+ // as soon as we begin to parse the definition. That definition is
+ // not a complete definition (which is what isDefinition() tests)
+ // until we *finish* parsing the definition.
+
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
+
+ D = D->getDefinition();
+ assert(D && "Cannot get layout of forward declarations!");
+ assert(D->isCompleteDefinition() && "Cannot layout type before complete!");
+
+ // Look up this layout, if already laid out, return what we have.
+ // Note that we can't save a reference to the entry because this function
+ // is recursive.
+ const ASTRecordLayout *Entry = ASTRecordLayouts[D];
+ if (Entry) return *Entry;
+
+ const ASTRecordLayout *NewEntry;
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ EmptySubobjectMap EmptySubobjects(*this, RD);
+ RecordLayoutBuilder Builder(*this, &EmptySubobjects);
+ Builder.Layout(RD);
+
+ // MSVC gives the vb-table pointer an alignment equal to that of
+ // the non-virtual part of the structure. That's an inherently
+ // multi-pass operation. If our first pass doesn't give us
+ // adequate alignment, try again with the specified minimum
+ // alignment. This is *much* more maintainable than computing the
+ // alignment in advance in a separately-coded pass; it's also
+ // significantly more efficient in the common case where the
+ // vb-table doesn't need extra padding.
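+    //
+    // For example, if the first pass leaves the vb-table pointer at offset 4
+    // while the non-virtual alignment is 8, the class is laid out again with
+    // a minimum alignment of 8 so that the vbptr lands on an 8-byte boundary.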
+ if (Builder.VBPtrOffset != CharUnits::fromQuantity(-1) &&
+ (Builder.VBPtrOffset % Builder.NonVirtualAlignment) != 0) {
+ Builder.resetWithTargetAlignment(Builder.NonVirtualAlignment);
+ Builder.Layout(RD);
+ }
+
+ // FIXME: This is not always correct. See the part about bitfields at
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#POD for more info.
+ // FIXME: IsPODForThePurposeOfLayout should be stored in the record layout.
+ // This does not affect the calculations of MSVC layouts
+ bool IsPODForThePurposeOfLayout =
+ (!Builder.isMicrosoftCXXABI() && cast<CXXRecordDecl>(D)->isPOD());
+
+ // FIXME: This should be done in FinalizeLayout.
+ CharUnits DataSize =
+ IsPODForThePurposeOfLayout ? Builder.getSize() : Builder.getDataSize();
+ CharUnits NonVirtualSize =
+ IsPODForThePurposeOfLayout ? DataSize : Builder.NonVirtualSize;
+
+ NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.VFPtrOffset,
+ Builder.VBPtrOffset,
+ DataSize,
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(),
+ NonVirtualSize,
+ Builder.NonVirtualAlignment,
+ EmptySubobjects.SizeOfLargestEmptySubobject,
+ Builder.PrimaryBase,
+ Builder.PrimaryBaseIsVirtual,
+ Builder.Bases, Builder.VBases);
+ } else {
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
+ Builder.Layout(D);
+
+ NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.getSize(),
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
+
+ ASTRecordLayouts[D] = NewEntry;
+
+ if (getLangOpts().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping AST Record Layout\n";
+ DumpRecordLayout(D, llvm::errs(), getLangOpts().DumpRecordLayoutsSimple);
+ }
+
+ return *NewEntry;
+}
+
+const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
+ RD = cast<CXXRecordDecl>(RD->getDefinition());
+ assert(RD && "Cannot get key function for forward declarations!");
+
+ const CXXMethodDecl *&Entry = KeyFunctions[RD];
+ if (!Entry)
+ Entry = RecordLayoutBuilder::ComputeKeyFunction(RD);
+
+ return Entry;
+}
+
+static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(FD->getParent());
+ return Layout.getFieldOffset(FD->getFieldIndex());
+}
+
+uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const {
+ uint64_t OffsetInBits;
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
+ OffsetInBits = ::getFieldOffset(*this, FD);
+ } else {
+ const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
+
+ OffsetInBits = 0;
+ for (IndirectFieldDecl::chain_iterator CI = IFD->chain_begin(),
+ CE = IFD->chain_end();
+ CI != CE; ++CI)
+ OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(*CI));
+ }
+
+ return OffsetInBits;
+}
+
+/// getObjCLayout - Get or compute information about the layout of the
+/// given interface.
+///
+/// \param Impl - If given, also include the layout of the interface's
+/// implementation. This may differ by including synthesized ivars.
+const ASTRecordLayout &
+ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) const {
+ // Retrieve the definition
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
+ D = D->getDefinition();
+ assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
+
+ // Look up this layout, if already laid out, return what we have.
+ ObjCContainerDecl *Key =
+ Impl ? (ObjCContainerDecl*) Impl : (ObjCContainerDecl*) D;
+ if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
+ return *Entry;
+
+ // Add in synthesized ivar count if laying out an implementation.
+ if (Impl) {
+ unsigned SynthCount = CountNonClassIvars(D);
+    // If there aren't any synthesized ivars then reuse the interface
+ // entry. Note we can't cache this because we simply free all
+ // entries later; however we shouldn't look up implementations
+ // frequently.
+ if (SynthCount == 0)
+ return getObjCLayout(D, 0);
+ }
+
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
+ Builder.Layout(D);
+
+ const ASTRecordLayout *NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.getDataSize(),
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+
+ ObjCLayouts[Key] = NewEntry;
+
+ return *NewEntry;
+}
+
+static void PrintOffset(raw_ostream &OS,
+ CharUnits Offset, unsigned IndentLevel) {
+ OS << llvm::format("%4" PRId64 " | ", (int64_t)Offset.getQuantity());
+ OS.indent(IndentLevel * 2);
+}
+
+static void DumpCXXRecordLayout(raw_ostream &OS,
+ const CXXRecordDecl *RD, const ASTContext &C,
+ CharUnits Offset,
+ unsigned IndentLevel,
+ const char* Description,
+ bool IncludeVirtualBases) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(RD);
+
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << C.getTypeDeclType(const_cast<CXXRecordDecl *>(RD)).getAsString();
+ if (Description)
+ OS << ' ' << Description;
+ if (RD->isEmpty())
+ OS << " (empty)";
+ OS << '\n';
+
+ IndentLevel++;
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ bool HasVfptr = Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1);
+ bool HasVbptr = Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1);
+
+ // Vtable pointer.
+ if (RD->isDynamicClass() && !PrimaryBase &&
+ C.getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << '(' << *RD << " vtable pointer)\n";
+ }
+
+ // Dump (non-virtual) bases
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+
+ DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
+ Base == PrimaryBase ? "(primary base)" : "(base)",
+ /*IncludeVirtualBases=*/false);
+ }
+
+ // vfptr and vbptr (for Microsoft C++ ABI)
+ if (HasVfptr) {
+ PrintOffset(OS, Offset + Layout.getVFPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vftable pointer)\n";
+ }
+ if (HasVbptr) {
+ PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vbtable pointer)\n";
+ }
+
+ // Dump fields.
+ uint64_t FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *Field = *I;
+ CharUnits FieldOffset = Offset +
+ C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
+
+ if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
+ if (const CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ DumpCXXRecordLayout(OS, D, C, FieldOffset, IndentLevel,
+ Field->getName().data(),
+ /*IncludeVirtualBases=*/true);
+ continue;
+ }
+ }
+
+ PrintOffset(OS, FieldOffset, IndentLevel);
+ OS << Field->getType().getAsString() << ' ' << *Field << '\n';
+ }
+
+ if (!IncludeVirtualBases)
+ return;
+
+ // Dump virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ assert(I->isVirtual() && "Found non-virtual class!");
+ const CXXRecordDecl *VBase =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
+ DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
+ VBase == PrimaryBase ?
+ "(primary virtual base)" : "(virtual base)",
+ /*IncludeVirtualBases=*/false);
+ }
+
+ OS << " sizeof=" << Layout.getSize().getQuantity();
+ OS << ", dsize=" << Layout.getDataSize().getQuantity();
+ OS << ", align=" << Layout.getAlignment().getQuantity() << '\n';
+ OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
+ OS << ", nvalign=" << Layout.getNonVirtualAlign().getQuantity() << '\n';
+ OS << '\n';
+}
+
+void ASTContext::DumpRecordLayout(const RecordDecl *RD,
+ raw_ostream &OS,
+ bool Simple) const {
+ const ASTRecordLayout &Info = getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (!Simple)
+ return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, 0,
+ /*IncludeVirtualBases=*/true);
+
+ OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
+ if (!Simple) {
+ OS << "Record: ";
+ RD->dump();
+ }
+ OS << "\nLayout: ";
+ OS << "<ASTRecordLayout\n";
+ OS << " Size:" << toBits(Info.getSize()) << "\n";
+ OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
+ OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
+ OS << " FieldOffsets: [";
+ for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
+ if (i) OS << ", ";
+ OS << Info.getFieldOffset(i);
+ }
+ OS << "]>\n";
+}
diff --git a/clang/lib/AST/SelectorLocationsKind.cpp b/clang/lib/AST/SelectorLocationsKind.cpp
new file mode 100644
index 0000000..671207a
--- /dev/null
+++ b/clang/lib/AST/SelectorLocationsKind.cpp
@@ -0,0 +1,128 @@
+//===--- SelectorLocationsKind.cpp - Kind of selector locations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Describes whether the identifier locations for a selector are "standard"
+// or not.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/SelectorLocationsKind.h"
+#include "clang/AST/Expr.h"
+
+using namespace clang;
+
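+// Illustrative note on the "standard" position computed below (hypothetical
+// source): in a message send such as '[obj setValue:x]', the standard
+// location of the 'setValue:' piece is 9 characters before the location of
+// 'x' (the length of the identifier plus the ':'), or 10 characters before
+// it when a space separates the ':' from the argument.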
+static SourceLocation getStandardSelLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ SourceLocation ArgLoc,
+ SourceLocation EndLoc) {
+ unsigned NumSelArgs = Sel.getNumArgs();
+ if (NumSelArgs == 0) {
+ assert(Index == 0);
+ if (EndLoc.isInvalid())
+ return SourceLocation();
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0);
+ unsigned Len = II ? II->getLength() : 0;
+ return EndLoc.getLocWithOffset(-Len);
+ }
+
+ assert(Index < NumSelArgs);
+ if (ArgLoc.isInvalid())
+ return SourceLocation();
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index);
+ unsigned Len = /* selector id */ (II ? II->getLength() : 0) + /* ':' */ 1;
+ if (WithArgSpace)
+ ++Len;
+ return ArgLoc.getLocWithOffset(-Len);
+}
+
+namespace {
+
+template <typename T>
+SourceLocation getArgLoc(T* Arg);
+
+template <>
+SourceLocation getArgLoc<Expr>(Expr *Arg) {
+ return Arg->getLocStart();
+}
+
+template <>
+SourceLocation getArgLoc<ParmVarDecl>(ParmVarDecl *Arg) {
+ SourceLocation Loc = Arg->getLocStart();
+ if (Loc.isInvalid())
+ return Loc;
+ // -1 to point to left paren of the method parameter's type.
+ return Loc.getLocWithOffset(-1);
+}
+
+template <typename T>
+SourceLocation getArgLoc(unsigned Index, ArrayRef<T*> Args) {
+ return Index < Args.size() ? getArgLoc(Args[Index]) : SourceLocation();
+}
+
+template <typename T>
+SelectorLocationsKind hasStandardSelLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<T *> Args,
+ SourceLocation EndLoc) {
+  // Are the selector locations in standard position with no space between args?
+ unsigned i;
+ for (i = 0; i != SelLocs.size(); ++i) {
+ if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/false,
+ Args, EndLoc))
+ break;
+ }
+ if (i == SelLocs.size())
+ return SelLoc_StandardNoSpace;
+
+  // Are the selector locations in standard position with a space between args?
+ for (i = 0; i != SelLocs.size(); ++i) {
+ if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/true,
+ Args, EndLoc))
+ return SelLoc_NonStandard;
+ }
+
+ return SelLoc_StandardWithSpace;
+}
+
+} // anonymous namespace
+
+SelectorLocationsKind
+clang::hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc) {
+ return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
+}
+
+SourceLocation clang::getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc) {
+ return getStandardSelLoc(Index, Sel, WithArgSpace,
+ getArgLoc(Index, Args), EndLoc);
+}
+
+SelectorLocationsKind
+clang::hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc) {
+ return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
+}
+
+SourceLocation clang::getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc) {
+ return getStandardSelLoc(Index, Sel, WithArgSpace,
+ getArgLoc(Index, Args), EndLoc);
+}
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
new file mode 100644
index 0000000..e4d9f0a
--- /dev/null
+++ b/clang/lib/AST/Stmt.cpp
@@ -0,0 +1,867 @@
+//===--- Stmt.cpp - Statement AST Node Implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt class and statement subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Stmt.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+static struct StmtClassNameTable {
+ const char *Name;
+ unsigned Counter;
+ unsigned Size;
+} StmtClassInfo[Stmt::lastStmtConstant+1];
+
+static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
+ static bool Initialized = false;
+ if (Initialized)
+ return StmtClassInfo[E];
+
+  // Initialize the table on the first use.
+ Initialized = true;
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS; \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
+#include "clang/AST/StmtNodes.inc"
+
+ return StmtClassInfo[E];
+}
+
+const char *Stmt::getStmtClassName() const {
+ return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
+}
+
+void Stmt::PrintStats() {
+ // Ensure the table is primed.
+ getStmtInfoTableEntry(Stmt::NullStmtClass);
+
+ unsigned sum = 0;
+ llvm::errs() << "\n*** Stmt/Expr Stats:\n";
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
+ if (StmtClassInfo[i].Name == 0) continue;
+ sum += StmtClassInfo[i].Counter;
+ }
+ llvm::errs() << " " << sum << " stmts/exprs total.\n";
+ sum = 0;
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
+ if (StmtClassInfo[i].Name == 0) continue;
+ if (StmtClassInfo[i].Counter == 0) continue;
+ llvm::errs() << " " << StmtClassInfo[i].Counter << " "
+ << StmtClassInfo[i].Name << ", " << StmtClassInfo[i].Size
+ << " each (" << StmtClassInfo[i].Counter*StmtClassInfo[i].Size
+ << " bytes)\n";
+ sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size;
+ }
+
+ llvm::errs() << "Total bytes = " << sum << "\n";
+}
+
+void Stmt::addStmtClass(StmtClass s) {
+ ++getStmtInfoTableEntry(s).Counter;
+}
+
+bool Stmt::StatisticsEnabled = false;
+void Stmt::EnableStatistics() {
+ StatisticsEnabled = true;
+}
+
+Stmt *Stmt::IgnoreImplicit() {
+ Stmt *s = this;
+
+ if (ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(s))
+ s = ewc->getSubExpr();
+
+ while (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(s))
+ s = ice->getSubExpr();
+
+ return s;
+}
+
+/// \brief Strip off all label-like statements.
+///
+/// This will strip off label statements, case statements, attributed
+/// statements and default statements recursively.
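+///
+/// For example, given the (hypothetical) statement 'case 1: default: x = 0;'
+/// inside a switch, this returns the 'x = 0' expression statement.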
+const Stmt *Stmt::stripLabelLikeStatements() const {
+ const Stmt *S = this;
+ while (true) {
+ if (const LabelStmt *LS = dyn_cast<LabelStmt>(S))
+ S = LS->getSubStmt();
+ else if (const SwitchCase *SC = dyn_cast<SwitchCase>(S))
+ S = SC->getSubStmt();
+ else if (const AttributedStmt *AS = dyn_cast<AttributedStmt>(S))
+ S = AS->getSubStmt();
+ else
+ return S;
+ }
+}
+
+namespace {
+ struct good {};
+ struct bad {};
+
+ // These silly little functions have to be static inline to suppress
+ // unused warnings, and they have to be defined to suppress other
+ // warnings.
+ static inline good is_good(good) { return good(); }
+
+ typedef Stmt::child_range children_t();
+ template <class T> good implements_children(children_t T::*) {
+ return good();
+ }
+ static inline bad implements_children(children_t Stmt::*) {
+ return bad();
+ }
+
+ typedef SourceRange getSourceRange_t() const;
+ template <class T> good implements_getSourceRange(getSourceRange_t T::*) {
+ return good();
+ }
+ static inline bad implements_getSourceRange(getSourceRange_t Stmt::*) {
+ return bad();
+ }
+
+#define ASSERT_IMPLEMENTS_children(type) \
+ (void) sizeof(is_good(implements_children(&type::children)))
+#define ASSERT_IMPLEMENTS_getSourceRange(type) \
+ (void) sizeof(is_good(implements_getSourceRange(&type::getSourceRange)))
+}
+
+/// Check whether the various Stmt classes implement their member
+/// functions.
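+/// If a subclass relies on Stmt's children() or getSourceRange() instead of
+/// providing its own, the corresponding implements_* overload resolves to
+/// the one returning 'bad' and the is_good() check fails to compile.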
+static inline void check_implementations() {
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ ASSERT_IMPLEMENTS_children(type); \
+ ASSERT_IMPLEMENTS_getSourceRange(type);
+#include "clang/AST/StmtNodes.inc"
+}
+
+Stmt::child_range Stmt::children() {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<type*>(this)->children();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+}
+
+SourceRange Stmt::getSourceRange() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type*>(this)->getSourceRange();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+}
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getLocStart() and getLocEnd().
+//
+// See also Expr.cpp:getExprLoc().
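+//
+// Roughly: if a subclass declares its own getLocStart(), &type::getLocStart
+// has a pointer-to-member type of that subclass and only the two-parameter
+// template below matches; if the subclass inherits Stmt's declaration, both
+// overloads match and the one taking a Stmt member pointer wins by partial
+// ordering, falling back to getSourceRange().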
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getLocStart.
+ template <class S, class T>
+ SourceLocation getLocStartImpl(const Stmt *stmt,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getLocStart();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getLocStart. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceLocation getLocStartImpl(const Stmt *stmt,
+ SourceLocation (Stmt::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange().getBegin();
+ }
+
+ /// This implementation is used when a class provides a custom
+ /// implementation of getLocEnd.
+ template <class S, class T>
+ SourceLocation getLocEndImpl(const Stmt *stmt,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getLocEnd();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getLocEnd. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceLocation getLocEndImpl(const Stmt *stmt,
+ SourceLocation (Stmt::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange().getEnd();
+ }
+}
+
+SourceLocation Stmt::getLocStart() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getLocStartImpl<type>(this, &type::getLocStart);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+SourceLocation Stmt::getLocEnd() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getLocEndImpl<type>(this, &type::getLocEnd);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+void CompoundStmt::setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts) {
+ if (this->Body)
+ C.Deallocate(Body);
+ this->CompoundStmtBits.NumStmts = NumStmts;
+
+ Body = new (C) Stmt*[NumStmts];
+ memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts);
+}
+
+const char *LabelStmt::getName() const {
+ return getDecl()->getIdentifier()->getNameStart();
+}
+
+// This is defined here to avoid polluting Stmt.h with importing Expr.h
+SourceRange ReturnStmt::getSourceRange() const {
+ if (RetExpr)
+ return SourceRange(RetLoc, RetExpr->getLocEnd());
+ else
+ return SourceRange(RetLoc);
+}
+
+bool Stmt::hasImplicitControlFlow() const {
+ switch (StmtBits.sClass) {
+ default:
+ return false;
+
+ case CallExprClass:
+ case ConditionalOperatorClass:
+ case ChooseExprClass:
+ case StmtExprClass:
+ case DeclStmtClass:
+ return true;
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator* B = cast<BinaryOperator>(this);
+ if (B->isLogicalOp() || B->getOpcode() == BO_Comma)
+ return true;
+ else
+ return false;
+ }
+ }
+}
+
+Expr *AsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+/// getOutputConstraint - Return the constraint string for the specified
+/// output operand. All output constraints are known to be non-empty (either
+/// '=' or '+').
+StringRef AsmStmt::getOutputConstraint(unsigned i) const {
+ return getOutputConstraintLiteral(i)->getString();
+}
+
+/// getNumPlusOperands - Return the number of output operands that have a "+"
+/// constraint.
+unsigned AsmStmt::getNumPlusOperands() const {
+ unsigned Res = 0;
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i)
+ if (isOutputPlusConstraint(i))
+ ++Res;
+ return Res;
+}
+
+Expr *AsmStmt::getInputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i + NumOutputs]);
+}
+void AsmStmt::setInputExpr(unsigned i, Expr *E) {
+ Exprs[i + NumOutputs] = E;
+}
+
+
+/// getInputConstraint - Return the specified input constraint. Unlike output
+/// constraints, these can be empty.
+StringRef AsmStmt::getInputConstraint(unsigned i) const {
+ return getInputConstraintLiteral(i)->getString();
+}
+
+
+void AsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
+ IdentifierInfo **Names,
+ StringLiteral **Constraints,
+ Stmt **Exprs,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ StringLiteral **Clobbers,
+ unsigned NumClobbers) {
+ this->NumOutputs = NumOutputs;
+ this->NumInputs = NumInputs;
+ this->NumClobbers = NumClobbers;
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ C.Deallocate(this->Names);
+ this->Names = new (C) IdentifierInfo*[NumExprs];
+ std::copy(Names, Names + NumExprs, this->Names);
+
+ C.Deallocate(this->Exprs);
+ this->Exprs = new (C) Stmt*[NumExprs];
+ std::copy(Exprs, Exprs + NumExprs, this->Exprs);
+
+ C.Deallocate(this->Constraints);
+ this->Constraints = new (C) StringLiteral*[NumExprs];
+ std::copy(Constraints, Constraints + NumExprs, this->Constraints);
+
+ C.Deallocate(this->Clobbers);
+ this->Clobbers = new (C) StringLiteral*[NumClobbers];
+ std::copy(Clobbers, Clobbers + NumClobbers, this->Clobbers);
+}
+
+/// getNamedOperand - Given a symbolic operand reference like %[foo],
+/// translate this into a numeric value needed to reference the same operand.
+/// This returns -1 if the operand name is invalid.
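+///
+/// For example (illustrative), given
+///   asm("..." : [result] "=r"(x) : [val] "r"(y));
+/// getNamedOperand("result") returns 0 and getNamedOperand("val") returns
+/// getNumOutputs() + 0 == 1.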
+int AsmStmt::getNamedOperand(StringRef SymbolicName) const {
+
+ // Check if this is an output operand.
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
+ if (getOutputName(i) == SymbolicName)
+ return i;
+ }
+
+ for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
+ if (getInputName(i) == SymbolicName)
+ return getNumOutputs() + i;
+
+ // Not found.
+ return -1;
+}
+
+/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
+/// it into pieces. If the asm string is erroneous, return the diagnostic ID
+/// to report (DiagOffs is set to the offset of the problem within the asm
+/// string); otherwise return 0.
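+///
+/// For example (illustrative), the asm string "addl %1, %0" decomposes into
+/// the pieces String("addl "), Operand(1), String(", "), Operand(0).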
+unsigned AsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
+ ASTContext &C, unsigned &DiagOffs) const {
+ StringRef Str = getAsmString()->getString();
+ const char *StrStart = Str.begin();
+ const char *StrEnd = Str.end();
+ const char *CurPtr = StrStart;
+
+ // "Simple" inline asms have no constraints or operands, just convert the asm
+ // string to escape $'s.
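+ // For example (illustrative), the simple asm "int $3" becomes the single
+ // piece "int $$3", since a literal '$' must be escaped as "$$" for LLVM's
+ // asm printer.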
+ if (isSimple()) {
+ std::string Result;
+ for (; CurPtr != StrEnd; ++CurPtr) {
+ switch (*CurPtr) {
+ case '$':
+ Result += "$$";
+ break;
+ default:
+ Result += *CurPtr;
+ break;
+ }
+ }
+ Pieces.push_back(AsmStringPiece(Result));
+ return 0;
+ }
+
+ // CurStringPiece - The current string that we are building up as we scan the
+ // asm string.
+ std::string CurStringPiece;
+
+ bool HasVariants = !C.getTargetInfo().hasNoAsmVariants();
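+ // On targets that support asm dialect variants, GCC's "{a|b}" alternative
+ // syntax is rewritten below into LLVM's "$(a$|b$)" form; otherwise the
+ // braces and pipe are passed through unchanged.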
+
+ while (1) {
+ // Done with the string?
+ if (CurPtr == StrEnd) {
+ if (!CurStringPiece.empty())
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ return 0;
+ }
+
+ char CurChar = *CurPtr++;
+ switch (CurChar) {
+ case '$': CurStringPiece += "$$"; continue;
+ case '{': CurStringPiece += (HasVariants ? "$(" : "{"); continue;
+ case '|': CurStringPiece += (HasVariants ? "$|" : "|"); continue;
+ case '}': CurStringPiece += (HasVariants ? "$)" : "}"); continue;
+ case '%':
+ break;
+ default:
+ CurStringPiece += CurChar;
+ continue;
+ }
+
+ // Escaped "%" character in asm string.
+ if (CurPtr == StrEnd) {
+ // % at end of string is invalid (no escape).
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+
+ char EscapedChar = *CurPtr++;
+ if (EscapedChar == '%') { // %% -> %
+ // Escaped percentage sign.
+ CurStringPiece += '%';
+ continue;
+ }
+
+ if (EscapedChar == '=') { // %= -> Generate a unique ID.
+ CurStringPiece += "${:uid}";
+ continue;
+ }
+
+ // Otherwise, we have an operand. If we have accumulated a string so far,
+ // add it to the Pieces list.
+ if (!CurStringPiece.empty()) {
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ CurStringPiece.clear();
+ }
+
+ // Handle %x4 and %x[foo] by capturing x as the modifier character.
+ char Modifier = '\0';
+ if (isalpha(EscapedChar)) {
+ if (CurPtr == StrEnd) { // Premature end.
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+ Modifier = EscapedChar;
+ EscapedChar = *CurPtr++;
+ }
+
+ if (isdigit(EscapedChar)) {
+ // %n - Assembler operand n
+ unsigned N = 0;
+
+ --CurPtr;
+ while (CurPtr != StrEnd && isdigit(*CurPtr))
+ N = N*10 + ((*CurPtr++)-'0');
+
+ unsigned NumOperands =
+ getNumOutputs() + getNumPlusOperands() + getNumInputs();
+ if (N >= NumOperands) {
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_operand_number;
+ }
+
+ Pieces.push_back(AsmStringPiece(N, Modifier));
+ continue;
+ }
+
+ // Handle %[foo], a symbolic operand reference.
+ if (EscapedChar == '[') {
+ DiagOffs = CurPtr-StrStart-1;
+
+ // Find the ']'.
+ const char *NameEnd = (const char*)memchr(CurPtr, ']', StrEnd-CurPtr);
+ if (NameEnd == 0)
+ return diag::err_asm_unterminated_symbolic_operand_name;
+ if (NameEnd == CurPtr)
+ return diag::err_asm_empty_symbolic_operand_name;
+
+ StringRef SymbolicName(CurPtr, NameEnd - CurPtr);
+
+ int N = getNamedOperand(SymbolicName);
+ if (N == -1) {
+ // Verify that an operand with that name exists.
+ DiagOffs = CurPtr-StrStart;
+ return diag::err_asm_unknown_symbolic_operand_name;
+ }
+ Pieces.push_back(AsmStringPiece(N, Modifier));
+
+ CurPtr = NameEnd+1;
+ continue;
+ }
+
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+}
+
+QualType CXXCatchStmt::getCaughtType() const {
+ if (ExceptionDecl)
+ return ExceptionDecl->getType();
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Constructors
+//===----------------------------------------------------------------------===//
+
+AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
+ bool isvolatile, bool msasm,
+ unsigned numoutputs, unsigned numinputs,
+ IdentifierInfo **names, StringLiteral **constraints,
+ Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
+ StringLiteral **clobbers, SourceLocation rparenloc)
+ : Stmt(AsmStmtClass), AsmLoc(asmloc), RParenLoc(rparenloc), AsmStr(asmstr)
+ , IsSimple(issimple), IsVolatile(isvolatile), MSAsm(msasm)
+ , NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ Names = new (C) IdentifierInfo*[NumExprs];
+ std::copy(names, names + NumExprs, Names);
+
+ Exprs = new (C) Stmt*[NumExprs];
+ std::copy(exprs, exprs + NumExprs, Exprs);
+
+ Constraints = new (C) StringLiteral*[NumExprs];
+ std::copy(constraints, constraints + NumExprs, Constraints);
+
+ Clobbers = new (C) StringLiteral*[NumClobbers];
+ std::copy(clobbers, clobbers + NumClobbers, Clobbers);
+}
+
+ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
+ Stmt *Body, SourceLocation FCL,
+ SourceLocation RPL)
+: Stmt(ObjCForCollectionStmtClass) {
+ SubExprs[ELEM] = Elem;
+ SubExprs[COLLECTION] = reinterpret_cast<Stmt*>(Collect);
+ SubExprs[BODY] = Body;
+ ForLoc = FCL;
+ RParenLoc = RPL;
+}
+
+ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt)
+ : Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc),
+ NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != 0)
+{
+ Stmt **Stmts = getStmts();
+ Stmts[0] = atTryStmt;
+ for (unsigned I = 0; I != NumCatchStmts; ++I)
+ Stmts[I + 1] = CatchStmts[I];
+
+ if (HasFinally)
+ Stmts[NumCatchStmts + 1] = atFinallyStmt;
+}
+
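+// Note on layout: the trailing storage allocated below holds, in order, the
+// @try body, the @catch statements, and (if present) the @finally statement,
+// mirroring the initialization in the constructor above.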
+ObjCAtTryStmt *ObjCAtTryStmt::Create(ASTContext &Context,
+ SourceLocation atTryLoc,
+ Stmt *atTryStmt,
+ Stmt **CatchStmts,
+ unsigned NumCatchStmts,
+ Stmt *atFinallyStmt) {
+ unsigned Size = sizeof(ObjCAtTryStmt) +
+ (1 + NumCatchStmts + (atFinallyStmt != 0)) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
+ atFinallyStmt);
+}
+
+ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(ASTContext &Context,
+ unsigned NumCatchStmts,
+ bool HasFinally) {
+ unsigned Size = sizeof(ObjCAtTryStmt) +
+ (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
+}
+
+SourceRange ObjCAtTryStmt::getSourceRange() const {
+ SourceLocation EndLoc;
+ if (HasFinally)
+ EndLoc = getFinallyStmt()->getLocEnd();
+ else if (NumCatchStmts)
+ EndLoc = getCatchStmt(NumCatchStmts - 1)->getLocEnd();
+ else
+ EndLoc = getTryBody()->getLocEnd();
+
+ return SourceRange(AtTryLoc, EndLoc);
+}
+
+CXXTryStmt *CXXTryStmt::Create(ASTContext &C, SourceLocation tryLoc,
+ Stmt *tryBlock, Stmt **handlers,
+ unsigned numHandlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((numHandlers + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers, numHandlers);
+}
+
+CXXTryStmt *CXXTryStmt::Create(ASTContext &C, EmptyShell Empty,
+ unsigned numHandlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((numHandlers + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(Empty, numHandlers);
+}
+
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+ Stmt **handlers, unsigned numHandlers)
+ : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(numHandlers) {
+ Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
+ Stmts[0] = tryBlock;
+ std::copy(handlers, handlers + NumHandlers, Stmts + 1);
+}
+
+CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt,
+ Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
+ Stmt *Body, SourceLocation FL,
+ SourceLocation CL, SourceLocation RPL)
+ : Stmt(CXXForRangeStmtClass), ForLoc(FL), ColonLoc(CL), RParenLoc(RPL) {
+ SubExprs[RANGE] = Range;
+ SubExprs[BEGINEND] = BeginEndStmt;
+ SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
+ SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
+ SubExprs[LOOPVAR] = LoopVar;
+ SubExprs[BODY] = Body;
+}
+
+Expr *CXXForRangeStmt::getRangeInit() {
+ DeclStmt *RangeStmt = getRangeStmt();
+ VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
+ assert(RangeDecl && "for-range should have a single var decl");
+ return RangeDecl->getInit();
+}
+
+const Expr *CXXForRangeStmt::getRangeInit() const {
+ return const_cast<CXXForRangeStmt*>(this)->getRangeInit();
+}
+
+VarDecl *CXXForRangeStmt::getLoopVariable() {
+ Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl();
+ assert(LV && "No loop variable in CXXForRangeStmt");
+ return cast<VarDecl>(LV);
+}
+
+const VarDecl *CXXForRangeStmt::getLoopVariable() const {
+ return const_cast<CXXForRangeStmt*>(this)->getLoopVariable();
+}
+
+IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL, Stmt *elsev)
+ : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL)
+{
+ setConditionVariable(C, var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[THEN] = then;
+ SubExprs[ELSE] = elsev;
+}
+
+VarDecl *IfStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
+ Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
+ SourceLocation RP)
+ : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+{
+ SubExprs[INIT] = Init;
+ setConditionVariable(C, condVar);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
+ SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
+ SubExprs[BODY] = Body;
+}
+
+VarDecl *ForStmt::getConditionVariable() const {
+ if (!SubExprs[CONDVAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[CONDVAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
+ : Stmt(SwitchStmtClass), FirstCase(0), AllEnumCasesCovered(0)
+{
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = NULL;
+}
+
+VarDecl *SwitchStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+Stmt *SwitchCase::getSubStmt() {
+ if (isa<CaseStmt>(this))
+ return cast<CaseStmt>(this)->getSubStmt();
+ return cast<DefaultStmt>(this)->getSubStmt();
+}
+
+WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL)
+ : Stmt(WhileStmtClass) {
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = body;
+ WhileLoc = WL;
+}
+
+VarDecl *WhileStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+// IndirectGotoStmt
+LabelDecl *IndirectGotoStmt::getConstantTarget() {
+ if (AddrLabelExpr *E =
+ dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
+ return E->getLabel();
+ return 0;
+}
+
+// ReturnStmt
+const Expr* ReturnStmt::getRetValue() const {
+ return cast_or_null<Expr>(RetExpr);
+}
+Expr* ReturnStmt::getRetValue() {
+ return cast_or_null<Expr>(RetExpr);
+}
+
+SEHTryStmt::SEHTryStmt(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler)
+ : Stmt(SEHTryStmtClass),
+ IsCXXTry(IsCXXTry),
+ TryLoc(TryLoc)
+{
+ Children[TRY] = TryBlock;
+ Children[HANDLER] = Handler;
+}
+
+SEHTryStmt* SEHTryStmt::Create(ASTContext &C,
+ bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler) {
+ return new (C) SEHTryStmt(IsCXXTry, TryLoc, TryBlock, Handler);
+}
+
+SEHExceptStmt* SEHTryStmt::getExceptHandler() const {
+ return dyn_cast<SEHExceptStmt>(getHandler());
+}
+
+SEHFinallyStmt* SEHTryStmt::getFinallyHandler() const {
+ return dyn_cast<SEHFinallyStmt>(getHandler());
+}
+
+SEHExceptStmt::SEHExceptStmt(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block)
+ : Stmt(SEHExceptStmtClass),
+ Loc(Loc)
+{
+ Children[FILTER_EXPR] = reinterpret_cast<Stmt*>(FilterExpr);
+ Children[BLOCK] = Block;
+}
+
+SEHExceptStmt* SEHExceptStmt::Create(ASTContext &C,
+ SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block) {
+ return new (C) SEHExceptStmt(Loc, FilterExpr, Block);
+}
+
+SEHFinallyStmt::SEHFinallyStmt(SourceLocation Loc,
+ Stmt *Block)
+ : Stmt(SEHFinallyStmtClass),
+ Loc(Loc),
+ Block(Block)
+{}
+
+SEHFinallyStmt* SEHFinallyStmt::Create(ASTContext &C,
+ SourceLocation Loc,
+ Stmt *Block) {
+ return new (C) SEHFinallyStmt(Loc, Block);
+}
diff --git a/clang/lib/AST/StmtDumper.cpp b/clang/lib/AST/StmtDumper.cpp
new file mode 100644
index 0000000..b5e298c
--- /dev/null
+++ b/clang/lib/AST/StmtDumper.cpp
@@ -0,0 +1,763 @@
+//===--- StmtDumper.cpp - Dumping implementation for Stmt ASTs ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dump/Stmt::print methods, which dump out the
+// AST in a form that exposes type details and other fields.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtDumper Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class StmtDumper : public StmtVisitor<StmtDumper> {
+ SourceManager *SM;
+ raw_ostream &OS;
+ unsigned IndentLevel;
+
+ /// MaxDepth - When doing a normal dump (not dumpAll) we only want to dump
+ /// the first few levels of an AST. This keeps track of how many ast levels
+ /// are left.
+ unsigned MaxDepth;
+
+ /// LastLocFilename/LastLocLine - Keep track of the last location we print
+ /// out so that we can print out deltas from then on out.
+ const char *LastLocFilename;
+ unsigned LastLocLine;
+
+ public:
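+ // Note: IndentLevel is initialized to (unsigned)-1 so that the first call
+ // to DumpSubTree, which increments it on entry, dumps the root at level 0.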
+ StmtDumper(SourceManager *sm, raw_ostream &os, unsigned maxDepth)
+ : SM(sm), OS(os), IndentLevel(0-1), MaxDepth(maxDepth) {
+ LastLocFilename = "";
+ LastLocLine = ~0U;
+ }
+
+ void DumpSubTree(Stmt *S) {
+ // Prune the recursion if not using dump all.
+ if (MaxDepth == 0) return;
+
+ ++IndentLevel;
+ if (S) {
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
+ VisitDeclStmt(DS);
+ else {
+ Visit(S);
+
+ // Print out children.
+ Stmt::child_range CI = S->children();
+ while (CI) {
+ OS << '\n';
+ DumpSubTree(*CI++);
+ }
+ }
+ OS << ')';
+ } else {
+ Indent();
+ OS << "<<<NULL>>>";
+ }
+ --IndentLevel;
+ }
+
+ void DumpDeclarator(Decl *D);
+
+ void Indent() const {
+ for (int i = 0, e = IndentLevel; i < e; ++i)
+ OS << " ";
+ }
+
+ void DumpType(QualType T) {
+ SplitQualType T_split = T.split();
+ OS << "'" << QualType::getAsString(T_split) << "'";
+
+ if (!T.isNull()) {
+ // If the type is sugared, also dump a (shallow) desugared type.
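+ // For example (illustrative), with "typedef int pid_t;" an expression of
+ // type pid_t is dumped as 'pid_t':'int'.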
+ SplitQualType D_split = T.getSplitDesugaredType();
+ if (T_split != D_split)
+ OS << ":'" << QualType::getAsString(D_split) << "'";
+ }
+ }
+ void DumpDeclRef(Decl *node);
+ void DumpStmt(const Stmt *Node) {
+ Indent();
+ OS << "(" << Node->getStmtClassName()
+ << " " << (void*)Node;
+ DumpSourceRange(Node);
+ }
+ void DumpValueKind(ExprValueKind K) {
+ switch (K) {
+ case VK_RValue: break;
+ case VK_LValue: OS << " lvalue"; break;
+ case VK_XValue: OS << " xvalue"; break;
+ }
+ }
+ void DumpObjectKind(ExprObjectKind K) {
+ switch (K) {
+ case OK_Ordinary: break;
+ case OK_BitField: OS << " bitfield"; break;
+ case OK_ObjCProperty: OS << " objcproperty"; break;
+ case OK_ObjCSubscript: OS << " objcsubscript"; break;
+ case OK_VectorComponent: OS << " vectorcomponent"; break;
+ }
+ }
+ void DumpExpr(const Expr *Node) {
+ DumpStmt(Node);
+ OS << ' ';
+ DumpType(Node->getType());
+ DumpValueKind(Node->getValueKind());
+ DumpObjectKind(Node->getObjectKind());
+ }
+ void DumpSourceRange(const Stmt *Node);
+ void DumpLocation(SourceLocation Loc);
+
+ // Stmts.
+ void VisitStmt(Stmt *Node);
+ void VisitDeclStmt(DeclStmt *Node);
+ void VisitLabelStmt(LabelStmt *Node);
+ void VisitGotoStmt(GotoStmt *Node);
+
+ // Exprs
+ void VisitExpr(Expr *Node);
+ void VisitCastExpr(CastExpr *Node);
+ void VisitDeclRefExpr(DeclRefExpr *Node);
+ void VisitPredefinedExpr(PredefinedExpr *Node);
+ void VisitCharacterLiteral(CharacterLiteral *Node);
+ void VisitIntegerLiteral(IntegerLiteral *Node);
+ void VisitFloatingLiteral(FloatingLiteral *Node);
+ void VisitStringLiteral(StringLiteral *Str);
+ void VisitUnaryOperator(UnaryOperator *Node);
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node);
+ void VisitMemberExpr(MemberExpr *Node);
+ void VisitExtVectorElementExpr(ExtVectorElementExpr *Node);
+ void VisitBinaryOperator(BinaryOperator *Node);
+ void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
+ void VisitAddrLabelExpr(AddrLabelExpr *Node);
+ void VisitBlockExpr(BlockExpr *Node);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *Node);
+
+ // C++
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node);
+ void VisitCXXThisExpr(CXXThisExpr *Node);
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node);
+ void VisitCXXConstructExpr(CXXConstructExpr *Node);
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node);
+ void VisitExprWithCleanups(ExprWithCleanups *Node);
+ void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node);
+ void DumpCXXTemporary(CXXTemporary *Temporary);
+
+ // ObjC
+ void VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node);
+ void VisitObjCEncodeExpr(ObjCEncodeExpr *Node);
+ void VisitObjCMessageExpr(ObjCMessageExpr* Node);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
+ void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
+ void VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
+ void VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node);
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::DumpLocation(SourceLocation Loc) {
+ SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
+
+ // The general format we print out is filename:line:col, but we drop pieces
+ // that haven't changed since the last loc printed.
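+ // For example (illustrative), the first location in t.c might print as
+ // "t.c:10:3"; a later one on a different line of the same file prints as
+ // "line:12:7", and one on the same line prints as just "col:15".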
+ PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return;
+ }
+
+ if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
+ OS << PLoc.getFilename() << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ LastLocFilename = PLoc.getFilename();
+ LastLocLine = PLoc.getLine();
+ } else if (PLoc.getLine() != LastLocLine) {
+ OS << "line" << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ LastLocLine = PLoc.getLine();
+ } else {
+ OS << "col" << ':' << PLoc.getColumn();
+ }
+}
+
+void StmtDumper::DumpSourceRange(const Stmt *Node) {
+ // Can't translate locations if a SourceManager isn't available.
+ if (SM == 0) return;
+
+ // TODO: If the parent expression is available, we can print a delta vs its
+ // location.
+ SourceRange R = Node->getSourceRange();
+
+ OS << " <";
+ DumpLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd()) {
+ OS << ", ";
+ DumpLocation(R.getEnd());
+ }
+ OS << ">";
+
+ // Example output: " <t2.c:123:42, line:412:32>"; the second location drops
+ // the filename because it matches the first.
+}
+
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitStmt(Stmt *Node) {
+ DumpStmt(Node);
+}
+
+void StmtDumper::DumpDeclarator(Decl *D) {
+ // FIXME: Need to complete/beautify this... this code simply shows the
+ // nodes are where they need to be.
+ if (TypedefDecl *localType = dyn_cast<TypedefDecl>(D)) {
+ OS << "\"typedef " << localType->getUnderlyingType().getAsString()
+ << ' ' << *localType << '"';
+ } else if (TypeAliasDecl *localType = dyn_cast<TypeAliasDecl>(D)) {
+ OS << "\"using " << *localType << " = "
+ << localType->getUnderlyingType().getAsString() << '"';
+ } else if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ OS << "\"";
+ // Emit storage class for vardecls.
+ if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
+ if (V->getStorageClass() != SC_None)
+ OS << VarDecl::getStorageClassSpecifierString(V->getStorageClass())
+ << " ";
+ }
+
+ std::string Name = VD->getNameAsString();
+ VD->getType().getAsStringInternal(Name,
+ PrintingPolicy(VD->getASTContext().getLangOpts()));
+ OS << Name;
+
+ // If this is a vardecl with an initializer, emit it.
+ if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
+ if (V->getInit()) {
+ OS << " =\n";
+ DumpSubTree(V->getInit());
+ }
+ }
+ OS << '"';
+ } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ // print a free standing tag decl (e.g. "struct x;").
+ const char *tagname;
+ if (const IdentifierInfo *II = TD->getIdentifier())
+ tagname = II->getNameStart();
+ else
+ tagname = "<anonymous>";
+ OS << '"' << TD->getKindName() << ' ' << tagname << ";\"";
+ // FIXME: print tag bodies.
+ } else if (UsingDirectiveDecl *UD = dyn_cast<UsingDirectiveDecl>(D)) {
+ // print using-directive decl (e.g. "using namespace x;")
+ const char *ns;
+ if (const IdentifierInfo *II = UD->getNominatedNamespace()->getIdentifier())
+ ns = II->getNameStart();
+ else
+ ns = "<anonymous>";
+ OS << '"' << UD->getDeclKindName() << ns << ";\"";
+ } else if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) {
+ // print using decl (e.g. "using std::string;")
+ const char *tn = UD->isTypeName() ? "typename " : "";
+ OS << '"' << UD->getDeclKindName() << tn;
+ UD->getQualifier()->print(OS,
+ PrintingPolicy(UD->getASTContext().getLangOpts()));
+ OS << ";\"";
+ } else if (LabelDecl *LD = dyn_cast<LabelDecl>(D)) {
+ OS << "label " << *LD;
+ } else if (StaticAssertDecl *SAD = dyn_cast<StaticAssertDecl>(D)) {
+ OS << "\"static_assert(\n";
+ DumpSubTree(SAD->getAssertExpr());
+ OS << ",\n";
+ DumpSubTree(SAD->getMessage());
+ OS << ");\"";
+ } else {
+ llvm_unreachable("Unexpected decl");
+ }
+}
+
+void StmtDumper::VisitDeclStmt(DeclStmt *Node) {
+ DumpStmt(Node);
+ OS << "\n";
+ for (DeclStmt::decl_iterator DI = Node->decl_begin(), DE = Node->decl_end();
+ DI != DE; ++DI) {
+ Decl* D = *DI;
+ ++IndentLevel;
+ Indent();
+ OS << (void*) D << " ";
+ DumpDeclarator(D);
+ if (DI+1 != DE)
+ OS << "\n";
+ --IndentLevel;
+ }
+}
+
+void StmtDumper::VisitLabelStmt(LabelStmt *Node) {
+ DumpStmt(Node);
+ OS << " '" << Node->getName() << "'";
+}
+
+void StmtDumper::VisitGotoStmt(GotoStmt *Node) {
+ DumpStmt(Node);
+ OS << " '" << Node->getLabel()->getName()
+ << "':" << (void*)Node->getLabel();
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitExpr(Expr *Node) {
+ DumpExpr(Node);
+}
+
+static void DumpBasePath(raw_ostream &OS, CastExpr *Node) {
+ if (Node->path_empty())
+ return;
+
+ OS << " (";
+ bool First = true;
+ for (CastExpr::path_iterator
+ I = Node->path_begin(), E = Node->path_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ if (!First)
+ OS << " -> ";
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual())
+ OS << "virtual ";
+ OS << RD->getName();
+ First = false;
+ }
+
+ OS << ')';
+}
+
+void StmtDumper::VisitCastExpr(CastExpr *Node) {
+ DumpExpr(Node);
+ OS << " <" << Node->getCastKindName();
+ DumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void StmtDumper::VisitDeclRefExpr(DeclRefExpr *Node) {
+ DumpExpr(Node);
+
+ OS << " ";
+ DumpDeclRef(Node->getDecl());
+ if (Node->getDecl() != Node->getFoundDecl()) {
+ OS << " (";
+ DumpDeclRef(Node->getFoundDecl());
+ OS << ")";
+ }
+}
+
+void StmtDumper::DumpDeclRef(Decl *d) {
+ OS << d->getDeclKindName() << ' ' << (void*) d;
+
+ if (NamedDecl *nd = dyn_cast<NamedDecl>(d)) {
+ OS << " '";
+ nd->getDeclName().printName(OS);
+ OS << "'";
+ }
+
+ if (ValueDecl *vd = dyn_cast<ValueDecl>(d)) {
+ OS << ' '; DumpType(vd->getType());
+ }
+}
+
+void StmtDumper::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
+ DumpExpr(Node);
+ OS << " (";
+ if (!Node->requiresADL()) OS << "no ";
+ OS << "ADL) = '" << Node->getName() << '\'';
+
+ UnresolvedLookupExpr::decls_iterator
+ I = Node->decls_begin(), E = Node->decls_end();
+ if (I == E) OS << " empty";
+ for (; I != E; ++I)
+ OS << " " << (void*) *I;
+}
+
+void StmtDumper::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ DumpExpr(Node);
+
+ OS << " " << Node->getDecl()->getDeclKindName()
+ << "Decl='" << *Node->getDecl()
+ << "' " << (void*)Node->getDecl();
+ if (Node->isFreeIvar())
+ OS << " isFreeIvar";
+}
+
+void StmtDumper::VisitPredefinedExpr(PredefinedExpr *Node) {
+ DumpExpr(Node);
+ switch (Node->getIdentType()) {
+ default: llvm_unreachable("unknown case");
+ case PredefinedExpr::Func: OS << " __func__"; break;
+ case PredefinedExpr::Function: OS << " __FUNCTION__"; break;
+ case PredefinedExpr::PrettyFunction: OS << " __PRETTY_FUNCTION__";break;
+ }
+}
+
+void StmtDumper::VisitCharacterLiteral(CharacterLiteral *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getValue();
+}
+
+void StmtDumper::VisitIntegerLiteral(IntegerLiteral *Node) {
+ DumpExpr(Node);
+
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ OS << " " << Node->getValue().toString(10, isSigned);
+}
+void StmtDumper::VisitFloatingLiteral(FloatingLiteral *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getValueAsApproximateDouble();
+}
+
+void StmtDumper::VisitStringLiteral(StringLiteral *Str) {
+ DumpExpr(Str);
+ // FIXME: this doesn't print wstrings right.
+ OS << " ";
+ switch (Str->getKind()) {
+ case StringLiteral::Ascii: break; // No prefix
+ case StringLiteral::Wide: OS << 'L'; break;
+ case StringLiteral::UTF8: OS << "u8"; break;
+ case StringLiteral::UTF16: OS << 'u'; break;
+ case StringLiteral::UTF32: OS << 'U'; break;
+ }
+ OS << '"';
+ OS.write_escaped(Str->getString());
+ OS << '"';
+}
+
+void StmtDumper::VisitUnaryOperator(UnaryOperator *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
+ << " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+void StmtDumper::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
+ DumpExpr(Node);
+ switch(Node->getKind()) {
+ case UETT_SizeOf:
+ OS << " sizeof ";
+ break;
+ case UETT_AlignOf:
+ OS << " __alignof ";
+ break;
+ case UETT_VecStep:
+ OS << " vec_step ";
+ break;
+ }
+ if (Node->isArgumentType())
+ DumpType(Node->getArgumentType());
+}
+
+void StmtDumper::VisitMemberExpr(MemberExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->isArrow() ? "->" : ".")
+ << *Node->getMemberDecl() << ' '
+ << (void*)Node->getMemberDecl();
+}
+void StmtDumper::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getAccessor().getNameStart();
+}
+void StmtDumper::VisitBinaryOperator(BinaryOperator *Node) {
+ DumpExpr(Node);
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+void StmtDumper::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ DumpExpr(Node);
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
+ << "' ComputeLHSTy=";
+ DumpType(Node->getComputationLHSType());
+ OS << " ComputeResultTy=";
+ DumpType(Node->getComputationResultType());
+}
+
+void StmtDumper::VisitBlockExpr(BlockExpr *Node) {
+ DumpExpr(Node);
+
+ BlockDecl *block = Node->getBlockDecl();
+ OS << " decl=" << block;
+
+ IndentLevel++;
+ if (block->capturesCXXThis()) {
+ OS << '\n'; Indent(); OS << "(capture this)";
+ }
+ for (BlockDecl::capture_iterator
+ i = block->capture_begin(), e = block->capture_end(); i != e; ++i) {
+ OS << '\n';
+ Indent();
+ OS << "(capture ";
+ if (i->isByRef()) OS << "byref ";
+ if (i->isNested()) OS << "nested ";
+ if (i->getVariable())
+ DumpDeclRef(i->getVariable());
+ if (i->hasCopyExpr()) DumpSubTree(i->getCopyExpr());
+ OS << ")";
+ }
+ IndentLevel--;
+
+ OS << '\n';
+ DumpSubTree(block->getBody());
+}
+
+void StmtDumper::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ DumpExpr(Node);
+
+ if (Expr *Source = Node->getSourceExpr()) {
+ OS << '\n';
+ DumpSubTree(Source);
+ }
+}
+
+// GNU extensions.
+
+void StmtDumper::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getLabel()->getName()
+ << " " << (void*)Node->getLabel();
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getCastName()
+ << "<" << Node->getTypeAsWritten().getAsString() << ">"
+ << " <" << Node->getCastKindName();
+ DumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void StmtDumper::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->getValue() ? "true" : "false");
+}
+
+void StmtDumper::VisitCXXThisExpr(CXXThisExpr *Node) {
+ DumpExpr(Node);
+ OS << " this";
+}
+
+void StmtDumper::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ DumpExpr(Node);
+ OS << " functional cast to " << Node->getTypeAsWritten().getAsString()
+ << " <" << Node->getCastKindName() << ">";
+}
+
+void StmtDumper::VisitCXXConstructExpr(CXXConstructExpr *Node) {
+ DumpExpr(Node);
+ CXXConstructorDecl *Ctor = Node->getConstructor();
+ DumpType(Ctor->getType());
+ if (Node->isElidable())
+ OS << " elidable";
+ if (Node->requiresZeroInitialization())
+ OS << " zeroing";
+}
+
+void StmtDumper::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
+ DumpExpr(Node);
+ OS << " ";
+ DumpCXXTemporary(Node->getTemporary());
+}
+
+void StmtDumper::VisitExprWithCleanups(ExprWithCleanups *Node) {
+ DumpExpr(Node);
+ ++IndentLevel;
+ for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i) {
+ OS << "\n";
+ Indent();
+ OS << "(cleanup ";
+ DumpDeclRef(Node->getObject(i));
+ OS << ")";
+ }
+ --IndentLevel;
+}
+
+void StmtDumper::DumpCXXTemporary(CXXTemporary *Temporary) {
+ OS << "(CXXTemporary " << (void *)Temporary << ")";
+}
+
+//===----------------------------------------------------------------------===//
+// Obj-C Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitObjCMessageExpr(ObjCMessageExpr* Node) {
+ DumpExpr(Node);
+ OS << " selector=" << Node->getSelector().getAsString();
+ switch (Node->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ break;
+
+ case ObjCMessageExpr::Class:
+ OS << " class=";
+ DumpType(Node->getClassReceiver());
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ OS << " super (instance)";
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ OS << " super (class)";
+ break;
+ }
+}
+
+void StmtDumper::VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node) {
+ DumpStmt(Node);
+ if (VarDecl *CatchParam = Node->getCatchParamDecl()) {
+ OS << " catch parm = ";
+ DumpDeclarator(CatchParam);
+ } else {
+ OS << " catch all";
+ }
+}
+
+void StmtDumper::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ DumpExpr(Node);
+ OS << " ";
+ DumpType(Node->getEncodedType());
+}
+
+void StmtDumper::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ DumpExpr(Node);
+
+ OS << " " << Node->getSelector().getAsString();
+}
+
+void StmtDumper::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ DumpExpr(Node);
+
+ OS << ' ' << *Node->getProtocol();
+}
+
+void StmtDumper::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ DumpExpr(Node);
+ if (Node->isImplicitProperty()) {
+ OS << " Kind=MethodRef Getter=\"";
+ if (Node->getImplicitPropertyGetter())
+ OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ OS << "(null)";
+
+ OS << "\" Setter=\"";
+ if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
+ OS << Setter->getSelector().getAsString();
+ else
+ OS << "(null)";
+ OS << "\"";
+ } else {
+ OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty() <<'"';
+ }
+
+ if (Node->isSuperReceiver())
+ OS << " super";
+
+ OS << " Messaging=";
+ if (Node->isMessagingGetter() && Node->isMessagingSetter())
+ OS << "Getter&Setter";
+ else if (Node->isMessagingGetter())
+ OS << "Getter";
+ else if (Node->isMessagingSetter())
+ OS << "Setter";
+}
+
+void StmtDumper::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
+ DumpExpr(Node);
+ if (Node->isArraySubscriptRefExpr())
+ OS << " Kind=ArraySubscript GetterForArray=\"";
+ else
+ OS << " Kind=DictionarySubscript GetterForDictionary=\"";
+ if (Node->getAtIndexMethodDecl())
+ OS << Node->getAtIndexMethodDecl()->getSelector().getAsString();
+ else
+ OS << "(null)";
+
+ if (Node->isArraySubscriptRefExpr())
+ OS << "\" SetterForArray=\"";
+ else
+ OS << "\" SetterForDictionary=\"";
+ if (Node->setAtIndexMethodDecl())
+ OS << Node->setAtIndexMethodDecl()->getSelector().getAsString();
+ else
+ OS << "(null)";
+}
+
+void StmtDumper::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+/// dump - This does a local dump of the specified AST fragment. It dumps the
+/// specified node and a few nodes underneath it, but not the whole subtree.
+/// This is useful in a debugger.
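+/// For example, from a debugger one can evaluate "S->dump()" (or
+/// "S->dump(SM)" when a SourceManager is at hand) to print a statement and a
+/// few levels of its children.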
+void Stmt::dump(SourceManager &SM) const {
+ dump(llvm::errs(), SM);
+}
+
+void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
+ StmtDumper P(&SM, OS, 4);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ OS << "\n";
+}
+
+/// dump - This does a local dump of the specified AST fragment. It dumps the
+/// specified node and a few nodes underneath it, but not the whole subtree.
+/// This is useful in a debugger.
+void Stmt::dump() const {
+ StmtDumper P(0, llvm::errs(), 4);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ llvm::errs() << "\n";
+}
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void Stmt::dumpAll(SourceManager &SM) const {
+ StmtDumper P(&SM, llvm::errs(), ~0U);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ llvm::errs() << "\n";
+}
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void Stmt::dumpAll() const {
+ StmtDumper P(0, llvm::errs(), ~0U);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ llvm::errs() << "\n";
+}
diff --git a/clang/lib/AST/StmtIterator.cpp b/clang/lib/AST/StmtIterator.cpp
new file mode 100644
index 0000000..9bf4aea
--- /dev/null
+++ b/clang/lib/AST/StmtIterator.cpp
@@ -0,0 +1,155 @@
+//===--- StmtIterator.cpp - Iterators for Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines internal methods for StmtIterator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtIterator.h"
+#include "clang/AST/Decl.h"
+
+using namespace clang;
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
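+//
+// FindVA - Walk an array type's element types and return the first
+// variable-length array that has a size expression, or null if there is none.
+// For example (illustrative), for "int a[n][10]" it returns the VLA type
+// whose size expression is 'n'.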
+static inline const VariableArrayType *FindVA(const Type* t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return NULL;
+}
+
+void StmtIteratorBase::NextVA() {
+ assert (getVAPtr());
+
+ const VariableArrayType *p = getVAPtr();
+ p = FindVA(p->getElementType().getTypePtr());
+ setVAPtr(p);
+
+ if (p)
+ return;
+
+ if (inDecl()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(decl))
+ if (VD->Init)
+ return;
+
+ NextDecl();
+ }
+ else if (inDeclGroup()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(*DGI))
+ if (VD->Init)
+ return;
+
+ NextDecl();
+ }
+ else {
+ assert (inSizeOfTypeVA());
+ assert(!decl);
+ RawVAPtr = 0;
+ }
+}
+
+void StmtIteratorBase::NextDecl(bool ImmediateAdvance) {
+ assert (getVAPtr() == NULL);
+
+ if (inDecl()) {
+ assert(decl);
+
+ // FIXME: SIMPLIFY AWAY.
+ if (ImmediateAdvance)
+ decl = 0;
+ else if (HandleDecl(decl))
+ return;
+ }
+ else {
+ assert(inDeclGroup());
+
+ if (ImmediateAdvance)
+ ++DGI;
+
+ for ( ; DGI != DGE; ++DGI)
+ if (HandleDecl(*DGI))
+ return;
+ }
+
+ RawVAPtr = 0;
+}
+
+bool StmtIteratorBase::HandleDecl(Decl* D) {
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(D)) {
+ if (const VariableArrayType* VAPtr = FindVA(VD->getType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+
+ if (VD->getInit())
+ return true;
+ }
+ else if (TypedefNameDecl* TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (const VariableArrayType* VAPtr =
+ FindVA(TD->getUnderlyingType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+ }
+ else if (EnumConstantDecl* ECD = dyn_cast<EnumConstantDecl>(D)) {
+ if (ECD->getInitExpr())
+ return true;
+ }
+
+ return false;
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl *d, Stmt **s)
+ : stmt(s), decl(d), RawVAPtr(d ? DeclMode : 0) {
+ if (decl)
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge)
+ : stmt(0), DGI(dgi), RawVAPtr(DeclGroupMode), DGE(dge) {
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(const VariableArrayType* t)
+ : stmt(0), decl(0), RawVAPtr(SizeOfTypeVAMode) {
+ RawVAPtr |= reinterpret_cast<uintptr_t>(t);
+}
+
+Stmt*& StmtIteratorBase::GetDeclExpr() const {
+
+ if (const VariableArrayType* VAPtr = getVAPtr()) {
+ assert (VAPtr->SizeExpr);
+ return const_cast<Stmt*&>(VAPtr->SizeExpr);
+ }
+
+ assert (inDecl() || inDeclGroup());
+
+ if (inDeclGroup()) {
+ VarDecl* VD = cast<VarDecl>(*DGI);
+ return *VD->getInitAddress();
+ }
+
+ assert (inDecl());
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(decl)) {
+ assert (VD->Init);
+ return *VD->getInitAddress();
+ }
+
+ EnumConstantDecl* ECD = cast<EnumConstantDecl>(decl);
+ return ECD->Init;
+}
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
new file mode 100644
index 0000000..0d1066b
--- /dev/null
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -0,0 +1,1902 @@
+//===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dumpPretty/Stmt::printPretty methods, which
+// pretty print the AST back out to C code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtPrinter Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class StmtPrinter : public StmtVisitor<StmtPrinter> {
+ raw_ostream &OS;
+ ASTContext &Context;
+ unsigned IndentLevel;
+ clang::PrinterHelper* Helper;
+ PrintingPolicy Policy;
+
+ public:
+ StmtPrinter(raw_ostream &os, ASTContext &C, PrinterHelper* helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0)
+ : OS(os), Context(C), IndentLevel(Indentation), Helper(helper),
+ Policy(Policy) {}
+
+ void PrintStmt(Stmt *S) {
+ PrintStmt(S, Policy.Indentation);
+ }
+
+ void PrintStmt(Stmt *S, int SubIndent) {
+ IndentLevel += SubIndent;
+ if (S && isa<Expr>(S)) {
+ // If this is an expr used in a stmt context, indent and newline it.
+ Indent();
+ Visit(S);
+ OS << ";\n";
+ } else if (S) {
+ Visit(S);
+ } else {
+ Indent() << "<<<NULL STATEMENT>>>\n";
+ }
+ IndentLevel -= SubIndent;
+ }
+
+ void PrintRawCompoundStmt(CompoundStmt *S);
+ void PrintRawDecl(Decl *D);
+ void PrintRawDeclStmt(DeclStmt *S);
+ void PrintRawIfStmt(IfStmt *If);
+ void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
+ void PrintCallArgs(CallExpr *E);
+ void PrintRawSEHExceptHandler(SEHExceptStmt *S);
+ void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
+
+ void PrintExpr(Expr *E) {
+ if (E)
+ Visit(E);
+ else
+ OS << "<null expr>";
+ }
+
+ raw_ostream &Indent(int Delta = 0) {
+ for (int i = 0, e = IndentLevel+Delta; i < e; ++i)
+ OS << " ";
+ return OS;
+ }
+
+ void Visit(Stmt* S) {
+ if (Helper && Helper->handledStmt(S,OS))
+ return;
+ else StmtVisitor<StmtPrinter>::Visit(S);
+ }
+
+ void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
+ Indent() << "<<unknown stmt type>>\n";
+ }
+ void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
+ OS << "<<unknown expr type>>";
+ }
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+
+#define ABSTRACT_STMT(CLASS)
+#define STMT(CLASS, PARENT) \
+ void Visit##CLASS(CLASS *Node);
+#include "clang/AST/StmtNodes.inc"
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
+/// with no newline after the }.
+void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
+ OS << "{\n";
+ for (CompoundStmt::body_iterator I = Node->body_begin(), E = Node->body_end();
+ I != E; ++I)
+ PrintStmt(*I);
+
+ Indent() << "}";
+}
+
+void StmtPrinter::PrintRawDecl(Decl *D) {
+ D->print(OS, Policy, IndentLevel);
+}
+
+void StmtPrinter::PrintRawDeclStmt(DeclStmt *S) {
+ DeclStmt::decl_iterator Begin = S->decl_begin(), End = S->decl_end();
+ SmallVector<Decl*, 2> Decls;
+ for ( ; Begin != End; ++Begin)
+ Decls.push_back(*Begin);
+
+ Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
+}
+
+void StmtPrinter::VisitNullStmt(NullStmt *Node) {
+ Indent() << ";\n";
+}
+
+void StmtPrinter::VisitDeclStmt(DeclStmt *Node) {
+ Indent();
+ PrintRawDeclStmt(Node);
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) {
+ Indent();
+ PrintRawCompoundStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
+ Indent(-1) << "case ";
+ PrintExpr(Node->getLHS());
+ if (Node->getRHS()) {
+ OS << " ... ";
+ PrintExpr(Node->getRHS());
+ }
+ OS << ":\n";
+
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) {
+ Indent(-1) << "default:\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
+ Indent(-1) << Node->getName() << ":\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
+ OS << "[[";
+ bool first = true;
+ for (AttrVec::const_iterator it = Node->getAttrs().begin(),
+ end = Node->getAttrs().end();
+ it != end; ++it) {
+ if (!first)
+ OS << ", ";
+ first = false;
+ // TODO: check this
+ (*it)->printPretty(OS, Context);
+ }
+ OS << "]] ";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
+ OS << "if (";
+ PrintExpr(If->getCond());
+ OS << ')';
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << (If->getElse() ? ' ' : '\n');
+ } else {
+ OS << '\n';
+ PrintStmt(If->getThen());
+ if (If->getElse()) Indent();
+ }
+
+ if (Stmt *Else = If->getElse()) {
+ OS << "else";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << '\n';
+ } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) {
+ OS << ' ';
+ PrintRawIfStmt(ElseIf);
+ } else {
+ OS << '\n';
+ PrintStmt(If->getElse());
+ }
+ }
+}
+
+void StmtPrinter::VisitIfStmt(IfStmt *If) {
+ Indent();
+ PrintRawIfStmt(If);
+}
+
+void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
+ Indent() << "switch (";
+ PrintExpr(Node->getCond());
+ OS << ")";
+
+ // Pretty print compoundstmt bodies (very common).
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ OS << " ";
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
+ Indent() << "while (";
+ PrintExpr(Node->getCond());
+ OS << ")\n";
+ PrintStmt(Node->getBody());
+}
+
+void StmtPrinter::VisitDoStmt(DoStmt *Node) {
+ Indent() << "do ";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << " ";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ Indent();
+ }
+
+ OS << "while (";
+ PrintExpr(Node->getCond());
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitForStmt(ForStmt *Node) {
+ Indent() << "for (";
+ if (Node->getInit()) {
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getInit()));
+ }
+ OS << ";";
+ if (Node->getCond()) {
+ OS << " ";
+ PrintExpr(Node->getCond());
+ }
+ OS << ";";
+ if (Node->getInc()) {
+ OS << " ";
+ PrintExpr(Node->getInc());
+ }
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
+ Indent() << "for (";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getElement()));
+ OS << " in ";
+ PrintExpr(Node->getCollection());
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) {
+ Indent() << "for (";
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressInitializers = true;
+ Node->getLoopVariable()->print(OS, SubPolicy, IndentLevel);
+ OS << " : ";
+ PrintExpr(Node->getRangeInit());
+ OS << ") {\n";
+ PrintStmt(Node->getBody());
+ Indent() << "}\n";
+}
+
+void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
+ Indent();
+ if (Node->isIfExists())
+ OS << "__if_exists (";
+ else
+ OS << "__if_not_exists (";
+
+ if (NestedNameSpecifier *Qualifier
+ = Node->getQualifierLoc().getNestedNameSpecifier())
+ Qualifier->print(OS, Policy);
+
+ OS << Node->getNameInfo() << ") ";
+
+ PrintRawCompoundStmt(Node->getSubStmt());
+}
+
+void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
+ Indent() << "goto " << Node->getLabel()->getName() << ";\n";
+}
+
+void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
+ Indent() << "goto *";
+ PrintExpr(Node->getTarget());
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
+ Indent() << "continue;\n";
+}
+
+void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
+ Indent() << "break;\n";
+}
+
+
+void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
+ Indent() << "return";
+ if (Node->getRetValue()) {
+ OS << " ";
+ PrintExpr(Node->getRetValue());
+ }
+ OS << ";\n";
+}
+
+
+void StmtPrinter::VisitAsmStmt(AsmStmt *Node) {
+ Indent() << "asm ";
+
+ if (Node->isVolatile())
+ OS << "volatile ";
+
+ OS << "(";
+ VisitStringLiteral(Node->getAsmString());
+
+ // Outputs
+ if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 ||
+ Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getOutputName(i).empty()) {
+ OS << '[';
+ OS << Node->getOutputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getOutputConstraintLiteral(i));
+ OS << " ";
+ Visit(Node->getOutputExpr(i));
+ }
+
+ // Inputs
+ if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getInputName(i).empty()) {
+ OS << '[';
+ OS << Node->getInputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getInputConstraintLiteral(i));
+ OS << " ";
+ Visit(Node->getInputExpr(i));
+ }
+
+ // Clobbers
+ if (Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ VisitStringLiteral(Node->getClobber(i));
+ }
+
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
+ Indent() << "@try";
+ if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
+ PrintRawCompoundStmt(TS);
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I);
+ Indent() << "@catch(";
+ if (catchStmt->getCatchParamDecl()) {
+ if (Decl *DS = catchStmt->getCatchParamDecl())
+ PrintRawDecl(DS);
+ }
+ OS << ")";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ }
+ }
+
+ if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>(
+ Node->getFinallyStmt())) {
+ Indent() << "@finally";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
+ OS << "\n";
+ }
+}
+
+void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) {
+}
+
+void StmtPrinter::VisitObjCAtCatchStmt (ObjCAtCatchStmt *Node) {
+ Indent() << "@catch (...) { /* todo */ } \n";
+}
+
+void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
+ Indent() << "@throw";
+ if (Node->getThrowExpr()) {
+ OS << " ";
+ PrintExpr(Node->getThrowExpr());
+ }
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
+ Indent() << "@synchronized (";
+ PrintExpr(Node->getSynchExpr());
+ OS << ")";
+ PrintRawCompoundStmt(Node->getSynchBody());
+ OS << "\n";
+}
+
+void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
+ Indent() << "@autoreleasepool";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt()));
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
+ OS << "catch (";
+ if (Decl *ExDecl = Node->getExceptionDecl())
+ PrintRawDecl(ExDecl);
+ else
+ OS << "...";
+ OS << ") ";
+ PrintRawCompoundStmt(cast<CompoundStmt>(Node->getHandlerBlock()));
+}
+
+void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) {
+ Indent();
+ PrintRawCXXCatchStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
+ Indent() << "try ";
+ PrintRawCompoundStmt(Node->getTryBlock());
+ for (unsigned i = 0, e = Node->getNumHandlers(); i < e; ++i) {
+ OS << " ";
+ PrintRawCXXCatchStmt(Node->getHandler(i));
+ }
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) {
+ Indent() << (Node->getIsCXXTry() ? "try " : "__try ");
+ PrintRawCompoundStmt(Node->getTryBlock());
+ SEHExceptStmt *E = Node->getExceptHandler();
+ SEHFinallyStmt *F = Node->getFinallyHandler();
+ if (E)
+ PrintRawSEHExceptHandler(E);
+ else {
+ assert(F && "Must have a finally block...");
+ PrintRawSEHFinallyStmt(F);
+ }
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawSEHFinallyStmt(SEHFinallyStmt *Node) {
+ OS << "__finally ";
+ PrintRawCompoundStmt(Node->getBlock());
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawSEHExceptHandler(SEHExceptStmt *Node) {
+ OS << "__except (";
+ VisitExpr(Node->getFilterExpr());
+ OS << ")\n";
+ PrintRawCompoundStmt(Node->getBlock());
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHExceptStmt(SEHExceptStmt *Node) {
+ Indent();
+ PrintRawSEHExceptHandler(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) {
+ Indent();
+ PrintRawSEHFinallyStmt(Node);
+ OS << "\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+
+void StmtPrinter::VisitDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *Node) {
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+
+void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
+ if (Node->getQualifier())
+ Node->getQualifier()->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+
+void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ OS << *Node->getDecl();
+}
+
+void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ if (Node->isSuperReceiver())
+ OS << "super.";
+ else if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ }
+
+ if (Node->isImplicitProperty())
+ OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ OS << Node->getExplicitProperty()->getName();
+}
+
+void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
+ PrintExpr(Node->getBaseExpr());
+ OS << "[";
+ PrintExpr(Node->getKeyExpr());
+ OS << "]";
+}
+
+void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
+ switch (Node->getIdentType()) {
+ default:
+ llvm_unreachable("unknown case");
+ case PredefinedExpr::Func:
+ OS << "__func__";
+ break;
+ case PredefinedExpr::Function:
+ OS << "__FUNCTION__";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ OS << "__PRETTY_FUNCTION__";
+ break;
+ }
+}
+
+void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
+ unsigned value = Node->getValue();
+
+ switch (Node->getKind()) {
+ case CharacterLiteral::Ascii: break; // no prefix.
+ case CharacterLiteral::Wide: OS << 'L'; break;
+ case CharacterLiteral::UTF16: OS << 'u'; break;
+ case CharacterLiteral::UTF32: OS << 'U'; break;
+ }
+
+ switch (value) {
+ case '\\':
+ OS << "'\\\\'";
+ break;
+ case '\'':
+ OS << "'\\''";
+ break;
+ case '\a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ OS << "'\\a'";
+ break;
+ case '\b':
+ OS << "'\\b'";
+ break;
+ // Nonstandard escape sequence.
+ /*case '\e':
+ OS << "'\\e'";
+ break;*/
+ case '\f':
+ OS << "'\\f'";
+ break;
+ case '\n':
+ OS << "'\\n'";
+ break;
+ case '\r':
+ OS << "'\\r'";
+ break;
+ case '\t':
+ OS << "'\\t'";
+ break;
+ case '\v':
+ OS << "'\\v'";
+ break;
+ default:
+ if (value < 256 && isprint(value)) {
+ OS << "'" << (char)value << "'";
+ } else if (value < 256) {
+ OS << "'\\x";
+ OS.write_hex(value) << "'";
+ } else {
+ // FIXME: What should we really do here?
+ OS << value;
+ }
+ }
+}
+
+void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ OS << Node->getValue().toString(10, isSigned);
+
+ // Emit suffixes. Integer literals are always a builtin integer type.
+ switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unexpected type for integer literal!");
+ // FIXME: The Short and UShort cases are to handle cases where a short
+ // integral literal is formed during template instantiation. They should
+ // be removed when template instantiation no longer needs integer literals.
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int: break; // no suffix.
+ case BuiltinType::UInt: OS << 'U'; break;
+ case BuiltinType::Long: OS << 'L'; break;
+ case BuiltinType::ULong: OS << "UL"; break;
+ case BuiltinType::LongLong: OS << "LL"; break;
+ case BuiltinType::ULongLong: OS << "ULL"; break;
+ case BuiltinType::Int128: OS << "i128"; break;
+ case BuiltinType::UInt128: OS << "Ui128"; break;
+ }
+}
+void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
+ SmallString<16> Str;
+ Node->getValue().toString(Str);
+ OS << Str;
+}
+
+void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
+ PrintExpr(Node->getSubExpr());
+ OS << "i";
+}
+
+void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
+ switch (Str->getKind()) {
+ case StringLiteral::Ascii: break; // no prefix.
+ case StringLiteral::Wide: OS << 'L'; break;
+ case StringLiteral::UTF8: OS << "u8"; break;
+ case StringLiteral::UTF16: OS << 'u'; break;
+ case StringLiteral::UTF32: OS << 'U'; break;
+ }
+ OS << '"';
+ static const char Hex[] = "0123456789ABCDEF";
+
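+ // Index of the last character printed with a \x escape, or a value past
+ // the end if none has been printed yet. Used below to split the literal
+ // with "" when the next character is a hex digit that would otherwise be
+ // absorbed into the escape.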
+ unsigned LastSlashX = Str->getLength();
+ for (unsigned I = 0, N = Str->getLength(); I != N; ++I) {
+ switch (uint32_t Char = Str->getCodeUnit(I)) {
+ default:
+ // FIXME: Convert UTF-8 back to codepoints before rendering.
+
+ // Convert UTF-16 surrogate pairs back to codepoints before rendering.
+ // Leave invalid surrogates alone; we'll use \x for those.
+ if (Str->getKind() == StringLiteral::UTF16 && I != N - 1 &&
+ Char >= 0xd800 && Char <= 0xdbff) {
+ uint32_t Trail = Str->getCodeUnit(I + 1);
+ if (Trail >= 0xdc00 && Trail <= 0xdfff) {
+ Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
+ ++I;
+ }
+ }
+
+ if (Char > 0xff) {
+ // If this is a wide string, output characters over 0xff using \x
+ // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
+ // codepoint: use \x escapes for invalid codepoints.
+ if (Str->getKind() == StringLiteral::Wide ||
+ (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
+ // FIXME: Is this the best way to print wchar_t?
+ OS << "\\x";
+ int Shift = 28;
+ while ((Char >> Shift) == 0)
+ Shift -= 4;
+ for (/**/; Shift >= 0; Shift -= 4)
+ OS << Hex[(Char >> Shift) & 15];
+ LastSlashX = I;
+ break;
+ }
+
+ if (Char > 0xffff)
+ OS << "\\U00"
+ << Hex[(Char >> 20) & 15]
+ << Hex[(Char >> 16) & 15];
+ else
+ OS << "\\u";
+ OS << Hex[(Char >> 12) & 15]
+ << Hex[(Char >> 8) & 15]
+ << Hex[(Char >> 4) & 15]
+ << Hex[(Char >> 0) & 15];
+ break;
+ }
+
+ // If we used \x... for the previous character, and this character is a
+ // hexadecimal digit, prevent it being slurped as part of the \x.
+ if (LastSlashX + 1 == I) {
+ switch (Char) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ OS << "\"\"";
+ }
+ }
+
+ if (Char <= 0xff && isprint(Char))
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0' + ((Char >> 6) & 7))
+ << (char)('0' + ((Char >> 3) & 7))
+ << (char)('0' + ((Char >> 0) & 7));
+ break;
+ // Handle some common non-printable cases to make dumps prettier.
+ case '\\': OS << "\\\\"; break;
+ case '"': OS << "\\\""; break;
+ case '\n': OS << "\\n"; break;
+ case '\t': OS << "\\t"; break;
+ case '\a': OS << "\\a"; break;
+ case '\b': OS << "\\b"; break;
+ }
+ }
+ OS << '"';
+}
+void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
+ if (!Node->isPostfix()) {
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+
+ // Print a space if this is an "identifier operator" like __real, or if
+ // it might be concatenated incorrectly like '+'.
+ switch (Node->getOpcode()) {
+ default: break;
+ case UO_Real:
+ case UO_Imag:
+ case UO_Extension:
+ OS << ' ';
+ break;
+ case UO_Plus:
+ case UO_Minus:
+ if (isa<UnaryOperator>(Node->getSubExpr()))
+ OS << ' ';
+ break;
+ }
+ }
+ PrintExpr(Node->getSubExpr());
+
+ if (Node->isPostfix())
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+}
+
+void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
+ OS << "__builtin_offsetof(";
+ OS << Node->getTypeSourceInfo()->getType().getAsString(Policy) << ", ";
+ bool PrintedSomething = false;
+ for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i);
+ if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Array) {
+ // Array node
+ OS << "[";
+ PrintExpr(Node->getIndexExpr(ON.getArrayExprIndex()));
+ OS << "]";
+ PrintedSomething = true;
+ continue;
+ }
+
+ // Skip implicit base indirections.
+ if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Base)
+ continue;
+
+ // Field or identifier node.
+ IdentifierInfo *Id = ON.getFieldName();
+ if (!Id)
+ continue;
+
+ if (PrintedSomething)
+ OS << ".";
+ else
+ PrintedSomething = true;
+ OS << Id->getName();
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
+ switch(Node->getKind()) {
+ case UETT_SizeOf:
+ OS << "sizeof";
+ break;
+ case UETT_AlignOf:
+ OS << "__alignof";
+ break;
+ case UETT_VecStep:
+ OS << "vec_step";
+ break;
+ }
+ if (Node->isArgumentType())
+ OS << "(" << Node->getArgumentType().getAsString(Policy) << ")";
+ else {
+ OS << " ";
+ PrintExpr(Node->getArgumentExpr());
+ }
+}
+
+void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
+ OS << "_Generic(";
+ PrintExpr(Node->getControllingExpr());
+ for (unsigned i = 0; i != Node->getNumAssocs(); ++i) {
+ OS << ", ";
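+ // A null association type represents the 'default' association.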
+ QualType T = Node->getAssocType(i);
+ if (T.isNull())
+ OS << "default";
+ else
+ OS << T.getAsString(Policy);
+ OS << ": ";
+ PrintExpr(Node->getAssocExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
+ PrintExpr(Node->getLHS());
+ OS << "[";
+ PrintExpr(Node->getRHS());
+ OS << "]";
+}
+
+void StmtPrinter::PrintCallArgs(CallExpr *Call) {
+ for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(Call->getArg(i));
+ }
+}
+
+void StmtPrinter::VisitCallExpr(CallExpr *Call) {
+ PrintExpr(Call->getCallee());
+ OS << "(";
+ PrintCallArgs(Call);
+ OS << ")";
+}
+void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
+ // FIXME: Suppress printing implicit bases (like "this")
+ PrintExpr(Node->getBase());
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
+ if (FD->isAnonymousStructOrUnion())
+ return;
+ OS << (Node->isArrow() ? "->" : ".");
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->isa" : ".isa");
+}
+
+void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ OS << Node->getAccessor().getName();
+}
+void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
+ PrintExpr(Node->getInitializer());
+}
+void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
+ // No need to print anything, simply forward to the sub expression.
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
+ PrintExpr(Node->getCond());
+ OS << " ? ";
+ PrintExpr(Node->getLHS());
+ OS << " : ";
+ PrintExpr(Node->getRHS());
+}
+
+// GNU extensions.
+
+void
+StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) {
+ PrintExpr(Node->getCommon());
+ OS << " ?: ";
+ PrintExpr(Node->getFalseExpr());
+}
+void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ OS << "&&" << Node->getLabel()->getName();
+}
+
+void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
+ OS << "(";
+ PrintRawCompoundStmt(E->getSubStmt());
+ OS << ")";
+}
+
+void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
+ OS << "__builtin_choose_expr(";
+ PrintExpr(Node->getCond());
+ OS << ", ";
+ PrintExpr(Node->getLHS());
+ OS << ", ";
+ PrintExpr(Node->getRHS());
+ OS << ")";
+}
+
+void StmtPrinter::VisitGNUNullExpr(GNUNullExpr *) {
+ OS << "__null";
+}
+
+void StmtPrinter::VisitShuffleVectorExpr(ShuffleVectorExpr *Node) {
+ OS << "__builtin_shufflevector(";
+ for (unsigned i = 0, e = Node->getNumSubExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitInitListExpr(InitListExpr* Node) {
+ if (Node->getSyntacticForm()) {
+ Visit(Node->getSyntacticForm());
+ return;
+ }
+
+ OS << "{ ";
+ for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) {
+ if (i) OS << ", ";
+ if (Node->getInit(i))
+ PrintExpr(Node->getInit(i));
+ else
+ OS << "0";
+ }
+ OS << " }";
+}
+
+void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) {
+ OS << "( ";
+ for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << " )";
+}
+
+void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
+ for (DesignatedInitExpr::designators_iterator D = Node->designators_begin(),
+ DEnd = Node->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (D->getDotLoc().isInvalid())
+ OS << D->getFieldName()->getName() << ":";
+ else
+ OS << "." << D->getFieldName()->getName();
+ } else {
+ OS << "[";
+ if (D->isArrayDesignator()) {
+ PrintExpr(Node->getArrayIndex(*D));
+ } else {
+ PrintExpr(Node->getArrayRangeStart(*D));
+ OS << " ... ";
+ PrintExpr(Node->getArrayRangeEnd(*D));
+ }
+ OS << "]";
+ }
+ }
+
+ OS << " = ";
+ PrintExpr(Node->getInit());
+}
+
+void StmtPrinter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *Node) {
+ if (Policy.LangOpts.CPlusPlus)
+ OS << "/*implicit*/" << Node->getType().getAsString(Policy) << "()";
+ else {
+ OS << "/*implicit*/(" << Node->getType().getAsString(Policy) << ")";
+ if (Node->getType()->isRecordType())
+ OS << "{}";
+ else
+ OS << 0;
+ }
+}
+
+void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
+ OS << "__builtin_va_arg(";
+ PrintExpr(Node->getSubExpr());
+ OS << ", ";
+ OS << Node->getType().getAsString(Policy);
+ OS << ")";
+}
+
+void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
+ PrintExpr(Node->getSyntacticForm());
+}
+
+void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
+ const char *Name = 0;
+ switch (Node->getOp()) {
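+ // Builtins.def expands one case per atomic builtin; the plain BUILTIN
+ // entries expand to nothing, so only the ATOMIC_BUILTINs generate cases.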
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case AtomicExpr::AO ## ID: \
+ Name = #ID "("; \
+ break;
+#include "clang/Basic/Builtins.def"
+ }
+ OS << Name;
+
+ // AtomicExpr stores its subexpressions in a permuted order.
+ PrintExpr(Node->getPtr());
+ OS << ", ";
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
+ Node->getOp() != AtomicExpr::AO__atomic_load_n) {
+ PrintExpr(Node->getVal1());
+ OS << ", ";
+ }
+ if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
+ Node->isCmpXChg()) {
+ PrintExpr(Node->getVal2());
+ OS << ", ";
+ }
+ if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
+ PrintExpr(Node->getWeak());
+ OS << ", ";
+ }
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_init)
+ PrintExpr(Node->getOrder());
+ if (Node->isCmpXChg()) {
+ OS << ", ";
+ PrintExpr(Node->getOrderFail());
+ }
+ OS << ")";
+}
+
+// C++
+void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
+ const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
+ "",
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ OverloadedOperatorKind Kind = Node->getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind];
+ }
+ } else if (Kind == OO_Call) {
+ PrintExpr(Node->getArg(0));
+ OS << '(';
+ for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
+ if (ArgIdx > 1)
+ OS << ", ";
+ if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
+ PrintExpr(Node->getArg(ArgIdx));
+ }
+ OS << ')';
+ } else if (Kind == OO_Subscript) {
+ PrintExpr(Node->getArg(0));
+ OS << '[';
+ PrintExpr(Node->getArg(1));
+ OS << ']';
+ } else if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else if (Node->getNumArgs() == 2) {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(1));
+ } else {
+ llvm_unreachable("unknown overloaded operator");
+ }
+}
+
+void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
+ VisitCallExpr(cast<CallExpr>(Node));
+}
+
+void StmtPrinter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *Node) {
+ PrintExpr(Node->getCallee());
+ OS << "<<<";
+ PrintCallArgs(Node->getConfig());
+ OS << ">>>(";
+ PrintCallArgs(Node);
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ OS << Node->getCastName() << '<';
+ OS << Node->getTypeAsWritten().getAsString(Policy) << ">(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXStaticCastExpr(CXXStaticCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
+ OS << "typeid(";
+ if (Node->isTypeOperand()) {
+ OS << Node->getTypeOperand().getAsString(Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) {
+ OS << "__uuidof(";
+ if (Node->isTypeOperand()) {
+ OS << Node->getTypeOperand().getAsString(Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
+ switch (Node->getLiteralOperatorKind()) {
+ case UserDefinedLiteral::LOK_Raw:
+ OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString();
+ break;
+ case UserDefinedLiteral::LOK_Template: {
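+ // Reconstruct the raw literal spelling from the literal operator's
+ // template argument pack: each pack element is one character of the
+ // original token.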
+ DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
+ const TemplateArgumentList *Args =
+ cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
+ assert(Args);
+ const TemplateArgument &Pack = Args->get(0);
+ for (TemplateArgument::pack_iterator I = Pack.pack_begin(),
+ E = Pack.pack_end(); I != E; ++I) {
+ char C = (char)I->getAsIntegral()->getZExtValue();
+ OS << C;
+ }
+ break;
+ }
+ case UserDefinedLiteral::LOK_Integer: {
+ // Print integer literal without suffix.
+ IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
+ OS << Int->getValue().toString(10, /*isSigned*/false);
+ break;
+ }
+ case UserDefinedLiteral::LOK_Floating:
+ case UserDefinedLiteral::LOK_String:
+ case UserDefinedLiteral::LOK_Character:
+ PrintExpr(Node->getCookedLiteral());
+ break;
+ }
+ OS << Node->getUDSuffix()->getName();
+}
+
+void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "true" : "false");
+}
+
+void StmtPrinter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *Node) {
+ OS << "nullptr";
+}
+
+void StmtPrinter::VisitCXXThisExpr(CXXThisExpr *Node) {
+ OS << "this";
+}
+
+void StmtPrinter::VisitCXXThrowExpr(CXXThrowExpr *Node) {
+ if (Node->getSubExpr() == 0)
+ OS << "throw";
+ else {
+ OS << "throw ";
+ PrintExpr(Node->getSubExpr());
+ }
+}
+
+void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
+ // Nothing to print: we picked up the default argument
+}
+
+void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ OS << Node->getType().getAsString(Policy);
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
+ PrintExpr(Node->getSubExpr());
+}
+
+void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
+ OS << Node->getType().getAsString(Policy);
+ OS << "(";
+ for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
+ OS << '[';
+ bool NeedComma = false;
+ switch (Node->getCaptureDefault()) {
+ case LCD_None:
+ break;
+
+ case LCD_ByCopy:
+ OS << '=';
+ NeedComma = true;
+ break;
+
+ case LCD_ByRef:
+ OS << '&';
+ NeedComma = true;
+ break;
+ }
+ for (LambdaExpr::capture_iterator C = Node->explicit_capture_begin(),
+ CEnd = Node->explicit_capture_end();
+ C != CEnd;
+ ++C) {
+ if (NeedComma)
+ OS << ", ";
+ NeedComma = true;
+
+ switch (C->getCaptureKind()) {
+ case LCK_This:
+ OS << "this";
+ break;
+
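+ // Only spell out the '&' or '=' prefix when it is not already implied by
+ // the lambda's capture-default.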
+ case LCK_ByRef:
+ if (Node->getCaptureDefault() != LCD_ByRef)
+ OS << '&';
+ OS << C->getCapturedVar()->getName();
+ break;
+
+ case LCK_ByCopy:
+ if (Node->getCaptureDefault() != LCD_ByCopy)
+ OS << '=';
+ OS << C->getCapturedVar()->getName();
+ break;
+ }
+ }
+ OS << ']';
+
+ if (Node->hasExplicitParameters()) {
+ OS << " (";
+ CXXMethodDecl *Method = Node->getCallOperator();
+ NeedComma = false;
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; ++P) {
+ if (NeedComma) {
+ OS << ", ";
+ } else {
+ NeedComma = true;
+ }
+ std::string ParamStr = (*P)->getNameAsString();
+ (*P)->getOriginalType().getAsStringInternal(ParamStr, Policy);
+ OS << ParamStr;
+ }
+ if (Method->isVariadic()) {
+ if (NeedComma)
+ OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+
+ if (Node->isMutable())
+ OS << " mutable";
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+ {
+ std::string ExceptionSpec;
+ Proto->printExceptionSpecification(ExceptionSpec, Policy);
+ OS << ExceptionSpec;
+ }
+
+ // FIXME: Attributes
+
+ // Print the trailing return type if it was specified in the source.
+ if (Node->hasExplicitResultType())
+ OS << " -> " << Proto->getResultType().getAsString(Policy);
+ }
+
+ // Print the body.
+ CompoundStmt *Body = Node->getBody();
+ OS << ' ';
+ PrintStmt(Body);
+}
+
+void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
+ if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo())
+ OS << TSInfo->getType().getAsString(Policy) << "()";
+ else
+ OS << Node->getType().getAsString(Policy) << "()";
+}
+
+void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->isGlobalNew())
+ OS << "::";
+ OS << "new ";
+ unsigned NumPlace = E->getNumPlacementArgs();
+ if (NumPlace > 0) {
+ OS << "(";
+ PrintExpr(E->getPlacementArg(0));
+ for (unsigned i = 1; i < NumPlace; ++i) {
+ OS << ", ";
+ PrintExpr(E->getPlacementArg(i));
+ }
+ OS << ") ";
+ }
+ if (E->isParenTypeId())
+ OS << "(";
+ std::string TypeS;
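+ // For array new, pretty-print the array bound into the declarator string
+ // first, so that it is emitted as part of the allocated type.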
+ if (Expr *Size = E->getArraySize()) {
+ llvm::raw_string_ostream s(TypeS);
+ Size->printPretty(s, Context, Helper, Policy);
+ s.flush();
+ TypeS = "[" + TypeS + "]";
+ }
+ E->getAllocatedType().getAsStringInternal(TypeS, Policy);
+ OS << TypeS;
+ if (E->isParenTypeId())
+ OS << ")";
+
+ CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
+ if (InitStyle) {
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << "(";
+ PrintExpr(E->getInitializer());
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << ")";
+ }
+}
+
+void StmtPrinter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->isGlobalDelete())
+ OS << "::";
+ OS << "delete ";
+ if (E->isArrayForm())
+ OS << "[] ";
+ PrintExpr(E->getArgument());
+}
+
+void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ PrintExpr(E->getBase());
+ if (E->isArrow())
+ OS << "->";
+ else
+ OS << '.';
+ if (E->getQualifier())
+ E->getQualifier()->print(OS, Policy);
+
+ std::string TypeS;
+ if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
+ OS << II->getName();
+ else
+ E->getDestroyedType().getAsStringInternal(TypeS, Policy);
+ OS << TypeS;
+}
+
+void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(E->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(E->getArg(i));
+ }
+}
+
+void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ // Just forward to the sub expression.
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *Node) {
+ OS << Node->getTypeAsWritten().getAsString(Policy);
+ OS << "(";
+ for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *Node) {
+ if (!Node->isImplicitAccess()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs()) {
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+ }
+}
+
+void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
+ if (!Node->isImplicitAccess()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs()) {
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+ }
+}
+
+static const char *getTypeTraitName(UnaryTypeTrait UTT) {
+ switch (UTT) {
+ case UTT_HasNothrowAssign: return "__has_nothrow_assign";
+ case UTT_HasNothrowConstructor: return "__has_nothrow_constructor";
+ case UTT_HasNothrowCopy: return "__has_nothrow_copy";
+ case UTT_HasTrivialAssign: return "__has_trivial_assign";
+ case UTT_HasTrivialDefaultConstructor: return "__has_trivial_constructor";
+ case UTT_HasTrivialCopy: return "__has_trivial_copy";
+ case UTT_HasTrivialDestructor: return "__has_trivial_destructor";
+ case UTT_HasVirtualDestructor: return "__has_virtual_destructor";
+ case UTT_IsAbstract: return "__is_abstract";
+ case UTT_IsArithmetic: return "__is_arithmetic";
+ case UTT_IsArray: return "__is_array";
+ case UTT_IsClass: return "__is_class";
+ case UTT_IsCompleteType: return "__is_complete_type";
+ case UTT_IsCompound: return "__is_compound";
+ case UTT_IsConst: return "__is_const";
+ case UTT_IsEmpty: return "__is_empty";
+ case UTT_IsEnum: return "__is_enum";
+ case UTT_IsFinal: return "__is_final";
+ case UTT_IsFloatingPoint: return "__is_floating_point";
+ case UTT_IsFunction: return "__is_function";
+ case UTT_IsFundamental: return "__is_fundamental";
+ case UTT_IsIntegral: return "__is_integral";
+ case UTT_IsLiteral: return "__is_literal";
+ case UTT_IsLvalueReference: return "__is_lvalue_reference";
+ case UTT_IsMemberFunctionPointer: return "__is_member_function_pointer";
+ case UTT_IsMemberObjectPointer: return "__is_member_object_pointer";
+ case UTT_IsMemberPointer: return "__is_member_pointer";
+ case UTT_IsObject: return "__is_object";
+ case UTT_IsPOD: return "__is_pod";
+ case UTT_IsPointer: return "__is_pointer";
+ case UTT_IsPolymorphic: return "__is_polymorphic";
+ case UTT_IsReference: return "__is_reference";
+ case UTT_IsRvalueReference: return "__is_rvalue_reference";
+ case UTT_IsScalar: return "__is_scalar";
+ case UTT_IsSigned: return "__is_signed";
+ case UTT_IsStandardLayout: return "__is_standard_layout";
+ case UTT_IsTrivial: return "__is_trivial";
+ case UTT_IsTriviallyCopyable: return "__is_trivially_copyable";
+ case UTT_IsUnion: return "__is_union";
+ case UTT_IsUnsigned: return "__is_unsigned";
+ case UTT_IsVoid: return "__is_void";
+ case UTT_IsVolatile: return "__is_volatile";
+ }
+ llvm_unreachable("Type trait not covered by switch statement");
+}
+
+static const char *getTypeTraitName(BinaryTypeTrait BTT) {
+ switch (BTT) {
+ case BTT_IsBaseOf: return "__is_base_of";
+ case BTT_IsConvertible: return "__is_convertible";
+ case BTT_IsSame: return "__is_same";
+ case BTT_TypeCompatible: return "__builtin_types_compatible_p";
+ case BTT_IsConvertibleTo: return "__is_convertible_to";
+ case BTT_IsTriviallyAssignable: return "__is_trivially_assignable";
+ }
+ llvm_unreachable("Binary type trait not covered by switch");
+}
+
+static const char *getTypeTraitName(TypeTrait TT) {
+ switch (TT) {
+ case clang::TT_IsTriviallyConstructible:return "__is_trivially_constructible";
+ }
+ llvm_unreachable("Type trait not covered by switch");
+}
+
+static const char *getTypeTraitName(ArrayTypeTrait ATT) {
+ switch (ATT) {
+ case ATT_ArrayRank: return "__array_rank";
+ case ATT_ArrayExtent: return "__array_extent";
+ }
+ llvm_unreachable("Array type trait not covered by switch");
+}
+
+static const char *getExpressionTraitName(ExpressionTrait ET) {
+ switch (ET) {
+ case ET_IsLValueExpr: return "__is_lvalue_expr";
+ case ET_IsRValueExpr: return "__is_rvalue_expr";
+ }
+ llvm_unreachable("Expression type trait not covered by switch");
+}
+
+void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getQueriedType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getLhsType().getAsString(Policy) << ","
+ << E->getRhsType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "(";
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+ OS << E->getArg(I)->getType().getAsString(Policy);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getQueriedType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ OS << getExpressionTraitName(E->getTrait()) << "(";
+ PrintExpr(E->getQueriedExpression());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ OS << "noexcept(";
+ PrintExpr(E->getOperand());
+ OS << ")";
+}
+
+void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ PrintExpr(E->getPattern());
+ OS << "...";
+}
+
+void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ OS << "sizeof...(" << *E->getPack() << ")";
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *Node) {
+ OS << *Node->getParameterPack();
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *Node) {
+ Visit(Node->getReplacement());
+}
+
+void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
+ PrintExpr(Node->GetTemporaryExpr());
+}
+
+// Obj-C
+
+void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
+ OS << "@";
+ VisitStringLiteral(Node->getString());
+}
+
+void StmtPrinter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ OS << "@";
+ Visit(E->getNumber());
+}
+
+void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ OS << "@[ ";
+ StmtRange ch = E->children();
+ if (ch.first != ch.second) {
+ while (1) {
+ Visit(*ch.first);
+ ++ch.first;
+ if (ch.first == ch.second) break;
+ OS << ", ";
+ }
+ }
+ OS << " ]";
+}
+
+void StmtPrinter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ OS << "@{ ";
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ Visit(Element.Key);
+ OS << " : ";
+ Visit(Element.Value);
+ if (Element.isPackExpansion())
+ OS << "...";
+ }
+ OS << " }";
+}
+
+void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ OS << "@encode(" << Node->getEncodedType().getAsString(Policy) << ')';
+}
+
+void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ OS << "@selector(" << Node->getSelector().getAsString() << ')';
+}
+
+void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ OS << "@protocol(" << *Node->getProtocol() << ')';
+}
+
+void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
+ OS << "[";
+ switch (Mess->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ PrintExpr(Mess->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class:
+ OS << Mess->getClassReceiver().getAsString(Policy);
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ case ObjCMessageExpr::SuperClass:
+ OS << "Super";
+ break;
+ }
+
+ OS << ' ';
+ Selector selector = Mess->getSelector();
+ if (selector.isUnarySelector()) {
+ OS << selector.getNameForSlot(0);
+ } else {
+ for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) {
+ if (i < selector.getNumArgs()) {
+ if (i > 0) OS << ' ';
+ if (selector.getIdentifierInfoForSlot(i))
+ OS << selector.getIdentifierInfoForSlot(i)->getName() << ':';
+ else
+ OS << ":";
+ }
+ else OS << ", "; // Handle variadic methods.
+
+ PrintExpr(Mess->getArg(i));
+ }
+ }
+ OS << "]";
+}
+
+void StmtPrinter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+void
+StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ OS << "(" << E->getBridgeKindName() << E->getType().getAsString(Policy)
+ << ")";
+ PrintExpr(E->getSubExpr());
+}
+
+void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
+ BlockDecl *BD = Node->getBlockDecl();
+ OS << "^";
+
+ const FunctionType *AFT = Node->getFunctionType();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ OS << "()";
+ } else if (!BD->param_empty() || cast<FunctionProtoType>(AFT)->isVariadic()) {
+ OS << '(';
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) OS << ", ";
+ ParamStr = (*AI)->getNameAsString();
+ (*AI)->getType().getAsStringInternal(ParamStr, Policy);
+ OS << ParamStr;
+ }
+
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+ }
+}
+
+void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ PrintExpr(Node->getSourceExpr());
+}
+
+void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
+ OS << "__builtin_astype(";
+ PrintExpr(Node->getSrcExpr());
+ OS << ", " << Node->getType().getAsString();
+ OS << ")";
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+void Stmt::dumpPretty(ASTContext& Context) const {
+ printPretty(llvm::errs(), Context, 0,
+ PrintingPolicy(Context.getLangOpts()));
+}
+
+void Stmt::printPretty(raw_ostream &OS, ASTContext& Context,
+ PrinterHelper* Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation) const {
+ if (this == 0) {
+ OS << "<NULL>";
+ return;
+ }
+
+ if (Policy.Dump && &Context) {
+ dump(OS, Context.getSourceManager());
+ return;
+ }
+
+ StmtPrinter P(OS, Context, Helper, Policy, Indentation);
+ P.Visit(const_cast<Stmt*>(this));
+}
+
+//===----------------------------------------------------------------------===//
+// PrinterHelper
+//===----------------------------------------------------------------------===//
+
+// Implement virtual destructor.
+PrinterHelper::~PrinterHelper() {}
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
new file mode 100644
index 0000000..e50523a
--- /dev/null
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -0,0 +1,1184 @@
+//===---- StmtProfile.cpp - Profile implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::Profile method, which builds a unique bit
+// representation that identifies a statement/expression.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/FoldingSet.h"
+using namespace clang;
+
+namespace {
+ class StmtProfiler : public ConstStmtVisitor<StmtProfiler> {
+ llvm::FoldingSetNodeID &ID;
+ const ASTContext &Context;
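+ /// \brief Whether to profile only the canonical form of the statement,
+ /// ignoring syntax-only details such as written nested-name-specifiers
+ /// and explicit template arguments (see the !Canonical checks below).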
+ bool Canonical;
+
+ public:
+ StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical)
+ : ID(ID), Context(Context), Canonical(Canonical) { }
+
+ void VisitStmt(const Stmt *S);
+
+#define STMT(Node, Base) void Visit##Node(const Node *S);
+#include "clang/AST/StmtNodes.inc"
+
+ /// \brief Visit a declaration that is referenced within an expression
+ /// or statement.
+ void VisitDecl(const Decl *D);
+
+ /// \brief Visit a type that is referenced within an expression or
+ /// statement.
+ void VisitType(QualType T);
+
+ /// \brief Visit a name that occurs within an expression or statement.
+ void VisitName(DeclarationName Name);
+
+ /// \brief Visit a nested-name-specifier that occurs within an expression
+ /// or statement.
+ void VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+
+ /// \brief Visit a template name that occurs within an expression or
+ /// statement.
+ void VisitTemplateName(TemplateName Name);
+
+ /// \brief Visit template arguments that occur within an expression or
+ /// statement.
+ void VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs);
+
+ /// \brief Visit a single template argument.
+ void VisitTemplateArgument(const TemplateArgument &Arg);
+ };
+}
+
+void StmtProfiler::VisitStmt(const Stmt *S) {
+ ID.AddInteger(S->getStmtClass());
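+ // Profile the children in order, recording a fixed marker for a null
+ // child so that its position still contributes to the profile.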
+ for (Stmt::const_child_range C = S->children(); C; ++C) {
+ if (*C)
+ Visit(*C);
+ else
+ ID.AddInteger(0);
+ }
+}
+
+void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
+ VisitStmt(S);
+ for (DeclStmt::const_decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D)
+ VisitDecl(*D);
+}
+
+void StmtProfiler::VisitNullStmt(const NullStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSwitchCase(const SwitchCase *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCaseStmt(const CaseStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDefaultStmt(const DefaultStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitLabelStmt(const LabelStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getDecl());
+}
+
+void StmtProfiler::VisitAttributedStmt(const AttributedStmt *S) {
+ VisitStmt(S);
+ // TODO: maybe visit attributes?
+}
+
+void StmtProfiler::VisitIfStmt(const IfStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitSwitchStmt(const SwitchStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitWhileStmt(const WhileStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitDoStmt(const DoStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitForStmt(const ForStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitGotoStmt(const GotoStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getLabel());
+}
+
+void StmtProfiler::VisitIndirectGotoStmt(const IndirectGotoStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitContinueStmt(const ContinueStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitBreakStmt(const BreakStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitAsmStmt(const AsmStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isVolatile());
+ ID.AddBoolean(S->isSimple());
+ VisitStringLiteral(S->getAsmString());
+ ID.AddInteger(S->getNumOutputs());
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ ID.AddString(S->getOutputName(I));
+ VisitStringLiteral(S->getOutputConstraintLiteral(I));
+ }
+ ID.AddInteger(S->getNumInputs());
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ ID.AddString(S->getInputName(I));
+ VisitStringLiteral(S->getInputConstraintLiteral(I));
+ }
+ ID.AddInteger(S->getNumClobbers());
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
+ VisitStringLiteral(S->getClobber(I));
+}
+
+void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) {
+ VisitStmt(S);
+ VisitType(S->getCaughtType());
+}
+
+void StmtProfiler::VisitCXXTryStmt(const CXXTryStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitMSDependentExistsStmt(const MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isIfExists());
+ VisitNestedNameSpecifier(S->getQualifierLoc().getNestedNameSpecifier());
+ VisitName(S->getNameInfo().getName());
+}
+
+void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHFinallyStmt(const SEHFinallyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHExceptStmt(const SEHExceptStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->hasEllipsis());
+ if (S->getCatchParamDecl())
+ VisitType(S->getCatchParamDecl()->getType());
+}
+
+void StmtProfiler::VisitObjCAtFinallyStmt(const ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtTryStmt(const ObjCAtTryStmt *S) {
+ VisitStmt(S);
+}
+
+void
+StmtProfiler::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtThrowStmt(const ObjCAtThrowStmt *S) {
+ VisitStmt(S);
+}
+
+void
+StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitExpr(const Expr *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
+ VisitExpr(S);
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitDecl(S->getDecl());
+ if (!Canonical)
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getIdentType());
+}
+
+void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+}
+
+void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getKind());
+ ID.AddInteger(S->getValue());
+}
+
+void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+ ID.AddBoolean(S->isExact());
+}
+
+void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
+ VisitExpr(S);
+ ID.AddString(S->getBytes());
+ ID.AddInteger(S->getKind());
+}
+
+void StmtProfiler::VisitParenExpr(const ParenExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitParenListExpr(const ParenListExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitUnaryOperator(const UnaryOperator *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOpcode());
+}
+
+void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
+ VisitType(S->getTypeSourceInfo()->getType());
+ unsigned n = S->getNumComponents();
+ for (unsigned i = 0; i < n; ++i) {
+ const OffsetOfExpr::OffsetOfNode& ON = S->getComponent(i);
+ ID.AddInteger(ON.getKind());
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array:
+ // Expressions handled below.
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Field:
+ VisitDecl(ON.getField());
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ ID.AddPointer(ON.getFieldName());
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Base:
+ // These nodes are implicit, and therefore don't need profiling.
+ break;
+ }
+ }
+
+ VisitExpr(S);
+}
+
+void
+StmtProfiler::VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getKind());
+ if (S->isArgumentType())
+ VisitType(S->getArgumentType());
+}
+
+void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCallExpr(const CallExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitMemberExpr(const MemberExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getMemberDecl());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->isArrow());
+}
+
+void StmtProfiler::VisitCompoundLiteralExpr(const CompoundLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isFileScope());
+}
+
+void StmtProfiler::VisitCastExpr(const CastExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitImplicitCastExpr(const ImplicitCastExpr *S) {
+ VisitCastExpr(S);
+ ID.AddInteger(S->getValueKind());
+}
+
+void StmtProfiler::VisitExplicitCastExpr(const ExplicitCastExpr *S) {
+ VisitCastExpr(S);
+ VisitType(S->getTypeAsWritten());
+}
+
+void StmtProfiler::VisitCStyleCastExpr(const CStyleCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void StmtProfiler::VisitBinaryOperator(const BinaryOperator *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOpcode());
+}
+
+void
+StmtProfiler::VisitCompoundAssignOperator(const CompoundAssignOperator *S) {
+ VisitBinaryOperator(S);
+}
+
+void StmtProfiler::VisitConditionalOperator(const ConditionalOperator *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitBinaryConditionalOperator(
+ const BinaryConditionalOperator *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitAddrLabelExpr(const AddrLabelExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getLabel());
+}
+
+void StmtProfiler::VisitStmtExpr(const StmtExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitShuffleVectorExpr(const ShuffleVectorExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitChooseExpr(const ChooseExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitGNUNullExpr(const GNUNullExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitVAArgExpr(const VAArgExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitInitListExpr(const InitListExpr *S) {
+ if (S->getSyntacticForm()) {
+ VisitInitListExpr(S->getSyntacticForm());
+ return;
+ }
+
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->usesGNUSyntax());
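+ // Tag each designator with a small integer (0: field, 1: array,
+ // 2: array range) before adding its kind-specific data.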
+ for (DesignatedInitExpr::const_designators_iterator D =
+ S->designators_begin(), DEnd = S->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ ID.AddInteger(0);
+ VisitName(D->getFieldName());
+ continue;
+ }
+
+ if (D->isArrayDesignator()) {
+ ID.AddInteger(1);
+ } else {
+ assert(D->isArrayRangeDesignator());
+ ID.AddInteger(2);
+ }
+ ID.AddInteger(D->getFirstExprIndex());
+ }
+}
+
+void StmtProfiler::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) {
+ VisitExpr(S);
+ VisitName(&S->getAccessor());
+}
+
+void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getBlockDecl());
+}
+
+void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
+ VisitExpr(S);
+ for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
+ QualType T = S->getAssocType(i);
+ if (T.isNull())
+ ID.AddPointer(0);
+ else
+ VisitType(T);
+ VisitExpr(S->getAssocExpr(i));
+ }
+}
+
+void StmtProfiler::VisitPseudoObjectExpr(const PseudoObjectExpr *S) {
+ VisitExpr(S);
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i)
+ // Normally we would not profile the source expressions of OpaqueValueExprs;
+ // for pseudo-object expressions, however, they carry the syntactic content
+ // we want to profile.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(*i))
+ Visit(OVE->getSourceExpr());
+}
+
+void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOp());
+}
+
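+/// Map an overloaded operator call onto the statement class and operator
+/// kind of the equivalent built-in expression, so that a type-dependent
+/// operator call profiles like the expression it may become after
+/// instantiation.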
+static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
+ UnaryOperatorKind &UnaryOp,
+ BinaryOperatorKind &BinaryOp) {
+ switch (S->getOperator()) {
+ case OO_None:
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Arrow:
+ case OO_Call:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Invalid operator call kind");
+
+ case OO_Plus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Plus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Add;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Minus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Minus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Sub;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Star:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Deref;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Mul;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Slash:
+ BinaryOp = BO_Div;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Percent:
+ BinaryOp = BO_Rem;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Caret:
+ BinaryOp = BO_Xor;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Amp:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_AddrOf;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_And;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Pipe:
+ BinaryOp = BO_Or;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Tilde:
+ UnaryOp = UO_Not;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Exclaim:
+ UnaryOp = UO_LNot;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Equal:
+ BinaryOp = BO_Assign;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Less:
+ BinaryOp = BO_LT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Greater:
+ BinaryOp = BO_GT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusEqual:
+ BinaryOp = BO_AddAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_MinusEqual:
+ BinaryOp = BO_SubAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_StarEqual:
+ BinaryOp = BO_MulAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_SlashEqual:
+ BinaryOp = BO_DivAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PercentEqual:
+ BinaryOp = BO_RemAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_CaretEqual:
+ BinaryOp = BO_XorAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_AmpEqual:
+ BinaryOp = BO_AndAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PipeEqual:
+ BinaryOp = BO_OrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_LessLess:
+ BinaryOp = BO_Shl;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterGreater:
+ BinaryOp = BO_Shr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessLessEqual:
+ BinaryOp = BO_ShlAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_GreaterGreaterEqual:
+ BinaryOp = BO_ShrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_EqualEqual:
+ BinaryOp = BO_EQ;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ExclaimEqual:
+ BinaryOp = BO_NE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessEqual:
+ BinaryOp = BO_LE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterEqual:
+ BinaryOp = BO_GE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_AmpAmp:
+ BinaryOp = BO_LAnd;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PipePipe:
+ BinaryOp = BO_LOr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusPlus:
+ UnaryOp = S->getNumArgs() == 1? UO_PreInc
+ : UO_PostInc;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_MinusMinus:
+ UnaryOp = S->getNumArgs() == 1? UO_PreDec
+ : UO_PostDec;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Comma:
+ BinaryOp = BO_Comma;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ArrowStar:
+ BinaryOp = BO_PtrMemI;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Subscript:
+ return Stmt::ArraySubscriptExprClass;
+ }
+
+ llvm_unreachable("Invalid overloaded operator expression");
+}
+
+void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
+ if (S->isTypeDependent()) {
+ // Type-dependent operator calls are profiled like their underlying
+ // syntactic operator.
+ UnaryOperatorKind UnaryOp = UO_Extension;
+ BinaryOperatorKind BinaryOp = BO_Comma;
+ Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
+
+ ID.AddInteger(SC);
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ Visit(S->getArg(I));
+ if (SC == Stmt::UnaryOperatorClass)
+ ID.AddInteger(UnaryOp);
+ else if (SC == Stmt::BinaryOperatorClass ||
+ SC == Stmt::CompoundAssignOperatorClass)
+ ID.AddInteger(BinaryOp);
+ else
+ assert(SC == Stmt::ArraySubscriptExprClass);
+
+ return;
+ }
+
+ VisitCallExpr(S);
+ ID.AddInteger(S->getOperator());
+}
+
+void StmtProfiler::VisitCXXMemberCallExpr(const CXXMemberCallExpr *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitCUDAKernelCallExpr(const CUDAKernelCallExpr *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitAsTypeExpr(const AsTypeExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXNamedCastExpr(const CXXNamedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void
+StmtProfiler::VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
+void StmtProfiler::VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXTypeidExpr(const CXXTypeidExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperand());
+}
+
+void StmtProfiler::VisitCXXUuidofExpr(const CXXUuidofExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperand());
+}
+
+void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isImplicit());
+}
+
+void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParam());
+}
+
+void StmtProfiler::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
+ VisitExpr(S);
+ VisitDecl(
+ const_cast<CXXDestructorDecl *>(S->getTemporary()->getDestructor()));
+}
+
+void StmtProfiler::VisitCXXConstructExpr(const CXXConstructExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getConstructor());
+ ID.AddBoolean(S->isElidable());
+}
+
+void StmtProfiler::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void
+StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
+ VisitCXXConstructExpr(S);
+}
+
+void
+StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
+ VisitExpr(S);
+ for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
+ CEnd = S->explicit_capture_end();
+ C != CEnd; ++C) {
+ ID.AddInteger(C->getCaptureKind());
+ if (C->capturesVariable()) {
+ VisitDecl(C->getCapturedVar());
+ ID.AddBoolean(C->isPackExpansion());
+ }
+ }
+ // Note: If we actually needed to be able to match lambda
+ // expressions, we would have to consider parameters and return type
+ // here, among other things.
+ VisitStmt(S->getBody());
+}
+
+void
+StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isGlobalDelete());
+ ID.AddBoolean(S->isArrayForm());
+ VisitDecl(S->getOperatorDelete());
+}
+
+void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getAllocatedType());
+ VisitDecl(S->getOperatorNew());
+ VisitDecl(S->getOperatorDelete());
+ ID.AddBoolean(S->isArray());
+ ID.AddInteger(S->getNumPlacementArgs());
+ ID.AddBoolean(S->isGlobalNew());
+ ID.AddBoolean(S->isParenTypeId());
+ ID.AddInteger(S->getInitializationStyle());
+}
+
+void
+StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitType(S->getDestroyedType());
+}
+
+void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
+ VisitExpr(S);
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getName());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getExplicitTemplateArgs().getTemplateArgs(),
+ S->getExplicitTemplateArgs().NumTemplateArgs);
+}
+
+void
+StmtProfiler::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *S) {
+ VisitOverloadExpr(S);
+}
+
+void StmtProfiler::VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getQueriedType());
+}
+
+void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getLhsType());
+ VisitType(S->getRhsType());
+}
+
+void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ ID.AddInteger(S->getNumArgs());
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ VisitType(S->getArg(I)->getType());
+}
+
+void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getQueriedType());
+}
+
+void StmtProfiler::VisitExpressionTraitExpr(const ExpressionTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitExpr(S->getQueriedExpression());
+}
+
+void StmtProfiler::VisitDependentScopeDeclRefExpr(
+ const DependentScopeDeclRefExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getDeclName());
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitExprWithCleanups(const ExprWithCleanups *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXUnresolvedConstructExpr(
+ const CXXUnresolvedConstructExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getTypeAsWritten());
+}
+
+void StmtProfiler::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *S) {
+ ID.AddBoolean(S->isImplicitAccess());
+ if (!S->isImplicitAccess()) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ }
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getMember());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitUnresolvedMemberExpr(const UnresolvedMemberExpr *S) {
+ ID.AddBoolean(S->isImplicitAccess());
+ if (!S->isImplicitAccess()) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ }
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getMemberName());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitCXXNoexceptExpr(const CXXNoexceptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getPack());
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
+ const SubstNonTypeTemplateParmPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParameterPack());
+ VisitTemplateArgument(S->getArgumentPack());
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmExpr(
+ const SubstNonTypeTemplateParmExpr *E) {
+ // Profile exactly as the replacement expression.
+ Visit(E->getReplacement());
+}
+
+void StmtProfiler::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getEncodedType());
+}
+
+void StmtProfiler::VisitObjCSelectorExpr(const ObjCSelectorExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getSelector());
+}
+
+void StmtProfiler::VisitObjCProtocolExpr(const ObjCProtocolExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getProtocol());
+}
+
+void StmtProfiler::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getDecl());
+ ID.AddBoolean(S->isArrow());
+ ID.AddBoolean(S->isFreeIvar());
+}
+
+void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) {
+ VisitExpr(S);
+ if (S->isImplicitProperty()) {
+ VisitDecl(S->getImplicitPropertyGetter());
+ VisitDecl(S->getImplicitPropertySetter());
+ } else {
+ VisitDecl(S->getExplicitProperty());
+ }
+ if (S->isSuperReceiver()) {
+ ID.AddBoolean(S->isSuperReceiver());
+ VisitType(S->getSuperReceiverType());
+ }
+}
+
+void StmtProfiler::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getAtIndexMethodDecl());
+ VisitDecl(S->setAtIndexMethodDecl());
+}
+
+void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getSelector());
+ VisitDecl(S->getMethodDecl());
+}
+
+void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+}
+
+void StmtProfiler::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
+void StmtProfiler::VisitObjCIndirectCopyRestoreExpr(
+ const ObjCIndirectCopyRestoreExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->shouldCopy());
+}
+
+void StmtProfiler::VisitObjCBridgedCastExpr(const ObjCBridgedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+ ID.AddBoolean(S->getBridgeKind());
+}
+
+void StmtProfiler::VisitDecl(const Decl *D) {
+ ID.AddInteger(D? D->getKind() : 0);
+
+ if (Canonical && D) {
+ if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ ID.AddInteger(NTTP->getDepth());
+ ID.AddInteger(NTTP->getIndex());
+ ID.AddBoolean(NTTP->isParameterPack());
+ VisitType(NTTP->getType());
+ return;
+ }
+
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
+ // The Itanium C++ ABI uses the type, scope depth, and scope
+ // index of a parameter when mangling expressions that involve
+ // function parameters, so we will use the parameter's type for
+ // establishing function parameter identity. That way, our
+ // definition of "equivalent" (per C++ [temp.over.link]) is at
+ // least as strong as the definition of "equivalent" used for
+ // name mangling.
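+      //
+      // As an illustration, a reference to the first parameter of
+      //   template<typename T> void f(T x, int y);
+      //   template<typename U> void f(U x, int y);
+      // profiles identically for both templates: the parameter contributes
+      // only its canonical type, its function scope depth, and its index
+      // within that scope.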
+ VisitType(Parm->getType());
+ ID.AddInteger(Parm->getFunctionScopeDepth());
+ ID.AddInteger(Parm->getFunctionScopeIndex());
+ return;
+ }
+
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+ }
+
+ ID.AddPointer(D? D->getCanonicalDecl() : 0);
+}
+
+void StmtProfiler::VisitType(QualType T) {
+ if (Canonical)
+ T = Context.getCanonicalType(T);
+
+ ID.AddPointer(T.getAsOpaquePtr());
+}
+
+void StmtProfiler::VisitName(DeclarationName Name) {
+ ID.AddPointer(Name.getAsOpaquePtr());
+}
+
+void StmtProfiler::VisitNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ if (Canonical)
+ NNS = Context.getCanonicalNestedNameSpecifier(NNS);
+ ID.AddPointer(NNS);
+}
+
+void StmtProfiler::VisitTemplateName(TemplateName Name) {
+ if (Canonical)
+ Name = Context.getCanonicalTemplateName(Name);
+
+ Name.Profile(ID);
+}
+
+void StmtProfiler::VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ VisitTemplateArgument(Args[I].getArgument());
+}
+
+void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
+ // Mostly repetitive with TemplateArgument::Profile!
+ ID.AddInteger(Arg.getKind());
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+
+ case TemplateArgument::Type:
+ VisitType(Arg.getAsType());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
+ break;
+
+ case TemplateArgument::Declaration:
+ VisitDecl(Arg.getAsDecl());
+ break;
+
+ case TemplateArgument::Integral:
+ Arg.getAsIntegral()->Profile(ID);
+ VisitType(Arg.getIntegralType());
+ break;
+
+ case TemplateArgument::Expression:
+ Visit(Arg.getAsExpr());
+ break;
+
+ case TemplateArgument::Pack:
+ const TemplateArgument *Pack = Arg.pack_begin();
+ for (unsigned i = 0, e = Arg.pack_size(); i != e; ++i)
+ VisitTemplateArgument(Pack[i]);
+ break;
+ }
+}
+
+void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical) const {
+ StmtProfiler Profiler(ID, Context, Canonical);
+ Profiler.Visit(this);
+}
diff --git a/clang/lib/AST/StmtViz.cpp b/clang/lib/AST/StmtViz.cpp
new file mode 100644
index 0000000..8be287e
--- /dev/null
+++ b/clang/lib/AST/StmtViz.cpp
@@ -0,0 +1,62 @@
+//===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Stmt::viewAST, which generates a Graphviz DOT file
+// that depicts the AST and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtGraphTraits.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+
+void Stmt::viewAST() const {
+#ifndef NDEBUG
+ llvm::ViewGraph(this,"AST");
+#else
+ llvm::errs() << "Stmt::viewAST is only available in debug builds on "
+ << "systems with Graphviz or gv!\n";
+#endif
+}
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits {
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+
+ if (Node)
+ Out << Node->getStmtClassName();
+ else
+ Out << "<NULL>";
+
+ std::string OutStr = Out.str();
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
new file mode 100644
index 0000000..531e03e
--- /dev/null
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -0,0 +1,628 @@
+//===--- TemplateBase.cpp - Common template AST class implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements common classes used throughout C++ template
+// representations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+#include <cctype>
+
+using namespace clang;
+
+/// \brief Print a template integral argument value.
+///
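+/// For example, a boolean argument is printed as "true" or "false", a
+/// character argument as a quoted character such as 'x', and any other
+/// integral argument in base 10.
+///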
+/// \param TemplArg the TemplateArgument instance to print.
+///
+/// \param Out the raw_ostream instance to use for printing.
+static void printIntegral(const TemplateArgument &TemplArg,
+ raw_ostream &Out) {
+ const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
+ const llvm::APSInt *Val = TemplArg.getAsIntegral();
+
+ if (T->isBooleanType()) {
+ Out << (Val->getBoolValue() ? "true" : "false");
+ } else if (T->isCharType()) {
+ const char Ch = Val->getZExtValue();
+ Out << ((Ch == '\'') ? "'\\" : "'");
+ Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
+ Out << "'";
+ } else {
+ Out << Val->toString(10);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgument Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ if (NumArgs == 0)
+ return TemplateArgument(0, 0);
+
+ TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs];
+ std::copy(Args, Args + NumArgs, Storage);
+ return TemplateArgument(Storage, NumArgs);
+}
+
+bool TemplateArgument::isDependent() const {
+ switch (getKind()) {
+ case Null:
+ llvm_unreachable("Should not have a NULL template argument");
+
+ case Type:
+ return getAsType()->isDependentType();
+
+ case Template:
+ return getAsTemplate().isDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (Decl *D = getAsDecl()) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ return DC->isDependentContext();
+ return D->getDeclContext()->isDependentContext();
+ }
+
+ return false;
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent());
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) {
+ if (P->isDependent())
+ return true;
+ }
+
+ return false;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Null:
+ llvm_unreachable("Should not have a NULL template argument");
+
+ case Type:
+ return getAsType()->isInstantiationDependentType();
+
+ case Template:
+ return getAsTemplate().isInstantiationDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (Decl *D = getAsDecl()) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ return DC->isDependentContext();
+ return D->getDeclContext()->isDependentContext();
+ }
+ return false;
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return getAsExpr()->isInstantiationDependent();
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) {
+ if (P->isInstantiationDependent())
+ return true;
+ }
+
+ return false;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::isPackExpansion() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Template:
+ return false;
+
+ case TemplateExpansion:
+ return true;
+
+ case Type:
+ return isa<PackExpansionType>(getAsType());
+
+ case Expression:
+ return isa<PackExpansionExpr>(getAsExpr());
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case TemplateExpansion:
+ break;
+
+ case Type:
+ if (getAsType()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Template:
+ if (getAsTemplate().containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Expression:
+ if (getAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P)
+ if (P->containsUnexpandedParameterPack())
+ return true;
+
+ break;
+ }
+
+ return false;
+}
+
+llvm::Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
+ assert(Kind == TemplateExpansion);
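+  // NumExpansions is stored offset by one, with zero meaning that the
+  // number of expansions is not known.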
+ if (TemplateArg.NumExpansions)
+ return TemplateArg.NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
+}
+
+void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context) const {
+ ID.AddInteger(Kind);
+ switch (Kind) {
+ case Null:
+ break;
+
+ case Type:
+ getAsType().Profile(ID);
+ break;
+
+ case Declaration:
+ ID.AddPointer(getAsDecl()? getAsDecl()->getCanonicalDecl() : 0);
+ break;
+
+ case Template:
+ case TemplateExpansion: {
+ TemplateName Template = getAsTemplateOrTemplatePattern();
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl())) {
+ ID.AddBoolean(true);
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getPosition());
+ ID.AddBoolean(TTP->isParameterPack());
+ } else {
+ ID.AddBoolean(false);
+ ID.AddPointer(Context.getCanonicalTemplateName(Template)
+ .getAsVoidPointer());
+ }
+ break;
+ }
+
+ case Integral:
+ getAsIntegral()->Profile(ID);
+ getIntegralType().Profile(ID);
+ break;
+
+ case Expression:
+ getAsExpr()->Profile(ID, Context, true);
+ break;
+
+ case Pack:
+ ID.AddInteger(Args.NumArgs);
+ for (unsigned I = 0; I != Args.NumArgs; ++I)
+ Args.Args[I].Profile(ID, Context);
+ }
+}
+
+bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
+ if (getKind() != Other.getKind()) return false;
+
+ switch (getKind()) {
+ case Null:
+ case Type:
+ case Declaration:
+ case Expression:
+ case Template:
+ case TemplateExpansion:
+ return TypeOrValue == Other.TypeOrValue;
+
+ case Integral:
+ return getIntegralType() == Other.getIntegralType() &&
+ *getAsIntegral() == *Other.getAsIntegral();
+
+ case Pack:
+ if (Args.NumArgs != Other.Args.NumArgs) return false;
+ for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
+ if (!Args.Args[I].structurallyEquals(Other.Args.Args[I]))
+ return false;
+ return true;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+TemplateArgument TemplateArgument::getPackExpansionPattern() const {
+ assert(isPackExpansion());
+
+ switch (getKind()) {
+ case Type:
+ return getAsType()->getAs<PackExpansionType>()->getPattern();
+
+ case Expression:
+ return cast<PackExpansionExpr>(getAsExpr())->getPattern();
+
+ case TemplateExpansion:
+ return TemplateArgument(getAsTemplateOrTemplatePattern());
+
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Null:
+ case Template:
+ return TemplateArgument();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+void TemplateArgument::print(const PrintingPolicy &Policy,
+ raw_ostream &Out) const {
+ switch (getKind()) {
+ case Null:
+ Out << "<no value>";
+ break;
+
+ case Type: {
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressStrongLifetime = true;
+ std::string TypeStr;
+ getAsType().getAsStringInternal(TypeStr, SubPolicy);
+ Out << TypeStr;
+ break;
+ }
+
+ case Declaration: {
+ if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(getAsDecl())) {
+ if (ND->getDeclName()) {
+ Out << *ND;
+ } else {
+ Out << "<anonymous>";
+ }
+ } else {
+ Out << "nullptr";
+ }
+ break;
+ }
+
+ case Template:
+ getAsTemplate().print(Out, Policy);
+ break;
+
+ case TemplateExpansion:
+ getAsTemplateOrTemplatePattern().print(Out, Policy);
+ Out << "...";
+ break;
+
+ case Integral: {
+ printIntegral(*this, Out);
+ break;
+ }
+
+ case Expression:
+ getAsExpr()->printPretty(Out, 0, Policy);
+ break;
+
+ case Pack:
+ Out << "<";
+ bool First = true;
+ for (TemplateArgument::pack_iterator P = pack_begin(), PEnd = pack_end();
+ P != PEnd; ++P) {
+ if (First)
+ First = false;
+ else
+ Out << ", ";
+
+ P->print(Policy, Out);
+ }
+ Out << ">";
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentLoc Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgumentLocInfo::TemplateArgumentLocInfo() {
+ memset((void*)this, 0, sizeof(TemplateArgumentLocInfo));
+}
+
+SourceRange TemplateArgumentLoc::getSourceRange() const {
+ switch (Argument.getKind()) {
+ case TemplateArgument::Expression:
+ return getSourceExpression()->getSourceRange();
+
+ case TemplateArgument::Declaration:
+ return getSourceDeclExpression()->getSourceRange();
+
+ case TemplateArgument::Type:
+ if (TypeSourceInfo *TSI = getTypeSourceInfo())
+ return TSI->getTypeLoc().getSourceRange();
+ else
+ return SourceRange();
+
+ case TemplateArgument::Template:
+ if (getTemplateQualifierLoc())
+ return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
+ getTemplateNameLoc());
+ return SourceRange(getTemplateNameLoc());
+
+ case TemplateArgument::TemplateExpansion:
+ if (getTemplateQualifierLoc())
+ return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
+ getTemplateEllipsisLoc());
+ return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc());
+
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return SourceRange();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+TemplateArgumentLoc
+TemplateArgumentLoc::getPackExpansionPattern(SourceLocation &Ellipsis,
+ llvm::Optional<unsigned> &NumExpansions,
+ ASTContext &Context) const {
+ assert(Argument.isPackExpansion());
+
+ switch (Argument.getKind()) {
+ case TemplateArgument::Type: {
+ // FIXME: We shouldn't ever have to worry about missing
+ // type-source info!
+ TypeSourceInfo *ExpansionTSInfo = getTypeSourceInfo();
+ if (!ExpansionTSInfo)
+ ExpansionTSInfo = Context.getTrivialTypeSourceInfo(
+ getArgument().getAsType(),
+ Ellipsis);
+ PackExpansionTypeLoc Expansion
+ = cast<PackExpansionTypeLoc>(ExpansionTSInfo->getTypeLoc());
+ Ellipsis = Expansion.getEllipsisLoc();
+
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ NumExpansions = Expansion.getTypePtr()->getNumExpansions();
+
+ // FIXME: This is horrible. We know where the source location data is for
+ // the pattern, and we have the pattern's type, but we are forced to copy
+ // them into an ASTContext because TypeSourceInfo bundles them together
+ // and TemplateArgumentLoc traffics in TypeSourceInfo pointers.
+ TypeSourceInfo *PatternTSInfo
+ = Context.CreateTypeSourceInfo(Pattern.getType(),
+ Pattern.getFullDataSize());
+ memcpy(PatternTSInfo->getTypeLoc().getOpaqueData(),
+ Pattern.getOpaqueData(), Pattern.getFullDataSize());
+ return TemplateArgumentLoc(TemplateArgument(Pattern.getType()),
+ PatternTSInfo);
+ }
+
+ case TemplateArgument::Expression: {
+ PackExpansionExpr *Expansion
+ = cast<PackExpansionExpr>(Argument.getAsExpr());
+ Expr *Pattern = Expansion->getPattern();
+ Ellipsis = Expansion->getEllipsisLoc();
+ NumExpansions = Expansion->getNumExpansions();
+ return TemplateArgumentLoc(Pattern, Pattern);
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ Ellipsis = getTemplateEllipsisLoc();
+ NumExpansions = Argument.getNumTemplateExpansions();
+ return TemplateArgumentLoc(Argument.getPackExpansionPattern(),
+ getTemplateQualifierLoc(),
+ getTemplateNameLoc());
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Template:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return TemplateArgumentLoc();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ // This is bad, but not as bad as crashing because of argument
+ // count mismatches.
+ return DB << "(null template argument)";
+
+ case TemplateArgument::Type:
+ return DB << Arg.getAsType();
+
+ case TemplateArgument::Declaration:
+ if (Decl *D = Arg.getAsDecl())
+ return DB << D;
+ return DB << "nullptr";
+
+ case TemplateArgument::Integral:
+ return DB << Arg.getAsIntegral()->toString(10);
+
+ case TemplateArgument::Template:
+ return DB << Arg.getAsTemplate();
+
+ case TemplateArgument::TemplateExpansion:
+ return DB << Arg.getAsTemplateOrTemplatePattern() << "...";
+
+ case TemplateArgument::Expression: {
+ // This shouldn't actually ever happen, so it's okay that we're
+ // regurgitating an expression here.
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.getAsExpr()->printPretty(OS, 0, Policy);
+ return DB << OS.str();
+ }
+
+ case TemplateArgument::Pack: {
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.print(Policy, OS);
+ return DB << OS.str();
+ }
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+const ASTTemplateArgumentListInfo *
+ASTTemplateArgumentListInfo::Create(ASTContext &C,
+ const TemplateArgumentListInfo &List) {
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
+ ASTTemplateArgumentListInfo::sizeFor(List.size());
+ void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
+ ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
+ TAI->initializeFrom(List);
+ return TAI;
+}
+
+void ASTTemplateArgumentListInfo::initializeFrom(
+ const TemplateArgumentListInfo &Info) {
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+}
+
+void ASTTemplateArgumentListInfo::initializeFrom(
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack) {
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i) {
+ Dependent = Dependent || Info[i].getArgument().isDependent();
+ InstantiationDependent = InstantiationDependent ||
+ Info[i].getArgument().isInstantiationDependent();
+ ContainsUnexpandedParameterPack
+ = ContainsUnexpandedParameterPack ||
+ Info[i].getArgument().containsUnexpandedParameterPack();
+
+ new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+ }
+}
+
+void ASTTemplateArgumentListInfo::copyInto(
+ TemplateArgumentListInfo &Info) const {
+ Info.setLAngleLoc(LAngleLoc);
+ Info.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0; I != NumTemplateArgs; ++I)
+ Info.addArgument(getTemplateArgs()[I]);
+}
+
+std::size_t ASTTemplateArgumentListInfo::sizeFor(unsigned NumTemplateArgs) {
+ return sizeof(ASTTemplateArgumentListInfo) +
+ sizeof(TemplateArgumentLoc) * NumTemplateArgs;
+}
+
+void
+ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &Info) {
+ Base::initializeFrom(Info);
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+void
+ASTTemplateKWAndArgsInfo
+::initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack) {
+ Base::initializeFrom(Info, Dependent, InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+void
+ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
+ // No explicit template arguments, but template keyword loc is valid.
+ assert(TemplateKWLoc.isValid());
+ LAngleLoc = SourceLocation();
+ RAngleLoc = SourceLocation();
+ NumTemplateArgs = 0;
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+std::size_t
+ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) {
+ // Add space for the template keyword location.
+ return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation);
+}
+
diff --git a/clang/lib/AST/TemplateName.cpp b/clang/lib/AST/TemplateName.cpp
new file mode 100644
index 0000000..e89ba53
--- /dev/null
+++ b/clang/lib/AST/TemplateName.cpp
@@ -0,0 +1,176 @@
+//===--- TemplateName.cpp - C++ Template Name Representation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TemplateName interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace llvm;
+
+TemplateArgument
+SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
+ return TemplateArgument(Arguments, size());
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Parameter, Replacement);
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *parameter,
+ TemplateName replacement) {
+ ID.AddPointer(parameter);
+ ID.AddPointer(replacement.getAsVoidPointer());
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context) {
+ Profile(ID, Context, Parameter, TemplateArgument(Arguments, size()));
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Parameter);
+ ArgPack.Profile(ID, Context);
+}
+
+TemplateName::NameKind TemplateName::getKind() const {
+ if (Storage.is<TemplateDecl *>())
+ return Template;
+ if (Storage.is<DependentTemplateName *>())
+ return DependentTemplate;
+ if (Storage.is<QualifiedTemplateName *>())
+ return QualifiedTemplate;
+
+ UncommonTemplateNameStorage *uncommon
+ = Storage.get<UncommonTemplateNameStorage*>();
+ if (uncommon->getAsOverloadedStorage())
+ return OverloadedTemplate;
+ if (uncommon->getAsSubstTemplateTemplateParm())
+ return SubstTemplateTemplateParm;
+ return SubstTemplateTemplateParmPack;
+}
+
+TemplateDecl *TemplateName::getAsTemplateDecl() const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ return Template;
+
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
+ return QTN->getTemplateDecl();
+
+ if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
+ return sub->getReplacement().getAsTemplateDecl();
+
+ return 0;
+}
+
+bool TemplateName::isDependent() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (isa<TemplateTemplateParmDecl>(Template))
+ return true;
+ // FIXME: Hack, getDeclContext() can be null if Template is still
+ // initializing due to PCH reading, so we check it before using it.
+ // Should probably modify TemplateSpecializationType to allow constructing
+ // it without the isDependent() checking.
+ return Template->getDeclContext() &&
+ Template->getDeclContext()->isDependentContext();
+ }
+
+ assert(!getAsOverloadedTemplate() &&
+ "overloaded templates shouldn't survive to here");
+
+ return true;
+}
+
+bool TemplateName::isInstantiationDependent() const {
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (QTN->getQualifier()->isInstantiationDependent())
+ return true;
+ }
+
+ return isDependent();
+}
+
+bool TemplateName::containsUnexpandedParameterPack() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ return TTP->isParameterPack();
+
+ return false;
+ }
+
+ if (DependentTemplateName *DTN = getAsDependentTemplateName())
+ return DTN->getQualifier() &&
+ DTN->getQualifier()->containsUnexpandedParameterPack();
+
+ return getAsSubstTemplateTemplateParmPack() != 0;
+}
+
+void
+TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool SuppressNNS) const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ OS << *Template;
+ else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (!SuppressNNS)
+ QTN->getQualifier()->print(OS, Policy);
+ if (QTN->hasTemplateKeyword())
+ OS << "template ";
+ OS << *QTN->getDecl();
+ } else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
+ if (!SuppressNNS && DTN->getQualifier())
+ DTN->getQualifier()->print(OS, Policy);
+ OS << "template ";
+
+ if (DTN->isIdentifier())
+ OS << DTN->getIdentifier()->getName();
+ else
+ OS << "operator " << getOperatorSpelling(DTN->getOperator());
+ } else if (SubstTemplateTemplateParmStorage *subst
+ = getAsSubstTemplateTemplateParm()) {
+ subst->getReplacement().print(OS, Policy, SuppressNNS);
+ } else if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = getAsSubstTemplateTemplateParmPack())
+ OS << *SubstPack->getParameterPack();
+ else {
+ OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
+ (*OTS->begin())->printName(OS);
+ }
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ TemplateName N) {
+ std::string NameStr;
+ raw_string_ostream OS(NameStr);
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ N.print(OS, PrintingPolicy(LO));
+ OS.flush();
+ return DB << NameStr;
+}
+
+void TemplateName::dump() const {
+ LangOptions LO; // FIXME!
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ print(llvm::errs(), PrintingPolicy(LO));
+}
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
new file mode 100644
index 0000000..3f6a094
--- /dev/null
+++ b/clang/lib/AST/Type.cpp
@@ -0,0 +1,2256 @@
+//===--- Type.cpp - Type representation and manipulation ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
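+/// \brief Determine whether these qualifiers are a strict superset of the
+/// given qualifiers: the two sets differ, and in every category (CVR
+/// qualifiers, ObjC GC attribute, address space, ObjC lifetime) this set
+/// includes whatever \p Other specifies.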
+bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const {
+ return (*this != Other) &&
+ // CVR qualifiers superset
+ (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) &&
+ // ObjC GC qualifiers superset
+ ((getObjCGCAttr() == Other.getObjCGCAttr()) ||
+ (hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
+ // Address space superset.
+ ((getAddressSpace() == Other.getAddressSpace()) ||
+       (hasAddressSpace() && !Other.hasAddressSpace())) &&
+ // Lifetime qualifier superset.
+ ((getObjCLifetime() == Other.getObjCLifetime()) ||
+ (hasObjCLifetime() && !Other.hasObjCLifetime()));
+}
+
+const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
+ const Type* ty = getTypePtr();
+ NamedDecl *ND = NULL;
+ if (ty->isPointerType() || ty->isReferenceType())
+ return ty->getPointeeType().getBaseTypeIdentifier();
+ else if (ty->isRecordType())
+ ND = ty->getAs<RecordType>()->getDecl();
+ else if (ty->isEnumeralType())
+ ND = ty->getAs<EnumType>()->getDecl();
+ else if (ty->getTypeClass() == Type::Typedef)
+ ND = ty->getAs<TypedefType>()->getDecl();
+ else if (ty->isArrayType())
+ return ty->castAsArrayTypeUnsafe()->
+ getElementType().getBaseTypeIdentifier();
+
+ if (ND)
+ return ND->getIdentifier();
+ return NULL;
+}
+
+bool QualType::isConstant(QualType T, ASTContext &Ctx) {
+ if (T.isConstQualified())
+ return true;
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T))
+ return AT->getElementType().isConstant(Ctx);
+
+ return false;
+}
+
+unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
+ QualType ElementType,
+ const llvm::APInt &NumElements) {
+ llvm::APSInt SizeExtended(NumElements, true);
+ unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
+ SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
+ SizeExtended.getBitWidth()) * 2);
+
+ uint64_t ElementSize
+ = Context.getTypeSizeInChars(ElementType).getQuantity();
+ llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize));
+ TotalSize *= SizeExtended;
+
+ return TotalSize.getActiveBits();
+}
+
+unsigned ConstantArrayType::getMaxSizeBits(ASTContext &Context) {
+ unsigned Bits = Context.getTypeSize(Context.getSizeType());
+
+ // GCC appears to only allow 63 bits worth of address space when compiling
+ // for 64-bit, so we do the same.
+ if (Bits == 64)
+ --Bits;
+
+ return Bits;
+}
+
+DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
+ QualType et, QualType can,
+ Expr *e, ArraySizeModifier sm,
+ unsigned tq,
+ SourceRange brackets)
+ : ArrayType(DependentSizedArray, et, can, sm, tq,
+ (et->containsUnexpandedParameterPack() ||
+ (e && e->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr((Stmt*) e), Brackets(brackets)
+{
+}
+
+void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ET,
+ ArraySizeModifier SizeMod,
+ unsigned TypeQuals,
+ Expr *E) {
+ ID.AddPointer(ET.getAsOpaquePtr());
+ ID.AddInteger(SizeMod);
+ ID.AddInteger(TypeQuals);
+ E->Profile(ID, Context, true);
+}
+
+DependentSizedExtVectorType::DependentSizedExtVectorType(const
+ ASTContext &Context,
+ QualType ElementType,
+ QualType can,
+ Expr *SizeExpr,
+ SourceLocation loc)
+ : Type(DependentSizedExtVector, can, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ ElementType->isVariablyModifiedType(),
+ (ElementType->containsUnexpandedParameterPack() ||
+ (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
+ loc(loc)
+{
+}
+
+void
+DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ElementType, Expr *SizeExpr) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ SizeExpr->Profile(ID, Context, true);
+}
+
+VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
+ VectorKind vecKind)
+ : Type(Vector, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
+VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
+ QualType canonType, VectorKind vecKind)
+ : Type(tc, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
+/// getArrayElementTypeNoTypeQual - If this is an array type, return the
+/// element type of the array, potentially with type qualifiers missing.
+/// This method should never be used when type qualifiers are meaningful.
+const Type *Type::getArrayElementTypeNoTypeQual() const {
+ // If this is directly an array type, return it.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType().getTypePtr();
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType))
+ return 0;
+
+ // If this is a typedef for an array type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getUnqualifiedDesugaredType())
+ ->getElementType().getTypePtr();
+}
+
+/// getDesugaredType - Return the specified type with any "sugar" removed from
+/// the type. This takes off typedefs, typeof's etc. If the outer level of
+/// the type is already concrete, it returns it unmodified. This is similar
+/// to getting the canonical type, but it doesn't remove *all* typedefs. For
+/// example, it returns "T*" as "T*" (not as "int*"), because the pointer is
+/// concrete.
+QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
+ SplitQualType split = getSplitDesugaredType(T);
+ return Context.getQualifiedType(split.Ty, split.Quals);
+}
+
+QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &Context) {
+ SplitQualType split = type.split();
+ QualType desugar = split.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
+ return Context.getQualifiedType(desugar, split.Quals);
+}
+
+QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
+ switch (getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(this); \
+ if (!ty->isSugared()) return QualType(ty, 0); \
+ return ty->desugar(); \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("bad type kind!");
+}
+
+SplitQualType QualType::getSplitDesugaredType(QualType T) {
+ QualifierCollector Qs;
+
+ QualType Cur = T;
+ while (true) {
+ const Type *CurTy = Qs.strip(Cur);
+ switch (CurTy->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *Ty = cast<Class##Type>(CurTy); \
+ if (!Ty->isSugared()) \
+ return SplitQualType(Ty, Qs); \
+ Cur = Ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+
+SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
+ SplitQualType split = type.split();
+
+ // All the qualifiers we've seen so far.
+ Qualifiers quals = split.Quals;
+
+ // The last type node we saw with any nodes inside it.
+ const Type *lastTypeWithQuals = split.Ty;
+
+ while (true) {
+ QualType next;
+
+ // Do a single-step desugar, aborting the loop if the type isn't
+ // sugared.
+ switch (split.Ty->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(split.Ty); \
+ if (!ty->isSugared()) goto done; \
+ next = ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // Otherwise, split the underlying type. If that yields qualifiers,
+ // update the information.
+ split = next.split();
+ if (!split.Quals.empty()) {
+ lastTypeWithQuals = split.Ty;
+ quals.addConsistentQualifiers(split.Quals);
+ }
+ }
+
+ done:
+ return SplitQualType(lastTypeWithQuals, quals);
+}
+
+QualType QualType::IgnoreParens(QualType T) {
+ // FIXME: this seems inherently un-qualifiers-safe.
+ while (const ParenType *PT = T->getAs<ParenType>())
+ T = PT->getInnerType();
+ return T;
+}
+
+/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
+/// sugar off the given type. This should produce an object of the
+/// same dynamic type as the canonical type.
+const Type *Type::getUnqualifiedDesugaredType() const {
+ const Type *Cur = this;
+
+ while (true) {
+ switch (Cur->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Class: { \
+ const Class##Type *Ty = cast<Class##Type>(Cur); \
+ if (!Ty->isSugared()) return Cur; \
+ Cur = Ty->desugar().getTypePtr(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+
+bool Type::isDerivedType() const {
+ switch (CanonicalType->getTypeClass()) {
+ case Pointer:
+ case VariableArray:
+ case ConstantArray:
+ case IncompleteArray:
+ case FunctionProto:
+ case FunctionNoProto:
+ case LValueReference:
+ case RValueReference:
+ case Record:
+ return true;
+ default:
+ return false;
+ }
+}
+bool Type::isClassType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isClass();
+ return false;
+}
+bool Type::isStructureType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isStruct();
+ return false;
+}
+bool Type::isStructureOrClassType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isStruct() || RT->getDecl()->isClass();
+ return false;
+}
+bool Type::isVoidPointerType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ return PT->getPointeeType()->isVoidType();
+ return false;
+}
+
+bool Type::isUnionType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isUnion();
+ return false;
+}
+
+bool Type::isComplexType() const {
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::isComplexIntegerType() const {
+ // Check for GCC complex integer extension.
+ return getAsComplexIntegerType();
+}
+
+const ComplexType *Type::getAsComplexIntegerType() const {
+ if (const ComplexType *Complex = getAs<ComplexType>())
+ if (Complex->getElementType()->isIntegerType())
+ return Complex;
+ return 0;
+}
+
+QualType Type::getPointeeType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ return PT->getPointeeType();
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->getPointeeType();
+ if (const BlockPointerType *BPT = getAs<BlockPointerType>())
+ return BPT->getPointeeType();
+ if (const ReferenceType *RT = getAs<ReferenceType>())
+ return RT->getPointeeType();
+ return QualType();
+}
+
+const RecordType *Type::getAsStructureType() const {
+ // If this is directly a structure type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isStruct())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isStruct())
+ return 0;
+
+ // If this is a typedef for a structure type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getUnqualifiedDesugaredType());
+ }
+ return 0;
+}
+
+const RecordType *Type::getAsUnionType() const {
+ // If this is directly a union type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isUnion())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isUnion())
+ return 0;
+
+ // If this is a typedef for a union type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getUnqualifiedDesugaredType());
+ }
+
+ return 0;
+}
+
+ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols)
+ : Type(ObjCObject, Canonical, false, false, false, false),
+ BaseType(Base)
+{
+ ObjCObjectTypeBits.NumProtocols = NumProtocols;
+ assert(getNumProtocols() == NumProtocols &&
+ "bitfield overflow in protocol count");
+ if (NumProtocols)
+ memcpy(getProtocolStorage(), Protocols,
+ NumProtocols * sizeof(ObjCProtocolDecl*));
+}
+
+const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
+  // There is no sugar for ObjCObjectTypes; just return the canonical
+  // type pointer if it is the right class. There is no typedef information to
+  // return, and these cannot be address-space qualified.
+ if (const ObjCObjectType *T = getAs<ObjCObjectType>())
+ if (T->getNumProtocols() && T->getInterface())
+ return T;
+ return 0;
+}
+
+bool Type::isObjCQualifiedInterfaceType() const {
+ return getAsObjCQualifiedInterfaceType() != 0;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
+  // There is no sugar for ObjCQualifiedIdTypes; just return the canonical
+  // type pointer if it is the right class.
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCQualifiedIdType())
+ return OPT;
+ }
+ return 0;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
+  // There is no sugar for ObjCQualifiedClassTypes; just return the canonical
+  // type pointer if it is the right class.
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCQualifiedClassType())
+ return OPT;
+ }
+ return 0;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->getInterfaceType())
+ return OPT;
+ }
+ return 0;
+}
+
+const CXXRecordDecl *Type::getCXXRecordDeclForPointerType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ if (const RecordType *RT = PT->getPointeeType()->getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RT->getDecl());
+ return 0;
+}
+
+CXXRecordDecl *Type::getAsCXXRecordDecl() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RT->getDecl());
+ else if (const InjectedClassNameType *Injected
+ = getAs<InjectedClassNameType>())
+ return Injected->getDecl();
+
+ return 0;
+}
+
+namespace {
+ class GetContainedAutoVisitor :
+ public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
+ public:
+ using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
+ AutoType *Visit(QualType T) {
+ if (T.isNull())
+ return 0;
+ return Visit(T.getTypePtr());
+ }
+
+ // The 'auto' type itself.
+ AutoType *VisitAutoType(const AutoType *AT) {
+ return const_cast<AutoType*>(AT);
+ }
+
+ // Only these types can contain the desired 'auto' type.
+ AutoType *VisitPointerType(const PointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitBlockPointerType(const BlockPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitReferenceType(const ReferenceType *T) {
+ return Visit(T->getPointeeTypeAsWritten());
+ }
+ AutoType *VisitMemberPointerType(const MemberPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitArrayType(const ArrayType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitVectorType(const VectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitFunctionType(const FunctionType *T) {
+ return Visit(T->getResultType());
+ }
+ AutoType *VisitParenType(const ParenType *T) {
+ return Visit(T->getInnerType());
+ }
+ AutoType *VisitAttributedType(const AttributedType *T) {
+ return Visit(T->getModifiedType());
+ }
+ };
+}
+
+AutoType *Type::getContainedAutoType() const {
+ return GetContainedAutoVisitor().Visit(this);
+}
+
+bool Type::hasIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isIntegerType();
+ else
+ return isIntegerType();
+}
+
+/// \brief Determine whether this type is an integral type.
+///
+/// This routine determines whether the given type is an integral type per
+/// C++ [basic.fundamental]p7. Although the C standard does not define the
+/// term "integral type", it has a similar term "integer type", and in C++
+/// the two terms are equivalent. However, C's "integer type" includes
+/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
+/// parameter is used to determine whether we should be following the C or
+/// C++ rules when determining whether this type is an integral/integer type.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// type", use this routine.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// or enumeration type", use \c isIntegralOrEnumerationType() instead.
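+///
+/// For example, given a complete enumeration type such as
+/// \code
+///   enum Color { Red, Green };
+/// \endcode
+/// this routine returns true under the C rules, where complete enum types
+/// are integer types, and false under the C++ rules, where enumeration
+/// types are not integral types.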
+///
+/// \param Ctx The context in which this type occurs.
+///
+/// \returns true if the type is considered an integral type, false otherwise.
+bool Type::isIntegralType(ASTContext &Ctx) const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ if (!Ctx.getLangOpts().CPlusPlus)
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete(); // Complete enum types are integral in C.
+
+ return false;
+}
+
+bool Type::isIntegralOrUnscopedEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ // C++0x: However, if the underlying type of the enum is fixed, it is
+ // considered complete.
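+  // For example, an opaque-enum-declaration with a fixed underlying type,
+  // such as "enum E : int;", already declares a complete enumeration type.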
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+
+ return false;
+}
+
+bool Type::isCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char_U ||
+ BT->getKind() == BuiltinType::UChar ||
+ BT->getKind() == BuiltinType::Char_S ||
+ BT->getKind() == BuiltinType::SChar;
+ return false;
+}
+
+bool Type::isWideCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U;
+ return false;
+}
+
+bool Type::isChar16Type() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char16;
+ return false;
+}
+
+bool Type::isChar32Type() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char32;
+ return false;
+}
+
+/// \brief Determine whether this type is any of the built-in character
+/// types.
+bool Type::isAnyCharacterType() const {
+ const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType);
+ if (BT == 0) return false;
+ switch (BT->getKind()) {
+ default: return false;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::WChar_S:
+ return true;
+ }
+}
+
+/// isSignedIntegerType - Return true if this is an integer type that is
+/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
+/// or an enum decl which has a signed representation.
+bool Type::isSignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::Int128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::isSignedIntegerOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::Int128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::hasSignedIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isSignedIntegerType();
+ else
+ return isSignedIntegerType();
+}
+
+/// isUnsignedIntegerType - Return true if this is an integer type that is
+/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], or an
+/// enum decl which has an unsigned representation.
+bool Type::isUnsignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::UInt128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::isUnsignedIntegerOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::UInt128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::hasUnsignedIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isUnsignedIntegerType();
+ else
+ return isUnsignedIntegerType();
+}
+
+bool Type::isFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Half &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::hasFloatingRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isFloatingType();
+ else
+ return isFloatingType();
+}
+
+bool Type::isRealFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->isFloatingPoint();
+ return false;
+}
+
+bool Type::isRealType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+ return false;
+}
+
+bool Type::isArithmeticType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ // GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
+ // If a body isn't seen by the time we get here, return false.
+ //
+ // C++0x: Enumerations are not arithmetic types. For now, just return
+ // false for scoped enumerations since that will disable any
+ // unwanted implicit conversions.
+ return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
+ return isa<ComplexType>(CanonicalType);
+}
+
+Type::ScalarTypeKind Type::getScalarTypeKind() const {
+ assert(isScalarType());
+
+ const Type *T = CanonicalType.getTypePtr();
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) {
+ if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
+ if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
+ if (BT->isInteger()) return STK_Integral;
+ if (BT->isFloatingPoint()) return STK_Floating;
+ llvm_unreachable("unknown scalar builtin type");
+ } else if (isa<PointerType>(T)) {
+ return STK_CPointer;
+ } else if (isa<BlockPointerType>(T)) {
+ return STK_BlockPointer;
+ } else if (isa<ObjCObjectPointerType>(T)) {
+ return STK_ObjCObjectPointer;
+ } else if (isa<MemberPointerType>(T)) {
+ return STK_MemberPointer;
+ } else if (isa<EnumType>(T)) {
+ assert(cast<EnumType>(T)->getDecl()->isComplete());
+ return STK_Integral;
+ } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) {
+ if (CT->getElementType()->isRealFloatingType())
+ return STK_FloatingComplex;
+ return STK_IntegralComplex;
+ }
+
+ llvm_unreachable("unknown scalar type");
+}
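+
+// Informally, the mapping above gives: 'bool' -> STK_Bool, 'int' and complete
+// enumerations -> STK_Integral, 'double' -> STK_Floating, 'T *' and
+// 'nullptr_t' -> STK_CPointer, '_Complex double' -> STK_FloatingComplex.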
+
+/// \brief Determines whether the type is a C++ aggregate type or a C
+/// aggregate or union type.
+///
+/// An aggregate type is an array or a class type (struct, union, or
+/// class) that has no user-declared constructors, no private or
+/// protected non-static data members, no base classes, and no virtual
+/// functions (C++ [dcl.init.aggr]p1). The notion of an aggregate type
+/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also
+/// includes union types.
+bool Type::isAggregateType() const {
+ if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ return ClassDecl->isAggregate();
+
+ return true;
+ }
+
+ return isa<ArrayType>(CanonicalType);
+}
+
+/// isConstantSizeType - Return true if this is not a variable sized type,
+/// according to the rules of C99 6.7.5p3. It is not legal to call this on
+/// incomplete types or dependent types.
+bool Type::isConstantSizeType() const {
+ assert(!isIncompleteType() && "This doesn't make sense for incomplete types");
+ assert(!isDependentType() && "This doesn't make sense for dependent types");
+ // The VAT must have a size, as it is known to be complete.
+ return !isa<VariableArrayType>(CanonicalType);
+}
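+
+// Informally: 'int a[10]' has a constant size, while the C99 VLA 'int a[n]'
+// does not; per the asserts above, this may only be asked of complete,
+// non-dependent types.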
+
+/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1)
+/// - a type that can describe objects, but which lacks information needed to
+/// determine its size.
+bool Type::isIncompleteType(NamedDecl **Def) const {
+ if (Def)
+ *Def = 0;
+
+ switch (CanonicalType->getTypeClass()) {
+ default: return false;
+ case Builtin:
+ // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
+ // be completed.
+ return isVoidType();
+ case Enum: {
+ EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = EnumD;
+
+ // An enumeration with fixed underlying type is complete (C++0x 7.2p3).
+ if (EnumD->isFixed())
+ return false;
+
+ return !EnumD->isCompleteDefinition();
+ }
+ case Record: {
+ // A tagged type (struct/union/enum/class) is incomplete if the decl is a
+ // forward declaration, but not a full definition (C99 6.2.5p22).
+ RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Rec;
+ return !Rec->isCompleteDefinition();
+ }
+ case ConstantArray:
+ // An array is incomplete if its element type is incomplete
+ // (C++ [dcl.array]p1).
+ // We don't handle variable arrays (they're not allowed in C++) or
+ // dependent-sized arrays (dependent types are never treated as incomplete).
+ return cast<ArrayType>(CanonicalType)->getElementType()
+ ->isIncompleteType(Def);
+ case IncompleteArray:
+ // An array of unknown size is an incomplete type (C99 6.2.5p22).
+ return true;
+ case ObjCObject:
+ return cast<ObjCObjectType>(CanonicalType)->getBaseType()
+ ->isIncompleteType(Def);
+ case ObjCInterface: {
+ // ObjC interfaces are incomplete if they are @class, not @interface.
+ ObjCInterfaceDecl *Interface
+ = cast<ObjCInterfaceType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Interface;
+ return !Interface->hasDefinition();
+ }
+ }
+}
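+
+// A few informal examples of the cases above: 'void', a forward-declared
+// 'struct S;', and an unknown-bound 'int[]' are all incomplete here, while an
+// opaque 'enum E : int;' with a fixed underlying type is not.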
+
+bool QualType::isPODType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case, except for incomplete arrays of PODs,
+ // which are PODs according to the standard.
+ if (isNull())
+ return false;
+
+ if ((*this)->isIncompleteArrayType())
+ return Context.getBaseElementType(*this).isPODType(Context);
+
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ switch (CanonicalType->getTypeClass()) {
+ // Everything not explicitly mentioned is not POD.
+ default: return false;
+ case Type::VariableArray:
+ case Type::ConstantArray:
+ // IncompleteArray is handled above.
+ return Context.getBaseElementType(*this).isPODType(Context);
+
+ case Type::ObjCObjectPointer:
+ case Type::BlockPointer:
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ return true;
+
+ case Type::Enum:
+ return true;
+
+ case Type::Record:
+ if (CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
+ return ClassDecl->isPOD();
+
+ // C struct/union is POD.
+ return true;
+ }
+}
+
+bool QualType::isTrivialType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case, except for incomplete arrays of PODs,
+ // which are PODs according to the standard.
+ if (isNull())
+ return false;
+
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTrivialType(Context);
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, trivial class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++0x [class]p5:
+ // A trivial class is a class that has a trivial default constructor
+ if (!ClassDecl->hasTrivialDefaultConstructor()) return false;
+ // and is trivially copyable.
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+bool QualType::isTriviallyCopyableType(ASTContext &Context) const {
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTrivialType(Context);
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ // C++0x [basic.types]p9
+ // Scalar types, trivially copyable class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ QualType CanonicalType = getCanonicalType();
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // Return false for incomplete types after skipping any incomplete array types
+ // which are expressly allowed by the standard and thus our API.
+ if (CanonicalType->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+
+
+bool Type::isLiteralType() const {
+ if (isDependentType())
+ return false;
+
+ // C++0x [basic.types]p10:
+ // A type is a literal type if it is:
+ // [...]
+ // -- an array of literal type.
+ // Extension: variable arrays cannot be literal types, since they're
+ // runtime-sized.
+ if (isVariableArrayType())
+ return false;
+ const Type *BaseTy = getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types; those are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // C++0x [basic.types]p10:
+ // A type is a literal type if it is:
+ // -- a scalar type; or
+ // As an extension, Clang treats vector types and complex types as
+ // literal types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType() ||
+ BaseTy->isAnyComplexType())
+ return true;
+ // -- a reference type; or
+ if (BaseTy->isReferenceType())
+ return true;
+ // -- a class type that has all of the following properties:
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ // -- a trivial destructor,
+ // -- every constructor call and full-expression in the
+ // brace-or-equal-initializers for non-static data members (if any)
+ // is a constant expression,
+ // -- it is an aggregate type or has at least one constexpr
+ // constructor or constructor template that is not a copy or move
+ // constructor, and
+ // -- all non-static data members and base classes of literal types
+ //
+ // We resolve DR1361 by ignoring the second bullet.
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return ClassDecl->isLiteral();
+
+ return true;
+ }
+
+ return false;
+}
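+
+// Informal examples of the rules above: scalar and reference types are
+// literal types, as is an aggregate such as 'struct P { int x, y; };' with a
+// trivial destructor; a class with a user-provided destructor is not.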
+
+bool Type::isStandardLayoutType() const {
+ if (isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, standard-layout class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called
+ // standard-layout types.
+ const Type *BaseTy = getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (!ClassDecl->isStandardLayout())
+ return false;
+
+ // Default to 'true' for non-C++ class types.
+ // FIXME: This is a bit dubious, but plain C structs should trivially meet
+ // all the requirements of standard layout classes.
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
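+
+// For instance, a plain C-style 'struct S { int a; float b; };' is standard
+// layout, while a class with virtual functions or with a mix of public and
+// private non-static data members is not (C++11 [class]p7).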
+
+// This is effectively the intersection of isTrivialType and
+// isStandardLayoutType. We implement it directly to avoid redundant
+// conversions from a type to a CXXRecordDecl.
+bool QualType::isCXX11PODType(ASTContext &Context) const {
+ const Type *ty = getTypePtr();
+ if (ty->isDependentType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if (ty->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ // C++11 [basic.types]p9:
+ // Scalar types, POD classes, arrays of such types, and cv-qualified
+ // versions of these types are collectively called trivial types.
+ const Type *BaseTy = ty->getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class [...]
+ if (!ClassDecl->isTrivial()) return false;
+
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class and
+ // a standard-layout class [...]
+ if (!ClassDecl->isStandardLayout()) return false;
+
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class and
+ // a standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types). [...]
+ //
+ // We don't directly query the recursive aspect as the requirements for
+ // both standard-layout classes and trivial classes apply recursively
+ // already.
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
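+
+// In short, per the checks above a class is a C++11 POD exactly when it is
+// both trivial and standard-layout: 'struct P { int a; double b; };'
+// qualifies, while a class with a virtual function or a user-provided
+// constructor does not.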
+
+bool Type::isPromotableIntegerType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ return true;
+ default:
+ return false;
+ }
+
+ // Enumerated types are promotable to their compatible integer types
+ // (C99 6.3.1.1), a.k.a. their underlying types (C++ [conv.prom]p2).
+ if (const EnumType *ET = getAs<EnumType>()){
+ if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
+ || ET->getDecl()->isScoped())
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
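+
+// For example, 'bool', 'char' and 'short' are promotable (they promote to
+// 'int' under C99 6.3.1.1 / C++ [conv.prom]), while 'int', 'long' and scoped
+// enumerations are not.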
+
+bool Type::isSpecifierType() const {
+ // Note that this intentionally does not use the canonical type.
+ switch (getTypeClass()) {
+ case Builtin:
+ case Record:
+ case Enum:
+ case Typedef:
+ case Complex:
+ case TypeOfExpr:
+ case TypeOf:
+ case TemplateTypeParm:
+ case SubstTemplateTypeParm:
+ case TemplateSpecialization:
+ case Elaborated:
+ case DependentName:
+ case DependentTemplateSpecialization:
+ case ObjCInterface:
+ case ObjCObject:
+ case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
+ return true;
+ default:
+ return false;
+ }
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
+ switch (TypeSpec) {
+ default: return ETK_None;
+ case TST_typename: return ETK_Typename;
+ case TST_class: return ETK_Class;
+ case TST_struct: return ETK_Struct;
+ case TST_union: return ETK_Union;
+ case TST_enum: return ETK_Enum;
+ }
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+ switch(TypeSpec) {
+ case TST_class: return TTK_Class;
+ case TST_struct: return TTK_Struct;
+ case TST_union: return TTK_Union;
+ case TST_enum: return TTK_Enum;
+ }
+
+ llvm_unreachable("Type specifier is not a tag type kind.");
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
+ switch (Kind) {
+ case TTK_Class: return ETK_Class;
+ case TTK_Struct: return ETK_Struct;
+ case TTK_Union: return ETK_Union;
+ case TTK_Enum: return ETK_Enum;
+ }
+ llvm_unreachable("Unknown tag type kind.");
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_Class: return TTK_Class;
+ case ETK_Struct: return TTK_Struct;
+ case ETK_Union: return TTK_Union;
+ case ETK_Enum: return TTK_Enum;
+ case ETK_None: // Fall through.
+ case ETK_Typename:
+ llvm_unreachable("Elaborated type keyword is not a tag type kind.");
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+bool
+TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None:
+ case ETK_Typename:
+ return false;
+ case ETK_Class:
+ case ETK_Struct:
+ case ETK_Union:
+ case ETK_Enum:
+ return true;
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+const char*
+TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None: return "";
+ case ETK_Typename: return "typename";
+ case ETK_Class: return "class";
+ case ETK_Struct: return "struct";
+ case ETK_Union: return "union";
+ case ETK_Enum: return "enum";
+ }
+
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+DependentTemplateSpecializationType::DependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name,
+ unsigned NumArgs, const TemplateArgument *Args,
+ QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
+ /*VariablyModified=*/false,
+ NNS && NNS->containsUnexpandedParameterPack()),
+ NNS(NNS), Name(Name), NumArgs(NumArgs) {
+ assert((!NNS || NNS->isDependent()) &&
+ "DependentTemplateSpecializatonType requires dependent qualifier");
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
+ }
+}
+
+void
+DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(Qualifier);
+ ID.AddPointer(Name);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
+bool Type::isElaboratedTypeSpecifier() const {
+ ElaboratedTypeKeyword Keyword;
+ if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
+ Keyword = Elab->getKeyword();
+ else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
+ Keyword = DepName->getKeyword();
+ else if (const DependentTemplateSpecializationType *DepTST =
+ dyn_cast<DependentTemplateSpecializationType>(this))
+ Keyword = DepTST->getKeyword();
+ else
+ return false;
+
+ return TypeWithKeyword::KeywordIsTagTypeKind(Keyword);
+}
+
+const char *Type::getTypeClassName() const {
+ switch (TypeBits.TC) {
+#define ABSTRACT_TYPE(Derived, Base)
+#define TYPE(Derived, Base) case Derived: return #Derived;
+#include "clang/AST/TypeNodes.def"
+ }
+
+ llvm_unreachable("Invalid type class.");
+}
+
+const char *BuiltinType::getName(const PrintingPolicy &Policy) const {
+ switch (getKind()) {
+ case Void: return "void";
+ case Bool: return Policy.Bool ? "bool" : "_Bool";
+ case Char_S: return "char";
+ case Char_U: return "char";
+ case SChar: return "signed char";
+ case Short: return "short";
+ case Int: return "int";
+ case Long: return "long";
+ case LongLong: return "long long";
+ case Int128: return "__int128";
+ case UChar: return "unsigned char";
+ case UShort: return "unsigned short";
+ case UInt: return "unsigned int";
+ case ULong: return "unsigned long";
+ case ULongLong: return "unsigned long long";
+ case UInt128: return "unsigned __int128";
+ case Half: return "half";
+ case Float: return "float";
+ case Double: return "double";
+ case LongDouble: return "long double";
+ case WChar_S:
+ case WChar_U: return "wchar_t";
+ case Char16: return "char16_t";
+ case Char32: return "char32_t";
+ case NullPtr: return "nullptr_t";
+ case Overload: return "<overloaded function type>";
+ case BoundMember: return "<bound member function type>";
+ case PseudoObject: return "<pseudo-object type>";
+ case Dependent: return "<dependent type>";
+ case UnknownAny: return "<unknown type>";
+ case ARCUnbridgedCast: return "<ARC unbridged cast type>";
+ case ObjCId: return "id";
+ case ObjCClass: return "Class";
+ case ObjCSel: return "SEL";
+ }
+
+ llvm_unreachable("Invalid builtin type.");
+}
+
+QualType QualType::getNonLValueExprType(ASTContext &Context) const {
+ if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+
+ // C++0x [basic.lval]:
+ // Class prvalues can have cv-qualified types; non-class prvalues always
+ // have cv-unqualified types.
+ //
+ // See also C99 6.3.2.1p2.
+ if (!Context.getLangOpts().CPlusPlus ||
+ (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType()))
+ return getUnqualifiedType();
+
+ return *this;
+}
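+
+// Illustration: the prvalue type of 'const int' is plain 'int', while a
+// 'const S' class prvalue keeps its cv-qualification in C++; reference types
+// are first dropped to their pointee type.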
+
+StringRef FunctionType::getNameForCallConv(CallingConv CC) {
+ switch (CC) {
+ case CC_Default:
+ llvm_unreachable("no name for default cc");
+
+ case CC_C: return "cdecl";
+ case CC_X86StdCall: return "stdcall";
+ case CC_X86FastCall: return "fastcall";
+ case CC_X86ThisCall: return "thiscall";
+ case CC_X86Pascal: return "pascal";
+ case CC_AAPCS: return "aapcs";
+ case CC_AAPCS_VFP: return "aapcs-vfp";
+ }
+
+ llvm_unreachable("Invalid calling convention.");
+}
+
+FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
+ unsigned numArgs, QualType canonical,
+ const ExtProtoInfo &epi)
+ : FunctionType(FunctionProto, result, epi.TypeQuals, epi.RefQualifier,
+ canonical,
+ result->isDependentType(),
+ result->isInstantiationDependentType(),
+ result->isVariablyModifiedType(),
+ result->containsUnexpandedParameterPack(),
+ epi.ExtInfo),
+ NumArgs(numArgs), NumExceptions(epi.NumExceptions),
+ ExceptionSpecType(epi.ExceptionSpecType),
+ HasAnyConsumedArgs(epi.ConsumedArguments != 0),
+ Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn)
+{
+ // Fill in the trailing argument array.
+ QualType *argSlot = reinterpret_cast<QualType*>(this+1);
+ for (unsigned i = 0; i != numArgs; ++i) {
+ if (args[i]->isDependentType())
+ setDependent();
+ else if (args[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (args[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ argSlot[i] = args[i];
+ }
+
+ if (getExceptionSpecType() == EST_Dynamic) {
+ // Fill in the exception array.
+ QualType *exnSlot = argSlot + numArgs;
+ for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
+ if (epi.Exceptions[i]->isDependentType())
+ setDependent();
+ else if (epi.Exceptions[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (epi.Exceptions[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ exnSlot[i] = epi.Exceptions[i];
+ }
+ } else if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ // Store the noexcept expression and context.
+ Expr **noexSlot = reinterpret_cast<Expr**>(argSlot + numArgs);
+ *noexSlot = epi.NoexceptExpr;
+
+ if (epi.NoexceptExpr) {
+ if (epi.NoexceptExpr->isValueDependent()
+ || epi.NoexceptExpr->isTypeDependent())
+ setDependent();
+ else if (epi.NoexceptExpr->isInstantiationDependent())
+ setInstantiationDependent();
+ }
+ } else if (getExceptionSpecType() == EST_Uninstantiated) {
+ // Store the function decl from which we will resolve our
+ // exception specification.
+ FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + numArgs);
+ slot[0] = epi.ExceptionSpecDecl;
+ slot[1] = epi.ExceptionSpecTemplate;
+ // This exception specification doesn't make the type dependent, because
+ // it's not instantiated as part of instantiating the type.
+ }
+
+ if (epi.ConsumedArguments) {
+ bool *consumedArgs = const_cast<bool*>(getConsumedArgsBuffer());
+ for (unsigned i = 0; i != numArgs; ++i)
+ consumedArgs[i] = epi.ConsumedArguments[i];
+ }
+}
+
+FunctionProtoType::NoexceptResult
+FunctionProtoType::getNoexceptSpec(ASTContext &ctx) const {
+ ExceptionSpecificationType est = getExceptionSpecType();
+ if (est == EST_BasicNoexcept)
+ return NR_Nothrow;
+
+ if (est != EST_ComputedNoexcept)
+ return NR_NoNoexcept;
+
+ Expr *noexceptExpr = getNoexceptExpr();
+ if (!noexceptExpr)
+ return NR_BadNoexcept;
+ if (noexceptExpr->isValueDependent())
+ return NR_Dependent;
+
+ llvm::APSInt value;
+ bool isICE = noexceptExpr->isIntegerConstantExpr(value, ctx, 0,
+ /*evaluated*/false);
+ (void)isICE;
+ assert(isICE && "AST should not contain bad noexcept expressions.");
+
+ return value.getBoolValue() ? NR_Nothrow : NR_Throw;
+}
+
+bool FunctionProtoType::isTemplateVariadic() const {
+ for (unsigned ArgIdx = getNumArgs(); ArgIdx; --ArgIdx)
+ if (isa<PackExpansionType>(getArgType(ArgIdx - 1)))
+ return true;
+
+ return false;
+}
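+
+// e.g. for 'template <class ...Ts> void f(Ts ...ts);' the parameter type is a
+// PackExpansionType, so the prototype is template-variadic even though it is
+// not C-variadic.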
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
+ const QualType *ArgTys, unsigned NumArgs,
+ const ExtProtoInfo &epi,
+ const ASTContext &Context) {
+
+ // We have to be careful not to get ambiguous profile encodings.
+ // Note that valid type pointers are never ambiguous with anything else.
+ //
+ // The encoding grammar begins:
+ // type type* bool int bool
+ // If that final bool is true, then there is a section for the EH spec:
+ // bool type*
+ // This is followed by an optional "consumed argument" section of the
+ // same length as the first type sequence:
+ // bool*
+ // Finally, we have the ext info and trailing return type flag:
+ // int bool
+ //
+ // There is no ambiguity between the consumed arguments and an empty EH
+ // spec because of the leading 'bool' which unambiguously indicates
+ // whether the following bool is the EH spec or part of the arguments.
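+ //
+ // As an informal illustration, a non-variadic 'void f(int)' with no
+ // exception specification profiles as: the result pointer, one argument
+ // pointer, a single packed integer carrying the variadic/qualifier/
+ // ref-qualifier/EH-kind bits, the ext info, and the trailing-return flag,
+ // with no EH or consumed-argument sections.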
+
+ ID.AddPointer(Result.getAsOpaquePtr());
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i].getAsOpaquePtr());
+ // This method is relatively performance sensitive, so as a performance
+ // shortcut, use one AddInteger call instead of four for the next four
+ // fields.
+ assert(!(unsigned(epi.Variadic) & ~1) &&
+ !(unsigned(epi.TypeQuals) & ~255) &&
+ !(unsigned(epi.RefQualifier) & ~3) &&
+ !(unsigned(epi.ExceptionSpecType) & ~7) &&
+ "Values larger than expected.");
+ ID.AddInteger(unsigned(epi.Variadic) +
+ (epi.TypeQuals << 1) +
+ (epi.RefQualifier << 9) +
+ (epi.ExceptionSpecType << 11));
+ if (epi.ExceptionSpecType == EST_Dynamic) {
+ for (unsigned i = 0; i != epi.NumExceptions; ++i)
+ ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr());
+ } else if (epi.ExceptionSpecType == EST_ComputedNoexcept && epi.NoexceptExpr){
+ epi.NoexceptExpr->Profile(ID, Context, false);
+ } else if (epi.ExceptionSpecType == EST_Uninstantiated) {
+ ID.AddPointer(epi.ExceptionSpecDecl->getCanonicalDecl());
+ }
+ if (epi.ConsumedArguments) {
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddBoolean(epi.ConsumedArguments[i]);
+ }
+ epi.ExtInfo.Profile(ID);
+ ID.AddBoolean(epi.HasTrailingReturn);
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Ctx) {
+ Profile(ID, getResultType(), arg_type_begin(), NumArgs, getExtProtoInfo(),
+ Ctx);
+}
+
+QualType TypedefType::desugar() const {
+ return getDecl()->getUnderlyingType();
+}
+
+TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
+ : Type(TypeOfExpr, can, E->isTypeDependent(),
+ E->isInstantiationDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ TOExpr(E) {
+}
+
+bool TypeOfExprType::isSugared() const {
+ return !TOExpr->isTypeDependent();
+}
+
+QualType TypeOfExprType::desugar() const {
+ if (isSugared())
+ return getUnderlyingExpr()->getType();
+
+ return QualType(this, 0);
+}
+
+void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, Expr *E) {
+ E->Profile(ID, Context, true);
+}
+
+DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
+ // C++11 [temp.type]p2: "If an expression e involves a template parameter,
+ // decltype(e) denotes a unique dependent type." Hence a decltype type is
+ // type-dependent even if its expression is only instantiation-dependent.
+ : Type(Decltype, can, E->isInstantiationDependent(),
+ E->isInstantiationDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ E(E),
+ UnderlyingType(underlyingType) {
+}
+
+bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
+
+QualType DecltypeType::desugar() const {
+ if (isSugared())
+ return getUnderlyingType();
+
+ return QualType(this, 0);
+}
+
+DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
+ : DecltypeType(E, Context.DependentTy), Context(Context) { }
+
+void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, Expr *E) {
+ E->Profile(ID, Context, true);
+}
+
+TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
+ : Type(TC, can, D->isDependentType(),
+ /*InstantiationDependent=*/D->isDependentType(),
+ /*VariablyModified=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ decl(const_cast<TagDecl*>(D)) {}
+
+static TagDecl *getInterestingTagDecl(TagDecl *decl) {
+ for (TagDecl::redecl_iterator I = decl->redecls_begin(),
+ E = decl->redecls_end();
+ I != E; ++I) {
+ if (I->isCompleteDefinition() || I->isBeingDefined())
+ return *I;
+ }
+ // If there's no definition (not even in progress), return what we have.
+ return decl;
+}
+
+UnaryTransformType::UnaryTransformType(QualType BaseType,
+ QualType UnderlyingType,
+ UTTKind UKind,
+ QualType CanonicalType)
+ : Type(UnaryTransform, CanonicalType, UnderlyingType->isDependentType(),
+ UnderlyingType->isInstantiationDependentType(),
+ UnderlyingType->isVariablyModifiedType(),
+ BaseType->containsUnexpandedParameterPack())
+ , BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind)
+{}
+
+TagDecl *TagType::getDecl() const {
+ return getInterestingTagDecl(decl);
+}
+
+bool TagType::isBeingDefined() const {
+ return getDecl()->isBeingDefined();
+}
+
+CXXRecordDecl *InjectedClassNameType::getDecl() const {
+ return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
+}
+
+IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
+ return isCanonicalUnqualified() ? 0 : getDecl()->getIdentifier();
+}
+
+SubstTemplateTypeParmPackType::
+SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+ QualType Canon,
+ const TemplateArgument &ArgPack)
+ : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
+ Replaced(Param),
+ Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size())
+{
+}
+
+TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
+ return TemplateArgument(Arguments, NumArguments);
+}
+
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReplacedParameter(), getArgumentPack());
+}
+
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Replaced);
+ ID.AddInteger(ArgPack.pack_size());
+ for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
+ PEnd = ArgPack.pack_end();
+ P != PEnd; ++P)
+ ID.AddPointer(P->getAsType().getAsOpaquePtr());
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgumentListInfo &Args,
+ bool &InstantiationDependent) {
+ return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size(),
+ InstantiationDependent);
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].getArgument().isDependent()) {
+ InstantiationDependent = true;
+ return true;
+ }
+
+ if (Args[i].getArgument().isInstantiationDependent())
+ InstantiationDependent = true;
+ }
+ return false;
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].isDependent()) {
+ InstantiationDependent = true;
+ return true;
+ }
+
+ if (Args[i].isInstantiationDependent())
+ InstantiationDependent = true;
+ }
+ return false;
+}
+
+TemplateSpecializationType::
+TemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args, unsigned NumArgs,
+ QualType Canon, QualType AliasedType)
+ : Type(TemplateSpecialization,
+ Canon.isNull()? QualType(this, 0) : Canon,
+ Canon.isNull()? T.isDependent() : Canon->isDependentType(),
+ Canon.isNull()? T.isDependent()
+ : Canon->isInstantiationDependentType(),
+ false,
+ Canon.isNull()? T.containsUnexpandedParameterPack()
+ : Canon->containsUnexpandedParameterPack()),
+ Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
+ assert(!T.getAsDependentTemplateName() &&
+ "Use DependentTemplateSpecializationType for dependent template-name");
+ assert((T.getKind() == TemplateName::Template ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParm ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
+ "Unexpected template name for TemplateSpecializationType");
+ bool InstantiationDependent;
+ (void)InstantiationDependent;
+ assert((!Canon.isNull() ||
+ T.isDependent() ||
+ anyDependentTemplateArguments(Args, NumArgs,
+ InstantiationDependent)) &&
+ "No canonical type for non-dependent class template specialization");
+
+ TemplateArgument *TemplateArgs
+ = reinterpret_cast<TemplateArgument *>(this + 1);
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // Update dependent and variably-modified bits.
+ // If the canonical type exists and is non-dependent, the template
+ // specialization type can be non-dependent even if one of the type
+ // arguments is. Given:
+ // template<typename T> using U = int;
+ // U<T> is always non-dependent, irrespective of the type T.
+ if (Canon.isNull() && Args[Arg].isDependent())
+ setDependent();
+ else if (Args[Arg].isInstantiationDependent())
+ setInstantiationDependent();
+
+ if (Args[Arg].getKind() == TemplateArgument::Type &&
+ Args[Arg].getAsType()->isVariablyModifiedType())
+ setVariablyModified();
+ if (Canon.isNull() && Args[Arg].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
+ }
+
+ // Store the aliased type if this is a type alias template specialization.
+ if (TypeAlias) {
+ TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
+ *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
+ }
+}
+
+void
+TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const ASTContext &Context) {
+ T.Profile(ID);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
+QualType
+QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
+ if (!hasNonFastQualifiers())
+ return QT.withFastQualifiers(getFastQualifiers());
+
+ return Context.getQualifiedType(QT, *this);
+}
+
+QualType
+QualifierCollector::apply(const ASTContext &Context, const Type *T) const {
+ if (!hasNonFastQualifiers())
+ return QualType(T, getFastQualifiers());
+
+ return Context.getQualifiedType(T, *this);
+}
+
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
+ QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) {
+ ID.AddPointer(BaseType.getAsOpaquePtr());
+ for (unsigned i = 0; i != NumProtocols; i++)
+ ID.AddPointer(Protocols[i]);
+}
+
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getBaseType(), qual_begin(), getNumProtocols());
+}
+
+namespace {
+
+/// \brief The cached properties of a type.
+class CachedProperties {
+ NamedDecl::LinkageInfo LV;
+ bool local;
+
+public:
+ CachedProperties(NamedDecl::LinkageInfo LV, bool local)
+ : LV(LV), local(local) {}
+
+ Linkage getLinkage() const { return LV.linkage(); }
+ Visibility getVisibility() const { return LV.visibility(); }
+ bool isVisibilityExplicit() const { return LV.visibilityExplicit(); }
+ bool hasLocalOrUnnamedType() const { return local; }
+
+ friend CachedProperties merge(CachedProperties L, CachedProperties R) {
+ NamedDecl::LinkageInfo MergedLV = L.LV;
+ MergedLV.merge(R.LV);
+ return CachedProperties(MergedLV,
+ L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType());
+ }
+};
+}
+
+static CachedProperties computeCachedProperties(const Type *T);
+
+namespace clang {
+/// The type-property cache. This is templated so as to be
+/// instantiated at an internal type to prevent unnecessary symbol
+/// leakage.
+template <class Private> class TypePropertyCache {
+public:
+ static CachedProperties get(QualType T) {
+ return get(T.getTypePtr());
+ }
+
+ static CachedProperties get(const Type *T) {
+ ensure(T);
+ NamedDecl::LinkageInfo LV(T->TypeBits.getLinkage(),
+ T->TypeBits.getVisibility(),
+ T->TypeBits.isVisibilityExplicit());
+ return CachedProperties(LV, T->TypeBits.hasLocalOrUnnamedType());
+ }
+
+ static void ensure(const Type *T) {
+ // If the cache is valid, we're okay.
+ if (T->TypeBits.isCacheValid()) return;
+
+ // If this type is non-canonical, ask its canonical type for the
+ // relevant information.
+ if (!T->isCanonicalUnqualified()) {
+ const Type *CT = T->getCanonicalTypeInternal().getTypePtr();
+ ensure(CT);
+ T->TypeBits.CacheValidAndVisibility =
+ CT->TypeBits.CacheValidAndVisibility;
+ T->TypeBits.CachedExplicitVisibility =
+ CT->TypeBits.CachedExplicitVisibility;
+ T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage;
+ T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed;
+ return;
+ }
+
+ // Compute the cached properties and then set the cache.
+ CachedProperties Result = computeCachedProperties(T);
+ T->TypeBits.CacheValidAndVisibility = Result.getVisibility() + 1U;
+ T->TypeBits.CachedExplicitVisibility = Result.isVisibilityExplicit();
+ assert(T->TypeBits.isCacheValid() &&
+ T->TypeBits.getVisibility() == Result.getVisibility());
+ T->TypeBits.CachedLinkage = Result.getLinkage();
+ T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType();
+ }
+};
+}
+
+// Instantiate the friend template at a private class. In a
+// reasonable implementation, these symbols will be internal.
+// It is terrible that this is the best way to accomplish this.
+namespace { class Private {}; }
+typedef TypePropertyCache<Private> Cache;
+
+static CachedProperties computeCachedProperties(const Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(Class,Base)
+#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't expect a non-canonical type here");
+
+#define TYPE(Class,Base)
+#define DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ // Treat instantiation-dependent types as external.
+ assert(T->isInstantiationDependentType());
+ return CachedProperties(NamedDecl::LinkageInfo(), false);
+
+ case Type::Builtin:
+ // C++ [basic.link]p8:
+ // A type is said to have linkage if and only if:
+ // - it is a fundamental type (3.9.1); or
+ return CachedProperties(NamedDecl::LinkageInfo(), false);
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *Tag = cast<TagType>(T)->getDecl();
+
+ // C++ [basic.link]p8:
+ // - it is a class or enumeration type that is named (or has a name
+ // for linkage purposes (7.1.3)) and the name has linkage; or
+ // - it is a specialization of a class template (14); or
+ NamedDecl::LinkageInfo LV = Tag->getLinkageAndVisibility();
+ bool IsLocalOrUnnamed =
+ Tag->getDeclContext()->isFunctionOrMethod() ||
+ (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl());
+ return CachedProperties(LV, IsLocalOrUnnamed);
+ }
+
+ // C++ [basic.link]p8:
+ // - it is a compound type (3.9.2) other than a class or enumeration,
+ // compounded exclusively from types that have linkage; or
+ case Type::Complex:
+ return Cache::get(cast<ComplexType>(T)->getElementType());
+ case Type::Pointer:
+ return Cache::get(cast<PointerType>(T)->getPointeeType());
+ case Type::BlockPointer:
+ return Cache::get(cast<BlockPointerType>(T)->getPointeeType());
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return Cache::get(cast<ReferenceType>(T)->getPointeeType());
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ return merge(Cache::get(MPT->getClass()),
+ Cache::get(MPT->getPointeeType()));
+ }
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ return Cache::get(cast<ArrayType>(T)->getElementType());
+ case Type::Vector:
+ case Type::ExtVector:
+ return Cache::get(cast<VectorType>(T)->getElementType());
+ case Type::FunctionNoProto:
+ return Cache::get(cast<FunctionType>(T)->getResultType());
+ case Type::FunctionProto: {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ CachedProperties result = Cache::get(FPT->getResultType());
+ for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
+ ae = FPT->arg_type_end(); ai != ae; ++ai)
+ result = merge(result, Cache::get(*ai));
+ return result;
+ }
+ case Type::ObjCInterface: {
+ NamedDecl::LinkageInfo LV =
+ cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
+ return CachedProperties(LV, false);
+ }
+ case Type::ObjCObject:
+ return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
+ case Type::ObjCObjectPointer:
+ return Cache::get(cast<ObjCObjectPointerType>(T)->getPointeeType());
+ case Type::Atomic:
+ return Cache::get(cast<AtomicType>(T)->getValueType());
+ }
+
+ llvm_unreachable("unhandled type class");
+}
+
+/// \brief Determine the linkage of this type.
+Linkage Type::getLinkage() const {
+ Cache::ensure(this);
+ return TypeBits.getLinkage();
+}
+
+/// \brief Determine the linkage of this type.
+Visibility Type::getVisibility() const {
+ Cache::ensure(this);
+ return TypeBits.getVisibility();
+}
+
+bool Type::isVisibilityExplicit() const {
+ Cache::ensure(this);
+ return TypeBits.isVisibilityExplicit();
+}
+
+bool Type::hasUnnamedOrLocalType() const {
+ Cache::ensure(this);
+ return TypeBits.hasLocalOrUnnamedType();
+}
+
+std::pair<Linkage,Visibility> Type::getLinkageAndVisibility() const {
+ Cache::ensure(this);
+ return std::make_pair(TypeBits.getLinkage(), TypeBits.getVisibility());
+}
+
+void Type::ClearLinkageCache() {
+ TypeBits.CacheValidAndVisibility = 0;
+ if (QualType(this, 0) != CanonicalType)
+ CanonicalType->TypeBits.CacheValidAndVisibility = 0;
+}
+
+Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const {
+ if (isObjCARCImplicitlyUnretainedType())
+ return Qualifiers::OCL_ExplicitNone;
+ return Qualifiers::OCL_Strong;
+}
+
+bool Type::isObjCARCImplicitlyUnretainedType() const {
+ assert(isObjCLifetimeType() &&
+ "cannot query implicit lifetime for non-inferrable type");
+
+ const Type *canon = getCanonicalTypeInternal().getTypePtr();
+
+ // Walk down to the base type. We don't care about qualifiers for this.
+ while (const ArrayType *array = dyn_cast<ArrayType>(canon))
+ canon = array->getElementType().getTypePtr();
+
+ if (const ObjCObjectPointerType *opt
+ = dyn_cast<ObjCObjectPointerType>(canon)) {
+ // Class and Class<Protocol> don't require retension.
+ if (opt->getObjectType()->isObjCClass())
+ return true;
+ }
+
+ return false;
+}
+
+bool Type::isObjCNSObjectType() const {
+ if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+ return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+ return false;
+}
+bool Type::isObjCRetainableType() const {
+ return isObjCObjectPointerType() ||
+ isBlockPointerType() ||
+ isObjCNSObjectType();
+}
+bool Type::isObjCIndirectLifetimeType() const {
+ if (isObjCLifetimeType())
+ return true;
+ if (const PointerType *OPT = getAs<PointerType>())
+ return OPT->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const ReferenceType *Ref = getAs<ReferenceType>())
+ return Ref->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
+ return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
+ return false;
+}
+
+/// Returns true if objects of this type have lifetime semantics under
+/// ARC.
+bool Type::isObjCLifetimeType() const {
+ const Type *type = this;
+ while (const ArrayType *array = type->getAsArrayTypeUnsafe())
+ type = array->getElementType().getTypePtr();
+ return type->isObjCRetainableType();
+}
+
+/// \brief Determine whether the given type T is a "bridgable" Objective-C type,
+/// which is either an Objective-C object pointer type or an
+bool Type::isObjCARCBridgableType() const {
+ return isObjCObjectPointerType() || isBlockPointerType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" C type.
+bool Type::isCARCBridgableType() const {
+ const PointerType *Pointer = getAs<PointerType>();
+ if (!Pointer)
+ return false;
+
+ QualType Pointee = Pointer->getPointeeType();
+ return Pointee->isVoidType() || Pointee->isRecordType();
+}
+
+bool Type::hasSizedVLAType() const {
+ if (!isVariablyModifiedType()) return false;
+
+ if (const PointerType *ptr = getAs<PointerType>())
+ return ptr->getPointeeType()->hasSizedVLAType();
+ if (const ReferenceType *ref = getAs<ReferenceType>())
+ return ref->getPointeeType()->hasSizedVLAType();
+ if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
+ if (isa<VariableArrayType>(arr) &&
+ cast<VariableArrayType>(arr)->getSizeExpr())
+ return true;
+
+ return arr->getElementType()->hasSizedVLAType();
+ }
+
+ return false;
+}
+
+QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ return DK_objc_strong_lifetime;
+ case Qualifiers::OCL_Weak:
+ return DK_objc_weak_lifetime;
+ }
+
+ /// Currently, the only destruction kind we recognize is C++ objects
+ /// with non-trivial destructors.
+ const CXXRecordDecl *record =
+ type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (record && record->hasDefinition() && !record->hasTrivialDestructor())
+ return DK_cxx_destructor;
+
+ return DK_none;
+}
+
+bool QualType::hasTrivialAssignment(ASTContext &Context, bool Copying) const {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return !Context.getLangOpts().ObjCAutoRefCount;
+ }
+
+ if (const CXXRecordDecl *Record
+ = getTypePtr()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl())
+ return Copying ? Record->hasTrivialCopyAssignment() :
+ Record->hasTrivialMoveAssignment();
+
+ return true;
+}
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
new file mode 100644
index 0000000..caa19b1
--- /dev/null
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -0,0 +1,332 @@
+//===--- TypeLoc.cpp - Type Source Info Wrapper -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeLoc subclasses implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/raw_ostream.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TypeLoc Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class TypeLocRanger : public TypeLocVisitor<TypeLocRanger, SourceRange> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ SourceRange Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getLocalSourceRange(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+SourceRange TypeLoc::getLocalSourceRangeImpl(TypeLoc TL) {
+ if (TL.isNull()) return SourceRange();
+ return TypeLocRanger().Visit(TL);
+}
+
+namespace {
+ class TypeSizer : public TypeLocVisitor<TypeSizer, unsigned> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getFullDataSize(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Returns the size of the type source info data block.
+unsigned TypeLoc::getFullDataSizeForType(QualType Ty) {
+ if (Ty.isNull()) return 0;
+ return TypeSizer().Visit(TypeLoc(Ty, 0));
+}
+
+namespace {
+ class NextLoc : public TypeLocVisitor<NextLoc, TypeLoc> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ TypeLoc Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getNextTypeLoc(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Get the next TypeLoc pointed to by this TypeLoc, e.g. for "int*" the
+/// TypeLoc is a PointerTypeLoc and the next TypeLoc is for "int".
+TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
+ return NextLoc().Visit(TL);
+}
+
+/// \brief Initializes a type location, and all of its children
+/// recursively, as if the entire tree had been written in the
+/// given location.
+void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
+ SourceLocation Loc) {
+ while (true) {
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case CLASS: { \
+ CLASS##TypeLoc TLCasted = cast<CLASS##TypeLoc>(TL); \
+ TLCasted.initializeLocal(Context, Loc); \
+ TL = TLCasted.getNextTypeLoc(); \
+ if (!TL) return; \
+ continue; \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+}
+
+SourceLocation TypeLoc::getBeginLoc() const {
+ TypeLoc Cur = *this;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ // FIXME: Currently QualifiedTypeLoc does not have a source range
+ // case Qualified:
+ case Elaborated:
+ case DependentName:
+ case DependentTemplateSpecialization:
+ break;
+ default:
+ TypeLoc Next = Cur.getNextTypeLoc();
+ if (Next.isNull()) break;
+ Cur = Next;
+ continue;
+ }
+ break;
+ }
+ return Cur.getLocalSourceRange().getBegin();
+}
+
+SourceLocation TypeLoc::getEndLoc() const {
+ TypeLoc Cur = *this;
+ TypeLoc Last;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ default:
+ if (!Last)
+ Last = Cur;
+ return Last.getLocalSourceRange().getEnd();
+ case Paren:
+ case ConstantArray:
+ case DependentSizedArray:
+ case IncompleteArray:
+ case VariableArray:
+ case FunctionProto:
+ case FunctionNoProto:
+ Last = Cur;
+ break;
+ case Pointer:
+ case BlockPointer:
+ case MemberPointer:
+ case LValueReference:
+ case RValueReference:
+ case PackExpansion:
+ if (!Last)
+ Last = Cur;
+ break;
+ case Qualified:
+ case Elaborated:
+ break;
+ }
+ Cur = Cur.getNextTypeLoc();
+ }
+}
+
+
+namespace {
+ struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
+ // Overload resolution does the real work for us.
+ static bool isTypeSpec(TypeSpecTypeLoc _) { return true; }
+ static bool isTypeSpec(TypeLoc _) { return false; }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return isTypeSpec(TyLoc); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+
+/// \brief Determines if the given type loc corresponds to a
+/// TypeSpecTypeLoc. Since there is not actually a TypeSpecType in
+/// the type hierarchy, this is made somewhat complicated.
+///
+/// There are a lot of types that currently use TypeSpecTypeLoc
+/// because it's a convenient base class. Ideally we would not accept
+/// those here, but ideally we would have better implementations for
+/// them.
+bool TypeSpecTypeLoc::classof(const TypeLoc *TL) {
+ if (TL->getType().hasLocalQualifiers()) return false;
+ return TSTChecker().Visit(*TL);
+}
+
+// Reimplemented to account for GNU/C++ extension
+// typeof unary-expression
+// where there are no parentheses.
+SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
+ if (getRParenLoc().isValid())
+ return SourceRange(getTypeofLoc(), getRParenLoc());
+ else
+ return SourceRange(getTypeofLoc(),
+ getUnderlyingExpr()->getSourceRange().getEnd());
+}
+
+
+TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
+ if (needsExtraLocalData())
+ return static_cast<TypeSpecifierType>(getWrittenBuiltinSpecs().Type);
+ switch (getTypePtr()->getKind()) {
+ case BuiltinType::Void:
+ return TST_void;
+ case BuiltinType::Bool:
+ return TST_bool;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ return TST_char;
+ case BuiltinType::Char16:
+ return TST_char16;
+ case BuiltinType::Char32:
+ return TST_char32;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ return TST_wchar;
+ case BuiltinType::UChar:
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::UInt128:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong:
+ case BuiltinType::Int128:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ llvm_unreachable("Builtin type needs extra local data!");
+ // Fall through, if the impossible happens.
+
+ case BuiltinType::NullPtr:
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::BoundMember:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return TST_unspecified;
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
+ while (ParenTypeLoc* PTL = dyn_cast<ParenTypeLoc>(&TL))
+ TL = PTL->getInnerLoc();
+ return TL;
+}
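+
+// Illustrative sketch (hypothetical input, assuming the redundant declarator
+// parentheses in "int (x);" are recorded as a ParenType): IgnoreParens()
+// strips any leading ParenTypeLocs:
+//   TypeLoc Stripped = TL.IgnoreParens(); // BuiltinTypeLoc for "int"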
+
+void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+}
+
+void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ setNameLoc(Loc);
+}
+
+void
+DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ if (getTypePtr()->getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ } else {
+ setQualifierLoc(NestedNameSpecifierLoc());
+ }
+ setTemplateKeywordLoc(Loc);
+ setTemplateNameLoc(Loc);
+ setLAngleLoc(Loc);
+ setRAngleLoc(Loc);
+ TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
+ getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+}
+
+void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
+ unsigned NumArgs,
+ const TemplateArgument *Args,
+ TemplateArgumentLocInfo *ArgInfos,
+ SourceLocation Loc) {
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+ switch (Args[i].getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Expression:
+ ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
+ break;
+
+ case TemplateArgument::Type:
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
+ Loc));
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Args[i].getAsTemplate();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
+
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Builder.getWithLocInContext(Context),
+ Loc,
+ Args[i].getKind() == TemplateArgument::Template
+ ? SourceLocation()
+ : Loc);
+ break;
+ }
+ }
+ }
+}
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
new file mode 100644
index 0000000..3bf80e7
--- /dev/null
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -0,0 +1,1232 @@
+//===--- TypePrinter.cpp - Pretty-Print Clang Types -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to print types from Clang's type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ /// \brief RAII object that enables printing of the ARC __strong lifetime
+ /// qualifier.
+ class IncludeStrongLifetimeRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+ Policy.SuppressStrongLifetime = false;
+ }
+
+ ~IncludeStrongLifetimeRAII() {
+ Policy.SuppressStrongLifetime = Old;
+ }
+ };
+
+ class TypePrinter {
+ PrintingPolicy Policy;
+
+ public:
+ explicit TypePrinter(const PrintingPolicy &Policy) : Policy(Policy) { }
+
+ void print(const Type *ty, Qualifiers qs, std::string &buffer);
+ void print(QualType T, std::string &S);
+ void AppendScope(DeclContext *DC, std::string &S);
+ void printTag(TagDecl *T, std::string &S);
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) \
+ void print##CLASS(const CLASS##Type *T, std::string &S);
+#include "clang/AST/TypeNodes.def"
+ };
+}
+
+static void AppendTypeQualList(std::string &S, unsigned TypeQuals) {
+ if (TypeQuals & Qualifiers::Const) {
+ if (!S.empty()) S += ' ';
+ S += "const";
+ }
+ if (TypeQuals & Qualifiers::Volatile) {
+ if (!S.empty()) S += ' ';
+ S += "volatile";
+ }
+ if (TypeQuals & Qualifiers::Restrict) {
+ if (!S.empty()) S += ' ';
+ S += "restrict";
+ }
+}
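+
+// Illustrative use of the helper above: qualifiers are appended in
+// const/volatile/restrict order, with a separating space only when the string
+// is already non-empty.
+//   std::string S;
+//   AppendTypeQualList(S, Qualifiers::Const | Qualifiers::Volatile);
+//   // S == "const volatile"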
+
+void TypePrinter::print(QualType t, std::string &buffer) {
+ SplitQualType split = t.split();
+ print(split.Ty, split.Quals, buffer);
+}
+
+void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
+ if (!T) {
+ buffer += "NULL TYPE";
+ return;
+ }
+
+ if (Policy.SuppressSpecifiers && T->isSpecifierType())
+ return;
+
+ // Print qualifiers as appropriate.
+
+ // CanPrefixQualifiers - We prefer to print type qualifiers before the type,
+ // so that we get "const int" instead of "int const", but we can't do this if
+ // the type is complex. For example if the type is "int*", we *must* print
+ // "int * const", printing "const int *" is different. Only do this when the
+ // type expands to a simple string.
+ bool CanPrefixQualifiers = false;
+ bool NeedARCStrongQualifier = false;
+ Type::TypeClass TC = T->getTypeClass();
+ if (const AutoType *AT = dyn_cast<AutoType>(T))
+ TC = AT->desugar()->getTypeClass();
+ if (const SubstTemplateTypeParmType *Subst
+ = dyn_cast<SubstTemplateTypeParmType>(T))
+ TC = Subst->getReplacementType()->getTypeClass();
+
+ switch (TC) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::UnresolvedUsing:
+ case Type::Typedef:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::TemplateSpecialization:
+ case Type::InjectedClassName:
+ case Type::DependentName:
+ case Type::DependentTemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::Atomic:
+ CanPrefixQualifiers = true;
+ break;
+
+ case Type::ObjCObjectPointer:
+ CanPrefixQualifiers = T->isObjCIdType() || T->isObjCClassType() ||
+ T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ NeedARCStrongQualifier = true;
+ // Fall through
+
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Paren:
+ case Type::Attributed:
+ case Type::PackExpansion:
+ case Type::SubstTemplateTypeParm:
+ case Type::Auto:
+ CanPrefixQualifiers = false;
+ break;
+ }
+
+ if (!CanPrefixQualifiers && !Quals.empty()) {
+ std::string qualsBuffer;
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ } else {
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ }
+
+ if (!qualsBuffer.empty()) {
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
+ }
+ }
+
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+ print##CLASS(cast<CLASS##Type>(T), buffer); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // If we're adding the qualifiers as a prefix, do it now.
+ if (CanPrefixQualifiers && !Quals.empty()) {
+ std::string qualsBuffer;
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ } else {
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ }
+
+ if (!qualsBuffer.empty()) {
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
+ }
+ }
+}
+
+void TypePrinter::printBuiltin(const BuiltinType *T, std::string &S) {
+ if (S.empty()) {
+ S = T->getName(Policy);
+ } else {
+ // Prefix the basic type, e.g. 'int X'.
+ S = ' ' + S;
+ S = T->getName(Policy) + S;
+ }
+}
+
+void TypePrinter::printComplex(const ComplexType *T, std::string &S) {
+ print(T->getElementType(), S);
+ S = "_Complex " + S;
+}
+
+void TypePrinter::printPointer(const PointerType *T, std::string &S) {
+ S = '*' + S;
+
+ // Handle things like 'int (*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeType(), S);
+}
+
+void TypePrinter::printBlockPointer(const BlockPointerType *T, std::string &S) {
+ S = '^' + S;
+ print(T->getPointeeType(), S);
+}
+
+void TypePrinter::printLValueReference(const LValueReferenceType *T,
+ std::string &S) {
+ S = '&' + S;
+
+ // Handle things like 'int (&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeTypeAsWritten(), S);
+}
+
+void TypePrinter::printRValueReference(const RValueReferenceType *T,
+ std::string &S) {
+ S = "&&" + S;
+
+ // Handle things like 'int (&&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeTypeAsWritten(), S);
+}
+
+void TypePrinter::printMemberPointer(const MemberPointerType *T,
+ std::string &S) {
+ PrintingPolicy InnerPolicy(Policy);
+ // Print the class of the member pointer without the tag keyword, using a
+ // local policy so the change does not affect subsequent printing.
+ InnerPolicy.SuppressTagKeyword = true;
+ std::string C = QualType(T->getClass(), 0).getAsString(InnerPolicy);
+ C += "::*";
+ S = C + S;
+
+ // Handle things like 'int (Cls::*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeType(), S);
+}
+
+void TypePrinter::printConstantArray(const ConstantArrayType *T,
+ std::string &S) {
+ S += '[';
+ S += llvm::utostr(T->getSize().getZExtValue());
+ S += ']';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printIncompleteArray(const IncompleteArrayType *T,
+ std::string &S) {
+ S += "[]";
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printVariableArray(const VariableArrayType *T,
+ std::string &S) {
+ S += '[';
+
+ if (T->getIndexTypeQualifiers().hasQualifiers()) {
+ AppendTypeQualList(S, T->getIndexTypeCVRQualifiers());
+ S += ' ';
+ }
+
+ if (T->getSizeModifier() == VariableArrayType::Static)
+ S += "static";
+ else if (T->getSizeModifier() == VariableArrayType::Star)
+ S += '*';
+
+ if (T->getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ T->getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ']';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printDependentSizedArray(const DependentSizedArrayType *T,
+ std::string &S) {
+ S += '[';
+
+ if (T->getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ T->getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ']';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printDependentSizedExtVector(
+ const DependentSizedExtVectorType *T,
+ std::string &S) {
+ print(T->getElementType(), S);
+
+ S += " __attribute__((ext_vector_type(";
+ if (T->getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ T->getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ")))";
+}
+
+void TypePrinter::printVector(const VectorType *T, std::string &S) {
+ switch (T->getVectorKind()) {
+ case VectorType::AltiVecPixel:
+ S = "__vector __pixel " + S;
+ break;
+ case VectorType::AltiVecBool:
+ print(T->getElementType(), S);
+ S = "__vector __bool " + S;
+ break;
+ case VectorType::AltiVecVector:
+ print(T->getElementType(), S);
+ S = "__vector " + S;
+ break;
+ case VectorType::NeonVector:
+ print(T->getElementType(), S);
+ S = ("__attribute__((neon_vector_type(" +
+ llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ break;
+ case VectorType::NeonPolyVector:
+ print(T->getElementType(), S);
+ S = ("__attribute__((neon_polyvector_type(" +
+ llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ break;
+ case VectorType::GenericVector: {
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ print(T->getElementType(), S);
+ std::string V = "__attribute__((__vector_size__(";
+ V += llvm::utostr_32(T->getNumElements()); // convert back to bytes.
+ std::string ET;
+ print(T->getElementType(), ET);
+ V += " * sizeof(" + ET + ")))) ";
+ S = V + S;
+ break;
+ }
+ }
+}
+
+void TypePrinter::printExtVector(const ExtVectorType *T, std::string &S) {
+ S += " __attribute__((ext_vector_type(";
+ S += llvm::utostr_32(T->getNumElements());
+ S += ")))";
+ print(T->getElementType(), S);
+}
+
+void
+FunctionProtoType::printExceptionSpecification(std::string &S,
+ PrintingPolicy Policy) const {
+
+ if (hasDynamicExceptionSpec()) {
+ S += " throw(";
+ if (getExceptionSpecType() == EST_MSAny)
+ S += "...";
+ else
+ for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
+ if (I)
+ S += ", ";
+
+ S += getExceptionType(I).getAsString(Policy);
+ }
+ S += ")";
+ } else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
+ S += " noexcept";
+ if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ S += "(";
+ llvm::raw_string_ostream EOut(S);
+ getNoexceptExpr()->printPretty(EOut, 0, Policy);
+ EOut.flush();
+ S += ")";
+ }
+ }
+}
+
+void TypePrinter::printFunctionProto(const FunctionProtoType *T,
+ std::string &S) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!S.empty())
+ S = "(" + S + ")";
+
+ S += "(";
+ std::string Tmp;
+ PrintingPolicy ParamPolicy(Policy);
+ ParamPolicy.SuppressSpecifiers = false;
+ for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+ if (i) S += ", ";
+ print(T->getArgType(i), Tmp);
+ S += Tmp;
+ Tmp.clear();
+ }
+
+ if (T->isVariadic()) {
+ if (T->getNumArgs())
+ S += ", ";
+ S += "...";
+ } else if (T->getNumArgs() == 0 && !Policy.LangOpts.CPlusPlus) {
+ // Do not emit int() if we have a proto, emit 'int(void)'.
+ S += "void";
+ }
+
+ S += ")";
+
+ FunctionType::ExtInfo Info = T->getExtInfo();
+ switch(Info.getCC()) {
+ case CC_Default: break;
+ case CC_C:
+ S += " __attribute__((cdecl))";
+ break;
+ case CC_X86StdCall:
+ S += " __attribute__((stdcall))";
+ break;
+ case CC_X86FastCall:
+ S += " __attribute__((fastcall))";
+ break;
+ case CC_X86ThisCall:
+ S += " __attribute__((thiscall))";
+ break;
+ case CC_X86Pascal:
+ S += " __attribute__((pascal))";
+ break;
+ case CC_AAPCS:
+ S += " __attribute__((pcs(\"aapcs\")))";
+ break;
+ case CC_AAPCS_VFP:
+ S += " __attribute__((pcs(\"aapcs-vfp\")))";
+ break;
+ }
+ if (Info.getNoReturn())
+ S += " __attribute__((noreturn))";
+ if (Info.getRegParm())
+ S += " __attribute__((regparm (" +
+ llvm::utostr_32(Info.getRegParm()) + ")))";
+
+ AppendTypeQualList(S, T->getTypeQuals());
+
+ switch (T->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ S += " &";
+ break;
+
+ case RQ_RValue:
+ S += " &&";
+ break;
+ }
+ T->printExceptionSpecification(S, Policy);
+ if (T->hasTrailingReturn()) {
+ std::string ResultS;
+ print(T->getResultType(), ResultS);
+ S = "auto " + S + " -> " + ResultS;
+ } else
+ print(T->getResultType(), S);
+}
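+
+// Illustrative walk-through (hypothetical type): printing "int (*)(float)"
+// proceeds inside-out. printPointer seeds the string with "*";
+// printFunctionProto then wraps it for precedence ("(*)"), appends the
+// parameter list ("(*)(float)"), and finally the result type is printed in
+// front, yielding "int (*)(float)".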
+
+void TypePrinter::printFunctionNoProto(const FunctionNoProtoType *T,
+ std::string &S) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!S.empty())
+ S = "(" + S + ")";
+
+ S += "()";
+ if (T->getNoReturnAttr())
+ S += " __attribute__((noreturn))";
+ print(T->getResultType(), S);
+}
+
+static void printTypeSpec(const NamedDecl *D, std::string &S) {
+ IdentifierInfo *II = D->getIdentifier();
+ if (S.empty())
+ S = II->getName().str();
+ else
+ S = II->getName().str() + ' ' + S;
+}
+
+void TypePrinter::printUnresolvedUsing(const UnresolvedUsingType *T,
+ std::string &S) {
+ printTypeSpec(T->getDecl(), S);
+}
+
+void TypePrinter::printTypedef(const TypedefType *T, std::string &S) {
+ printTypeSpec(T->getDecl(), S);
+}
+
+void TypePrinter::printTypeOfExpr(const TypeOfExprType *T, std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'typeof(e) X'.
+ S = ' ' + S;
+ std::string Str;
+ llvm::raw_string_ostream s(Str);
+ T->getUnderlyingExpr()->printPretty(s, 0, Policy);
+ S = "typeof " + s.str() + S;
+}
+
+void TypePrinter::printTypeOf(const TypeOfType *T, std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'typeof(t) X'.
+ S = ' ' + S;
+ std::string Tmp;
+ print(T->getUnderlyingType(), Tmp);
+ S = "typeof(" + Tmp + ")" + S;
+}
+
+void TypePrinter::printDecltype(const DecltypeType *T, std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'decltype(t) X'.
+ S = ' ' + S;
+ std::string Str;
+ llvm::raw_string_ostream s(Str);
+ T->getUnderlyingExpr()->printPretty(s, 0, Policy);
+ S = "decltype(" + s.str() + ")" + S;
+}
+
+void TypePrinter::printUnaryTransform(const UnaryTransformType *T,
+ std::string &S) {
+ if (!S.empty())
+ S = ' ' + S;
+ std::string Str;
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getBaseType(), Str);
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ S = "__underlying_type(" + Str + ")" + S;
+ break;
+ }
+}
+
+void TypePrinter::printAuto(const AutoType *T, std::string &S) {
+ // If the type has been deduced, do not print 'auto'.
+ if (T->isDeduced()) {
+ print(T->getDeducedType(), S);
+ } else {
+ if (!S.empty()) // Prefix the basic type, e.g. 'auto X'.
+ S = ' ' + S;
+ S = "auto" + S;
+ }
+}
+
+void TypePrinter::printAtomic(const AtomicType *T, std::string &S) {
+ if (!S.empty())
+ S = ' ' + S;
+ std::string Str;
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getValueType(), Str);
+
+ S = "_Atomic(" + Str + ")" + S;
+}
+
+/// Appends the given scope to the end of a string.
+void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
+ if (DC->isTranslationUnit()) return;
+ AppendScope(DC->getParent(), Buffer);
+
+ unsigned OldSize = Buffer.size();
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
+ if (Policy.SuppressUnwrittenScope &&
+ (NS->isAnonymousNamespace() || NS->isInline()))
+ return;
+ if (NS->getIdentifier())
+ Buffer += NS->getNameAsString();
+ else
+ Buffer += "<anonymous>";
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ Policy);
+ Buffer += Spec->getIdentifier()->getName();
+ Buffer += TemplateArgsStr;
+ } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
+ Buffer += Typedef->getIdentifier()->getName();
+ else if (Tag->getIdentifier())
+ Buffer += Tag->getIdentifier()->getName();
+ else
+ return;
+ }
+
+ if (Buffer.size() != OldSize)
+ Buffer += "::";
+}
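+
+// Illustrative example (hypothetical declarations): for
+//   namespace N { struct S { enum E { }; }; }
+// AppendScope walks E's enclosing DeclContexts and builds the prefix "N::S::",
+// so the enum prints as "N::S::E" unless Policy.SuppressScope is set.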
+
+void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
+ if (Policy.SuppressTag)
+ return;
+
+ std::string Buffer;
+ bool HasKindDecoration = false;
+
+ // bool SuppressTagKeyword
+ // = Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword;
+
+ // We don't print tags unless this is an elaborated type.
+ // In C, we just assume every RecordType is an elaborated type.
+ if (!(Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword ||
+ D->getTypedefNameForAnonDecl())) {
+ HasKindDecoration = true;
+ Buffer += D->getKindName();
+ Buffer += ' ';
+ }
+
+ // Compute the full nested-name-specifier for this type.
+ // In C, this will always be empty except when the type
+ // being printed is anonymous within another record.
+ if (!Policy.SuppressScope)
+ AppendScope(D->getDeclContext(), Buffer);
+
+ if (const IdentifierInfo *II = D->getIdentifier())
+ Buffer += II->getNameStart();
+ else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) {
+ assert(Typedef->getIdentifier() && "Typedef without identifier?");
+ Buffer += Typedef->getIdentifier()->getNameStart();
+ } else {
+ // Make an unambiguous representation for anonymous types, e.g.
+ // <anonymous enum at /usr/include/string.h:120:9>
+ llvm::raw_string_ostream OS(Buffer);
+
+ if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
+ OS << "<lambda";
+ HasKindDecoration = true;
+ } else {
+ OS << "<anonymous";
+ }
+
+ if (Policy.AnonymousTagLocations) {
+ // Suppress the redundant tag keyword if we just printed one.
+ // We don't have to worry about ElaboratedTypes here because you can't
+ // refer to an anonymous type with one.
+ if (!HasKindDecoration)
+ OS << " " << D->getKindName();
+
+ PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
+ D->getLocation());
+ if (PLoc.isValid()) {
+ OS << " at " << PLoc.getFilename()
+ << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ }
+ }
+
+ OS << '>';
+ }
+
+ // If this is a class template specialization, print the template
+ // arguments.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
+ }
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+ }
+
+ if (!InnerString.empty()) {
+ Buffer += ' ';
+ Buffer += InnerString;
+ }
+
+ std::swap(Buffer, InnerString);
+}
+
+void TypePrinter::printRecord(const RecordType *T, std::string &S) {
+ printTag(T->getDecl(), S);
+}
+
+void TypePrinter::printEnum(const EnumType *T, std::string &S) {
+ printTag(T->getDecl(), S);
+}
+
+void TypePrinter::printTemplateTypeParm(const TemplateTypeParmType *T,
+ std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'parmname X'.
+ S = ' ' + S;
+
+ if (IdentifierInfo *Id = T->getIdentifier())
+ S = Id->getName().str() + S;
+ else
+ S = "type-parameter-" + llvm::utostr_32(T->getDepth()) + '-' +
+ llvm::utostr_32(T->getIndex()) + S;
+}
+
+void TypePrinter::printSubstTemplateTypeParm(const SubstTemplateTypeParmType *T,
+ std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getReplacementType(), S);
+}
+
+void TypePrinter::printSubstTemplateTypeParmPack(
+ const SubstTemplateTypeParmPackType *T,
+ std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printTemplateTypeParm(T->getReplacedParameter(), S);
+}
+
+void TypePrinter::printTemplateSpecialization(
+ const TemplateSpecializationType *T,
+ std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ std::string SpecString;
+
+ {
+ llvm::raw_string_ostream OS(SpecString);
+ T->getTemplateName().print(OS, Policy);
+ }
+
+ SpecString += TemplateSpecializationType::PrintTemplateArgumentList(
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ if (S.empty())
+ S.swap(SpecString);
+ else
+ S = SpecString + ' ' + S;
+}
+
+void TypePrinter::printInjectedClassName(const InjectedClassNameType *T,
+ std::string &S) {
+ printTemplateSpecialization(T->getInjectedTST(), S);
+}
+
+void TypePrinter::printElaborated(const ElaboratedType *T, std::string &S) {
+ std::string MyString;
+
+ {
+ llvm::raw_string_ostream OS(MyString);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+ NestedNameSpecifier* Qualifier = T->getQualifier();
+ if (Qualifier)
+ Qualifier->print(OS, Policy);
+ }
+
+ std::string TypeStr;
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTagKeyword = true;
+ InnerPolicy.SuppressScope = true;
+ TypePrinter(InnerPolicy).print(T->getNamedType(), TypeStr);
+
+ MyString += TypeStr;
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::printParen(const ParenType *T, std::string &S) {
+ if (!S.empty() && !isa<FunctionType>(T->getInnerType()))
+ S = '(' + S + ')';
+ print(T->getInnerType(), S);
+}
+
+void TypePrinter::printDependentName(const DependentNameType *T, std::string &S) {
+ std::string MyString;
+
+ {
+ llvm::raw_string_ostream OS(MyString);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ T->getQualifier()->print(OS, Policy);
+
+ OS << T->getIdentifier()->getName();
+ }
+
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::printDependentTemplateSpecialization(
+ const DependentTemplateSpecializationType *T, std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ std::string MyString;
+ {
+ llvm::raw_string_ostream OS(MyString);
+
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ if (T->getQualifier())
+ T->getQualifier()->print(OS, Policy);
+ OS << T->getIdentifier()->getName();
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ }
+
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::printPackExpansion(const PackExpansionType *T,
+ std::string &S) {
+ print(T->getPattern(), S);
+ S += "...";
+}
+
+void TypePrinter::printAttributed(const AttributedType *T,
+ std::string &S) {
+ // Prefer the macro forms of the GC and ownership qualifiers.
+ if (T->getAttrKind() == AttributedType::attr_objc_gc ||
+ T->getAttrKind() == AttributedType::attr_objc_ownership)
+ return print(T->getEquivalentType(), S);
+
+ print(T->getModifiedType(), S);
+
+ // TODO: not all attributes are GCC-style attributes.
+ S += " __attribute__((";
+ switch (T->getAttrKind()) {
+ case AttributedType::attr_address_space:
+ S += "address_space(";
+ S += llvm::utostr_32(T->getEquivalentType().getAddressSpace());
+ S += ")";
+ break;
+
+ case AttributedType::attr_vector_size: {
+ S += "__vector_size__(";
+ if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) {
+ S += llvm::utostr_32(vector->getNumElements());
+ S += " * sizeof(";
+
+ std::string tmp;
+ print(vector->getElementType(), tmp);
+ S += tmp;
+ S += ")";
+ }
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_neon_vector_type:
+ case AttributedType::attr_neon_polyvector_type: {
+ if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
+ S += "neon_vector_type(";
+ else
+ S += "neon_polyvector_type(";
+ const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
+ S += llvm::utostr_32(vector->getNumElements());
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_regparm: {
+ S += "regparm(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+ S += llvm::utostr_32(t->getAs<FunctionType>()->getRegParmType());
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_objc_gc: {
+ S += "objc_gc(";
+
+ QualType tmp = T->getEquivalentType();
+ while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
+ QualType next = tmp->getPointeeType();
+ if (next == tmp) break;
+ tmp = next;
+ }
+
+ if (tmp.isObjCGCWeak())
+ S += "weak";
+ else
+ S += "strong";
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_objc_ownership:
+ S += "objc_ownership(";
+ switch (T->getEquivalentType().getObjCLifetime()) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
+ case Qualifiers::OCL_ExplicitNone: S += "none"; break;
+ case Qualifiers::OCL_Strong: S += "strong"; break;
+ case Qualifiers::OCL_Weak: S += "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: S += "autoreleasing"; break;
+ }
+ S += ")";
+ break;
+
+ case AttributedType::attr_noreturn: S += "noreturn"; break;
+ case AttributedType::attr_cdecl: S += "cdecl"; break;
+ case AttributedType::attr_fastcall: S += "fastcall"; break;
+ case AttributedType::attr_stdcall: S += "stdcall"; break;
+ case AttributedType::attr_thiscall: S += "thiscall"; break;
+ case AttributedType::attr_pascal: S += "pascal"; break;
+ case AttributedType::attr_pcs: {
+ S += "pcs(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+ S += (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ?
+ "\"aapcs\"" : "\"aapcs-vfp\"");
+ S += ")";
+ break;
+ }
+ }
+ S += "))";
+}
+
+void TypePrinter::printObjCInterface(const ObjCInterfaceType *T,
+ std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ S = ' ' + S;
+
+ std::string ObjCQIString = T->getDecl()->getNameAsString();
+ S = ObjCQIString + S;
+}
+
+void TypePrinter::printObjCObject(const ObjCObjectType *T,
+ std::string &S) {
+ if (T->qual_empty())
+ return print(T->getBaseType(), S);
+
+ std::string tmp;
+ print(T->getBaseType(), tmp);
+ tmp += '<';
+ bool isFirst = true;
+ for (ObjCObjectType::qual_iterator
+ I = T->qual_begin(), E = T->qual_end(); I != E; ++I) {
+ if (isFirst)
+ isFirst = false;
+ else
+ tmp += ',';
+ tmp += (*I)->getNameAsString();
+ }
+ tmp += '>';
+
+ if (!S.empty()) {
+ tmp += ' ';
+ tmp += S;
+ }
+ std::swap(tmp, S);
+}
+
+void TypePrinter::printObjCObjectPointer(const ObjCObjectPointerType *T,
+ std::string &S) {
+ std::string ObjCQIString;
+
+ T->getPointeeType().getLocalQualifiers().getAsStringInternal(ObjCQIString,
+ Policy);
+ if (!ObjCQIString.empty())
+ ObjCQIString += ' ';
+
+ if (T->isObjCIdType() || T->isObjCQualifiedIdType())
+ ObjCQIString += "id";
+ else if (T->isObjCClassType() || T->isObjCQualifiedClassType())
+ ObjCQIString += "Class";
+ else if (T->isObjCSelType())
+ ObjCQIString += "SEL";
+ else
+ ObjCQIString += T->getInterfaceDecl()->getNameAsString();
+
+ if (!T->qual_empty()) {
+ ObjCQIString += '<';
+ for (ObjCObjectPointerType::qual_iterator I = T->qual_begin(),
+ E = T->qual_end();
+ I != E; ++I) {
+ ObjCQIString += (*I)->getNameAsString();
+ if (I+1 != E)
+ ObjCQIString += ',';
+ }
+ ObjCQIString += '>';
+ }
+
+ if (!T->isObjCIdType() && !T->isObjCQualifiedIdType())
+ ObjCQIString += " *"; // Don't forget the implicit pointer.
+ else if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ S = ' ' + S;
+
+ S = ObjCQIString + S;
+}
+
+std::string TemplateSpecializationType::
+ PrintTemplateArgumentList(const TemplateArgumentListInfo &Args,
+ const PrintingPolicy &Policy) {
+ return PrintTemplateArgumentList(Args.getArgumentArray(),
+ Args.size(),
+ Policy);
+}
+
+std::string
+TemplateSpecializationType::PrintTemplateArgumentList(
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy,
+ bool SkipBrackets) {
+ std::string SpecString;
+ if (!SkipBrackets)
+ SpecString += '<';
+
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (SpecString.size() > unsigned(!SkipBrackets))
+ SpecString += ", ";
+
+ // Print the argument into a string.
+ std::string ArgString;
+ if (Args[Arg].getKind() == TemplateArgument::Pack) {
+ ArgString = PrintTemplateArgumentList(Args[Arg].pack_begin(),
+ Args[Arg].pack_size(),
+ Policy, true);
+ } else {
+ llvm::raw_string_ostream ArgOut(ArgString);
+ Args[Arg].print(Policy, ArgOut);
+ }
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+ // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ SpecString += ' ';
+
+ SpecString += ArgString;
+ }
+
+ // If the last character of our string is '>', add another space to
+ // keep the two '>''s separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (!SpecString.empty() && SpecString[SpecString.size() - 1] == '>')
+ SpecString += ' ';
+
+ if (!SkipBrackets)
+ SpecString += '>';
+
+ return SpecString;
+}
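+
+// Illustrative examples of the two whitespace tweaks above (hypothetical
+// template-ids): an argument written "::X" prints as "A< ::X>" to avoid the
+// "<:" digraph, and a trailing '>' prints as "std::vector<std::vector<int> >"
+// to keep the two '>' characters separate tokens.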
+
+// Sadly, repeat all that with TemplateArgLoc.
+std::string TemplateSpecializationType::
+PrintTemplateArgumentList(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ const PrintingPolicy &Policy) {
+ std::string SpecString;
+ SpecString += '<';
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (SpecString.size() > 1)
+ SpecString += ", ";
+
+ // Print the argument into a string.
+ std::string ArgString;
+ if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
+ ArgString = PrintTemplateArgumentList(
+ Args[Arg].getArgument().pack_begin(),
+ Args[Arg].getArgument().pack_size(),
+ Policy, true);
+ } else {
+ llvm::raw_string_ostream ArgOut(ArgString);
+ Args[Arg].getArgument().print(Policy, ArgOut);
+ }
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+ // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ SpecString += ' ';
+
+ SpecString += ArgString;
+ }
+
+ // If the last character of our string is '>', add another space to
+ // keep the two '>''s separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (SpecString[SpecString.size() - 1] == '>')
+ SpecString += ' ';
+
+ SpecString += '>';
+
+ return SpecString;
+}
+
+void QualType::dump(const char *msg) const {
+ std::string R = "identifier";
+ LangOptions LO;
+ getAsStringInternal(R, PrintingPolicy(LO));
+ if (msg)
+ llvm::errs() << msg << ": ";
+ llvm::errs() << R << "\n";
+}
+void QualType::dump() const {
+ dump("");
+}
+
+void Type::dump() const {
+ QualType(this, 0).dump();
+}
+
+std::string Qualifiers::getAsString() const {
+ LangOptions LO;
+ return getAsString(PrintingPolicy(LO));
+}
+
+// Appends qualifiers to the given string, separated by spaces. Will
+// prefix a space if the string is non-empty. Will not append a final
+// space.
+void Qualifiers::getAsStringInternal(std::string &S,
+ const PrintingPolicy& Policy) const {
+ AppendTypeQualList(S, getCVRQualifiers());
+ if (unsigned addrspace = getAddressSpace()) {
+ if (!S.empty()) S += ' ';
+ switch (addrspace) {
+ case LangAS::opencl_global:
+ S += "__global";
+ break;
+ case LangAS::opencl_local:
+ S += "__local";
+ break;
+ case LangAS::opencl_constant:
+ S += "__constant";
+ break;
+ default:
+ S += "__attribute__((address_space(";
+ S += llvm::utostr_32(addrspace);
+ S += ")))";
+ }
+ }
+ if (Qualifiers::GC gc = getObjCGCAttr()) {
+ if (!S.empty()) S += ' ';
+ if (gc == Qualifiers::Weak)
+ S += "__weak";
+ else
+ S += "__strong";
+ }
+ if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
+ if (!S.empty() &&
+ !(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
+ S += ' ';
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("none but true");
+ case Qualifiers::OCL_ExplicitNone: S += "__unsafe_unretained"; break;
+ case Qualifiers::OCL_Strong:
+ if (!Policy.SuppressStrongLifetime)
+ S += "__strong";
+ break;
+
+ case Qualifiers::OCL_Weak: S += "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: S += "__autoreleasing"; break;
+ }
+ }
+}
+
+std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
+ std::string buffer;
+ LangOptions options;
+ getAsStringInternal(ty, qs, buffer, PrintingPolicy(options));
+ return buffer;
+}
+
+void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &buffer,
+ const PrintingPolicy &policy) {
+ TypePrinter(policy).print(ty, qs, buffer);
+}
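+
+// Illustrative usage of the entry points above (QT stands for an arbitrary
+// QualType in scope):
+//   LangOptions LO;
+//   PrintingPolicy Policy(LO);
+//   std::string Name = QT.getAsString(Policy); // routes through TypePrinter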
diff --git a/clang/lib/AST/VTTBuilder.cpp b/clang/lib/AST/VTTBuilder.cpp
new file mode 100644
index 0000000..f5ff624
--- /dev/null
+++ b/clang/lib/AST/VTTBuilder.cpp
@@ -0,0 +1,212 @@
+//===--- VTTBuilder.cpp - C++ VTT layout builder --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual table
+// tables (VTT).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTTBuilder.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+VTTBuilder::VTTBuilder(ASTContext &Ctx,
+ const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition)
+ : Ctx(Ctx), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(Ctx.getASTRecordLayout(MostDerivedClass)),
+ GenerateDefinition(GenerateDefinition) {
+ // Lay out this VTT.
+ LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false);
+}
+
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass) {
+ // Store the vtable pointer index if we're generating the primary VTT.
+ if (VTableClass == MostDerivedClass) {
+ assert(!SecondaryVirtualPointerIndices.count(Base) &&
+ "A virtual pointer index already exists for this base subobject!");
+ SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+ }
+
+ if (!GenerateDefinition) {
+ VTTComponents.push_back(VTTComponent());
+ return;
+ }
+
+ VTTComponents.push_back(VTTComponent(VTableIndex, Base));
+}
+
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ // Don't layout virtual bases.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ // Layout the VTT for this base.
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // There is nothing to do for bases that have no virtual bases and are not
+ // themselves morally virtual.
+ if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase = false;
+ CharUnits BaseOffset;
+ if (I->isVirtual()) {
+ // Ignore virtual bases that we've already visited.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ } else {
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ if (!Layout.isPrimaryBaseVirtual() &&
+ Layout.getPrimaryBase() == BaseDecl)
+ BaseDeclIsNonVirtualPrimaryBase = true;
+ }
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers: for each base class X which (a) has virtual
+ // bases or is reachable along a virtual path from D, and (b) is not a
+ // non-virtual primary base, the address of the virtual table for X-in-D
+ // or an appropriate construction virtual table.
+ if (!BaseDeclIsNonVirtualPrimaryBase &&
+ (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+ // Add the vtable pointer.
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTableIndex,
+ VTableClass);
+ }
+
+ // And lay out the secondary virtual pointers for the base class.
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual, VTableIndex,
+ VTableClass, VBases);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ uint64_t VTableIndex) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+ VTableIndex, Base.getBase(), VBases);
+}
+
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base.
+ if (I->isVirtual()) {
+ // Check if we've seen this base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+ }
+
+ // We only need to layout virtual VTTs for this base if it actually has
+ // virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVirtualVTTs(BaseDecl, VBases);
+ }
+}
+
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
+
+ if (!IsPrimaryVTT) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = VTTComponents.size();
+ }
+
+ uint64_t VTableIndex = VTTVTables.size();
+ VTTVTables.push_back(VTTVTable(Base, BaseIsVirtual));
+
+ // Add the primary vtable pointer.
+ AddVTablePointer(Base, VTableIndex, RD);
+
+ // Add the secondary VTTs.
+ LayoutSecondaryVTTs(Base);
+
+ // Add the secondary virtual pointers.
+ LayoutSecondaryVirtualPointers(Base, VTableIndex);
+
+ // If this is the primary VTT, we want to lay out virtual VTTs as well.
+ if (IsPrimaryVTT) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutVirtualVTTs(Base.getBase(), VBases);
+ }
+}
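+
+// Rough illustration of the resulting layout (hypothetical hierarchy):
+//   struct A { virtual void f(); };
+//   struct B : virtual A { };
+// The VTT for B should contain two entries: the address of B's primary vtable,
+// followed by a secondary virtual pointer for the A-in-B vtable (A is
+// reachable along a virtual path and is not a non-virtual primary base).
+// A itself has no virtual bases, so no sub-VTTs or virtual VTTs are emitted.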
diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp
new file mode 100644
index 0000000..107d9fb
--- /dev/null
+++ b/clang/lib/AST/VTableBuilder.cpp
@@ -0,0 +1,2404 @@
+//===--- VTableBuilder.cpp - C++ vtable layout builder --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTableBuilder.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+namespace {
+
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+ /// DerivedClass - The derived class.
+ const CXXRecordDecl *DerivedClass;
+
+ /// VirtualBase - If the path from the derived class to the base class
+ /// involves a virtual base class, this holds its declaration.
+ const CXXRecordDecl *VirtualBase;
+
+ /// NonVirtualOffset - The offset from the derived class to the base class.
+ /// (Or the offset from the virtual base class to the base class, if the
+ /// path from the derived class to the base class involves a virtual base
+ /// class.)
+ CharUnits NonVirtualOffset;
+
+ BaseOffset() : DerivedClass(0), VirtualBase(0),
+ NonVirtualOffset(CharUnits::Zero()) { }
+ BaseOffset(const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
+ : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+ NonVirtualOffset(NonVirtualOffset) { }
+
+ bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
+};
+
+/// FinalOverriders - Contains the final overrider member functions for all
+/// member functions in the base subobjects of a class.
+class FinalOverriders {
+public:
+ /// OverriderInfo - Information about a final overrider.
+ struct OverriderInfo {
+ /// Method - The method decl of the overrider.
+ const CXXMethodDecl *Method;
+
+ /// Offset - the base offset of the overrider in the layout class.
+ CharUnits Offset;
+
+ OverriderInfo() : Method(0), Offset(CharUnits::Zero()) { }
+ };
+
+private:
+ /// MostDerivedClass - The most derived class for which the final overriders
+ /// are stored.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building final overriders for a
+ /// construction vtable, this holds the offset from the layout class to the
+ /// most derived class.
+ const CharUnits MostDerivedClassOffset;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if the final overriders are for a
+ /// construction vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ ASTContext &Context;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ /// MethodBaseOffsetPairTy - Uniquely identifies a member function
+ /// in a base subobject.
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;
+
+ typedef llvm::DenseMap<MethodBaseOffsetPairTy,
+ OverriderInfo> OverridersMapTy;
+
+ /// OverridersMap - The final overriders for all virtual member functions of
+ /// all the base subobjects of the most derived class.
+ OverridersMapTy OverridersMap;
+
+ /// SubobjectOffsetMapTy - A mapping from a base subobject (represented
+ /// as a record decl and a subobject number) to its offsets in the most
+ /// derived class as well as the layout class.
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+ CharUnits> SubobjectOffsetMapTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
+
+ /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+ /// given base.
+ void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ CharUnits OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy& VisitedVirtualBases);
+
+public:
+ FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass);
+
+ /// getOverrider - Get the final overrider for the given method declaration in
+ /// the subobject with the given base offset.
+ OverriderInfo getOverrider(const CXXMethodDecl *MD,
+ CharUnits BaseOffset) const {
+ assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
+ "Did not find overrider!");
+
+ return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
+ }
+
+ /// dump - dump the final overriders.
+ void dump() {
+ VisitedVirtualBasesSetTy VisitedVirtualBases;
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ VisitedVirtualBases);
+ }
+
+};
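+
+// Illustrative example (hypothetical hierarchy): in
+//   struct A { virtual void f(); };
+//   struct B : A { void f(); };
+//   struct C : B { };
+// with C as the most derived class, the final overrider of A::f in both the A
+// and B subobjects is B::f, so getOverrider(<A::f's CXXMethodDecl>, <offset of
+// that subobject>) reports B::f together with the offset of the B subobject
+// that provides it.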
+
+FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass)
+ : MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()),
+ MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
+
+ // Compute base offsets.
+ SubobjectOffsetMapTy SubobjectOffsets;
+ SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+ SubobjectCountMapTy SubobjectCounts;
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*IsVirtual=*/false,
+ MostDerivedClassOffset,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
+
+ // Get the final overriders.
+ CXXFinalOverriderMap FinalOverriders;
+ MostDerivedClass->getFinalOverriders(FinalOverriders);
+
+ for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+ E = FinalOverriders.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const OverridingMethods& Methods = I->second;
+
+ for (OverridingMethods::const_iterator I = Methods.begin(),
+ E = Methods.end(); I != E; ++I) {
+ unsigned SubobjectNumber = I->first;
+ assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+ SubobjectNumber)) &&
+ "Did not find subobject offset!");
+
+ CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ SubobjectNumber)];
+
+ assert(I->second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = I->second.front();
+
+ const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+ assert(SubobjectLayoutClassOffsets.count(
+ std::make_pair(OverriderRD, Method.Subobject))
+ && "Did not find subobject offset!");
+ CharUnits OverriderOffset =
+ SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+ Method.Subobject)];
+
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OverriderOffset;
+ Overrider.Method = Method.Method;
+ }
+ }
+
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+#endif
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedRD,
+ const CXXBasePath &Path) {
+ CharUnits NonVirtualOffset = CharUnits::Zero();
+
+ unsigned NonVirtualStart = 0;
+ const CXXRecordDecl *VirtualBase = 0;
+
+ // First, look for the virtual base class.
+ for (unsigned I = 0, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ if (Element.Base->isVirtual()) {
+ // FIXME: Can we break when we find the first virtual base?
+ // (If we can't, can't we just iterate over the path in reverse order?)
+ NonVirtualStart = I + 1;
+ QualType VBaseType = Element.Base->getType();
+ VirtualBase =
+ cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Now compute the non-virtual offset.
+ for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ // Check the base class offset.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+ const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
+
+ NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ }
+
+ // FIXME: This should probably use CharUnits or something. Maybe we should
+ // even change the base offsets in ASTRecordLayout to be specified in
+ // CharUnits.
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
+
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *BaseRD,
+ const CXXRecordDecl *DerivedRD) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ llvm_unreachable("Class must be derived from the passed in base class!");
+ }
+
+ return ComputeBaseOffset(Context, DerivedRD, Paths.front());
+}
+
+static BaseOffset
+ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
+ const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
+ const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
+
+ // Canonicalize the return types.
+ CanQualType CanDerivedReturnType =
+ Context.getCanonicalType(DerivedFT->getResultType());
+ CanQualType CanBaseReturnType =
+ Context.getCanonicalType(BaseFT->getResultType());
+
+ assert(CanDerivedReturnType->getTypeClass() ==
+ CanBaseReturnType->getTypeClass() &&
+ "Types must have same type class!");
+
+ if (CanDerivedReturnType == CanBaseReturnType) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ if (isa<ReferenceType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
+ } else if (isa<PointerType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<PointerType>()->getPointeeType();
+ } else {
+ llvm_unreachable("Unexpected return type!");
+ }
+
+ // We need to compare unqualified types here; consider
+ // const T *Base::foo();
+ // T *Derived::foo();
+ if (CanDerivedReturnType.getUnqualifiedType() ==
+ CanBaseReturnType.getUnqualifiedType()) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ const CXXRecordDecl *DerivedRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+
+ const CXXRecordDecl *BaseRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+
+ return ComputeBaseOffset(Context, BaseRD, DerivedRD);
+}
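+
+// Illustrative example (hypothetical classes): covariant return types are what
+// can make the base offset non-empty here.
+//   struct A { virtual A *clone(); };
+//   struct B : A { virtual B *clone(); };
+// When B::clone overrides A::clone the pointee classes differ, so the code
+// above computes the A-in-B offset (possibly zero, possibly involving a
+// virtual base) that a return-value adjustment thunk would have to apply.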
+
+void
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ CharUnits OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ unsigned SubobjectNumber = 0;
+ if (!IsVirtual)
+ SubobjectNumber = ++SubobjectCounts[RD];
+
+ // Set up the subobject to offset mapping.
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+ assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
+ SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+ OffsetInLayoutClass;
+
+ // Traverse our bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetInLayoutClass;
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);
+
+ BaseOffset = Base.getBaseOffset() + Offset;
+ BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+ }
+
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual(), BaseOffsetInLayoutClass,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
+ }
+}
+
+void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy &VisitedVirtualBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have any virtual member functions.
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ CharUnits BaseOffset;
+ if (I->isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl)) {
+ // We've visited this base before.
+ continue;
+ }
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
+ }
+
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
+ }
+
+ Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+ Out << Base.getBaseOffset().getQuantity() << ")\n";
+
+ // Now dump the overriders for this base subobject.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
+
+ Out << " " << MD->getQualifiedNameAsString() << " - (";
+ Out << Overrider.Method->getQualifiedNameAsString();
+ Out << ", " << ", " << Overrider.Offset.getQuantity() << ')';
+
+ BaseOffset Offset;
+ if (!Overrider.Method->isPure())
+ Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+
+ if (!Offset.isEmpty()) {
+ Out << " [ret-adj: ";
+ if (Offset.VirtualBase)
+ Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+
+ Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
+ }
+
+ Out << "\n";
+ }
+}
+
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
+struct VCallOffsetMap {
+
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;
+
+ /// Offsets - Keeps track of methods and their offsets.
+ // FIXME: This should be a real map and not a vector.
+ SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+ /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+ /// can share the same vcall offset.
+ static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS);
+
+public:
+ /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+ /// add was successful, or false if there was already a member function with
+ /// the same signature in the map.
+ bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);
+
+ /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+ /// vtable address point) for the given virtual member function.
+ CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+  /// empty - Return whether the offset map is empty or not.
+ bool empty() const { return Offsets.empty(); }
+};
+
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ const FunctionProtoType *LT =
+ cast<FunctionProtoType>(LHS->getType().getCanonicalType());
+ const FunctionProtoType *RT =
+ cast<FunctionProtoType>(RHS->getType().getCanonicalType());
+
+ // Fast-path matches in the canonical types.
+ if (LT == RT) return true;
+
+ // Force the signatures to match. We can't rely on the overrides
+ // list here because there isn't necessarily an inheritance
+ // relationship between the two methods.
+ if (LT->getTypeQuals() != RT->getTypeQuals() ||
+ LT->getNumArgs() != RT->getNumArgs())
+ return false;
+ for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
+ if (LT->getArgType(I) != RT->getArgType(I))
+ return false;
+ return true;
+}
+
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ assert(LHS->isVirtual() && "LHS must be virtual!");
+  assert(RHS->isVirtual() && "RHS must be virtual!");
+
+ // A destructor can share a vcall offset with another destructor.
+ if (isa<CXXDestructorDecl>(LHS))
+ return isa<CXXDestructorDecl>(RHS);
+
+ // FIXME: We need to check more things here.
+
+ // The methods must have the same name.
+ DeclarationName LHSName = LHS->getDeclName();
+ DeclarationName RHSName = RHS->getDeclName();
+ if (LHSName != RHSName)
+ return false;
+
+ // And the same signatures.
+ return HasSameVirtualSignature(LHS, RHS);
+}
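+
+// For instance (hypothetical classes): B::f() and C::f() with identical
+// signatures can share one vcall offset slot, while overloads such as f(int)
+// and f(double), or a const/non-const pair, each need their own slot.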
+
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+ CharUnits OffsetOffset) {
+ // Check if we can reuse an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return false;
+ }
+
+ // Add the offset.
+ Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+ return true;
+}
+
+CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+ // Look for an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return Offsets[I].second;
+ }
+
+ llvm_unreachable("Should always find a vcall offset offset!");
+}
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+public:
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+ VBaseOffsetOffsetsMapTy;
+
+private:
+ /// MostDerivedClass - The most derived class for which we're building vcall
+ /// and vbase offsets.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// Components - vcall and vbase offset components
+ typedef SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+ VTableComponentVectorTy Components;
+
+ /// VisitedVirtualBases - Visited virtual bases.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// VCallOffsets - Keeps track of vcall offsets.
+ VCallOffsetMap VCallOffsets;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+ /// relative to the address point.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ /// (Can be null when we're not building a vtable of the most derived class).
+ const FinalOverriders *Overriders;
+
+ /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+ /// given base subobject.
+ void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+ CharUnits RealBaseOffset);
+
+ /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+ void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);
+
+ /// AddVBaseOffsets - Add vbase offsets for the given class.
+ void AddVBaseOffsets(const CXXRecordDecl *Base,
+ CharUnits OffsetInLayoutClass);
+
+ /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+ /// chars, relative to the vtable address point.
+ CharUnits getCurrentOffsetOffset() const;
+
+public:
+ VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ const CXXRecordDecl *LayoutClass,
+ const FinalOverriders *Overriders,
+ BaseSubobject Base, bool BaseIsVirtual,
+ CharUnits OffsetInLayoutClass)
+ : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+ // Add vcall and vbase offsets.
+ AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+ }
+
+ /// Methods for iterating over the components.
+ typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
+ const_iterator components_begin() const { return Components.rbegin(); }
+ const_iterator components_end() const { return Components.rend(); }
+
+ const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+};
+
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+ bool BaseIsVirtual,
+ CharUnits RealBaseOffset) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+ // Itanium C++ ABI 2.5.2:
+ // ..in classes sharing a virtual table with a primary base class, the vcall
+ // and vbase offsets added by the derived class all come before the vcall
+ // and vbase offsets required by the base class, so that the latter may be
+ // laid out as required by the base class without regard to additions from
+ // the derived class(es).
+
+ // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+ // emit them for the primary base first).
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+
+ CharUnits PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (PrimaryBaseIsVirtual) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallAndVBaseOffsets(
+ BaseSubobject(PrimaryBase,PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
+ }
+
+ AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+ // We only want to add vcall offsets for virtual bases.
+ if (BaseIsVirtual)
+ AddVCallOffsets(Base, RealBaseOffset);
+}
+
+CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+  // OffsetIndex is the index of this vcall or vbase offset, relative to the
+  // vtable address point. (We subtract 3 to account for the offset to top and
+  // the RTTI info just above the address point, plus the offset entry itself.)
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits OffsetOffset = PointerWidth * OffsetIndex;
+ return OffsetOffset;
+}
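+
+// Worked example (assuming 8-byte pointers): with an empty Components vector
+// the next offset lands at index -3, i.e. at -24 bytes from the address point,
+// immediately below the offset-to-top and RTTI entries; each further vcall or
+// vbase offset is emitted one pointer-width lower.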
+
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+ CharUnits VBaseOffset) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Handle the primary base first.
+ // We only want to add vcall offsets if the base is non-virtual; a virtual
+ // primary base will have its vcall and vbase offsets emitted already.
+ if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
+ // Get the base offset of the primary base.
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
+ VBaseOffset);
+ }
+
+ // Add the vcall offsets.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ CharUnits OffsetOffset = getCurrentOffsetOffset();
+
+ // Don't add a vcall offset if we already have one for this member function
+ // signature.
+ if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+ continue;
+
+ CharUnits Offset = CharUnits::Zero();
+
+ if (Overriders) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders->getOverrider(MD, Base.getBaseOffset());
+
+ /// The vcall offset is the offset from the virtual base to the object
+ /// where the function was overridden.
+ Offset = Overrider.Offset - VBaseOffset;
+ }
+
+ Components.push_back(
+ VTableComponent::MakeVCallOffset(Offset));
+ }
+
+ // And iterate over all non-virtual bases (ignoring the primary base).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl == PrimaryBase)
+ continue;
+
+ // Get the base offset of this base.
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ VBaseOffset);
+ }
+}
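+
+// Worked example (hypothetical hierarchy): if the most derived class D, at
+// offset 0, provides the final overrider of A::f() and the virtual base A
+// lies at offset 16, the vcall offset stored for f is 0 - 16 = -16; adding it
+// to a 'this' that points at the A subobject yields the D subobject that the
+// overrider expects.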
+
+void
+VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass) {
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Add vbase offsets.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base that we haven't visited before.
+ if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ CharUnits Offset =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
+
+ // Add the vbase offset offset.
+ assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+ "vbase offset offset already exists!");
+
+ CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(
+ std::make_pair(BaseDecl, VBaseOffsetOffset));
+
+ Components.push_back(
+ VTableComponent::MakeVBaseOffset(Offset));
+ }
+
+ // Check the base class looking for more vbase offsets.
+ AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+ }
+}
+
+/// VTableBuilder - Class for building vtable layout information.
+class VTableBuilder {
+public:
+ /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+ /// primary bases.
+ typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+ PrimaryBasesSetVectorTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+ VBaseOffsetOffsetsMapTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t>
+ AddressPointsMapTy;
+
+private:
+ /// VTables - Global vtable information.
+ VTableContext &VTables;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// vtable.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building a construction vtable, this
+ /// holds the offset from the layout class to the most derived class.
+ const CharUnits MostDerivedClassOffset;
+
+ /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+ /// base. (This only makes sense when building a construction vtable).
+ bool MostDerivedClassIsVirtual;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ const FinalOverriders Overriders;
+
+ /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
+ /// bases in this vtable.
+ llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
+ /// the most derived class.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// Components - The components of the vtable being built.
+ SmallVector<VTableComponent, 64> Components;
+
+ /// AddressPoints - Address points for the vtable being built.
+ AddressPointsMapTy AddressPoints;
+
+ /// MethodInfo - Contains information about a method in a vtable.
+  /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// BaseOffset - The base offset of this method.
+ const CharUnits BaseOffset;
+
+ /// BaseOffsetInLayoutClass - The base offset in the layout class of this
+ /// method.
+ const CharUnits BaseOffsetInLayoutClass;
+
+ /// VTableIndex - The index in the vtable that this method has.
+ /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VTableIndex;
+
+ MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass,
+ uint64_t VTableIndex)
+ : BaseOffset(BaseOffset),
+ BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
+ VTableIndex(VTableIndex) { }
+
+ MethodInfo()
+ : BaseOffset(CharUnits::Zero()),
+ BaseOffsetInLayoutClass(CharUnits::Zero()),
+ VTableIndex(0) { }
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vtable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
+
+ /// VTableThunks - The thunks by vtable index in the vtable currently being
+ /// built.
+ VTableThunksMapTy VTableThunks;
+
+ typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vtable is currently being built.
+ ThunksMapTy Thunks;
+
+ /// AddThunk - Add a thunk for the given method.
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
+
+ /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
+ /// part of the vtable we're currently building.
+ void ComputeThisAdjustments();
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+  /// PrimaryVirtualBases - All known virtual bases that are a primary base of
+  /// some other base.
+ VisitedVirtualBasesSetTy PrimaryVirtualBases;
+
+ /// ComputeReturnAdjustment - Compute the return adjustment given a return
+ /// adjustment base offset.
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
+
+ /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
+ /// the 'this' pointer from the base subobject to the derived subobject.
+ BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const;
+
+ /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
+ /// given virtual member function, its offset in the layout class and its
+ /// final overrider.
+ ThisAdjustment
+ ComputeThisAdjustment(const CXXMethodDecl *MD,
+ CharUnits BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider);
+
+ /// AddMethod - Add a single virtual member function to the vtable
+ /// components vector.
+ void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
+
+ /// IsOverriderUsed - Returns whether the overrider will ever be used in this
+ /// part of the vtable.
+ ///
+ /// Itanium C++ ABI 2.5.2:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : virtual public A { int i; };
+ /// struct C : virtual public A { int j; };
+ /// struct D : public B, public C {};
+ ///
+ /// When B and C are declared, A is a primary base in each case, so although
+ /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
+ /// adjustment is required and no thunk is generated. However, inside D
+ /// objects, A is no longer a primary base of C, so if we allowed calls to
+ /// C::f() to use the copy of A's vtable in the C subobject, we would need
+ /// to adjust this from C* to B::A*, which would require a third-party
+ /// thunk. Since we require that a call to C::f() first convert to A*,
+ /// C-in-D's copy of A's vtable is never referenced, so this is not
+ /// necessary.
+ bool IsOverriderUsed(const CXXMethodDecl *Overrider,
+ CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass) const;
+
+
+ /// AddMethods - Add the methods of this base subobject and all its
+ /// primary bases to the vtable components vector.
+ void AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases);
+
+ // LayoutVTable - Layout the vtable for the given base class, including its
+ // secondary vtables and any vtables for virtual bases.
+ void LayoutVTable();
+
+ /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
+ /// given base subobject, as well as all its secondary vtables.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
+ /// in the layout class.
+ void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ CharUnits OffsetInLayoutClass);
+
+ /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
+ /// subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ CharUnits OffsetInLayoutClass);
+
+ /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
+ /// class hierarchy.
+ void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// given base (excluding any primary bases).
+ void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+  /// isBuildingConstructorVTable - Return whether this vtable builder is
+ /// building a construction vtable.
+ bool isBuildingConstructorVTable() const {
+ return MostDerivedClass != LayoutClass;
+ }
+
+public:
+ VTableBuilder(VTableContext &VTables, const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+                bool MostDerivedClassIsVirtual,
+                const CXXRecordDecl *LayoutClass)
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset),
+ MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
+
+ LayoutVTable();
+
+ if (Context.getLangOpts().DumpVTableLayouts)
+ dumpLayout(llvm::errs());
+ }
+
+ uint64_t getNumThunks() const {
+ return Thunks.size();
+ }
+
+ ThunksMapTy::const_iterator thunks_begin() const {
+ return Thunks.begin();
+ }
+
+ ThunksMapTy::const_iterator thunks_end() const {
+ return Thunks.end();
+ }
+
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+
+ const AddressPointsMapTy &getAddressPoints() const {
+ return AddressPoints;
+ }
+
+ /// getNumVTableComponents - Return the number of components in the vtable
+ /// currently built.
+ uint64_t getNumVTableComponents() const {
+ return Components.size();
+ }
+
+ const VTableComponent *vtable_component_begin() const {
+ return Components.begin();
+ }
+
+ const VTableComponent *vtable_component_end() const {
+ return Components.end();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_begin() const {
+ return AddressPoints.begin();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_end() const {
+ return AddressPoints.end();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
+ /// dumpLayout - Dump the vtable layout.
+ void dumpLayout(raw_ostream&);
+};
+
+void VTableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVTable() &&
+ "Can't add thunks for construction vtable");
+
+ SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
+
+ // Check if we have this thunk already.
+ if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
+ ThunksVector.end())
+ return;
+
+ ThunksVector.push_back(Thunk);
+}
+
+typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
+
+/// ComputeAllOverriddenMethods - Given a method decl, will return a set of all
+/// the overridden methods that the function decl overrides.
+static void
+ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
+ OverriddenMethodsSetTy& OverriddenMethods) {
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ OverriddenMethods.insert(OverriddenMD);
+
+ ComputeAllOverriddenMethods(OverriddenMD, OverriddenMethods);
+ }
+}
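+
+// For example (hypothetical chain): if C::f() overrides B::f(), which in turn
+// overrides A::f(), the set computed for C::f() is { B::f(), A::f() }.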
+
+void VTableBuilder::ComputeThisAdjustments() {
+ // Now go through the method info map and see if any of the methods need
+ // 'this' pointer adjustments.
+ for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
+ E = MethodInfoMap.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const MethodInfo &MethodInfo = I->second;
+
+ // Ignore adjustments for unused function pointers.
+ uint64_t VTableIndex = MethodInfo.VTableIndex;
+ if (Components[VTableIndex].getKind() ==
+ VTableComponent::CK_UnusedFunctionPointer)
+ continue;
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, MethodInfo.BaseOffset);
+
+ // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
+ // When a return thunk is needed by a derived class that overrides a
+ // virtual base, gcc uses a virtual 'this' adjustment as well.
+ // While the thunk itself might be needed by vtables in subclasses or
+ // in construction vtables, there doesn't seem to be a reason for using
+ // the thunk in this vtable. Still, we do so to match gcc.
+ if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
+ continue;
+ }
+
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
+
+ if (ThisAdjustment.isEmpty())
+ continue;
+
+ // Add it.
+ VTableThunks[VTableIndex].This = ThisAdjustment;
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ // Add an adjustment for the deleting destructor as well.
+ VTableThunks[VTableIndex + 1].This = ThisAdjustment;
+ }
+ }
+
+  // Clear the method info map.
+ MethodInfoMap.clear();
+
+ if (isBuildingConstructorVTable()) {
+ // We don't need to store thunk information for construction vtables.
+ return;
+ }
+
+ for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
+ E = VTableThunks.end(); I != E; ++I) {
+ const VTableComponent &Component = Components[I->first];
+ const ThunkInfo &Thunk = I->second;
+ const CXXMethodDecl *MD;
+
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind!");
+ case VTableComponent::CK_FunctionPointer:
+ MD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ MD = Component.getDestructorDecl();
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ // We've already added the thunk when we saw the complete dtor pointer.
+ continue;
+ }
+
+ if (MD->getParent() == MostDerivedClass)
+ AddThunk(MD, Thunk);
+ }
+}
+
+ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ ReturnAdjustment Adjustment;
+
+ if (!Offset.isEmpty()) {
+ if (Offset.VirtualBase) {
+ // Get the virtual base offset offset.
+ if (Offset.DerivedClass == MostDerivedClass) {
+ // We can get the offset offset directly from our map.
+ Adjustment.VBaseOffsetOffset =
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity();
+ } else {
+ Adjustment.VBaseOffsetOffset =
+ VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase).getQuantity();
+ }
+ }
+
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
+ }
+
+ return Adjustment;
+}
+
+BaseOffset
+VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const {
+ const CXXRecordDecl *BaseRD = Base.getBase();
+ const CXXRecordDecl *DerivedRD = Derived.getBase();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ llvm_unreachable("Class must be derived from the passed in base class!");
+ }
+
+ // We have to go through all the paths, and see which one leads us to the
+ // right base subobject.
+ for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
+
+ CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
+
+ if (Offset.VirtualBase) {
+ // If we have a virtual base class, the non-virtual offset is relative
+ // to the virtual base class offset.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+      // Get the virtual base offset in the layout class.
+ OffsetToBaseSubobject +=
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ } else {
+ // Otherwise, the non-virtual offset is relative to the derived class
+ // offset.
+ OffsetToBaseSubobject += Derived.getBaseOffset();
+ }
+
+ // Check if this path gives us the right base subobject.
+ if (OffsetToBaseSubobject == Base.getBaseOffset()) {
+ // Since we're going from the base class _to_ the derived class, we'll
+ // invert the non-virtual offset here.
+ Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
+ return Offset;
+ }
+ }
+
+ return BaseOffset();
+}
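+
+// Worked example (hypothetical, non-virtual case): if the B subobject lies 8
+// bytes into D, the derived-to-base offset along the path is +8, so the
+// returned BaseOffset carries NonVirtualOffset = -8; adding -8 to a 'this'
+// pointing at the B subobject yields the D subobject, which is the direction
+// a 'this' adjustment has to go.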
+
+ThisAdjustment
+VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+ CharUnits BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) {
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ return ThisAdjustment();
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseOffsetInLayoutClass);
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+ if (Offset.isEmpty())
+ return ThisAdjustment();
+
+ ThisAdjustment Adjustment;
+
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/0,
+ BaseSubobject(Offset.VirtualBase,
+ CharUnits::Zero()),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/
+ CharUnits::Zero());
+
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ Adjustment.VCallOffsetOffset =
+ VCallOffsets.getVCallOffsetOffset(MD).getQuantity();
+ }
+
+ // Set the non-virtual part of the adjustment.
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
+
+ return Adjustment;
+}
+
+void
+VTableBuilder::AddMethod(const CXXMethodDecl *MD,
+ ReturnAdjustment ReturnAdjustment) {
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(ReturnAdjustment.isEmpty() &&
+ "Destructor can't have return adjustment!");
+
+ // Add both the complete destructor and the deleting destructor.
+ Components.push_back(VTableComponent::MakeCompleteDtor(DD));
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
+ } else {
+ // Add the return adjustment if necessary.
+ if (!ReturnAdjustment.isEmpty())
+ VTableThunks[Components.size()].Return = ReturnAdjustment;
+
+ // Add the function.
+ Components.push_back(VTableComponent::MakeFunction(MD));
+ }
+}
+
+/// OverridesIndirectMethodInBases - Return whether the given member function
+/// overrides any methods in the set of given bases.
+/// Unlike OverridesMethodInBase, this checks "overriders of overriders".
+/// For example, if we have:
+///
+/// struct A { virtual void f(); };
+/// struct B : A { virtual void f(); };
+/// struct C : B { virtual void f(); };
+///
+/// OverridesIndirectMethodInBases will return true if given C::f as the method
+/// and { A } as the set of bases.
+static bool
+OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ if (Bases.count(MD->getParent()))
+ return true;
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
+ return true;
+ }
+
+ return false;
+}
+
+bool
+VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
+ CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass) const {
+ // If the base and the first base in the primary base chain have the same
+ // offsets, then this overrider will be used.
+ if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
+ return true;
+
+ // We know now that Base (or a direct or indirect base of it) is a primary
+ // base in part of the class hierarchy, but not a primary base in the most
+ // derived class.
+
+ // If the overrider is the first base in the primary base chain, we know
+ // that the overrider will be used.
+ if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
+ return true;
+
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+
+ const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
+ PrimaryBases.insert(RD);
+
+ // Now traverse the base chain, starting with the first base, until we find
+ // the base that is no longer a primary base.
+ while (true) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+      // Now check if this virtual primary base is no longer a primary base
+      // in the layout class.
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ FirstBaseOffsetInLayoutClass) {
+ // We found it, stop walking the chain.
+ break;
+ }
+ } else {
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+ }
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+
+ RD = PrimaryBase;
+ }
+
+ // If the final overrider is an override of one of the primary bases,
+ // then we know that it will be used.
+ return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
+}
+
+/// FindNearestOverriddenMethod - Given a method, returns the overridden method
+/// from the nearest base. Returns null if no method was found.
+static const CXXMethodDecl *
+FindNearestOverriddenMethod(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+
+ for (int I = Bases.size(), E = 0; I != E; --I) {
+ const CXXRecordDecl *PrimaryBase = Bases[I - 1];
+
+    // Now check the overridden methods.
+    for (OverriddenMethodsSetTy::const_iterator J = OverriddenMethods.begin(),
+         JE = OverriddenMethods.end(); J != JE; ++J) {
+      const CXXMethodDecl *OverriddenMD = *J;
+
+ // We found our overridden method.
+ if (OverriddenMD->getParent() == PrimaryBase)
+ return OverriddenMD;
+ }
+ }
+
+ return 0;
+}
+
+void
+VTableBuilder::AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ CharUnits PrimaryBaseOffset;
+ CharUnits PrimaryBaseOffsetInLayoutClass;
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
+ }
+
+ AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+ }
+
+ // Now go through all virtual member functions and add them.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, Base.getBaseOffset());
+
+    // Check if this virtual member function overrides a method in a primary
+    // base. If this is the case, and the return type doesn't require an
+    // adjustment, then we can just use the member function from the primary
+    // base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+ // Replace the method info of the overridden method with our own
+ // method.
+ assert(MethodInfoMap.count(OverriddenMD) &&
+ "Did not find the overridden method!");
+ MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
+
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ OverriddenMethodInfo.VTableIndex);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+ MethodInfoMap.erase(OverriddenMD);
+
+ // If the overridden method exists in a virtual base class or a direct
+ // or indirect base class of a virtual base class, we need to emit a
+ // thunk if we ever have a class hierarchy where the base class is not
+ // a primary base in the complete object.
+ if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
+ // Compute the this adjustment.
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
+ Overrider);
+
+ if (ThisAdjustment.VCallOffsetOffset &&
+ Overrider.Method->getParent() == MostDerivedClass) {
+
+          // There's no return adjustment between OverriddenMD and MD,
+          // but that doesn't mean there isn't one between MD and
+          // the final overrider.
+ BaseOffset ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ // This is a virtual thunk for the most derived class, add it.
+ AddThunk(Overrider.Method,
+ ThunkInfo(ThisAdjustment, ReturnAdjustment));
+ }
+ }
+
+ continue;
+ }
+ }
+
+ // Insert the method info for this method.
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ Components.size());
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+
+ // Check if this overrider is going to be used.
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
+ FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass)) {
+ Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
+ continue;
+ }
+
+ // Check if this overrider needs a return adjustment.
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ if (!OverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ }
+
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ AddMethod(Overrider.Method, ReturnAdjustment);
+ }
+}
+
+void VTableBuilder::LayoutVTable() {
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass,
+ CharUnits::Zero()),
+ /*BaseIsMorallyVirtual=*/false,
+ MostDerivedClassIsVirtual,
+ MostDerivedClassOffset);
+
+ VisitedVirtualBasesSetTy VBases;
+
+ // Determine the primary virtual bases.
+ DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
+ VBases);
+ VBases.clear();
+
+ LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
+
+  // -fapple-kext adds an extra entry at the end of the vtable.
+ bool IsAppleKext = Context.getLangOpts().AppleKext;
+ if (IsAppleKext)
+ Components.push_back(VTableComponent::MakeVCallOffset(CharUnits::Zero()));
+}
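+
+// Illustrative shape of the result (hypothetical hierarchy):
+//
+//   struct A { virtual void f(); int a; };
+//   struct B : virtual A { virtual void g(); };
+//
+// The vtable group laid out for B is the primary vtable
+//   [vbase_offset (A), offset_to_top, B RTTI, B::g()]
+// with the address point just before B::g(), followed by the vtable for the
+// virtual base A
+//   [vcall_offset (f), offset_to_top, A RTTI, A::f()]
+// as produced by LayoutPrimaryAndSecondaryVTables and
+// LayoutVTablesForVirtualBases above.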
+
+void
+VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ CharUnits OffsetInLayoutClass) {
+ assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
+
+ // Add vcall and vbase offsets for this vtable.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
+ Base, BaseIsVirtualInLayoutClass,
+ OffsetInLayoutClass);
+ Components.append(Builder.components_begin(), Builder.components_end());
+
+ // Check if we need to add these vcall offsets.
+ if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
+
+ if (VCallOffsets.empty())
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ // If we're laying out the most derived class we want to keep track of the
+ // virtual base class offset offsets.
+ if (Base.getBase() == MostDerivedClass)
+ VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
+
+ // Add the offset to top.
+ CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass;
+ Components.push_back(
+ VTableComponent::MakeOffsetToTop(OffsetToTop));
+
+ // Next, add the RTTI.
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+
+ uint64_t AddressPoint = Components.size();
+
+ // Now go through all virtual member functions and add them.
+ PrimaryBasesSetVectorTy PrimaryBases;
+ AddMethods(Base, OffsetInLayoutClass,
+ Base.getBase(), OffsetInLayoutClass,
+ PrimaryBases);
+
+ // Compute 'this' pointer adjustments.
+ ComputeThisAdjustments();
+
+ // Add all address points.
+ const CXXRecordDecl *RD = Base.getBase();
+ while (true) {
+ AddressPoints.insert(std::make_pair(
+ BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.isPrimaryBaseVirtual()) {
+ // Check if this virtual primary base is a primary base in the layout
+ // class. If it's not, we don't want to add it.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We don't want to add this class (or any of its primary bases).
+ break;
+ }
+ }
+
+ RD = PrimaryBase;
+ }
+
+ // Layout secondary vtables.
+ LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
+}
+
+void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ CharUnits OffsetInLayoutClass) {
+ // Itanium C++ ABI 2.5.2:
+ // Following the primary virtual table of a derived class are secondary
+ // virtual tables for each of its proper base classes, except any primary
+ // base(s) with which it shares its primary virtual table.
+
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ // Ignore virtual bases, we'll emit them later.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ if (isBuildingConstructorVTable()) {
+ // Itanium C++ ABI 2.6.4:
+ // Some of the base class subobjects may not need construction virtual
+ // tables, which will therefore not be present in the construction
+ // virtual table group, even though the subobject virtual tables are
+ // present in the main virtual table group for the complete object.
+ if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
+ continue;
+ }
+
+ // Get the base offset of this base.
+ CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
+
+ CharUnits BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + RelativeBaseOffset;
+
+ // Don't emit a secondary vtable for a primary base. We might however want
+ // to emit secondary vtables for other bases of this base.
+ if (BaseDecl == PrimaryBase) {
+ LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
+ continue;
+ }
+
+ // Layout the primary vtable (and any secondary vtables) for this base.
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
+ BaseOffsetInLayoutClass);
+ }
+}
+
+void
+VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Check if this base has a primary base.
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+
+ // Check if it's virtual.
+ if (Layout.isPrimaryBaseVirtual()) {
+ bool IsPrimaryVirtualBase = true;
+
+ if (isBuildingConstructorVTable()) {
+ // Check if the base is actually a primary base in the class we use for
+ // layout.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ CharUnits PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ // We know that the base is not a primary base in the layout class if
+ // the base offsets are different.
+ if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
+ IsPrimaryVirtualBase = false;
+ }
+
+ if (IsPrimaryVirtualBase)
+ PrimaryVirtualBases.insert(PrimaryBase);
+ }
+ }
+
+ // Traverse bases, looking for more primary virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffsetInLayoutClass;
+
+ if (I->isVirtual()) {
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
+ }
+}
+
+void
+VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ // Itanium C++ ABI 2.5.2:
+ // Then come the virtual base virtual tables, also in inheritance graph
+ // order, and again excluding primary bases (which share virtual tables with
+ // the classes for which they are primary).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this base needs a vtable. (If it's virtual, not a primary base
+ // of some other class, and we haven't visited it before).
+ if (I->isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ CharUnits BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
+ BaseOffsetInLayoutClass);
+ }
+
+ // We only need to check the base for virtual base vtables if it actually
+ // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVTablesForVirtualBases(BaseDecl, VBases);
+ }
+}
+
+/// dumpLayout - Dump the vtable layout.
+void VTableBuilder::dumpLayout(raw_ostream& Out) {
+
+ if (isBuildingConstructorVTable()) {
+ Out << "Construction vtable for ('";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+ Out << MostDerivedClassOffset.getQuantity() << ") in '";
+ Out << LayoutClass->getQualifiedNameAsString();
+ } else {
+ Out << "Vtable for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ }
+ Out << "' (" << Components.size() << " entries).\n";
+
+ // Iterate through the address points and insert them into a new map where
+ // they are keyed by the index and not the base object.
+ // Since an address point can be shared by multiple subobjects, we use an
+ // STL multimap.
+ std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
+ for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
+ E = AddressPoints.end(); I != E; ++I) {
+ const BaseSubobject& Base = I->first;
+ uint64_t Index = I->second;
+
+ AddressPointsByIndex.insert(std::make_pair(Index, Base));
+ }
+
+ for (unsigned I = 0, E = Components.size(); I != E; ++I) {
+ uint64_t Index = I;
+
+ Out << llvm::format("%4d | ", I);
+
+ const VTableComponent &Component = Components[I];
+
+ // Dump the component.
+ switch (Component.getKind()) {
+
+ case VTableComponent::CK_VCallOffset:
+ Out << "vcall_offset ("
+ << Component.getVCallOffset().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_VBaseOffset:
+ Out << "vbase_offset ("
+ << Component.getVBaseOffset().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_OffsetToTop:
+ Out << "offset_to_top ("
+ << Component.getOffsetToTop().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_RTTI:
+ Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+ break;
+
+ case VTableComponent::CK_FunctionPointer: {
+ const CXXMethodDecl *MD = Component.getFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "\n [return adjustment: ";
+ Out << Thunk.Return.NonVirtual << " non-virtual";
+
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ Out << ']';
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ bool IsComplete =
+ Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
+
+ const CXXDestructorDecl *DD = Component.getDestructorDecl();
+
+ Out << DD->getQualifiedNameAsString();
+ if (IsComplete)
+ Out << "() [complete]";
+ else
+ Out << "() [deleting]";
+
+ if (DD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer: {
+ const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << "[unused] " << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+      break;
+    }
+
+ }
+
+ Out << '\n';
+
+ // Dump the next address point.
+ uint64_t NextIndex = Index + 1;
+ if (AddressPointsByIndex.count(NextIndex)) {
+ if (AddressPointsByIndex.count(NextIndex) == 1) {
+ const BaseSubobject &Base =
+ AddressPointsByIndex.find(NextIndex)->second;
+
+ Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
+ Out << ", " << Base.getBaseOffset().getQuantity();
+ Out << ") vtable address --\n";
+ } else {
+ CharUnits BaseOffset =
+ AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
+
+ // We store the class names in a set to get a stable order.
+ std::set<std::string> ClassNames;
+ for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
+ AddressPointsByIndex.lower_bound(NextIndex), E =
+ AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
+ assert(I->second.getBaseOffset() == BaseOffset &&
+ "Invalid base offset!");
+ const CXXRecordDecl *RD = I->second.getBase();
+ ClassNames.insert(RD->getQualifiedNameAsString());
+ }
+
+ for (std::set<std::string>::const_iterator I = ClassNames.begin(),
+ E = ClassNames.end(); I != E; ++I) {
+ Out << " -- (" << *I;
+ Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
+ }
+ }
+ }
+ }
+
+ Out << '\n';
+
+ if (isBuildingConstructorVTable())
+ return;
+
+ if (MostDerivedClass->getNumVBases()) {
+ // We store the virtual base class names and their offsets in a map to get
+ // a stable order.
+
+ std::map<std::string, CharUnits> ClassNamesAndOffsets;
+ for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
+ E = VBaseOffsetOffsets.end(); I != E; ++I) {
+ std::string ClassName = I->first->getQualifiedNameAsString();
+ CharUnits OffsetOffset = I->second;
+ ClassNamesAndOffsets.insert(
+ std::make_pair(ClassName, OffsetOffset));
+ }
+
+ Out << "Virtual base offset offsets for '";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
+ Out << ClassNamesAndOffsets.size();
+ Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (std::map<std::string, CharUnits>::const_iterator I =
+ ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
+ I != E; ++I)
+ Out << " " << I->first << " | " << I->second.getQuantity() << '\n';
+
+ Out << "\n";
+ }
+
+ if (!Thunks.empty()) {
+ // We store the method names in a map to get a stable order.
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
+
+ for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
+ I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
+ }
+
+ for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
+ MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
+ I != E; ++I) {
+ const std::string &MethodName = I->first;
+ const CXXMethodDecl *MD = I->second;
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::sort(ThunksVector.begin(), ThunksVector.end());
+
+ Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
+ Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
+ const ThunkInfo &Thunk = ThunksVector[I];
+
+ Out << llvm::format("%4d | ", I);
+
+ // If this function pointer has a return pointer adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "return adjustment: " << Thunk.This.NonVirtual;
+ Out << " non-virtual";
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ if (!Thunk.This.isEmpty())
+ Out << "\n ";
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+ }
+
+ Out << '\n';
+ }
+
+ Out << '\n';
+ }
+ }
+
+ // Compute the vtable indices for all the member functions.
+ // Store them in a map keyed by the index so we'll get a sorted table.
+ std::map<uint64_t, std::string> IndicesMap;
+
+ for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(),
+ e = MostDerivedClass->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual member functions.
+ if (!MD->isVirtual())
+ continue;
+
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Complete))] =
+ MethodName + " [complete]";
+ IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Deleting))] =
+ MethodName + " [deleting]";
+ } else {
+ IndicesMap[VTables.getMethodVTableIndex(MD)] = MethodName;
+ }
+ }
+
+ // Print the vtable indices for all the member functions.
+ if (!IndicesMap.empty()) {
+ Out << "VTable indices for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ Out << "' (" << IndicesMap.size() << " entries).\n";
+
+ for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
+ E = IndicesMap.end(); I != E; ++I) {
+ uint64_t VTableIndex = I->first;
+ const std::string &MethodName = I->second;
+
+ Out << llvm::format(" %4" PRIu64 " | ", VTableIndex) << MethodName
+ << '\n';
+ }
+ }
+
+ Out << '\n';
+}
+
+}
+
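+ // VTableLayout keeps its own copies of the component and thunk arrays
+ // passed to it.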
+VTableLayout::VTableLayout(uint64_t NumVTableComponents,
+ const VTableComponent *VTableComponents,
+ uint64_t NumVTableThunks,
+ const VTableThunkTy *VTableThunks,
+ const AddressPointsMapTy &AddressPoints)
+ : NumVTableComponents(NumVTableComponents),
+ VTableComponents(new VTableComponent[NumVTableComponents]),
+ NumVTableThunks(NumVTableThunks),
+ VTableThunks(new VTableThunkTy[NumVTableThunks]),
+ AddressPoints(AddressPoints) {
+ std::copy(VTableComponents, VTableComponents+NumVTableComponents,
+ this->VTableComponents.get());
+ std::copy(VTableThunks, VTableThunks+NumVTableThunks,
+ this->VTableThunks.get());
+}
+
+VTableLayout::~VTableLayout() { }
+
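+ // The context owns the cached VTableLayout objects; free them here.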
+VTableContext::~VTableContext() {
+ llvm::DeleteContainerSeconds(VTableLayouts);
+}
+
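+ // Recursively collect the chain of primary bases of RD into PrimaryBases.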
+static void
+CollectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ VTableBuilder::PrimaryBasesSetVectorTy &PrimaryBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ return;
+
+ CollectPrimaryBases(PrimaryBase, Context, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+}
+
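+ // Assign a vtable slot index to every virtual member function of RD,
+ // following the Itanium ABI ordering rules quoted below, and record the
+ // total number of virtual function pointers for the class.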
+void VTableContext::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
+
+ // Itanium C++ ABI 2.5.2:
+ // The order of the virtual function pointers in a virtual table is the
+ // order of declaration of the corresponding member functions in the class.
+ //
+ // There is an entry for any virtual function declared in a class,
+ // whether it is a new function or overrides a base class function,
+ // unless it overrides a function from the primary base, and conversion
+ // between their return types does not require an adjustment.
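+ //
+ // A small illustrative example (hypothetical classes, not part of the
+ // ABI text):
+ //   struct A { virtual void f(); virtual void g(); };
+ //   struct B : A { virtual void f(); virtual void h(); };
+ // A is B's primary base, so B::f reuses A::f's slot (index 0) and A::g
+ // keeps index 1; only B::h introduces a new slot, at index 2.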
+
+ int64_t CurrentIndex = 0;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (PrimaryBase) {
+ assert(PrimaryBase->isCompleteDefinition() &&
+ "Should have the definition decl of the primary base!");
+
+ // Since the record decl shares its vtable pointer with the primary base,
+ // we need to start counting at the end of the primary base's vtable.
+ CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase);
+ }
+
+ // Collect all the primary bases, so we can check whether methods override
+ // a method from the base.
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+ CollectPrimaryBases(RD, Context, PrimaryBases);
+
+ const CXXDestructorDecl *ImplicitVirtualDtor = 0;
+
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual methods.
+ if (!MD->isVirtual())
+ continue;
+
+ // Check if this method overrides a method in the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ // Check if converting from the return type of the method to the
+ // return type of the overridden method requires conversion.
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+ // The method shares its vtable index with the method it overrides in
+ // the primary base class, so reuse that index.
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ const CXXDestructorDecl *OverriddenDD =
+ cast<CXXDestructorDecl>(OverriddenMD);
+
+ // Add both the complete and deleting entries.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
+ } else {
+ MethodVTableIndices[MD] = getMethodVTableIndex(OverriddenMD);
+ }
+
+ // We don't need to add an entry for this method.
+ continue;
+ }
+ }
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (MD->isImplicit()) {
+ assert(!ImplicitVirtualDtor &&
+ "Did already see an implicit virtual dtor!");
+ ImplicitVirtualDtor = DD;
+ continue;
+ }
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
+ } else {
+ // Add the entry.
+ MethodVTableIndices[MD] = CurrentIndex++;
+ }
+ }
+
+ if (ImplicitVirtualDtor) {
+ // Itanium C++ ABI 2.5.2:
+ // If a class has an implicitly-defined virtual destructor,
+ // its entries come after the declared virtual function pointers.
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
+ CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
+ CurrentIndex++;
+ }
+
+ NumVirtualFunctionPointers[RD] = CurrentIndex;
+}
+
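+ // Return the number of virtual function pointers in RD's vtable, computing
+ // and caching the method indices on first use.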
+uint64_t VTableContext::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ NumVirtualFunctionPointers.find(RD);
+ if (I != NumVirtualFunctionPointers.end())
+ return I->second;
+
+ ComputeMethodVTableIndices(RD);
+
+ I = NumVirtualFunctionPointers.find(RD);
+ assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
+ return I->second;
+}
+
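+ // Return the vtable slot index for GD, lazily computing the indices for its
+ // parent class if they are not cached yet.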
+uint64_t VTableContext::getMethodVTableIndex(GlobalDecl GD) {
+ MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
+ if (I != MethodVTableIndices.end())
+ return I->second;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ ComputeMethodVTableIndices(RD);
+
+ I = MethodVTableIndices.find(GD);
+ assert(I != MethodVTableIndices.end() && "Did not find index!");
+ return I->second;
+}
+
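+ // Return the offset of the vtable slot that holds the offset of VBase
+ // within RD (the "vbase offset offset"), caching the results for all of
+ // RD's virtual bases.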
+CharUnits
+VTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
+ ClassPairTy ClassPair(RD, VBase);
+
+ VirtualBaseClassOffsetOffsetsMapTy::iterator I =
+ VirtualBaseClassOffsetOffsets.find(ClassPair);
+ if (I != VirtualBaseClassOffsetOffsets.end())
+ return I->second;
+
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
+ BaseSubobject(RD, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false,
+ /*OffsetInLayoutClass=*/CharUnits::Zero());
+
+ for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Cache the offsets for every virtual base the builder computed.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(
+ std::make_pair(ClassPair, I->second));
+ }
+
+ I = VirtualBaseClassOffsetOffsets.find(ClassPair);
+ assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
+
+ return I->second;
+}
+
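+ // Package the components, sorted thunks and address points produced by a
+ // VTableBuilder into a freshly allocated VTableLayout.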
+static VTableLayout *CreateVTableLayout(const VTableBuilder &Builder) {
+ SmallVector<VTableLayout::VTableThunkTy, 1>
+ VTableThunks(Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ return new VTableLayout(Builder.getNumVTableComponents(),
+ Builder.vtable_component_begin(),
+ VTableThunks.size(),
+ VTableThunks.data(),
+ Builder.getAddressPoints());
+}
+
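+ // Compute and cache the complete-object vtable layout for RD, together with
+ // its thunks and virtual base offset offsets.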
+void VTableContext::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
+ const VTableLayout *&Entry = VTableLayouts[RD];
+
+ // Check if we've computed this information before.
+ if (Entry)
+ return;
+
+ VTableBuilder Builder(*this, RD, CharUnits::Zero(),
+ /*MostDerivedClassIsVirtual=*/0, RD);
+ Entry = CreateVTableLayout(Builder);
+
+ // Add the known thunks.
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ // If we don't have the vbase information for this class, insert it.
+ // getVirtualBaseOffsetOffset will compute it separately without computing
+ // the rest of the vtable related information.
+ if (!RD->getNumVBases())
+ return;
+
+ const RecordType *VBaseRT =
+ RD->vbases_begin()->getType()->getAs<RecordType>();
+ const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl());
+
+ if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
+ return;
+
+ for (VTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Cache the offsets for every virtual base the builder computed.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+}
+
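+ // Build the layout of a construction vtable: the vtable used while
+ // constructing or destroying a base subobject (MostDerivedClass) of a
+ // complete object of type LayoutClass.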
+VTableLayout *VTableContext::createConstructionVTableLayout(
+ const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass) {
+ VTableBuilder Builder(*this, MostDerivedClass, MostDerivedClassOffset,
+ MostDerivedClassIsVirtual, LayoutClass);
+ return CreateVTableLayout(Builder);
+}