path: root/clang/lib/CodeGen
author    Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-10-15 17:10:06 +1100
committer Carlo Zancanaro <carlo@pc-4w14-0.cs.usyd.edu.au>  2012-10-15 17:10:06 +1100
commit    be1de4be954c80875ad4108e0a33e8e131b2f2c0 (patch)
tree      1fbbecf276bf7c7bdcbb4dd446099d6d90eaa516 /clang/lib/CodeGen
parent    c4626a62754862d20b41e8a46a3574264ea80e6d (diff)
parent    f1bd2e48c5324d3f7cda4090c87f8a5b6f463ce2 (diff)
Merge branch 'master' of ssh://bitbucket.org/czan/honours
Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/ABIInfo.h  181
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp  460
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp  2044
-rw-r--r--  clang/lib/CodeGen/CGBlocks.h  229
-rw-r--r--  clang/lib/CodeGen/CGBuilder.h  28
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp  4524
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp  126
-rw-r--r--  clang/lib/CodeGen/CGCUDARuntime.cpp  55
-rw-r--r--  clang/lib/CodeGen/CGCUDARuntime.h  54
-rw-r--r--  clang/lib/CodeGen/CGCXX.cpp  392
-rw-r--r--  clang/lib/CodeGen/CGCXXABI.cpp  199
-rw-r--r--  clang/lib/CodeGen/CGCXXABI.h  262
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp  2201
-rw-r--r--  clang/lib/CodeGen/CGCall.h  306
-rw-r--r--  clang/lib/CodeGen/CGClass.cpp  1841
-rw-r--r--  clang/lib/CodeGen/CGCleanup.cpp  1103
-rw-r--r--  clang/lib/CodeGen/CGCleanup.h  539
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp  2668
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.h  322
-rw-r--r--  clang/lib/CodeGen/CGDecl.cpp  1564
-rw-r--r--  clang/lib/CodeGen/CGDeclCXX.cpp  464
-rw-r--r--  clang/lib/CodeGen/CGException.cpp  1595
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp  3256
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp  1343
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp  1833
-rw-r--r--  clang/lib/CodeGen/CGExprComplex.cpp  839
-rw-r--r--  clang/lib/CodeGen/CGExprConstant.cpp  1496
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp  2857
-rw-r--r--  clang/lib/CodeGen/CGObjC.cpp  2974
-rw-r--r--  clang/lib/CodeGen/CGObjCGNU.cpp  2671
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp  6373
-rw-r--r--  clang/lib/CodeGen/CGObjCRuntime.cpp  374
-rw-r--r--  clang/lib/CodeGen/CGObjCRuntime.h  286
-rw-r--r--  clang/lib/CodeGen/CGOpenCLRuntime.cpp  28
-rw-r--r--  clang/lib/CodeGen/CGOpenCLRuntime.h  46
-rw-r--r--  clang/lib/CodeGen/CGRTTI.cpp  1015
-rw-r--r--  clang/lib/CodeGen/CGRecordLayout.h  281
-rw-r--r--  clang/lib/CodeGen/CGRecordLayoutBuilder.cpp  1170
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp  1683
-rw-r--r--  clang/lib/CodeGen/CGVTT.cpp  192
-rw-r--r--  clang/lib/CodeGen/CGVTables.cpp  733
-rw-r--r--  clang/lib/CodeGen/CGVTables.h  141
-rw-r--r--  clang/lib/CodeGen/CGValue.h  451
-rw-r--r--  clang/lib/CodeGen/CMakeLists.txt  56
-rw-r--r--  clang/lib/CodeGen/CodeGenAction.cpp  448
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.cpp  1149
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h  2702
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp  2667
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.h  986
-rw-r--r--  clang/lib/CodeGen/CodeGenTBAA.cpp  163
-rw-r--r--  clang/lib/CodeGen/CodeGenTBAA.h  80
-rw-r--r--  clang/lib/CodeGen/CodeGenTypes.cpp  676
-rw-r--r--  clang/lib/CodeGen/CodeGenTypes.h  254
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp  1202
-rw-r--r--  clang/lib/CodeGen/Makefile  19
-rw-r--r--  clang/lib/CodeGen/MicrosoftCXXABI.cpp  95
-rw-r--r--  clang/lib/CodeGen/ModuleBuilder.cpp  127
-rw-r--r--  clang/lib/CodeGen/README.txt  47
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp  3698
-rw-r--r--  clang/lib/CodeGen/TargetInfo.h  170
60 files changed, 65738 insertions, 0 deletions
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..2853bc8
--- /dev/null
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,181 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+#include "clang/AST/Type.h"
+#include "llvm/Type.h"
+
+namespace llvm {
+ class Value;
+ class LLVMContext;
+ class TargetData;
+}
+
+namespace clang {
+ class ASTContext;
+
+ namespace CodeGen {
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ class CodeGenTypes;
+ }
+
+ // FIXME: All of this stuff should be part of the target interface
+ // somehow. It is currently here because it is not clear how to factor
+ // the targets to support this, since the Targets currently live in a
+ // layer below types n'stuff.
+
+ /// ABIArgInfo - Helper class to encapsulate information about how a
+ /// specific C type should be passed to or returned from a function.
+ class ABIArgInfo {
+ public:
+ enum Kind {
+ /// Direct - Pass the argument directly using the normal converted LLVM
+ /// type, or by coercing to another specified type stored in
+ /// 'CoerceToType'. If an offset is specified (in UIntData), then the
+ /// argument passed is offset by some number of bytes in the memory
+ /// representation. A dummy argument is emitted before the real argument
+ /// if the specified type stored in "PaddingType" is not null.
+ Direct,
+
+ /// Extend - Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+ Extend,
+
+ /// Indirect - Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default alignment).
+ Indirect,
+
+ /// Ignore - Ignore the argument (treat as void). Useful for void and
+ /// empty structs.
+ Ignore,
+
+ /// Expand - Only valid for aggregate argument types. The structure should
+ /// be expanded into consecutive arguments for its constituent fields.
+ /// Currently expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable types.
+ Expand,
+
+ KindFirst=Direct, KindLast=Expand
+ };
+
+ private:
+ Kind TheKind;
+ llvm::Type *TypeData;
+ llvm::Type *PaddingType; // Currently allowed only for Direct.
+ unsigned UIntData;
+ bool BoolData0;
+ bool BoolData1;
+
+ ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0,
+ bool B0 = false, bool B1 = false, llvm::Type* P = 0)
+ : TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
+ BoolData1(B1) {}
+
+ public:
+ ABIArgInfo() : TheKind(Direct), TypeData(0), PaddingType(0), UIntData(0),
+ BoolData0(false), BoolData1(false) {}
+
+ static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
+ llvm::Type *Padding = 0) {
+ return ABIArgInfo(Direct, T, Offset, false, false, Padding);
+ }
+ static ABIArgInfo getExtend(llvm::Type *T = 0) {
+ return ABIArgInfo(Extend, T, 0);
+ }
+ static ABIArgInfo getIgnore() {
+ return ABIArgInfo(Ignore);
+ }
+ static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true,
+ bool Realign = false) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign);
+ }
+ static ABIArgInfo getExpand() {
+ return ABIArgInfo(Expand);
+ }
+
+ Kind getKind() const { return TheKind; }
+ bool isDirect() const { return TheKind == Direct; }
+ bool isExtend() const { return TheKind == Extend; }
+ bool isIgnore() const { return TheKind == Ignore; }
+ bool isIndirect() const { return TheKind == Indirect; }
+ bool isExpand() const { return TheKind == Expand; }
+
+ bool canHaveCoerceToType() const {
+ return TheKind == Direct || TheKind == Extend;
+ }
+
+ // Direct/Extend accessors
+ unsigned getDirectOffset() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return UIntData;
+ }
+
+ llvm::Type *getPaddingType() const {
+ return PaddingType;
+ }
+
+ llvm::Type *getCoerceToType() const {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ return TypeData;
+ }
+
+ void setCoerceToType(llvm::Type *T) {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ TypeData = T;
+ }
+
+ // Indirect accessors
+ unsigned getIndirectAlign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return UIntData;
+ }
+
+ bool getIndirectByVal() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData0;
+ }
+
+ bool getIndirectRealign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData1;
+ }
+
+ void dump() const;
+ };
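+
+ // A notional usage sketch (not part of this header): a target's ABIInfo
+ // subclass might map C types onto these kinds with the factory methods
+ // above, roughly as follows. The predicates named here (isEmptyRecord,
+ // isAggregateTypeForABI, isPromotableIntegerType) stand in for whatever
+ // checks a real target performs in TargetInfo.cpp.
+ //
+ //   ABIArgInfo classifyArgumentType(QualType Ty) const {
+ //     if (isEmptyRecord(Ty))
+ //       return ABIArgInfo::getIgnore();      // nothing to pass
+ //     if (isAggregateTypeForABI(Ty))
+ //       return ABIArgInfo::getIndirect(0);   // hidden pointer, default align
+ //     if (isPromotableIntegerType(Ty))
+ //       return ABIArgInfo::getExtend();      // direct + zero/sign extension
+ //     return ABIArgInfo::getDirect();
+ //   }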
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ CodeGen::CodeGenTypes &CGT;
+
+ ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
+ virtual ~ABIInfo();
+
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::TargetData &getTargetData() const;
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGen::CodeGenFunction &CGF) const = 0;
+ };
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 0000000..2f44711
--- /dev/null
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,460 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ DiagnosticsEngine &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const clang::TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ Module *TheModule;
+
+ Timer CodeGenerationTime;
+
+ mutable PassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+private:
+ PassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new PassManager();
+ CodeGenPasses->add(new TargetData(TheModule));
+ }
+ return CodeGenPasses;
+ }
+
+ PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(TheModule));
+ }
+ return PerModulePasses;
+ }
+
+ FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses->add(new TargetData(TheModule));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+
+public:
+ EmitAssemblyHelper(DiagnosticsEngine &_Diags,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ Module *M)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
+ TheModule(M), CodeGenerationTime("Code Generation Time"),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ void EmitAssembly(BackendAction Action, raw_ostream *OS);
+};
+
+}
+
+static void addObjCARCAPElimPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCAPElimPass());
+}
+
+static void addObjCARCExpandPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCExpandPass());
+}
+
+static void addObjCARCOptPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCOptPass());
+}
+
+static void addAddressSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createAddressSanitizerPass());
+}
+
+static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createThreadSanitizerPass());
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+ PassManagerBuilder PMBuilder;
+ PMBuilder.OptLevel = OptLevel;
+ PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
+
+ PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
+ PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
+ PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+
+ // In ObjC ARC mode, add the main ARC optimization passes.
+ if (LangOpts.ObjCAutoRefCount) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
+ addObjCARCAPElimPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addObjCARCOptPass);
+ }
+
+ if (LangOpts.AddressSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addAddressSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addAddressSanitizerPass);
+ }
+
+ if (LangOpts.ThreadSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addThreadSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addThreadSanitizerPass);
+ }
+
+ // Figure out TargetLibraryInfo.
+ Triple TargetTriple(TheModule->getTargetTriple());
+ PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
+ if (!CodeGenOpts.SimplifyLibCalls)
+ PMBuilder.LibraryInfo->disableAllFunctions();
+
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining: break;
+ case CodeGenOptions::NormalInlining: {
+ // FIXME: Derive these constants in a principled fashion.
+ unsigned Threshold = 225;
+ if (CodeGenOpts.OptimizeSize == 1) // -Os
+ Threshold = 75;
+ else if (CodeGenOpts.OptimizeSize == 2) // -Oz
+ Threshold = 25;
+ else if (OptLevel > 2)
+ Threshold = 275;
+ PMBuilder.Inliner = createFunctionInliningPass(Threshold);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ // Respect always_inline.
+ if (OptLevel == 0)
+ // Do not insert lifetime intrinsics at -O0.
+ PMBuilder.Inliner = createAlwaysInlinerPass(false);
+ else
+ PMBuilder.Inliner = createAlwaysInlinerPass();
+ break;
+ }
+
+
+ // Set up the per-function pass manager.
+ FunctionPassManager *FPM = getPerFunctionPasses();
+ if (CodeGenOpts.VerifyModule)
+ FPM->add(createVerifierPass());
+ PMBuilder.populateFunctionPassManager(*FPM);
+
+ // Set up the per-module pass manager.
+ PassManager *MPM = getPerModulePasses();
+
+ if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
+ MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
+ CodeGenOpts.EmitGcovArcs,
+ TargetTriple.isMacOSX()));
+
+ if (!CodeGenOpts.DebugInfo)
+ MPM->add(createStripSymbolsPass(true));
+ }
+
+
+ PMBuilder.populateModulePassManager(*MPM);
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return false;
+ }
+
+ // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
+ // being gross, this is also totally broken if we ever care about
+ // concurrency.
+
+ TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
+
+ TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
+ TargetMachine::setDataSections (CodeGenOpts.DataSections);
+
+ // FIXME: Parse this earlier.
+ llvm::CodeModel::Model CM;
+ if (CodeGenOpts.CodeModel == "small") {
+ CM = llvm::CodeModel::Small;
+ } else if (CodeGenOpts.CodeModel == "kernel") {
+ CM = llvm::CodeModel::Kernel;
+ } else if (CodeGenOpts.CodeModel == "medium") {
+ CM = llvm::CodeModel::Medium;
+ } else if (CodeGenOpts.CodeModel == "large") {
+ CM = llvm::CodeModel::Large;
+ } else {
+ assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
+ CM = llvm::CodeModel::Default;
+ }
+
+ SmallVector<const char *, 16> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ if (llvm::TimePassesIsEnabled)
+ BackendArgs.push_back("-time-passes");
+ for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i)
+ BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str());
+ if (CodeGenOpts.NoGlobalMerge)
+ BackendArgs.push_back("-global-merge=false");
+ BackendArgs.push_back(0);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ BackendArgs.data());
+
+ std::string FeaturesStr;
+ if (TargetOpts.Features.size()) {
+ SubtargetFeatures Features;
+ for (std::vector<std::string>::const_iterator
+ it = TargetOpts.Features.begin(),
+ ie = TargetOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+
+ llvm::Reloc::Model RM = llvm::Reloc::Default;
+ if (CodeGenOpts.RelocationModel == "static") {
+ RM = llvm::Reloc::Static;
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ RM = llvm::Reloc::PIC_;
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ RM = llvm::Reloc::DynamicNoPIC;
+ }
+
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ llvm::TargetOptions Options;
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = true;
+ } else {
+ Options.NoFramePointerElim = true;
+ Options.NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
+ Options.FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Options.FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ Options.FloatABIType = llvm::FloatABI::Default;
+ }
+
+ Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.UseSoftFloat = CodeGenOpts.SoftFloat;
+ Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+ Options.RealignStack = CodeGenOpts.StackRealignment;
+ Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
+ Options.TrapFuncName = CodeGenOpts.TrapFuncName;
+ Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
+ FeaturesStr, Options,
+ RM, CM, OptLevel);
+
+ if (CodeGenOpts.RelaxAll)
+ TM->setMCRelaxAll(true);
+ if (CodeGenOpts.SaveTempLabels)
+ TM->setMCSaveTempLabels(true);
+ if (CodeGenOpts.NoDwarf2CFIAsm)
+ TM->setMCUseCFI(false);
+ if (!CodeGenOpts.NoDwarfDirectoryAsm)
+ TM->setMCUseDwarfDirectory(true);
+ if (CodeGenOpts.NoExecStack)
+ TM->setMCNoExecStack(true);
+
+ // Create the code generator passes.
+ PassManager *PM = getCodeGenPasses();
+
+ // Add LibraryInfo.
+ TargetLibraryInfo *TLI = new TargetLibraryInfo();
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ PM->add(TLI);
+
+ // Normal mode, emit a .s or .o file by running the code generator. Note,
+ // this also adds code-generator-level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+
+ // Add ObjC ARC final-cleanup optimizations. This is done as part of the
+ // "codegen" passes so that it isn't run multiple times when there is
+ // inlining happening.
+ if (LangOpts.ObjCAutoRefCount &&
+ CodeGenOpts.OptimizationLevel > 0)
+ PM->add(createObjCARCContractPass());
+
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
+ llvm::formatted_raw_ostream FormattedOS;
+
+ CreatePasses();
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ break;
+
+ case Backend_EmitLL:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ break;
+
+ default:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ if (!AddEmitPasses(Action, FormattedOS))
+ return;
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses->run(*TheModule);
+ }
+}
+
+void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ Module *M,
+ BackendAction Action, raw_ostream *OS) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
+
+ AsmHelper.EmitAssembly(Action, OS);
+}
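+
+// Notional call site (a sketch, not part of this file): a frontend that has
+// finished building an llvm::Module M can hand it to the backend like so,
+// assuming Diags, CGOpts, TOpts and LOpts are already configured and OS is
+// the output stream:
+//
+//   EmitBackendOutput(Diags, CGOpts, TOpts, LOpts, M, Backend_EmitObj, OS);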
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..f8c7bcd
--- /dev/null
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,2044 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for Blocks -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace CodeGen;
+
+CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), UsesStret(false), StructureType(0), Block(block),
+ DominatingIP(0) {
+
+ // Skip asm prefix, if any. 'name' is usually taken directly from
+ // the mangled name of the enclosing function.
+ if (!name.empty() && name[0] == '\01')
+ name = name.substr(1);
+}
+
+// Anchor the vtable to this translation unit.
+CodeGenModule::ByrefHelpers::~ByrefHelpers() {}
+
+/// Build the given block as a global block.
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn);
+
+/// Build the helper function to copy a block.
+static llvm::Constant *buildCopyHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo);
+}
+
+/// Build the helper function to dispose of a block.
+static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
+}
+
+/// Build the block descriptor constant for a block.
+static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ ASTContext &C = CGM.getContext();
+
+ llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+
+ SmallVector<llvm::Constant*, 6> elements;
+
+ // reserved
+ elements.push_back(llvm::ConstantInt::get(ulong, 0));
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+ // a user diagnostic in that case. Better fix would be to change the
+ // API to size_t.
+ elements.push_back(llvm::ConstantInt::get(ulong,
+ blockInfo.BlockSize.getQuantity()));
+
+ // Optional copy/dispose helpers.
+ if (blockInfo.NeedsCopyDispose) {
+ // copy_func_helper_decl
+ elements.push_back(buildCopyHelper(CGM, blockInfo));
+
+ // destroy_func_decl
+ elements.push_back(buildDisposeHelper(CGM, blockInfo));
+ }
+
+ // Signature. Mandatory ObjC-style method descriptor @encode sequence.
+ std::string typeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
+ elements.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
+
+ // GC layout.
+ if (C.getLangOpts().ObjC1)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(llvm::Constant::getNullValue(i8p));
+
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
+
+ llvm::GlobalVariable *global =
+ new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ init, "__block_descriptor_tmp");
+
+ return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
+}
+
+/*
+ Purely notional variadic template describing the layout of a block.
+
+ template <class _ResultType, class... _ParamTypes, class... _CaptureTypes>
+ struct Block_literal {
+ /// Initialized to one of:
+ /// extern void *_NSConcreteStackBlock[];
+ /// extern void *_NSConcreteGlobalBlock[];
+ ///
+ /// In theory, we could start one off malloc'ed by setting
+ /// BLOCK_NEEDS_FREE, giving it a refcount of 1, and using
+ /// this isa:
+ /// extern void *_NSConcreteMallocBlock[];
+ struct objc_class *isa;
+
+ /// These are the flags (with corresponding bit number) that the
+ /// compiler is actually supposed to know about.
+ /// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block
+ /// descriptor provides copy and dispose helper functions
+ /// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured
+ /// object with a nontrivial destructor or copy constructor
+ /// 28. BLOCK_IS_GLOBAL - indicates that the block is allocated
+ /// as global memory
+ /// 29. BLOCK_USE_STRET - indicates that the block function
+ /// uses stret, which objc_msgSend needs to know about
+ /// 30. BLOCK_HAS_SIGNATURE - indicates that the block has an
+ /// @encoded signature string
+ /// And we're not supposed to manipulate these:
+ /// 24. BLOCK_NEEDS_FREE - indicates that the block has been moved
+ /// to malloc'ed memory
+ /// 27. BLOCK_IS_GC - indicates that the block has been moved
+ /// to GC-allocated memory
+ /// Additionally, the bottom 16 bits are a reference count which
+ /// should be zero on the stack.
+ int flags;
+
+ /// Reserved; should be zero-initialized.
+ int reserved;
+
+ /// Function pointer generated from block literal.
+ _ResultType (*invoke)(Block_literal *, _ParamTypes...);
+
+ /// Block description metadata generated from block literal.
+ struct Block_descriptor *block_descriptor;
+
+ /// Captured values follow.
+ _CaptureTypes captures...;
+ };
+ */
+
+/// The number of fields in a block header.
+const unsigned BlockHeaderSize = 5;
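+// (The five fields are the isa pointer, the flags word, the reserved word,
+// the invoke function pointer, and the block descriptor pointer; see
+// initializeForBlockHeader below.)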
+
+namespace {
+ /// A chunk of data that we actually have to capture in the block.
+ struct BlockLayoutChunk {
+ CharUnits Alignment;
+ CharUnits Size;
+ const BlockDecl::Capture *Capture; // null for 'this'
+ llvm::Type *Type;
+
+ BlockLayoutChunk(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture,
+ llvm::Type *type)
+ : Alignment(align), Size(size), Capture(capture), Type(type) {}
+
+ /// Tell the block info that this chunk has the given field index.
+ void setIndex(CGBlockInfo &info, unsigned index) {
+ if (!Capture)
+ info.CXXThisIndex = index;
+ else
+ info.Captures[Capture->getVariable()]
+ = CGBlockInfo::Capture::makeIndex(index);
+ }
+ };
+
+ /// Order by descending alignment.
+ bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
+ return left.Alignment > right.Alignment;
+ }
+}
+
+/// Determines if the given type is safe for constant capture in C++.
+static bool isSafeForCXXConstantCapture(QualType type) {
+ const RecordType *recordType =
+ type->getBaseElementTypeUnsafe()->getAs<RecordType>();
+
+ // Only records can be unsafe.
+ if (!recordType) return true;
+
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(recordType->getDecl());
+
+ // Maintain semantics for classes with non-trivial dtors or copy ctors.
+ if (!record->hasTrivialDestructor()) return false;
+ if (!record->hasTrivialCopyConstructor()) return false;
+
+ // Otherwise, we just have to make sure there aren't any mutable
+ // fields that might have changed since initialization.
+ return !record->hasMutableFields();
+}
+
+/// It is illegal to modify a const object after initialization.
+/// Therefore, if a const object has a constant initializer, we don't
+/// actually need to keep storage for it in the block; we'll just
+/// rematerialize it at the start of the block function. This is
+/// acceptable because we make no promises about address stability of
+/// captured variables.
+static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const VarDecl *var) {
+ QualType type = var->getType();
+
+ // We can only do this if the variable is const.
+ if (!type.isConstQualified()) return 0;
+
+ // Furthermore, in C++ we have to worry about mutable fields:
+ // C++ [dcl.type.cv]p4:
+ // Except that any class member declared mutable can be
+ // modified, any attempt to modify a const object during its
+ // lifetime results in undefined behavior.
+ if (CGM.getLangOpts().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ return 0;
+
+ // If the variable doesn't have any initializer (shouldn't this be
+ // invalid?), it's not clear what we should do. Maybe capture as
+ // zero?
+ const Expr *init = var->getInit();
+ if (!init) return 0;
+
+ return CGM.EmitConstantInit(*var, CGF);
+}
+
+/// Get the low bit of a nonzero character count. This is the
+/// alignment of the nth byte if the 0th byte is universally aligned.
+static CharUnits getLowBit(CharUnits v) {
+ return CharUnits::fromQuantity(v.getQuantity() & (~v.getQuantity() + 1));
+}
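+// For example, getLowBit(CharUnits::fromQuantity(12)) is 4: 12 is 0b1100
+// and (12 & -12) == 4, so the byte 12 past a maximally aligned byte is
+// only guaranteed 4-byte alignment.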
+
+static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
+ SmallVectorImpl<llvm::Type*> &elementTypes) {
+ ASTContext &C = CGM.getContext();
+
+ // The header is basically a 'struct { void *; int; int; void *; void *; }'.
+ CharUnits ptrSize, ptrAlign, intSize, intAlign;
+ llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
+ llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
+
+ // Are there crazy embedded platforms where this isn't true?
+ assert(intSize <= ptrSize && "layout assumptions horribly violated");
+
+ CharUnits headerSize = ptrSize;
+ if (2 * intSize < ptrAlign) headerSize += ptrSize;
+ else headerSize += 2 * intSize;
+ headerSize += 2 * ptrSize;
+
+ info.BlockAlign = ptrAlign;
+ info.BlockSize = headerSize;
+
+ assert(elementTypes.empty());
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(CGM.getBlockDescriptorType());
+
+ assert(elementTypes.size() == BlockHeaderSize);
+}
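+
+// As a concrete illustration (not computed here): on a typical 64-bit
+// target, ptrSize == ptrAlign == 8 and intSize == 4, so the header is
+// 8 + 2*4 + 2*8 == 32 bytes with the five-field layout
+// { i8*, i32, i32, i8*, %struct.__block_descriptor* }.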
+
+/// Compute the layout of the given block. Attempts to lay the block
+/// out with minimal space requirements.
+static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
+ CGBlockInfo &info) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *block = info.getBlockDecl();
+
+ SmallVector<llvm::Type*, 8> elementTypes;
+ initializeForBlockHeader(CGM, info, elementTypes);
+
+ if (!block->hasCaptures()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
+
+ // Collect the layout chunks.
+ SmallVector<BlockLayoutChunk, 16> layout;
+ layout.reserve(block->capturesCXXThis() +
+ (block->capture_end() - block->capture_begin()));
+
+ CharUnits maxFieldAlign;
+
+ // First, 'this'.
+ if (block->capturesCXXThis()) {
+ const DeclContext *DC = block->getDeclContext();
+ for (; isa<BlockDecl>(DC); DC = cast<BlockDecl>(DC)->getDeclContext())
+ ;
+ QualType thisType;
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC))
+ thisType = C.getPointerType(C.getRecordType(RD));
+ else
+ thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
+
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first, 0, llvmType));
+ }
+
+ // Next, all the block captures.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+
+ if (ci->isByRef()) {
+ // We have to copy/dispose of the __block reference.
+ info.NeedsCopyDispose = true;
+
+ // Just use void* instead of a pointer to the byref type.
+ QualType byRefPtrTy = C.VoidPtrTy;
+
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(byRefPtrTy);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ &*ci, llvmType));
+ continue;
+ }
+
+ // Otherwise, build a layout chunk with the size and alignment of
+ // the declaration.
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
+ info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ continue;
+ }
+
+ // If we have a lifetime qualifier, honor it for capture purposes.
+ // That includes *not* copying it if it's __unsafe_unretained.
+ if (Qualifiers::ObjCLifetime lifetime
+ = variable->getType().getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ info.NeedsCopyDispose = true;
+ }
+
+ // Block pointers require copy/dispose. So do Objective-C pointers.
+ } else if (variable->getType()->isObjCRetainableType()) {
+ info.NeedsCopyDispose = true;
+
+ // So do types that require non-trivial copy construction.
+ } else if (ci->hasCopyExpr()) {
+ info.NeedsCopyDispose = true;
+ info.HasCXXObject = true;
+
+ // And so do types with destructors.
+ } else if (CGM.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *record =
+ variable->getType()->getAsCXXRecordDecl()) {
+ if (!record->hasTrivialDestructor()) {
+ info.HasCXXObject = true;
+ info.NeedsCopyDispose = true;
+ }
+ }
+ }
+
+ QualType VT = variable->getType();
+ CharUnits size = C.getTypeSizeInChars(VT);
+ CharUnits align = C.getDeclAlign(variable);
+
+ maxFieldAlign = std::max(maxFieldAlign, align);
+
+ llvm::Type *llvmType =
+ CGM.getTypes().ConvertTypeForMem(VT);
+
+ layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
+ }
+
+ // If that was everything, we're done here.
+ if (layout.empty()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
+
+ // Sort the layout by alignment. We have to use a stable sort here
+ // to get reproducible results. There should probably be an
+ // llvm::array_pod_stable_sort.
+ std::stable_sort(layout.begin(), layout.end());
+
+ CharUnits &blockSize = info.BlockSize;
+ info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign);
+
+ // Assuming that the first byte in the header is maximally aligned,
+ // get the alignment of the first byte following the header.
+ CharUnits endAlign = getLowBit(blockSize);
+
+ // If the end of the header isn't satisfactorily aligned for the
+ // maximum thing, look for things that are okay with the header-end
+ // alignment, and keep appending them until we get something that's
+ // aligned right. This algorithm is only guaranteed optimal if
+ // that condition is satisfied at some point; otherwise we can get
+ // things like:
+ // header // next byte has alignment 4
+ // something_with_size_5; // next byte has alignment 1
+ // something_with_alignment_8;
+ // which has 7 bytes of padding, as opposed to the naive solution
+ // which might have less (?).
+ if (endAlign < maxFieldAlign) {
+ SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin() + 1, le = layout.end();
+
+ // Look for something that the header end is already
+ // satisfactorily aligned for.
+ for (; li != le && endAlign < li->Alignment; ++li)
+ ;
+
+ // If we found something that's naturally aligned for the end of
+ // the header, keep adding things...
+ if (li != le) {
+ SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
+ for (; li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+
+ // ...until we get to the alignment of the maximum field.
+ if (endAlign >= maxFieldAlign)
+ break;
+ }
+
+ // Don't re-append everything we just appended.
+ layout.erase(first, li);
+ }
+ }
+
+ // At this point, we just have to add padding if the end align still
+ // isn't aligned right.
+ if (endAlign < maxFieldAlign) {
+ CharUnits padding = maxFieldAlign - endAlign;
+
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
+
+ endAlign = getLowBit(blockSize);
+ assert(endAlign >= maxFieldAlign);
+ }
+
+ // Slam everything else on now. This works because they have
+ // strictly decreasing alignment and we expect that size is always a
+ // multiple of alignment.
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin(), le = layout.end(); li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+ }
+
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+}
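+
+// A notional example of the packing above (assuming the 64-bit layout
+// sketched earlier): capturing a double (size 8, align 8), an int (4, 4)
+// and a char (1, 1) gives a 32-byte header whose end is already 8-aligned,
+// so the alignment-sorted chunks are appended directly: the double at
+// offset 32, the int at 40, the char at 44, for a BlockSize of 45 bytes.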
+
+/// Enter the scope of a block. This should be run at the entrance to
+/// a full-expression so that the block's cleanups are pushed at the
+/// right place in the stack.
+static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
+ assert(CGF.HaveInsertPoint());
+
+ // Allocate the block info and place it at the head of the list.
+ CGBlockInfo &blockInfo =
+ *new CGBlockInfo(block, CGF.CurFn->getName());
+ blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
+ CGF.FirstBlockInfo = &blockInfo;
+
+ // Compute information about the layout, etc., of this block,
+ // pushing cleanups as necessary.
+ computeBlockInfo(CGF.CGM, &CGF, blockInfo);
+
+ // Nothing else to do if it can be global.
+ if (blockInfo.CanBeGlobal) return;
+
+ // Make the allocation for the block.
+ blockInfo.Address =
+ CGF.CreateTempAlloca(blockInfo.StructureType, "block");
+ blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // If there are cleanups to emit, enter them (but inactive).
+ if (!blockInfo.NeedsCopyDispose) return;
+
+ // Walk through the captures (in order) and find the ones not
+ // captured by constant.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ // Ignore __block captures; there's nothing special in the
+ // on-stack block that we need to do for them.
+ if (ci->isByRef()) continue;
+
+ // Ignore variables that are constant-captured.
+ const VarDecl *variable = ci->getVariable();
+ CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind =
+ variable->getType().isDestructedType();
+ if (dtorKind == QualType::DK_none) continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so we don't need to worry about that.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ // GEP down to the address.
+ llvm::Value *addr = CGF.Builder.CreateStructGEP(blockInfo.Address,
+ capture.getIndex());
+
+ // We can use that GEP as the dominating IP.
+ if (!blockInfo.DominatingIP)
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr);
+
+ CleanupKind cleanupKind = InactiveNormalCleanup;
+ bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = InactiveNormalAndEHCleanup;
+
+ CGF.pushDestroy(cleanupKind, addr, variable->getType(),
+ destroyer, useArrayEHCleanup);
+
+ // Remember where that cleanup was.
+ capture.setCleanup(CGF.EHStack.stable_begin());
+ }
+}
+
+/// Enter a full-expression with a non-trivial number of objects to
+/// clean up. This is in this file because, at the moment, the only
+/// kind of cleanup object is a BlockDecl*.
+void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
+ assert(E->getNumObjects() != 0);
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects();
+ for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator
+ i = cleanups.begin(), e = cleanups.end(); i != e; ++i) {
+ enterBlockScope(*this, *i);
+ }
+}
+
+/// Find the layout for the given block in a linked list and remove it.
+static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
+ const BlockDecl *block) {
+ while (true) {
+ assert(head && *head);
+ CGBlockInfo *cur = *head;
+
+ // If this is the block we're looking for, splice it out of the list.
+ if (cur->getBlockDecl() == block) {
+ *head = cur->NextBlockInfo;
+ return cur;
+ }
+
+ head = &cur->NextBlockInfo;
+ }
+}
+
+/// Destroy a chain of block layouts.
+void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
+ assert(head && "destroying an empty chain");
+ do {
+ CGBlockInfo *cur = head;
+ head = cur->NextBlockInfo;
+ delete cur;
+ } while (head != 0);
+}
+
+/// Emit a block literal expression in the current function.
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
+ // If the block has no captures, we won't have a pre-computed
+ // layout for it.
+ if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ return EmitBlockLiteral(blockInfo);
+ }
+
+ // Find the block info for this block and take ownership of it.
+ OwningPtr<CGBlockInfo> blockInfo;
+ blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
+ blockExpr->getBlockDecl()));
+
+ blockInfo->BlockExpression = blockExpr;
+ return EmitBlockLiteral(*blockInfo);
+}
+
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
+ // Using the computed layout, generate the actual block function.
+ bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
+ llvm::Constant *blockFn
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
+ CurFuncDecl, LocalDeclMap,
+ isLambdaConv);
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ // If there is nothing to capture, we can emit this as a global block.
+ if (blockInfo.CanBeGlobal)
+ return buildGlobalBlock(CGM, blockInfo, blockFn);
+
+ // Otherwise, we have to emit this as a local block.
+
+ llvm::Constant *isa = CGM.getNSConcreteStackBlock();
+ isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy);
+
+ // Build the block descriptor.
+ llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
+
+ llvm::AllocaInst *blockAddr = blockInfo.Address;
+ assert(blockAddr && "block has no address!");
+
+ // Compute the initial on-stack block flags.
+ BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
+ // Initialize the block literal.
+ Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(blockAddr, 1, "block.flags"));
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, 0),
+ Builder.CreateStructGEP(blockAddr, 2, "block.reserved"));
+ Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3,
+ "block.invoke"));
+ Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockAddr, 4,
+ "block.descriptor"));
+
+ // Finally, capture all the values into the block.
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // First, 'this'.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(blockAddr,
+ blockInfo.CXXThisIndex,
+ "block.captured-this.addr");
+ Builder.CreateStore(LoadCXXThis(), addr);
+ }
+
+ // Next, captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ QualType type = variable->getType();
+
+ // This will be a [[type]]*, except that a byref entry will just be
+ // an i8**.
+ llvm::Value *blockField =
+ Builder.CreateStructGEP(blockAddr, capture.getIndex(),
+ "block.captured");
+
+ // Compute the address of the thing we're going to move into the
+ // block literal.
+ llvm::Value *src;
+ if (ci->isNested()) {
+ // We need to use the capture from the enclosing block.
+ const CGBlockInfo::Capture &enclosingCapture =
+ BlockInfo->getCapture(variable);
+
+ // This is a [[type]]*, except that a byref entry will just be an i8**.
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
+ enclosingCapture.getIndex(),
+ "block.capture.addr");
+ } else if (blockDecl->isConversionFromLambda()) {
+ // The lambda capture in a lambda's conversion-to-block-pointer is
+ // special; we'll simply emit it directly.
+ src = 0;
+ } else {
+ // This is a [[type]]*.
+ src = LocalDeclMap[variable];
+ }
+
+ // For byrefs, we just write the pointer to the byref struct into
+ // the block field. There's no need to chase the forwarding
+ // pointer at this point, since we're building something that will
+ // live a shorter life than the stack byref anyway.
+ if (ci->isByRef()) {
+ // Get a void* that points to the byref struct.
+ if (ci->isNested())
+ src = Builder.CreateLoad(src, "byref.capture");
+ else
+ src = Builder.CreateBitCast(src, VoidPtrTy);
+
+ // Write that void* into the capture field.
+ Builder.CreateStore(src, blockField);
+
+ // If we have a copy constructor, evaluate that into the block field.
+ } else if (const Expr *copyExpr = ci->getCopyExpr()) {
+ if (blockDecl->isConversionFromLambda()) {
+ // If we have a lambda conversion, emit the expression
+ // directly into the block instead.
+ CharUnits Align = getContext().getTypeAlignInChars(type);
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(blockField, Align, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(copyExpr, Slot);
+ } else {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ }
+
+ // If it's a reference variable, copy the reference into the block field.
+ } else if (type->isReferenceType()) {
+ Builder.CreateStore(Builder.CreateLoad(src, "ref.val"), blockField);
+
+ // Otherwise, fake up a POD copy into the block field.
+ } else {
+ // Fake up a new variable so that EmitScalarInit doesn't think
+ // we're referring to the variable in its own initializer.
+ ImplicitParamDecl blockFieldPseudoVar(/*DC*/ 0, SourceLocation(),
+ /*name*/ 0, type);
+
+ // We use one of these or the other depending on whether the
+ // reference is nested.
+ DeclRefExpr declRef(const_cast<VarDecl*>(variable),
+ /*refersToEnclosing*/ ci->isNested(), type,
+ VK_LValue, SourceLocation());
+
+ ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
+ &declRef, VK_RValue);
+ EmitExprAsInit(&l2r, &blockFieldPseudoVar,
+ MakeAddrLValue(blockField, type,
+ getContext().getDeclAlign(variable)),
+ /*captured by init*/ false);
+ }
+
+ // Activate the cleanup if layout pushed one.
+ if (!ci->isByRef()) {
+ EHScopeStack::stable_iterator cleanup = capture.getCleanup();
+ if (cleanup.isValid())
+ ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
+ }
+ }
+
+ // Cast to the converted block-pointer type, which happens (somewhat
+ // unfortunately) to be a pointer to function type.
+ llvm::Value *result =
+ Builder.CreateBitCast(blockAddr,
+ ConvertType(blockInfo.getBlockExpr()->getType()));
+
+ return result;
+}
+
+
+llvm::Type *CodeGenModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ //
+ // // later, the following will be added
+ //
+ // struct {
+ // void (*copyHelper)();
+ // void (*disposeHelper)();
+ // } helpers; // !!! optional
+ //
+ // const char *signature; // the block signature
+ // const char *layout; // reserved
+ // };
+ BlockDescriptorType =
+ llvm::StructType::create("struct.__block_descriptor",
+ UnsignedLongTy, UnsignedLongTy, NULL);
+
+ // Now form a pointer to that.
+ BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
+ return BlockDescriptorType;
+}
+
+llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType =
+ llvm::StructType::create("struct.__block_literal_generic",
+ VoidPtrTy, IntTy, IntTy, VoidPtrTy,
+ BlockDescPtrTy, NULL);
+
+ return GenericBlockLiteralType;
+}
+
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
+ ReturnValueSlot ReturnValue) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAs<BlockPointerType>();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3);
+
+ BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
+
+ // Add the block literal.
+ CallArgList Args;
+ Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy);
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
+ E->arg_begin(), E->arg_end());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr);
+
+ const FunctionType *FuncTy = FnType->castAs<FunctionType>();
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FuncTy);
+
+ // Cast the function pointer to the right type.
+ llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
+
+ llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, ReturnValue, Args);
+}
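+
+// (For reference: this lowers source-level block calls such as
+// `int (^f)(int) = ...; f(42);`, where the callee has block-pointer type.)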
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
+ assert(BlockInfo && "evaluating block ref without block information?");
+ const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
+
+ // Handle constant captures.
+ if (capture.isConstant()) return LocalDeclMap[variable];
+
+ llvm::Value *addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ "block.capture.addr");
+
+ if (isByRef) {
+ // addr should be a void** right now. Load, then cast the result
+ // to byref*.
+
+ addr = Builder.CreateLoad(addr);
+ llvm::PointerType *byrefPointerType
+ = llvm::PointerType::get(BuildByRefType(variable), 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType,
+ "byref.addr");
+
+ // Follow the forwarding pointer.
+ addr = Builder.CreateStructGEP(addr, 1, "byref.forwarding");
+ addr = Builder.CreateLoad(addr, "byref.addr.forwarded");
+
+ // Cast back to byref* and GEP over to the actual object.
+ addr = Builder.CreateBitCast(addr, byrefPointerType);
+ addr = Builder.CreateStructGEP(addr, getByRefValueLLVMField(variable),
+ variable->getNameAsString());
+ }
+
+ if (variable->getType()->isReferenceType())
+ addr = Builder.CreateLoad(addr, "ref.tmp");
+
+ return addr;
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
+ const char *name) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
+ blockInfo.BlockExpression = blockExpr;
+
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(*this, 0, blockInfo);
+
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn;
+ {
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
+ blockInfo,
+ 0, LocalDeclMap,
+ false);
+ }
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ return buildGlobalBlock(*this, blockInfo, blockFn);
+}
+
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn) {
+ assert(blockInfo.CanBeGlobal);
+
+ // Generate the constants for the block literal initializer.
+ llvm::Constant *fields[BlockHeaderSize];
+
+ // isa
+ fields[0] = CGM.getNSConcreteGlobalBlock();
+
+ // __flags
+ BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
+ fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
+
+ // Reserved
+ fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
+
+ // Function
+ fields[3] = blockFn;
+
+ // Descriptor
+ fields[4] = buildBlockDescriptor(CGM, blockInfo);
+
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
+
+ llvm::GlobalVariable *literal =
+ new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ true,
+ llvm::GlobalVariable::InternalLinkage,
+ init,
+ "__block_literal_global");
+ literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Return a constant of the appropriately-casted type.
+ llvm::Type *requiredType =
+ CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
+ return llvm::ConstantExpr::getBitCast(literal, requiredType);
+}
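+
+// For a captureless literal such as ^{ }, the emitted global is roughly
+// (IR names illustrative only):
+//
+//   @__block_literal_global = internal constant { ... } {
+//     i8** @_NSConcreteGlobalBlock,  ; isa
+//     i32 1342177280,                ; BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE
+//     i32 0,                         ; reserved
+//     i8* <invoke fn>, i8* <descriptor> }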
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &blockInfo,
+ const Decl *outerFnDecl,
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock) {
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Check if we should generate debug info for this block function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ BlockInfo = &blockInfo;
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, in case they're directly
+ // referenced in a block.
+ for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
+ const VarDecl *var = dyn_cast<VarDecl>(i->first);
+ if (var && !var->hasLocalStorage())
+ LocalDeclMap[var] = i->second;
+ }
+
+ // Begin building the function declaration.
+
+ // Build the argument list.
+ FunctionArgList args;
+
+ // The first argument is the block pointer. Just take it as a void*
+ // and cast it later.
+ QualType selfTy = getContext().VoidPtrTy;
+ IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+
+ ImplicitParamDecl selfDecl(const_cast<BlockDecl*>(blockDecl),
+ SourceLocation(), II, selfTy);
+ args.push_back(&selfDecl);
+
+ // Now add the rest of the parameters.
+ for (BlockDecl::param_const_iterator i = blockDecl->param_begin(),
+ e = blockDecl->param_end(); i != e; ++i)
+ args.push_back(*i);
+
+ // Create the function declaration.
+ const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType();
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().arrangeFunctionDeclaration(fnType->getResultType(), args,
+ fnType->getExtInfo(),
+ fnType->isVariadic());
+ if (CGM.ReturnTypeUsesSRet(fnInfo))
+ blockInfo.UsesStret = true;
+
+ llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo);
+
+ MangleBuffer name;
+ CGM.getBlockMangledName(GD, name, blockDecl);
+ llvm::Function *fn =
+ llvm::Function::Create(fnLLVMType, llvm::GlobalValue::InternalLinkage,
+ name.getString(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
+
+ // Begin generating the function.
+ StartFunction(blockDecl, fnType->getResultType(), fn, fnInfo, args,
+ blockInfo.getBlockExpr()->getBody()->getLocStart());
+ CurFuncDecl = outerFnDecl; // StartFunction sets this to blockDecl
+
+ // Okay. Undo some of what StartFunction did.
+
+ // Pull the 'self' reference out of the local decl map.
+ llvm::Value *blockAddr = LocalDeclMap[&selfDecl];
+ LocalDeclMap.erase(&selfDecl);
+ BlockPointer = Builder.CreateBitCast(blockAddr,
+ blockInfo.StructureType->getPointerTo(),
+ "block");
+
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(BlockPointer,
+ blockInfo.CXXThisIndex,
+ "block.captured-this");
+ CXXThisValue = Builder.CreateLoad(addr, "this");
+ }
+
+ // LoadObjCSelf() expects there to be an entry for 'self' in LocalDeclMap;
+ // appease it.
+ if (const ObjCMethodDecl *method
+ = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl)) {
+ const VarDecl *self = method->getSelfDecl();
+
+ // There might not be a capture for 'self', but if there is...
+ if (blockInfo.Captures.count(self)) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(self);
+ llvm::Value *selfAddr = Builder.CreateStructGEP(BlockPointer,
+ capture.getIndex(),
+ "block.captured-self");
+ LocalDeclMap[self] = selfAddr;
+ }
+ }
+
+ // Also force all the constant captures.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (!capture.isConstant()) continue;
+
+ unsigned align = getContext().getDeclAlign(variable).getQuantity();
+
+ llvm::AllocaInst *alloca =
+ CreateMemTemp(variable->getType(), "block.captured-const");
+ alloca->setAlignment(align);
+
+ Builder.CreateStore(capture.getConstant(), alloca, align);
+
+ LocalDeclMap[variable] = alloca;
+ }
+
+ // Save a spot to insert the debug information for all the DeclRefExprs.
+ llvm::BasicBlock *entry = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+ --entry_ptr;
+
+ if (IsLambdaConversionToBlock)
+ EmitLambdaBlockInvokeBody();
+ else
+ EmitStmt(blockDecl->getBody());
+
+ // Remember where we were...
+ llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+ // Go back to the entry.
+ ++entry_ptr;
+ Builder.SetInsertPoint(entry, entry_ptr);
+
+ // Emit debug information for all the DeclRefExprs.
+ // FIXME: also for 'this'
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ DI->EmitLocation(Builder, variable->getLocation());
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ Builder);
+ continue;
+ }
+
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
+ Builder, blockInfo);
+ }
+ }
+
+ // And resume where we left off.
+ if (resume == 0)
+ Builder.ClearInsertionPoint();
+ else
+ Builder.SetInsertPoint(resume);
+
+ FinishFunction(cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
+
+ return fn;
+}
+
+llvm::Constant *
+CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+  // FIXME: it would be nice if these could be merged with other functions
+  // that have identical semantics.
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ // Check if we should generate debug info for this block helper function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+
+ llvm::Value *dst = GetAddrOfLocalVar(&dstDecl);
+ dst = Builder.CreateLoad(dst);
+ dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ const Expr *copyExpr = ci->getCopyExpr();
+ BlockFieldFlags flags;
+
+ bool isARCWeakCapture = false;
+
+ if (copyExpr) {
+ assert(!ci->isByRef());
+ // don't bother computing flags
+
+ } else if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ if (getLangOpts().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special copy logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
+
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
+ llvm::Value *dstField = Builder.CreateStructGEP(dst, index);
+
+ // If there's an explicit copy expression, we do that.
+ if (copyExpr) {
+ EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else if (isARCWeakCapture) {
+ EmitARCCopyWeak(dstField, srcField);
+ } else {
+ llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
+ }
+ }
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+}
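+
+// The emitted helper has the shape the blocks runtime expects, effectively
+//   void __copy_helper_block_(void *dst, void *src);
+// which _Block_copy invokes when the literal's flags include
+// BLOCK_HAS_COPY_DISPOSE.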
+
+llvm::Constant *
+CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+ ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+  // FIXME: We'd like to make these mergeable by content, with
+  // internal linkage.
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__destroy_helper_block_", &CGM.getModule());
+
+ // Check if we should generate debug info for this block destroy function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false, false);
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ BlockFieldFlags flags;
+ const CXXDestructorDecl *dtor = 0;
+
+ bool isARCWeakCapture = false;
+
+ if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ if (record->hasTrivialDestructor())
+ continue;
+ dtor = record->getDestructor();
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures.
+ if (getLangOpts().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special dispose logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
+
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
+
+    // If the capture has a nontrivial destructor, push a cleanup to run it.
+ if (dtor) {
+ PushDestructorCleanup(dtor, srcField);
+
+ // If this is a __weak capture, emit the release directly.
+ } else if (isARCWeakCapture) {
+ EmitARCDestroyWeak(srcField);
+
+    // Otherwise we call _Block_object_dispose. It wouldn't be too
+    // hard to just emit this as a cleanup if we wanted to make sure
+    // that things were destroyed in reverse order.
+ } else {
+ llvm::Value *value = Builder.CreateLoad(srcField);
+ value = Builder.CreateBitCast(value, VoidPtrTy);
+ BuildBlockRelease(value, flags);
+ }
+ }
+
+ cleanups.ForceCleanup();
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+}
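+
+// This is the mirror image of the copy helper: effectively
+//   void __destroy_helper_block_(void *src);
+// invoked by the runtime when the last reference to a copied block goes away.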
+
+namespace {
+
+/// Emits the copy/dispose helper functions for a __block object of id type.
+class ObjectByrefHelpers : public CodeGenModule::ByrefHelpers {
+ BlockFieldFlags Flags;
+
+public:
+ ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
+ : ByrefHelpers(alignment), Flags(flags) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
+
+ srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
+ llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
+
+ unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
+
+ llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
+ llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
+ CGF.Builder.CreateCall3(fn, destField, srcValue, flagsVal);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+
+ CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Flags.getBitMask());
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __weak variable.
+class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ CGF.EmitARCMoveWeak(destField, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ CGF.EmitARCDestroyWeak(field);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 0 is distinguishable from all pointers and byref flags
+ id.AddInteger(0);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong variable
+/// that's not of block-pointer type.
+class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do a "move" by copying the value and then zeroing out the old
+ // variable.
+
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField);
+ value->setAlignment(Alignment.getQuantity());
+
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField);
+ store->setAlignment(Alignment.getQuantity());
+
+ store = CGF.Builder.CreateStore(null, srcField);
+ store->setAlignment(Alignment.getQuantity());
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 1 is distinguishable from all pointers and byref flags
+ id.AddInteger(1);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong
+/// variable that's of block-pointer type.
+class ARCStrongBlockByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do the copy with objc_retainBlock; that's all that
+ // _Block_object_assign would do anyway, and we'd have to pass the
+ // right arguments to make sure it doesn't get no-op'ed.
+ llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField);
+ oldValue->setAlignment(Alignment.getQuantity());
+
+ llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField);
+ store->setAlignment(Alignment.getQuantity());
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 2 is distinguishable from all pointers and byref flags
+ id.AddInteger(2);
+ }
+};
+
+/// Emits the copy/dispose helpers for a __block variable with a
+/// nontrivial copy constructor or destructor.
+class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
+ QualType VarType;
+ const Expr *CopyExpr;
+
+public:
+ CXXByrefHelpers(CharUnits alignment, QualType type,
+ const Expr *copyExpr)
+ : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+
+ bool needsCopy() const { return CopyExpr != 0; }
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ if (!CopyExpr) return;
+ CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
+ CGF.PushDestructorCleanup(VarType, field);
+ CGF.PopCleanupBlocks(cleanupDepth);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
+ }
+};
+} // end anonymous namespace
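+
+// The profileImpl overrides above key CGM.ByrefHelpersCache (a FoldingSet),
+// so __block variables with identical copy/dispose semantics can share one
+// pair of helpers; the small integers 0, 1, and 2 are chosen precisely so
+// they cannot collide with the pointer and bitmask keys used by the other
+// subclasses.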
+
+static llvm::Constant *
+generateByrefCopyHelper(CodeGenFunction &CGF,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &byrefInfo) {
+ ASTContext &Context = CGF.getContext();
+
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl dst(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&dst);
+
+ ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&src);
+
+ const CGFunctionInfo &FI =
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+ CodeGenTypes &Types = CGF.CGM.getTypes();
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+
+  // FIXME: We'd like to make these mergeable by content, with
+  // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_object_copy_", &CGF.CGM.getModule());
+
+ IdentifierInfo *II
+ = &Context.Idents.get("__Block_byref_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, false);
+
+ CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
+
+ if (byrefInfo.needsCopy()) {
+ llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+
+ // dst->x
+ llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
+ destField = CGF.Builder.CreateLoad(destField);
+ destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
+ destField = CGF.Builder.CreateStructGEP(destField, 6, "x");
+
+ // src->x
+ llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src);
+ srcField = CGF.Builder.CreateLoad(srcField);
+ srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
+ srcField = CGF.Builder.CreateStructGEP(srcField, 6, "x");
+
+ byrefInfo.emitCopy(CGF, destField, srcField);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
+
+/// Build the copy helper for a __block variable.
+static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &info) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefCopyHelper(CGF, byrefType, info);
+}
+
+/// Generate code for a __block variable's dispose helper.
+static llvm::Constant *
+generateByrefDisposeHelper(CodeGenFunction &CGF,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &byrefInfo) {
+ ASTContext &Context = CGF.getContext();
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&src);
+
+ const CGFunctionInfo &FI =
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+ CodeGenTypes &Types = CGF.CGM.getTypes();
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+
+  // FIXME: We'd like to make these mergeable by content, with
+  // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_object_dispose_",
+ &CGF.CGM.getModule());
+
+ IdentifierInfo *II
+ = &Context.Idents.get("__Block_byref_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, false);
+ CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
+
+ if (byrefInfo.needsDispose()) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(&src);
+ V = CGF.Builder.CreateLoad(V);
+ V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0));
+ V = CGF.Builder.CreateStructGEP(V, 6, "x");
+
+ byrefInfo.emitDispose(CGF, V);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
+
+/// Build the dispose helper for a __block variable.
+static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &info) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefDisposeHelper(CGF, byrefType, info);
+}
+
+/// Build copy/dispose helpers for a __block variable, reusing a previously
+/// built set with identical semantics from the cache when possible.
+template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
+ llvm::StructType &byrefTy,
+ T &byrefInfo) {
+ // Increase the field's alignment to be at least pointer alignment,
+ // since the layout of the byref struct will guarantee at least that.
+ byrefInfo.Alignment = std::max(byrefInfo.Alignment,
+ CharUnits::fromQuantity(CGM.PointerAlignInBytes));
+
+ llvm::FoldingSetNodeID id;
+ byrefInfo.Profile(id);
+
+ void *insertPos;
+ CodeGenModule::ByrefHelpers *node
+ = CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos);
+ if (node) return static_cast<T*>(node);
+
+ byrefInfo.CopyHelper = buildByrefCopyHelper(CGM, byrefTy, byrefInfo);
+ byrefInfo.DisposeHelper = buildByrefDisposeHelper(CGM, byrefTy, byrefInfo);
+
+ T *copy = new (CGM.getContext()) T(byrefInfo);
+ CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
+ return copy;
+}
+
+CodeGenModule::ByrefHelpers *
+CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission) {
+ const VarDecl &var = *emission.Variable;
+ QualType type = var.getType();
+
+ if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
+ if (!copyExpr && record->hasTrivialDestructor()) return 0;
+
+ CXXByrefHelpers byrefInfo(emission.Alignment, type, copyExpr);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+  // Otherwise, if we don't have a retainable type, there's nothing to do.
+ if (!type->isObjCRetainableType()) return 0;
+
+ Qualifiers qs = type.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ assert(getLangOpts().ObjCAutoRefCount);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return 0;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak: {
+ ARCWeakByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ // Block pointers need to be copied, and there's no direct
+ // transfer possible.
+ if (type->isBlockPointerType()) {
+ ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+
+ // Otherwise, we transfer ownership of the retain from the stack
+ // to the heap.
+ } else {
+ ARCStrongByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+
+ BlockFieldFlags flags;
+ if (type->isBlockPointerType()) {
+ flags |= BLOCK_FIELD_IS_BLOCK;
+ } else if (CGM.getContext().isObjCNSObjectType(type) ||
+ type->isObjCObjectPointerType()) {
+ flags |= BLOCK_FIELD_IS_OBJECT;
+ } else {
+ return 0;
+ }
+
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+}
+
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+ assert(ByRefValueInfo.count(VD) && "Did not find value!");
+
+ return ByRefValueInfo.find(VD)->second.second;
+}
+
+llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V) {
+ llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
+ V->getNameAsString());
+ return Loc;
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// char padding[X]; // only if needed
+/// T x;
+/// } x
+///
+llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
+ std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+ if (Info.first)
+ return Info.first;
+
+ QualType Ty = D->getType();
+
+ SmallVector<llvm::Type *, 8> types;
+
+ llvm::StructType *ByRefType =
+ llvm::StructType::create(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
+
+ // void *__isa;
+ types.push_back(Int8PtrTy);
+
+ // void *__forwarding;
+ types.push_back(llvm::PointerType::getUnqual(ByRefType));
+
+ // int32_t __flags;
+ types.push_back(Int32Ty);
+
+ // int32_t __size;
+ types.push_back(Int32Ty);
+
+ bool HasCopyAndDispose =
+ (Ty->isObjCRetainableType()) || getContext().getBlockVarCopyInits(D);
+ if (HasCopyAndDispose) {
+ /// void *__copy_helper;
+ types.push_back(Int8PtrTy);
+
+ /// void *__destroy_helper;
+ types.push_back(Int8PtrTy);
+ }
+
+ bool Packed = false;
+ CharUnits Align = getContext().getDeclAlign(D);
+ if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
+ // We have to insert padding.
+
+ // The struct above has 2 32-bit integers.
+ unsigned CurrentOffsetInBytes = 4 * 2;
+
+ // And either 2 or 4 pointers.
+ CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+ CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+
+ // Align the offset.
+ unsigned AlignedOffsetInBytes =
+ llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
+
+ unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+ if (NumPaddingBytes > 0) {
+ llvm::Type *Ty = Int8Ty;
+ // FIXME: We need a sema error for alignment larger than the minimum of
+ // the maximal stack alignment and the alignment of malloc on the system.
+ if (NumPaddingBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+
+ types.push_back(Ty);
+
+ // We want a packed struct.
+ Packed = true;
+ }
+ }
+
+ // T x;
+ types.push_back(ConvertTypeForMem(Ty));
+
+ ByRefType->setBody(types, Packed);
+
+ Info.first = ByRefType;
+
+ Info.second = types.size() - 1;
+
+ return Info.first;
+}
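+
+// As an illustrative example, a plain `__block int x;` that needs no
+// copy/dispose support lowers to
+//
+//   %struct.__block_byref_x = type { i8*, %struct.__block_byref_x*,
+//                                    i32, i32, i32 }
+//
+// with getByRefValueLLVMField(x) == 4, the index of the trailing `int x`.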
+
+/// Initialize the structural components of a __block variable, i.e.
+/// everything but the actual object.
+void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
+ // Find the address of the local.
+ llvm::Value *addr = emission.Address;
+
+ // That's an alloca of the byref structure type.
+ llvm::StructType *byrefType = cast<llvm::StructType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+
+ // Build the byref helpers if necessary. This is null if we don't need any.
+ CodeGenModule::ByrefHelpers *helpers =
+ buildByrefHelpers(*byrefType, emission);
+
+ const VarDecl &D = *emission.Variable;
+ QualType type = D.getType();
+
+ llvm::Value *V;
+
+ // Initialize the 'isa', which is just 0 or 1.
+ int isa = 0;
+ if (type.isObjCGCWeak())
+ isa = 1;
+ V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
+ Builder.CreateStore(V, Builder.CreateStructGEP(addr, 0, "byref.isa"));
+
+ // Store the address of the variable into its own forwarding pointer.
+ Builder.CreateStore(addr,
+ Builder.CreateStructGEP(addr, 1, "byref.forwarding"));
+
+ // Blocks ABI:
+ // c) the flags field is set to either 0 if no helper functions are
+ // needed or BLOCK_HAS_COPY_DISPOSE if they are,
+ BlockFlags flags;
+ if (helpers) flags |= BLOCK_HAS_COPY_DISPOSE;
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(addr, 2, "byref.flags"));
+
+ CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType);
+ V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity());
+ Builder.CreateStore(V, Builder.CreateStructGEP(addr, 3, "byref.size"));
+
+ if (helpers) {
+ llvm::Value *copy_helper = Builder.CreateStructGEP(addr, 4);
+ Builder.CreateStore(helpers->CopyHelper, copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(addr, 5);
+ Builder.CreateStore(helpers->DisposeHelper, destroy_helper);
+ }
+}
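+
+// After this runs, the byref header for a variable needing helpers holds,
+// roughly: isa = 0 (or 1 for a GC __weak variable), forwarding = the alloca
+// itself, flags = BLOCK_HAS_COPY_DISPOSE, size = the store size of the byref
+// struct, and the last two slots point at the helpers built above.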
+
+void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
+ llvm::Value *F = CGM.getBlockObjectDispose();
+ llvm::Value *N;
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
+ Builder.CreateCall2(F, V, N);
+}
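+
+// This lowers to the runtime call _Block_object_dispose(ptr, flags); the
+// cleanup below, for instance, passes BLOCK_FIELD_IS_BYREF to release a
+// __block variable's heap copy once the frame is done with it.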
+
+namespace {
+ struct CallBlockRelease : EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+      // FIXME: Should we be passing BLOCK_FIELD_IS_WEAK here?
+ CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
+ }
+ };
+}
+
+/// Enter a cleanup to destroy a __block variable. Note that this
+/// cleanup should be a no-op if the variable hasn't left the stack
+/// yet; if a cleanup is required for the variable itself, that needs
+/// to be done externally.
+void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
+ // We don't enter this cleanup if we're in pure-GC mode.
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
+}
+
+/// Adjust the declaration of something from the blocks API.
+static void configureBlocksRuntimeObject(CodeGenModule &CGM,
+ llvm::Constant *C) {
+ if (!CGM.getLangOpts().BlocksRuntimeOptional) return;
+
+ llvm::GlobalValue *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
+ if (GV->isDeclaration() &&
+ GV->getLinkage() == llvm::GlobalValue::ExternalLinkage)
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+}
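+
+// Under -fblocks-runtime-optional, the extern-weak linkage applied here lets
+// a program load even when the blocks runtime is absent; the symbols then
+// simply resolve to null at load time.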
+
+llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+ if (BlockObjectDispose)
+ return BlockObjectDispose;
+
+ llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectDispose = CreateRuntimeFunction(fty, "_Block_object_dispose");
+ configureBlocksRuntimeObject(*this, BlockObjectDispose);
+ return BlockObjectDispose;
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+ if (BlockObjectAssign)
+ return BlockObjectAssign;
+
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectAssign = CreateRuntimeFunction(fty, "_Block_object_assign");
+ configureBlocksRuntimeObject(*this, BlockObjectAssign);
+ return BlockObjectAssign;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock)
+ return NSConcreteGlobalBlock;
+
+ NSConcreteGlobalBlock = GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock",
+ Int8PtrTy->getPointerTo(), 0);
+ configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock);
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock)
+ return NSConcreteStackBlock;
+
+ NSConcreteStackBlock = GetOrCreateLLVMGlobal("_NSConcreteStackBlock",
+ Int8PtrTy->getPointerTo(), 0);
+ configureBlocksRuntimeObject(*this, NSConcreteStackBlock);
+ return NSConcreteStackBlock;
+}
diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..095cfdb
--- /dev/null
+++ b/clang/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,229 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for llvm translation for block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/Module.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include "CodeGenFunction.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class PointerType;
+ class Value;
+ class LLVMContext;
+}
+
+namespace clang {
+
+namespace CodeGen {
+
+class CodeGenModule;
+class CGBlockInfo;
+
+enum BlockFlag_t {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30)
+};
+class BlockFlags {
+ uint32_t flags;
+
+ BlockFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFlags() : flags(0) {}
+ BlockFlags(BlockFlag_t flag) : flags(flag) {}
+
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
+
+ friend BlockFlags operator|(BlockFlags l, BlockFlags r) {
+ return BlockFlags(l.flags | r.flags);
+ }
+ friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFlags l, BlockFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFlags operator|(BlockFlag_t l, BlockFlag_t r) {
+ return BlockFlags(l) | BlockFlags(r);
+}
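+
+// Usage sketch: the overloads above let the flag constants compose naturally,
+//
+//   BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
+//   if (flags & BLOCK_USE_STRET) { ... }
+//
+// and getBitMask() yields the literal i32 stored in the block header.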
+
+enum BlockFieldFlag_t {
+ BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */
+
+ BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+};
+
+class BlockFieldFlags {
+ uint32_t flags;
+
+ BlockFieldFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFieldFlags() : flags(0) {}
+ BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {}
+
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
+
+ /// Answers whether the flags indicate that this field is an object
+ /// or block pointer that requires _Block_object_assign/dispose.
+ bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }
+
+ friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
+ return BlockFieldFlags(l.flags | r.flags);
+ }
+ friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
+ return BlockFieldFlags(l) | BlockFieldFlags(r);
+}
+
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
+public:
+  /// Name - The name of the block, more or less.
+ llvm::StringRef Name;
+
+ /// The field index of 'this' within the block, if there is one.
+ unsigned CXXThisIndex;
+
+ class Capture {
+ uintptr_t Data;
+ EHScopeStack::stable_iterator Cleanup;
+
+ public:
+ bool isIndex() const { return (Data & 1) != 0; }
+ bool isConstant() const { return !isIndex(); }
+ unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+ EHScopeStack::stable_iterator getCleanup() const {
+ assert(isIndex());
+ return Cleanup;
+ }
+ void setCleanup(EHScopeStack::stable_iterator cleanup) {
+ assert(isIndex());
+ Cleanup = cleanup;
+ }
+
+ static Capture makeIndex(unsigned index) {
+ Capture v;
+ v.Data = (index << 1) | 1;
+ return v;
+ }
+
+ static Capture makeConstant(llvm::Value *value) {
+ Capture v;
+ v.Data = reinterpret_cast<uintptr_t>(value);
+ return v;
+ }
+ };
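+
+  // The low bit of Data tags the representation: block-resident captures
+  // store (index << 1) | 1, while constant captures store the llvm::Value*
+  // itself, which this encoding assumes is even (pointer-aligned). So
+  // Capture::makeIndex(5).getIndex() == 5, and isIndex()/isConstant() are
+  // mutually exclusive.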
+
+ /// CanBeGlobal - True if the block can be global, i.e. it has
+ /// no non-constant captures.
+ bool CanBeGlobal : 1;
+
+ /// True if the block needs a custom copy or dispose function.
+ bool NeedsCopyDispose : 1;
+
+ /// HasCXXObject - True if the block's custom copy/dispose functions
+ /// need to be run even in GC mode.
+ bool HasCXXObject : 1;
+
+ /// UsesStret : True if the block uses an stret return. Mutable
+ /// because it gets set later in the block-creation process.
+ mutable bool UsesStret : 1;
+
+ /// The mapping of allocated indexes within the block.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ llvm::AllocaInst *Address;
+ llvm::StructType *StructureType;
+ const BlockDecl *Block;
+ const BlockExpr *BlockExpression;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+
+ /// An instruction which dominates the full-expression that the
+ /// block is inside.
+ llvm::Instruction *DominatingIP;
+
+ /// The next block in the block-info chain. Invalid if this block
+ /// info is not part of the CGF's block-info chain, which is true
+ /// if it corresponds to a global block or a block whose expression
+ /// has been encountered.
+ CGBlockInfo *NextBlockInfo;
+
+ const Capture &getCapture(const VarDecl *var) const {
+ return const_cast<CGBlockInfo*>(this)->getCapture(var);
+ }
+ Capture &getCapture(const VarDecl *var) {
+ llvm::DenseMap<const VarDecl*, Capture>::iterator
+ it = Captures.find(var);
+ assert(it != Captures.end() && "no entry for variable!");
+ return it->second;
+ }
+
+ const BlockDecl *getBlockDecl() const { return Block; }
+ const BlockExpr *getBlockExpr() const {
+ assert(BlockExpression);
+ assert(BlockExpression->getBlockDecl() == Block);
+ return BlockExpression;
+ }
+
+ CGBlockInfo(const BlockDecl *blockDecl, llvm::StringRef Name);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..8120217
--- /dev/null
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,28 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+
+// Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+typedef llvm::IRBuilder<false> CGBuilderTy;
+#else
+typedef llvm::IRBuilder<> CGBuilderTy;
+#endif
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..e30b513
--- /dev/null
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,4524 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ StringRef Name;
+ GlobalDecl D(FD);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // use the mangled name. This differs from the plain label on platforms
+ // that prefix labels.
+ if (FD->hasAttr<AsmLabelAttr>())
+ Name = getMangledName(D);
+ else
+ Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
+
+ llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
+}
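+
+// For example, __builtin_fabsf resolves to a declaration of fabsf: the
+// "+ 10" above skips the ten characters of the "__builtin_" prefix.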
+
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, llvm::IntegerType *IntType) {
+ V = CGF.EmitToMemory(V, T);
+
+ if (V->getType()->isPointerTy())
+ return CGF.Builder.CreatePtrToInt(V, IntType);
+
+ assert(V->getType() == IntType);
+ return V;
+}
+
+static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, llvm::Type *ResultType) {
+ V = CGF.EmitFromMemory(V, T);
+
+ if (ResultType->isPointerTy())
+ return CGF.Builder.CreateIntToPtr(V, ResultType);
+
+ assert(V->getType() == ResultType);
+ return V;
+}
+
+/// Utility to insert an atomic instruction based on the Intrinsic::ID
+/// and the expression node.
+static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E) {
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ llvm::Value *Args[2];
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
+}
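+
+// Sketch of the resulting lowering, as used for the __sync_* builtins later
+// in this file: a call such as
+//   __sync_fetch_and_add(&x, 1);   // x an int
+// becomes, after the pointer and integer casts above,
+//   atomicrmw add i32* %x, i32 1 seq_cst
+// whose result is the value the memory held *before* the addition.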
+
+/// Utility to insert an atomic instruction based on the Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
+static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E,
+ Instruction::BinaryOps Op) {
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ llvm::Value *Args[2];
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
+ Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
+}
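+
+// Unlike the helper above, this one re-applies Op to the fetched value, so
+// e.g. __sync_add_and_fetch(&x, 1) yields the *new* value of x rather than
+// the old one.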
+
+/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
+/// which must be a scalar floating point type.
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
+ const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
+ assert(ValTyP && "isn't scalar fp type!");
+
+ StringRef FnName;
+ switch (ValTyP->getKind()) {
+ default: llvm_unreachable("Isn't a scalar fp type!");
+ case BuiltinType::Float: FnName = "fabsf"; break;
+ case BuiltinType::Double: FnName = "fabs"; break;
+ case BuiltinType::LongDouble: FnName = "fabsl"; break;
+ }
+
+ // The prototype is something that takes and returns whatever V's type is.
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
+ false);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
+
+ return CGF.Builder.CreateCall(Fn, V, "abs");
+}
+
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
+ const CallExpr *E, llvm::Value *calleeValue) {
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
+ ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
+}
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
+ !Result.hasSideEffects()) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
+ Result.Val.getInt()));
+ if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
+ Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ case Builtin::BI__builtin___NSStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__builtin_va_end: {
+ Value *ArgValue = EmitVAListRef(E->getArg(0));
+ llvm::Type *DestType = Int8PtrTy;
+ if (ArgValue->getType() != DestType)
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getName().data());
+
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::vaend : Intrinsic::vastart;
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+ }
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0));
+ Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+ llvm::Type *Type = Int8PtrTy;
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ DstPtr, SrcPtr));
+ }
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
+ llvm::Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ctzs:
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clzs:
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
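+    // e.g. ffs(8): cttz(8) == 3, so the result is 3 + 1 == 4, the 1-based
+    // index of the lowest set bit; the select below makes ffs(0) == 0.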
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
+ Builder.getTrue()),
+ llvm::ConstantInt::get(ArgType, 1));
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue);
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_expect: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+
+ Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
+ "expval");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
+ return RValue::get(Builder.CreateCall(F, ArgValue));
+ }
+ case Builtin::BI__builtin_object_size: {
+ // We pass this builtin onto the optimizer so that it can
+ // figure out the object size in more complex cases.
+ llvm::Type *ResType = ConvertType(E->getType());
+
+    // LLVM only supports 0 and 2; make sure that we pass it along
+    // as a boolean.
+ Value *Ty = EmitScalarExpr(E->getArg(1));
+ ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
+ assert(CI);
+ uint64_t val = CI->getZExtValue();
+ CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
+ return RValue::get(Builder.CreateCall2(F,
+ EmitScalarExpr(E->getArg(0)),
+ CI));
+ }
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+    // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ llvm::ConstantInt::get(Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ llvm::ConstantInt::get(Int32Ty, 3);
+ Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
+ }
+ case Builtin::BI__builtin_trap: {
+ Value *F = CGM.getIntrinsic(Intrinsic::trap);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_unreachable: {
+ if (CatchUndefined)
+ EmitBranch(getTrapBB());
+ else
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("unreachable.cont"));
+
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
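+    // e.g. (illustrative) __builtin_isgreater(a, b) becomes
+    //   zext (fcmp ogt a, b) to int
+    // where the ordered predicate quietly yields false if either operand
+    // is NaN.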
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_isnan: {
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ // isinf(x) --> fabs(x) == infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = EmitFAbs(*this, V, E->getArg(0)->getType());
+
+ V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ // TODO: BI__builtin_isinf_sign
+ // isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
+
+ case Builtin::BI__builtin_isnormal: {
+ // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsLessThanInf =
+ Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
+ V = Builder.CreateAnd(V, IsNormal, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+ // isfinite(x) --> x == x && fabs(x) != infinity;
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsNotInf =
+ Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+
+ V = Builder.CreateAnd(Eq, IsNotInf, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_fpclassify: {
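+    // Argument order sketch (illustrative): the five classification
+    // results come first and the value last, as in
+    //   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
+    //                        FP_SUBNORMAL, FP_ZERO, x)
+    // and the blocks below test zero, NaN, infinity, then
+    // normal-vs-subnormal, in that order.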
+ Value *V = EmitScalarExpr(E->getArg(5));
+ llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+
+ // Create Result
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result =
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
+ "fpclassify_result");
+
+ // if (V==0) return FP_ZERO
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
+ "iszero");
+ Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
+ BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ZeroLiteral, Begin);
+
+ // if (V != V) return FP_NAN
+ Builder.SetInsertPoint(NotZero);
+ Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
+ Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+ BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+ Builder.CreateCondBr(IsNan, End, NotNan);
+ Result->addIncoming(NanLiteral, NotZero);
+
+ // if (fabs(V) == infinity) return FP_INFINITY
+ Builder.SetInsertPoint(NotNan);
+ Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *IsInf =
+ Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
+ "isinf");
+ Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+ BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+ Builder.CreateCondBr(IsInf, End, NotInf);
+ Result->addIncoming(InfLiteral, NotNan);
+
+ // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+ Builder.SetInsertPoint(NotInf);
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ Value *NormalResult =
+ Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)));
+ Builder.CreateBr(End);
+ Result->addIncoming(NormalResult, NotInf);
+
+ // return Result
+ Builder.SetInsertPoint(End);
+ return RValue::get(Result);
+ }
+
+ case Builtin::BIalloca:
+ case Builtin::BI__builtin_alloca: {
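+    // (Illustrative) __builtin_alloca(n) becomes "alloca i8, i32 %n",
+    // i.e. an n-byte dynamic allocation on the current stack frame.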
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
+ }
+ case Builtin::BIbzero:
+ case Builtin::BI__builtin_bzero: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(1));
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+ case Builtin::BImemcpy:
+ case Builtin::BI__builtin_memcpy: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Address, SrcAddr, SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+
+ case Builtin::BI__builtin___memcpy_chk: {
+    // fold __builtin___memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
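+    // e.g. (illustrative) __builtin___memcpy_chk(d, s, 8, 16) folds to a
+    // plain memcpy because the copy size (8) cannot exceed the known
+    // destination size (16).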
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Value *Dest = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Dest, Src, SizeVal, Align, false);
+ return RValue::get(Dest);
+ }
+
+ case Builtin::BI__builtin_objc_memmove_collectable: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ Address, SrcAddr, SizeVal);
+ return RValue::get(Address);
+ }
+
+ case Builtin::BI__builtin___memmove_chk: {
+    // fold __builtin___memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Value *Dest = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Dest, Src, SizeVal, Align, false);
+ return RValue::get(Dest);
+ }
+
+ case Builtin::BImemmove:
+ case Builtin::BI__builtin_memmove: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Address, SrcAddr, SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+ case Builtin::BImemset:
+ case Builtin::BI__builtin_memset: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin___memset_chk: {
+    // fold __builtin___memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
+
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_dwarf_cfa: {
+ // The offset in bytes from the first argument to the CFA.
+ //
+ // Why on earth is this in the frontend? Is there any reason at
+ // all that the backend can't reasonably determine this while
+ // lowering llvm.eh.dwarf.cfa()?
+ //
+ // TODO: If there's a satisfactory reason, add a target hook for
+ // this instead of hard-coding 0, which is correct for most targets.
+ int32_t Offset = 0;
+
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
+ return RValue::get(Builder.CreateCall(F,
+ llvm::ConstantInt::get(Int32Ty, Offset)));
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_frob_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_dwarf_sp_column: {
+ llvm::IntegerType *Ty
+ = cast<llvm::IntegerType>(ConvertType(E->getType()));
+ int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
+ if (Column == -1) {
+ CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
+ return RValue::get(llvm::UndefValue::get(Ty));
+ }
+ return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
+ }
+ case Builtin::BI__builtin_init_dwarf_reg_size_table: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
+ CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_eh_return: {
+ Value *Int = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
+ "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
+ Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
+ ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64);
+ Builder.CreateCall2(F, Int, Ptr);
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("builtin_eh_return.cont"));
+
+ return RValue::get(0);
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_extend_pointer: {
+ // Extends a pointer to the size of an _Unwind_Word, which is
+ // uint64_t on all platforms. Generally this gets poked into a
+ // register and eventually used as an address, so if the
+ // addressing registers are wider than pointers and the platform
+ // doesn't implicitly ignore high-order bits when doing
+ // addressing, we need to make sure we zext / sext based on
+ // the platform's expectations.
+ //
+ // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
+
+ // Cast the pointer to intptr_t.
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
+
+ // If that's 64 bits, we're done.
+ if (IntPtrTy->getBitWidth() == 64)
+ return RValue::get(Result);
+
+    // Otherwise, ask the target hooks what to do.
+ if (getTargetHooks().extendPointerWithSExt())
+ return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
+ else
+ return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
+ }
+ case Builtin::BI__builtin_setjmp: {
+ // Buffer is a void**.
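+    // Buffer layout assumed here (sketch): word 0 holds the frame pointer
+    // and word 2 the stack pointer, with the remaining words left to
+    // llvm.eh.sjlj.setjmp's own bookkeeping.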
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+
+ // Store the frame pointer to the setjmp buffer.
+ Value *FrameAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
+ Builder.CreateStore(FrameAddr, Buf);
+
+ // Store the stack pointer to the setjmp buffer.
+ Value *StackAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Value *StackSaveSlot =
+ Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Builder.CreateStore(StackAddr, StackSaveSlot);
+
+ // Call LLVM's EH setjmp, which is lightweight.
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+
+ // Call LLVM's EH longjmp, which is lightweight.
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
+
+ // longjmp doesn't return; mark this as unreachable.
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("longjmp.cont"));
+
+ return RValue::get(0);
+ }
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_swap:
+ llvm_unreachable("Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
+ llvm::Instruction::Xor);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16: {
+ QualType T = E->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(*this, Args[1], T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
+ Result = EmitFromInt(*this, Result, T, ValueType);
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16: {
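+    // Lowering sketch (illustrative):
+    //   bool ok = __sync_bool_compare_and_swap(&x, oldv, newv);
+    // becomes a seq_cst cmpxchg whose loaded result is compared against
+    // oldv with icmp eq, then zext'ed to the int return type.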
+ QualType T = E->getArg(1)->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *OldVal = Args[1];
+ Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
+ Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+ // zext bool to int.
+ Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
+ Store->setAlignment(StoreSize.getQuantity());
+ Store->setAtomic(llvm::Release);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__sync_synchronize: {
+ // We assume this is supposed to correspond to a C++0x-style
+ // sequentially-consistent fence (i.e. this is only usable for
+    // synchronization, not device I/O or anything like that). This intrinsic
+ // is really badly designed in the sense that in theory, there isn't
+ // any way to safely use it... but in practice, it mostly works
+ // to use it with non-atomic loads and stores to get acquire/release
+ // semantics.
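+    // (Illustrative) the builtin therefore emits exactly: fence seq_cst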
+ Builder.CreateFence(llvm::SequentiallyConsistent);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
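+    // (Illustrative) __atomic_test_and_set(p, 2 /*acquire*/) becomes an
+    // "atomicrmw xchg i8* %p, i8 1 acquire" whose old value is then
+    // tested for non-zero.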
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = 0;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__atomic_thread_fence:
+ case Builtin::BI__atomic_signal_fence:
+ case Builtin::BI__c11_atomic_thread_fence:
+ case Builtin::BI__c11_atomic_signal_fence: {
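+    // e.g. (illustrative) __atomic_thread_fence(2 /*acquire*/) emits
+    // "fence acquire", while the signal-fence builtins use the
+    // single-thread synchronization scope instead of cross-thread.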
+ llvm::SynchronizationScope Scope;
+ if (BuiltinID == Builtin::BI__atomic_signal_fence ||
+ BuiltinID == Builtin::BI__c11_atomic_signal_fence)
+ Scope = llvm::SingleThread;
+ else
+ Scope = llvm::CrossThread;
+ Value *Order = EmitScalarExpr(E->getArg(0));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Builder.CreateFence(llvm::Acquire, Scope);
+ break;
+ case 3: // memory_order_release
+ Builder.CreateFence(llvm::Release, Scope);
+ break;
+ case 4: // memory_order_acq_rel
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ break;
+ case 5: // memory_order_seq_cst
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ ReleaseBB = createBasicBlock("release", CurFn);
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+ Builder.SetInsertPoint(AcquireBB);
+ Builder.CreateFence(llvm::Acquire, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+
+ Builder.SetInsertPoint(ReleaseBB);
+ Builder.CreateFence(llvm::Release, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+ Builder.SetInsertPoint(AcqRelBB);
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+ Builder.SetInsertPoint(SeqCstBB);
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // TODO: there is currently no set of optimizer flags
+ // sufficient for us to rewrite sqrt to @llvm.sqrt.
+ // -fmath-errno=0 is not good enough; we need finiteness.
+ // We could probably precondition the call with an ult
+ // against 0, but is that worth the complexity?
+ break;
+ }
+
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+    // Rewrite pow to an intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
+ }
+
+ case Builtin::BIfma:
+ case Builtin::BIfmaf:
+ case Builtin::BIfmal:
+ case Builtin::BI__builtin_fma:
+ case Builtin::BI__builtin_fmaf:
+ case Builtin::BI__builtin_fmal: {
+ // Rewrite fma to intrinsic.
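+    // e.g. (illustrative) fma(a, b, c) on doubles becomes
+    //   call double @llvm.fma.f64(double %a, double %b, double %c)
+    // i.e. a fused multiply-add with a single rounding step.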
+ Value *FirstArg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = FirstArg->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
+ return RValue::get(Builder.CreateCall3(F, FirstArg,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2))));
+ }
+
+ case Builtin::BI__builtin_signbit:
+ case Builtin::BI__builtin_signbitf:
+ case Builtin::BI__builtin_signbitl: {
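+    // Sketch: the float is bitcast to a same-width integer and its sign
+    // bit tested, so e.g. (illustrative) __builtin_signbit(-0.0) yields
+    // true via "icmp slt i64 %bits, 0".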
+ LLVMContext &C = CGM.getLLVMContext();
+
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgTy = Arg->getType();
+ if (ArgTy->isPPC_FP128Ty())
+ break; // FIXME: I'm not sure what the right implementation is here.
+ int ArgWidth = ArgTy->getPrimitiveSizeInBits();
+ llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
+ Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
+ Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_annotation: {
+ llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ AnnVal->getType());
+
+ // Get the annotation string, go through casts. Sema requires this to be a
+    // non-wide string literal, potentially cast, so the cast<> is safe.
+ const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
+ llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
+ return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
+ }
+ }
+
+ // If this is an alias for a lib function (e.g. __builtin_sin), emit
+ // the call using the normal call path, but using the unmangled
+ // version of the function name.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E,
+ CGM.getBuiltinLibFunction(FD, BuiltinID));
+
+ // If this is a predefined lib function (e.g. malloc), emit the call
+ // using exactly the normal call path.
+ if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
+
+ // See if we have a target specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+ if (const char *Prefix =
+ llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue;
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ ArgValue = EmitScalarExpr(E->getArg(i));
+ } else {
+ // If this is required to be a constant, constant fold it so that we
+ // know that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
+ assert(IsConst && "Constant arg isn't actually constant?");
+ (void)IsConst;
+ ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
+ }
+
+ // If the intrinsic arg type is different from the builtin arg type
+ // we need to do a bit cast.
+ llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args);
+ QualType BuiltinRetType = E->getType();
+
+ llvm::Type *RetTy = VoidTy;
+ if (!BuiltinRetType->isVoidType())
+ RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+  // Unknown builtin: we already reported it as unsupported; just return undef.
+ if (hasAggregateLLVMType(E->getType()))
+ return RValue::getAggregate(CreateMemTemp(E->getType()));
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (Target.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return EmitARMBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return EmitX86BuiltinExpr(BuiltinID, E);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ return EmitPPCBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::hexagon:
+ return EmitHexagonBuiltinExpr(BuiltinID, E);
+ default:
+ return 0;
+ }
+}
+
+static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags TypeFlags) {
+ int IsQuad = TypeFlags.isQuad();
+ switch (TypeFlags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ case NeonTypeFlags::Float16:
+ return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
+ case NeonTypeFlags::Int64:
+ return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
+ case NeonTypeFlags::Float32:
+ return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
+ }
+ llvm_unreachable("Invalid NeonTypeFlags element type!");
+}
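+// e.g. (illustrative) an Int32 element type yields <2 x i32>, or <4 x i32>
+// when the quad flag is set, since the lane count is doubled by "<< IsQuad"
+// above.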
+
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+  Value *SV = llvm::ConstantVector::getSplat(nElts, C);
+ return Builder.CreateShuffleVector(V, V, SV, "lane");
+}
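+// e.g. (illustrative) splatting lane 0 of a <4 x i32> value uses a
+// shufflevector whose mask is zeroinitializer.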
+
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
+ const char *name,
+ unsigned shift, bool rightshift) {
+ unsigned j = 0;
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j)
+ if (shift > 0 && shift == j)
+ Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
+ else
+ Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+
+ return Builder.CreateCall(F, Ops, name);
+}
+
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
+ bool neg) {
+ int SV = cast<ConstantInt>(V)->getSExtValue();
+
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
+ return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
+}
+
+/// GetPointeeAlignment - Given an expression with a pointer type, find the
+/// alignment of the type referenced by the pointer. Skip over implicit
+/// casts.
+unsigned CodeGenFunction::GetPointeeAlignment(const Expr *Addr) {
+ unsigned Align = 1;
+ // Check if the type is a pointer. The implicit cast operand might not be.
+ while (Addr->getType()->isPointerType()) {
+ QualType PtTy = Addr->getType()->getPointeeType();
+
+ // Can't get alignment of incomplete types.
+ if (!PtTy->isIncompleteType()) {
+ unsigned NewA = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ if (NewA > Align)
+ Align = NewA;
+ }
+
+ // If the address is an implicit cast, repeat with the cast operand.
+ if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
+ Addr = CastAddr->getSubExpr();
+ continue;
+ }
+ break;
+ }
+ return Align;
+}
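+// For instance (illustrative), an argument of type 'int *' yields 4 on
+// typical targets; walking through implicit casts lets a pointer that was
+// implicitly converted from a more-aligned pointee keep the larger value.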
+
+/// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+/// the alignment of the type referenced by the pointer. Skip over implicit
+/// casts. Return the alignment as an llvm::Value.
+Value *CodeGenFunction::GetPointeeAlignmentValue(const Expr *Addr) {
+ return llvm::ConstantInt::get(Int32Ty, GetPointeeAlignment(Addr));
+}
+
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (BuiltinID == ARM::BI__clear_cache) {
+ const FunctionDecl *FD = E->getDirectCallee();
+    // Oddly, people occasionally write this call without arguments and gcc
+    // accepts it; it's also marked as varargs in the description file.
+ SmallVector<Value*, 2> Ops;
+ for (unsigned i = 0; i < E->getNumArgs(); i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
+ return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
+
+ Value *LdPtr = EmitScalarExpr(E->getArg(0));
+ Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");
+
+ Value *Val0 = Builder.CreateExtractValue(Val, 1);
+ Value *Val1 = Builder.CreateExtractValue(Val, 0);
+ Val0 = Builder.CreateZExt(Val0, Int64Ty);
+ Val1 = Builder.CreateZExt(Val1, Int64Ty);
+
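+    // Recombine the two halves as (hi << 32) | lo; element 1 of the
+    // ldrexd result supplies the high word here.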
+ Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
+ Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
+ return Builder.CreateOr(Val, Val1);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_strexd) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
+
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Builder.CreateStore(Val, Tmp);
+
+    Value *LdPtr =
+        Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ Val = Builder.CreateLoad(LdPtr);
+
+ Value *Arg0 = Builder.CreateExtractValue(Val, 0);
+ Value *Arg1 = Builder.CreateExtractValue(Val, 1);
+ Value *StPtr = EmitScalarExpr(E->getArg(1));
+ return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
+ }
+
+ SmallVector<Value*, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ // vget_lane and vset_lane are not overloaded and do not have an extra
+ // argument that specifies the vector type.
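+  // e.g. (illustrative) vget_lane_i32(v, 1) is simply
+  //   extractelement <2 x i32> %v, i32 1
+  // and vset_lane is the matching insertelement.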
+ switch (BuiltinID) {
+ default: break;
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ }
+
+ // Get the last argument, which specifies the vector type.
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return 0;
+
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ // Determine the overloaded type of this builtin.
+ llvm::Type *Ty;
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ Ty = FloatTy;
+ else
+ Ty = DoubleTy;
+
+ // Determine whether this is an unsigned conversion or not.
+ bool usgn = Result.getZExtValue() == 1;
+ unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
+
+ // Call the appropriate intrinsic.
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ return Builder.CreateCall(F, Ops, "vcvtr");
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(Result.getZExtValue());
+ bool usgn = Type.isUnsigned();
+ bool quad = Type.isQuad();
+ bool rightShift = false;
+
+ llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ unsigned Int;
+ switch (BuiltinID) {
+ default: return 0;
+ case ARM::BI__builtin_neon_vabd_v:
+ case ARM::BI__builtin_neon_vabdq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
+ case ARM::BI__builtin_neon_vabs_v:
+ case ARM::BI__builtin_neon_vabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
+ Ops, "vabs");
+ case ARM::BI__builtin_neon_vaddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
+ Ops, "vaddhn");
+ case ARM::BI__builtin_neon_vcale_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcage_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcaleq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcageq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcalt_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagt_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagtq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcls_v:
+ case ARM::BI__builtin_neon_vclsq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
+ return EmitNeonCall(F, Ops, "vcls");
+ }
+ case ARM::BI__builtin_neon_vclz_v:
+ case ARM::BI__builtin_neon_vclzq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
+ return EmitNeonCall(F, Ops, "vclz");
+ }
+ case ARM::BI__builtin_neon_vcnt_v:
+ case ARM::BI__builtin_neon_vcntq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
+ return EmitNeonCall(F, Ops, "vcnt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f16_v: {
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f16_v builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f32_f16: {
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f32_f16 builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_f32_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ case ARM::BI__builtin_neon_vcvt_s32_v:
+ case ARM::BI__builtin_neon_vcvt_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_u32_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { FloatTy, Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
+ : Intrinsic::arm_neon_vcvtfxs2fp;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_s32_v:
+ case ARM::BI__builtin_neon_vcvt_n_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { Ty, FloatTy };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
+ : Intrinsic::arm_neon_vcvtfp2fxs;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vext_v:
+ case ARM::BI__builtin_neon_vextq_v: {
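+    // vext extracts a window from the concatenation of the two inputs,
+    // e.g. (illustrative) a #1 immediate on <4 x i16> operands produces
+    // the shuffle mask <1, 2, 3, 4>.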
+ int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value *SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+ }
+ case ARM::BI__builtin_neon_vhadd_v:
+ case ARM::BI__builtin_neon_vhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
+ case ARM::BI__builtin_neon_vhsub_v:
+ case ARM::BI__builtin_neon_vhsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
+ Ops, "vld1");
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
+ }
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v: {
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v: {
+    // Handle 64-bit elements as a special case; no "dup" is needed.
+ if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
+ break;
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3;
+ break;
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4;
+ break;
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3lane;
+ break;
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4lane;
+ break;
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value*, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
+
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+ Args.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
+    // Splat lane 0 to all elements in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vmax_v:
+ case ARM::BI__builtin_neon_vmaxq_v:
+ Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
+ case ARM::BI__builtin_neon_vmin_v:
+ case ARM::BI__builtin_neon_vminq_v:
+ Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
+ case ARM::BI__builtin_neon_vmovl_v: {
+    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ if (usgn)
+ return Builder.CreateZExt(Ops[0], Ty, "vmovl");
+ return Builder.CreateSExt(Ops[0], Ty, "vmovl");
+ }
+ case ARM::BI__builtin_neon_vmovn_v: {
+ llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
+ return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
+ }
+ case ARM::BI__builtin_neon_vmul_v:
+ case ARM::BI__builtin_neon_vmulq_v:
+ assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
+ Ops, "vmul");
+ case ARM::BI__builtin_neon_vmull_v:
+ Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+ Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+ case ARM::BI__builtin_neon_vpadal_v:
+ case ARM::BI__builtin_neon_vpadalq_v: {
+ Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ llvm::Type *EltTy =
+ llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
+ }
+ case ARM::BI__builtin_neon_vpadd_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
+ Ops, "vpadd");
+ case ARM::BI__builtin_neon_vpaddl_v:
+ case ARM::BI__builtin_neon_vpaddlq_v: {
+ Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
+ }
+ case ARM::BI__builtin_neon_vpmax_v:
+ Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
+ case ARM::BI__builtin_neon_vpmin_v:
+ Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
+ case ARM::BI__builtin_neon_vqabs_v:
+ case ARM::BI__builtin_neon_vqabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
+ Ops, "vqabs");
+ case ARM::BI__builtin_neon_vqadd_v:
+ case ARM::BI__builtin_neon_vqaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
+ case ARM::BI__builtin_neon_vqdmlal_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
+ Ops, "vqdmlal");
+ case ARM::BI__builtin_neon_vqdmlsl_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
+ Ops, "vqdmlsl");
+ case ARM::BI__builtin_neon_vqdmulh_v:
+ case ARM::BI__builtin_neon_vqdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
+ Ops, "vqdmulh");
+ case ARM::BI__builtin_neon_vqdmull_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
+ Ops, "vqdmull");
+ case ARM::BI__builtin_neon_vqmovn_v:
+ Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
+ case ARM::BI__builtin_neon_vqmovun_v:
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
+                        Ops, "vqmovun");
+ case ARM::BI__builtin_neon_vqneg_v:
+ case ARM::BI__builtin_neon_vqnegq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
+ Ops, "vqneg");
+ case ARM::BI__builtin_neon_vqrdmulh_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
+ Ops, "vqrdmulh");
+ case ARM::BI__builtin_neon_vqrshl_v:
+ case ARM::BI__builtin_neon_vqrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
+ case ARM::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
+ 1, true);
+ case ARM::BI__builtin_neon_vqrshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
+ Ops, "vqrshrun_n", 1, true);
+ case ARM::BI__builtin_neon_vqshl_v:
+ case ARM::BI__builtin_neon_vqshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
+ case ARM::BI__builtin_neon_vqshl_n_v:
+ case ARM::BI__builtin_neon_vqshlq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
+ 1, false);
+ case ARM::BI__builtin_neon_vqshlu_n_v:
+ case ARM::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
+ Ops, "vqshlu", 1, false);
+ case ARM::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
+ 1, true);
+ case ARM::BI__builtin_neon_vqshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
+ Ops, "vqshrun_n", 1, true);
+ case ARM::BI__builtin_neon_vqsub_v:
+ case ARM::BI__builtin_neon_vqsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
+ case ARM::BI__builtin_neon_vraddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
+ Ops, "vraddhn");
+ case ARM::BI__builtin_neon_vrecpe_v:
+ case ARM::BI__builtin_neon_vrecpeq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
+ Ops, "vrecpe");
+ case ARM::BI__builtin_neon_vrecps_v:
+ case ARM::BI__builtin_neon_vrecpsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
+ Ops, "vrecps");
+ case ARM::BI__builtin_neon_vrhadd_v:
+ case ARM::BI__builtin_neon_vrhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
+ case ARM::BI__builtin_neon_vrshl_v:
+ case ARM::BI__builtin_neon_vrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
+ case ARM::BI__builtin_neon_vrshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
+ Ops, "vrshrn_n", 1, true);
+ case ARM::BI__builtin_neon_vrshr_n_v:
+ case ARM::BI__builtin_neon_vrshrq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
+ case ARM::BI__builtin_neon_vrsqrte_v:
+ case ARM::BI__builtin_neon_vrsqrteq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
+ Ops, "vrsqrte");
+ case ARM::BI__builtin_neon_vrsqrts_v:
+ case ARM::BI__builtin_neon_vrsqrtsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
+ Ops, "vrsqrts");
+ case ARM::BI__builtin_neon_vrsra_n_v:
+ case ARM::BI__builtin_neon_vrsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ case ARM::BI__builtin_neon_vrsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
+ Ops, "vrsubhn");
+ case ARM::BI__builtin_neon_vshl_v:
+ case ARM::BI__builtin_neon_vshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
+ case ARM::BI__builtin_neon_vshll_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
+ case ARM::BI__builtin_neon_vshl_n_v:
+ case ARM::BI__builtin_neon_vshlq_n_v:
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
+                             "vshl_n");
+ case ARM::BI__builtin_neon_vshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
+ Ops, "vshrn_n", 1, true);
+ case ARM::BI__builtin_neon_vshr_n_v:
+ case ARM::BI__builtin_neon_vshrq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
+ else
+ return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+ case ARM::BI__builtin_neon_vsri_n_v:
+ case ARM::BI__builtin_neon_vsriq_n_v:
+ rightShift = true;
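+    // Fall through: vsri shares the vshiftins path with vsli; only the
+    // shift direction differs.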
+ case ARM::BI__builtin_neon_vsli_n_v:
+ case ARM::BI__builtin_neon_vsliq_n_v:
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
+ Ops, "vsli_n");
+ case ARM::BI__builtin_neon_vsra_n_v:
+ case ARM::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
+ if (usgn)
+ Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
+ else
+ Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ StoreInst *St = Builder.CreateStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return St;
+ }
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
+ Ops, "vsubhn");
+ case ARM::BI__builtin_neon_vtbl1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
+ Ops, "vtbl1");
+ case ARM::BI__builtin_neon_vtbl2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
+ Ops, "vtbl2");
+ case ARM::BI__builtin_neon_vtbl3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
+ Ops, "vtbl3");
+ case ARM::BI__builtin_neon_vtbl4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
+ Ops, "vtbl4");
+ case ARM::BI__builtin_neon_vtbx1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
+ Ops, "vtbx1");
+ case ARM::BI__builtin_neon_vtbx2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
+ Ops, "vtbx2");
+ case ARM::BI__builtin_neon_vtbx3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
+ Ops, "vtbx3");
+ case ARM::BI__builtin_neon_vtbx4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
+ Ops, "vtbx4");
+ case ARM::BI__builtin_neon_vtst_v:
+ case ARM::BI__builtin_neon_vtstq_v: {
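+    // vtst: AND the operands, compare against zero, and sign-extend the
+    // resulting mask to the full element width.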
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ ConstantAggregateZero::get(Ty));
+ return Builder.CreateSExt(Ops[0], Ty, "vtst");
+ }
+ case ARM::BI__builtin_neon_vtrn_v:
+ case ARM::BI__builtin_neon_vtrnq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = 0;
+
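+    // Each VTRN result interleaves the even (vi == 0) or odd (vi == 1)
+    // lanes of the two inputs.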
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(Builder.getInt32(i+vi));
+ Indices.push_back(Builder.getInt32(i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vuzp_v:
+ case ARM::BI__builtin_neon_vuzpq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = 0;
+
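+    // Each VUZP result gathers the even (vi == 0) or odd (vi == 1) lanes
+    // across the concatenated inputs.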
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzipq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = 0;
+
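+    // Each VZIP result zips the low (vi == 0) or high (vi == 1) halves of
+    // the two inputs lane by lane.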
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+ Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ }
+}
+
+llvm::Value *CodeGenFunction::
+BuildVector(ArrayRef<llvm::Value*> Ops) {
+ assert((Ops.size() & (Ops.size() - 1)) == 0 &&
+ "Not a power-of-two sized vector!");
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
+
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ SmallVector<llvm::Constant*, 16> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return llvm::ConstantVector::get(CstOps);
+ }
+
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
+
+ return Result;
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ SmallVector<Value*, 4> Ops;
+
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ continue;
+ }
+
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+
+ switch (BuiltinID) {
+ default: return 0;
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return Builder.CreateBitCast(BuildVector(Ops),
+ llvm::Type::getX86_MMXTy(getLLVMContext()));
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ return Builder.CreateExtractElement(Ops[0],
+ llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI__builtin_ia32_ldmxcsr: {
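+    // The ldmxcsr intrinsic takes a pointer, so spill the i32 argument to
+    // a stack temporary first.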
+ llvm::Type *PtrTy = Int8PtrTy;
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
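+    // The stmxcsr intrinsic writes through a pointer; give it a stack
+    // temporary and load the result back from it.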
+ llvm::Type *PtrTy = Int8PtrTy;
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+    // Cast the value to v2i64.
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+    // Extract element 0 (storelps) or element 1 (storehps).
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+    // Cast the pointer to i64* and store the extracted element.
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_palignr: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Emit the shift through the MMX psrl.q intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors by 16 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_palignr128: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+      // Emit the shift through the SSE2 psrl.dq intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors by 32 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_palignr256: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ SmallVector<llvm::Constant*, 32> Indices;
+      // 256-bit palignr operates on 128-bit lanes, so build the shuffle
+      // mask lane by lane.
+ for (unsigned l = 0; l != 2; ++l) {
+ unsigned LaneStart = l * 16;
+ unsigned LaneEnd = (l+1) * 16;
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = shiftVal + i + LaneStart;
+ if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
+ }
+ }
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+      // Emit the shift through the AVX2 psrl.dq intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors by 32 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_movntps:
+ case X86::BI__builtin_ia32_movntpd:
+ case X86::BI__builtin_ia32_movntdq:
+ case X86::BI__builtin_ia32_movnti: {
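+    // Tag the store with !nontemporal metadata so the backend selects a
+    // non-temporal move.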
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
+ Builder.getInt32(1));
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()),
+ "cast");
+ StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ SI->setAlignment(16);
+ return SI;
+ }
+ // 3DNow!
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi: {
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+    switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi:
+ name = "pswapd";
+ ID = Intrinsic::x86_3dnowa_pswapd;
+ break;
+ }
+ llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
+ Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, name);
+ }
+ }
+}
+
+Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+  SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
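+  // Map each Hexagon builtin directly onto the LLVM intrinsic of the same
+  // name.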
+ switch (BuiltinID) {
+ default: return 0;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeq:
+ ID = Intrinsic::hexagon_C2_cmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgt:
+ ID = Intrinsic::hexagon_C2_cmpgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtu:
+ ID = Intrinsic::hexagon_C2_cmpgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqp:
+ ID = Intrinsic::hexagon_C2_cmpeqp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtp:
+ ID = Intrinsic::hexagon_C2_cmpgtp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtup:
+ ID = Intrinsic::hexagon_C2_cmpgtup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsset:
+ ID = Intrinsic::hexagon_C2_bitsset; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclr:
+ ID = Intrinsic::hexagon_C2_bitsclr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqi:
+ ID = Intrinsic::hexagon_C2_cmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgti:
+ ID = Intrinsic::hexagon_C2_cmpgti; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtui:
+ ID = Intrinsic::hexagon_C2_cmpgtui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgei:
+ ID = Intrinsic::hexagon_C2_cmpgei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgeui:
+ ID = Intrinsic::hexagon_C2_cmpgeui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmplt:
+ ID = Intrinsic::hexagon_C2_cmplt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpltu:
+ ID = Intrinsic::hexagon_C2_cmpltu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclri:
+ ID = Intrinsic::hexagon_C2_bitsclri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_and:
+ ID = Intrinsic::hexagon_C2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_or:
+ ID = Intrinsic::hexagon_C2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_xor:
+ ID = Intrinsic::hexagon_C2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_andn:
+ ID = Intrinsic::hexagon_C2_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_not:
+ ID = Intrinsic::hexagon_C2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_orn:
+ ID = Intrinsic::hexagon_C2_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_pxfer_map:
+ ID = Intrinsic::hexagon_C2_pxfer_map; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_any8:
+ ID = Intrinsic::hexagon_C2_any8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_all8:
+ ID = Intrinsic::hexagon_C2_all8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vitpack:
+ ID = Intrinsic::hexagon_C2_vitpack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mux:
+ ID = Intrinsic::hexagon_C2_mux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxii:
+ ID = Intrinsic::hexagon_C2_muxii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxir:
+ ID = Intrinsic::hexagon_C2_muxir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxri:
+ ID = Intrinsic::hexagon_C2_muxri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vmux:
+ ID = Intrinsic::hexagon_C2_vmux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mask:
+ ID = Intrinsic::hexagon_C2_mask; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbeq:
+ ID = Intrinsic::hexagon_A2_vcmpbeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbgtu:
+ ID = Intrinsic::hexagon_A2_vcmpbgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpheq:
+ ID = Intrinsic::hexagon_A2_vcmpheq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgt:
+ ID = Intrinsic::hexagon_A2_vcmphgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgtu:
+ ID = Intrinsic::hexagon_A2_vcmphgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpweq:
+ ID = Intrinsic::hexagon_A2_vcmpweq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgt:
+ ID = Intrinsic::hexagon_A2_vcmpwgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgtu:
+ ID = Intrinsic::hexagon_A2_vcmpwgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrpr:
+ ID = Intrinsic::hexagon_C2_tfrpr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrrp:
+ ID = Intrinsic::hexagon_C2_tfrrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpysmi:
+ ID = Intrinsic::hexagon_M2_mpysmi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsip:
+ ID = Intrinsic::hexagon_M2_macsip; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsin:
+ ID = Intrinsic::hexagon_M2_macsin; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_up:
+ ID = Intrinsic::hexagon_M2_mpy_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_up:
+ ID = Intrinsic::hexagon_M2_mpyu_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_rnd_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_rnd_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyi:
+ ID = Intrinsic::hexagon_M2_mpyi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyui:
+ ID = Intrinsic::hexagon_M2_mpyui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_maci:
+ ID = Intrinsic::hexagon_M2_maci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_acci:
+ ID = Intrinsic::hexagon_M2_acci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_accii:
+ ID = Intrinsic::hexagon_M2_accii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_nacci:
+ ID = Intrinsic::hexagon_M2_nacci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_naccii:
+ ID = Intrinsic::hexagon_M2_naccii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_subacc:
+ ID = Intrinsic::hexagon_M2_subacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s0:
+ ID = Intrinsic::hexagon_M2_vmac2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s1:
+ ID = Intrinsic::hexagon_M2_vmac2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2:
+ ID = Intrinsic::hexagon_M2_vmac2; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s0:
+ ID = Intrinsic::hexagon_M2_vmac2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s1:
+ ID = Intrinsic::hexagon_M2_vmac2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es:
+ ID = Intrinsic::hexagon_M2_vmac2es; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmac_s0:
+ ID = Intrinsic::hexagon_M2_vrmac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmpy_s0:
+ ID = Intrinsic::hexagon_M2_vrmpy_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s0:
+ ID = Intrinsic::hexagon_M2_vdmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s1:
+ ID = Intrinsic::hexagon_M2_vdmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s0:
+ ID = Intrinsic::hexagon_M2_vdmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s1:
+ ID = Intrinsic::hexagon_M2_vdmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s0:
+ ID = Intrinsic::hexagon_M2_cmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s1:
+ ID = Intrinsic::hexagon_M2_cmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s0:
+ ID = Intrinsic::hexagon_M2_cmacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s1:
+ ID = Intrinsic::hexagon_M2_cmacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s0:
+ ID = Intrinsic::hexagon_M2_cmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s1:
+ ID = Intrinsic::hexagon_M2_cmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s0:
+ ID = Intrinsic::hexagon_M2_cmpysc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s1:
+ ID = Intrinsic::hexagon_M2_cmpysc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s0:
+ ID = Intrinsic::hexagon_M2_cnacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s1:
+ ID = Intrinsic::hexagon_M2_cnacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s0:
+ ID = Intrinsic::hexagon_M2_cnacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s1:
+ ID = Intrinsic::hexagon_M2_cnacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_acc_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_acc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1rp:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s0:
+ ID = Intrinsic::hexagon_M2_mmacls_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s1:
+ ID = Intrinsic::hexagon_M2_mmacls_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s0:
+ ID = Intrinsic::hexagon_M2_mmachs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s1:
+ ID = Intrinsic::hexagon_M2_mmachs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s0:
+ ID = Intrinsic::hexagon_M2_mmpyl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s1:
+ ID = Intrinsic::hexagon_M2_mmpyl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs0:
+ ID = Intrinsic::hexagon_M2_mmacls_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs1:
+ ID = Intrinsic::hexagon_M2_mmacls_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs0:
+ ID = Intrinsic::hexagon_M2_mmachs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs1:
+ ID = Intrinsic::hexagon_M2_mmachs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s0:
+ ID = Intrinsic::hexagon_M2_mmaculs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s1:
+ ID = Intrinsic::hexagon_M2_mmaculs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s0:
+ ID = Intrinsic::hexagon_M2_mmpyul_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s1:
+ ID = Intrinsic::hexagon_M2_mmpyul_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs0:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs1:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmaci_s0:
+ ID = Intrinsic::hexagon_M2_cmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacr_s0:
+ ID = Intrinsic::hexagon_M2_cmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyi_s0:
+ ID = Intrinsic::hexagon_M2_cmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyr_s0:
+ ID = Intrinsic::hexagon_M2_cmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vcrotate:
+ ID = Intrinsic::hexagon_S2_vcrotate; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_add:
+ ID = Intrinsic::hexagon_A2_add; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sub:
+ ID = Intrinsic::hexagon_A2_sub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsat:
+ ID = Intrinsic::hexagon_A2_addsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subsat:
+ ID = Intrinsic::hexagon_A2_subsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addi:
+ ID = Intrinsic::hexagon_A2_addi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_aslh:
+ ID = Intrinsic::hexagon_A2_aslh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_asrh:
+ ID = Intrinsic::hexagon_A2_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addp:
+ ID = Intrinsic::hexagon_A2_addp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addpsat:
+ ID = Intrinsic::hexagon_A2_addpsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsp:
+ ID = Intrinsic::hexagon_A2_addsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subp:
+ ID = Intrinsic::hexagon_A2_subp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_neg:
+ ID = Intrinsic::hexagon_A2_neg; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negsat:
+ ID = Intrinsic::hexagon_A2_negsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abs:
+ ID = Intrinsic::hexagon_A2_abs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abssat:
+ ID = Intrinsic::hexagon_A2_abssat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vconj:
+ ID = Intrinsic::hexagon_A2_vconj; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negp:
+ ID = Intrinsic::hexagon_A2_negp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_absp:
+ ID = Intrinsic::hexagon_A2_absp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_max:
+ ID = Intrinsic::hexagon_A2_max; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxu:
+ ID = Intrinsic::hexagon_A2_maxu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_min:
+ ID = Intrinsic::hexagon_A2_min; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minu:
+ ID = Intrinsic::hexagon_A2_minu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxp:
+ ID = Intrinsic::hexagon_A2_maxp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxup:
+ ID = Intrinsic::hexagon_A2_maxup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minp:
+ ID = Intrinsic::hexagon_A2_minp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minup:
+ ID = Intrinsic::hexagon_A2_minup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfr:
+ ID = Intrinsic::hexagon_A2_tfr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrsi:
+ ID = Intrinsic::hexagon_A2_tfrsi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrp:
+ ID = Intrinsic::hexagon_A2_tfrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrpi:
+ ID = Intrinsic::hexagon_A2_tfrpi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxtb:
+ ID = Intrinsic::hexagon_A2_zxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtb:
+ ID = Intrinsic::hexagon_A2_sxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxth:
+ ID = Intrinsic::hexagon_A2_zxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxth:
+ ID = Intrinsic::hexagon_A2_sxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combinew:
+ ID = Intrinsic::hexagon_A2_combinew; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combineii:
+ ID = Intrinsic::hexagon_A2_combineii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hh:
+ ID = Intrinsic::hexagon_A2_combine_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hl:
+ ID = Intrinsic::hexagon_A2_combine_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_lh:
+ ID = Intrinsic::hexagon_A2_combine_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_ll:
+ ID = Intrinsic::hexagon_A2_combine_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfril:
+ ID = Intrinsic::hexagon_A2_tfril; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrih:
+ ID = Intrinsic::hexagon_A2_tfrih; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_and:
+ ID = Intrinsic::hexagon_A2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_or:
+ ID = Intrinsic::hexagon_A2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xor:
+ ID = Intrinsic::hexagon_A2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_not:
+ ID = Intrinsic::hexagon_A2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_xor_xacc:
+ ID = Intrinsic::hexagon_M2_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subri:
+ ID = Intrinsic::hexagon_A2_subri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andir:
+ ID = Intrinsic::hexagon_A2_andir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orir:
+ ID = Intrinsic::hexagon_A2_orir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andp:
+ ID = Intrinsic::hexagon_A2_andp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orp:
+ ID = Intrinsic::hexagon_A2_orp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xorp:
+ ID = Intrinsic::hexagon_A2_xorp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_notp:
+ ID = Intrinsic::hexagon_A2_notp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtw:
+ ID = Intrinsic::hexagon_A2_sxtw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sat:
+ ID = Intrinsic::hexagon_A2_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sath:
+ ID = Intrinsic::hexagon_A2_sath; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satuh:
+ ID = Intrinsic::hexagon_A2_satuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satub:
+ ID = Intrinsic::hexagon_A2_satub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satb:
+ ID = Intrinsic::hexagon_A2_satb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddub:
+ ID = Intrinsic::hexagon_A2_vaddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddubs:
+ ID = Intrinsic::hexagon_A2_vaddubs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddh:
+ ID = Intrinsic::hexagon_A2_vaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddhs:
+ ID = Intrinsic::hexagon_A2_vaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vadduhs:
+ ID = Intrinsic::hexagon_A2_vadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddw:
+ ID = Intrinsic::hexagon_A2_vaddw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddws:
+ ID = Intrinsic::hexagon_A2_vaddws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavgh:
+ ID = Intrinsic::hexagon_A2_svavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavghs:
+ ID = Intrinsic::hexagon_A2_svavghs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svnavgh:
+ ID = Intrinsic::hexagon_A2_svnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddh:
+ ID = Intrinsic::hexagon_A2_svaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddhs:
+ ID = Intrinsic::hexagon_A2_svaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svadduhs:
+ ID = Intrinsic::hexagon_A2_svadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubh:
+ ID = Intrinsic::hexagon_A2_svsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubhs:
+ ID = Intrinsic::hexagon_A2_svsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubuhs:
+ ID = Intrinsic::hexagon_A2_svsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub:
+ ID = Intrinsic::hexagon_A2_vraddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub_acc:
+ ID = Intrinsic::hexagon_A2_vraddub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vradduh:
+ ID = Intrinsic::hexagon_M2_vradduh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubub:
+ ID = Intrinsic::hexagon_A2_vsubub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsububs:
+ ID = Intrinsic::hexagon_A2_vsububs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubh:
+ ID = Intrinsic::hexagon_A2_vsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubhs:
+ ID = Intrinsic::hexagon_A2_vsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubuhs:
+ ID = Intrinsic::hexagon_A2_vsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubw:
+ ID = Intrinsic::hexagon_A2_vsubw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubws:
+ ID = Intrinsic::hexagon_A2_vsubws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsh:
+ ID = Intrinsic::hexagon_A2_vabsh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabshsat:
+ ID = Intrinsic::hexagon_A2_vabshsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsw:
+ ID = Intrinsic::hexagon_A2_vabsw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabswsat:
+ ID = Intrinsic::hexagon_A2_vabswsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffw:
+ ID = Intrinsic::hexagon_M2_vabsdiffw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffh:
+ ID = Intrinsic::hexagon_M2_vabsdiffh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub:
+ ID = Intrinsic::hexagon_A2_vrsadub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub_acc:
+ ID = Intrinsic::hexagon_A2_vrsadub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgub:
+ ID = Intrinsic::hexagon_A2_vavgub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguh:
+ ID = Intrinsic::hexagon_A2_vavguh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgh:
+ ID = Intrinsic::hexagon_A2_vavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgh:
+ ID = Intrinsic::hexagon_A2_vnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgw:
+ ID = Intrinsic::hexagon_A2_vavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgw:
+ ID = Intrinsic::hexagon_A2_vnavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwr:
+ ID = Intrinsic::hexagon_A2_vavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwr:
+ ID = Intrinsic::hexagon_A2_vnavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwcr:
+ ID = Intrinsic::hexagon_A2_vavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwcr:
+ ID = Intrinsic::hexagon_A2_vnavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghcr:
+ ID = Intrinsic::hexagon_A2_vavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghcr:
+ ID = Intrinsic::hexagon_A2_vnavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguw:
+ ID = Intrinsic::hexagon_A2_vavguw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguwr:
+ ID = Intrinsic::hexagon_A2_vavguwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgubr:
+ ID = Intrinsic::hexagon_A2_vavgubr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguhr:
+ ID = Intrinsic::hexagon_A2_vavguhr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghr:
+ ID = Intrinsic::hexagon_A2_vavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghr:
+ ID = Intrinsic::hexagon_A2_vnavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminh:
+ ID = Intrinsic::hexagon_A2_vminh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxh:
+ ID = Intrinsic::hexagon_A2_vmaxh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminub:
+ ID = Intrinsic::hexagon_A2_vminub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxub:
+ ID = Intrinsic::hexagon_A2_vmaxub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuh:
+ ID = Intrinsic::hexagon_A2_vminuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuh:
+ ID = Intrinsic::hexagon_A2_vmaxuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminw:
+ ID = Intrinsic::hexagon_A2_vminw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxw:
+ ID = Intrinsic::hexagon_A2_vmaxw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuw:
+ ID = Intrinsic::hexagon_A2_vminuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuw:
+ ID = Intrinsic::hexagon_A2_vmaxuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r:
+ ID = Intrinsic::hexagon_S2_asr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r:
+ ID = Intrinsic::hexagon_S2_asl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r:
+ ID = Intrinsic::hexagon_S2_lsr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r:
+ ID = Intrinsic::hexagon_S2_lsl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p:
+ ID = Intrinsic::hexagon_S2_asr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p:
+ ID = Intrinsic::hexagon_S2_asl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p:
+ ID = Intrinsic::hexagon_S2_lsr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p:
+ ID = Intrinsic::hexagon_S2_lsl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_and:
+ ID = Intrinsic::hexagon_S2_asr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_and:
+ ID = Intrinsic::hexagon_S2_asl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_or:
+ ID = Intrinsic::hexagon_S2_asr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_or:
+ ID = Intrinsic::hexagon_S2_asl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_and:
+ ID = Intrinsic::hexagon_S2_asr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_and:
+ ID = Intrinsic::hexagon_S2_asl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_or:
+ ID = Intrinsic::hexagon_S2_asr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_or:
+ ID = Intrinsic::hexagon_S2_asl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asr_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r:
+ ID = Intrinsic::hexagon_S2_asr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r:
+ ID = Intrinsic::hexagon_S2_lsr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r:
+ ID = Intrinsic::hexagon_S2_asl_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p:
+ ID = Intrinsic::hexagon_S2_asr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p:
+ ID = Intrinsic::hexagon_S2_lsr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p:
+ ID = Intrinsic::hexagon_S2_asl_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and:
+ ID = Intrinsic::hexagon_S2_asr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and:
+ ID = Intrinsic::hexagon_S2_asl_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or:
+ ID = Intrinsic::hexagon_S2_asr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or:
+ ID = Intrinsic::hexagon_S2_asl_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and:
+ ID = Intrinsic::hexagon_S2_asr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and:
+ ID = Intrinsic::hexagon_S2_asl_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or:
+ ID = Intrinsic::hexagon_S2_asr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or:
+ ID = Intrinsic::hexagon_S2_asl_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_i_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri:
+ ID = Intrinsic::hexagon_S2_addasl_rrri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignib:
+ ID = Intrinsic::hexagon_S2_valignib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignrb:
+ ID = Intrinsic::hexagon_S2_valignrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vspliceib:
+ ID = Intrinsic::hexagon_S2_vspliceib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplicerb:
+ ID = Intrinsic::hexagon_S2_vsplicerb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrh:
+ ID = Intrinsic::hexagon_S2_vsplatrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrb:
+ ID = Intrinsic::hexagon_S2_vsplatrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert:
+ ID = Intrinsic::hexagon_S2_insert; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxb_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxh_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxw_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu:
+ ID = Intrinsic::hexagon_S2_extractu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp:
+ ID = Intrinsic::hexagon_S2_insertp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup:
+ ID = Intrinsic::hexagon_S2_extractup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert_rp:
+ ID = Intrinsic::hexagon_S2_insert_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu_rp:
+ ID = Intrinsic::hexagon_S2_extractu_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp_rp:
+ ID = Intrinsic::hexagon_S2_insertp_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup_rp:
+ ID = Intrinsic::hexagon_S2_extractup_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_i:
+ ID = Intrinsic::hexagon_S2_tstbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_i:
+ ID = Intrinsic::hexagon_S2_setbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_i:
+ ID = Intrinsic::hexagon_S2_togglebit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_i:
+ ID = Intrinsic::hexagon_S2_clrbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_r:
+ ID = Intrinsic::hexagon_S2_tstbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_r:
+ ID = Intrinsic::hexagon_S2_setbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_r:
+ ID = Intrinsic::hexagon_S2_togglebit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_r:
+ ID = Intrinsic::hexagon_S2_clrbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh:
+ ID = Intrinsic::hexagon_S2_asr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh:
+ ID = Intrinsic::hexagon_S2_lsr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh:
+ ID = Intrinsic::hexagon_S2_asl_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vh:
+ ID = Intrinsic::hexagon_S2_asr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vh:
+ ID = Intrinsic::hexagon_S2_asl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vh:
+ ID = Intrinsic::hexagon_S2_lsr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vh:
+ ID = Intrinsic::hexagon_S2_lsl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw:
+ ID = Intrinsic::hexagon_S2_asr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_i_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_r_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw:
+ ID = Intrinsic::hexagon_S2_lsr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw:
+ ID = Intrinsic::hexagon_S2_asl_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vw:
+ ID = Intrinsic::hexagon_S2_asr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vw:
+ ID = Intrinsic::hexagon_S2_asl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vw:
+ ID = Intrinsic::hexagon_S2_lsr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vw:
+ ID = Intrinsic::hexagon_S2_lsl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwh:
+ ID = Intrinsic::hexagon_S2_vrndpackwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwhs:
+ ID = Intrinsic::hexagon_S2_vrndpackwhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxtbh:
+ ID = Intrinsic::hexagon_S2_vsxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxtbh:
+ ID = Intrinsic::hexagon_S2_vzxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub:
+ ID = Intrinsic::hexagon_S2_vsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathub:
+ ID = Intrinsic::hexagon_S2_svsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathb:
+ ID = Intrinsic::hexagon_S2_svsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb:
+ ID = Intrinsic::hexagon_S2_vsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunohb:
+ ID = Intrinsic::hexagon_S2_vtrunohb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunewh:
+ ID = Intrinsic::hexagon_S2_vtrunewh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunowh:
+ ID = Intrinsic::hexagon_S2_vtrunowh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunehb:
+ ID = Intrinsic::hexagon_S2_vtrunehb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxthw:
+ ID = Intrinsic::hexagon_S2_vsxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxthw:
+ ID = Intrinsic::hexagon_S2_vzxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh:
+ ID = Intrinsic::hexagon_S2_vsatwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh:
+ ID = Intrinsic::hexagon_S2_vsatwuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_packhl:
+ ID = Intrinsic::hexagon_S2_packhl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_swiz:
+ ID = Intrinsic::hexagon_A2_swiz; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub_nopack:
+ ID = Intrinsic::hexagon_S2_vsathub_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb_nopack:
+ ID = Intrinsic::hexagon_S2_vsathb_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwuh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffob:
+ ID = Intrinsic::hexagon_S2_shuffob; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeb:
+ ID = Intrinsic::hexagon_S2_shuffeb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffoh:
+ ID = Intrinsic::hexagon_S2_shuffoh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeh:
+ ID = Intrinsic::hexagon_S2_shuffeh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_parityp:
+ ID = Intrinsic::hexagon_S2_parityp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lfsp:
+ ID = Intrinsic::hexagon_S2_lfsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbnorm:
+ ID = Intrinsic::hexagon_S2_clbnorm; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clb:
+ ID = Intrinsic::hexagon_S2_clb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0:
+ ID = Intrinsic::hexagon_S2_cl0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1:
+ ID = Intrinsic::hexagon_S2_cl1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbp:
+ ID = Intrinsic::hexagon_S2_clbp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0p:
+ ID = Intrinsic::hexagon_S2_cl0p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1p:
+ ID = Intrinsic::hexagon_S2_cl1p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_brev:
+ ID = Intrinsic::hexagon_S2_brev; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct0:
+ ID = Intrinsic::hexagon_S2_ct0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct1:
+ ID = Intrinsic::hexagon_S2_ct1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_interleave:
+ ID = Intrinsic::hexagon_S2_interleave; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_deinterleave:
+ ID = Intrinsic::hexagon_S2_deinterleave; break;
+
+ case Hexagon::BI__builtin_SI_to_SXTHI_asrh:
+ ID = Intrinsic::hexagon_SI_to_SXTHI_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_orn:
+ ID = Intrinsic::hexagon_A4_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andn:
+ ID = Intrinsic::hexagon_A4_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_ornp:
+ ID = Intrinsic::hexagon_A4_ornp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andnp:
+ ID = Intrinsic::hexagon_A4_andnp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineir:
+ ID = Intrinsic::hexagon_A4_combineir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineri:
+ ID = Intrinsic::hexagon_A4_combineri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneqi:
+ ID = Intrinsic::hexagon_C4_cmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneq:
+ ID = Intrinsic::hexagon_C4_cmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpltei:
+ ID = Intrinsic::hexagon_C4_cmpltei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplte:
+ ID = Intrinsic::hexagon_C4_cmplte; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteui:
+ ID = Intrinsic::hexagon_C4_cmplteui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteu:
+ ID = Intrinsic::hexagon_C4_cmplteu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneq:
+ ID = Intrinsic::hexagon_A4_rcmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneqi:
+ ID = Intrinsic::hexagon_A4_rcmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeq:
+ ID = Intrinsic::hexagon_A4_rcmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeqi:
+ ID = Intrinsic::hexagon_A4_rcmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9:
+ ID = Intrinsic::hexagon_C4_fastcorner9; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9_not:
+ ID = Intrinsic::hexagon_C4_fastcorner9_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_andn:
+ ID = Intrinsic::hexagon_C4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_and:
+ ID = Intrinsic::hexagon_C4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_orn:
+ ID = Intrinsic::hexagon_C4_and_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_or:
+ ID = Intrinsic::hexagon_C4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_andn:
+ ID = Intrinsic::hexagon_C4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_and:
+ ID = Intrinsic::hexagon_C4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_orn:
+ ID = Intrinsic::hexagon_C4_or_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_or:
+ ID = Intrinsic::hexagon_C4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_addaddi:
+ ID = Intrinsic::hexagon_S4_addaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_subaddi:
+ ID = Intrinsic::hexagon_S4_subaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_xacc:
+ ID = Intrinsic::hexagon_M4_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_and:
+ ID = Intrinsic::hexagon_M4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_or:
+ ID = Intrinsic::hexagon_M4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_xor:
+ ID = Intrinsic::hexagon_M4_and_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_andn:
+ ID = Intrinsic::hexagon_M4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_and:
+ ID = Intrinsic::hexagon_M4_xor_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_or:
+ ID = Intrinsic::hexagon_M4_xor_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_andn:
+ ID = Intrinsic::hexagon_M4_xor_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_and:
+ ID = Intrinsic::hexagon_M4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_or:
+ ID = Intrinsic::hexagon_M4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_xor:
+ ID = Intrinsic::hexagon_M4_or_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_andn:
+ ID = Intrinsic::hexagon_M4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andix:
+ ID = Intrinsic::hexagon_S4_or_andix; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andi:
+ ID = Intrinsic::hexagon_S4_or_andi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_ori:
+ ID = Intrinsic::hexagon_S4_or_ori; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_modwrapu:
+ ID = Intrinsic::hexagon_A4_modwrapu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_cround_rr:
+ ID = Intrinsic::hexagon_A4_cround_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri:
+ ID = Intrinsic::hexagon_A4_round_ri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr:
+ ID = Intrinsic::hexagon_A4_round_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat:
+ ID = Intrinsic::hexagon_A4_round_ri_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr_sat:
+ ID = Intrinsic::hexagon_A4_round_rr_sat; break;
+
+ }
+
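+  // By this point Ops already holds the emitted scalar arguments; the
+  // Hexagon intrinsics share their builtin's signature, so the operands are
+  // passed through to the intrinsic call unchanged.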
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ // vec_ld, vec_lvsl, vec_lvsr
+ case PPC::BI__builtin_altivec_lvx:
+ case PPC::BI__builtin_altivec_lvxl:
+ case PPC::BI__builtin_altivec_lvebx:
+ case PPC::BI__builtin_altivec_lvehx:
+ case PPC::BI__builtin_altivec_lvewx:
+ case PPC::BI__builtin_altivec_lvsl:
+ case PPC::BI__builtin_altivec_lvsr:
+ {
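+    // The builtin takes (offset, pointer); fold the offset into the pointer
+    // with a byte-wise GEP and drop the now-redundant pointer operand, since
+    // these intrinsics expect a single address.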
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
+ case PPC::BI__builtin_altivec_lvx:
+ ID = Intrinsic::ppc_altivec_lvx;
+ break;
+ case PPC::BI__builtin_altivec_lvxl:
+ ID = Intrinsic::ppc_altivec_lvxl;
+ break;
+ case PPC::BI__builtin_altivec_lvebx:
+ ID = Intrinsic::ppc_altivec_lvebx;
+ break;
+ case PPC::BI__builtin_altivec_lvehx:
+ ID = Intrinsic::ppc_altivec_lvehx;
+ break;
+ case PPC::BI__builtin_altivec_lvewx:
+ ID = Intrinsic::ppc_altivec_lvewx;
+ break;
+ case PPC::BI__builtin_altivec_lvsl:
+ ID = Intrinsic::ppc_altivec_lvsl;
+ break;
+ case PPC::BI__builtin_altivec_lvsr:
+ ID = Intrinsic::ppc_altivec_lvsr;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+
+ // vec_st
+ case PPC::BI__builtin_altivec_stvx:
+ case PPC::BI__builtin_altivec_stvxl:
+ case PPC::BI__builtin_altivec_stvebx:
+ case PPC::BI__builtin_altivec_stvehx:
+ case PPC::BI__builtin_altivec_stvewx:
+ {
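+    // The builtin takes (value, offset, pointer); fold the offset into the
+    // pointer with a byte-wise GEP so the store intrinsic receives a single
+    // (value, address) pair.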
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported st intrinsic!");
+ case PPC::BI__builtin_altivec_stvx:
+ ID = Intrinsic::ppc_altivec_stvx;
+ break;
+ case PPC::BI__builtin_altivec_stvxl:
+ ID = Intrinsic::ppc_altivec_stvxl;
+ break;
+ case PPC::BI__builtin_altivec_stvebx:
+ ID = Intrinsic::ppc_altivec_stvebx;
+ break;
+ case PPC::BI__builtin_altivec_stvehx:
+ ID = Intrinsic::ppc_altivec_stvehx;
+ break;
+ case PPC::BI__builtin_altivec_stvewx:
+ ID = Intrinsic::ppc_altivec_stvewx;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+ }
+}
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
new file mode 100644
index 0000000..88a0bdc
--- /dev/null
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -0,0 +1,126 @@
+//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for CUDA code generation targeting the NVIDIA CUDA
+// runtime library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Support/CallSite.h"
+
+#include <vector>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class CGNVCUDARuntime : public CGCUDARuntime {
+
+private:
+ llvm::Type *IntTy, *SizeTy;
+ llvm::PointerType *CharPtrTy, *VoidPtrTy;
+
+ llvm::Constant *getSetupArgumentFn() const;
+ llvm::Constant *getLaunchFn() const;
+
+public:
+ CGNVCUDARuntime(CodeGenModule &CGM);
+
+ void EmitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
+};
+
+}
+
+CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) : CGCUDARuntime(CGM) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ SizeTy = Types.ConvertType(Ctx.getSizeType());
+
+ CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
+ VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
+}
+
+llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
+ // cudaError_t cudaSetupArgument(void *, size_t, size_t)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(VoidPtrTy);
+ Params.push_back(SizeTy);
+ Params.push_back(SizeTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaSetupArgument");
+}
+
+llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
+ // cudaError_t cudaLaunch(char *)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(CharPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaLaunch");
+}
+
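+// The host-side kernel stub registers each argument with cudaSetupArgument
+// and then launches the kernel through cudaLaunch, bailing out as soon as
+// any runtime call fails.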
+void CGNVCUDARuntime::EmitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ // Build the argument value list and the argument stack struct type.
+ llvm::SmallVector<llvm::Value *, 16> ArgValues;
+ std::vector<llvm::Type *> ArgTypes;
+ for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(*I);
+ ArgValues.push_back(V);
+ assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
+ ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
+ }
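+  // Build a struct type mirroring the layout of the kernel's argument
+  // stack; the offset of each field is the byte offset that
+  // cudaSetupArgument expects as its third argument.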
+ llvm::StructType *ArgStackTy = llvm::StructType::get(
+ CGF.getLLVMContext(), ArgTypes);
+
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+
+ // Emit the calls to cudaSetupArgument
+ llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
+ for (unsigned I = 0, E = Args.size(); I != E; ++I) {
+ llvm::Value *Args[3];
+ llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
+ Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy);
+ Args[1] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getSizeOf(ArgTypes[I]),
+ SizeTy, false);
+ Args[2] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getOffsetOf(ArgStackTy, I),
+ SizeTy, false);
+ llvm::CallSite CS = CGF.EmitCallOrInvoke(cudaSetupArgFn, Args);
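+    // Stop setting up arguments (and skip the launch) as soon as a
+    // cudaSetupArgument call returns an error.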
+ llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
+ llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
+ CGF.EmitBlock(NextBlock);
+ }
+
+ // Emit the call to cudaLaunch
+ llvm::Constant *cudaLaunchFn = getLaunchFn();
+ llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
+ CGF.EmitCallOrInvoke(cudaLaunchFn, Arg);
+ CGF.EmitBranch(EndBlock);
+
+ CGF.EmitBlock(EndBlock);
+}
+
+CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
+ return new CGNVCUDARuntime(CGM);
+}
diff --git a/clang/lib/CodeGen/CGCUDARuntime.cpp b/clang/lib/CodeGen/CGCUDARuntime.cpp
new file mode 100644
index 0000000..77dc248
--- /dev/null
+++ b/clang/lib/CodeGen/CGCUDARuntime.cpp
@@ -0,0 +1,55 @@
+//===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCUDARuntime::~CGCUDARuntime() {}
+
+RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("kcall.configok");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
+
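+  // E->getConfig() is the implicit cudaConfigureCall generated for the
+  // <<<...>>> launch syntax; a non-zero (true) result means configuration
+  // failed, so branch straight to the end block and skip the kernel call.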
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(ConfigOKBlock);
+
+ const Decl *TargetDecl = 0;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ }
+ }
+
+ llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
+ CGF.EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ return RValue::get(0);
+}
diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h
new file mode 100644
index 0000000..a99a67a
--- /dev/null
+++ b/clang/lib/CodeGen/CGCUDARuntime.h
@@ -0,0 +1,54 @@
+//===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CUDARUNTIME_H
+#define CLANG_CODEGEN_CUDARUNTIME_H
+
+namespace clang {
+
+class CUDAKernelCallExpr;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+class FunctionArgList;
+class ReturnValueSlot;
+class RValue;
+
+class CGCUDARuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGCUDARuntime();
+
+ virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ virtual void EmitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) = 0;
+
+};
+
+/// Creates an instance of a CUDA runtime class.
+CGCUDARuntime *CreateNVCUDARuntime(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..7c08650
--- /dev/null
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,392 @@
+//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CGCXXABI.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Try to emit a base destructor as an alias to the base destructor of the
+/// unique base class with a non-trivial destructor.
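+/// Returns true if the alias could not be formed and the destructor must be
+/// emitted normally. For example, given
+///   struct A { ~A(); };
+///   struct B : A { ~B() {} };
+/// B's base destructor does nothing beyond invoking ~A(), so it can be
+/// emitted as an alias for A's base destructor.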
+bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // If the destructor doesn't have a trivial body, we have to emit it
+ // separately.
+ if (!D->hasTrivialBody())
+ return true;
+
+ const CXXRecordDecl *Class = D->getParent();
+
+ // If we need to manipulate a VTT parameter, give up.
+ if (Class->getNumVBases()) {
+ // Extra Credit: passing extra parameters is perfectly safe
+    // in many calling conventions, so only bail out if the dtor's
+    // calling convention is nonstandard.
+ return true;
+ }
+
+ // If any field has a non-trivial destructor, we have to emit the
+ // destructor separately.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I)
+ if ((*I)->getType().isDestructedType())
+ return true;
+
+ // Try to find a unique base class with a non-trivial destructor.
+ const CXXRecordDecl *UniqueBase = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+
+ // We're in the base destructor, so skip virtual bases.
+ if (I->isVirtual()) continue;
+
+ // Skip base classes with trivial destructors.
+ const CXXRecordDecl *Base
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (Base->hasTrivialDestructor()) continue;
+
+ // If we've already found a base class with a non-trivial
+ // destructor, give up.
+ if (UniqueBase) return true;
+ UniqueBase = Base;
+ }
+
+ // If we didn't find any bases with a non-trivial destructor, then
+ // the base destructor is actually effectively trivial, which can
+ // happen if it was needlessly user-defined or if there are virtual
+ // bases with non-trivial destructors.
+ if (!UniqueBase)
+ return true;
+
+  // If we don't have a definition for the destructor yet, don't
+  // emit. We can't emit aliases to declarations; that's just not
+  // how aliases work.
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+ if (!BaseD->isImplicit() && !BaseD->hasBody())
+ return true;
+
+ // If the base is at a non-zero offset, give up.
+ const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
+ if (ClassLayout.getBaseClassOffsetInBits(UniqueBase) != 0)
+ return true;
+
+ return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
+ GlobalDecl(BaseD, Dtor_Base));
+}
+
+/// Try to emit a definition as a global alias for another definition.
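+/// Returns true on failure, in which case the caller should emit the
+/// definition normally.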
+bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+  // The alias will use the linkage of the referent. If we can't
+ // support aliases with that linkage, fail.
+ llvm::GlobalValue::LinkageTypes Linkage
+ = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));
+
+ switch (Linkage) {
+ // We can definitely emit aliases to definitions with external linkage.
+ case llvm::GlobalValue::ExternalLinkage:
+ case llvm::GlobalValue::ExternalWeakLinkage:
+ break;
+
+ // Same with local linkage.
+ case llvm::GlobalValue::InternalLinkage:
+ case llvm::GlobalValue::PrivateLinkage:
+ case llvm::GlobalValue::LinkerPrivateLinkage:
+ break;
+
+ // We should try to support linkonce linkages.
+ case llvm::GlobalValue::LinkOnceAnyLinkage:
+ case llvm::GlobalValue::LinkOnceODRLinkage:
+ return true;
+
+ // Other linkages will probably never be supported.
+ default:
+ return true;
+ }
+
+ llvm::GlobalValue::LinkageTypes TargetLinkage
+ = getFunctionLinkage(cast<FunctionDecl>(TargetDecl.getDecl()));
+
+ if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
+ return true;
+
+ // Derive the type for the alias.
+ llvm::PointerType *AliasType
+ = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
+
+  // Find the referent. Some aliases might require a bitcast, in
+ // which case the caller is responsible for ensuring the soundness
+ // of these semantics.
+ llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
+ llvm::Constant *Aliasee = Ref;
+ if (Ref->getType() != AliasType)
+ Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+
+ // Create the alias with no name.
+ llvm::GlobalAlias *Alias =
+ new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
+
+ // Switch any previous uses to the alias.
+ StringRef MangledName = getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ assert(Entry->isDeclaration() && "definition already exists for alias");
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName);
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+
+ return false;
+}
+
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // The constructor used for constructing this as a complete class;
+  // constructs the virtual bases, then calls the base constructor.
+ if (!D->getParent()->isAbstract()) {
+ // We don't need to emit the complete ctor if the class is abstract.
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ }
+
+ // The constructor used for constructing this as a base class;
+ // ignores virtual bases.
+ EmitGlobal(GlobalDecl(D, Ctor_Base));
+}
+
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType) {
+ // The complete constructor is equivalent to the base constructor
+ // for classes with no virtual bases. Try to emit it as an alias.
+ if (ctorType == Ctor_Complete &&
+ !ctor->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
+ GlobalDecl(ctor, Ctor_Base)))
+ return;
+
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
+
+ llvm::Function *fn =
+ cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
+ setFunctionLinkage(ctor, fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
+
+ SetFunctionDefinitionAttributes(ctor, fn);
+ SetLLVMFunctionAttributesForDefinition(ctor, fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType,
+ const CGFunctionInfo *fnInfo) {
+ GlobalDecl GD(ctor, ctorType);
+
+ StringRef name = getMangledName(GD);
+ if (llvm::GlobalValue *existing = GetGlobalValue(name))
+ return existing;
+
+ if (!fnInfo)
+ fnInfo = &getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
+
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
+ /*ForVTable=*/false));
+}
+
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The destructor in a virtual table is always a 'deleting'
+ // destructor, which calls the complete destructor and then uses the
+ // appropriate operator delete.
+ if (D->isVirtual())
+ EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+
+ // The destructor used for destructing this as a most-derived class;
+  // calls the base destructor and then destroys any virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Complete));
+
+ // The destructor used for destructing this as a base class; ignores
+ // virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Base));
+}
+
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType) {
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (dtorType == Dtor_Complete &&
+ !dtor->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
+ GlobalDecl(dtor, Dtor_Base)))
+ return;
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
+ return;
+
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXDestructor(dtor, dtorType);
+
+ llvm::Function *fn =
+ cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
+ setFunctionLinkage(dtor, fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
+
+ SetFunctionDefinitionAttributes(dtor, fn);
+ SetLLVMFunctionAttributesForDefinition(dtor, fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType,
+ const CGFunctionInfo *fnInfo) {
+ GlobalDecl GD(dtor, dtorType);
+
+ StringRef name = getMangledName(GD);
+ if (llvm::GlobalValue *existing = GetGlobalValue(name))
+ return existing;
+
+ if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
+
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
+ /*ForVTable=*/false));
+}
+
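+/// Load the vtable pointer out of the object, index into the vtable, and
+/// load the function pointer to call.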
+static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
+ llvm::Value *This, llvm::Type *Ty) {
+ Ty = Ty->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ return CGF.Builder.CreateLoad(VFuncPtr);
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ llvm::Type *Ty) {
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
+/// BuildAppleKextVirtualCall - This routine supports gcc's kext ABI by
+/// making an indirect call to a virtual function. The call is made by
+/// indexing directly into the vtable.
+llvm::Value *
+CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty) {
+ llvm::Value *VTable = 0;
+ assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+ "BuildAppleKextVirtualCall - bad Qual kind");
+
+ const Type *QTy = Qual->getAsType();
+ QualType T = QualType(QTy, 0);
+ const RecordType *RT = T->getAs<RecordType>();
+ assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+ return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
+
+ VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+ assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
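+  // GetAddrOfVTable returns the start of the vtable global rather than the
+  // address point an object's vptr refers to, so fold the address point
+  // into the index.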
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ return Builder.CreateLoad(VFuncPtr);
+}
+
+/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect
+/// vtable call to a virtual destructor. It returns 0 if it could not do so.
+llvm::Value *
+CodeGenFunction::BuildAppleKextVirtualDestructorCall(
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD) {
+ llvm::Value * Callee = 0;
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(DD);
+  // FIXME: the Dtor_Base destructor is always called directly; it needs to
+  // be inline-expanded into the caller somehow. -O does that, but we need
+  // to support -O0 as well.
+ if (MD->isVirtual() && Type != Dtor_Base) {
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo =
+ CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
+
+ llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ Callee = Builder.CreateLoad(VFuncPtr);
+ }
+ return Callee;
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, llvm::Type *Ty) {
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
new file mode 100644
index 0000000..befebbe
--- /dev/null
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -0,0 +1,199 @@
+//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCXXABI::~CGCXXABI() { }
+
+static void ErrorUnsupportedABI(CodeGenFunction &CGF,
+ StringRef S) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet compile %1 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
+ QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
+llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(MD->getType(),
+ MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
+ return GetBogusMemberPointer(CGM, MPT);
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ params.push_back(ThisDecl);
+ getThisDecl(CGF) = ThisDecl;
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+ // Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ getThisValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ return CharUnits::Zero();
+}
+
+llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return 0;
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr, QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ ErrorUnsupportedABI(CGF, "array cookie reading");
+
+ // This should be enough to avoid assertions.
+ NumElements = 0;
+ AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
+ CookieSize = CharUnits::Zero();
+}
+
+void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *GV,
+ bool PerformInit) {
+ ErrorUnsupportedABI(CGF, "static local variable initialization");
+}
+
+/// Returns the adjustment, in bytes, required for the given
+/// member-pointer operation. Returns null if no adjustment is
+/// required.
+llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ QualType derivedType;
+ if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
+ derivedType = E->getSubExpr()->getType();
+ else
+ derivedType = E->getType();
+
+ const CXXRecordDecl *derivedClass =
+ derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
+
+ return CGM.GetNonVirtualBaseClassOffset(derivedClass,
+ E->path_begin(),
+ E->path_end());
+}
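+
+// Illustration only: the adjustment computed above is what makes member
+// pointer conversions like this sketch work (assuming a typical non-virtual
+// layout):
+//
+//   struct A { int a; };
+//   struct B { int b; };
+//   struct C : A, B {};
+//
+//   int B::*pb = &B::b;
+//   int C::*pc = pb; // base-to-derived: add the offset of B within C,
+//                    // i.e. GetNonVirtualBaseClassOffset over the cast path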
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
new file mode 100644
index 0000000..4e045f5
--- /dev/null
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -0,0 +1,262 @@
+//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CXXABI_H
+#define CLANG_CODEGEN_CXXABI_H
+
+#include "clang/Basic/LLVM.h"
+
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Constant;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+ class CastExpr;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class CXXRecordDecl;
+ class FieldDecl;
+ class MangleContext;
+
+namespace CodeGen {
+ class CodeGenFunction;
+ class CodeGenModule;
+
+/// Implements C++ ABI-specific code generation functions.
+class CGCXXABI {
+protected:
+ CodeGenModule &CGM;
+ OwningPtr<MangleContext> MangleCtx;
+
+ CGCXXABI(CodeGenModule &CGM)
+ : CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
+
+protected:
+ ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
+ return CGF.CXXABIThisDecl;
+ }
+ llvm::Value *&getThisValue(CodeGenFunction &CGF) {
+ return CGF.CXXABIThisValue;
+ }
+
+ ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
+ return CGF.CXXVTTDecl;
+ }
+ llvm::Value *&getVTTValue(CodeGenFunction &CGF) {
+ return CGF.CXXVTTValue;
+ }
+
+ /// Build a parameter variable suitable for 'this'.
+ void BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
+
+ /// Perform prologue initialization of the 'this' parameter variable
+ /// emitted by BuildThisParam.
+ void EmitThisParam(CodeGenFunction &CGF);
+
+ ASTContext &getContext() const { return CGM.getContext(); }
+
+public:
+
+ virtual ~CGCXXABI();
+
+ /// Gets the mangle context.
+ MangleContext &getMangleContext() {
+ return *MangleCtx;
+ }
+
+ /// Find the LLVM type used to represent the given member pointer
+ /// type.
+ virtual llvm::Type *
+ ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ /// Load a member function from an object and a member function
+ /// pointer. Apply the this-adjustment and set 'This' to the
+ /// adjusted value.
+ virtual llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Calculate an l-value from an object and a data member pointer.
+ virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion.
+ virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion on a constant value.
+ virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
+
+ /// Return true if the given member pointer can be zero-initialized
+ /// (in the C++ sense) with an LLVM zeroinitializer.
+ virtual bool isZeroInitializable(const MemberPointerType *MPT);
+
+ /// Create a null member pointer of the given type.
+ virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ /// Create a member pointer for the given method.
+ virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+
+ /// Create a member pointer for the given field.
+ virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
+
+ /// Create a member pointer for the given member pointer constant.
+ virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+
+ /// Emit a comparison between two member pointers. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ /// Determine if a member pointer is non-null. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+protected:
+ /// A utility method for computing the offset required for the given
+ /// base-to-derived or derived-to-base member-pointer conversion.
+ /// Does not handle virtual conversions (in case we ever fully
+ /// support an ABI that allows this). Returns null if no adjustment
+ /// is required.
+ llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
+
+public:
+ /// Build the signature of the given constructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void', and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI) and
+ /// will have the formal parameters added to it afterwards.
+ ///
+ /// If there are ever any ABIs where the implicit parameters are
+ /// intermixed with the formal parameters, we can address those
+ /// then.
+ virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the signature of the given destructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void' and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI).
+ virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the ABI-specific portion of the parameter list for a
+ /// function. This generally involves a 'this' parameter and
+ /// possibly some extra data for constructors and destructors.
+ ///
+ /// ABIs may also choose to override the return type, which has been
+ /// initialized with the formal return type of the function.
+ virtual void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) = 0;
+
+ /// Emit the ABI-specific prolog for the function.
+ virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+
+ virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType);
+
+ /**************************** Array cookies ******************************/
+
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. May return 0 to indicate that no
+ /// array cookie is required.
+ ///
+ /// Several cases are filtered out before this method is called:
+ /// - non-array allocations never need a cookie
+ /// - calls to ::operator new(size_t, void*) never need a cookie
+ ///
+ /// \param ElementType - the allocated type of the expression,
+ /// i.e. the pointee type of the expression result type
+ virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param NewPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param NumElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case
+ /// \param ElementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+
+ /// Reads the array cookie associated with the given pointer,
+ /// if it has one.
+ ///
+ /// \param Ptr - a pointer to the first element in the array
+ /// \param ElementType - the base element type of elements of the array
+ /// \param NumElements - an out parameter which will be initialized
+ /// with the number of elements allocated, or zero if there is no
+ /// cookie
+ /// \param AllocPtr - an out parameter which will be initialized
+ /// with a char* pointing to the address returned by the allocation
+ /// function
+ /// \param CookieSize - an out parameter which will be initialized
+ /// with the size of the cookie, or zero if there is no cookie
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ /*************************** Static local guards ****************************/
+
+ /// Emits the guarded initializer and destructor setup for the given
+ /// variable, given that it couldn't be emitted as a constant.
+ /// If \p PerformInit is false, the initialization has been folded to a
+ /// constant and should not be performed.
+ ///
+ /// The variable may be:
+ /// - a static local variable
+ /// - a static data member of a class template instantiation
+ virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
+
+};
+
+/// Creates an instance of a C++ ABI class.
+CGCXXABI *CreateARMCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..82ee4fc
--- /dev/null
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,2201 @@
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Transforms/Utils/Local.h"
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+ switch (CC) {
+ default: return llvm::CallingConv::C;
+ case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
+ case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
+ case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
+ // TODO: add support for CC_X86Pascal to llvm
+ }
+}
+
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// qualification.
+/// FIXME: address space qualification?
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+ QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+}
+
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+ return MD->getType()->getCanonicalTypeUnqualified()
+ .getAs<FunctionProtoType>();
+}
+
+/// Returns the "extra-canonicalized" return type, which discards
+/// qualifiers on the return type. Codegen doesn't care about them,
+/// and it makes ABI code a little easier to be able to assume that
+/// all parameter and return types are top-level unqualified.
+static CanQualType GetReturnType(QualType RetTy) {
+ return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
+}
+
+/// Arrange the argument and result information for a value of the
+/// given unprototyped function type.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+ // When translating an unprototyped function type, always use a
+ // variadic type.
+ return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
+}
+
+/// Arrange the argument and result information for a value of the
+/// given function type, on top of any implicit parameters already
+/// stored.
+static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &argTypes,
+ CanQual<FunctionProtoType> FTP) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ argTypes.push_back(FTP->getArgType(i));
+ CanQualType resultType = FTP->getResultType().getUnqualifiedType();
+ return CGT.arrangeFunctionType(resultType, argTypes,
+ FTP->getExtInfo(), required);
+}
+
+/// Arrange the argument and result information for a value of the
+/// given function type.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+ return ::arrangeFunctionType(*this, argTypes, FTP);
+}
+
+static CallingConv getCallingConventionForDecl(const Decl *D) {
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<StdCallAttr>())
+ return CC_X86StdCall;
+
+ if (D->hasAttr<FastCallAttr>())
+ return CC_X86FastCall;
+
+ if (D->hasAttr<ThisCallAttr>())
+ return CC_X86ThisCall;
+
+ if (D->hasAttr<PascalAttr>())
+ return CC_X86Pascal;
+
+ if (PcsAttr *PCS = D->getAttr<PcsAttr>())
+ return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+
+ return CC_C;
+}
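+
+// For illustration (not exhaustive), the attributes checked above come from
+// declarations such as:
+//
+//   void __attribute__((stdcall)) f(void);      // CC_X86StdCall
+//   void __attribute__((fastcall)) g(void);     // CC_X86FastCall
+//   void __attribute__((pcs("aapcs"))) h(void); // CC_AAPCS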
+
+/// Arrange the argument and result information for a call to an
+/// unknown C++ non-static member function of the given abstract type.
+/// The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+
+ // Add the 'this' pointer.
+ argTypes.push_back(GetThisType(Context, RD));
+
+ return ::arrangeFunctionType(*this, argTypes,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for a declaration or
+/// definition of the given C++ non-static member function. The
+/// member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
+ assert(!isa<CXXConstructorDecl>(MD) && "wrong method for contructors!");
+ assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
+ CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+
+ if (MD->isInstance()) {
+ // The abstract case is perfectly fine.
+ return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
+ }
+
+ return arrangeFunctionType(prototype);
+}
+
+/// Arrange the argument and result information for a declaration
+/// or definition to the given constructor variant.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
+ CXXCtorType ctorKind) {
+ SmallVector<CanQualType, 16> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
+
+ TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+
+ // Add the formal parameters.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ argTypes.push_back(FTP->getArgType(i));
+
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
+}
+
+/// Arrange the argument and result information for a declaration,
+/// definition, or call to the given destructor variant. It so
+/// happens that all three cases produce the same information.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType dtorKind) {
+ SmallVector<CanQualType, 2> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
+
+ TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
+ RequiredArgs::All);
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return arrangeCXXMethodDeclaration(MD);
+
+ CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+
+ assert(isa<FunctionType>(FTy));
+
+ // When declaring a function without a prototype, always use a
+ // non-variadic type.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
+ return arrangeFunctionType(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
+ }
+
+ assert(isa<FunctionProtoType>(FTy));
+ return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of an Objective-C method.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
+ // It happens that this is the same as a call with no optional
+ // arguments, except also using the formal 'self' type.
+ return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
+}
+
+/// Arrange the argument and result information for the function type
+/// through which to perform a send to the given Objective-C method,
+/// using the given receiver type. The receiver type is not always
+/// the 'self' type of the method or even an Objective-C pointer type.
+/// This is *not* the right method for actually performing such a
+/// message send, due to the possibility of optional arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ SmallVector<CanQualType, 16> argTys;
+ argTys.push_back(Context.getCanonicalParamType(receiverType));
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+ // FIXME: Kill copy?
+ for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
+ e = MD->param_end(); i != e; ++i) {
+ argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ }
+
+ FunctionType::ExtInfo einfo;
+ einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
+
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ MD->hasAttr<NSReturnsRetainedAttr>())
+ einfo = einfo.withProducesResult(true);
+
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+
+ return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
+ // FIXME: Do we need to handle ObjCMethodDecl?
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
+ return arrangeCXXDestructor(DD, GD.getDtorType());
+
+ return arrangeFunctionDeclaration(FD);
+}
+
+/// Figure out the rules for calling a function with the given formal
+/// type using the given arguments. The arguments are necessary
+/// because the function might be unprototyped, in which case it's
+/// target-dependent in crazy ways.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
+ RequiredArgs required = RequiredArgs::All;
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+ if (proto->isVariadic())
+ required = RequiredArgs(proto->getNumArgs());
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+ required = RequiredArgs(0);
+ }
+
+ return arrangeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
+ i != e; ++i)
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
+ const FunctionArgList &args,
+ const FunctionType::ExtInfo &info,
+ bool isVariadic) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
+ i != e; ++i)
+ argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+
+ RequiredArgs required =
+ (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
+}
+
+const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
+ return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
+}
+
+/// Arrange the argument and result information for an abstract value
+/// of a given function type. This is the method which all of the
+/// above functions ultimately defer to.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
+#ifndef NDEBUG
+ for (ArrayRef<CanQualType>::const_iterator
+ I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
+ assert(I->isCanonicalAsParam());
+#endif
+
+ unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
+
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
+
+ void *insertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info. We co-allocate the ArgInfos.
+ FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
+ FunctionInfos.InsertNode(FI, insertPos);
+
+ bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ assert(inserted && "Recursively being processed?");
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &retInfo = FI->getReturnInfo();
+ if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
+ retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+
+ for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
+ I != E; ++I)
+ if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
+ I->info.setCoerceToType(ConvertType(I->type));
+
+ bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+ assert(erased && "Not in set?");
+
+ return *FI;
+}
+
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+ const FunctionType::ExtInfo &info,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required) {
+ void *buffer = operator new(sizeof(CGFunctionInfo) +
+ sizeof(ArgInfo) * (argTypes.size() + 1));
+ CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+ FI->CallingConvention = llvmCC;
+ FI->EffectiveCallingConvention = llvmCC;
+ FI->ASTCallingConvention = info.getCC();
+ FI->NoReturn = info.getNoReturn();
+ FI->ReturnsRetained = info.getProducesResult();
+ FI->Required = required;
+ FI->HasRegParm = info.getHasRegParm();
+ FI->RegParm = info.getRegParm();
+ FI->NumArgs = argTypes.size();
+ FI->getArgsBuffer()[0].type = resultType;
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
+ FI->getArgsBuffer()[i + 1].type = argTypes[i];
+ return FI;
+}
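+
+// Illustration only: the co-allocation in create() is the usual trailing-array
+// idiom - one buffer holds the object followed by its ArgInfo array. The same
+// pattern in a standalone sketch:
+//
+//   struct Packet {
+//     unsigned NumItems;
+//     int *items() { return reinterpret_cast<int *>(this + 1); }
+//   };
+//   void *buf = ::operator new(sizeof(Packet) + n * sizeof(int));
+//   Packet *p = new (buf) Packet;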
+
+/***/
+
+void CodeGenTypes::GetExpandedTypes(QualType type,
+ SmallVectorImpl<llvm::Type*> &expandedTypes) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
+ uint64_t NumElts = AT->getSize().getZExtValue();
+ for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
+ GetExpandedTypes(AT->getElementType(), expandedTypes);
+ } else if (const RecordType *RT = type->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+ if (RD->isUnion()) {
+ // Unions can be here only in degenerate cases - all the fields are the
+ // same after flattening. Thus we have to use the "largest" field.
+ const FieldDecl *LargestFD = 0;
+ CharUnits UnionSize = CharUnits::Zero();
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
+ if (UnionSize < FieldSize) {
+ UnionSize = FieldSize;
+ LargestFD = FD;
+ }
+ }
+ if (LargestFD)
+ GetExpandedTypes(LargestFD->getType(), expandedTypes);
+ } else {
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ GetExpandedTypes(FD->getType(), expandedTypes);
+ }
+ }
+ } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
+ llvm::Type *EltTy = ConvertType(CT->getElementType());
+ expandedTypes.push_back(EltTy);
+ expandedTypes.push_back(EltTy);
+ } else
+ expandedTypes.push_back(ConvertType(type));
+}
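+
+// Illustration only: expanding 'struct P { int x; float y[2]; }' yields the
+// flattened parameter list (i32, float, float); a '_Complex double' expands
+// to (double, double).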
+
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator AI) {
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ unsigned NumElts = AT->getSize().getZExtValue();
+ QualType EltTy = AT->getElementType();
+ for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
+ LValue LV = MakeAddrLValue(EltAddr, EltTy);
+ AI = ExpandTypeFromArgs(EltTy, LV, AI);
+ }
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+ if (RD->isUnion()) {
+ // Unions can be here only in degenerate cases - all the fields are the
+ // same after flattening. Thus we have to use the "largest" field.
+ const FieldDecl *LargestFD = 0;
+ CharUnits UnionSize = CharUnits::Zero();
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
+ if (UnionSize < FieldSize) {
+ UnionSize = FieldSize;
+ LargestFD = FD;
+ }
+ }
+ if (LargestFD) {
+ // FIXME: What are the right qualifiers here?
+ LValue SubLV = EmitLValueForField(LV, LargestFD);
+ AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
+ }
+ } else {
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue SubLV = EmitLValueForField(LV, FD);
+ AI = ExpandTypeFromArgs(FT, SubLV, AI);
+ }
+ }
+ } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType EltTy = CT->getElementType();
+ llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
+ EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+ llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
+ EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV);
+ ++AI;
+ }
+
+ return AI;
+}
+
+/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
+/// accessing some number of bytes out of it, try to gep into the struct to get
+/// at its inner goodness. Dive as deep as possible without entering an element
+/// with an in-memory size smaller than DstSize.
+static llvm::Value *
+EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+ llvm::StructType *SrcSTy,
+ uint64_t DstSize, CodeGenFunction &CGF) {
+ // We can't dive into a zero-element struct.
+ if (SrcSTy->getNumElements() == 0) return SrcPtr;
+
+ llvm::Type *FirstElt = SrcSTy->getElementType(0);
+
+ // If the first elt is at least as large as what we're looking for, or if the
+ // first element is the same size as the whole struct, we can enter it.
+ uint64_t FirstEltSize =
+ CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ if (FirstEltSize < DstSize &&
+ FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ return SrcPtr;
+
+ // GEP into the first element.
+ SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
+
+ // If the first element is a struct, recurse.
+ llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+
+ return SrcPtr;
+}
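+
+// Illustration only: given SrcSTy = { { i32 }, i8 } (alloc size 8), the code
+// above dives to the inner i32 when DstSize is 4, because the first element
+// is large enough; with DstSize 8 it stays at the outer struct, since the
+// 4-byte first element is smaller than both DstSize and the whole struct.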
+
+/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
+/// are either integers or pointers. This does a truncation of the value if it
+/// is too large or a zero extension if it is too small.
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
+ llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ if (Val->getType() == Ty)
+ return Val;
+
+ if (isa<llvm::PointerType>(Val->getType())) {
+ // If this is Pointer->Pointer avoid conversion to and from int.
+ if (isa<llvm::PointerType>(Ty))
+ return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
+
+ // Convert the pointer to an integer so we can play with its width.
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
+ }
+
+ llvm::Type *DestIntTy = Ty;
+ if (isa<llvm::PointerType>(DestIntTy))
+ DestIntTy = CGF.IntPtrTy;
+
+ if (Val->getType() != DestIntTy)
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+
+ if (isa<llvm::PointerType>(Ty))
+ Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
+ return Val;
+}
+
+
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which are not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+ llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+
+ // If SrcTy and Ty are the same, just do a load.
+ if (SrcTy == Ty)
+ return CGF.Builder.CreateLoad(SrcPtr);
+
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+ SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
+ (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
+ return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
+ }
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user-specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have
+ // access to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ return Load;
+ }
+
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
+}
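+
+// Illustration only: the through-memory path above is the moral equivalent of
+// this C++ type-pun (a sketch, assuming sizeof(Src) <= sizeof(Dst) and that
+// <cstring> is available):
+//
+//   template <class Dst, class Src>
+//   Dst coerce_load(const Src &s) {
+//     Dst d;                            // bits beyond Src remain undefined
+//     std::memcpy(&d, &s, sizeof(Src)); // copy all of the source's bytes
+//     return d;
+//   }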
+
+// Function to store a first-class aggregate into memory. We prefer to
+// store the elements rather than the aggregate to be more friendly to
+// fast-isel.
+// FIXME: Do we need to recurse here?
+static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
+ llvm::Value *DestPtr, bool DestIsVolatile,
+ bool LowAlignment) {
+ // Prefer scalar stores to first-class aggregate stores.
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(Val->getType())) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
+ llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
+ DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
+ }
+ } else {
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
+ }
+}
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ llvm::Value *DstPtr,
+ bool DstIsVolatile,
+ CodeGenFunction &CGF) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ if (SrcTy == DstTy) {
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
+ DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ }
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
+ (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize <= DstSize) {
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user-specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have
+ // access to that information.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+ CGF.Builder.CreateStore(Src, Tmp);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
+ }
+}
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getContext().getTargetInfo().useObjCFPRetForRealType(
+ TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
+
+bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
+ if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
+ if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::LongDouble)
+ return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
+ }
+ }
+
+ return false;
+}
+
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
+ return GetFunctionType(FI);
+}
+
+llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
+ SmallVector<llvm::Type*, 8> argTypes;
+ llvm::Type *resultType = 0;
+
+ const ABIArgInfo &retAI = FI.getReturnInfo();
+ switch (retAI.getKind()) {
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ resultType = retAI.getCoerceToType();
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+
+ QualType ret = FI.getReturnType();
+ llvm::Type *ty = ConvertType(ret);
+ unsigned addressSpace = Context.getTargetAddressSpace(ret);
+ argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+ break;
+ }
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ const ABIArgInfo &argAI = it->info;
+
+ switch (argAI.getKind()) {
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ llvm::Type *LTy = ConvertTypeForMem(it->type);
+ argTypes.push_back(LTy->getPointerTo());
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
+ // If the coerce-to type is a first class aggregate, flatten it. Either
+ // way is semantically identical, but fast-isel and the optimizer
+ // generally like scalar values better than FCAs.
+ llvm::Type *argType = argAI.getCoerceToType();
+ if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
+ for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
+ argTypes.push_back(st->getElementType(i));
+ } else {
+ argTypes.push_back(argType);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ GetExpandedTypes(it->type, argTypes);
+ break;
+ }
+ }
+
+ bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
+ return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
+}
+
+llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ if (!isFuncTypeConvertible(FPT))
+ return llvm::StructType::get(getLLVMContext());
+
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &arrangeCXXMethodDeclaration(MD);
+ return GetFunctionType(*Info);
+}
+
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv) {
+ llvm::Attributes FuncAttrs;
+ llvm::Attributes RetAttrs;
+
+ CallingConv = FI.getEffectiveCallingConvention();
+
+ if (FI.isNoReturn())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs |= llvm::Attribute::ReturnsTwice;
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
+ if (FPT && FPT->isNothrow(getContext()))
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
+
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs |= llvm::Attribute::ReturnsTwice;
+
+ // 'const' and 'pure' attribute functions are also nounwind.
+ if (TargetDecl->hasAttr<ConstAttr>()) {
+ FuncAttrs |= llvm::Attribute::ReadNone;
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ } else if (TargetDecl->hasAttr<PureAttr>()) {
+ FuncAttrs |= llvm::Attribute::ReadOnly;
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
+ if (TargetDecl->hasAttr<MallocAttr>())
+ RetAttrs |= llvm::Attribute::NoAlias;
+ }
+
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs |= llvm::Attribute::OptimizeForSize;
+ if (CodeGenOpts.DisableRedZone)
+ FuncAttrs |= llvm::Attribute::NoRedZone;
+ if (CodeGenOpts.NoImplicitFloat)
+ FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+
+ QualType RetTy = FI.getReturnType();
+ unsigned Index = 1;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (RetTy->hasSignedIntegerRepresentation())
+ RetAttrs |= llvm::Attribute::SExt;
+ else if (RetTy->hasUnsignedIntegerRepresentation())
+ RetAttrs |= llvm::Attribute::ZExt;
+ break;
+ case ABIArgInfo::Direct:
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Indirect:
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attribute::StructRet));
+ ++Index;
+ // sret disables readnone and readonly
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ if (RetAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+ // FIXME: RegParm should be reduced in case of global register variable.
+ signed RegParm;
+ if (FI.getHasRegParm())
+ RegParm = FI.getRegParm();
+ else
+ RegParm = CodeGenOpts.NumRegisterParameters;
+
+ unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ QualType ParamType = it->type;
+ const ABIArgInfo &AI = it->info;
+ llvm::Attributes Attrs;
+
+ // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
+ // have the corresponding parameter variable. It doesn't make
+ // sense to do it here because parameters are so messed up.
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerOrEnumerationType())
+ Attrs |= llvm::Attribute::SExt;
+ else if (ParamType->isUnsignedIntegerOrEnumerationType())
+ Attrs |= llvm::Attribute::ZExt;
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType() ||
+ ParamType->isReferenceType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attrs |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+
+ // Increment Index if there is padding.
+ Index += (AI.getPaddingType() != 0);
+
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements()-1; // 1 will be added below.
+ break;
+
+ case ABIArgInfo::Indirect:
+ if (AI.getIndirectByVal())
+ Attrs |= llvm::Attribute::ByVal;
+
+ Attrs |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Ignore:
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Expand: {
+ SmallVector<llvm::Type*, 8> types;
+ // FIXME: This is rather inefficient. Do we ever actually need to do
+ // anything here? The result should just be reconstructed on the other
+ // side, so extension should be a non-issue.
+ getTypes().GetExpandedTypes(ParamType, types);
+ Index += types.size();
+ continue;
+ }
+ }
+
+ if (Attrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
+ ++Index;
+ }
+ if (FuncAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
+
+/// An argument came in as a promoted argument; demote it back to its
+/// declared type.
+static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
+ const VarDecl *var,
+ llvm::Value *value) {
+ llvm::Type *varType = CGF.ConvertType(var->getType());
+
+ // This can happen with promotions that actually don't change the
+ // underlying type, like the enum promotions.
+ if (value->getType() == varType) return value;
+
+ assert((varType->isIntegerTy() || varType->isFloatingPointTy())
+ && "unexpected promotion type");
+
+ if (isa<llvm::IntegerType>(varType))
+ return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
+
+ return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
+}
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ // If this is an implicit-return-zero function, go ahead and
+ // initialize the return value. TODO: it might be nice to have
+ // a more general mechanism for this that didn't require synthesized
+ // return statements.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (FD->hasImplicitReturnZero()) {
+ QualType RetTy = FD->getResultType().getUnqualifiedType();
+ llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+ Builder.CreateStore(Zero, ReturnValue);
+ }
+ }
+
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ // Emit allocs for param decls. Give the LLVM Argument nodes names.
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+
+ // Name the struct return argument.
+ if (CGM.ReturnTypeUsesSRet(FI)) {
+ AI->setName("agg.result");
+ AI->addAttr(llvm::Attribute::NoAlias);
+ ++AI;
+ }
+
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ unsigned ArgNo = 1;
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it, ++ArgNo) {
+ const VarDecl *Arg = *i;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ bool isPromoted =
+ isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ llvm::Value *V = AI;
+
+ if (hasAggregateLLVMType(Ty)) {
+ // Aggregates and complex variables are accessed by reference. All we
+ // need to do is realign the value, if requested.
+ if (ArgI.getIndirectRealign()) {
+ llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
+
+ // Copy from the incoming argument pointer to the temporary with the
+ // appropriate alignment.
+ //
+ // FIXME: We should have a common utility for generating an aggregate
+ // copy.
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits Size = getContext().getTypeSizeInChars(Ty);
+ llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+ llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
+ Builder.CreateMemCpy(Dst,
+ Src,
+ llvm::ConstantInt::get(IntPtrTy,
+ Size.getQuantity()),
+ ArgI.getIndirectAlign(),
+ false);
+ V = AlignedTemp;
+ }
+ } else {
+ // Load scalar value from indirect argument.
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
+ V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+ }
+ EmitParmDecl(*Arg, V, ArgNo);
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value *V = AI;
+
+ if (Arg->getType().isRestrictQualified())
+ AI->addAttr(llvm::Attribute::NoAlias);
+
+ // Ensure the argument is the correct type.
+ if (V->getType() != ArgI.getCoerceToType())
+ V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+
+ EmitParmDecl(*Arg, V, ArgNo);
+ break;
+ }
+
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
+
+ // The alignment we need to use is the max of the requested alignment for
+ // the argument and the alignment required by our access code below.
+ unsigned AlignmentToUse =
+ CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ AlignmentToUse = std::max(AlignmentToUse,
+ (unsigned)getContext().getDeclAlign(Arg).getQuantity());
+
+ Alloca->setAlignment(AlignmentToUse);
+ llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
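+ // For example, a { i32, i32 } coerce-to type arrives as two separate
+ // i32 arguments (named "<arg>.coerce0" and "<arg>.coerce1" below) and
+ // is reassembled field-by-field into the alloca.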
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (STy && STy->getNumElements() > 1) {
+ uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ if (SrcSize <= DstSize) {
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ llvm::AllocaInst *TempAlloca =
+ CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
+ TempAlloca->setAlignment(AlignmentToUse);
+ llvm::Value *TempV = TempAlloca;
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+
+ Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
+ }
+ } else {
+ // Simple case, just do a coerced store of the argument into the alloca.
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce");
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+ }
+
+ // Match to what EmitParmDecl is expecting for this type.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+ }
+ EmitParmDecl(*Arg, V, ArgNo);
+ continue; // Skip ++AI increment, already done.
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
+ CharUnits Align = getContext().getDeclAlign(Arg);
+ Alloca->setAlignment(Align.getQuantity());
+ LValue LV = MakeAddrLValue(Alloca, Ty, Align);
+ llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
+ EmitParmDecl(*Arg, Alloca, ArgNo);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty))
+ EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
+ else
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
+ ArgNo);
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
+ }
+
+ ++AI;
+ }
+ assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
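+/// Erase a chain of dead bitcasts: while the given instruction is unused,
+/// delete it and continue with the value it was cast from.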
+static void eraseUnusedBitCasts(llvm::Instruction *insn) {
+ while (insn->use_empty()) {
+ llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
+ if (!bitcast) return;
+
+ // This is "safe" because we would have used a ConstantExpr otherwise.
+ insn = cast<llvm::Instruction>(bitcast->getOperand(0));
+ bitcast->eraseFromParent();
+ }
+}
+
+/// Try to emit a fused autorelease of a return result.
+static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // We must be immediately following the cast; i.e. the result must be
+ // the last instruction in the current block.
+ llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
+ if (BB->empty()) return 0;
+ if (&BB->back() != result) return 0;
+
+ llvm::Type *resultType = result->getType();
+
+ // result is in a BasicBlock and is therefore an Instruction.
+ llvm::Instruction *generator = cast<llvm::Instruction>(result);
+
+ SmallVector<llvm::Instruction*,4> insnsToKill;
+
+ // Look for:
+ // %generator = bitcast %type1* %generator2 to %type2*
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
+ // We would have emitted this as a constant if the operand weren't
+ // an Instruction.
+ generator = cast<llvm::Instruction>(bitcast->getOperand(0));
+
+ // Require the generator to be immediately followed by the cast.
+ if (generator->getNextNode() != bitcast)
+ return 0;
+
+ insnsToKill.push_back(bitcast);
+ }
+
+ // Look for:
+ // %generator = call i8* @objc_retain(i8* %originalResult)
+ // or
+ // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+ llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
+ if (!call) return 0;
+
+ bool doRetainAutorelease;
+
+ if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
+ doRetainAutorelease = true;
+ } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
+ .objc_retainAutoreleasedReturnValue) {
+ doRetainAutorelease = false;
+
+ // Look for an inline asm immediately preceding the call and kill it, too.
+ llvm::Instruction *prev = call->getPrevNode();
+ if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
+ if (asmCall->getCalledValue()
+ == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
+ insnsToKill.push_back(prev);
+ } else {
+ return 0;
+ }
+
+ result = call->getArgOperand(0);
+ insnsToKill.push_back(call);
+
+ // Keep killing bitcasts, for sanity. Note that we no longer care
+ // about precise ordering as long as there's exactly one use.
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
+ if (!bitcast->hasOneUse()) break;
+ insnsToKill.push_back(bitcast);
+ result = bitcast->getOperand(0);
+ }
+
+ // Delete all the unnecessary instructions, from latest to earliest.
+ for (SmallVectorImpl<llvm::Instruction*>::iterator
+ i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
+ (*i)->eraseFromParent();
+
+ // Do the fused retain/autorelease if we were asked to.
+ if (doRetainAutorelease)
+ result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
+
+ // Cast back to the result type.
+ return CGF.Builder.CreateBitCast(result, resultType);
+}
+
+/// If this is a +1 of the value of an immutable 'self', remove it.
+static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // This is only applicable to a method with an immutable 'self'.
+ const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ if (!method) return 0;
+ const VarDecl *self = method->getSelfDecl();
+ if (!self->getType().isConstQualified()) return 0;
+
+ // Look for a retain call.
+ llvm::CallInst *retainCall =
+ dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ if (!retainCall ||
+ retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
+ return 0;
+
+ // Look for an ordinary load of 'self'.
+ llvm::Value *retainedValue = retainCall->getArgOperand(0);
+ llvm::LoadInst *load =
+ dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+ if (!load || load->isAtomic() || load->isVolatile() ||
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ return 0;
+
+ // Okay! Burn it all down. This relies for correctness on the
+ // assumption that the retain is emitted as part of the return and
+ // that thereafter everything is used "linearly".
+ llvm::Type *resultType = result->getType();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(result));
+ assert(retainCall->use_empty());
+ retainCall->eraseFromParent();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
+
+ return CGF.Builder.CreateBitCast(load, resultType);
+}
+
+/// Emit an ARC autorelease of the result of a function.
+///
+/// \return the value to actually return from the function
+static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // If we're returning 'self', kill the initial retain. This is a
+ // heuristic attempt to "encourage correctness" in the really unfortunate
+ // case where we have a return of self during a dealloc and we desperately
+ // need to avoid the possible autorelease.
+ if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
+ return self;
+
+ // At -O0, try to emit a fused retain/autorelease.
+ if (CGF.shouldUseFusedARCCalls())
+ if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
+ return fused;
+
+ return CGF.EmitARCAutoreleaseReturnValue(result);
+}
+
+/// Heuristically search for a dominating store to the return-value slot.
+static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ // If there are multiple uses of the return-value slot, just check
+ // for something immediately preceding the IP. Sometimes this can
+ // happen with how we generate implicit-returns; it can also happen
+ // with noreturn cleanups.
+ if (!CGF.ReturnValue->hasOneUse()) {
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ if (IP->empty()) return 0;
+ llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
+ if (!store) return 0;
+ if (store->getPointerOperand() != CGF.ReturnValue) return 0;
+ assert(!store->isAtomic() && !store->isVolatile()); // see below
+ return store;
+ }
+
+ llvm::StoreInst *store =
+ dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
+ if (!store) return 0;
+
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!store->isAtomic() && !store->isVolatile());
+
+ // Now do a quick-and-dirty dominance check: just walk up the
+ // single-predecessors chain from the current insertion point.
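+ // This is conservative: it only recognizes straight-line control flow
+ // between the store and the insertion point, which is the common case
+ // for a function with a single return.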
+ llvm::BasicBlock *StoreBB = store->getParent();
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ while (IP != StoreBB) {
+ if (!(IP = IP->getSinglePredecessor()))
+ return 0;
+ }
+
+ // Okay, the store's basic block dominates the insertion point; we
+ // can do our thing.
+ return store;
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
+ // Functions with no result always return void.
+ if (ReturnValue == 0) {
+ Builder.CreateRetVoid();
+ return;
+ }
+
+ llvm::DebugLoc RetDbgLoc;
+ llvm::Value *RV = 0;
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Do nothing; aggregates get evaluated directly into the destination.
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, Alignment, RetTy);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ // The internal return value temp will always have pointer-to-return-type
+ // type; just do a load.
+
+ // If there is a dominating store to ReturnValue, we can elide
+ // the load, zap the store, and usually zap the alloca.
+ if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgLoc = SI->getDebugLoc();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
+
+ // Otherwise, we have to do a simple load.
+ } else {
+ RV = Builder.CreateLoad(ReturnValue);
+ }
+ } else {
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
+ }
+
+ // In ARC, end functions that return a retainable type with a call
+ // to objc_autoreleaseReturnValue.
+ if (AutoreleaseResult) {
+ assert(getLangOpts().ObjCAutoRefCount &&
+ !FI.isReturnsRetained() &&
+ RetTy->isObjCRetainableType());
+ RV = emitAutoreleaseOfResult(*this, RV);
+ }
+
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ if (!RetDbgLoc.isUnknown())
+ Ret->setDebugLoc(RetDbgLoc);
+}
+
+void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
+ const VarDecl *param) {
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ llvm::Value *local = GetAddrOfLocalVar(param);
+
+ QualType type = param->getType();
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to aggregates are pointers directly to the aggregate.
+ // I don't know why references to non-aggregates are different here.
+ if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
+ if (hasAggregateLLVMType(ref->getPointeeType()))
+ return args.add(RValue::getAggregate(local), type);
+
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ return args.add(RValue::get(Builder.CreateLoad(local)), type);
+ }
+
+ if (type->isAnyComplexType()) {
+ ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
+ return args.add(RValue::getComplex(complex), type);
+ }
+
+ if (hasAggregateLLVMType(type))
+ return args.add(RValue::getAggregate(local), type);
+
+ unsigned alignment = getContext().getDeclAlign(param).getQuantity();
+ llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
+ return args.add(RValue::get(value), type);
+}
+
+static bool isProvablyNull(llvm::Value *addr) {
+ return isa<llvm::ConstantPointerNull>(addr);
+}
+
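+// Treat only allocas as provably non-null; any other address is
+// conservatively assumed to be possibly null.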
+static bool isProvablyNonNull(llvm::Value *addr) {
+ return isa<llvm::AllocaInst>(addr);
+}
+
+/// Emit the actual writing-back of a writeback.
+static void emitWriteback(CodeGenFunction &CGF,
+ const CallArgList::Writeback &writeback) {
+ llvm::Value *srcAddr = writeback.Address;
+ assert(!isProvablyNull(srcAddr) &&
+ "shouldn't have writeback for provably null argument");
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the argument wasn't provably non-null, we need to null check
+ // before doing the store.
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (!provablyNonNull) {
+ llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
+ contBB = CGF.createBasicBlock("icr.done");
+
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
+ CGF.EmitBlock(writebackBB);
+ }
+
+ // Load the value to writeback.
+ llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
+
+ // Cast it back, in case we're writing an id to a Foo* or something.
+ value = CGF.Builder.CreateBitCast(value,
+ cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
+ "icr.writeback-cast");
+
+ // Perform the writeback.
+ QualType srcAddrType = writeback.AddressType;
+ CGF.EmitStoreThroughLValue(RValue::get(value),
+ CGF.MakeAddrLValue(srcAddr, srcAddrType));
+
+ // Jump to the continuation block.
+ if (!provablyNonNull)
+ CGF.EmitBlock(contBB);
+}
+
+static void emitWritebacks(CodeGenFunction &CGF,
+ const CallArgList &args) {
+ for (CallArgList::writeback_iterator
+ i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
+ emitWriteback(CGF, *i);
+}
+
+/// Emit an argument that's being passed call-by-writeback. That is,
+/// we are passing the address of an __autoreleasing temporary; it
+/// might be copy-initialized with the current value of the given
+/// address, but it will definitely be copied back out after the call.
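+/// (Under ARC this arises, for example, when the address of a __strong
+/// local is passed to a parameter of type 'NSError * __autoreleasing *'.)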
+static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
+ const ObjCIndirectCopyRestoreExpr *CRE) {
+ llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+
+ // The dest and src types don't necessarily match in LLVM terms
+ // because of the crazy ObjC compatibility rules.
+
+ llvm::PointerType *destType =
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+
+ // If the address is a constant null, just pass the appropriate null.
+ if (isProvablyNull(srcAddr)) {
+ args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
+ CRE->getType());
+ return;
+ }
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ // Create the temporary.
+ llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
+ "icr.temp");
+
+ // Zero-initialize it if we're not doing a copy-initialization.
+ bool shouldCopy = CRE->shouldCopy();
+ if (!shouldCopy) {
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getElementType()));
+ CGF.Builder.CreateStore(null, temp);
+ }
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the address is *not* known to be non-null, we have to select
+ // between the temporary and a null pointer.
+ llvm::Value *finalArgument;
+
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (provablyNonNull) {
+ finalArgument = temp;
+ } else {
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+
+ finalArgument = CGF.Builder.CreateSelect(isNull,
+ llvm::ConstantPointerNull::get(destType),
+ temp, "icr.argument");
+
+ // If we need to copy, then the load has to be conditional, which
+ // means we need control flow.
+ if (shouldCopy) {
+ contBB = CGF.createBasicBlock("icr.cont");
+ llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
+ CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
+ CGF.EmitBlock(copyBB);
+ }
+ }
+
+ // Perform a copy if necessary.
+ if (shouldCopy) {
+ LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
+ assert(srcRV.isScalar());
+
+ llvm::Value *src = srcRV.getScalarVal();
+ src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ "icr.cast");
+
+ // Use an ordinary store, not a store-to-lvalue.
+ CGF.Builder.CreateStore(src, temp);
+ }
+
+ // Finish the control flow if we needed it.
+ if (shouldCopy && !provablyNonNull)
+ CGF.EmitBlock(contBB);
+
+ args.addWriteback(srcAddr, srcAddrType, temp);
+ args.add(RValue::get(finalArgument), CRE->getType());
+}
+
+void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
+ QualType type) {
+ if (const ObjCIndirectCopyRestoreExpr *CRE
+ = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+ assert(getContext().getLangOpts().ObjCAutoRefCount);
+ assert(getContext().hasSameType(E->getType(), type));
+ return emitWritebackArg(*this, args, CRE);
+ }
+
+ assert(type->isReferenceType() == E->isGLValue() &&
+ "reference binding to unmaterialized r-value!");
+
+ if (E->isGLValue()) {
+ assert(E->getObjectKind() == OK_Ordinary);
+ return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
+ type);
+ }
+
+ if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
+ isa<ImplicitCastExpr>(E) &&
+ cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+ LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
+ assert(L.isSimple());
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
+ return;
+ }
+
+ args.add(EmitAnyExprToTemp(E), type);
+}
+
+// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+// optimizer it can aggressively ignore unwind edges.
+void
+CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
+ CGM.getNoObjCARCExceptionsMetadata());
+}
+
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+
+ llvm::Instruction *Inst;
+ if (!InvokeDest)
+ Inst = Builder.CreateCall(Callee, Args, Name);
+ else {
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
+ EmitBlock(ContBB);
+ }
+
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(Inst);
+
+ return Inst;
+}
+
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ const Twine &Name) {
+ return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
+}
+
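+/// Assert-only sanity check: verify that the IR value we are about to
+/// pass matches the callee's declared parameter type, or that it falls
+/// into the varargs tail; in either case, advance the IR argument index.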
+static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
+ llvm::FunctionType *FTy) {
+ if (ArgNo < FTy->getNumParams())
+ assert(Elt->getType() == FTy->getParamType(ArgNo));
+ else
+ assert(FTy->isVarArg());
+ ++ArgNo;
+}
+
+void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ SmallVector<llvm::Value*,16> &Args,
+ llvm::FunctionType *IRFuncTy) {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ unsigned NumElts = AT->getSize().getZExtValue();
+ QualType EltTy = AT->getElementType();
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
+ LValue LV = MakeAddrLValue(EltAddr, EltTy);
+ RValue EltRV;
+ if (EltTy->isAnyComplexType())
+ // FIXME: Volatile?
+ EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
+ EltRV = LV.asAggregateRValue();
+ else
+ EltRV = EmitLoadOfLValue(LV);
+ ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
+ }
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
+
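+ // A union is expanded as just its largest member: since expansion
+ // passes fields individually, the widest field is sufficient to carry
+ // the union's contents.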
+ if (RD->isUnion()) {
+ const FieldDecl *LargestFD = 0;
+ CharUnits UnionSize = CharUnits::Zero();
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
+ if (UnionSize < FieldSize) {
+ UnionSize = FieldSize;
+ LargestFD = FD;
+ }
+ }
+ if (LargestFD) {
+ RValue FldRV = EmitRValueForField(LV, LargestFD);
+ ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
+ }
+ } else {
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+
+ RValue FldRV = EmitRValueForField(LV, FD);
+ ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
+ }
+ }
+ } else if (Ty->isAnyComplexType()) {
+ ComplexPairTy CV = RV.getComplexVal();
+ Args.push_back(CV.first);
+ Args.push_back(CV.second);
+ } else {
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (Args.size() < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(Args.size()))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
+
+ Args.push_back(V);
+ }
+}
+
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &CallArgs,
+ const Decl *TargetDecl,
+ llvm::Instruction **callOrInvoke) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ SmallVector<llvm::Value*, 16> Args;
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+
+ // IRArgNo - Keep track of the argument number in the callee we're looking at.
+ unsigned IRArgNo = 0;
+ llvm::FunctionType *IRFuncTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(Callee->getType())->getElementType());
+
+ // If the call returns a temporary with struct return, create a temporary
+ // alloca to hold the result, unless one is given to us.
+ if (CGM.ReturnTypeUsesSRet(CallInfo)) {
+ llvm::Value *Value = ReturnValue.getValue();
+ if (!Value)
+ Value = CreateMemTemp(RetTy);
+ Args.push_back(Value);
+ checkArgMatches(Value, IRArgNo, IRFuncTy);
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->RV;
+
+ unsigned TypeAlign =
+ getContext().getTypeAlignInChars(I->Ty).getQuantity();
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Indirect: {
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (ArgInfo.getIndirectAlign() > AI->getAlignment())
+ AI->setAlignment(ArgInfo.getIndirectAlign());
+ Args.push_back(AI);
+
+ if (RV.isScalar())
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
+ TypeAlign, I->Ty);
+ else
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
+ } else {
+ // We want to avoid creating an unnecessary temporary+copy here;
+ // however, we need one in two cases:
+ // 1. If the argument is not byval, and we are required to copy the
+ // source. (This case doesn't occur on any common architecture.)
+ // 2. If the argument is byval, RV is not sufficiently aligned, and
+ // we cannot force it to be sufficiently aligned.
+ llvm::Value *Addr = RV.getAggregateAddr();
+ unsigned Align = ArgInfo.getIndirectAlign();
+ const llvm::TargetData *TD = &CGM.getTargetData();
+ if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
+ (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
+ // Create an aligned temporary, and copy to it.
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (Align > AI->getAlignment())
+ AI->setAlignment(Align);
+ Args.push_back(AI);
+ EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
+ } else {
+ // Skip the extra memcpy call.
+ Args.push_back(Addr);
+
+ // Validate argument match.
+ checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ }
+ }
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
+ llvm::Value *V;
+ if (RV.isScalar())
+ V = RV.getScalarVal();
+ else
+ V = Builder.CreateLoad(RV.getAggregateAddr());
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ if (IRArgNo < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRArgNo))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
+ Args.push_back(V);
+
+ checkArgMatches(V, IRArgNo, IRFuncTy);
+ break;
+ }
+
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *SrcPtr;
+ if (RV.isScalar()) {
+ SrcPtr = CreateMemTemp(I->Ty, "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
+ } else if (RV.isComplex()) {
+ SrcPtr = CreateMemTemp(I->Ty, "coerce");
+ StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ } else
+ SrcPtr = RV.getAggregateAddr();
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+ }
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally likes scalar values better than FCAs.
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
+ llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
+ // We don't know what we're loading from.
+ LI->setAlignment(1);
+ Args.push_back(LI);
+
+ // Validate argument match.
+ checkArgMatches(LI, IRArgNo, IRFuncTy);
+ }
+ } else {
+ // In the simple case, just pass the coerced loaded value.
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+
+ // Validate argument match.
+ checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
+ }
+
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
+ IRArgNo = Args.size();
+ break;
+ }
+ }
+
+ // If the callee is a bitcast of a function to a varargs pointer to function
+ // type, check to see if we can remove the bitcast. This handles some cases
+ // with unprototyped functions.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
+ if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
+ llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
+ llvm::FunctionType *CurFT =
+ cast<llvm::FunctionType>(CurPT->getElementType());
+ llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+
+ if (CE->getOpcode() == llvm::Instruction::BitCast &&
+ ActualFT->getReturnType() == CurFT->getReturnType() &&
+ ActualFT->getNumParams() == CurFT->getNumParams() &&
+ ActualFT->getNumParams() == Args.size() &&
+ (CurFT->isVarArg() || !ActualFT->isVarArg())) {
+ bool ArgsMatch = true;
+ for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
+ if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
+ ArgsMatch = false;
+ break;
+ }
+
+ // Strip the cast if we can get away with it. This is a nice cleanup,
+ // but also allows us to inline the function at -O0 if it is marked
+ // always_inline.
+ if (ArgsMatch)
+ Callee = CalleeF;
+ }
+ }
+
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.end());
+
+ llvm::BasicBlock *InvokeDest = 0;
+ if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ InvokeDest = getInvokeDest();
+
+ llvm::CallSite CS;
+ if (!InvokeDest) {
+ CS = Builder.CreateCall(Callee, Args);
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
+ EmitBlock(Cont);
+ }
+ if (callOrInvoke)
+ *callOrInvoke = CS.getInstruction();
+
+ CS.setAttributes(Attrs);
+ CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(CS.getInstruction());
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in
+ // general are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
+ CI->setName("call");
+
+ // Emit any writebacks immediately. Arguably this should happen
+ // after any return-value munging.
+ if (CallArgs.hasWritebacks())
+ emitWritebacks(*this, CallArgs);
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(Args[0]);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
+ }
+
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
+ return RValue::getAggregate(DestPtr);
+ }
+
+ // If the return value doesn't match, perform a bitcast to coerce it.
+ // This can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
+ }
+
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(DestPtr);
+ return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
+ }
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+}
+
+/* VarArg handling */
+
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..dead7bd
--- /dev/null
+++ b/clang/lib/CodeGen/CGCall.h
@@ -0,0 +1,306 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Value.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ struct AttributeWithIndex;
+ class Function;
+ class Type;
+ class Value;
+
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+ struct CallArg {
+ RValue RV;
+ QualType Ty;
+ bool NeedsCopy;
+ CallArg(RValue rv, QualType ty, bool needscopy)
+ : RV(rv), Ty(ty), NeedsCopy(needscopy)
+ { }
+ };
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ class CallArgList :
+ public SmallVector<CallArg, 16> {
+ public:
+ struct Writeback {
+ /// The original argument.
+ llvm::Value *Address;
+
+ /// The pointee type of the original argument.
+ QualType AddressType;
+
+ /// The temporary alloca.
+ llvm::Value *Temporary;
+ };
+
+ void add(RValue rvalue, QualType type, bool needscopy = false) {
+ push_back(CallArg(rvalue, type, needscopy));
+ }
+
+ void addFrom(const CallArgList &other) {
+ insert(end(), other.begin(), other.end());
+ Writebacks.insert(Writebacks.end(),
+ other.Writebacks.begin(), other.Writebacks.end());
+ }
+
+ void addWriteback(llvm::Value *address, QualType addressType,
+ llvm::Value *temporary) {
+ Writeback writeback;
+ writeback.Address = address;
+ writeback.AddressType = addressType;
+ writeback.Temporary = temporary;
+ Writebacks.push_back(writeback);
+ }
+
+ bool hasWritebacks() const { return !Writebacks.empty(); }
+
+ typedef SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
+ writeback_iterator writeback_begin() const { return Writebacks.begin(); }
+ writeback_iterator writeback_end() const { return Writebacks.end(); }
+
+ private:
+ SmallVector<Writeback, 1> Writebacks;
+ };
+
+ /// A class for recording the number of arguments that a function
+ /// signature requires.
+ class RequiredArgs {
+ /// The number of required arguments, or ~0 if the signature does
+ /// not permit optional arguments.
+ unsigned NumRequired;
+ public:
+ enum All_t { All };
+
+ RequiredArgs(All_t _) : NumRequired(~0U) {}
+ explicit RequiredArgs(unsigned n) : NumRequired(n) {
+ assert(n != ~0U);
+ }
+
+ /// Compute the arguments required by the given formal prototype,
+ /// given that there may be some additional, non-formal arguments
+ /// in play.
+ static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype,
+ unsigned additional) {
+ if (!prototype->isVariadic()) return All;
+ return RequiredArgs(prototype->getNumArgs() + additional);
+ }
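+ // For example, a variadic prototype 'void (int, ...)' with one
+ // additional prefix argument (e.g. an implicit block pointer) yields
+ // RequiredArgs(2), while any non-variadic prototype yields All.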
+
+ static RequiredArgs forPrototype(const FunctionProtoType *prototype) {
+ return forPrototypePlus(prototype, 0);
+ }
+
+ static RequiredArgs forPrototype(CanQual<FunctionProtoType> prototype) {
+ return forPrototype(prototype.getTypePtr());
+ }
+
+ static RequiredArgs forPrototypePlus(CanQual<FunctionProtoType> prototype,
+ unsigned additional) {
+ return forPrototypePlus(prototype.getTypePtr(), additional);
+ }
+
+ bool allowsOptionalArgs() const { return NumRequired != ~0U; }
+ unsigned getNumRequiredArgs() const {
+ assert(allowsOptionalArgs());
+ return NumRequired;
+ }
+
+ unsigned getOpaqueData() const { return NumRequired; }
+ static RequiredArgs getFromOpaqueData(unsigned value) {
+ if (value == ~0U) return All;
+ return RequiredArgs(value);
+ }
+ };
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ class FunctionArgList : public SmallVector<const VarDecl*, 16> {
+ };
+
+ /// CGFunctionInfo - Class to encapsulate the information about a
+ /// function definition.
+ class CGFunctionInfo : public llvm::FoldingSetNode {
+ struct ArgInfo {
+ CanQualType type;
+ ABIArgInfo info;
+ };
+
+ /// The llvm::CallingConv to use for this function (as specified by the
+ /// user).
+ unsigned CallingConvention : 8;
+
+ /// The llvm::CallingConv to actually use for this function, which may
+ /// depend on the ABI.
+ unsigned EffectiveCallingConvention : 8;
+
+ /// The clang::CallingConv that this was originally created with.
+ unsigned ASTCallingConvention : 8;
+
+ /// Whether this function is noreturn.
+ unsigned NoReturn : 1;
+
+ /// Whether this function is returns-retained.
+ unsigned ReturnsRetained : 1;
+
+ /// How many arguments to pass inreg.
+ unsigned HasRegParm : 1;
+ unsigned RegParm : 4;
+
+ RequiredArgs Required;
+
+ unsigned NumArgs;
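+
+ /// The ArgInfo array is allocated immediately after this object in
+ /// memory (see create()); slot 0 holds the return type and the
+ /// argument types follow.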
+ ArgInfo *getArgsBuffer() {
+ return reinterpret_cast<ArgInfo*>(this+1);
+ }
+ const ArgInfo *getArgsBuffer() const {
+ return reinterpret_cast<const ArgInfo*>(this + 1);
+ }
+
+ CGFunctionInfo() : Required(RequiredArgs::All) {}
+
+ public:
+ static CGFunctionInfo *create(unsigned llvmCC,
+ const FunctionType::ExtInfo &extInfo,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required);
+
+ typedef const ArgInfo *const_arg_iterator;
+ typedef ArgInfo *arg_iterator;
+
+ const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
+ const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
+ arg_iterator arg_begin() { return getArgsBuffer() + 1; }
+ arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
+
+ unsigned arg_size() const { return NumArgs; }
+
+ bool isVariadic() const { return Required.allowsOptionalArgs(); }
+ RequiredArgs getRequiredArgs() const { return Required; }
+
+ bool isNoReturn() const { return NoReturn; }
+
+ /// In ARC, whether this function retains its return value. This
+ /// is not always reliable for call sites.
+ bool isReturnsRetained() const { return ReturnsRetained; }
+
+ /// getASTCallingConvention() - Return the AST-specified calling
+ /// convention.
+ CallingConv getASTCallingConvention() const {
+ return CallingConv(ASTCallingConvention);
+ }
+
+ /// getCallingConvention - Return the user specified calling
+ /// convention, which has been translated into an LLVM CC.
+ unsigned getCallingConvention() const { return CallingConvention; }
+
+ /// getEffectiveCallingConvention - Return the actual calling convention to
+ /// use, which may depend on the ABI.
+ unsigned getEffectiveCallingConvention() const {
+ return EffectiveCallingConvention;
+ }
+ void setEffectiveCallingConvention(unsigned Value) {
+ EffectiveCallingConvention = Value;
+ }
+
+ bool getHasRegParm() const { return HasRegParm; }
+ unsigned getRegParm() const { return RegParm; }
+
+ FunctionType::ExtInfo getExtInfo() const {
+ return FunctionType::ExtInfo(isNoReturn(),
+ getHasRegParm(), getRegParm(),
+ getASTCallingConvention(),
+ isReturnsRetained());
+ }
+
+ CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
+
+ ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(getASTCallingConvention());
+ ID.AddBoolean(NoReturn);
+ ID.AddBoolean(ReturnsRetained);
+ ID.AddBoolean(HasRegParm);
+ ID.AddInteger(RegParm);
+ ID.AddInteger(Required.getOpaqueData());
+ getReturnType().Profile(ID);
+ for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+ it->type.Profile(ID);
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes) {
+ ID.AddInteger(info.getCC());
+ ID.AddBoolean(info.getNoReturn());
+ ID.AddBoolean(info.getProducesResult());
+ ID.AddBoolean(info.getHasRegParm());
+ ID.AddInteger(info.getRegParm());
+ ID.AddInteger(required.getOpaqueData());
+ resultType.Profile(ID);
+ for (ArrayRef<CanQualType>::iterator
+ i = argTypes.begin(), e = argTypes.end(); i != e; ++i) {
+ i->Profile(ID);
+ }
+ }
+ };
+
+ /// ReturnValueSlot - Contains the address where the return value of a
+ /// function can be stored, and whether the address is volatile or not.
+ class ReturnValueSlot {
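+ // The slot's volatility bit is packed into the low bit of the pointer.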
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Value;
+
+ public:
+ ReturnValueSlot() {}
+ ReturnValueSlot(llvm::Value *Value, bool IsVolatile)
+ : Value(Value, IsVolatile) {}
+
+ bool isNull() const { return !getValue(); }
+
+ bool isVolatile() const { return Value.getInt(); }
+ llvm::Value *getValue() const { return Value.getPointer(); }
+ };
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
new file mode 100644
index 0000000..2aedf95
--- /dev/null
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -0,0 +1,1841 @@
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with the code generation of C++ classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGBlocks.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static CharUnits
+ComputeNonVirtualBaseClassOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedClass,
+ CastExpr::path_const_iterator Start,
+ CastExpr::path_const_iterator End) {
+ CharUnits Offset = CharUnits::Zero();
+
+ const CXXRecordDecl *RD = DerivedClass;
+
+ for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ assert(!Base->isVirtual() && "Should not see virtual bases here!");
+
+ // Get the layout.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Add the offset.
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+
+ RD = BaseDecl;
+ }
+
+ return Offset;
+}
+
+llvm::Constant *
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CharUnits Offset =
+ ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
+ PathBegin, PathEnd);
+ if (Offset.isZero())
+ return 0;
+
+ llvm::Type *PtrDiffTy =
+ Types.ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
+}
+
+/// Gets the address of a direct base class within a complete object.
+/// This should only be used for (1) non-virtual bases or (2) virtual bases
+/// when the type is known to be complete (e.g. in complete destructors).
+///
+/// The object pointed to by 'This' is assumed to be non-null.
+llvm::Value *
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
+ // 'this' must be a pointer (in some address space) to Derived.
+ assert(This->getType()->isPointerTy() &&
+ cast<llvm::PointerType>(This->getType())->getElementType()
+ == ConvertType(Derived));
+
+ // Compute the offset of the base, virtual or otherwise.
+ CharUnits Offset;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+ if (BaseIsVirtual)
+ Offset = Layout.getVBaseClassOffset(Base);
+ else
+ Offset = Layout.getBaseClassOffset(Base);
+
+ // Shift and cast down to the base type.
+ // TODO: for complete types, this should be possible with a GEP.
+ llvm::Value *V = This;
+ if (Offset.isPositive()) {
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
+ }
+ V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
+
+ return V;
+}
+
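+/// Apply a constant non-virtual offset and/or a runtime virtual offset
+/// to 'ThisPtr', returning the adjusted pointer as an i8* GEP (callers
+/// bitcast the result back to the base type).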
+static llvm::Value *
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
+ CharUnits NonVirtual, llvm::Value *Virtual) {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *NonVirtualOffset = 0;
+ if (!NonVirtual.isZero())
+ NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy,
+ NonVirtual.getQuantity());
+
+ llvm::Value *BaseOffset;
+ if (Virtual) {
+ if (NonVirtualOffset)
+ BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
+ else
+ BaseOffset = Virtual;
+ } else
+ BaseOffset = NonVirtualOffset;
+
+ // Apply the base offset.
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
+
+ return ThisPtr;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CastExpr::path_const_iterator Start = PathBegin;
+ const CXXRecordDecl *VBase = 0;
+
+ // Get the virtual base.
+ if ((*Start)->isVirtual()) {
+ VBase =
+ cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
+ ++Start;
+ }
+
+ CharUnits NonVirtualOffset =
+ ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
+ Start, PathEnd);
+
+ // Get the base pointer type.
+ llvm::Type *BasePtrTy =
+ ConvertType((PathEnd[-1])->getType())->getPointerTo();
+
+ if (NonVirtualOffset.isZero() && !VBase) {
+ // Just cast back.
+ return Builder.CreateBitCast(Value, BasePtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ llvm::Value *VirtualOffset = 0;
+
+ if (VBase) {
+ if (Derived->hasAttr<FinalAttr>()) {
+ VirtualOffset = 0;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+
+ CharUnits VBaseOffset = Layout.getVBaseClassOffset(VBase);
+ NonVirtualOffset += VBaseOffset;
+ } else
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ }
+
+ // Apply the offsets.
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
+ NonVirtualOffset,
+ VirtualOffset);
+
+ // Cast back.
+ Value = Builder.CreateBitCast(Value, BasePtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ QualType DerivedTy =
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+
+ llvm::Value *NonVirtualOffset =
+ CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
+
+ if (!NonVirtualOffset) {
+ // No offset, we can just cast back.
+ return Builder.CreateBitCast(Value, DerivedPtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ // Apply the offset.
+ Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
+
+ // Just cast.
+ Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+/// GetVTTParameter - Return the VTT parameter that should be passed to a
+/// base constructor/destructor with virtual bases.
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
+ bool ForVirtualBase) {
+ if (!CodeGenVTables::needsVTTParameter(GD)) {
+ // This constructor/destructor does not need a VTT parameter.
+ return 0;
+ }
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
+ const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ llvm::Value *VTT;
+
+ uint64_t SubVTTIndex;
+
+ // If the record matches the base, this is the complete ctor/dtor
+ // variant calling the base variant in a class with virtual bases.
+ if (RD == Base) {
+ assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
+ "doing no-op VTT offset in base dtor/ctor?");
+ assert(!ForVirtualBase && "Can't have same class as virtual base!");
+ SubVTTIndex = 0;
+ } else {
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(RD);
+ CharUnits BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) :
+ Layout.getBaseClassOffset(Base);
+
+ SubVTTIndex =
+ CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ }
+
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ // A VTT parameter was passed to the constructor, use it.
+ VTT = CGF.LoadCXXVTT();
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ } else {
+ // We're the complete constructor, so get the VTT by name.
+ VTT = CGF.CGM.getVTables().GetAddrOfVTT(RD);
+ VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ }
+
+ return VTT;
+}
+
+namespace {
+ /// Call the destructor for a direct base class.
+ struct CallBaseDtor : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
+
+ const CXXDestructorDecl *D = BaseClass->getDestructor();
+ llvm::Value *Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
+ DerivedClass, BaseClass,
+ BaseIsVirtual);
+ CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
+ }
+ };
+
+ /// A visitor which checks whether an initializer uses 'this' in a
+ /// way which requires the vtable to be properly set.
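+  ///
+  /// For example (illustrative), a base initializer such as
+  ///   struct B : A { B() : A(f(this)) {} };
+  /// passes 'this' to arbitrary code, which might perform a virtual call,
+  /// so the vtable pointers must be set up before it runs.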
+ struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
+ typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;
+
+ bool UsesThis;
+
+ DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
+ };
+}
+
+static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
+ DynamicThisUseChecker Checker(C);
+ Checker.Visit(const_cast<Expr*>(Init));
+ return Checker.UsesThis;
+}
+
+static void EmitBaseInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXCtorInitializer *BaseInit,
+ CXXCtorType CtorType) {
+ assert(BaseInit->isBaseInitializer() &&
+ "Must have base initializer!");
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+
+ const Type *BaseType = BaseInit->getBaseClass();
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ bool isBaseVirtual = BaseInit->isBaseVirtual();
+
+ // The base constructor doesn't construct virtual bases.
+ if (CtorType == Ctor_Base && isBaseVirtual)
+ return;
+
+ // If the initializer for the base (other than the constructor
+ // itself) accesses 'this' in any way, we need to initialize the
+ // vtables.
+ if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
+ CGF.InitializeVTablePointers(ClassDecl);
+
+ // We can pretend to be a complete class because it only matters for
+ // virtual bases, and we only do virtual bases for complete ctors.
+ llvm::Value *V =
+ CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
+ BaseClassDecl,
+ isBaseVirtual);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(V, Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
+
+ if (CGF.CGM.getLangOpts().Exceptions &&
+ !BaseClassDecl->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
+ isBaseVirtual);
+}
+
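+/// EmitAggMemberInitializer - Emit the initializer for an aggregate member,
+/// recursing to emit one loop per array dimension if the member is an array.
+/// For example (illustrative), the implicitly-defined copy constructor of
+///   struct S { X data[2][3]; };
+/// where X has a non-trivial copy constructor reaches here with two array
+/// index variables and emits two nested loops.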
+static void EmitAggMemberInitializer(CodeGenFunction &CGF,
+ LValue LHS,
+ Expr *Init,
+ llvm::Value *ArrayIndexVar,
+ QualType T,
+ ArrayRef<VarDecl *> ArrayIndexes,
+ unsigned Index) {
+ if (Index == ArrayIndexes.size()) {
+ LValue LV = LHS;
+ { // Scope for Cleanups.
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ LV.setAddress(Dest);
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
+ LV.setAlignment(std::min(Align, LV.getAlignment()));
+ }
+
+ if (!CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, LV.getAddress(),
+ LV.isVolatileQualified());
+ } else {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ }
+ }
+
+ // Now, outside of the initializer cleanup scope, destroy the backing array
+ // for a std::initializer_list member.
+ CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
+
+ return;
+ }
+
+ const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
+ assert(Array && "Array initialization without the array type?");
+ llvm::Value *IndexVar
+ = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
+ assert(IndexVar && "Array index variable not loaded");
+
+ // Initialize this index variable to zero.
+ llvm::Value* Zero
+ = llvm::Constant::getNullValue(
+ CGF.ConvertType(CGF.getContext().getSizeType()));
+ CGF.Builder.CreateStore(Zero, IndexVar);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+  // Generate: if (loop-index < number-of-elements) fall through to the loop
+  // body; otherwise, go to the block after the for-loop.
+  uint64_t NumElements = Array->getSize().getZExtValue();
+  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
+  llvm::Value *NumElementsVal =
+    llvm::ConstantInt::get(Counter->getType(), NumElements);
+  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsVal,
+                                                  "isless");
+
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+
+ {
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
+ }
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = CGF.Builder.CreateLoad(IndexVar);
+ NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
+ CGF.Builder.CreateStore(NextVal, IndexVar);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
+}
+
+namespace {
+ struct CallMemberDtor : EHScopeStack::Cleanup {
+ llvm::Value *V;
+ CXXDestructorDecl *Dtor;
+
+ CallMemberDtor(llvm::Value *V, CXXDestructorDecl *Dtor)
+ : V(V), Dtor(Dtor) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ V);
+ }
+ };
+}
+
+static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
+ bool Moving) {
+ return Moving ? Record->hasTrivialMoveConstructor() :
+ Record->hasTrivialCopyConstructor();
+}
+
+static void EmitMemberInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXCtorInitializer *MemberInit,
+ const CXXConstructorDecl *Constructor,
+ FunctionArgList &Args) {
+ assert(MemberInit->isAnyMemberInitializer() &&
+ "Must have member initializer!");
+ assert(MemberInit->getInit() && "Must have initializer!");
+
+  // Find the non-static data member being initialized.
+ FieldDecl *Field = MemberInit->getAnyMember();
+ QualType FieldType = Field->getType();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ LValue LHS;
+
+ // If we are initializing an anonymous union field, drill down to the field.
+ if (MemberInit->isIndirectMemberInitializer()) {
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr,
+ MemberInit->getIndirectMember(), 0);
+ FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
+ } else {
+ LValue ThisLHSLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+ LHS = CGF.EmitLValueForFieldInitialization(ThisLHSLV, Field);
+ }
+
+ // Special case: if we are in a copy or move constructor, and we are copying
+ // an array of PODs or classes with trivial copy constructors, ignore the
+ // AST and perform the copy we know is equivalent.
+ // FIXME: This is hacky at best... if we had a bit more explicit information
+ // in the AST, we could generalize it more easily.
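+  //
+  // For example (illustrative), the implicit copy constructor of
+  //   struct S { int counts[16]; };
+  // copies 'counts' with a single aggregate copy instead of an element-wise
+  // loop.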
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicitlyDefined() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && hasTrivialCopyOrMoveConstructor(Record,
+ Constructor->isMoveConstructor()))) {
+      // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
+ LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
+ LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+ }
+
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (MemberInit->getNumArrayIndices())
+ ArrayIndexes = MemberInit->getArrayIndexes();
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+}
+
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
+ LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
+ QualType FieldType = Field->getType();
+ if (!hasAggregateLLVMType(FieldType)) {
+ if (LHS.isSimple()) {
+ EmitExprAsInit(Init, Field, LHS, false);
+ } else {
+ RValue RHS = RValue::get(EmitScalarExpr(Init));
+ EmitStoreThroughLValue(RHS, LHS);
+ }
+ } else if (FieldType->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, LHS.getAddress(), LHS.isVolatileQualified());
+ } else {
+ llvm::Value *ArrayIndexVar = 0;
+ if (ArrayIndexes.size()) {
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // The LHS is a pointer to the first object we'll be constructing, as
+ // a flat array.
+ QualType BaseElementTy = getContext().getBaseElementType(FieldType);
+ llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+
+ // Create an array index that will be used to walk over all of the
+ // objects we're constructing.
+ ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, ArrayIndexVar);
+
+ // Emit the block variables for the array indices, if any.
+ for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
+ EmitAutoVarDecl(*ArrayIndexes[I]);
+ }
+
+ EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
+ ArrayIndexes, 0);
+
+ if (!CGM.getLangOpts().Exceptions)
+ return;
+
+ // FIXME: If we have an array of classes w/ non-trivial destructors,
+ // we need to destroy in reverse order of construction along the exception
+ // path.
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor())
+ EHStack.pushCleanup<CallMemberDtor>(EHCleanup, LHS.getAddress(),
+ RD->getDestructor());
+ }
+}
+
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e.
+/// emitting the complete constructor as a simple call to the base
+/// constructor.
+static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+ // Currently we disable the optimization for classes with virtual
+ // bases because (1) the addresses of parameter variables need to be
+ // consistent across all initializers but (2) the delegate function
+ // call necessarily creates a second copy of the parameter variable.
+ //
+ // The limiting example (purely theoretical AFAIK):
+ // struct A { A(int &c) { c++; } };
+ // struct B : virtual A {
+ // B(int count) : A(count) { printf("%d\n", count); }
+ // };
+ // ...although even this example could in principle be emitted as a
+ // delegation since the address of the parameter doesn't escape.
+ if (Ctor->getParent()->getNumVBases()) {
+ // TODO: white-list trivial vbase initializers. This case wouldn't
+ // be subject to the restrictions below.
+
+ // TODO: white-list cases where:
+ // - there are no non-reference parameters to the constructor
+ // - the initializers don't access any non-reference parameters
+ // - the initializers don't take the address of non-reference
+ // parameters
+ // - etc.
+ // If we ever add any of the above cases, remember that:
+ // - function-try-blocks will always blacklist this optimization
+ // - we need to perform the constructor prologue and cleanup in
+ // EmitConstructorBody.
+
+ return false;
+ }
+
+ // We also disable the optimization for variadic functions because
+ // it's impossible to "re-pass" varargs.
+ if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
+ return false;
+
+ // FIXME: Decide if we can do a delegation of a delegating constructor.
+ if (Ctor->isDelegatingConstructor())
+ return false;
+
+ return true;
+}
+
+/// EmitConstructorBody - Emits the body of the current constructor.
+void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+
+ // Before we go any further, try the complete->base constructor
+ // delegation optimization.
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, Ctor->getLocEnd());
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
+ return;
+ }
+
+ Stmt *Body = Ctor->getBody();
+
+ // Enter the function-try-block before the constructor prologue if
+ // applicable.
+ bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (IsTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+
+ // TODO: in restricted cases, we can emit the vbase initializers of
+ // a complete ctor and then delegate to the base ctor.
+
+ // Emit the constructor prologue, i.e. the base and member
+ // initializers.
+ EmitCtorPrologue(Ctor, CtorType, Args);
+
+ // Emit the body of the statement.
+ if (IsTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+
+ // Emit any cleanup blocks associated with the member or base
+ // initializers, which includes (along the exceptional path) the
+ // destructors for those members and bases that were fully
+ // constructed.
+ PopCleanupBlocks(CleanupDepth);
+
+ if (IsTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
+
+/// EmitCtorPrologue - This routine generates necessary code to initialize
+/// base classes and non-static data members belonging to this constructor.
+void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
+ CXXCtorType CtorType,
+ FunctionArgList &Args) {
+ if (CD->isDelegatingConstructor())
+ return EmitDelegatingCXXConstructorCall(CD, Args);
+
+ const CXXRecordDecl *ClassDecl = CD->getParent();
+
+ SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
+
+ for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
+ E = CD->init_end();
+ B != E; ++B) {
+ CXXCtorInitializer *Member = (*B);
+
+ if (Member->isBaseInitializer()) {
+ EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
+ } else {
+ assert(Member->isAnyMemberInitializer() &&
+ "Delegating initializer on non-delegating constructor");
+ MemberInitializers.push_back(Member);
+ }
+ }
+
+ InitializeVTablePointers(ClassDecl);
+
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
+ EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
+}
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
+
+static bool
+HasTrivialDestructorBody(ASTContext &Context,
+ const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *MostDerivedClassDecl)
+{
+ // If the destructor is trivial we don't have to check anything else.
+ if (BaseClassDecl->hasTrivialDestructor())
+ return true;
+
+ if (!BaseClassDecl->getDestructor()->hasTrivialBody())
+ return false;
+
+ // Check fields.
+ for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
+ E = BaseClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+ }
+
+ // Check non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
+ I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *NonVirtualBase =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, NonVirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+
+ if (BaseClassDecl == MostDerivedClassDecl) {
+ // Check virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
+ I != E; ++I) {
+ const CXXRecordDecl *VirtualBase =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, VirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context,
+ const FieldDecl *Field)
+{
+ QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
+
+ const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
+}
+
+/// CanSkipVTablePointerInitialization - Check whether we can skip setting up
+/// the vtable pointers before running this destructor, i.e. whether its body
+/// and the bodies of all of the field destructors are trivial.
+static bool CanSkipVTablePointerInitialization(ASTContext &Context,
+ const CXXDestructorDecl *Dtor) {
+ if (!Dtor->hasTrivialBody())
+ return false;
+
+ // Check the fields.
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+ }
+
+ return true;
+}
+
+/// EmitDestructorBody - Emits the body of the current destructor.
+void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+ CXXDtorType DtorType = CurGD.getDtorType();
+
+ // The call to operator delete in a deleting destructor happens
+ // outside of the function-try-block, which means it's always
+ // possible to delegate the destructor body to the complete
+ // destructor. Do so.
+ if (DtorType == Dtor_Deleting) {
+ EnterDtorCleanups(Dtor, Dtor_Deleting);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ PopCleanupBlock();
+ return;
+ }
+
+ Stmt *Body = Dtor->getBody();
+
+ // If the body is a function-try-block, enter the try before
+ // anything else.
+ bool isTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (isTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ // Enter the epilogue cleanups.
+ RunCleanupsScope DtorEpilogue(*this);
+
+ // If this is the complete variant, just invoke the base variant;
+ // the epilogue will destruct the virtual bases. But we can't do
+ // this optimization if the body is a function-try-block, because
+ // we'd introduce *two* handler blocks.
+ switch (DtorType) {
+ case Dtor_Deleting: llvm_unreachable("already handled deleting case");
+
+ case Dtor_Complete:
+ // Enter the cleanup scopes for virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Complete);
+
+ if (!isTryBody) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ break;
+ }
+ // Fallthrough: act like we're in the base variant.
+
+ case Dtor_Base:
+ // Enter the cleanup scopes for fields and non-virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Base);
+
+ // Initialize the vtable pointers before entering the body.
+ if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
+ InitializeVTablePointers(Dtor->getParent());
+
+ if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+ // -fapple-kext must inline any call to this dtor into
+ // the caller's body.
+ if (getContext().getLangOpts().AppleKext)
+ CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
+ break;
+ }
+
+ // Jump out through the epilogue cleanups.
+ DtorEpilogue.ForceCleanup();
+
+ // Exit the try if applicable.
+ if (isTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
+
+namespace {
+ /// Call the operator delete associated with the current destructor.
+ struct CallDtorDelete : EHScopeStack::Cleanup {
+ CallDtorDelete() {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
+ CGF.getContext().getTagDeclType(ClassDecl));
+ }
+ };
+
+ class DestroyField : public EHScopeStack::Cleanup {
+ const FieldDecl *field;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+
+ public:
+ DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : field(field), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Find the address of the field.
+ llvm::Value *thisValue = CGF.LoadCXXThis();
+ QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
+ LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
+ LValue LV = CGF.EmitLValueForField(ThisLV, field);
+ assert(LV.isSimple());
+
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
+ }
+ };
+}
+
+/// EnterDtorCleanups - Push the cleanups that run at the end of a class's
+/// destructor: the calls to the destructors of members and base classes,
+/// in reverse order of their construction.
+void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
+ assert(!DD->isTrivial() &&
+ "Should not emit dtor epilogue for trivial dtor!");
+
+ // The deleting-destructor phase just needs to call the appropriate
+ // operator delete that Sema picked up.
+ if (DtorType == Dtor_Deleting) {
+ assert(DD->getOperatorDelete() &&
+ "operator delete missing - EmitDtorEpilogue");
+ EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+ return;
+ }
+
+ const CXXRecordDecl *ClassDecl = DD->getParent();
+
+ // Unions have no bases and do not call field destructors.
+ if (ClassDecl->isUnion())
+ return;
+
+ // The complete-destructor phase just destructs all the virtual bases.
+ if (DtorType == Dtor_Complete) {
+
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
+ I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ }
+
+ return;
+ }
+
+ assert(DtorType == Dtor_Base);
+
+ // Destroy non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+
+ // Ignore virtual bases.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
+
+ // Destroy direct fields.
+ SmallVector<const FieldDecl *, 16> FieldDecls;
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *field = *I;
+ QualType type = field->getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ // Anonymous union members do not have their destructors called.
+ const RecordType *RT = type->getAsUnionType();
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ EHStack.pushCleanup<DestroyField>(cleanupKind, field,
+ getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+ }
+}
+
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several elements of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
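+///
+/// For example (illustrative), a local declaration like
+///   A chunk[4];
+/// where A has a user-provided default constructor reaches here and
+/// constructs chunk[0] through chunk[3] in a loop.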
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const ConstantArrayType *arrayType,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+ QualType elementType;
+ llvm::Value *numElements =
+ emitArrayLength(arrayType, elementType, arrayBegin);
+
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
+ argBegin, argEnd, zeroInitialize);
+}
+
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several elements of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayBegin a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+ llvm::BranchInst *zeroCheckBranch = 0;
+
+ // Optimize for a constant count.
+ llvm::ConstantInt *constantCount
+ = dyn_cast<llvm::ConstantInt>(numElements);
+ if (constantCount) {
+ // Just skip out if the constant count is zero.
+ if (constantCount->isZero()) return;
+
+ // Otherwise, emit the check.
+ } else {
+ llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
+ llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
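+    // Create the branch with both successors pointing at loopBB for now;
+    // the zero successor is patched to the continuation block once that
+    // block exists (see the setSuccessor call below).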
+ zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
+ EmitBlock(loopBB);
+ }
+
+ // Find the end of the array.
+ llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
+ "arrayctor.end");
+
+ // Enter the loop, setting up a phi for the current location to initialize.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
+ EmitBlock(loopBB);
+ llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
+ "arrayctor.cur");
+ cur->addIncoming(arrayBegin, entryBB);
+
+ // Inside the loop body, emit the constructor call on the array element.
+
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
+
+ // Zero initialize the storage, if requested.
+ if (zeroInitialize)
+ EmitNullInitialization(cur, type);
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
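+  //
+  // For example (illustrative), given
+  //   struct T { T(); ~T(); };
+  //   struct A { A(const T &t = T()); };
+  //   A array[8];
+  // each element's T temporary is destroyed before the next element is
+  // constructed, which is why a fresh cleanup scope is entered for each
+  // loop iteration below.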
+
+ {
+ RunCleanupsScope Scope(*this);
+
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOpts().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ Destroyer *destroyer = destroyCXXObject;
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
+ }
+
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
+ cur, argBegin, argEnd);
+ }
+
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
+ "arrayctor.next");
+ cur->addIncoming(next, Builder.GetInsertBlock());
+
+ // Check whether that's the end of the loop.
+ llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
+ llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
+ Builder.CreateCondBr(done, contBB, loopBB);
+
+ // Patch the earlier check to skip over the loop.
+ if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
+
+ EmitBlock(contBB);
+}
+
+void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ assert(!dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+ addr);
+}
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
+ // If debug info for this class has not been emitted then this is the
+ // right time to do so.
+ const CXXRecordDecl *Parent = D->getParent();
+ DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
+ Parent->getLocation());
+ }
+
+ if (D->isTrivial()) {
+ if (ArgBeg == ArgEnd) {
+ // Trivial default constructor, no codegen required.
+ assert(D->isDefaultConstructor() &&
+ "trivial 0-arg ctor not a default ctor");
+ return;
+ }
+
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
+
+ const Expr *E = (*ArgBeg);
+ QualType Ty = E->getType();
+ llvm::Value *Src = EmitLValue(E).getAddress();
+ EmitAggregateCopy(This, Src, Ty);
+ return;
+ }
+
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+}
+
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isTrivial()) {
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
+ EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
+ return;
+ }
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
+ clang::Ctor_Complete);
+ assert(D->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), D->getThisType(getContext()));
+
+ // Push the src ptr.
+ QualType QT = *(FPT->arg_type_begin());
+ llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ Src = Builder.CreateBitCast(Src, t);
+ Args.add(RValue::get(Src), QT);
+
+ // Skip over first argument (Src).
+ ++ArgBeg;
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
+ E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ EmitCallArg(Args, *Arg, *I);
+ }
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || FPT->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ EmitCallArg(Args, *Arg, ArgType);
+ }
+
+ EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
+ ReturnValueSlot(), Args, D);
+}
+
+void
+CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args) {
+ CallArgList DelegateArgs;
+
+ FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ assert(I != E && "no parameters to constructor");
+
+ // this
+ DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
+ ++I;
+
+ // vtt
+ if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
+ /*ForVirtualBase=*/false)) {
+ QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
+ DelegateArgs.add(RValue::get(VTT), VoidPP);
+
+ if (CodeGenVTables::needsVTTParameter(CurGD)) {
+ assert(I != E && "cannot skip vtt parameter, already done with args");
+ assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
+ ++I;
+ }
+ }
+
+ // Explicit arguments.
+ for (; I != E; ++I) {
+ const VarDecl *param = *I;
+ EmitDelegateCallArg(DelegateArgs, param);
+ }
+
+ EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
+ CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
+ ReturnValueSlot(), DelegateArgs, Ctor);
+}
+
+namespace {
+ struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+ CXXDtorType Type;
+
+ CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
+ CXXDtorType Type)
+ : Dtor(D), Addr(Addr), Type(Type) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
+ Addr);
+ }
+ };
+}
+
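+/// Emit a C++11 delegating constructor call. For example (illustrative), in
+///   struct S { S() : S(42) {} S(int); };
+/// the S() body first runs S(int) on the same object, and registers a
+/// conditional cleanup so that the object is destroyed if the rest of the
+/// delegating constructor throws afterwards.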
+void
+CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args) {
+ assert(Ctor->isDelegatingConstructor());
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+
+ QualType Ty = getContext().getTagDeclType(Ctor->getParent());
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
+
+ const CXXRecordDecl *ClassDecl = Ctor->getParent();
+ if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ CXXDtorType Type =
+ CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
+
+ EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
+ ClassDecl->getDestructor(),
+ ThisPtr, Type);
+ }
+}
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ llvm::Value *This) {
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
+ ForVirtualBase);
+ llvm::Value *Callee = 0;
+ if (getContext().getLangOpts().AppleKext)
+ Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
+ DD->getParent());
+
+ if (!Callee)
+ Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+ EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+}
+
+namespace {
+ struct CallLocalDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+
+ CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
+ : Dtor(D), Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Addr);
+ }
+ };
+}
+
+void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
+ llvm::Value *Addr) {
+ EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
+}
+
+void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+ assert(D && D->isUsed() && "destructor not marked as used!");
+ PushDestructorCleanup(D, Addr);
+}
+
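+/// GetVirtualBaseClassOffset - Compute the offset of a virtual base within
+/// 'This' by loading the vbase-offset entry that the Itanium ABI stores in
+/// the vtable at a fixed offset from the address point.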
+llvm::Value *
+CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
+ CharUnits VBaseOffsetOffset =
+ CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
+ "vbase.offset.ptr");
+ llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+
+ VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
+ PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+
+ return VBaseOffset;
+}
+
+void
+CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Compute the address point.
+ llvm::Value *VTableAddressPoint;
+
+ // Check if we need to use a vtable from the VTT.
+ if (CodeGenVTables::needsVTTParameter(CurGD) &&
+ (RD->getNumVBases() || NearestVBase)) {
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+    // Load the VTT.
+ llvm::Value *VTT = LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+
+ // And load the address point from the VTT.
+ VTableAddressPoint = Builder.CreateLoad(VTT);
+ } else {
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
+ VTableAddressPoint =
+ Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
+ }
+
+ // Compute where to store the address point.
+ llvm::Value *VirtualOffset = 0;
+ CharUnits NonVirtualOffset = CharUnits::Zero();
+
+ if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
+ // We need to use the virtual base offset offset because the virtual base
+ // might have a different offset in the most derived class.
+ VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
+ NearestVBase);
+ NonVirtualOffset = OffsetFromNearestVBase;
+ } else {
+ // We can just use the base offset in the complete class.
+ NonVirtualOffset = Base.getBaseOffset();
+ }
+
+ // Apply the offsets.
+ llvm::Value *VTableField = LoadCXXThis();
+
+ if (!NonVirtualOffset.isZero() || VirtualOffset)
+ VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
+ NonVirtualOffset,
+ VirtualOffset);
+
+ // Finally, store the address point.
+ llvm::Type *AddressPointPtrTy =
+ VTableAddressPoint->getType()->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
+ CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
+}
+
+void
+CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!BaseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
+ VTable, VTableClass);
+ }
+
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Traverse bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore classes without a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetFromNearestVBase;
+ bool BaseDeclIsNonVirtualPrimaryBase;
+
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(VTableClass);
+
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase = CharUnits::Zero();
+ BaseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase =
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+ }
+
+ InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual() ? BaseDecl : NearestVBase,
+ BaseOffsetFromNearestVBase,
+ BaseDeclIsNonVirtualPrimaryBase,
+ VTable, VTableClass, VBases);
+ }
+}
+
+void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
+ // Ignore classes without a vtable.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Get the VTable.
+ llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ VisitedVirtualBasesSetTy VBases;
+ InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
+ /*NearestVBase=*/0,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+ /*BaseIsNonVirtualPrimaryBase=*/false,
+ VTable, RD, VBases);
+}
+
+llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
+ llvm::Type *Ty) {
+ llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
+ llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
+ CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
+ return VTable;
+}
+
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
+/// function call on the given expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
+ const CXXMethodDecl *MD) {
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+ // struct A { virtual void f(); }
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+  // Similarly, if the class itself is marked 'final', it can't be derived
+  // from, so no override of the member function can exist and we can
+  // devirtualize the call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
+ Base = skipNoOpCastsAndParens(Base);
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // If the variable itself has record (non-pointer, non-reference) type,
+      // its static type is its dynamic type and we can devirtualize the call.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
+
+static bool UseVirtualCall(ASTContext &Context,
+ const CXXOperatorCallExpr *CE,
+ const CXXMethodDecl *MD) {
+ if (!MD->isVirtual())
+ return false;
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOpts().AppleKext)
+ return true;
+
+ return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
+}
+
+llvm::Value *
+CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ llvm::Value *This) {
+ llvm::FunctionType *fnType =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodDeclaration(MD));
+
+ if (UseVirtualCall(getContext(), E, MD))
+ return BuildVirtualCall(MD, This, fnType);
+
+ return CGM.GetAddrOfFunction(MD, fnType);
+}
+
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs) {
+ // Lookup the call operator
+ DeclarationName Name
+ = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
+ CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
+ const FunctionProtoType *FPT =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the address of the call operator.
+ GlobalDecl GD(CallOperator);
+ const CGFunctionInfo &CalleeFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+
+ // Forward the returned value
+ if (!ResultType->isVoidType() && Slot.isNull())
+ EmitReturnOfRValue(RV, ResultType);
+}
+
+void CodeGenFunction::EmitLambdaBlockInvokeBody() {
+ const BlockDecl *BD = BlockInfo->getBlockDecl();
+ const VarDecl *variable = BD->capture_begin()->getVariable();
+ const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (BlockDecl::param_const_iterator I = BD->param_begin(),
+ E = BD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
+ if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitFunctionBody(Args);
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+ const CXXRecordDecl *Lambda = MD->getParent();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
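+/// Emit the static invoker backing a captureless lambda's conversion to a
+/// function pointer. For example (illustrative),
+///   int (*fp)(int) = [](int x) { return x + 1; };
+/// yields a static function that forwards to the lambda's operator() with an
+/// undef 'this', which is safe because a captureless lambda has no state.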
+void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitLambdaDelegatingInvokeBody(MD);
+}
diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp
new file mode 100644
index 0000000..b00e2a2
--- /dev/null
+++ b/clang/lib/CodeGen/CGCleanup.cpp
@@ -0,0 +1,1103 @@
+//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
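+//
+// For example (illustrative), in
+//   void f() { std::string s; g(); }
+// the destructor of 's' is a cleanup: it must run on the normal
+// fall-through path and also on the exceptional path taken if g() throws.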
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
+ if (rv.isScalar())
+ return DominatingLLVMValue::needsSaving(rv.getScalarVal());
+ if (rv.isAggregate())
+ return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
+ return true;
+}
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
+ if (rv.isScalar()) {
+ llvm::Value *V = rv.getScalarVal();
+
+ // These automatically dominate and don't need to be saved.
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, ScalarLiteral);
+
+ // Everything else needs an alloca.
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, ScalarAddress);
+ }
+
+ if (rv.isComplex()) {
+ CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
+ llvm::Type *ComplexTy =
+ llvm::StructType::get(V.first->getType(), V.second->getType(),
+ (void*) 0);
+ llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
+ CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
+ return saved_type(addr, ComplexAddress);
+ }
+
+ assert(rv.isAggregate());
+ llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, AggregateLiteral);
+
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, AggregateAddress);
+}
+
+/// Given a saved r-value produced by save(), emit the code necessary to
+/// restore it to usability at the current insertion point.
+RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ switch (K) {
+ case ScalarLiteral:
+ return RValue::get(Value);
+ case ScalarAddress:
+ return RValue::get(CGF.Builder.CreateLoad(Value));
+ case AggregateLiteral:
+ return RValue::getAggregate(Value);
+ case AggregateAddress:
+ return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
+ case ComplexAddress:
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false));
+ }
+
+ llvm_unreachable("bad saved r-value kind");
+}
+
+/// Push an entry of the given size onto this protected-scope stack.
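+/// Entries are allocated from the end of the buffer toward the beginning,
+/// so the innermost scope always starts at StartOfData and pushing a scope
+/// moves StartOfData downward.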
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
+
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
+ si != se; ) {
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive()) return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
+ for (stable_iterator si = getInnermostEHScope(), se = stable_end();
+ si != se; ) {
+ // Skip over inactive cleanups.
+ EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
+ if (cleanup && !cleanup->isActive()) {
+ si = cleanup->getEnclosingEHScope();
+ continue;
+ }
+
+ // All other scopes are always active.
+ return si;
+ }
+
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHScope);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHScope = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHScope = Cleanup.getEnclosingEHScope();
+ StartOfData += Cleanup.getAllocatedSize();
+
+ // Destroy the cleanup.
+ Cleanup.~EHCleanupScope();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
+ assert(getInnermostEHScope() == stable_end());
+ char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
+ EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
+ InnermostEHScope = stable_begin();
+ return filter;
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());
+
+ InnermostEHScope = filter.getEnclosingEHScope();
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ EHCatchScope *scope =
+ new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
+ InnermostEHScope = stable_begin();
+ return scope;
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ new (Buffer) EHTerminateScope(InnermostEHScope);
+ InnermostEHScope = stable_begin();
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
+void CodeGenFunction::initFullExprCleanup() {
+ // Create a variable to decide whether the cleanup needs to be run.
+ llvm::AllocaInst *active
+ = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
+
+ // Initialize it to false at a site that's guaranteed to be run
+ // before each evaluation.
+ setBeforeOutermostConditional(Builder.getFalse(), active);
+
+ // Initialize it to true at the current location.
+ Builder.CreateStore(Builder.getTrue(), active);
+
+ // Set that as the active flag in the cleanup.
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
+ cleanup.setActiveFlag(active);
+
+ if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
+ if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
+}
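+
+// A sketch of the kind of source this supports (illustrative only):
+//
+//   bool ok = cond || f(std::string("tmp"));
+//
+// The temporary's destructor may only run if the right-hand side was
+// actually evaluated, so the cleanup tests the i1 "cleanup.cond" flag
+// configured above instead of running unconditionally.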
+
+void EHScopeStack::Cleanup::anchor() {}
+
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+static void ResolveAllBranchFixups(CodeGenFunction &CGF,
+ llvm::SwitchInst *Switch,
+ llvm::BasicBlock *CleanupEntry) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set.
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
+ if (Fixup.Destination == 0) continue;
+
+ // If there isn't an OptimisticBranchBlock, then InitialBranch is
+ // still pointing directly to its destination; forward it to the
+ // appropriate cleanup entry. This is required in the specific
+ // case of
+ // { std::string s; goto lbl; }
+ // lbl:
+ // i.e. where there's an unresolved fixup inside a single cleanup
+ // entry which we're currently popping.
+ if (Fixup.OptimisticBranchBlock == 0) {
+ new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
+ }
+
+ // Don't add this case to the switch statement twice.
+ if (!CasesAdded.insert(Fixup.Destination)) continue;
+
+ Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
+
+ CGF.EHStack.clearFixups();
+}
+
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
+}
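+
+// Roughly, as an illustrative sketch rather than verbatim IR, this
+// rewrites the exit block's terminator from
+//
+//   br label %dest
+//
+// into
+//
+//   %d = load i32* %cleanup.dest.slot
+//   switch i32 %d, label %dest [ ...cases added by the caller... ]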
+
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = 0;
+ ResolvedAny = true;
+
+ // If it doesn't have an optimistic branch block, LatestBranch is
+ // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
+ }
+
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
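+
+// Illustrative source pattern: at the closing brace of
+//
+//   { A a; B b; C c; }
+//
+// the savepoint taken at the opening brace is passed in, and the
+// cleanups for c, b and a are popped in turn. While the savepoint
+// still strictly encloses the next enclosing normal cleanup, the
+// fallthrough is treated as a branch-through so it threads from one
+// emitted cleanup into the next.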
+
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
+ }
+ return Entry;
+}
+
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return Entry;
+
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return Entry;
+ assert(Br->getSuccessor(0) == Entry);
+
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
+
+ // Kill the branch.
+ Br->eraseFromParent();
+
+ // Replace all uses of the entry with the predecessor, in case there
+ // are phis in the cleanup.
+ Entry->replaceAllUsesWith(Pred);
+
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
+ // Kill the entry block.
+ Entry->eraseFromParent();
+
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+
+ return Pred;
+}
+
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ EHScopeStack::Cleanup::Flags flags,
+ llvm::Value *ActiveFlag) {
+ // EH cleanups always occur within a terminate scope.
+ if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();
+
+ // If there's an active flag, load it and skip the cleanup if it's
+ // false.
+ llvm::BasicBlock *ContBB = 0;
+ if (ActiveFlag) {
+ ContBB = CGF.createBasicBlock("cleanup.done");
+ llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
+ llvm::Value *IsActive
+ = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
+ CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
+ CGF.EmitBlock(CleanupBB);
+ }
+
+ // Ask the cleanup to emit itself.
+ Fn->Emit(CGF, flags);
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+ // Emit the continuation block if there was an active flag.
+ if (ActiveFlag)
+ CGF.EmitBlock(ContBB);
+
+ // Leave the terminate scope.
+ if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
+}
+
+static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
+ llvm::BasicBlock *From,
+ llvm::BasicBlock *To) {
+ // Exit is the exit block of a cleanup, so it always terminates in
+ // an unconditional branch or a switch.
+ llvm::TerminatorInst *Term = Exit->getTerminator();
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
+ Br->setSuccessor(0, To);
+ } else {
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
+ for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
+ if (Switch->getSuccessor(I) == From)
+ Switch->setSuccessor(I, To);
+ }
+}
+
+/// We don't need a normal entry block for the given cleanup.
+/// Optimistic fixup branches can cause such a block to come into
+/// existence anyway; if so, destroy it.
+///
+/// The validity of this transformation is very much specific to the
+/// exact ways in which we form branches to cleanup entries.
+static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &scope) {
+ llvm::BasicBlock *entry = scope.getNormalBlock();
+ if (!entry) return;
+
+ // Replace all the uses with unreachable.
+ llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
+ for (llvm::BasicBlock::use_iterator
+ i = entry->use_begin(), e = entry->use_end(); i != e; ) {
+ llvm::Use &use = i.getUse();
+ ++i;
+
+ use.set(unreachableBB);
+
+ // The only uses should be fixup switches.
+ llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
+ if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
+ // Replace the switch with a branch.
+ llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);
+
+ // The switch operand is a load from the cleanup-dest alloca.
+ llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
+
+ // Destroy the switch.
+ si->eraseFromParent();
+
+ // Destroy the load.
+ assert(condition->getOperand(0) == CGF.NormalCleanupDest);
+ assert(condition->use_empty());
+ condition->eraseFromParent();
+ }
+ }
+
+ assert(entry->use_empty());
+ delete entry;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Remember activation information.
+ bool IsActive = Scope.isActive();
+ llvm::Value *NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
+ llvm::Value *EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
+ assert(Scope.hasEHBranches() == (EHEntry != 0));
+ bool RequiresEHCleanup = (EHEntry != 0);
+ EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0 && IsActive);
+
+ // Branch-through fall-throughs leave the insertion point set to the
+ // end of the last cleanup, which points to the current scope. The
+ // rest of IR gen doesn't need to worry about this; it only happens
+ // during the execution of PopCleanupBlocks().
+ bool HasPrebranchedFallthrough =
+ (FallthroughSource && FallthroughSource->getTerminator());
+
+ // If this is a normal cleanup, then having a prebranched
+ // fallthrough implies that the fallthrough source unconditionally
+ // jumps here.
+ assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
+ (Scope.getNormalBlock() &&
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock()));
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // If we have a prebranched fallthrough into an inactive normal
+ // cleanup, rewrite it so that it leads to the appropriate place.
+ if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+ llvm::BasicBlock *prebranchDest;
+
+ // If the prebranch is semantically branching through the next
+ // cleanup, just forward it to the next block, leaving the
+ // insertion point in the prebranched block.
+ if (FallthroughIsBranchThrough) {
+ EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));
+
+ // Otherwise, we need to make a new block. If the normal cleanup
+ // isn't being used at all, we could actually reuse the normal
+ // entry block, but this is simpler, and it avoids conflicts with
+ // dead optimistic fixup branches.
+ } else {
+ prebranchDest = createBasicBlock("forwarded-prebranch");
+ EmitBlock(prebranchDest);
+ }
+
+ llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
+ assert(normalEntry && !normalEntry->use_empty());
+
+ ForwardPrebranchedFallthrough(FallthroughSource,
+ normalEntry, prebranchDest);
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::Cleanup *Fn =
+ reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+
+ EHScopeStack::Cleanup::Flags cleanupFlags;
+ if (Scope.isNormalCleanup())
+ cleanupFlags.setIsNormalCleanupKind();
+ if (Scope.isEHCleanup())
+ cleanupFlags.setIsEHCleanupKind();
+
+ if (!RequiresNormalCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup();
+ } else {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // I. Set up the fallthrough edge in.
+
+ CGBuilderTy::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough) {
+ if (!HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ } else if (FallthroughSource) {
+ assert(!IsActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = Builder.saveAndClearIP();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ EmitBlock(NormalEntry);
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = 0;
+ if (Scope.hasBranchThroughs() ||
+ (FallthroughSource && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ llvm::BasicBlock *FallthroughDest = 0;
+ SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest || !IsActive);
+
+ // TODO: clean up the possibly dead stores to the cleanup dest slot.
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (FallthroughSource && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ if (HasFallthrough)
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
+
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
+
+ // If there aren't any enclosing cleanups, we can resolve all
+ // the fixups now.
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(*this, Switch, NormalEntry);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+ }
+
+ // IV. Pop the cleanup and emit it.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!HasFallthrough && FallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!IsActive);
+ Builder.restoreIP(savedInactiveFallthroughIP);
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (HasFallthrough && FallthroughDest) {
+ assert(!FallthroughIsBranchThrough);
+ EmitBlock(FallthroughDest);
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (HasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ Builder.ClearInsertionPoint();
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+ EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+ }
+ }
+
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ EmitBlock(EHEntry);
+
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+
+ Builder.CreateBr(getEHDispatchBlock(EHParent));
+
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEntry(*this, EHEntry);
+ }
+}
+
+/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+/// specified destination obviously has no cleanups to run. 'false' is always
+/// a conservatively correct answer for this method.
+bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator TopCleanup =
+ EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
+ return true;
+
+ // Otherwise, we might need some cleanups.
+ return false;
+}
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = 0;
+
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // Otherwise, thread through all the normal cleanups in scope.
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
+}
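+
+// An illustrative example (hypothetical types):
+//
+//   while (...) { A a; B b; if (p) break; }
+//
+// The break must run ~B() and ~A() first, so the branch is pointed at
+// B's cleanup entry, recorded as a branch-through on B's scope and as
+// a branch-after on A's scope, and the destination index stored in
+// cleanup.dest.slot lets each cleanup's epilogue route it onward.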
+
+static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed a normal block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup();
+ I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) return true;
+ I = S.getEnclosingNormalCleanup();
+ }
+
+ return false;
+}
+
+static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator cleanup) {
+ // If we needed an EH block for any reason, that counts.
+ if (EHStack.find(cleanup)->hasEHBranches())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ i = EHStack.getInnermostEHScope(); i != cleanup; ) {
+ assert(cleanup.strictlyEncloses(i));
+
+ EHScope &scope = *EHStack.find(i);
+ if (scope.hasEHBranches())
+ return true;
+
+ i = scope.getEnclosingEHScope();
+ }
+
+ return false;
+}
+
+enum ForActivation_t {
+ ForActivation,
+ ForDeactivation
+};
+
+/// The given cleanup block is changing activation state. Configure a
+/// cleanup variable if necessary.
+///
+/// It would be good if we had some way of determining if there were
+/// extra uses *after* the change-over point.
+static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
+ EHScopeStack::stable_iterator C,
+ ForActivation_t kind,
+ llvm::Instruction *dominatingIP) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
+
+ // We always need the flag if we're activating the cleanup in a
+ // conditional context, because we have to assume that the current
+ // location doesn't necessarily dominate the cleanup's code.
+ bool isActivatedInConditional =
+ (kind == ForActivation && CGF.isInConditionalBranch());
+
+ bool needFlag = false;
+
+ // Calculate whether the cleanup was used:
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup() &&
+ (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
+ Scope.setTestFlagInNormalCleanup();
+ needFlag = true;
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup() &&
+ (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
+ Scope.setTestFlagInEHCleanup();
+ needFlag = true;
+ }
+
+ // If it hasn't yet been used as either, we're done.
+ if (!needFlag) return;
+
+ llvm::AllocaInst *var = Scope.getActiveFlag();
+ if (!var) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Scope.setActiveFlag(var);
+
+ assert(dominatingIP && "no existing variable and no dominating IP!");
+
+ // Initialize to true or false depending on whether it was
+ // active up to this point.
+ llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);
+
+ // If we're in a conditional block, ignore the dominating IP and
+ // use the outermost conditional branch.
+ if (CGF.isInConditionalBranch()) {
+ CGF.setBeforeOutermostConditional(value, var);
+ } else {
+ new llvm::StoreInst(value, var, dominatingIP);
+ }
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
+}
+
+/// Activate a cleanup that was created in an inactive state.
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);
+
+ Scope.setActive(true);
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
+ assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(Scope.isActive() && "double deactivation");
+
+ // If it's the top of the stack, just pop it.
+ if (C == EHStack.stable_begin()) {
+ // If it's a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ return;
+ }
+
+ // Otherwise, follow the general case.
+ SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);
+
+ Scope.setActive(false);
+}
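+
+// Illustrative client pattern (hypothetical type T): when emitting
+//
+//   new T[n]
+//
+// the construction loop pushes a partial-array destruction cleanup in
+// an active state and deactivates it once every element has been
+// constructed. If that cleanup is still on top of the stack it is
+// simply popped above; otherwise the i1 active flag is rewritten so
+// later emissions of the cleanup test it and skip the destructor calls.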
+
+llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
+}
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ QualType TempType,
+ llvm::Value *Ptr) {
+ pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
+ /*useEHCleanup*/ true);
+}
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
new file mode 100644
index 0000000..7726e44
--- /dev/null
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -0,0 +1,539 @@
+//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for cleanups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCLEANUP_H
+#define CLANG_CODEGEN_CGCLEANUP_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGCleanup.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+ llvm::BasicBlock *CachedEHDispatchBlock;
+
+ EHScopeStack::stable_iterator EnclosingEHScope;
+
+ class CommonBitFields {
+ friend class EHScope;
+ unsigned Kind : 2;
+ };
+ enum { NumCommonBits = 2 };
+
+protected:
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumHandlers : 32 - NumCommonBits;
+ };
+
+ class CleanupBitFields {
+ friend class EHCleanupScope;
+ unsigned : NumCommonBits;
+
+ /// Whether this cleanup needs to be run along normal edges.
+ unsigned IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ unsigned IsEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ unsigned IsActive : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ unsigned TestFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ unsigned TestFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : 32 - 17 - NumCommonBits; // currently 13
+ };
+
+ class FilterBitFields {
+ friend class EHFilterScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumFilters : 32 - NumCommonBits;
+ };
+
+ union {
+ CommonBitFields CommonBits;
+ CatchBitFields CatchBits;
+ CleanupBitFields CleanupBits;
+ FilterBitFields FilterBits;
+ };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
+ : CachedLandingPad(0), CachedEHDispatchBlock(0),
+ EnclosingEHScope(enclosingEHScope) {
+ CommonBits.Kind = kind;
+ }
+
+ Kind getKind() const { return static_cast<Kind>(CommonBits.Kind); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *block) {
+ CachedLandingPad = block;
+ }
+
+ llvm::BasicBlock *getCachedEHDispatchBlock() const {
+ return CachedEHDispatchBlock;
+ }
+
+ void setCachedEHDispatchBlock(llvm::BasicBlock *block) {
+ CachedEHDispatchBlock = block;
+ }
+
+ bool hasEHBranches() const {
+ if (llvm::BasicBlock *block = getCachedEHDispatchBlock())
+ return !block->use_empty();
+ return false;
+ }
+
+ EHScopeStack::stable_iterator getEnclosingEHScope() const {
+ return EnclosingEHScope;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective-C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ bool isCatchAll() const { return Type == 0; }
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned numHandlers,
+ EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Catch, enclosingEHScope) {
+ CatchBits.NumHandlers = numHandlers;
+ }
+
+ unsigned getNumHandlers() const {
+ return CatchBits.NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHCleanupScope : public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet.
+ llvm::AllocaInst *ActiveFlag;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+ /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+ };
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
+ }
+
+ EHCleanupScope(bool isNormal, bool isEH, bool isActive,
+ unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal,
+ EHScopeStack::stable_iterator enclosingEH)
+ : EHScope(EHScope::Cleanup, enclosingEH), EnclosingNormal(enclosingNormal),
+ NormalBlock(0), ActiveFlag(0), ExtInfo(0) {
+ CleanupBits.IsNormalCleanup = isNormal;
+ CleanupBits.IsEHCleanup = isEH;
+ CleanupBits.IsActive = isActive;
+ CleanupBits.TestFlagInNormalCleanup = false;
+ CleanupBits.TestFlagInEHCleanup = false;
+ CleanupBits.CleanupSize = cleanupSize;
+ CleanupBits.FixupDepth = fixupDepth;
+
+ assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
+ }
+
+ ~EHCleanupScope() {
+ delete ExtInfo;
+ }
+
+ bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return CleanupBits.IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return getCachedEHDispatchBlock(); }
+ void setEHBlock(llvm::BasicBlock *BB) { setCachedEHDispatchBlock(BB); }
+
+ bool isActive() const { return CleanupBits.IsActive; }
+ void setActive(bool A) { CleanupBits.IsActive = A; }
+
+ llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
+ void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
+
+ void setTestFlagInNormalCleanup() {
+ CleanupBits.TestFlagInNormalCleanup = true;
+ }
+ bool shouldTestFlagInNormalCleanup() const {
+ return CleanupBits.TestFlagInNormalCleanup;
+ }
+
+ void setTestFlagInEHCleanup() {
+ CleanupBits.TestFlagInEHCleanup = true;
+ }
+ bool shouldTestFlagInEHCleanup() const {
+ return CleanupBits.TestFlagInEHCleanup;
+ }
+
+ unsigned getFixupDepth() const { return CleanupBits.FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+
+ size_t getCleanupSize() const { return CleanupBits.CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
+ }
+
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block))
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
+
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block);
+ }
+
+ /// Determines if this cleanup scope has any branch throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == Cleanup);
+ }
+};
+
+/// An exception scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned numFilters)
+ : EHScope(Filter, EHScopeStack::stable_end()) {
+ FilterBits.NumFilters = numFilters;
+ }
+
+ static size_t getSizeForNumFilters(unsigned numFilters) {
+ return sizeof(EHFilterScope) + numFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return FilterBits.NumFilters; }
+
+ void setFilter(unsigned i, llvm::Value *filterValue) {
+ assert(i < getNumFilters());
+ getFilters()[i] = filterValue;
+ }
+
+ llvm::Value *getFilter(unsigned i) const {
+ assert(i < getNumFilters());
+ return getFilters()[i];
+ }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Filter;
+ }
+};
+
+/// An exception scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+ EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Terminate, enclosingEHScope) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += static_cast<const EHCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHCatchScope &scope = cast<EHCatchScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ StartOfData += EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers());
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHTerminateScope &scope = cast<EHTerminateScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ StartOfData += EHTerminateScope::getSize();
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
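+
+// Illustrative invariant relating the two iterator kinds: for a raw
+// iterator it, stabilize(it) records its distance from EndOfBuffer, so
+// find(stabilize(it)) == it still holds even if pushing more scopes
+// has forced the buffer to be reallocated in the meantime.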
+
+}
+}
+
+#endif
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..d286d24
--- /dev/null
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,2668 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
+ : CGM(CGM), DBuilder(CGM.getModule()),
+ BlockLiteralGenericSet(false) {
+ CreateCompileUnit();
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(LexicalBlockStack.empty() &&
+ "Region stack mismatch, stack not empty!");
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+ // If the new location isn't valid return.
+ if (!Loc.isValid()) return;
+
+ CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
+
+ // If we've changed files in the middle of a lexical scope, go ahead
+ // and create a new lexical-block-file scope if the file differs from
+ // the one recorded in the current scope.
+ if (LexicalBlockStack.empty()) return;
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
+ PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
+
+ if (PCLoc.isInvalid() || PPLoc.isInvalid() ||
+ !strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ return;
+
+ llvm::MDNode *LB = LexicalBlockStack.back();
+ llvm::DIScope Scope = llvm::DIScope(LB);
+ if (Scope.isLexicalBlockFile()) {
+ llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(LB);
+ llvm::DIDescriptor D
+ = DBuilder.createLexicalBlockFile(LBF.getScope(),
+ getOrCreateFile(CurLoc));
+ llvm::MDNode *N = D;
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.push_back(N);
+ } else if (Scope.isLexicalBlock()) {
+ llvm::DIDescriptor D
+ = DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
+ llvm::MDNode *N = D;
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.push_back(N);
+ }
+}
+
+/// getContextDescriptor - Get context info for the decl.
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
+ if (!Context)
+ return TheCU;
+
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
+ if (!RDecl->isDependentType()) {
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ getOrCreateMainFile());
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return TheCU;
+}
+
+/// getFunctionName - Get function name for the given FunctionDecl. If the
+/// name is constructed on demand (e.g. C++ destructor) then the name
+/// is stored on the side.
+StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+ assert(FD && "Invalid FunctionDecl!");
+ IdentifierInfo *FII = FD->getIdentifier();
+ FunctionTemplateSpecializationInfo *Info
+ = FD->getTemplateSpecializationInfo();
+ if (!Info && FII)
+ return FII->getName();
+
+ // Otherwise construct human readable name for debug info.
+ std::string NS = FD->getNameAsString();
+
+ // Add any template specialization args.
+ if (Info) {
+ const TemplateArgumentList *TArgs = Info->TemplateArguments;
+ const TemplateArgument *Args = TArgs->data();
+ unsigned NumArgs = TArgs->size();
+ PrintingPolicy Policy(CGM.getLangOpts());
+ NS += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+ }
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
+ memcpy(StrPtr, NS.data(), NS.length());
+ return StringRef(StrPtr, NS.length());
+}
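+
+// Illustrative example: for a specialization of
+//
+//   template <class T> T id(T);
+//
+// with T = int, Info is non-null, so the printed name "id<int>" is
+// assembled here and copied into DebugInfoNames; the returned
+// StringRef stays valid because that allocator owns the bytes for the
+// lifetime of the CGDebugInfo object.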
+
+StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
+ SmallString<256> MethodName;
+ llvm::raw_svector_ostream OS(MethodName);
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const ObjCImplementationDecl *OID =
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCInterfaceDecl *OID =
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryImplDecl *OCD =
+ dyn_cast<const ObjCCategoryImplDecl>(DC)){
+ OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
+ OCD->getIdentifier()->getNameStart() << ')';
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+
+ char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
+ memcpy(StrPtr, MethodName.begin(), OS.tell());
+ return StringRef(StrPtr, OS.tell());
+}
+
+/// getSelectorName - Return selector name. This is used for debugging
+/// info.
+StringRef CGDebugInfo::getSelectorName(Selector S) {
+ const std::string &SName = S.getAsString();
+ char *StrPtr = DebugInfoNames.Allocate<char>(SName.size());
+ memcpy(StrPtr, SName.data(), SName.size());
+ return StringRef(StrPtr, SName.size());
+}
+
+/// getClassName - Get class name including template argument list.
+StringRef
+CGDebugInfo::getClassName(const RecordDecl *RD) {
+ const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD);
+ if (!Spec)
+ return RD->getName();
+
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
+ }
+ StringRef Name = RD->getIdentifier()->getName();
+ PrintingPolicy Policy(CGM.getLangOpts());
+ std::string TemplateArgList =
+ TemplateSpecializationType::PrintTemplateArgumentList(Args, NumArgs, Policy);
+
+ // Copy this name on the side and use its reference.
+ size_t Length = Name.size() + TemplateArgList.size();
+ char *StrPtr = DebugInfoNames.Allocate<char>(Length);
+ memcpy(StrPtr, Name.data(), Name.size());
+ memcpy(StrPtr + Name.size(), TemplateArgList.data(), TemplateArgList.size());
+ return StringRef(StrPtr, Length);
+}
+
+/// getOrCreateFile - Get the file debug info descriptor for the input location.
+llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
+ if (!Loc.isValid())
+ // If Location is not valid then use main input file.
+ return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
+ // If the location is not valid then use main input file.
+ return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
+
+ // Cache the results.
+ const char *fname = PLoc.getFilename();
+ llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
+ DIFileCache.find(fname);
+
+ if (it != DIFileCache.end()) {
+ // Verify that the information still exists.
+ if (&*it->second)
+ return llvm::DIFile(cast<llvm::MDNode>(it->second));
+ }
+
+ llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname());
+
+ DIFileCache[fname] = F;
+ return F;
+}
+
+/// getOrCreateMainFile - Get the file info for main compile unit.
+llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
+ return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
+}
+
+/// getLineNumber - Get line number for the location. If location is invalid
+/// then use current location.
+unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.isValid()? PLoc.getLine() : 0;
+}
+
+/// getColumnNumber - Get column number for the location. If location is
+/// invalid then use current location.
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.isValid()? PLoc.getColumn() : 0;
+}
+
+StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CGM.getCodeGenOpts().DebugCompilationDir.empty())
+ return CGM.getCodeGenOpts().DebugCompilationDir;
+
+ if (!CWDName.empty())
+ return CWDName;
+ SmallString<256> CWD;
+ llvm::sys::fs::current_path(CWD);
+ char *CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
+ memcpy(CompDirnamePtr, CWD.data(), CWD.size());
+ return CWDName = StringRef(CompDirnamePtr, CWD.size());
+}
+
+/// CreateCompileUnit - Create new compile unit.
+void CGDebugInfo::CreateCompileUnit() {
+
+ // Get absolute path name.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ if (MainFileName.empty())
+ MainFileName = "<unknown>";
+
+ // The main file name provided via the "-main-file-name" option contains just
+ // the file name itself with no path information. This file name may have had
+ // a relative path, so we look into the actual file entry for the main
+ // file to determine the real absolute path for the file.
+ std::string MainFileDir;
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ MainFileDir = MainFile->getDir()->getName();
+ if (MainFileDir != ".")
+ MainFileName = MainFileDir + "/" + MainFileName;
+ }
+
+ // Save filename string.
+ char *FilenamePtr = DebugInfoNames.Allocate<char>(MainFileName.length());
+ memcpy(FilenamePtr, MainFileName.c_str(), MainFileName.length());
+ StringRef Filename(FilenamePtr, MainFileName.length());
+
+ unsigned LangTag;
+ const LangOptions &LO = CGM.getLangOpts();
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+ std::string Producer = getClangFullVersion();
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+ // Create new compile unit.
+ DBuilder.createCompileUnit(
+ LangTag, Filename, getCurrentDirname(),
+ Producer,
+ LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+ // FIXME - Eliminate TheCU.
+ TheCU = llvm::DICompileUnit(DBuilder.getCU());
+}
+
+/// CreateType - Get the Basic type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
+ unsigned Encoding = 0;
+ const char *BTName = NULL;
+ switch (BT->getKind()) {
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("Unexpected builtin type");
+ case BuiltinType::NullPtr:
+ return DBuilder.
+ createNullPtrType(BT->getName(CGM.getContext().getLangOpts()));
+ case BuiltinType::Void:
+ return llvm::DIType();
+ case BuiltinType::ObjCClass:
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
+ case BuiltinType::ObjCId: {
+ // typedef struct objc_class *Class;
+ // typedef struct objc_object {
+ // Class isa;
+ // } *id;
+
+ // TODO: Cache these two types to avoid duplicates.
+ llvm::DIType OCTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+
+ llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
+
+ SmallVector<llvm::Value *, 16> EltTys;
+ llvm::DIType FieldTy =
+ DBuilder.createMemberType(getOrCreateMainFile(), "isa",
+ getOrCreateMainFile(), 0, Size,
+ 0, 0, 0, ISATy);
+ EltTys.push_back(FieldTy);
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+
+ return DBuilder.createStructType(TheCU, "objc_object",
+ getOrCreateMainFile(),
+ 0, 0, 0, 0, Elements);
+ }
+ case BuiltinType::ObjCSel: {
+ return
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_selector", getOrCreateMainFile(),
+ 0);
+ }
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::Char16:
+ case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::UInt128:
+ case BuiltinType::ULong:
+ case BuiltinType::WChar_U:
+ case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Int128:
+ case BuiltinType::Long:
+ case BuiltinType::WChar_S:
+ case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
+ case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ }
+
+ switch (BT->getKind()) {
+ case BuiltinType::Long: BTName = "long int"; break;
+ case BuiltinType::LongLong: BTName = "long long int"; break;
+ case BuiltinType::ULong: BTName = "long unsigned int"; break;
+ case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
+ default:
+ BTName = BT->getName(CGM.getContext().getLangOpts());
+ break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(BT);
+ uint64_t Align = CGM.getContext().getTypeAlign(BT);
+ llvm::DIType DbgTy =
+ DBuilder.createBasicType(BTName, Size, Align, Encoding);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
+ // Bit size, align and offset of the type.
+ unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ llvm::DIType DbgTy =
+ DBuilder.createBasicType("complex", Size, Align, Encoding);
+
+ return DbgTy;
+}
+
+/// CreateQualifiedType - Get the qualified type from the cache or create
+/// a new one if necessary.
+llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
+ QualifierCollector Qc;
+ const Type *T = Qc.strip(Ty);
+
+ // Ignore these qualifiers for now.
+ Qc.removeObjCGCAttr();
+ Qc.removeAddressSpace();
+ Qc.removeObjCLifetime();
+
+ // We will create one Derived type for one qualifier and recurse to handle any
+ // additional ones.
+ unsigned Tag;
+ if (Qc.hasConst()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Qc.removeConst();
+ } else if (Qc.hasVolatile()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Qc.removeVolatile();
+ } else if (Qc.hasRestrict()) {
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Qc.removeRestrict();
+ } else {
+ assert(Qc.empty() && "Unknown type qualifier for debug info");
+ return getOrCreateType(QualType(T, 0), Unit);
+ }
+
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ llvm::DIType DbgTy = DBuilder.createQualifiedType(Tag, FromTy);
+
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile Unit) {
+ llvm::DIType DbgTy =
+ CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+// Creates a forward declaration for a RecordDecl in the given context.
+llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
+ llvm::DIDescriptor Ctx) {
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ // Get the tag.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ unsigned Tag = 0;
+ if (CXXDecl) {
+ RDName = getClassName(RD);
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+ else if (RD->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else
+ llvm_unreachable("Unknown RecordDecl type!");
+
+ // Create the type.
+ return DBuilder.createForwardDecl(Tag, RDName, DefUnit, Line);
+}
+
+// Walk up the context chain and create forward decls for record decls,
+// and normal descriptors for namespaces.
+llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
+ if (!Context)
+ return TheCU;
+
+ // See if we already have the parent.
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(Context)) {
+ if (!RD->isDependentType()) {
+ llvm::DIType Ty = getOrCreateLimitedType(CGM.getContext().getTypeDeclType(RD),
+ getOrCreateMainFile());
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return TheCU;
+}
+
+/// CreatePointeeType - Create the pointee's type. If the pointee is a
+/// record, emit only a forward declaration when debug info size reduction
+/// is enabled.
+llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
+ llvm::DIFile Unit) {
+ if (!CGM.getCodeGenOpts().LimitDebugInfo)
+ return getOrCreateType(PointeeTy, Unit);
+
+ // Limit debug info for the pointee type.
+
+ // If we have an existing type, use that, it's still smaller than creating
+ // a new type.
+ llvm::DIType Ty = getTypeOrNull(PointeeTy);
+ if (Ty.Verify()) return Ty;
+
+ // Handle qualifiers.
+ if (PointeeTy.hasLocalQualifiers())
+ return CreateQualifiedType(PointeeTy, Unit);
+
+ if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
+ RecordDecl *RD = RTy->getDecl();
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+ llvm::DIType RetTy = createRecordFwdDecl(RD, FDContext);
+ TypeCache[QualType(RTy, 0).getAsOpaquePtr()] = RetTy;
+ return RetTy;
+ }
+ return getOrCreateType(PointeeTy, Unit);
+}
+
+llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
+ const Type *Ty,
+ QualType PointeeTy,
+ llvm::DIFile Unit) {
+ if (Tag == llvm::dwarf::DW_TAG_reference_type)
+ return DBuilder.createReferenceType(CreatePointeeType(PointeeTy, Unit));
+
+ // Bit size, align and offset of the type.
+ // Size is always the size of a pointer. We can't use getTypeSize here
+ // because that does not return the correct value for references.
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit),
+ Size, Align);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DIFile Unit) {
+ if (BlockLiteralGenericSet)
+ return BlockLiteralGeneric;
+
+ SmallVector<llvm::Value *, 8> EltTys;
+ llvm::DIType FieldTy;
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+ llvm::DIArray Elements;
+ llvm::DIType EltTy, DescTy;
+
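+  // First build the generic block descriptor: two unsigned longs (a
+  // reserved word and the block's size) that the literal below points at.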
+ FieldOffset = 0;
+ FType = CGM.getContext().UnsignedLongTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
+
+ Elements = DBuilder.getOrCreateArray(EltTys);
+ EltTys.clear();
+
+ unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
+ unsigned LineNo = getLineNumber(CurLoc);
+
+ EltTy = DBuilder.createStructType(Unit, "__block_descriptor",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+
+ DescTy = DBuilder.createPointerType(EltTy, Size);
+
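+  // Now the block literal itself: isa, flags, reserved, the invoke function
+  // pointer, and a pointer to the descriptor built above.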
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
+
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FieldTy = DescTy;
+ FieldSize = CGM.getContext().getTypeSize(Ty);
+ FieldAlign = CGM.getContext().getTypeAlign(Ty);
+ FieldTy = DBuilder.createMemberType(Unit, "__descriptor", Unit,
+ LineNo, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DBuilder.getOrCreateArray(EltTys);
+
+ EltTy = DBuilder.createStructType(Unit, "__block_literal_generic",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
+
+ BlockLiteralGenericSet = true;
+ BlockLiteralGeneric = DBuilder.createPointerType(EltTy, Size);
+ return BlockLiteralGeneric;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
+ // Typedefs are derived from some other type. If we have a typedef of a
+ // typedef, make sure to emit the whole chain.
+ llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+ if (!Src.Verify())
+ return llvm::DIType();
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
+ const TypedefNameDecl *TyDecl = Ty->getDecl();
+
+ llvm::DIDescriptor TypedefContext =
+ getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
+
+ return
+ DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TypedefContext);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DIFile Unit) {
+ SmallVector<llvm::Value *, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+ // Set up remainder of arguments if there is a prototype.
+ // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
+ if (isa<FunctionNoProtoType>(Ty))
+ EltTys.push_back(DBuilder.createUnspecifiedParameter());
+ else if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ }
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+
+ llvm::DIType DbgTy = DBuilder.createSubroutineType(Unit, EltTypeArray);
+ return DbgTy;
+}
+
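+/// CollectRecordStaticVars - A helper function to collect debug info for
+/// static data members of a record whose initializers evaluate to integer
+/// constants.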
+void CGDebugInfo::
+CollectRecordStaticVars(const RecordDecl *RD, llvm::DIType FwdDecl) {
+
+ for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
+ I != E; ++I)
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ if (V->getInit()) {
+ const APValue *Value = V->evaluateValue();
+ if (Value && Value->isInt()) {
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt());
+
+ // Create the descriptor for static variable.
+ llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
+ StringRef VName = V->getName();
+ llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
+ // Do not use DIGlobalVariable for enums.
+ if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
+ DBuilder.createStaticVariable(FwdDecl, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, CI);
+ }
+ }
+ }
+ }
+}
+
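+/// createFieldType - Create a member type for a record field, applying any
+/// bit-field size override and access-specifier flags.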
+llvm::DIType CGDebugInfo::createFieldType(StringRef name,
+ QualType type,
+ uint64_t sizeInBitsOverride,
+ SourceLocation loc,
+ AccessSpecifier AS,
+ uint64_t offsetInBits,
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope) {
+ llvm::DIType debugType = getOrCreateType(type, tunit);
+
+ // Get the location for the field.
+ llvm::DIFile file = getOrCreateFile(loc);
+ unsigned line = getLineNumber(loc);
+
+ uint64_t sizeInBits = 0;
+ unsigned alignInBits = 0;
+ if (!type->isIncompleteArrayType()) {
+ llvm::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
+
+ if (sizeInBitsOverride)
+ sizeInBits = sizeInBitsOverride;
+ }
+
+ unsigned flags = 0;
+ if (AS == clang::AS_private)
+ flags |= llvm::DIDescriptor::FlagPrivate;
+ else if (AS == clang::AS_protected)
+ flags |= llvm::DIDescriptor::FlagProtected;
+
+ return DBuilder.createMemberType(scope, name, file, line, sizeInBits,
+ alignInBits, offsetInBits, flags, debugType);
+}
+
+/// CollectRecordFields - A helper function to collect debug info for
+/// record fields. This is used while creating debug info entry for a Record.
+void CGDebugInfo::
+CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Value *> &elements,
+ llvm::DIType RecordTy) {
+ unsigned fieldNo = 0;
+ const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
+
+  // For C++11 lambdas a Field will be the same as a Capture, but the Capture
+  // has the name and the location of the variable, so we should iterate over
+  // both concurrently.
+ if (CXXDecl && CXXDecl->isLambda()) {
+ RecordDecl::field_iterator Field = CXXDecl->field_begin();
+ unsigned fieldno = 0;
+ for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
+ E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
+ const LambdaExpr::Capture C = *I;
+ // TODO: Need to handle 'this' in some way by probably renaming the
+ // this of the lambda class and having a field member of 'this'.
+ if (C.capturesVariable()) {
+ VarDecl *V = C.getCapturedVar();
+ llvm::DIFile VUnit = getOrCreateFile(C.getLocation());
+ StringRef VName = V->getName();
+ uint64_t SizeInBitsOverride = 0;
+ if (Field->isBitField()) {
+ SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+ llvm::DIType fieldType
+ = createFieldType(VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno),
+ VUnit, RecordTy);
+ elements.push_back(fieldType);
+ }
+ }
+ } else {
+ bool IsMsStruct = record->hasAttr<MsStructAttr>();
+ const FieldDecl *LastFD = 0;
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end();
+ I != E; ++I, ++fieldNo) {
+ FieldDecl *field = *I;
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
+ --fieldNo;
+ continue;
+ }
+ LastFD = field;
+ }
+
+ StringRef name = field->getName();
+ QualType type = field->getType();
+
+ // Ignore unnamed fields unless they're anonymous structs/unions.
+ if (name.empty() && !type->isRecordType()) {
+ LastFD = field;
+ continue;
+ }
+
+ uint64_t SizeInBitsOverride = 0;
+ if (field->isBitField()) {
+ SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+
+ llvm::DIType fieldType
+ = createFieldType(name, type, SizeInBitsOverride,
+ field->getLocation(), field->getAccess(),
+ layout.getFieldOffset(fieldNo), tunit, RecordTy);
+
+ elements.push_back(fieldType);
+ }
+ }
+}
+
+/// getOrCreateMethodType - A CXXMethodDecl's type is a FunctionType that
+/// does not include the implicit "this" pointer. Use this routine to get a
+/// method type which includes the "this" pointer.
+llvm::DIType
+CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile Unit) {
+ llvm::DIType FnTy
+ = getOrCreateType(QualType(Method->getType()->getAs<FunctionProtoType>(),
+ 0),
+ Unit);
+
+ // Add "this" pointer.
+ llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
+ assert (Args.getNumElements() && "Invalid number of arguments!");
+
+ SmallVector<llvm::Value *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(Args.getElement(0));
+
+ if (!Method->isStatic()) {
+ // "this" pointer is always first argument.
+ QualType ThisPtr = Method->getThisType(CGM.getContext());
+
+ const CXXRecordDecl *RD = Method->getParent();
+ if (isa<ClassTemplateSpecializationDecl>(RD)) {
+ // Create pointer type directly in this case.
+ const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
+ QualType PointeeTy = ThisPtrTy->getPointeeType();
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
+ llvm::DIType ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+      // TODO: This and the artificial type below are misleading; the
+      // types aren't artificial, the argument is, but the current
+      // metadata doesn't represent that.
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ } else {
+ llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ }
+ }
+
+ // Copy rest of the arguments.
+ for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
+ Elts.push_back(Args.getElement(i));
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+
+ return DBuilder.createSubroutineType(Unit, EltTypeArray);
+}
+
+/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
+/// inside a function.
+static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
+ if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ return isFunctionLocalClass(NRD);
+ if (isa<FunctionDecl>(RD->getDeclContext()))
+ return true;
+ return false;
+}
+
+/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
+/// a single member function GlobalDecl.
+llvm::DISubprogram
+CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile Unit,
+ llvm::DIType RecordTy) {
+ bool IsCtorOrDtor =
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+
+ StringRef MethodName = getFunctionName(Method);
+ llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
+
+ // Since a single ctor/dtor corresponds to multiple functions, it doesn't
+ // make sense to give a single ctor/dtor a linkage name.
+ StringRef MethodLinkageName;
+ if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
+ MethodLinkageName = CGM.getMangledName(Method);
+
+ // Get the location for the method.
+ llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
+ unsigned MethodLine = getLineNumber(Method->getLocation());
+
+ // Collect virtual method info.
+ llvm::DIType ContainingType;
+ unsigned Virtuality = 0;
+ unsigned VIndex = 0;
+
+ if (Method->isVirtual()) {
+ if (Method->isPure())
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ else
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+
+ // It doesn't make sense to give a virtual destructor a vtable index,
+ // since a single destructor has two entries in the vtable.
+ if (!isa<CXXDestructorDecl>(Method))
+ VIndex = CGM.getVTableContext().getMethodVTableIndex(Method);
+ ContainingType = RecordTy;
+ }
+
+ unsigned Flags = 0;
+ if (Method->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+ AccessSpecifier Access = Method->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIDescriptor::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIDescriptor::FlagProtected;
+ if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ } else if (const CXXConversionDecl *CXXC =
+ dyn_cast<CXXConversionDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ }
+ if (Method->hasPrototype())
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+
+ llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
+ llvm::DISubprogram SP =
+ DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/ false,
+ Virtuality, VIndex, ContainingType,
+ Flags, CGM.getLangOpts().Optimize, NULL,
+ TParamsArray);
+
+ SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
+
+ return SP;
+}
+
+/// CollectCXXMemberFunctions - A helper function to collect debug info for
+/// C++ member functions. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy) {
+
+  // Since we want more than just the individual member decls if we
+  // have templated functions, iterate over every declaration to gather
+  // the functions.
+ for(DeclContext::decl_iterator I = RD->decls_begin(),
+ E = RD->decls_end(); I != E; ++I) {
+ Decl *D = *I;
+ if (D->isImplicit() && !D->isUsed())
+ continue;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
+ SE = FTD->spec_end(); SI != SE; ++SI) {
+ FunctionDecl *FD = *SI;
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(FD))
+ EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
+ }
+ }
+}
+
+/// CollectCXXFriends - A helper function to collect debug info for
+/// C++ friend declarations. This is used while creating debug info entry
+/// for a Record.
+void CGDebugInfo::
+CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy) {
+ for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
+ BE = RD->friend_end(); BI != BE; ++BI) {
+ if ((*BI)->isUnsupportedFriend())
+ continue;
+ if (TypeSourceInfo *TInfo = (*BI)->getFriendType())
+ EltTys.push_back(DBuilder.createFriend(RecordTy,
+ getOrCreateType(TInfo->getType(),
+ Unit)));
+ }
+}
+
+/// CollectCXXBases - A helper function to collect debug info for
+/// C++ base classes. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy) {
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI) {
+ unsigned BFlags = 0;
+ uint64_t BaseOffset;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
+
+ if (BI->isVirtual()) {
+      // The virtual base offset offset is negative, but the code generator
+      // emits a DWARF expression that expects a positive number.
+ BaseOffset =
+ 0 - CGM.getVTableContext()
+ .getVirtualBaseOffsetOffset(RD, Base).getQuantity();
+ BFlags = llvm::DIDescriptor::FlagVirtual;
+ } else
+ BaseOffset = RL.getBaseClassOffsetInBits(Base);
+ // FIXME: Inconsistent units for BaseOffset. It is in bytes when
+ // BI->isVirtual() and bits when not.
+
+ AccessSpecifier Access = BI->getAccessSpecifier();
+ if (Access == clang::AS_private)
+ BFlags |= llvm::DIDescriptor::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ BFlags |= llvm::DIDescriptor::FlagProtected;
+
+ llvm::DIType DTy =
+ DBuilder.createInheritance(RecordTy,
+ getOrCreateType(BI->getType(), Unit),
+ BaseOffset, BFlags);
+ EltTys.push_back(DTy);
+ }
+}
+
+/// CollectTemplateParams - A helper function to collect template parameters.
+llvm::DIArray CGDebugInfo::
+CollectTemplateParams(const TemplateParameterList *TPList,
+ const TemplateArgumentList &TAList,
+ llvm::DIFile Unit) {
+ SmallVector<llvm::Value *, 16> TemplateParams;
+ for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
+ const TemplateArgument &TA = TAList[i];
+ const NamedDecl *ND = TPList->getParam(i);
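+    // Only type and integral arguments are represented here; other kinds
+    // (template, expression, pack arguments) are silently dropped.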
+ if (TA.getKind() == TemplateArgument::Type) {
+ llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
+ llvm::DITemplateTypeParameter TTP =
+ DBuilder.createTemplateTypeParameter(TheCU, ND->getName(), TTy);
+ TemplateParams.push_back(TTP);
+ } else if (TA.getKind() == TemplateArgument::Integral) {
+ llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateValueParameter(TheCU, ND->getName(), TTy,
+ TA.getAsIntegral()->getZExtValue());
+ TemplateParams.push_back(TVP);
+ }
+ }
+ return DBuilder.getOrCreateArray(TemplateParams);
+}
+
+/// CollectFunctionTemplateParams - A helper function to collect debug
+/// info for function template parameters.
+llvm::DIArray CGDebugInfo::
+CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
+ if (FD->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
+ const TemplateParameterList *TList =
+ FD->getTemplateSpecializationInfo()->getTemplate()
+ ->getTemplateParameters();
+ return
+ CollectTemplateParams(TList, *FD->getTemplateSpecializationArgs(), Unit);
+ }
+ return llvm::DIArray();
+}
+
+/// CollectCXXTemplateParams - A helper function to collect debug info for
+/// template parameters.
+llvm::DIArray CGDebugInfo::
+CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TSpecial,
+ llvm::DIFile Unit) {
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ PU = TSpecial->getSpecializedTemplateOrPartial();
+
+ TemplateParameterList *TPList = PU.is<ClassTemplateDecl *>() ?
+ PU.get<ClassTemplateDecl *>()->getTemplateParameters() :
+ PU.get<ClassTemplatePartialSpecializationDecl *>()->getTemplateParameters();
+ const TemplateArgumentList &TAList = TSpecial->getTemplateInstantiationArgs();
+ return CollectTemplateParams(TPList, TAList, Unit);
+}
+
+/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
+llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
+ if (VTablePtrType.isValid())
+ return VTablePtrType;
+
+ ASTContext &Context = CGM.getContext();
+
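+  // Model the vtable pointer as a pointer to a pointer-to-function type
+  // named "__vtbl_ptr_type", the name historically emitted by g++ and
+  // understood by gdb.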
+ /* Function type */
+ llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DBuilder.getOrCreateArray(STy);
+ llvm::DIType SubTy = DBuilder.createSubroutineType(Unit, SElements);
+ unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
+ llvm::DIType vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0,
+ "__vtbl_ptr_type");
+ VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
+ return VTablePtrType;
+}
+
+/// getVTableName - Get vtable name for the given Class.
+StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
+  // Construct a gdb-compatible name.
+ std::string Name = "_vptr$" + RD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Name.length());
+ memcpy(StrPtr, Name.data(), Name.length());
+ return StringRef(StrPtr, Name.length());
+}
+
+/// CollectVTableInfo - If the C++ class has vtable info then insert appropriate
+/// debug info entry in EltTys vector.
+void CGDebugInfo::
+CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+
+ // If there is a primary base then it will hold vtable info.
+ if (RL.getPrimaryBase())
+ return;
+
+  // If this class is not dynamic then there is no vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ llvm::DIType VPTR
+ = DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
+ 0, Size, 0, 0, 0,
+ getOrCreateVTablePtrType(Unit));
+ EltTys.push_back(VPTR);
+}
+
+/// getOrCreateRecordType - Emit record type's standalone debug info.
+llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ return T;
+}
+
+/// getOrCreateInterfaceType - Emit an objective c interface type standalone
+/// debug info.
+llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
+ DBuilder.retainType(T);
+ return T;
+}
+
+/// CreateType - get structure or union type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+
+ // Records and classes and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+
+ llvm::DIType FwdDecl = getOrCreateLimitedType(QualType(Ty, 0), DefUnit);
+
+ if (FwdDecl.isForwardDecl())
+ return FwdDecl;
+
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(FwdDecl);
+
+ // Push the struct on region stack.
+ LexicalBlockStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+
+ // Add this to the completed types cache since we're completing it.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
+ // Convert all the elements.
+ SmallVector<llvm::Value *, 16> EltTys;
+
+  // Note: The split of CXXDecl information here is intentional; the
+  // gdb tests depend on a certain ordering at printout. The debug
+  // information offsets are still correct if we merge them all together,
+  // though.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ if (CXXDecl) {
+ CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys);
+ }
+
+ // Collect static variables with initializers and other fields.
+ CollectRecordStaticVars(RD, FwdDecl);
+ CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
+ llvm::DIArray TParamsArray;
+ if (CXXDecl) {
+ CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectCXXFriends(CXXDecl, DefUnit, EltTys, FwdDecl);
+ if (const ClassTemplateSpecializationDecl *TSpecial
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ TParamsArray = CollectCXXTemplateParams(TSpecial, DefUnit);
+ }
+
+ LexicalBlockStack.pop_back();
+ RegionMap.erase(Ty->getDecl());
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+ // FIXME: Magic numbers ahoy! These should be changed when we
+ // get some enums in llvm/Analysis/DebugInfo.h to refer to
+ // them.
+ if (RD->isUnion())
+ FwdDeclNode->replaceOperandWith(10, Elements);
+ else if (CXXDecl) {
+ FwdDeclNode->replaceOperandWith(10, Elements);
+ FwdDeclNode->replaceOperandWith(13, TParamsArray);
+ } else
+ FwdDeclNode->replaceOperandWith(10, Elements);
+
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDeclNode);
+ return llvm::DIType(FwdDeclNode);
+}
+
+/// CreateType - get objective-c object type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
+ llvm::DIFile Unit) {
+ // Ignore protocols.
+ return getOrCreateType(Ty->getBaseType(), Unit);
+}
+
+/// CreateType - get objective-c interface type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DIFile Unit) {
+ ObjCInterfaceDecl *ID = Ty->getDecl();
+ if (!ID)
+ return llvm::DIType();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
+ unsigned Line = getLineNumber(ID->getLocation());
+ unsigned RuntimeLang = TheCU.getLanguage();
+
+ // If this is just a forward declaration return a special forward-declaration
+ // debug type since we won't be able to lay out the entire type.
+ ObjCInterfaceDecl *Def = ID->getDefinition();
+ if (!Def) {
+ llvm::DIType FwdDecl =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ ID->getName(), DefUnit, Line,
+ RuntimeLang);
+ return FwdDecl;
+ }
+
+ ID = Def;
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ unsigned Flags = 0;
+ if (ID->getImplementation())
+ Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
+
+ llvm::DIType RealDecl =
+ DBuilder.createStructType(Unit, ID->getName(), DefUnit,
+ Line, Size, Align, Flags,
+ llvm::DIArray(), RuntimeLang);
+
+  // Insert it into the CompletedTypeCache so that recursive uses will find
+  // it while we're emitting the complete type.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
+ // Push the struct on region stack.
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(RealDecl);
+
+ LexicalBlockStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+
+ // Convert all the elements.
+ SmallVector<llvm::Value *, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = ID->getSuperClass();
+ if (SClass) {
+ llvm::DIType SClassTy =
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ if (!SClassTy.isValid())
+ return llvm::DIType();
+
+ llvm::DIType InhTag =
+ DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ EltTys.push_back(InhTag);
+ }
+
+ for (ObjCContainerDecl::prop_iterator I = ID->prop_begin(),
+ E = ID->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ llvm::MDNode *PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ EltTys.push_back(PropertyNode);
+ }
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
+ unsigned FieldNo = 0;
+ for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
+ Field = Field->getNextIvar(), ++FieldNo) {
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ if (!FieldTy.isValid())
+ return llvm::DIType();
+
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = Field->isBitField()
+ ? Field->getBitWidthValue(CGM.getContext())
+ : CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+    // We can't know the offset of our ivar in the structure if we're using
+    // the non-fragile ABI, and the debugger should ignore the value anyway.
+    // Call it FieldNo+1 due to how debuggers use the information,
+    // e.g. negating the value when it needs a lookup in the dynamic table.
+ uint64_t FieldOffset = CGM.getLangOpts().ObjCNonFragileABI ? FieldNo+1
+ : RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DIDescriptor::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DIDescriptor::FlagPrivate;
+
+ llvm::MDNode *PropertyNode = NULL;
+ if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
+ if (ObjCPropertyImplDecl *PImpD =
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ }
+ }
+ }
+ FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy,
+ PropertyNode);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+ FwdDeclNode->replaceOperandWith(10, Elements);
+
+ LexicalBlockStack.pop_back();
+ return llvm::DIType(FwdDeclNode);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
+ llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ int64_t NumElems = Ty->getNumElements();
+ int64_t LowerBound = 0;
+ if (NumElems == 0)
+    // If the number of elements is not known then this is an unbounded
+    // array. Use Low = 1, Hi = 0 to express such arrays.
+ LowerBound = 1;
+ else
+ --NumElems;
+
+ llvm::Value *Subscript = DBuilder.getOrCreateSubrange(LowerBound, NumElems);
+ llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+ llvm::DIFile Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ } else if (Ty->isDependentSizedArrayType() || Ty->isIncompleteType()) {
+ Size = 0;
+ Align = 0;
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays, do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
+ SmallVector<llvm::Value *, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ if (Ty->isIncompleteArrayType())
+ EltTy = Ty->getElementType();
+ else {
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ int64_t UpperBound = 0;
+ int64_t LowerBound = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) {
+ if (CAT->getSize().getZExtValue())
+ UpperBound = CAT->getSize().getZExtValue() - 1;
+ } else
+ // This is an unbounded array. Use Low = 1, Hi = 0 to express such
+ // arrays.
+ LowerBound = 1;
+
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound,
+ UpperBound));
+ EltTy = Ty->getElementType();
+ }
+ }
+
+ llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
+
+ llvm::DIType DbgTy =
+ DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
+ llvm::DIFile U) {
+ QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
+ llvm::DIType PointerDiffDITy = getOrCreateType(PointerDiffTy, U);
+
+ if (!Ty->getPointeeType()->isFunctionType()) {
+ // We have a data member pointer type.
+ return PointerDiffDITy;
+ }
+
+ // We have a member function pointer type. Treat it as a struct with two
+ // ptrdiff_t members.
+ std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
+
+ uint64_t FieldOffset = 0;
+ llvm::Value *ElementTypes[2];
+
+ // FIXME: This should probably be a function type instead.
+ ElementTypes[0] =
+ DBuilder.createMemberType(U, "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+ FieldOffset += Info.first;
+
+ ElementTypes[1] =
+ DBuilder.createMemberType(U, "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(ElementTypes);
+
+ return DBuilder.createStructType(U, StringRef("test"),
+ U, 0, FieldOffset,
+ 0, 0, Elements);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
+ llvm::DIFile U) {
+ // Ignore the atomic wrapping
+ // FIXME: What is the correct representation?
+ return getOrCreateType(Ty->getValueType(), U);
+}
+
+/// CreateEnumType - get enumeration type.
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
+ llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
+ SmallVector<llvm::Value *, 16> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(
+ DBuilder.createEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray = DBuilder.getOrCreateArray(Enumerators);
+
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+ llvm::DIDescriptor EnumContext =
+ getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIType DbgTy =
+ DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
+ Size, Align, EltArray);
+ return DbgTy;
+}
+
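+/// UnwrapTypeForDebugInfo - Strip syntactic sugar (typeof, decltype, paren,
+/// elaborated types and the like) so that semantically identical types share
+/// a single debug info cache entry. Typedefs are not unwrapped here; they
+/// get their own DW_TAG_typedef nodes.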
+static QualType UnwrapTypeForDebugInfo(QualType T) {
+ do {
+ QualType LastT = T;
+ switch (T->getTypeClass()) {
+ default:
+ return T;
+ case Type::TemplateSpecialization:
+ T = cast<TemplateSpecializationType>(T)->desugar();
+ break;
+ case Type::TypeOfExpr:
+ T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
+ break;
+ case Type::TypeOf:
+ T = cast<TypeOfType>(T)->getUnderlyingType();
+ break;
+ case Type::Decltype:
+ T = cast<DecltypeType>(T)->getUnderlyingType();
+ break;
+ case Type::UnaryTransform:
+ T = cast<UnaryTransformType>(T)->getUnderlyingType();
+ break;
+ case Type::Attributed:
+ T = cast<AttributedType>(T)->getEquivalentType();
+ break;
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(T)->getNamedType();
+ break;
+ case Type::Paren:
+ T = cast<ParenType>(T)->getInnerType();
+ break;
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ break;
+ case Type::Auto:
+ T = cast<AutoType>(T)->getDeducedType();
+ break;
+ }
+
+ assert(T != LastT && "Type unwrapping failed to unwrap!");
+ if (T == LastT)
+ return T;
+ } while (true);
+}
+
+/// getTypeOrNull - Get the type from the cache or return a null type if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ return llvm::DIType();
+}
+
+/// getCompletedTypeOrNull - Get the type from the cache or return null if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ CompletedTypeCache.find(Ty.getAsOpaquePtr());
+ if (it != CompletedTypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ return llvm::DIType();
+}
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getCompletedTypeOrNull(Ty);
+
+ if (T.Verify()) return T;
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateTypeNode(Ty, Unit);
+
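+  // If a forward declaration for this type was cached earlier, queue it in
+  // ReplaceMap so it can be replaced with the complete definition later.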
+ llvm::DIType TC = getTypeOrNull(Ty);
+ if (TC.Verify() && TC.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), TC));
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+
+ if (!Res.isForwardDecl())
+ CompletedTypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+/// CreateTypeNode - Create a new debug type node.
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
+ // Handle qualifiers, which recursively handles what they refer to.
+ if (Ty.hasLocalQualifiers())
+ return CreateQualifiedType(Ty, Unit);
+
+ const char *Diag = 0;
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::ExtVector:
+ case Type::Vector:
+ return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ObjCObjectPointer:
+ return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCObject:
+ return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCInterface:
+ return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin:
+ return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex:
+ return CreateType(cast<ComplexType>(Ty));
+ case Type::Pointer:
+ return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::BlockPointer:
+ return CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef:
+ return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ return CreateType(cast<RecordType>(Ty));
+ case Type::Enum:
+ return CreateEnumType(cast<EnumType>(Ty)->getDecl());
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return CreateType(cast<FunctionType>(Ty), Unit);
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return CreateType(cast<ArrayType>(Ty), Unit);
+
+ case Type::LValueReference:
+ return CreateType(cast<LValueReferenceType>(Ty), Unit);
+ case Type::RValueReference:
+ return CreateType(cast<RValueReferenceType>(Ty), Unit);
+
+ case Type::MemberPointer:
+ return CreateType(cast<MemberPointerType>(Ty), Unit);
+
+ case Type::Atomic:
+ return CreateType(cast<AtomicType>(Ty), Unit);
+
+ case Type::Attributed:
+ case Type::TemplateSpecialization:
+ case Type::Elaborated:
+ case Type::Paren:
+ case Type::SubstTemplateTypeParm:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Auto:
+ llvm_unreachable("type should have been unwrapped!");
+ }
+
+ assert(Diag && "Fall through without a diagnostic?");
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "debug information for %0 is not yet supported");
+ CGM.getDiags().Report(DiagID)
+ << Diag;
+ return llvm::DIType();
+}
+
+/// getOrCreateLimitedType - Get the type from the cache or create a new
+/// limited type if necessary.
+llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
+ llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getTypeOrNull(Ty);
+
+ // We may have cached a forward decl when we could have created
+ // a non-forward decl. Go ahead and create a non-forward decl
+ // now.
+ if (T.Verify() && !T.isForwardDecl()) return T;
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
+
+ if (T.Verify() && T.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), T));
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+// TODO: Currently used for context chains when limiting debug info.
+llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ llvm::DIDescriptor RDContext;
+ if (CGM.getCodeGenOpts().LimitDebugInfo)
+ RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
+ else
+ RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition())
+ return createRecordFwdDecl(RD, RDContext);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ llvm::TrackingVH<llvm::MDNode> RealDecl;
+
+ if (RD->isUnion())
+ RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+ else if (CXXDecl) {
+ RDName = getClassName(RD);
+
+ // FIXME: This could be a struct type giving a default visibility different
+ // than C++ class type, but needs llvm metadata changes first.
+ RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, 0, llvm::DIType(),
+ llvm::DIArray(), llvm::DIType(),
+ llvm::DIArray());
+ } else
+ RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = llvm::DIType(RealDecl);
+
+ if (CXXDecl) {
+ // A class's primary base or the class itself contains the vtable.
+ llvm::MDNode *ContainingType = NULL;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+      // Seek the root non-virtual primary base.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit);
+ }
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = RealDecl;
+
+ RealDecl->replaceOperandWith(12, ContainingType);
+ }
+ return llvm::DIType(RealDecl);
+}
+
+/// CreateLimitedTypeNode - Create a new debug type node, but only forward
+/// declare composite types that haven't been processed yet.
+llvm::DIType CGDebugInfo::CreateLimitedTypeNode(QualType Ty,llvm::DIFile Unit) {
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+ #include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::Record:
+ return CreateLimitedType(cast<RecordType>(Ty));
+ default:
+ return CreateTypeNode(Ty, Unit);
+ }
+}
+
+/// CreateMemberType - Create new member and increase Offset by FType's size.
+llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
+ StringRef Name,
+ uint64_t *Offset) {
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
+ unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
+ llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
+ *Offset += FieldSize;
+ return Ty;
+}
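+
+// Usage sketch (comment only, invented names): callers thread a running bit
+// offset through repeated calls, roughly:
+//
+//   uint64_t Offset = 0;
+//   EltTys.push_back(CreateMemberType(Unit, PtrTy, "__isa", &Offset));
+//   EltTys.push_back(CreateMemberType(Unit, IntTy, "__flags", &Offset));
+//
+// where PtrTy and IntTy stand in for QualTypes built by the caller. Each
+// call appends a member at the current offset and advances the offset by
+// that member's size. See EmitTypeForVarWithBlocksAttr below.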
+
+/// getFunctionDeclaration - Return debug info descriptor to describe method
+/// declaration for the given method definition.
+llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return llvm::DISubprogram();
+
+ // Setup context.
+ getContextDescriptor(cast<Decl>(D->getDeclContext()));
+
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ MI = SPCache.find(FD->getCanonicalDecl());
+ if (MI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ return SP;
+ }
+
+ for (FunctionDecl::redecl_iterator I = FD->redecls_begin(),
+ E = FD->redecls_end(); I != E; ++I) {
+ const FunctionDecl *NextFD = *I;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ MI = SPCache.find(NextFD->getCanonicalDecl());
+ if (MI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ return SP;
+ }
+ }
+ return llvm::DISubprogram();
+}
+
+// getOrCreateFunctionType - Construct a DIType. If it is a C++ method,
+// include the implicit parameter "this".
+llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
+ QualType FnType,
+ llvm::DIFile F) {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return getOrCreateMethodType(Method, F);
+ if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
+ // Add "self" and "_cmd"
+ SmallVector<llvm::Value *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
+ // "self" pointer is always first argument.
+ Elts.push_back(getOrCreateType(OMethod->getSelfDecl()->getType(), F));
+ // "cmd" pointer is always second argument.
+ Elts.push_back(getOrCreateType(OMethod->getCmdDecl()->getType(), F));
+ // Get rest of the arguments.
+ for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
+ PE = OMethod->param_end(); PI != PE; ++PI)
+ Elts.push_back(getOrCreateType((*PI)->getType(), F));
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ return DBuilder.createSubroutineType(F, EltTypeArray);
+ }
+ return getOrCreateType(FnType, F);
+}
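+
+// Illustrative sketch: for an Objective-C method such as
+//
+//   - (int)addX:(int)x toY:(int)y;
+//
+// the element array built above is ordered
+//
+//   { return type, self, _cmd, x, y }
+//
+// where 'self' has the method's object pointer type and '_cmd' has the
+// selector type. 'addX:toY:' is an invented example, not from this file.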
+
+/// EmitFunctionStart - Constructs the debug code for entering a function.
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn,
+ CGBuilderTy &Builder) {
+
+ StringRef Name;
+ StringRef LinkageName;
+
+ FnBeginRegionCount.push_back(LexicalBlockStack.size());
+
+ const Decl *D = GD.getDecl();
+ // Use the location of the declaration.
+ SourceLocation Loc = D->getLocation();
+
+ unsigned Flags = 0;
+ llvm::DIFile Unit = getOrCreateFile(Loc);
+ llvm::DIDescriptor FDContext(Unit);
+ llvm::DIArray TParamsArray;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ FI = SPCache.find(FD->getCanonicalDecl());
+ if (FI != SPCache.end()) {
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
+ if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
+ llvm::MDNode *SPN = SP;
+ LexicalBlockStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+ return;
+ }
+ }
+ Name = getFunctionName(FD);
+    // Use the mangled name as the linkage name for C/C++ functions.
+ if (FD->hasPrototype()) {
+ LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
+ if (LinkageName == Name)
+ LinkageName = StringRef();
+
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
+
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ } else {
+ // Use llvm function name.
+ Name = Fn->getName();
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+ unsigned LineNo = getLineNumber(Loc);
+ if (D->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+
+ llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
+ llvm::DISubprogram SP =
+ DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
+ LineNo, getOrCreateFunctionType(D, FnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/,
+ getLineNumber(CurLoc),
+ Flags, CGM.getLangOpts().Optimize, Fn,
+ TParamsArray, SPDecl);
+
+ // Push function on region stack.
+ llvm::MDNode *SPN = SP;
+ LexicalBlockStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+}
+
+/// EmitLocation - Emit metadata to indicate a change in line/column
+/// information in the source file.
+void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
+
+ // Update our current location
+ setLocation(Loc);
+
+ if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
+
+ // Don't bother if things are the same as last time.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ if (CurLoc == PrevLoc ||
+ SM.getExpansionLoc(CurLoc) == SM.getExpansionLoc(PrevLoc))
+ // New Builder may not be in sync with CGDebugInfo.
+ if (!Builder.getCurrentDebugLocation().isUnknown())
+ return;
+
+ // Update last state.
+ PrevLoc = CurLoc;
+
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
+ getColumnNumber(CurLoc),
+ Scope));
+}
+
+/// CreateLexicalBlock - Creates a new lexical block node and pushes it on
+/// the stack.
+void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
+ llvm::DIDescriptor D =
+ DBuilder.createLexicalBlock(LexicalBlockStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
+ llvm::MDNode *DN = D;
+ LexicalBlockStack.push_back(DN);
+}
+
+/// EmitLexicalBlockStart - Constructs the debug code for entering a declarative
+/// region - beginning of a DW_TAG_lexical_block.
+void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc) {
+ // Set our current location.
+ setLocation(Loc);
+
+ // Create a new lexical block and push it on the stack.
+ CreateLexicalBlock(Loc);
+
+ // Emit a line table change for the current location inside the new scope.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
+ getColumnNumber(Loc),
+ LexicalBlockStack.back()));
+}
+
+/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
+/// region - end of a DW_TAG_lexical_block.
+void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, Loc);
+
+ LexicalBlockStack.pop_back();
+}
+
+/// EmitFunctionEnd - Constructs the debug code for exiting a function.
+void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+ unsigned RCount = FnBeginRegionCount.back();
+ assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch");
+
+ // Pop all regions for this function.
+ while (LexicalBlockStack.size() != RCount)
+ EmitLexicalBlockEnd(Builder, CurLoc);
+ FnBeginRegionCount.pop_back();
+}
+
+// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+// See BuildByRefType.
+llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *XOffset) {
+
+ SmallVector<llvm::Value *, 5> EltTys;
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ QualType Type = VD->getType();
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
+
+ bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type);
+ if (HasCopyAndDispose) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
+ &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
+ &FieldOffset));
+ }
+
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ if (Align > CGM.getContext().toCharUnitsFromBits(
+ CGM.getContext().getTargetInfo().getPointerAlign(0))) {
+ CharUnits FieldOffsetInBytes
+ = CGM.getContext().toCharUnitsFromBits(FieldOffset);
+ CharUnits AlignedOffsetInBytes
+ = FieldOffsetInBytes.RoundUpToAlignment(Align);
+ CharUnits NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffsetInBytes;
+
+ if (NumPaddingBytes.isPositive()) {
+ llvm::APInt pad(32, NumPaddingBytes.getQuantity());
+ FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
+ }
+ }
+
+ FType = Type;
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().toBits(Align);
+
+ *XOffset = FieldOffset;
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+
+ unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct;
+
+ return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
+ Elements);
+}
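+
+// Illustrative sketch (comment only): the debug struct assembled above is
+// meant to mirror the __block byref layout from BuildByRefType, roughly:
+//
+//   struct __Block_byref_x {
+//     void *__isa;
+//     struct __Block_byref_x *__forwarding;
+//     int __flags;
+//     int __size;
+//     void *__copy_helper;     // only if BlockRequiresCopying
+//     void *__destroy_helper;  // only if BlockRequiresCopying
+//     char padding[N];         // unnamed; only for over-aligned variables
+//     T x;                     // the variable itself, at *XOffset
+//   };
+//
+// Field names match the members created above; the struct tag and helper
+// types are abbreviations for this sketch, not the real ABI declarations.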
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
+ llvm::Value *Storage,
+ unsigned ArgNo, CGBuilderTy &Builder) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ uint64_t XOffset = 0;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+  // If there is no debug info for the type then do not emit debug info
+  // for this variable.
+ if (!Ty)
+ return;
+
+ if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage)) {
+    // If Storage is an aggregate returned as 'sret' then let the debugger
+    // know about this.
+ if (Arg->hasStructRetAttr())
+ Ty = DBuilder.createReferenceType(Ty);
+ else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
+      // If an aggregate variable has a non-trivial destructor or a
+      // non-trivial copy constructor then it is passed indirectly. Let
+      // debug info know about this by using a reference to the aggregate
+      // type as the argument type.
+ if (!Record->hasTrivialCopyConstructor() ||
+ !Record->hasTrivialDestructor())
+ Ty = DBuilder.createReferenceType(Ty);
+ }
+ }
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+ unsigned Flags = 0;
+ if (VD->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+
+ StringRef Name = VD->getName();
+ if (!Name.empty()) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ CharUnits offset = CharUnits::fromQuantity(32);
+ SmallVector<llvm::Value *, 9> addr;
+ llvm::Type *Int64Ty = CGM.Int64Ty;
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of __forwarding field
+ offset = CGM.getContext().toCharUnitsFromBits(
+ CGM.getContext().getTargetInfo().getPointerWidth(0));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of x field
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.createComplexVariable(Tag,
+ llvm::DIDescriptor(Scope),
+ VD->getName(), Unit, Line, Ty,
+ addr, ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+
+  // If VD is an anonymous union then Storage represents the value for
+  // all union fields.
+ if (const RecordType *RT = dyn_cast<RecordType>(VD->getType())) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (RD->isUnion()) {
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end();
+ I != E; ++I) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Use VarDecl's Tag, Scope and Line number.
+ llvm::DIVariable D =
+ DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ FieldName, Unit, Line, FieldTy,
+ CGM.getLangOpts().Optimize, Flags,
+ ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ }
+ }
+ }
+}
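+
+// For the __block (BlocksAttr) case above, the complex "addr" expression
+// handed to createComplexVariable reads, informally:
+//
+//   { OpPlus, sizeof(void*) /*__forwarding*/, OpDeref, OpPlus, XOffset }
+//
+// i.e. step from the byref base to the __forwarding pointer, follow it,
+// then step to the variable's own field. The byte quantities shown are
+// shorthand for the CharUnits values computed above, not literal syntax.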
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
+}
+
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ if (Builder.GetInsertBlock() == 0)
+ return;
+
+ bool isByRef = VD->hasAttr<BlocksAttr>();
+
+ uint64_t XOffset = 0;
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ if (isByRef)
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
+ const llvm::TargetData &target = CGM.getTargetData();
+
+ CharUnits offset = CharUnits::fromQuantity(
+ target.getStructLayout(blockInfo.StructureType)
+ ->getElementOffset(blockInfo.getCapture(VD).getIndex()));
+
+ SmallVector<llvm::Value *, 9> addr;
+ llvm::Type *Int64Ty = CGM.Int64Ty;
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ if (isByRef) {
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of __forwarding field
+ offset = CGM.getContext()
+ .toCharUnitsFromBits(target.getPointerSizeInBits());
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of x field
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ }
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ VD->getName(), Unit, Line, Ty, addr);
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column,
+ LexicalBlockStack.back()));
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ unsigned ArgNo,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
+}
+
+namespace {
+ struct BlockLayoutChunk {
+ uint64_t OffsetInBits;
+ const BlockDecl::Capture *Capture;
+ };
+ bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
+ return l.OffsetInBits < r.OffsetInBits;
+ }
+}
+
+void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
+ llvm::Value *addr,
+ CGBuilderTy &Builder) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *blockDecl = block.getBlockDecl();
+
+ // Collect some general information about the block's location.
+ SourceLocation loc = blockDecl->getCaretLocation();
+ llvm::DIFile tunit = getOrCreateFile(loc);
+ unsigned line = getLineNumber(loc);
+ unsigned column = getColumnNumber(loc);
+
+ // Build the debug-info type for the block literal.
+ getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
+
+ const llvm::StructLayout *blockLayout =
+ CGM.getTargetData().getStructLayout(block.StructureType);
+
+ SmallVector<llvm::Value*, 16> fields;
+ fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(0),
+ tunit, tunit));
+ fields.push_back(createFieldType("__flags", C.IntTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(1),
+ tunit, tunit));
+ fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(2),
+ tunit, tunit));
+ fields.push_back(createFieldType("__FuncPtr", C.VoidPtrTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(3),
+ tunit, tunit));
+ fields.push_back(createFieldType("__descriptor",
+ C.getPointerType(block.NeedsCopyDispose ?
+ C.getBlockDescriptorExtendedType() :
+ C.getBlockDescriptorType()),
+ 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(4),
+ tunit, tunit));
+
+ // We want to sort the captures by offset, not because DWARF
+ // requires this, but because we're paranoid about debuggers.
+ SmallVector<BlockLayoutChunk, 8> chunks;
+
+ // 'this' capture.
+ if (blockDecl->capturesCXXThis()) {
+ BlockLayoutChunk chunk;
+ chunk.OffsetInBits =
+ blockLayout->getElementOffsetInBits(block.CXXThisIndex);
+ chunk.Capture = 0;
+ chunks.push_back(chunk);
+ }
+
+ // Variable captures.
+ for (BlockDecl::capture_const_iterator
+ i = blockDecl->capture_begin(), e = blockDecl->capture_end();
+ i != e; ++i) {
+ const BlockDecl::Capture &capture = *i;
+ const VarDecl *variable = capture.getVariable();
+ const CGBlockInfo::Capture &captureInfo = block.getCapture(variable);
+
+ // Ignore constant captures.
+ if (captureInfo.isConstant())
+ continue;
+
+ BlockLayoutChunk chunk;
+ chunk.OffsetInBits =
+ blockLayout->getElementOffsetInBits(captureInfo.getIndex());
+ chunk.Capture = &capture;
+ chunks.push_back(chunk);
+ }
+
+ // Sort by offset.
+ llvm::array_pod_sort(chunks.begin(), chunks.end());
+
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
+ i = chunks.begin(), e = chunks.end(); i != e; ++i) {
+ uint64_t offsetInBits = i->OffsetInBits;
+ const BlockDecl::Capture *capture = i->Capture;
+
+ // If we have a null capture, this must be the C++ 'this' capture.
+ if (!capture) {
+ const CXXMethodDecl *method =
+ cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
+ QualType type = method->getThisType(C);
+
+ fields.push_back(createFieldType("this", type, 0, loc, AS_public,
+ offsetInBits, tunit, tunit));
+ continue;
+ }
+
+ const VarDecl *variable = capture->getVariable();
+ StringRef name = variable->getName();
+
+ llvm::DIType fieldType;
+ if (capture->isByRef()) {
+ std::pair<uint64_t,unsigned> ptrInfo = C.getTypeInfo(C.VoidPtrTy);
+
+ // FIXME: this creates a second copy of this type!
+ uint64_t xoffset;
+ fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
+ fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
+ fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
+ ptrInfo.first, ptrInfo.second,
+ offsetInBits, 0, fieldType);
+ } else {
+ fieldType = createFieldType(name, variable->getType(), 0,
+ loc, AS_public, offsetInBits, tunit, tunit);
+ }
+ fields.push_back(fieldType);
+ }
+
+ SmallString<36> typeName;
+ llvm::raw_svector_ostream(typeName)
+ << "__block_literal_" << CGM.getUniqueBlockCount();
+
+ llvm::DIArray fieldsArray = DBuilder.getOrCreateArray(fields);
+
+ llvm::DIType type =
+ DBuilder.createStructType(tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize),
+ CGM.getContext().toBits(block.BlockAlign),
+ 0, fieldsArray);
+ type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
+
+ // Get overall information about the block.
+ unsigned flags = llvm::DIDescriptor::FlagArtificial;
+ llvm::MDNode *scope = LexicalBlockStack.back();
+ StringRef name = ".block_descriptor";
+
+ // Create the descriptor for the parameter.
+ llvm::DIVariable debugVar =
+ DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
+ llvm::DIDescriptor(scope),
+ name, tunit, line, type,
+ CGM.getLangOpts().Optimize, flags,
+ cast<llvm::Argument>(addr)->getArgNo() + 1);
+
+ // Insert an llvm.dbg.value into the current block.
+ llvm::Instruction *declare =
+ DBuilder.insertDbgValueIntrinsic(addr, 0, debugVar,
+ Builder.GetInsertBlock());
+ declare->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
+}
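+
+// Illustrative sketch (comment only): the __block_literal_N debug type
+// built above describes a block literal laid out roughly as
+//
+//   struct __block_literal_N {
+//     void *__isa;
+//     int __flags;
+//     int __reserved;
+//     void *__FuncPtr;
+//     struct __block_descriptor *__descriptor;
+//     // captured 'this', if any, followed by the remaining captures
+//     // in order of increasing offset
+//   };
+//
+// The five fixed fields mirror the createFieldType calls above; the field
+// types are abbreviations for this sketch, not the real ABI declarations.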
+
+/// EmitGlobalVariable - Emit information about a global variable.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *D) {
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(D->getLocation());
+ unsigned LineNo = getLineNumber(D->getLocation());
+
+ setLocation(D->getLocation());
+
+ QualType T = D->getType();
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+ StringRef DeclName = D->getName();
+ StringRef LinkageName;
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext())
+ && !isa<ObjCMethodDecl>(D->getDeclContext()))
+ LinkageName = Var->getName();
+ if (LinkageName == DeclName)
+ LinkageName = StringRef();
+ llvm::DIDescriptor DContext =
+ getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
+ DBuilder.createStaticVariable(DContext, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
+}
+
+/// EmitGlobalVariable - Emit information about an Objective-C interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ ObjCInterfaceDecl *ID) {
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
+ unsigned LineNo = getLineNumber(ID->getLocation());
+
+ StringRef Name = ID->getName();
+
+ QualType T = CGM.getContext().getObjCInterfaceType(ID);
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DBuilder.createGlobalVariable(Name, Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
+}
+
+/// EmitGlobalVariable - Emit global variable's debug info.
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
+ llvm::Constant *Init) {
+ // Create the descriptor for the variable.
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ StringRef Name = VD->getName();
+ llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
+ Ty = CreateEnumType(ED);
+ }
+ // Do not use DIGlobalVariable for enums.
+ if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
+ return;
+ DBuilder.createStaticVariable(Unit, Name, Name, Unit,
+ getLineNumber(VD->getLocation()),
+ Ty, true, Init);
+}
+
+/// getOrCreateNameSpace - Return the namespace descriptor for the given
+/// namespace decl.
+llvm::DINameSpace
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
+ NameSpaceCache.find(NSDecl);
+ if (I != NameSpaceCache.end())
+ return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
+
+ unsigned LineNo = getLineNumber(NSDecl->getLocation());
+ llvm::DIFile FileD = getOrCreateFile(NSDecl->getLocation());
+ llvm::DIDescriptor Context =
+ getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
+ llvm::DINameSpace NS =
+ DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
+ NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
+ return NS;
+}
+
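+/// finalize - Resolve the forward declarations cached by
+/// getOrCreateLimitedType: each (opaque QualType, forward decl) pair in
+/// ReplaceMap is RAUW'd with the completed type now in TypeCache, provided
+/// both metadata nodes are still live and the cached node is still a
+/// forward declaration.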
+void CGDebugInfo::finalize(void) {
+ for (std::vector<std::pair<void *, llvm::WeakVH> >::const_iterator VI
+ = ReplaceMap.begin(), VE = ReplaceMap.end(); VI != VE; ++VI) {
+ llvm::DIType Ty, RepTy;
+ // Verify that the debug info still exists.
+ if (&*VI->second)
+ Ty = llvm::DIType(cast<llvm::MDNode>(VI->second));
+
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(VI->first);
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ RepTy = llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) {
+ Ty.replaceAllUsesWith(RepTy);
+ }
+ }
+ DBuilder.finalize();
+}
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..ec7705c
--- /dev/null
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,322 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source level debug info generator for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/DIBuilder.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+
+#include "CGBuilder.h"
+
+namespace llvm {
+ class MDNode;
+}
+
+namespace clang {
+ class VarDecl;
+ class ObjCInterfaceDecl;
+ class ClassTemplateSpecializationDecl;
+ class GlobalDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenFunction;
+ class CGBlockInfo;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to llvm globals or passing it directly
+/// to the backend.
+class CGDebugInfo {
+ CodeGenModule &CGM;
+ llvm::DIBuilder DBuilder;
+ llvm::DICompileUnit TheCU;
+ SourceLocation CurLoc, PrevLoc;
+ llvm::DIType VTablePtrType;
+
+ /// TypeCache - Cache of previously constructed Types.
+ llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
+
+  /// CompletedTypeCache - Cache of previously constructed complete RecordTypes.
+ llvm::DenseMap<void *, llvm::WeakVH> CompletedTypeCache;
+
+ /// ReplaceMap - Cache of forward declared types to RAUW at the end of
+ /// compilation.
+  std::vector<std::pair<void *, llvm::WeakVH> > ReplaceMap;
+
+ bool BlockLiteralGenericSet;
+ llvm::DIType BlockLiteralGeneric;
+
+  // LexicalBlockStack - Keep track of our current nested lexical blocks.
+ std::vector<llvm::TrackingVH<llvm::MDNode> > LexicalBlockStack;
+ llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+ // FnBeginRegionCount - Keep track of LexicalBlockStack counter at the
+ // beginning of a function. This is used to pop unbalanced regions at
+ // the end of a function.
+ std::vector<unsigned> FnBeginRegionCount;
+
+  /// DebugInfoNames - This is storage for names that are constructed on
+  /// demand, e.g. C++ destructors and C++ operators.
+ llvm::BumpPtrAllocator DebugInfoNames;
+ StringRef CWDName;
+
+ llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
+
+ /// Helper functions for getOrCreateType.
+ llvm::DIType CreateType(const BuiltinType *Ty);
+ llvm::DIType CreateType(const ComplexType *Ty);
+ llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile F);
+ llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RecordType *Ty);
+ llvm::DIType CreateLimitedType(const RecordType *Ty);
+ llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit);
+ llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
+ llvm::DIType CreateEnumType(const EnumDecl *ED);
+ llvm::DIType getTypeOrNull(const QualType);
+ llvm::DIType getCompletedTypeOrNull(const QualType);
+ llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile F);
+ llvm::DIType getOrCreateFunctionType(const Decl *D, QualType FnType,
+ llvm::DIFile F);
+ llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
+ llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N);
+ llvm::DIType CreatePointeeType(QualType PointeeTy, llvm::DIFile F);
+ llvm::DIType CreatePointerLikeType(unsigned Tag,
+ const Type *Ty, QualType PointeeTy,
+ llvm::DIFile F);
+
+ llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile F,
+ llvm::DIType RecordTy);
+
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &E,
+ llvm::DIType T);
+
+ void CollectCXXFriends(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy);
+
+ void CollectCXXBases(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy);
+
+ llvm::DIArray
+ CollectTemplateParams(const TemplateParameterList *TPList,
+ const TemplateArgumentList &TAList,
+ llvm::DIFile Unit);
+ llvm::DIArray
+ CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit);
+ llvm::DIArray
+ CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
+ llvm::DIFile F);
+
+ llvm::DIType createFieldType(StringRef name, QualType type,
+ uint64_t sizeInBitsOverride, SourceLocation loc,
+ AccessSpecifier AS, uint64_t offsetInBits,
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope);
+ void CollectRecordStaticVars(const RecordDecl *, llvm::DIType);
+ void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &E,
+ llvm::DIType RecordTy);
+
+ void CollectVTableInfo(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &EltTys);
+
+ // CreateLexicalBlock - Create a new lexical block node and push it on
+ // the stack.
+ void CreateLexicalBlock(SourceLocation Loc);
+
+public:
+ CGDebugInfo(CodeGenModule &CGM);
+ ~CGDebugInfo();
+
+ void finalize(void);
+
+  /// setLocation - Update the current source location. If \arg Loc is
+  /// invalid it is ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// EmitLocation - Emit metadata to indicate a change in line/column
+ /// information in the source file.
+ void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
+
+  /// EmitFunctionStart - Constructs the debug code for entering a function.
+ void EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitFunctionEnd - Constructs the debug code for exiting a function.
+ void EmitFunctionEnd(CGBuilderTy &Builder);
+
+ /// EmitLexicalBlockStart - Emit metadata to indicate the beginning of a
+ /// new lexical block and push the block onto the stack.
+ void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// EmitLexicalBlockEnd - Emit metadata to indicate the end of a new lexical
+ /// block and pop the current block.
+ void EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+ /// variable declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
+ /// imported variable declaration in a block.
+ void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
+ llvm::Value *storage,
+ CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo);
+
+ /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+ /// variable declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ unsigned ArgNo, CGBuilderTy &Builder);
+
+ /// EmitDeclareOfBlockLiteralArgVariable - Emit call to
+ /// llvm.dbg.declare for the block-literal argument to a block
+ /// invocation function.
+ void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
+ llvm::Value *addr,
+ CGBuilderTy &Builder);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+  /// EmitGlobalVariable - Emit information about an Objective-C interface.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+ /// EmitGlobalVariable - Emit global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+
+ /// getOrCreateRecordType - Emit record type's standalone debug info.
+ llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
+
+  /// getOrCreateInterfaceType - Emit standalone debug info for an
+  /// Objective-C interface type.
+ llvm::DIType getOrCreateInterfaceType(QualType Ty,
+ SourceLocation Loc);
+
+private:
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ unsigned ArgNo, CGBuilderTy &Builder);
+
+ // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+ // See BuildByRefType.
+ llvm::DIType EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *OffSet);
+
+ /// getContextDescriptor - Get context info for the decl.
+ llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
+
+ /// createRecordFwdDecl - Create a forward decl for a RecordType in a given
+ /// context.
+ llvm::DIType createRecordFwdDecl(const RecordDecl *, llvm::DIDescriptor);
+
+ /// createContextChain - Create a set of decls for the context chain.
+ llvm::DIDescriptor createContextChain(const Decl *Decl);
+
+ /// getCurrentDirname - Return current directory name.
+ StringRef getCurrentDirname();
+
+ /// CreateCompileUnit - Create new compile unit.
+ void CreateCompileUnit();
+
+ /// getOrCreateFile - Get the file debug info descriptor for the input
+ /// location.
+ llvm::DIFile getOrCreateFile(SourceLocation Loc);
+
+ /// getOrCreateMainFile - Get the file info for main compile unit.
+ llvm::DIFile getOrCreateMainFile();
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+
+ /// getOrCreateLimitedType - Get the type from the cache or create a new
+ /// partial type if necessary.
+ llvm::DIType getOrCreateLimitedType(QualType Ty, llvm::DIFile F);
+
+ /// CreateTypeNode - Create type metadata for a source language type.
+ llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+
+ /// CreateLimitedTypeNode - Create type metadata for a source language
+ /// type, but only partial types for records.
+ llvm::DIType CreateLimitedTypeNode(QualType Ty, llvm::DIFile F);
+
+ /// CreateMemberType - Create new member and increase Offset by FType's size.
+ llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
+ StringRef Name, uint64_t *Offset);
+
+ /// getFunctionDeclaration - Return debug info descriptor to describe method
+ /// declaration for the given method definition.
+ llvm::DISubprogram getFunctionDeclaration(const Decl *D);
+
+  /// getFunctionName - Get the function name for the given FunctionDecl. If
+  /// the name is constructed on demand (e.g. C++ destructor) then the name
+  /// is stored on the side.
+ StringRef getFunctionName(const FunctionDecl *FD);
+
+ /// getObjCMethodName - Returns the unmangled name of an Objective-C method.
+ /// This is the display name for the debugging info.
+ StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+
+ /// getSelectorName - Return selector name. This is used for debugging
+ /// info.
+ StringRef getSelectorName(Selector S);
+
+ /// getClassName - Get class name including template argument list.
+ StringRef getClassName(const RecordDecl *RD);
+
+ /// getVTableName - Get vtable name for the given Class.
+ StringRef getVTableName(const CXXRecordDecl *Decl);
+
+ /// getLineNumber - Get line number for the location. If location is invalid
+ /// then use current location.
+ unsigned getLineNumber(SourceLocation Loc);
+
+ /// getColumnNumber - Get column number for the location. If location is
+ /// invalid then use current location.
+ unsigned getColumnNumber(SourceLocation Loc);
+};
+} // namespace CodeGen
+} // namespace clang
+
+
+#endif
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..6447779
--- /dev/null
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,1564 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGOpenCLRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ case Decl::TranslationUnit:
+ case Decl::Namespace:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::TemplateTypeParm:
+ case Decl::UnresolvedUsingValue:
+ case Decl::NonTypeTemplateParm:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::Field:
+ case Decl::IndirectField:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ case Decl::ParmVar:
+ case Decl::ImplicitParam:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::TemplateTemplateParm:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::AccessSpec:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::FileScopeAsm:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::Block:
+ case Decl::ClassScopeFunctionSpecialization:
+ llvm_unreachable("Declaration should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ case Decl::Using: // using X; [C++]
+ case Decl::UsingShadow:
+ case Decl::UsingDirective: // using namespace X; [C++]
+ case Decl::NamespaceAlias:
+ case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ case Decl::Label: // __label__ x;
+ case Decl::Import:
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isLocalVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitVarDecl(VD);
+ }
+
+ case Decl::Typedef: // typedef int X;
+ case Decl::TypeAlias: { // using X = int; [C++0x]
+ const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
+ }
+}
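+
+// Example for the Typedef/TypeAlias case above (sketch, invented names): a
+// local typedef of a variably modified type still needs its array bounds
+// evaluated, e.g.
+//
+//   void f(int n) { typedef int VLA[n]; }
+//
+// emitting the typedef evaluates 'n' so the VLA bounds are available later.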
+
+/// EmitVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
+ switch (D.getStorageClass()) {
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
+ return EmitAutoVarDecl(D);
+ case SC_Static: {
+ llvm::GlobalValue::LinkageTypes Linkage =
+ llvm::GlobalValue::InternalLinkage;
+
+ // If the function definition has some sort of weak linkage, its
+ // static variables should also be weak so that they get properly
+ // uniqued. We can't do this in C, though, because there's no
+ // standard way to agree on which variables are the same (i.e.
+ // there's no mangling).
+ if (getContext().getLangOpts().CPlusPlus)
+ if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
+ Linkage = CurFn->getLinkage();
+
+ return EmitStaticVarDecl(D, Linkage);
+ }
+ case SC_Extern:
+ case SC_PrivateExtern:
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+ case SC_OpenCLWorkGroupLocal:
+ return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
+ }
+
+ llvm_unreachable("Unknown storage class");
+}
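+
+// Example motivating the weak-linkage case above (sketch, invented names):
+//
+//   inline int counter() { static int n = 0; return ++n; }
+//
+// Every TU that uses counter() emits a weak definition of it, so 'n' must
+// share that linkage for the linker to unique it; with InternalLinkage each
+// TU would get its own copy of 'n'.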
+
+static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
+ const char *Separator) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (CGF.getContext().getLangOpts().CPlusPlus) {
+ StringRef Name = CGM.getMangledName(&D);
+ return Name.str();
+ }
+
+ std::string ContextName;
+ if (!CGF.CurFuncDecl) {
+ // Better be in a block declared in global scope.
+ const NamedDecl *ND = cast<NamedDecl>(&D);
+ const DeclContext *DC = ND->getDeclContext();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ MangleBuffer Name;
+ CGM.getBlockMangledName(GlobalDecl(), Name, BD);
+ ContextName = Name.getString();
+ }
+ else
+ llvm_unreachable("Unknown context for block static var decl");
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+ StringRef Name = CGM.getMangledName(FD);
+ ContextName = Name.str();
+ } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
+ ContextName = CGF.CurFn->getName();
+ else
+ llvm_unreachable("Unknown context for static var decl");
+
+ return ContextName + Separator + D.getNameAsString();
+}
+
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ // Use the label if the variable is renamed with the asm-label extension.
+ std::string Name;
+ if (D.hasAttr<AsmLabelAttr>())
+ Name = CGM.getMangledName(&D);
+ else
+ Name = GetStaticDeclName(*this, D, Separator);
+
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), LTy,
+ Ty.isConstant(getContext()), Linkage,
+ CGM.EmitNullConstant(D.getType()), Name, 0,
+ D.isThreadSpecified(),
+ CGM.getContext().getTargetAddressSpace(Ty));
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ if (Linkage != llvm::GlobalValue::InternalLinkage)
+ GV->setVisibility(CurFn->getVisibility());
+ return GV;
+}
+
+/// hasNontrivialDestruction - Determine whether a type's destruction is
+/// non-trivial. If so, and the variable uses static initialization, we must
+/// register its destructor to run on exit.
+static bool hasNontrivialDestruction(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return RD && !RD->hasTrivialDestructor();
+}
+
+/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
+/// global variable that has already been created for it. If the initializer
+/// has a different type than GV does, this may free GV and return a different
+/// one. Otherwise it just returns GV.
+llvm::GlobalVariable *
+CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ llvm::Constant *Init = CGM.EmitConstantInit(D, this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getContext().getLangOpts().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else if (Builder.GetInsertBlock()) {
+ // Since we have a static initializer, this global variable can't
+ // be constant.
+ GV->setConstant(false);
+
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
+ }
+ return GV;
+ }
+
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
+ if (GV->getType()->getElementType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ /*InsertBefore*/ OldGV,
+ D.isThreadSpecified(),
+ CGM.getContext().getTargetAddressSpace(D.getType()));
+ GV->setVisibility(OldGV->getVisibility());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setConstant(CGM.isTypeConstant(D.getType(), true));
+ GV->setInitializer(Init);
+
+ if (hasNontrivialDestruction(D.getType())) {
+ // We have a constant initializer, but a nontrivial destructor. We still
+ // need to perform a guarded "initialization" in order to register the
+ // destructor.
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
+ }
+
+ return GV;
+}
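+
+// Example of the initializer/global type mismatch handled above (sketch,
+// invented names):
+//
+//   union U { int i; float f; };
+//   static union U u = { .f = 1.0f };
+//
+// The global may be created with the union's chosen LLVM representation
+// (say { i32 }) while the constant initializer has type { float }, so the
+// old global is replaced and its uses RAUW'd through a bitcast.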
+
+void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+ // Check to see if we already have a global variable for this
+ // declaration. This can happen when double-emitting function
+ // bodies, e.g. with complete and base constructors.
+ llvm::Constant *addr =
+ CGM.getStaticLocalDeclAddress(&D);
+
+ llvm::GlobalVariable *var;
+ if (addr) {
+ var = cast<llvm::GlobalVariable>(addr->stripPointerCasts());
+ } else {
+ addr = var = CreateStaticVarDecl(D, ".", Linkage);
+ }
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ DMEntry = addr;
+ CGM.setStaticLocalDeclAddress(&D, addr);
+
+ // We can't have a VLA here, but we can have a pointer to a VLA,
+ // even though that doesn't really make any sense.
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVariablyModifiedType(D.getType());
+
+ // Save the type in case adding the initializer forces a type change.
+ llvm::Type *expectedType = addr->getType();
+
+ // If this value has an initializer, emit it.
+ if (D.getInit())
+ var = AddInitializerToStaticVarDecl(D, var);
+
+ var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+ if (D.hasAttr<AnnotateAttr>())
+ CGM.AddGlobalAnnotations(&D, var);
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ var->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.AddUsedGlobal(var);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+ // FIXME: It is really dangerous to store this in the map; if anyone
+  // RAUW's the GV, uses of this constant will be invalid.
+ llvm::Constant *castedAddr = llvm::ConstantExpr::getBitCast(var, expectedType);
+ DMEntry = castedAddr;
+ CGM.setStaticLocalDeclAddress(&D, castedAddr);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(var, &D);
+ }
+}
+
+namespace {
+ struct DestroyObject : EHScopeStack::Cleanup {
+ DestroyObject(llvm::Value *addr, QualType type,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), type(type), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ llvm::Value *addr;
+ QualType type;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Don't use an EH cleanup recursively from an EH cleanup.
+ bool useEHCleanupForArray =
+ flags.isForNormalCleanup() && this->useEHCleanupForArray;
+
+ CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+ }
+ };
+
+ struct DestroyNRVOVariable : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(llvm::Value *addr,
+ const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+
+ struct CallStackRestore : EHScopeStack::Cleanup {
+ llvm::Value *Stack;
+ CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *V = CGF.Builder.CreateLoad(Stack);
+ llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ CGF.Builder.CreateCall(F, V);
+ }
+ };
+
+ struct ExtendGCLifetime : EHScopeStack::Cleanup {
+ const VarDecl &Var;
+ ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Compute the address of the local variable, in case it's a
+ // byref or something.
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
+ llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
+ CGF.EmitExtendGCLifetime(value);
+ }
+ };
+
+ struct CallCleanupFunction : EHScopeStack::Cleanup {
+ llvm::Constant *CleanupFn;
+ const CGFunctionInfo &FnInfo;
+ const VarDecl &Var;
+
+ CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
+ const VarDecl *Var)
+ : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
+ // Compute the address of the local variable, in case it's a byref
+ // or something.
+ llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
+
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = FnInfo.arg_begin()->type;
+ llvm::Value *Arg =
+ CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.add(RValue::get(Arg),
+ CGF.getContext().getPointerType(Var.getType()));
+ CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ }
+ };
+}
+
+/// EmitAutoVarWithLifetime - Does the setup required for an automatic
+/// variable with lifetime.
+static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
+ llvm::Value *addr,
+ Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ CodeGenFunction::Destroyer *destroyer =
+ (var.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? CodeGenFunction::destroyARCStrongPrecise
+ : CodeGenFunction::destroyARCStrongImprecise);
+
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
+ cleanupKind & EHCleanup);
+ break;
+ }
+ case Qualifiers::OCL_Autoreleasing:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanup*/ true);
+ break;
+ }
+}
+
+static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
+ if (const Expr *e = dyn_cast<Expr>(s)) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ s = e = e->IgnoreParenCasts();
+
+ if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
+ return (ref->getDecl() == &var);
+ }
+
+ for (Stmt::const_child_range children = s->children(); children; ++children)
+    // Children might be null, as with a missing decl or the conditional
+    // of an if-stmt.
+ if ((*children) && isAccessedBy(var, *children))
+ return true;
+
+ return false;
+}
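+
+// Example (illustrative only): given
+//   __strong id x = makeValue(x);   // makeValue is hypothetical
+// the initializer refers to `x`, so isAccessedBy returns true and the
+// caller must preserve the illusion of zero-initialization (see
+// EmitScalarInit below).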
+
+static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
+ if (!decl) return false;
+ if (!isa<VarDecl>(decl)) return false;
+ const VarDecl *var = cast<VarDecl>(decl);
+ return isAccessedBy(*var, e);
+}
+
+static void drillIntoBlockVariable(CodeGenFunction &CGF,
+ LValue &lvalue,
+ const VarDecl *var) {
+ lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
+}
+
+void CodeGenFunction::EmitScalarInit(const Expr *init,
+ const ValueDecl *D,
+ LValue lvalue,
+ bool capturedByInit) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime) {
+ llvm::Value *value = EmitScalarExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(RValue::get(value), lvalue, true);
+ return;
+ }
+
+ // If we're emitting a value with lifetime, we have to do the
+ // initialization *before* we leave the cleanup scopes.
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
+ enterFullExpression(ewc);
+ init = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+
+ // We have to maintain the illusion that the variable is
+ // zero-initialized. If the variable might be accessed in its
+ // initializer, zero-initialize before running the initializer, then
+ // actually perform the initialization with an assign.
+ bool accessedByInit = false;
+ if (lifetime != Qualifiers::OCL_ExplicitNone)
+ accessedByInit = (capturedByInit || isAccessedBy(D, init));
+ if (accessedByInit) {
+ LValue tempLV = lvalue;
+ // Drill down to the __block object if necessary.
+ if (capturedByInit) {
+ // We can use a simple GEP for this because it can't have been
+ // moved yet.
+ tempLV.setAddress(Builder.CreateStructGEP(tempLV.getAddress(),
+ getByRefValueLLVMField(cast<VarDecl>(D))));
+ }
+
+ llvm::PointerType *ty
+ = cast<llvm::PointerType>(tempLV.getAddress()->getType());
+ ty = cast<llvm::PointerType>(ty->getElementType());
+
+ llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+
+ // If __weak, we want to use a barrier under certain conditions.
+ if (lifetime == Qualifiers::OCL_Weak)
+ EmitARCInitWeak(tempLV.getAddress(), zero);
+
+ // Otherwise just do a simple store.
+ else
+ EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
+ }
+
+ // Emit the initializer.
+ llvm::Value *value = 0;
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ value = EmitScalarExpr(init);
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // No way to optimize a producing initializer into this. It's not
+ // worth optimizing for, because the value will immediately
+ // disappear in the common case.
+ value = EmitScalarExpr(init);
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ if (accessedByInit)
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
+ else
+ EmitARCInitWeak(lvalue.getAddress(), value);
+ return;
+ }
+
+ case Qualifiers::OCL_Autoreleasing:
+ value = EmitARCRetainAutoreleaseScalarExpr(init);
+ break;
+ }
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+
+ // If the variable might have been accessed by its initializer, we
+ // might have to initialize with a barrier. We have to do this for
+ // both __weak and __strong, but __weak got filtered out above.
+ if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ return;
+ }
+
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
+}
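+
+// Illustrative sketch of the accessed-by-initializer case for __strong:
+//   __strong id x = makeValue(x);   // makeValue is hypothetical
+// emits, in order: a store of nil into x, a retained evaluation of the
+// initializer, a load of the old (possibly overwritten) value, the store
+// of the new value, and an objc_release of the old value.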
+
+/// EmitScalarInit - Initialize the given lvalue with the given object.
+void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime)
+ return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong:
+ init = EmitARCRetain(lvalue.getType(), init);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(lvalue.getAddress(), init);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ init = EmitARCRetainAutorelease(lvalue.getType(), init);
+ break;
+ }
+
+ EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
+}
+
+/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
+/// non-zero parts of the specified initializer with equal or fewer than
+/// NumStores scalar stores.
+static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
+ unsigned &NumStores) {
+  // Zero and Undef never require any extra stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return true;
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init))
+ return Init->isNullValue() || NumStores--;
+
+ // See if we can emit each element.
+ if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ // Anything else is hard and scary.
+ return false;
+}
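+
+// Example (illustrative): with NumStores == 6,
+//   struct S { int a; int pad[100]; } s = { 42 };
+// decrements the budget once (only `a` is non-zero) and succeeds, while
+// an aggregate with seven distinct non-zero scalar fields exhausts the
+// budget and fails.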
+
+/// emitStoresForInitAfterMemset - For initializers accepted by
+/// canEmitInitWithFewStoresAfterMemset, emit the scalar stores that are
+/// required.
+static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
+ bool isVolatile, CGBuilderTy &Builder) {
+ // Zero doesn't require a store.
+ if (Init->isNullValue() || isa<llvm::UndefValue>(Init))
+ return;
+
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init)) {
+ Builder.CreateStore(Init, Loc, isVolatile);
+ return;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+
+ // Get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
+ }
+ return;
+ }
+
+ assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
+ "Unknown value type!");
+
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ // Get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
+ }
+}
+
+/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
+/// plus some stores to initialize a local variable instead of using a memcpy
+/// from a constant global. It is beneficial to use memset if the global is all
+/// zeros, or mostly zeros and large.
+static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ // If a global is all zeros, always use a memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) return true;
+
+ // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
+ // do it if it will require 6 or fewer scalar stores.
+  // TODO: Should the budget depend on the size? Avoiding a large global
+  // warrants plopping in more stores.
+ unsigned StoreBudget = 6;
+ uint64_t SizeLimit = 32;
+
+ return GlobalSize > SizeLimit &&
+ canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
+}
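+
+// Example (illustrative):
+//   int big[256] = { 1 };          // 1024 bytes, one non-zero element:
+//                                  // memset plus a single store
+//   int small[4] = { 1, 2, 3, 4 }; // 16 bytes <= 32: memcpy from a
+//                                  // private constant global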
+
+/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on target.
+void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
+ AutoVarEmission emission = EmitAutoVarAlloca(D);
+ EmitAutoVarInit(emission);
+ EmitAutoVarCleanups(emission);
+}
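+
+// Example (illustrative): for a local `std::string s("hi");` the three
+// phases above emit the alloca, the constructor call, and the pushed
+// ~basic_string() cleanup, respectively.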
+
+/// EmitAutoVarAlloca - Emit the alloca and debug information for a
+/// local variable. Does not emit initialization or destruction.
+CodeGenFunction::AutoVarEmission
+CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
+ QualType Ty = D.getType();
+
+ AutoVarEmission emission(D);
+
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ emission.IsByRef = isByRef;
+
+ CharUnits alignment = getContext().getDeclAlign(&D);
+ emission.Alignment = alignment;
+
+ // If the type is variably-modified, emit all the VLA sizes for it.
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+
+ llvm::Value *DeclPtr;
+ if (Ty->isConstantSizeType()) {
+ if (!Target.useGlobalsForAutomaticVariables()) {
+ bool NRVO = getContext().getLangOpts().ElideConstructors &&
+ D.isNRVOVariable();
+
+ // If this value is a POD array or struct with a statically
+ // determinable constant initializer, there are optimizations we can do.
+ //
+ // TODO: We should constant-evaluate the initializer of any variable,
+ // as long as it is initialized by a constant expression. Currently,
+ // isConstantInitializer produces wrong answers for structs with
+ // reference or bitfield members, and a few other cases, and checking
+ // for POD-ness protects us from some of these.
+ if (D.getInit() &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+ D.getInit()->isConstantInitializer(getContext(), false)) {
+
+ // If the variable's a const type, and it's neither an NRVO
+ // candidate nor a __block variable and has no mutable members,
+ // emit it as a global instead.
+ if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true)) {
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+
+ emission.Address = 0; // signal this condition to later callbacks
+ assert(emission.wasEmittedAsGlobal());
+ return emission;
+ }
+
+ // Otherwise, tell the initialization code that we're in this case.
+ emission.IsConstantAggregate = true;
+ }
+
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
+ llvm::Type *LTy = ConvertTypeForMem(Ty);
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ DeclPtr = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ llvm::Value *Zero = Builder.getFalse();
+ llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ EnsureInsertPoint();
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag;
+ emission.NRVOFlag = NRVOFlag;
+ }
+ }
+ } else {
+ if (isByRef)
+ LTy = BuildByRefType(&D);
+
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getName());
+
+ CharUnits allocaAlignment = alignment;
+ if (isByRef)
+ allocaAlignment = std::max(allocaAlignment,
+ getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
+ Alloc->setAlignment(allocaAlignment.getQuantity());
+ DeclPtr = Alloc;
+ }
+ } else {
+ // Targets that don't support recursion emit locals as globals.
+ const char *Class =
+ D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
+ DeclPtr = CreateStaticVarDecl(D, Class,
+ llvm::GlobalValue::InternalLinkage);
+ }
+ } else {
+ EnsureInsertPoint();
+
+ if (!DidCallStackSave) {
+ // Save the stack.
+ llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ // Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
+ EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
+ }
+
+ llvm::Value *elementCount;
+ QualType elementType;
+ llvm::tie(elementCount, elementType) = getVLASize(Ty);
+
+ llvm::Type *llvmTy = ConvertTypeForMem(elementType);
+
+ // Allocate memory for the array.
+ llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
+ vla->setAlignment(alignment.getQuantity());
+
+ DeclPtr = vla;
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+ emission.Address = DeclPtr;
+
+ // Emit debug info for local var declaration.
+ if (HaveInsertPoint())
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, emission.Address);
+
+ return emission;
+}
+
+/// Determines whether the given __block variable is potentially
+/// captured by the given expression.
+static bool isCapturedBy(const VarDecl &var, const Expr *e) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ e = e->IgnoreParenCasts();
+
+ if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
+ const BlockDecl *block = be->getBlockDecl();
+ for (BlockDecl::capture_const_iterator i = block->capture_begin(),
+ e = block->capture_end(); i != e; ++i) {
+ if (i->getVariable() == &var)
+ return true;
+ }
+
+ // No need to walk into the subexpressions.
+ return false;
+ }
+
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
+ const CompoundStmt *CS = SE->getSubStmt();
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end(); BI != BE; ++BI)
+ if (Expr *E = dyn_cast<Expr>((*BI))) {
+ if (isCapturedBy(var, E))
+ return true;
+ }
+ else if (DeclStmt *DS = dyn_cast<DeclStmt>((*BI))) {
+ // special case declarations
+ for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+ I != E; ++I) {
+ if (VarDecl *VD = dyn_cast<VarDecl>((*I))) {
+ Expr *Init = VD->getInit();
+ if (Init && isCapturedBy(var, Init))
+ return true;
+ }
+ }
+ }
+ else
+        // FIXME: Conservatively assume that arbitrary statements cause
+        // capturing. Later, provide code to poke into statements for
+        // capture analysis.
+ return true;
+ return false;
+ }
+
+ for (Stmt::const_child_range children = e->children(); children; ++children)
+ if (isCapturedBy(var, cast<Expr>(*children)))
+ return true;
+
+ return false;
+}
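+
+// Example (illustrative only):
+//   __block id x = ^{ return x; };
+// The initializer's block captures `x`, so isCapturedBy returns true and
+// EmitAutoVarInit must re-derive the variable's address through the byref
+// structure after evaluating the initializer (see drillIntoBlockVariable).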
+
+/// \brief Determine whether the given initializer is trivial in the sense
+/// that it requires no code to be generated.
+static bool isTrivialInitializer(const Expr *Init) {
+ if (!Init)
+ return true;
+
+ if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ if (CXXConstructorDecl *Constructor = Construct->getConstructor())
+ if (Constructor->isTrivial() &&
+ Constructor->isDefaultConstructor() &&
+ !Construct->requiresZeroInitialization())
+ return true;
+
+ return false;
+}
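+
+// Example (illustrative): for `struct P { int x; }; P p;` the implicit
+// default construction is trivial and generates no code, whereas
+// `P p = P();` requires zero-initialization and is not trivial here.
+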
+void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
+ assert(emission.Variable && "emission was not valid!");
+
+ // If this was emitted as a global constant, we're done.
+ if (emission.wasEmittedAsGlobal()) return;
+
+ const VarDecl &D = *emission.Variable;
+ QualType type = D.getType();
+
+ // If this local has an initializer, emit it now.
+ const Expr *Init = D.getInit();
+
+ // If we are at an unreachable point, we don't need to emit the initializer
+ // unless it contains a label.
+ if (!HaveInsertPoint()) {
+ if (!Init || !ContainsLabel(Init)) return;
+ EnsureInsertPoint();
+ }
+
+ // Initialize the structure of a __block variable.
+ if (emission.IsByRef)
+ emitByrefStructureInit(emission);
+
+ if (isTrivialInitializer(Init))
+ return;
+
+ CharUnits alignment = emission.Alignment;
+
+ // Check whether this is a byref variable that's potentially
+ // captured and moved by its own initializer. If so, we'll need to
+ // emit the initializer first, then copy into the variable.
+ bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
+
+ llvm::Value *Loc =
+ capturedByInit ? emission.Address : emission.getObjectAddress(*this);
+
+ llvm::Constant *constant = 0;
+ if (emission.IsConstantAggregate) {
+ assert(!capturedByInit && "constant init contains a capturing block?");
+ constant = CGM.EmitConstantInit(D, this);
+ }
+
+ if (!constant) {
+ LValue lv = MakeAddrLValue(Loc, type, alignment);
+ lv.setNonGC(true);
+ return EmitExprAsInit(Init, &D, lv, capturedByInit);
+ }
+
+ // If this is a simple aggregate initialization, we can optimize it
+ // in various ways.
+ bool isVolatile = type.isVolatileQualified();
+
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtrTy,
+ getContext().getTypeSizeInChars(type).getQuantity());
+
+ llvm::Type *BP = Int8PtrTy;
+ if (Loc->getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP);
+
+ // If the initializer is all or mostly zeros, codegen with memset then do
+ // a few stores afterward.
+ if (shouldUseMemSetPlusStoresToInitialize(constant,
+ CGM.getTargetData().getTypeAllocSize(constant->getType()))) {
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ alignment.getQuantity(), isVolatile);
+ if (!constant->isNullValue()) {
+ Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
+ emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
+ }
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = GetStaticDeclName(*this, D, ".");
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
+ llvm::GlobalValue::PrivateLinkage,
+ constant, Name, 0, false, 0);
+ GV->setAlignment(alignment.getQuantity());
+ GV->setUnnamedAddr(true);
+
+ llvm::Value *SrcPtr = GV;
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
+ isVolatile);
+ }
+}
+
+/// Emit an expression as an initializer for a variable at the given
+/// location. The expression is not necessarily the normal
+/// initializer for the variable, and the address is not necessarily
+/// its normal location.
+///
+/// \param init the initializing expression
+/// \param D the variable to act as if we're initializing
+/// \param lvalue the lvalue to initialize; its address is a pointer
+///   to the LLVM mapping of the variable's type
+/// \param capturedByInit true if the variable is a __block variable
+/// whose address is potentially changed by the initializer
+void CodeGenFunction::EmitExprAsInit(const Expr *init,
+ const ValueDecl *D,
+ LValue lvalue,
+ bool capturedByInit) {
+ QualType type = D->getType();
+
+ if (type->isReferenceType()) {
+ RValue rvalue = EmitReferenceBindingToExpr(init, D);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(rvalue, lvalue, true);
+ } else if (!hasAggregateLLVMType(type)) {
+ EmitScalarInit(init, D, lvalue, capturedByInit);
+ } else if (type->isAnyComplexType()) {
+ ComplexPairTy complex = EmitComplexExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ StoreComplexToAddr(complex, lvalue.getAddress(), lvalue.isVolatile());
+ } else {
+ // TODO: how can we delay here if D is captured by its initializer?
+ EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
+ }
+}
+
+/// Enter a destroy cleanup for the given local variable.
+void CodeGenFunction::emitAutoVarTypeCleanup(
+ const CodeGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ llvm::Value *addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ assert(!type->isArrayType());
+ CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
+ emission.NRVOFlag);
+ return;
+ }
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ // Suppress cleanups for pseudo-strong variables.
+ if (var->isARCPseudoStrong()) return;
+
+ // Otherwise, consider whether to use an EH cleanup or not.
+ cleanupKind = getARCCleanupKind();
+
+ // Use the imprecise destroyer by default.
+ if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ break;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer) destroyer = getDestroyer(dtorKind);
+
+ // Use an EH cleanup in array destructors iff the destructor itself
+ // is being pushed as an EH cleanup.
+ bool useEHCleanup = (cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+ useEHCleanup);
+}
+
+void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
+ assert(emission.Variable && "emission was not valid!");
+
+ // If this was emitted as a global constant, we're done.
+ if (emission.wasEmittedAsGlobal()) return;
+
+ // If we don't have an insertion point, we're done. Sema prevents
+ // us from jumping into any of these scopes anyway.
+ if (!HaveInsertPoint()) return;
+
+ const VarDecl &D = *emission.Variable;
+
+ // Check the type for a cleanup.
+ if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
+ emitAutoVarTypeCleanup(emission, dtorKind);
+
+ // In GC mode, honor objc_precise_lifetime.
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
+ D.hasAttr<ObjCPreciseLifetimeAttr>()) {
+ EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
+ }
+
+ // Handle the cleanup attribute.
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant *F = CGM.GetAddrOfFunction(FD);
+ assert(F && "Could not find function!");
+
+ const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
+ EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
+ }
+
+ // If this is a block variable, call _Block_object_destroy
+ // (on the unforwarded address).
+ if (emission.IsByRef)
+ enterByrefCleanup(emission);
+}
+
+CodeGenFunction::Destroyer *
+CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ return destroyARCStrongPrecise;
+ case QualType::DK_objc_weak_lifetime:
+ return destroyARCWeak;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+/// pushDestroy - Push the standard destructor for the given type.
+void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
+ QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
+ destroyer, useEHCleanupForArray);
+}
+
+/// emitDestroy - Immediately perform the destruction of the given
+/// object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+/// \param useEHCleanupForArray - whether an EH cleanup should be
+/// used when destroying array elements, in case one of the
+/// destructions throws an exception
+void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ llvm::Value *begin = addr;
+ llvm::Value *length = emitArrayLength(arrayType, type, begin);
+
+ // Normally we have to check whether the array is zero-length.
+ bool checkZeroLength = true;
+
+ // But if the array length is constant, we can suppress that.
+ if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
+ // ...and if it's constant zero, we can just skip the entire thing.
+ if (constLength->isZero()) return;
+ checkZeroLength = false;
+ }
+
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
+ emitArrayDestroy(begin, end, type, destroyer,
+ checkZeroLength, useEHCleanupForArray);
+}
+
+/// emitArrayDestroy - Destroys all the elements of the given array,
+/// beginning from last to first. The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param type - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+/// \param checkZeroLength - whether to check that begin != end before
+///   entering the destruction loop
+/// \param useEHCleanup - whether to push an EH cleanup to destroy
+///   the remaining elements in case the destruction of a single
+///   element throws
+void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
+ llvm::Value *end,
+ QualType type,
+ Destroyer *destroyer,
+ bool checkZeroLength,
+ bool useEHCleanup) {
+ assert(!type->isArrayType());
+
+ // The basic structure here is a do-while loop, because we don't
+ // need to check for the zero-element case.
+ llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
+ llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
+
+ if (checkZeroLength) {
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
+ "arraydestroy.isempty");
+ Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
+ }
+
+ // Enter the loop body, making that address the current address.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ EmitBlock(bodyBB);
+ llvm::PHINode *elementPast =
+ Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
+ elementPast->addIncoming(end, entryBB);
+
+ // Shift the address back by one element.
+ llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
+ "arraydestroy.element");
+
+ if (useEHCleanup)
+ pushRegularPartialArrayCleanup(begin, element, type, destroyer);
+
+ // Perform the actual destruction there.
+ destroyer(*this, element, type);
+
+ if (useEHCleanup)
+ PopCleanupBlock();
+
+ // Check whether we've reached the end.
+ llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
+ Builder.CreateCondBr(done, doneBB, bodyBB);
+ elementPast->addIncoming(element, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(doneBB);
+}
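+
+// Illustrative shape of the emitted IR (a sketch, not verbatim output):
+//   %isempty = icmp eq %T* %begin, %end   ; only if checkZeroLength
+//   br i1 %isempty, label %arraydestroy.done, label %arraydestroy.body
+// arraydestroy.body:
+//   %elementPast = phi %T* [ %end, %entry ], [ %element, %arraydestroy.body ]
+//   %element = getelementptr inbounds %T* %elementPast, i64 -1
+//   ; ...destroy %element...
+//   %done = icmp eq %T* %element, %begin
+//   br i1 %done, label %arraydestroy.done, label %arraydestroy.body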
+
+/// Perform partial array destruction as if in an EH cleanup. Unlike
+/// emitArrayDestroy, the element type here may still be an array type.
+static void emitPartialArrayDestroy(CodeGenFunction &CGF,
+ llvm::Value *begin, llvm::Value *end,
+ QualType type,
+ CodeGenFunction::Destroyer *destroyer) {
+ // If the element type is itself an array, drill down.
+ unsigned arrayDepth = 0;
+ while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
+ // VLAs don't require a GEP index to walk into.
+ if (!isa<VariableArrayType>(arrayType))
+ arrayDepth++;
+ type = arrayType->getElementType();
+ }
+
+ if (arrayDepth) {
+    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+
+    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
+ begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
+ }
+
+ // Destroy the array. We don't ever need an EH cleanup because we
+ // assume that we're in an EH cleanup ourselves, so a throwing
+ // destructor causes an immediate terminate.
+ CGF.emitArrayDestroy(begin, end, type, destroyer,
+ /*checkZeroLength*/ true, /*useEHCleanup*/ false);
+}
+
+namespace {
+ /// RegularPartialArrayDestroy - a cleanup which performs a partial
+ /// array destroy where the end pointer is regularly determined and
+ /// does not need to be loaded from a local.
+ class RegularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEnd;
+ QualType ElementType;
+ CodeGenFunction::Destroyer *Destroyer;
+ public:
+ RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
+ ElementType(elementType), Destroyer(destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+
+ /// IrregularPartialArrayDestroy - a cleanup which performs a
+ /// partial array destroy where the end pointer is irregularly
+ /// determined and must be loaded from a local.
+ class IrregularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEndPointer;
+ QualType ElementType;
+ CodeGenFunction::Destroyer *Destroyer;
+ public:
+ IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
+ ElementType(elementType), Destroyer(destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
+ emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+}
+
+/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEndPointer - the address of a local from which the end of
+///   the constructed range is loaded (a value of type elementType**)
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+/// \param destroyer - the function to call to destroy elements
+void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer *destroyer) {
+ pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEndPointer,
+ elementType, destroyer);
+}
+
+/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEnd - a value of type elementType*, one past the end of the
+///   already-constructed range
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+/// \param destroyer - the function to call to destroy elements
+void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer *destroyer) {
+ pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEnd,
+ elementType, destroyer);
+}
+
+namespace {
+ /// A cleanup to perform a release of an object at the end of a
+ /// function. This is used to balance out the incoming +1 of a
+ /// ns_consumed argument when we can't reasonably do that just by
+ /// not doing the initial retain for a __block argument.
+ struct ConsumeARCParameter : EHScopeStack::Cleanup {
+ ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+
+ llvm::Value *Param;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitARCRelease(Param, /*precise*/ false);
+ }
+ };
+}
+
+/// Emit an alloca (or GlobalValue depending on target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
+ unsigned ArgNo) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+
+ Arg->setName(D.getName());
+
+ // Use better IR generation for certain implicit parameters.
+ if (isa<ImplicitParamDecl>(D)) {
+ // The only implicit argument a block has is its literal.
+ if (BlockInfo) {
+ LocalDeclMap[&D] = Arg;
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ }
+
+ return;
+ }
+ }
+
+ QualType Ty = D.getType();
+
+ llvm::Value *DeclPtr;
+ // If this is an aggregate or variable sized value, reuse the input pointer.
+ if (!Ty->isConstantSizeType() ||
+ CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ DeclPtr = Arg;
+ } else {
+ // Otherwise, create a temporary to hold the value.
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
+ D.getName() + ".addr");
+ Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ DeclPtr = Alloc;
+
+ bool doStore = true;
+
+ Qualifiers qs = Ty.getQualifiers();
+
+ if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
+ // We honor __attribute__((ns_consumed)) for types with lifetime.
+ // For __strong, it's handled by just skipping the initial retain;
+ // otherwise we have to balance out the initial +1 with an extra
+ // cleanup to do the release at the end of the function.
+ bool isConsumed = D.hasAttr<NSConsumedAttr>();
+
+ // 'self' is always formally __strong, but if this is not an
+ // init method then we don't want to retain it.
+ if (D.isARCPseudoStrong()) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
+ assert(&D == method->getSelfDecl());
+ assert(lt == Qualifiers::OCL_Strong);
+ assert(qs.hasConst());
+ assert(method->getMethodFamily() != OMF_init);
+ (void) method;
+ lt = Qualifiers::OCL_ExplicitNone;
+ }
+
+ if (lt == Qualifiers::OCL_Strong) {
+ if (!isConsumed)
+ // Don't use objc_retainBlock for block pointers, because we
+ // don't want to Block_copy something just because we got it
+ // as a parameter.
+ Arg = EmitARCRetainNonBlock(Arg);
+ } else {
+ // Push the cleanup for a consumed parameter.
+ if (isConsumed)
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+
+ if (lt == Qualifiers::OCL_Weak) {
+ EmitARCInitWeak(DeclPtr, Arg);
+ doStore = false; // The weak init is a store, no need to do two.
+ }
+ }
+
+ // Enter the cleanup scope.
+ EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
+ }
+
+ // Store the initial value into the alloca.
+ if (doStore) {
+ LValue lv = MakeAddrLValue(DeclPtr, Ty,
+ getContext().getDeclAlign(&D));
+ EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+ }
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, DeclPtr);
+}
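+
+// Example (illustrative): for
+//   void take(__attribute__((ns_consumed)) id obj);
+// the incoming +1 on a __strong `obj` is balanced simply by skipping the
+// objc_retain that a plain __strong parameter receives on entry; only
+// consumed parameters with a non-strong lifetime need the
+// ConsumeARCParameter cleanup above.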
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
new file mode 100644
index 0000000..10f0b83
--- /dev/null
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -0,0 +1,464 @@
+//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CGCXXABI.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(!D.getType()->isReferenceType() &&
+ "Should not call EmitDeclInit on a reference!");
+
+ ASTContext &Context = CGF.getContext();
+
+ CharUnits alignment = Context.getDeclAlign(&D);
+ QualType type = D.getType();
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
+
+ const Expr *Init = D.getInit();
+ if (!CGF.hasAggregateLLVMType(type)) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (lv.isObjCStrong())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr, D.isThreadSpecified());
+ else if (lv.isObjCWeak())
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr);
+ else
+ CGF.EmitScalarInit(Init, &D, lv, false);
+ } else if (type->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
+ } else {
+ CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+}
+
+/// Emit code to cause the destruction of the given variable with
+/// static storage duration.
+static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *addr) {
+ CodeGenModule &CGM = CGF.CGM;
+
+ // FIXME: __attribute__((cleanup)) ?
+
+ QualType type = D.getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ return;
+
+ case QualType::DK_cxx_destructor:
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ // We don't care about releasing objects during process teardown.
+ return;
+ }
+
+ llvm::Constant *function;
+ llvm::Constant *argument;
+
+ // Special-case non-array C++ destructors, where there's a function
+ // with the right signature that we can just call.
+ const CXXRecordDecl *record = 0;
+ if (dtorKind == QualType::DK_cxx_destructor &&
+ (record = type->getAsCXXRecordDecl())) {
+ assert(!record->hasTrivialDestructor());
+ CXXDestructorDecl *dtor = record->getDestructor();
+
+ function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
+ argument = addr;
+
+ // Otherwise, the standard logic requires a helper function.
+ } else {
+ function = CodeGenFunction(CGM).generateDestroyHelper(addr, type,
+ CGF.getDestroyer(dtorKind),
+ CGF.needsEHCleanup(dtorKind));
+ argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ }
+
+ CGF.EmitCXXGlobalDtorRegistration(function, argument);
+}
+
+/// Emit code to cause the variable at the given address to be considered as
+/// constant from this point onwards.
+static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Addr) {
+ // Don't emit the intrinsic if we're not optimizing.
+ if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // Grab the llvm.invariant.start intrinsic.
+ llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+
+ // Emit a call with the size in bytes of the object.
+ CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
+ uint64_t Width = WidthChars.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
+ CGF.Builder.CreateCall(InvariantStart, Args);
+}
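+
+// Illustrative result (a sketch): for a global `const Widget w = make();`
+// (Widget and make are hypothetical, Widget 8 bytes wide) this emits,
+// when optimizing:
+//   call {}* @llvm.invariant.start(i64 8,
+//                                  i8* bitcast (%struct.Widget* @w to i8*))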
+
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+ llvm::Constant *DeclPtr,
+ bool PerformInit) {
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+
+ if (!T->isReferenceType()) {
+ if (PerformInit)
+ EmitDeclInit(*this, D, DeclPtr);
+ if (CGM.isTypeConstant(D.getType(), true))
+ EmitDeclInvariant(*this, D, DeclPtr);
+ else
+ EmitDeclDestroy(*this, D, DeclPtr);
+ return;
+ }
+
+ assert(PerformInit && "cannot have constant initializer which needs "
+ "destruction for reference");
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
+}
+
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.Builder.CreateCall(atexit, args);
+}
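+
+// Illustrative result (a sketch): for a namespace-scope `A a;` with a
+// non-trivial destructor, this emits roughly
+//   call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)*
+//                            @_ZN1AD1Ev to void (i8*)*),
+//                          i8* bitcast (%struct.A* @a to i8*),
+//                          i8* @__dso_handle)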
+
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ llvm::FunctionType *ty,
+ const Twine &name);
+
+/// Create a stub function, suitable for being passed to atexit,
+/// which passes the given address to the given destructor function.
+static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Get the destructor function type, void(*)(void).
+ llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
+ llvm::Function *fn =
+ CreateGlobalInitOrDestructFunction(CGM, ty,
+ Twine("__dtor_", addr->getName()));
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
+ CGM.getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
+
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *dtorFn =
+ dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
+ call->setCallingConv(dtorFn->getCallingConv());
+
+ CGF.FinishFunction();
+
+ return fn;
+}
+
+/// Register a global destructor using atexit.
+static void emitGlobalDtorWithAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *dtorStub = createAtExitStub(CGF.CGM, dtor, addr);
+
+ // extern "C" int atexit(void (*f)(void));
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, dtorStub->getType(), false);
+
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
+ atexitFn->setDoesNotThrow();
+
+ CGF.Builder.CreateCall(atexit, dtorStub);
+}
+
+void CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit) {
+ emitGlobalDtorWithCXAAtExit(*this, dtor, addr);
+ return;
+ }
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getContext().getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ CGM.AddCXXDtorEntry(dtor, addr);
+ return;
+ }
+
+ // Otherwise, we just use atexit.
+ emitGlobalDtorWithAtExit(*this, dtor, addr);
+}
+
+void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
+ // If we've been asked to forbid guard variables, emit an error now.
+ // This diagnostic is hard-coded for Darwin's use case; we can find
+ // better phrasing if someone else needs it.
+ if (CGM.getCodeGenOpts().ForbidGuardVariables)
+ CGM.Error(D.getLocation(),
+ "this initialization requires a guard variable, which "
+ "the kernel does not support");
+
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
+}
+
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ llvm::FunctionType *FTy,
+ const Twine &Name) {
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name, &CGM.getModule());
+ if (!CGM.getContext().getLangOpts().AppleKext) {
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+ }
+
+ if (!CGM.getLangOpts().Exceptions)
+ Fn->setDoesNotThrow();
+
+ return Fn;
+}
+
+void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create a variable initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
+
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
+ PerformInit);
+
+ if (D->hasAttr<InitPriorityAttr>()) {
+ unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
+ OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
+ PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ DelayedCXXInitPosition.erase(D);
+  } else {
+ llvm::DenseMap<const Decl *, unsigned>::iterator I =
+ DelayedCXXInitPosition.find(D);
+ if (I == DelayedCXXInitPosition.end()) {
+ CXXGlobalInits.push_back(Fn);
+ } else {
+ assert(CXXGlobalInits[I->second] == 0);
+ CXXGlobalInits[I->second] = Fn;
+ DelayedCXXInitPosition.erase(I);
+ }
+ }
+}
+
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+ while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+ CXXGlobalInits.pop_back();
+
+ if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create our global initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+ for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
+ llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
+ LocalCXXGlobalInits.push_back(Fn);
+ }
+ LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end());
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &LocalCXXGlobalInits[0],
+ LocalCXXGlobalInits.size());
+  } else
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
+ AddGlobalCtor(Fn);
+ CXXGlobalInits.clear();
+ PrioritizedCXXGlobalInits.clear();
+}
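+
+// Illustrative module-level result (a sketch): each dynamically
+// initialized global gets its own __cxx_global_var_init function, and
+// _GLOBAL__I_a, registered via llvm.global_ctors, calls them in order:
+//   define internal void @_GLOBAL__I_a() {
+//     call void @__cxx_global_var_init()
+//     call void @__cxx_global_var_init1()
+//     ret void
+//   }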
+
+void CodeGenModule::EmitCXXGlobalDtorFunc() {
+ if (CXXGlobalDtors.empty())
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create our global destructor function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
+
+ CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
+ AddGlobalDtor(Fn);
+}
+
+/// Emit the code necessary to initialize the given global variable.
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ // Use guarded initialization if the global variable is weak. This
+ // occurs for, e.g., instantiated static data members and
+ // definitions explicitly marked weak.
+ if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage ||
+ Addr->getLinkage() == llvm::GlobalValue::WeakAnyLinkage) {
+ EmitCXXGuardedInit(*D, Addr, PerformInit);
+ } else {
+ EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
+ }
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ RunCleanupsScope Scope(*this);
+
+ // When building in Objective-C++ ARC mode, create an autorelease pool
+ // around the global initializers.
+ if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EmitObjCAutoreleasePoolCleanup(token);
+ }
+
+ for (unsigned i = 0; i != NumDecls; ++i)
+ if (Decls[i])
+ Builder.CreateCall(Decls[i]);
+
+ Scope.ForceCleanup();
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
+ &DtorsAndObjects) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ // Emit the dtors, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::CallInst *CI = Builder.CreateCall(Callee,
+ DtorsAndObjects[e - i - 1].second);
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
+ CI->setCallingConv(F->getCallingConv());
+ }
+
+ FinishFunction();
+}
+
+/// generateDestroyHelper - Generates a helper function which, when
+/// invoked, destroys the given object.
+llvm::Function *
+CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ FunctionArgList args;
+ ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
+ args.push_back(&dst);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ llvm::Function *fn =
+ CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+
+ StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
+ SourceLocation());
+
+ emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+
+ FinishFunction();
+
+ return fn;
+}
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
new file mode 100644
index 0000000..95e0030
--- /dev/null
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -0,0 +1,1595 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
+#include "TargetInfo.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
+ // void *__cxa_allocate_exception(size_t thrown_size);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.SizeTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
+}
+
+static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
+ // void __cxa_free_exception(void *thrown_exception);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
+}
+
+static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
+ // void (*dest) (void *));
+
+ llvm::Type *Args[3] = { CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, Args, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
+}
+
+static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_rethrow();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+}
+
+static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
+ // void *__cxa_get_exception_ptr(void*);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
+static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
+ // void *__cxa_begin_catch(void*);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
+}
+
+static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
+ // void __cxa_end_catch();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
+}
+
+static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
+  // void __cxa_call_unexpected(void *thrown_exception);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
+}
+
+llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+
+ if (CGM.getLangOpts().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume");
+}
+
+llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+
+ if (CGM.getLangOpts().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
+}
+
+static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
+ // void __terminate();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
+
+ StringRef name;
+
+ // In C++, use std::terminate().
+ if (CGF.getLangOpts().CPlusPlus)
+ name = "_ZSt9terminatev"; // FIXME: mangling!
+ else if (CGF.getLangOpts().ObjC1 &&
+ CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
+ name = "objc_terminate";
+ else
+ name = "abort";
+ return CGF.CGM.CreateRuntimeFunction(FTy, name);
+}
+
+static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
+ StringRef Name) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, Name);
+}
+
+namespace {
+ /// The exceptions personality for a function.
+ struct EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ };
+}
+
+const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", 0 };
+const EHPersonality EHPersonality::NeXT_ObjC = { "__objc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", 0};
+const EHPersonality
+EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", 0 };
+const EHPersonality
+EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
+const EHPersonality
+EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", 0 };
+
+static const EHPersonality &getCPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_C_SJLJ;
+ return EHPersonality::GNU_C;
+}
+
+static const EHPersonality &getObjCPersonality(const LangOptions &L) {
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI) return EHPersonality::NeXT_ObjC;
+ else return getCPersonality(L);
+ } else {
+ return EHPersonality::GNU_ObjC;
+ }
+}
+
+static const EHPersonality &getCXXPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
+ else
+ return EHPersonality::GNU_CPlusPlus;
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI)
+ return EHPersonality::NeXT_ObjC;
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ else
+ return getCXXPersonality(L);
+ }
+
+ // The GNU runtime's personality function inherently doesn't support
+ // mixed EH. Use the C++ personality just to avoid returning null.
+ return EHPersonality::GNU_ObjCXX;
+}
+
+const EHPersonality &EHPersonality::get(const LangOptions &L) {
+ if (L.CPlusPlus && L.ObjC1)
+ return getObjCXXPersonality(L);
+ else if (L.CPlusPlus)
+ return getCXXPersonality(L);
+ else if (L.ObjC1)
+ return getObjCPersonality(L);
+ else
+ return getCPersonality(L);
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn);
+ return Fn;
+}
+
+static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
+ return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+}
+
+/// Check whether a personality function could reasonably be swapped
+/// for a C++ personality function.
+static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
+ for (llvm::Constant::use_iterator
+ I = Fn->use_begin(), E = Fn->use_end(); I != E; ++I) {
+ llvm::User *User = *I;
+
+ // Conditionally white-list bitcasts.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(User)) {
+ if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
+ if (!PersonalityHasOnlyCXXUses(CE))
+ return false;
+ continue;
+ }
+
+ // Otherwise, it has to be a landingpad instruction.
+ llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(User);
+ if (!LPI) return false;
+
+ for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
+ // Look for something that would've been returned by the ObjC
+ // runtime's GetEHType() method.
+ llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
+ if (LPI->isCatch(I)) {
+ // Check if the catch value has the ObjC prefix.
+ if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ } else {
+ // Check if any of the filter values have the ObjC prefix.
+ llvm::Constant *CVal = cast<llvm::Constant>(Val);
+ for (llvm::User::op_iterator
+ II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
+ if (llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/// Try to use the C++ personality function in ObjC++. Not doing this
+/// can cause some incompatibilities with gcc, which is more
+/// aggressive about only using the ObjC++ personality in a function
+/// when it really needs it.
+void CodeGenModule::SimplifyPersonality() {
+ // For now, this is really a Darwin-specific operation.
+ if (!Context.getTargetInfo().getTriple().isOSDarwin())
+ return;
+
+ // If we're not in ObjC++ -fexceptions, there's nothing to do.
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
+ return;
+
+ const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
+ const EHPersonality &CXX = getCXXPersonality(LangOpts);
+ if (&ObjCXX == &CXX)
+ return;
+
+ assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
+ "Different EHPersonalities using the same personality function.");
+
+ llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);
+
+ // Nothing to do if it's unused.
+ if (!Fn || Fn->use_empty()) return;
+
+ // Can't do the optimization if it has non-C++ uses.
+ if (!PersonalityHasOnlyCXXUses(Fn)) return;
+
+ // Create the C++ personality function and kill off the old
+ // function.
+ llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
+
+ // This can happen if the user is screwing with us.
+ if (Fn->getType() != CXXFn->getType()) return;
+
+ Fn->replaceAllUsesWith(CXXFn);
+ Fn->eraseFromParent();
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
+}
+
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeException : EHScopeStack::Cleanup {
+ llvm::Value *exn;
+ FreeException(llvm::Value *exn) : exn(exn) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+// Emits an exception expression into the given location. This
+// differs from EmitAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
+ llvm::Value *addr) {
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ CGF.pushFullExprCleanup<FreeException>(EHCleanup, addr);
+ EHScopeStack::stable_iterator cleanup = CGF.EHStack.stable_begin();
+
+ // __cxa_allocate_exception returns a void*; we need to cast this
+ // to the appropriate type for the object.
+ llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
+ llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
+
+ // FIXME: this isn't quite right! If there's a final unelided call
+ // to a copy constructor, then according to [except.terminate]p1 we
+ // must call std::terminate() if that constructor throws, because
+ // technically that copy occurs after the exception expression is
+ // evaluated but before the exception is caught. But the best way
+ // to handle that is to teach EmitAggExpr to do the final copy
+ // differently if it can't be elided.
+ CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+ /*IsInit*/ true);
+
+ // Deactivate the cleanup block.
+ CGF.DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
+}
+
+llvm::Value *CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot)
+ ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
+ return ExceptionSlot;
+}
+
+llvm::Value *CodeGenFunction::getEHSelectorSlot() {
+ if (!EHSelectorSlot)
+ EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
+ return EHSelectorSlot;
+}
+
+llvm::Value *CodeGenFunction::getExceptionFromSlot() {
+ return Builder.CreateLoad(getExceptionSlot(), "exn");
+}
+
+llvm::Value *CodeGenFunction::getSelectorFromSlot() {
+ return Builder.CreateLoad(getEHSelectorSlot(), "sel");
+}
+
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+ if (!E->getSubExpr()) {
+ if (getInvokeDest()) {
+ Builder.CreateInvoke(getReThrowFn(*this),
+ getUnreachableBlock(),
+ getInvokeDest())
+ ->setDoesNotReturn();
+ } else {
+ Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
+
+ return;
+ }
+
+ QualType ThrowType = E->getSubExpr()->getType();
+
+ // Now allocate the exception object.
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
+
+ llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
+ llvm::CallInst *ExceptionPtr =
+ Builder.CreateCall(AllocExceptionFn,
+ llvm::ConstantInt::get(SizeTy, TypeSize),
+ "exception");
+ ExceptionPtr->setDoesNotThrow();
+
+ EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
+
+ // Now throw the exception.
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
+ /*ForEH=*/true);
+
+ // The address of the destructor. If the exception type has a
+ // trivial destructor (or isn't a record), we just pass null.
+ llvm::Constant *Dtor = 0;
+ if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->hasTrivialDestructor()) {
+ CXXDestructorDecl *DtorD = Record->getDestructor();
+ Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
+ }
+ }
+ if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (getInvokeDest()) {
+ llvm::InvokeInst *ThrowCall =
+ Builder.CreateInvoke3(getThrowFn(*this),
+ getUnreachableBlock(), getInvokeDest(),
+ ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ } else {
+ llvm::CallInst *ThrowCall =
+ Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
+}
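+
+// For reference, a minimal sketch of what the above emits for 'throw 42;'
+// under the Itanium C++ ABI (informal; names and exact casts may differ):
+//
+//   %exn = call i8* @__cxa_allocate_exception(i64 4)
+//   %buf = bitcast i8* %exn to i32*
+//   store i32 42, i32* %buf
+//   call void @__cxa_throw(i8* %exn, i8* bitcast (i8** @_ZTIi to i8*),
+//                          i8* null)   ; int has no destructor, so null
+//   unreachable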
+
+void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
+ if (!CGM.getLangOpts().CXXExceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ // noexcept functions are simple terminate scopes.
+ EHStack.pushTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
+
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
+ /*ForEH=*/true);
+ Filter->setFilter(I, EHType);
+ }
+ }
+}
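+
+// As an illustration (a sketch, not verbatim output), a definition of
+// 'void f() throw(int)' pushes a filter scope whose landing pad carries:
+//
+//   %lp = landingpad { i8*, i32 } ...
+//           filter [1 x i8*] [i8* bitcast (i8** @_ZTIi to i8*)]
+//
+// A negative selector at runtime means the filter failed, and the dispatch
+// block emitted below routes the exception to __cxa_call_unexpected.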
+
+/// Emit the dispatch block for a filter scope if necessary.
+static void emitFilterDispatchBlock(CodeGenFunction &CGF,
+ EHFilterScope &filterScope) {
+ llvm::BasicBlock *dispatchBlock = filterScope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) return;
+ if (dispatchBlock->use_empty()) {
+ delete dispatchBlock;
+ return;
+ }
+
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // If this isn't a catch-all filter, we need to check whether we got
+ // here because the filter triggered.
+ if (filterScope.getNumFilters()) {
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+ llvm::BasicBlock *unexpectedBB = CGF.createBasicBlock("ehspec.unexpected");
+
+ llvm::Value *zero = CGF.Builder.getInt32(0);
+ llvm::Value *failsFilter =
+ CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
+ CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock());
+
+ CGF.EmitBlock(unexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ llvm::Value *exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateCall(getUnexpectedFn(CGF), exn)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
+ if (!CGM.getLangOpts().CXXExceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ EHStack.popTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
+ emitFilterDispatchBlock(*this, filterScope);
+ EHStack.popFilter();
+ }
+}
+
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ EnterCXXTryStmt(S);
+ EmitStmt(S.getTryBlock());
+ ExitCXXTryStmt(S);
+}
+
+void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+      // FIXME: Dropping the reference type on the type info makes it
+ // impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ QualType CaughtType = C->getCaughtType();
+ CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+
+ llvm::Value *TypeInfo = 0;
+ if (CaughtType->isObjCObjectPointerType())
+ TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
+ else
+ TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, /*ForEH=*/true);
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setCatchAllHandler(I, Handler);
+ }
+ }
+}
+
+llvm::BasicBlock *
+CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
+ // The dispatch block for the end of the scope chain is a block that
+ // just resumes unwinding.
+ if (si == EHStack.stable_end())
+ return getEHResumeBlock();
+
+ // Otherwise, we should look at the actual scope.
+ EHScope &scope = *EHStack.find(si);
+
+ llvm::BasicBlock *dispatchBlock = scope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) {
+ switch (scope.getKind()) {
+ case EHScope::Catch: {
+ // Apply a special case to a single catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(scope);
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ dispatchBlock = catchScope.getHandler(0).Block;
+
+ // Otherwise, make a dispatch block.
+ } else {
+ dispatchBlock = createBasicBlock("catch.dispatch");
+ }
+ break;
+ }
+
+ case EHScope::Cleanup:
+ dispatchBlock = createBasicBlock("ehcleanup");
+ break;
+
+ case EHScope::Filter:
+ dispatchBlock = createBasicBlock("filter.dispatch");
+ break;
+
+ case EHScope::Terminate:
+ dispatchBlock = getTerminateHandler();
+ break;
+ }
+ scope.setCachedEHDispatchBlock(dispatchBlock);
+ }
+ return dispatchBlock;
+}
+
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ return false;
+ }
+
+ llvm_unreachable("Invalid EHScope Kind!");
+}
+
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
+
+ if (!CGM.getLangOpts().Exceptions)
+ return 0;
+
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
+}
+
+// This code contains a hack to work around a design flaw in
+// LLVM's EH IR which breaks semantics after inlining. This same
+// hack is implemented in llvm-gcc.
+//
+// The LLVM EH abstraction is basically a thin veneer over the
+// traditional GCC zero-cost design: for each range of instructions
+// in the function, there is (at most) one "landing pad" with an
+// associated chain of EH actions. A language-specific personality
+// function interprets this chain of actions and (1) decides whether
+// or not to resume execution at the landing pad and (2) if so,
+// provides an integer indicating why it's stopping. In LLVM IR,
+// the association of a landing pad with a range of instructions is
+// achieved via an invoke instruction, the chain of actions becomes
+// the arguments to the @llvm.eh.selector call, and the selector
+// call returns the integer indicator. Other than the required
+// presence of two intrinsic function calls in the landing pad,
+// the IR exactly describes the layout of the output code.
+//
+// A principal advantage of this design is that it is completely
+// language-agnostic; in theory, the LLVM optimizers can treat
+// landing pads neutrally, and targets need only know how to lower
+// the intrinsics to have a functioning exceptions system (assuming
+// that platform exceptions follow something approximately like the
+// GCC design). Unfortunately, landing pads cannot be combined in a
+// language-agnostic way: given selectors A and B, there is no way
+// to make a single landing pad which faithfully represents the
+// semantics of propagating an exception first through A, then
+// through B, without knowing how the personality will interpret the
+// (lowered form of the) selectors. This means that inlining has no
+// choice but to crudely chain invokes (i.e., to ignore invokes in
+// the inlined function, but to turn all unwindable calls into
+// invokes), which is only semantically valid if every unwind stops
+// at every landing pad.
+//
+// Therefore, the invoke-inline hack is to guarantee that every
+// landing pad has a catch-all.
+enum CleanupHackLevel_t {
+ /// A level of hack that requires that all landing pads have
+ /// catch-alls.
+ CHL_MandatoryCatchall,
+
+ /// A level of hack that requires that all landing pads handle
+ /// cleanups.
+ CHL_MandatoryCleanup,
+
+ /// No hacks at all; ideal IR generation.
+ CHL_Ideal
+};
+const CleanupHackLevel_t CleanupHackLevel = CHL_MandatoryCleanup;
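+
+// Under CHL_MandatoryCleanup, a landing pad for a scope with one catch
+// handler and an active cleanup looks roughly like this (a sketch; the
+// personality bitcast is abbreviated):
+//
+//   %lp = landingpad { i8*, i32 }
+//           personality i8* bitcast (... @__gxx_personality_v0 ...)
+//           catch i8* bitcast (i8** @_ZTIi to i8*)
+//           cleanup
+//
+// The 'cleanup' flag is what guarantees the pad is reached even when no
+// catch clause matches, which is what makes crude invoke-chaining sound.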
+
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
+ switch (innermostEHScope.getKind()) {
+ case EHScope::Terminate:
+ return getTerminateLandingPad();
+
+ case EHScope::Catch:
+ case EHScope::Cleanup:
+ case EHScope::Filter:
+ if (llvm::BasicBlock *lpad = innermostEHScope.getCachedLandingPad())
+ return lpad;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
+
+ const EHPersonality &personality = EHPersonality::get(getLangOpts());
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *lpad = createBasicBlock("lpad");
+ EmitBlock(lpad);
+
+ llvm::LandingPadInst *LPadInst =
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ getOpaquePersonalityFn(CGM, personality), 0);
+
+ llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
+ Builder.CreateStore(LPadExn, getExceptionSlot());
+ llvm::Value *LPadSel = Builder.CreateExtractValue(LPadInst, 1);
+ Builder.CreateStore(LPadSel, getEHSelectorSlot());
+
+  // The exception pointer and selector were saved into their slots above.
+  // It's safe to use a single exception slot per function because EH
+  // cleanups can never have nested try/catches.
+
+ // Accumulate all the handlers in scope.
+ bool hasCatchAll = false;
+ bool hasCleanup = false;
+ bool hasFilter = false;
+ SmallVector<llvm::Value*, 4> filterTypes;
+ llvm::SmallPtrSet<llvm::Value*, 4> catchTypes;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
+ I != E; ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::Cleanup:
+ // If we have a cleanup, remember that.
+ hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!hasCatchAll && "EH filter reached after catch-all");
+
+ // Filter scopes get added to the landingpad in weird ways.
+ EHFilterScope &filter = cast<EHFilterScope>(*I);
+ hasFilter = true;
+
+ // Add all the filter values.
+ for (unsigned i = 0, e = filter.getNumFilters(); i != e; ++i)
+ filterTypes.push_back(filter.getFilter(i));
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
+
+ case EHScope::Catch:
+ break;
+ }
+
+ EHCatchScope &catchScope = cast<EHCatchScope>(*I);
+ for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
+ EHCatchScope::Handler handler = catchScope.getHandler(hi);
+
+ // If this is a catch-all, register that and abort.
+ if (!handler.Type) {
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
+ }
+
+ // Check whether we already have a handler for this type.
+ if (catchTypes.insert(handler.Type))
+ // If not, add it directly to the landingpad.
+ LPadInst->addClause(handler.Type);
+ }
+ }
+
+ done:
+ // If we have a catch-all, add null to the landingpad.
+ assert(!(hasCatchAll && hasFilter));
+ if (hasCatchAll) {
+ LPadInst->addClause(getCatchAllValue(*this));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the landingpad, which is to say, at the end.
+ } else if (hasFilter) {
+ // Create a filter expression: a constant array indicating which filter
+ // types there are. The personality routine only lands here if the filter
+ // doesn't match.
+ llvm::SmallVector<llvm::Constant*, 8> Filters;
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(!filterTypes.empty() ?
+ filterTypes[0]->getType() : Int8PtrTy,
+ filterTypes.size());
+
+ for (unsigned i = 0, e = filterTypes.size(); i != e; ++i)
+ Filters.push_back(cast<llvm::Constant>(filterTypes[i]));
+ llvm::Constant *FilterArray = llvm::ConstantArray::get(AType, Filters);
+ LPadInst->addClause(FilterArray);
+
+ // Also check whether we need a cleanup.
+ if (hasCleanup)
+ LPadInst->setCleanup(true);
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (CleanupHackLevel == CHL_MandatoryCatchall || hasCleanup) {
+ if (CleanupHackLevel == CHL_MandatoryCatchall)
+ LPadInst->addClause(getCatchAllValue(*this));
+ else
+ LPadInst->setCleanup(true);
+ }
+
+ assert((LPadInst->getNumClauses() > 0 || LPadInst->isCleanup()) &&
+ "landingpad instruction has no clauses!");
+
+ // Tell the backend how to generate the landing pad.
+ Builder.CreateBr(getEHDispatchBlock(EHStack.getInnermostEHScope()));
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(savedIP);
+
+ return lpad;
+}
+
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch : EHScopeStack::Cleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF));
+ }
+ };
+}
+
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
+ llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
+ Call->setDoesNotThrow();
+
+ CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+
+ return Call;
+}
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ llvm::Value *ParamAddr) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
+ bool EndCatchMightThrow = CaughtType->isRecordType();
+
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+
+ // We have no way to tell the personality function that we're
+ // catching by reference, so if we're catching a pointer,
+ // __cxa_begin_catch will actually return that pointer by value.
+ if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
+ QualType PointeeType = PT->getPointeeType();
+
+ // When catching by reference, generally we should just ignore
+ // this by-value pointer and use the exception object instead.
+ if (!PointeeType->isRecordType()) {
+
+ // Exn points to the struct _Unwind_Exception header, which
+ // we have to skip past in order to reach the exception data.
+ unsigned HeaderSize =
+ CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
+ AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
+
+ // However, if we're catching a pointer-to-record type that won't
+ // work, because the personality function might have adjusted
+ // the pointer. There's actually no way for us to fully satisfy
+ // the language/ABI contract here: we can't use Exn because it
+ // might have the wrong adjustment, but we can't use the by-value
+ // pointer because it's off by a level of abstraction.
+ //
+ // The current solution is to dump the adjusted pointer into an
+ // alloca, which breaks language semantics (because changing the
+ // pointer doesn't change the exception) but at least works.
+ // The better solution would be to filter out non-exact matches
+ // and rethrow them, but this is tricky because the rethrow
+ // really needs to be catchable by other sites at this landing
+ // pad. The best solution is to fix the personality function.
+ } else {
+ // Pull the pointer for the reference type off.
+ llvm::Type *PtrTy =
+ cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+
+ // Create the temporary and write the adjusted pointer into it.
+ llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
+ llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.Builder.CreateStore(Casted, ExnPtrTmp);
+
+ // Bind the reference to the temporary.
+ AdjustedExn = ExnPtrTmp;
+ }
+ }
+
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
+ }
+
+ // Non-aggregates (plus complexes).
+ bool IsComplex = false;
+ if (!CGF.hasAggregateLLVMType(CatchType) ||
+ (IsComplex = CatchType->isAnyComplexType())) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+
+ switch (CatchType.getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(ParamAddr, CastExn);
+ return;
+ }
+ llvm_unreachable("bad ownership qualifier!");
+ }
+
+ // Otherwise, it returns a pointer into the exception object.
+
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ if (IsComplex) {
+ CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
+ ParamAddr, /*volatile*/ false);
+ } else {
+ unsigned Alignment =
+ CGF.getContext().getDeclAlign(&CatchParam).getQuantity();
+ llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, Alignment,
+ CatchType);
+ }
+ return;
+ }
+
+ assert(isa<RecordType>(CatchType) && "unexpected catch type!");
+
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ // Check for a copy expression. If we don't have a copy expression,
+ // that means a trivial copy is okay.
+ const Expr *copyExpr = CatchParam.getInit();
+ if (!copyExpr) {
+ llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
+ return;
+ }
+
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *rawAdjustedExn =
+ CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
+ rawAdjustedExn->setDoesNotThrow();
+
+ // Cast that to the appropriate type.
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+
+ // The copy expression is defined in terms of an OpaqueValueExpr.
+ // Find it and map it to the adjusted expression.
+ CodeGenFunction::OpaqueValueMapping
+ opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
+ CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+
+ // Perform the copy construction.
+ CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
+ CGF.EmitAggExpr(copyExpr,
+ AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ // Leave the terminate scope.
+ CGF.EHStack.popTerminate();
+
+ // Undo the opaque value mapping.
+ opaque.pop();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn, true);
+}
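+
+// A compact summary of the cases handled above (informal, per the Itanium
+// ABI conventions used here):
+//   catch (T &r)   - store the adjusted exception pointer into r's slot
+//   catch (T *p)   - __cxa_begin_catch returns the pointer itself by value
+//   catch (int i)  - load the scalar through the adjusted pointer
+//   catch (Cls c)  - copy-construct from the __cxa_get_exception_ptr result,
+//                    inside a terminate scope per [except.terminate]p1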
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by using a slightly abnormal initialization process.
+ // Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitAutoVarAlloca creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitAutoVarCleanups enters the variable destructor cleanup
+ // - EmitCXXTryStmt emits the code for the catch body
+  // - EmitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
+ CallBeginCatch(CGF, Exn, true);
+ return;
+ }
+
+ // Emit the local.
+ CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
+ InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF));
+ CGF.EmitAutoVarCleanups(var);
+}
+
+namespace {
+ struct CallRethrow : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF));
+ }
+ };
+}
+
+/// Emit the structure of the dispatch block for the given catch scope.
+/// It is an invariant that the dispatch block already exists.
+static void emitCatchDispatchBlock(CodeGenFunction &CGF,
+ EHCatchScope &catchScope) {
+ llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
+ assert(dispatchBlock);
+
+ // If there's only a single catch-all, getEHDispatchBlock returned
+ // that catch-all as the dispatch block.
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ assert(dispatchBlock == catchScope.getHandler(0).Block);
+ return;
+ }
+
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // Select the right handler.
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned i = 0, e = catchScope.getNumHandlers(); ; ++i) {
+ assert(i < e && "ran off end of handlers!");
+ const EHCatchScope::Handler &handler = catchScope.getHandler(i);
+
+ llvm::Value *typeValue = handler.Type;
+ assert(typeValue && "fell into catch-all case!");
+ typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
+
+ // Figure out the next block.
+ bool nextIsEnd;
+ llvm::BasicBlock *nextBlock;
+
+ // If this is the last handler, we're at the end, and the next
+ // block is the block for the enclosing EH scope.
+ if (i + 1 == e) {
+ nextBlock = CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
+ nextIsEnd = true;
+
+ // If the next handler is a catch-all, we're at the end, and the
+ // next block is that handler.
+ } else if (catchScope.getHandler(i+1).isCatchAll()) {
+ nextBlock = catchScope.getHandler(i+1).Block;
+ nextIsEnd = true;
+
+ // Otherwise, we're not at the end and we need a new block.
+ } else {
+ nextBlock = CGF.createBasicBlock("catch.fallthrough");
+ nextIsEnd = false;
+ }
+
+ // Figure out the catch type's index in the LSDA's type table.
+ llvm::CallInst *typeIndex =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for, typeValue);
+ typeIndex->setDoesNotThrow();
+
+ llvm::Value *matchesTypeIndex =
+ CGF.Builder.CreateICmpEQ(selector, typeIndex, "matches");
+ CGF.Builder.CreateCondBr(matchesTypeIndex, handler.Block, nextBlock);
+
+ // If the next handler is a catch-all, we're completely done.
+ if (nextIsEnd) {
+ CGF.Builder.restoreIP(savedIP);
+ return;
+ }
+ // Otherwise we need to emit and continue at that block.
+ CGF.EmitBlock(nextBlock);
+ }
+}
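+
+// Per handler, the resulting dispatch chain looks roughly like this (a
+// sketch, where %sel is the selector loaded from the slot and @_ZTIi
+// stands in for the handler's type info):
+//
+//   %id = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+//   %matches = icmp eq i32 %sel, %id
+//   br i1 %matches, label %catch, label %catch.fallthrough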
+
+void CodeGenFunction::popCatchScope() {
+ EHCatchScope &catchScope = cast<EHCatchScope>(*EHStack.begin());
+ if (catchScope.hasEHBranches())
+ emitCatchDispatchBlock(*this, catchScope);
+ EHStack.popCatch();
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // If the catch was not required, bail out now.
+ if (!CatchScope.hasEHBranches()) {
+ EHStack.popCatch();
+ return;
+ }
+
+ // Emit the structure of the EH dispatch for this catch.
+ emitCatchDispatchBlock(*this, CatchScope);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ memcpy(Handlers.data(), CatchScope.begin(),
+ NumHandlers * sizeof(EHCatchScope::Handler));
+
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Determine if we need an implicit rethrow for all these catch handlers.
+ bool ImplicitRethrow = false;
+ if (IsFnTryBlock)
+ ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
+
+ // Perversely, we emit the handlers backwards precisely because we
+ // want them to appear in source order. In all of these cases, the
+ // catch block will have exactly one predecessor, which will be a
+ // particular block in the catch dispatch. However, in the case of
+ // a catch-all, one of the dispatch blocks will branch to two
+ // different handlers, and EmitBlockAfterUses will cause the second
+ // handler to be moved before the first.
+ for (unsigned I = NumHandlers; I != 0; --I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
+ EmitBlockAfterUses(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I-1);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ BeginCatch(*this, C);
+
+ // If there's an implicit rethrow, push a normal "cleanup" to call
+    // __cxa_rethrow. This needs to happen before __cxa_end_catch is
+ // called, and so it is pushed after BeginCatch.
+ if (ImplicitRethrow)
+ EHStack.pushCleanup<CallRethrow>(NormalCleanup);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
+
+ EmitBlock(ContBB);
+}
+
+namespace {
+ struct CallEndCatchForFinally : EHScopeStack::Cleanup {
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
+ : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB =
+ CGF.createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ CGF.EmitBlock(EndCatchBB);
+ CGF.EmitCallOrInvoke(EndCatchFn); // catch-all, so might throw
+ CGF.EmitBlock(CleanupContBB);
+ }
+ };
+
+ struct PerformFinally : EHScopeStack::Cleanup {
+ const Stmt *Body;
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ llvm::Value *RethrowFn;
+ llvm::Value *SavedExnVar;
+
+ PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
+ llvm::Value *EndCatchFn,
+ llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
+ : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
+ RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn)
+ CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
+ ForEHVar, EndCatchFn);
+
+ // Save the current cleanup destination in case there are
+ // cleanups in the finally block.
+ llvm::Value *SavedCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest.saved");
+
+ // Emit the finally block.
+ CGF.EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (CGF.HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ CGF.EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ CGF.EmitCallOrInvoke(RethrowFn, CGF.Builder.CreateLoad(SavedExnVar));
+ } else {
+ CGF.EmitCallOrInvoke(RethrowFn);
+ }
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(ContBB);
+
+ // Restore the cleanup destination.
+ CGF.Builder.CreateStore(SavedCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ }
+
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.PopCleanupBlock();
+ CGF.Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ CGF.EnsureInsertPoint();
+ }
+ };
+}
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
+ const Stmt *body,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn) {
+ assert((beginCatchFn != 0) == (endCatchFn != 0) &&
+ "begin/end catch functions not paired");
+ assert(rethrowFn && "rethrow function is required");
+
+ BeginCatchFn = beginCatchFn;
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ llvm::FunctionType *rethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
+ SavedExnVar = 0;
+ if (rethrowFnTy->getNumParams())
+ SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+ ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
+
+ // Enter a normal cleanup which will perform the @finally block.
+ CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
+ ForEHVar, endCatchFn,
+ rethrowFn, SavedExnVar);
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
+ EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
+ catchScope->setCatchAllHandler(0, catchBB);
+}
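+
+// After enter(), the innermost EH scopes for '@try { body } @finally { F }'
+// are, from innermost outward (informally):
+//   1. a catch-all landing in finally.catchall, which records the
+//      exception, sets ForEHVar, and branches through the cleanup; and
+//   2. a normal cleanup (PerformFinally) that emits F and rethrows when
+//      ForEHVar is set.
+// exit() below pops both in order.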
+
+void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
+ // Leave the finally catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
+ llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
+
+ CGF.popCatchScope();
+
+  // If the catch-all block is unused, delete it; otherwise emit it.
+ if (catchBB->use_empty()) {
+ delete catchBB;
+ } else {
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(catchBB);
+
+ llvm::Value *exn = 0;
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateCall(BeginCatchFn, exn)->setDoesNotThrow();
+ }
+
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ if (!exn) exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateStore(exn, SavedExnVar);
+ }
+
+    // Tell the cleanups in the finally block that we're doing this for EH.
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
+
+ // Thread a jump through the finally cleanup.
+ CGF.EmitBranchThroughCleanup(RethrowDest);
+
+ CGF.Builder.restoreIP(savedIP);
+ }
+
+ // Finally, leave the @finally cleanup.
+ CGF.PopCleanupBlock();
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+ llvm::LandingPadInst *LPadInst =
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ getOpaquePersonalityFn(CGM, Personality), 0);
+ LPadInst->addClause(getCatchAllValue(*this));
+
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateLandingPad;
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
+ if (TerminateHandler)
+ return TerminateHandler;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
+ TerminateHandler = createBasicBlock("terminate.handler");
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateHandler;
+}
+
+llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
+ if (EHResumeBlock) return EHResumeBlock;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+
+ // We emit a jump to a notional label at the outermost unwind state.
+ EHResumeBlock = createBasicBlock("eh.resume");
+ Builder.SetInsertPoint(EHResumeBlock);
+
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ const char *RethrowName = Personality.CatchallRethrowFn;
+ if (RethrowName != 0) {
+ Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
+ getExceptionFromSlot())
+ ->setDoesNotReturn();
+ } else {
+ switch (CleanupHackLevel) {
+ case CHL_MandatoryCatchall:
+ // In mandatory-catchall mode, we need to use
+ // _Unwind_Resume_or_Rethrow, or whatever the personality's
+ // equivalent is.
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ getExceptionFromSlot())
+ ->setDoesNotReturn();
+ break;
+ case CHL_MandatoryCleanup: {
+ // In mandatory-cleanup mode, we should use 'resume'.
+
+ // Recreate the landingpad's return value for the 'resume' instruction.
+ llvm::Value *Exn = getExceptionFromSlot();
+ llvm::Value *Sel = getSelectorFromSlot();
+
+ llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
+ Sel->getType(), NULL);
+ llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
+ LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
+ LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
+
+ Builder.CreateResume(LPadVal);
+ Builder.restoreIP(SavedIP);
+ return EHResumeBlock;
+ }
+ case CHL_Ideal:
+ // In an idealized mode where we don't have to worry about the
+ // optimizer combining landing pads, we should just use
+ // _Unwind_Resume (or the personality's equivalent).
+ Builder.CreateCall(getUnwindResumeFn(), getExceptionFromSlot())
+ ->setDoesNotReturn();
+ break;
+ }
+ }
+
+ Builder.CreateUnreachable();
+
+ Builder.restoreIP(SavedIP);
+
+ return EHResumeBlock;
+}
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..5f2b1f0
--- /dev/null
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,3256 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGRecordLayout.h"
+#include "CGObjCRuntime.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Support/MDBuilder.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
+ unsigned addressSpace =
+ cast<llvm::PointerType>(value->getType())->getAddressSpace();
+
+ llvm::PointerType *destType = Int8PtrTy;
+ if (addressSpace)
+ destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
+
+ if (value->getType() == destType) return value;
+ return Builder.CreateBitCast(value, destType);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
+ if (!Builder.isNamePreserving())
+ return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
+ return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+}
+
+void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
+ llvm::Value *Init) {
+ llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
+}
+
+llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
+
+llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
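+
+// The distinction matters for types whose in-memory representation differs
+// from their scalar representation: e.g. for C++ 'bool', CreateIRTemp
+// yields an 'i1' alloca while CreateMemTemp yields an 'i8' alloca (the
+// usual lowering; exact widths are target-dependent).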
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
+ llvm::Value *MemPtr = EmitScalarExpr(E);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
+ }
+
+ QualType BoolTy = getContext().BoolTy;
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+}
+
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+ if (E->isRValue())
+ return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // Just emit it as an l-value and drop the result.
+ EmitLValue(E);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which
+/// can have any type. The result is returned as an RValue struct.
+/// If this is an aggregate expression, AggSlot indicates where the
+/// result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
+ bool IgnoreResult) {
+ if (!hasAggregateLLVMType(E->getType()))
+ return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ else if (E->getType()->isAnyComplexType())
+ return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
+
+ EmitAggExpr(E, AggSlot, IgnoreResult);
+ return AggSlot.asRValue();
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
+ AggValueSlot AggSlot = AggValueSlot::ignored();
+
+ if (hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggSlot);
+}
+
+/// EmitAnyExprToMem - Evaluate an expression into a given memory
+/// location.
+void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
+ llvm::Value *Location,
+ Qualifiers Quals,
+ bool IsInit) {
+ // FIXME: This function should take an LValue as an argument.
+ if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
+ } else if (hasAggregateLLVMType(E->getType())) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
+ AggValueSlot::IsDestructed_t(IsInit),
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsAliased_t(!IsInit)));
+ } else {
+ RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
+ LValue LV = MakeAddrLValue(Location, E->getType());
+ EmitStoreThroughLValue(RV, LV);
+ }
+}
+
+namespace {
+/// \brief An adjustment to be made to the temporary created when emitting a
+/// reference binding, which accesses a particular subobject of that temporary.
+ struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CastExpr *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ FieldDecl *Field;
+ };
+
+ SubobjectAdjustment(const CastExpr *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment) {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment) {
+ this->Field = Field;
+ }
+ };
+}
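+
+// Illustrative examples of the two adjustment kinds (editorial sketch):
+//
+//   struct B { int x; };  struct D : B {};
+//   const B   &b = D();     // DerivedToBaseAdjustment: D temporary -> B
+//   const int &r = D().x;   // FieldAdjustment: member 'x' of the temporary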
+
+static llvm::Value *
+CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
+ const NamedDecl *InitializedDecl) {
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
+ Out.flush();
+
+ llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+
+ // Create the reference temporary.
+ llvm::GlobalValue *RefTemp =
+ new llvm::GlobalVariable(CGF.CGM.getModule(),
+ RefTempTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(RefTempTy),
+ Name.str());
+ return RefTemp;
+ }
+ }
+
+ return CGF.CreateMemTemp(Type, "ref.tmp");
+}
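+
+// Illustrative sketch: for a reference with static storage duration, e.g.
+//   const int &r = 42;
+// the temporary becomes an internal, zero-initialized global named by
+// mangleReferenceTemporary (e.g. "_ZGR1r" under the Itanium ABI); all other
+// cases get an ordinary "ref.tmp" alloca.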
+
+static llvm::Value *
+EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
+ llvm::Value *&ReferenceTemporary,
+ const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ QualType &ObjCARCReferenceLifetimeType,
+ const NamedDecl *InitializedDecl) {
+ // Look through single-element init lists that claim to be lvalues. They're
+ // just syntactic wrappers in this case.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue())
+ E = ILE->getInit(0);
+ }
+
+ // Look through expressions for materialized temporaries (for now).
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
+ E->getType()->isObjCLifetimeType() &&
+ (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
+ ObjCARCReferenceLifetimeType = E->getType();
+
+ E = M->GetTemporaryExpr();
+ }
+
+ if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
+ E = DAE->getExpr();
+
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ CGF.enterFullExpression(EWC);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+
+ return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
+ ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+ }
+
+ RValue RV;
+ if (E->isGLValue()) {
+ // Emit the expression as an lvalue.
+ LValue LV = CGF.EmitLValue(E);
+
+ if (LV.isSimple())
+ return LV.getAddress();
+
+ // We have to load the lvalue.
+ RV = CGF.EmitLoadOfLValue(LV);
+ } else {
+ if (!ObjCARCReferenceLifetimeType.isNull()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+
+ LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
+ ObjCARCReferenceLifetimeType);
+
+ CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
+ RefTempDst, false);
+
+ bool ExtendsLifeOfTemporary = false;
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (Var->extendsLifetimeOfTemporary())
+ ExtendsLifeOfTemporary = true;
+ } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
+ ExtendsLifeOfTemporary = true;
+ }
+
+ if (!ExtendsLifeOfTemporary) {
+ // Since the lifetime of this temporary isn't going to be extended,
+ // we need to clean it up ourselves at the end of the full expression.
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CGF.pushDestroy(NormalAndEHCleanup,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanupForArray*/ true);
+ break;
+ }
+
+ ObjCARCReferenceLifetimeType = QualType();
+ }
+
+ return ReferenceTemporary;
+ }
+
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ while (true) {
+ E = E->IgnoreParens();
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(CE, Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ assert(ME->getBase()->getType()->isRecordType());
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field));
+ continue;
+ }
+ }
+ }
+
+ if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
+ if (opaque->getType()->isRecordType())
+ return CGF.EmitOpaqueValueLValue(opaque).getAddress();
+
+ // Nothing changed.
+ break;
+ }
+
+ // Create a reference temporary if necessary.
+ AggValueSlot AggSlot = AggValueSlot::ignored();
+ if (CGF.hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
+ AggValueSlot::IsDestructed_t isDestructed
+ = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
+ Qualifiers(), isDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ }
+
+ if (InitializedDecl) {
+ // Get the destructor for the reference temporary.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ }
+ }
+
+ RV = CGF.EmitAnyExpr(E, AggSlot);
+
+  // Check if we need to perform derived-to-base casts and/or field accesses
+  // to get from the temporary object we created (and, potentially, for which
+  // we extended the lifetime) to the subobject we're binding the reference to.
+ if (!Adjustments.empty()) {
+ llvm::Value *Object = RV.getAggregateAddr();
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object =
+ CGF.GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
+ /*NullCheckValue=*/false);
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment: {
+ LValue LV = CGF.MakeAddrLValue(Object, E->getType());
+ LV = CGF.EmitLValueForField(LV, Adjustment.Field);
+ if (LV.isSimple()) {
+ Object = LV.getAddress();
+ break;
+ }
+
+ // For non-simple lvalues, we actually have to create a copy of
+ // the object we're binding to.
+ QualType T = Adjustment.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
+ Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
+ LValue TempLV = CGF.MakeAddrLValue(Object,
+ Adjustment.Field->getType());
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
+ break;
+ }
+
+ }
+ }
+
+ return Object;
+ }
+ }
+
+ if (RV.isAggregate())
+ return RV.getAggregateAddr();
+
+ // Create a temporary variable that we can bind the reference to.
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
+ if (RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
+ /*Volatile=*/false, Alignment, E->getType());
+ else
+ CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
+ /*Volatile=*/false);
+ return ReferenceTemporary;
+}
+
+RValue
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
+ const NamedDecl *InitializedDecl) {
+ llvm::Value *ReferenceTemporary = 0;
+ const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ QualType ObjCARCReferenceLifetimeType;
+ llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+ if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
+ return RValue::get(Value);
+
+ // Make sure to call the destructor for the reference temporary.
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
+ if (VD && VD->hasGlobalStorage()) {
+ if (ReferenceTemporaryDtor) {
+ llvm::Constant *DtorFn =
+ CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ EmitCXXGlobalDtorRegistration(DtorFn,
+ cast<llvm::Constant>(ReferenceTemporary));
+ } else {
+ assert(!ObjCARCReferenceLifetimeType.isNull());
+ // Note: We intentionally do not register a global "destructor" to
+ // release the object.
+ }
+
+ return RValue::get(Value);
+ }
+
+ if (ReferenceTemporaryDtor)
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+ else {
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable(
+ "Not a reference temporary that needs to be deallocated");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do.
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ CleanupKind cleanupKind = getARCCleanupKind();
+ pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
+ precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
+ ObjCARCReferenceLifetimeType, destroyARCWeak, true);
+ break;
+ }
+ }
+ }
+
+ return RValue::get(Value);
+}
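+
+// Illustrative ARC example (editorial sketch, assuming -fobjc-arc and a
+// hypothetical class Foo): binding a reference to a retainable temporary
+// schedules a release when the reference goes out of scope:
+//
+//   const __strong id &ref = [Foo new]; // destroyARCStrong* cleanup pushed
+//   // __weak temporaries instead get destroyARCWeak with an EH cleanup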
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return the
+/// input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
+ ->getZExtValue();
+}
+
+void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
+ if (!CatchUndefined)
+ return;
+
+  // This needs to be in the standard address space.
+ Address = Builder.CreateBitCast(Address, Int8PtrTy);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+
+  // The 'false' argument asks for the maximum object size; in time, people
+  // may want to control this and pass 'true' (minimum) instead.
+ llvm::Value *Arg = Builder.getFalse();
+ llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
+ llvm::BasicBlock *Cont = createBasicBlock();
+ llvm::BasicBlock *Check = createBasicBlock();
+ llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
+ Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
+
+ EmitBlock(Check);
+ Builder.CreateCondBr(Builder.CreateICmpUGE(C,
+ llvm::ConstantInt::get(IntPtrTy, Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+}
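+
+// Illustrative shape of the emitted guard (editorial sketch; the flag
+// spelling -fcatch-undefined-behavior is assumed for this era of clang):
+//
+//   %sz = call i64 @llvm.objectsize.i64(i8* %addr, i1 false)
+//   ; %sz == -1 means "size unknown": fall through to cont
+//   ; otherwise branch to the shared trap block when %sz < Size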
+
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ } else {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ }
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
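+
+// Illustrative example (GNU extension semantics): ++/-- on a _Complex value
+// adjusts only the real part:
+//
+//   _Complex double z = 1.0 + 2.0i;
+//   z++;   // z is now 2.0 + 2.0i; 'z++' itself yields the old value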
+
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType())
+ return RValue::get(0);
+
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ }
+
+  // If this is a use of an undefined value of aggregate type, the aggregate
+  // must still have an identifiable address. Just because the contents of the
+  // value are undefined doesn't mean that the address can't be taken and
+  // compared.
+ if (hasAggregateLLVMType(Ty)) {
+ llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ return RValue::getAggregate(DestPtr);
+ }
+
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
+}
+
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+ LValue LV = EmitLValue(E);
+ if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
+ EmitCheck(LV.getAddress(),
+ getContext().getTypeSizeInChars(E->getType()).getQuantity());
+ return LV;
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size of the lvalue's type. If the lvalue has a variable
+/// length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::ObjCPropertyRefExprClass:
+ llvm_unreachable("cannot emit a property reference directly");
+
+ case Expr::ObjCSelectorExprClass:
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
+ case Expr::ObjCIsaExprClass:
+ return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CompoundAssignOperatorClass:
+ if (!E->getType()->isAnyComplexType())
+ return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ParenExprClass:
+ return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::GenericSelectionExprClass:
+ return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+ case Expr::PseudoObjectExprClass:
+ return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
+ case Expr::InitListExprClass:
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only single-element init list can be lvalue.");
+ return EmitLValue(cast<InitListExpr>(E)->getInit(0));
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::LambdaExprClass:
+ return EmitLambdaLValue(cast<LambdaExpr>(E));
+
+ case Expr::ExprWithCleanupsClass: {
+ const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
+ enterFullExpression(cleanups);
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(cleanups->getSubExpr());
+ }
+
+ case Expr::CXXScalarValueInitExprClass:
+ return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
+ case Expr::CXXDefaultArgExprClass:
+ return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXTypeidExprClass:
+ return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass:
+ return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::BinaryConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::OpaqueValueExprClass:
+ return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+
+ case Expr::MaterializeTemporaryExprClass:
+ return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
+ }
+}
+
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+ assert(type.isCanonical());
+ assert(!type->isReferenceType());
+
+ // Must be const-qualified but non-volatile.
+ Qualifiers qs = type.getLocalQualifiers();
+ if (!qs.hasConst() || qs.hasVolatile()) return false;
+
+ // Otherwise, all object types satisfy this except C++ classes with
+ // mutable subobjects or non-trivial copy/destroy behavior.
+ if (const RecordType *RT = dyn_cast<RecordType>(type))
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->hasMutableFields() || !RD->isTrivial())
+ return false;
+
+ return true;
+}
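+
+// Illustrative examples (editorial sketch):
+//
+//   const int k = 3;           // emittable: const, non-volatile, trivial
+//   const volatile int v = 3;  // not emittable: volatile
+//   struct S { mutable int m; };
+//   const S s = S();           // not emittable: mutable subobject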
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+ CEK_None,
+ CEK_AsReferenceOnly,
+ CEK_AsValueOrReference,
+ CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+ type = type.getCanonicalType();
+ if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
+ if (isConstantEmittableObjectType(ref->getPointeeType()))
+ return CEK_AsValueOrReference;
+ return CEK_AsReferenceOnly;
+ }
+ if (isConstantEmittableObjectType(type))
+ return CEK_AsValueOnly;
+ return CEK_None;
+}
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is actually more than an optimization: we can't
+/// produce an l-value for variables that we never actually captured
+/// in a block or lambda, which means const int variables or constexpr
+/// literals or similar.
+CodeGenFunction::ConstantEmission
+CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+ ValueDecl *value = refExpr->getDecl();
+
+ // The value needs to be an enum constant or a constant variable.
+ ConstantEmissionKind CEK;
+ if (isa<ParmVarDecl>(value)) {
+ CEK = CEK_None;
+ } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
+ CEK = checkVarTypeForConstantEmission(var->getType());
+ } else if (isa<EnumConstantDecl>(value)) {
+ CEK = CEK_AsValueOnly;
+ } else {
+ CEK = CEK_None;
+ }
+ if (CEK == CEK_None) return ConstantEmission();
+
+ Expr::EvalResult result;
+ bool resultIsReference;
+ QualType resultType;
+
+ // It's best to evaluate all the way as an r-value if that's permitted.
+ if (CEK != CEK_AsReferenceOnly &&
+ refExpr->EvaluateAsRValue(result, getContext())) {
+ resultIsReference = false;
+ resultType = refExpr->getType();
+
+ // Otherwise, try to evaluate as an l-value.
+ } else if (CEK != CEK_AsValueOnly &&
+ refExpr->EvaluateAsLValue(result, getContext())) {
+ resultIsReference = true;
+ resultType = value->getType();
+
+ // Failure.
+ } else {
+ return ConstantEmission();
+ }
+
+ // In any case, if the initializer has side-effects, abandon ship.
+ if (result.HasSideEffects)
+ return ConstantEmission();
+
+ // Emit as a constant.
+ llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
+
+ // Make sure we emit a debug reference to the global variable.
+ // This should probably fire even for
+ if (isa<VarDecl>(value)) {
+ if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+ EmitDeclRefExprDbgValue(refExpr, C);
+ } else {
+ assert(isa<EnumConstantDecl>(value));
+ EmitDeclRefExprDbgValue(refExpr, C);
+ }
+
+ // If we emitted a reference constant, we need to dereference that.
+ if (resultIsReference)
+ return ConstantEmission::forReference(C);
+
+ return ConstantEmission::forValue(C);
+}
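+
+// Illustrative example (editorial sketch): a lambda may name a constant
+// local without capturing it, so no l-value for the variable exists inside
+// the closure and the use has to fold to a constant:
+//
+//   void f() {
+//     const int n = 4;
+//     auto g = [] { return n; };  // no capture; 'n' is emitted as i32 4
+//   }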
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment().getQuantity(),
+ lvalue.getType(), lvalue.getTBAAInfo());
+}
+
+static bool hasBooleanRepresentation(QualType Ty) {
+ if (Ty->isBooleanType())
+ return true;
+
+ if (const EnumType *ET = Ty->getAs<EnumType>())
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ return hasBooleanRepresentation(AT->getValueType());
+
+ return false;
+}
+
+llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
+ const EnumType *ET = Ty->getAs<EnumType>();
+ bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
+ CGM.getCodeGenOpts().StrictEnums &&
+ !ET->getDecl()->isFixed());
+ bool IsBool = hasBooleanRepresentation(Ty);
+ llvm::Type *LTy;
+ if (!IsBool && !IsRegularCPlusPlusEnum)
+ return NULL;
+
+ llvm::APInt Min;
+ llvm::APInt End;
+ if (IsBool) {
+ Min = llvm::APInt(8, 0);
+ End = llvm::APInt(8, 2);
+ LTy = Int8Ty;
+ } else {
+ const EnumDecl *ED = ET->getDecl();
+ LTy = ConvertTypeForMem(ED->getIntegerType());
+ unsigned Bitwidth = LTy->getScalarSizeInBits();
+ unsigned NumNegativeBits = ED->getNumNegativeBits();
+ unsigned NumPositiveBits = ED->getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ assert(NumBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -End;
+ } else {
+ assert(NumPositiveBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt(Bitwidth, 0);
+ }
+ }
+
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ return MDHelper.createRange(Min, End);
+}
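+
+// Illustrative sketch (with -fstrict-enums): loading
+//   enum Color { Red, Green, Blue };  // 2 positive bits -> range [0, 4)
+// can be annotated as
+//   %c = load i32* %p, !range !0
+//   !0 = metadata !{i32 0, i32 4}
+// while a bool load gets the i8 range [0, 2).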
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr);
+ if (Volatile)
+ Load->setVolatile(true);
+ if (Alignment)
+ Load->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Load, TBAAInfo);
+ // If this is an atomic type, all normal reads must be atomic
+ if (Ty->isAtomicType())
+ Load->setAtomic(llvm::SequentiallyConsistent);
+
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0)
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
+
+ return EmitFromMemory(Load, Ty);
+}
+
+llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (hasBooleanRepresentation(Ty)) {
+ // This should really always be an i1, but sometimes it's already
+ // an i8, and it's awkward to track those cases down.
+ if (Value->getType()->isIntegerTy(1))
+ return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
+ assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
+ }
+
+ return Value;
+}
+
+llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (hasBooleanRepresentation(Ty)) {
+ assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
+ return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
+ }
+
+ return Value;
+}
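+
+// Illustrative round trip for bool, whose register type is i1 but whose
+// memory type is i8:
+//
+//   store path: %frombool = zext i1 %v to i8    ; EmitToMemory
+//   load path:  %tobool   = trunc i8 %ld to i1  ; EmitFromMemory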
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment,
+ QualType Ty,
+ llvm::MDNode *TBAAInfo,
+ bool isInit) {
+ Value = EmitToMemory(Value, Ty);
+
+ llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
+ if (Alignment)
+ Store->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Store, TBAAInfo);
+ if (!isInit && Ty->isAtomicType())
+ Store->setAtomic(llvm::SequentiallyConsistent);
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getTBAAInfo(), isInit);
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ llvm::Value *AddrWeakObj = LV.getAddress();
+ return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj));
+ }
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
+
+ if (LV.isSimple()) {
+ assert(!LV.getType()->isFunctionType());
+
+ // Everything needs a load.
+ return RValue::get(EmitLoadOfScalar(LV));
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV);
+
+ assert(LV.isBitField() && "Unknown LValue type!");
+ return EmitLoadOfBitfieldLValue(LV);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
+ const CGBitFieldInfo &Info = LV.getBitFieldInfo();
+
+ // Get the output type.
+ llvm::Type *ResLTy = ConvertType(LV.getType());
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Compute the result as an OR of all of the individual component accesses.
+ llvm::Value *Res = 0;
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = LV.getBitFieldBaseAddr();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (!AI.FieldByteOffset.isZero()) {
+ Ptr = EmitCastToVoidPtr(Ptr);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
+ "bf.field.offs");
+ }
+
+ // Cast to the access type.
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
+ CGM.getContext().getTargetAddressSpace(LV.getType()));
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Perform the load.
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
+ if (!AI.AccessAlignment.isZero())
+ Load->setAlignment(AI.AccessAlignment.getQuantity());
+
+ // Shift out unused low bits and mask out unused high bits.
+ llvm::Value *Val = Load;
+ if (AI.FieldBitStart)
+ Val = Builder.CreateLShr(Load, AI.FieldBitStart);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
+ AI.TargetBitWidth),
+ "bf.clear");
+
+ // Extend or truncate to the target size.
+ if (AI.AccessWidth < ResSizeInBits)
+ Val = Builder.CreateZExt(Val, ResLTy);
+ else if (AI.AccessWidth > ResSizeInBits)
+ Val = Builder.CreateTrunc(Val, ResLTy);
+
+ // Shift into place, and OR into the result.
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateShl(Val, AI.TargetBitOffset);
+ Res = Res ? Builder.CreateOr(Res, Val) : Val;
+ }
+
+ // If the bit-field is signed, perform the sign-extension.
+ //
+ // FIXME: This can easily be folded into the load of the high bits, which
+ // could also eliminate the mask of high bits in some situations.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
+
+ return RValue::get(Res);
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be extracting
+ // a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt));
+ }
+
+ // Always use shuffle vector to try to retain the original program structure
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i)
+ Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
+ MaskV);
+ return RValue::get(Vec);
+}
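+
+// Illustrative example (OpenCL-style ext_vector_type syntax assumed):
+//
+//   typedef float float4 __attribute__((ext_vector_type(4)));
+//   typedef float float2 __attribute__((ext_vector_type(2)));
+//   float4 v;
+//   float2 w = v.yx; // shufflevector <4 x float> %v, <4 x float> undef,
+//                    //               <2 x i32> <i32 1, i32 0>
+//   float  s = v.z;  // extractelement <4 x float> %v, i32 2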
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
+
+ assert(Dst.isBitField() && "Unknown LValue type");
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
+ }
+
+ // There's special magic for assigning into an ARC-qualified l-value.
+ if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing special
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
+ Src.getScalarVal()));
+ // fall into the normal path
+ break;
+ }
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+ // load of a __weak object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+ // load of a __strong object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ if (Dst.isObjCIvar()) {
+ assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+ llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
+ llvm::Value *dst = RHS;
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ llvm::Value *LHS =
+ Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
+ BytesBetween);
+ } else if (Dst.isGlobalObjCRef()) {
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
+ Dst.isThreadLocalRef());
+ }
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ llvm::Value **Result) {
+ const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
+
+ // Get the output type.
+ llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Get the source value, truncated to the width of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (hasBooleanRepresentation(Dst.getType()))
+ SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
+
+ SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ Info.getSize()),
+ "bf.value");
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ // Cast back to the proper type for result.
+ llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
+ "bf.reload.val");
+
+ // Sign extend if necessary.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
+ ExtraBits, "bf.reload.sext");
+ }
+
+ *Result = ReloadVal;
+ }
+
+ // Iterate over the components, writing each piece to memory.
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
+ unsigned addressSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (!AI.FieldByteOffset.isZero()) {
+ Ptr = EmitCastToVoidPtr(Ptr);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
+ "bf.field.offs");
+ }
+
+ // Cast to the access type.
+ llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
+
+ llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Extract the piece of the bit-field value to write in this access, limited
+ // to the values that are part of this access.
+ llvm::Value *Val = SrcVal;
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ AI.TargetBitWidth));
+
+ // Extend or truncate to the access size.
+ if (ResSizeInBits < AI.AccessWidth)
+ Val = Builder.CreateZExt(Val, AccessLTy);
+ else if (ResSizeInBits > AI.AccessWidth)
+ Val = Builder.CreateTrunc(Val, AccessLTy);
+
+ // Shift into the position in memory.
+ if (AI.FieldBitStart)
+ Val = Builder.CreateShl(Val, AI.FieldBitStart);
+
+ // If necessary, load and OR in bits that are outside of the bit-field.
+ if (AI.TargetBitWidth != AI.AccessWidth) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
+ if (!AI.AccessAlignment.isZero())
+ Load->setAlignment(AI.AccessAlignment.getQuantity());
+
+ // Compute the mask for zeroing the bits that are part of the bit-field.
+ llvm::APInt InvMask =
+ ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
+ AI.FieldBitStart + AI.TargetBitWidth);
+
+ // Apply the mask and OR in to the value to write.
+ Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
+ }
+
+ // Write the value.
+ llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
+ Dst.isVolatileQualified());
+ if (!AI.AccessAlignment.isZero())
+ Store->setAlignment(AI.AccessAlignment.getQuantity());
+ }
+}
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+      // Use a shuffle vector if the source and destination have the same
+      // number of elements, inverting the element mask since it is expressed
+      // from the side of the vector being stored.
+ SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV);
+ } else if (NumDstElts > NumSrcElts) {
+      // Extend the source vector to the destination's length, then shuffle it
+      // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
+ SmallVector<llvm::Constant*, 4> ExtMask;
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(Builder.getInt32(i));
+ ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV);
+ // build identity
+ SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ Mask.push_back(Builder.getInt32(i));
+
+      // then replace the mask entries that receive the shuffled-in elements
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
+ } else {
+ // We should never shorten the vector
+ llvm_unreachable("unexpected shorten vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
+ }
+
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
+}
+
+// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
+// generating the write-barrier API. It is currently a global, an ivar,
+// or neither.
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+ LValue &LV,
+ bool IsMemberAccess=false) {
+ if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
+ return;
+
+ if (isa<ObjCIvarRefExpr>(E)) {
+ QualType ExpTy = E->getType();
+ if (IsMemberAccess && ExpTy->isPointerType()) {
+      // If the ivar is a structure pointer, assigning to a field of
+      // that struct follows gcc's behavior and conservatively makes it a
+      // non-ivar write-barrier.
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType()) {
+ LV.setObjCIvar(false);
+ return;
+ }
+ }
+ LV.setObjCIvar(true);
+ ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
+ LV.setBaseIvarExp(Exp->getBase());
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+ if (VD->hasGlobalStorage()) {
+ LV.setGlobalObjCRef(true);
+ LV.setThreadLocalRef(VD->isThreadSpecified());
+ }
+ }
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ if (LV.isObjCIvar()) {
+ // If cast is to a structure pointer, follow gcc's behavior and make it
+ // a non-ivar write-barrier.
+ QualType ExpTy = E->getType();
+ if (ExpTy->isPointerType())
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType())
+ LV.setObjCIvar(false);
+ }
+ return;
+ }
+
+ if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
+ return;
+ }
+
+ if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ if (LV.isObjCIvar() && !LV.isObjCArray())
+      // Using array syntax to assign to what an ivar points to is not the
+      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+ LV.setObjCIvar(false);
+ else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+      // Using array syntax to assign to what a global points to is not the
+      // same as assigning to the global itself. {id *G;} G[i] = 0;
+ LV.setGlobalObjCRef(false);
+ return;
+ }
+
+ if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
+ // We don't know if member is an 'ivar', but this flag is looked at
+ // only in the context of LV.isObjCIvar().
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+}
+
+static llvm::Value *
+EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
+ llvm::Value *V, llvm::Type *IRType,
+ StringRef Name = StringRef()) {
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
+}
+
+static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const VarDecl *VD) {
+ assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
+ "Var decl must have external storage or be a file var decl!");
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ QualType T = E->getType();
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ }
+ setObjCGCLValueClass(CGF.getContext(), E, LV);
+ return LV;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAs<FunctionProtoType>()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
+ QualType NoProtoType =
+ CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
+ NoProtoType = CGF.getContext().getPointerType(NoProtoType);
+ V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
+ }
+ }
+ CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment);
+}
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const NamedDecl *ND = E->getDecl();
+ CharUnits Alignment = getContext().getDeclAlign(ND);
+ QualType T = E->getType();
+
+ // FIXME: We should be able to assert this for FunctionDecls as well!
+ // FIXME: We should be able to assert this for all DeclRefExprs, not just
+ // those with a valid source location.
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ !E->getLocation().isValid()) &&
+ "Should not use decl without marking it used!");
+
+ if (ND->hasAttr<WeakRefAttr>()) {
+ const ValueDecl *VD = cast<ValueDecl>(ND);
+ llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
+ return MakeAddrLValue(Aliasee, E->getType(), Alignment);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // Check if this is a global variable.
+ if (VD->hasExternalStorage() || VD->isFileVarDecl())
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+
+ bool NonGCable = VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType() &&
+ !isBlockVariable;
+
+ llvm::Value *V = LocalDeclMap[VD];
+ if (!V && VD->isStaticLocal())
+ V = CGM.getStaticLocalDeclAddress(VD);
+
+ // Use special handling for lambdas.
+ if (!V) {
+ if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
+ QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
+ LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
+ LambdaTagType);
+ return EmitLValueForField(LambdaLV, FD);
+ }
+
+ assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
+ CharUnits alignment = getContext().getDeclAlign(VD);
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
+ E->getType(), alignment);
+ }
+
+ assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+
+ if (isBlockVariable)
+ V = BuildBlockByrefAddress(V, VD);
+
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = MakeAddrLValue(V, T, Alignment);
+ }
+
+ if (NonGCable) {
+ LV.getQuals().removeObjCGCAttr();
+ LV.setNonGC(true);
+ }
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, fn);
+
+ llvm_unreachable("Unhandled DeclRefExpr");
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UO_Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("Unknown unary operator lvalue!");
+ case UO_Deref: {
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
+
+    // We should not generate a __weak write barrier on an indirect reference
+    // to a pointer to object, as in: void foo(__weak id *param); *param = 0;
+    // But we do continue to generate a __strong write barrier on an indirect
+    // write into a pointer to object.
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UO_Real:
+ case UO_Imag: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+ llvm::Value *Addr = LV.getAddress();
+
+ // __real is valid on scalars. This is a faster way of testing that.
+ // __imag can only produce an rvalue on scalars.
+ if (E->getOpcode() == UO_Real &&
+ !cast<llvm::PointerType>(Addr->getType())
+ ->getElementType()->isStructTy()) {
+ assert(E->getSubExpr()->getType()->isArithmeticType());
+ return LV;
+ }
+
+ assert(E->getSubExpr()->getType()->isAnyComplexType());
+
+ unsigned Idx = E->getOpcode() == UO_Imag;
+ return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
+ Idx, "idx"),
+ ExprTy);
+ }
+ case UO_PreInc:
+ case UO_PreDec: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ bool isInc = E->getOpcode() == UO_PreInc;
+
+ if (E->getType()->isAnyComplexType())
+ EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ else
+ EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ return LV;
+ }
+ }
+}
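+
+// Illustrative examples (editorial sketch):
+//
+//   _Complex float z;
+//   __real z = 1.0f;   // l-value: GEP to element 0 of the {float, float}
+//   __imag z = 2.0f;   // l-value: GEP to element 1
+//   float f; __real f; // scalar: the l-value is simply 'f' itself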
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
+ E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ E->getType());
+}
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
+
+ case PredefinedExpr::Func:
+ case PredefinedExpr::Function:
+ case PredefinedExpr::PrettyFunction: {
+ unsigned Type = E->getIdentType();
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default: llvm_unreachable("Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
+
+ StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ GlobalVarName += FnName;
+
+ const Decl *CurDecl = CurCodeDecl;
+ if (CurDecl == 0)
+ CurDecl = getContext().getTranslationUnitDecl();
+
+ std::string FunctionName =
+ (isa<BlockDecl>(CurDecl)
+ ? FnName.str()
+ : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
+
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return MakeAddrLValue(C, E->getType());
+ }
+ }
+}
+
+llvm::BasicBlock *CodeGenFunction::getTrapBB() {
+ const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+
+  // If we are not optimizing, don't collapse all calls to trap in the function
+ // to the same call, that way, in the debugger they can see which operation
+ // did in fact fail. If we are optimizing, we collapse all calls to trap down
+ // to just one per function to save on codesize.
+ if (GCO.OptimizationLevel && TrapBB)
+ return TrapBB;
+
+ llvm::BasicBlock *Cont = 0;
+ if (HaveInsertPoint()) {
+ Cont = createBasicBlock("cont");
+ EmitBranch(Cont);
+ }
+ TrapBB = createBasicBlock("trap");
+ EmitBlock(TrapBB);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
+ llvm::CallInst *TrapCall = Builder.CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ if (Cont)
+ EmitBlock(Cont);
+ return TrapBB;
+}
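+
+// Illustrative shape of the block this returns (shared per function when
+// optimizing, one per check at -O0):
+//
+//   trap:
+//     call void @llvm.trap()
+//     unreachable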
+
+/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
+/// array to pointer, return the array subexpression.
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+ // If this isn't just an array->pointer decay, bail out.
+ const CastExpr *CE = dyn_cast<CastExpr>(E);
+ if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
+ return 0;
+
+ // If this is a decay from variable width array, bail out.
+ const Expr *SubExpr = CE->getSubExpr();
+ if (SubExpr->getType()->isVariableArrayType())
+ return 0;
+
+ return SubExpr;
+}
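+
+// Illustrative sketch of the simplification this enables: for
+//   int A[10];  ... A[i] ...
+// instead of a decay GEP followed by an index GEP,
+//   %decay = getelementptr [10 x i32]* @A, i32 0, i32 0
+//   %elt   = getelementptr i32* %decay, i64 %i
+// one combined GEP is emitted:
+//   %elt = getelementptr [10 x i32]* @A, i32 0, i64 %i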
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType()) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+ Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType(), LHS.getAlignment());
+ }
+
+  // Extend or truncate the index to the pointer width (32 or 64 bits).
+ if (Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
+
+ // FIXME: As llvm implements the object size checking, this can come out.
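+  // Illustrative example (assumed, not from this source): for
+  //   int A[10]; ... A[i] ...
+  // the guard below emits 'icmp ule i64 %i, 10' (one-past-the-end is
+  // accepted, so forming &A[10] stays legal) and branches to the trap
+  // block when the comparison fails.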
+ if (CatchUndefined) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
+ if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
+ if (const ConstantArrayType *CAT
+ = getContext().getAsConstantArrayType(DRE->getType())) {
+ llvm::APInt Size = CAT->getSize();
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
+ llvm::ConstantInt::get(Idx->getType(), Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+ }
+ }
+ }
+ }
+ }
+
+ // We know that the pointer points to a type of the correct size, unless the
+ // size is a VLA or Objective-C interface.
+ llvm::Value *Address = 0;
+ CharUnits ArrayAlignment;
+ if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(E->getType())) {
+ // The base must be a pointer, which is not an aggregate. Emit
+ // it. It needs to be emitted first in case it's what captures
+ // the VLA bounds.
+ Address = EmitScalarExpr(E->getBase());
+
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
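+    // Illustrative example (assumed): in
+    //   void f(int n, int i, int (*p)[n]) { ... p[i] ... }
+    // the element type of p[i] is the VLA 'int[n]', numElements is the
+    // runtime value of n, and the index is scaled by it below.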
+ if (getLangOpts().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ }
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ getContext().getTypeSizeInChars(OIT).getQuantity());
+
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+ Address = EmitCastToVoidPtr(Base);
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
+ } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+    // If this is A[i] where A is an array, the frontend will have decayed the
+    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
+    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
+    // "gep x, i" here. Emit one "gep A, 0, i".
+ assert(Array->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+ LValue ArrayLV = EmitLValue(Array);
+ llvm::Value *ArrayPtr = ArrayLV.getAddress();
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ llvm::Value *Args[] = { Zero, Idx };
+
+ // Propagate the alignment from the array itself to the result.
+ ArrayAlignment = ArrayLV.getAlignment();
+
+ if (getContext().getLangOpts().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ } else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+ if (getContext().getLangOpts().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ }
+
+ QualType T = E->getBase()->getType()->getPointeeType();
+ assert(!T.isNull() &&
+ "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
+ // Limit the alignment to that of the result type.
+ LValue LV;
+ if (!ArrayAlignment.isZero()) {
+ CharUnits Align = getContext().getTypeAlignInChars(T);
+ ArrayAlignment = std::min(Align, ArrayAlignment);
+ LV = MakeAddrLValue(Address, T, ArrayAlignment);
+ } else {
+ LV = MakeNaturalAlignAddrLValue(Address, T);
+ }
+
+ LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
+
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC) {
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ setObjCGCLValueClass(getContext(), E, LV);
+ }
+ return LV;
+}
+
+static
+llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
+ SmallVector<unsigned, 4> &Elts) {
+ SmallVector<llvm::Constant*, 4> CElts;
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+ CElts.push_back(Builder.getInt32(Elts[i]));
+
+ return llvm::ConstantVector::get(CElts);
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (E->isArrow()) {
+ // If it is a pointer to a vector, emit the address and form an lvalue with
+ // it.
+ llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType());
+ Base.getQuals().removeObjCGCAttr();
+ } else if (E->getBase()->isGLValue()) {
+    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
+    // emit the base as an lvalue.
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+ // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
+ assert(E->getBase()->getType()->isVectorType() &&
+ "Result must be a vector");
+ llvm::Value *Vec = EmitScalarExpr(E->getBase());
+
+ // Store the vector to memory (because LValue wants an address).
+ llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+ Builder.CreateStore(Vec, VecMem);
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType());
+ }
+
+ QualType type =
+ E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
+
+ // Encode the element access list into a vector of unsigned indices.
+ SmallVector<unsigned, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
+ Base.getAlignment());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
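+  // Illustrative example (assumed): for 'v.yzw.xy' the base lvalue carries
+  // the indices {1,2,3}; composing them with the outer access {0,1} selects
+  // elements {1,2} of the original vector, which is what the loop below
+  // computes.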
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
+ llvm::Constant *CV = llvm::ConstantVector::get(CElts);
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
+ Base.getAlignment());
+}
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ Expr *BaseExpr = E->getBase();
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ LValue BaseLV;
+ if (E->isArrow())
+ BaseLV = MakeNaturalAlignAddrLValue(EmitScalarExpr(BaseExpr),
+ BaseExpr->getType()->getPointeeType());
+ else
+ BaseLV = EmitLValue(BaseExpr);
+
+ NamedDecl *ND = E->getMemberDecl();
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
+ LValue LV = EmitLValueForField(BaseLV, Field);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ llvm_unreachable("Unhandled member declaration!");
+}
+
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
+ const FieldDecl *Field,
+ unsigned CVRQualifiers) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
+ return LValue::MakeBitfield(BaseValue, Info,
+ Field->getType().withCVRQualifiers(CVRQualifiers));
+}
+
+/// EmitLValueForAnonRecordField - Given that the field is a member of
+/// an anonymous struct or union buried inside a record, and given
+/// that the base value is a pointer to the enclosing record, derive
+/// an lvalue for the ultimate field.
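+/// For example (illustrative): given
+///   struct S { struct { int x; }; } s;
+/// the chain for 's.x' steps from S into the unnamed struct and then to
+/// 'x', emitting one field access per link.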
+LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
+ const IndirectFieldDecl *Field,
+ unsigned CVRQualifiers) {
+ IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
+ IEnd = Field->chain_end();
+ while (true) {
+ QualType RecordTy =
+ getContext().getTypeDeclType(cast<FieldDecl>(*I)->getParent());
+ LValue LV = EmitLValueForField(MakeAddrLValue(BaseValue, RecordTy),
+ cast<FieldDecl>(*I));
+ if (++I == IEnd) return LV;
+
+ assert(LV.isSimple());
+ BaseValue = LV.getAddress();
+ CVRQualifiers |= LV.getVRQualifiers();
+ }
+}
+
+LValue CodeGenFunction::EmitLValueForField(LValue base,
+ const FieldDecl *field) {
+ if (field->isBitField())
+ return EmitLValueForBitfield(base.getAddress(), field,
+ base.getVRQualifiers());
+
+ const RecordDecl *rec = field->getParent();
+ QualType type = field->getType();
+ CharUnits alignment = getContext().getDeclAlign(field);
+
+ // FIXME: It should be impossible to have an LValue without alignment for a
+ // complete type.
+ if (!base.getAlignment().isZero())
+ alignment = std::min(alignment, base.getAlignment());
+
+ bool mayAlias = rec->hasAttr<MayAliasAttr>();
+
+ llvm::Value *addr = base.getAddress();
+ unsigned cvr = base.getVRQualifiers();
+ if (rec->isUnion()) {
+ // For unions, there is no pointer adjustment.
+ assert(!type->isReferenceType() && "union has reference member");
+ } else {
+ // For structs, we GEP to the field that the record layout suggests.
+ unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
+ addr = Builder.CreateStructGEP(addr, idx, field->getName());
+
+ // If this is a reference field, load the reference right now.
+ if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
+ llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
+ if (cvr & Qualifiers::Volatile) load->setVolatile(true);
+ load->setAlignment(alignment.getQuantity());
+
+ if (CGM.shouldUseTBAA()) {
+ llvm::MDNode *tbaa;
+ if (mayAlias)
+ tbaa = CGM.getTBAAInfo(getContext().CharTy);
+ else
+ tbaa = CGM.getTBAAInfo(type);
+ CGM.DecorateInstruction(load, tbaa);
+ }
+
+ addr = load;
+ mayAlias = false;
+ type = refType->getPointeeType();
+ if (type->isIncompleteType())
+ alignment = CharUnits();
+ else
+ alignment = getContext().getTypeAlignInChars(type);
+ cvr = 0; // qualifiers don't recursively apply to referencee
+ }
+ }
+
+  // Make sure that the address is pointing to the right type. This is
+  // critical for both unions and structs. A union needs a bitcast; a struct
+  // element will need one if the laid-out LLVM type doesn't match the
+  // desired type.
+ addr = EmitBitCastOfLValueToProperType(*this, addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
+
+ if (field->hasAttr<AnnotateAttr>())
+ addr = EmitFieldAnnotations(field, addr);
+
+ LValue LV = MakeAddrLValue(addr, type, alignment);
+ LV.getQuals().addCVRQualifiers(cvr);
+
+ // __weak attribute on a field is ignored.
+ if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
+ LV.getQuals().removeObjCGCAttr();
+
+ // Fields of may_alias structs act like 'char' for TBAA purposes.
+ // FIXME: this should get propagated down through anonymous structs
+ // and unions.
+ if (mayAlias && LV.getTBAAInfo())
+ LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
+
+ return LV;
+}
+
+LValue
+CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
+ const FieldDecl *Field) {
+ QualType FieldType = Field->getType();
+
+ if (!FieldType->isReferenceType())
+ return EmitLValueForField(Base, Field);
+
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
+ assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+
+  // Make sure that the address is pointing to the right type. This is
+  // critical for both unions and structs. A union needs a bitcast; a struct
+  // element will need one if the laid-out LLVM type doesn't match the
+  // desired type.
+ llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
+
+ CharUnits Alignment = getContext().getDeclAlign(Field);
+
+ // FIXME: It should be impossible to have an LValue without alignment for a
+ // complete type.
+ if (!Base.getAlignment().isZero())
+ Alignment = std::min(Alignment, Base.getAlignment());
+
+ return MakeAddrLValue(V, FieldType, Alignment);
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
+ if (E->isFileScope()) {
+ llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType());
+ }
+
+ llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ const Expr *InitExpr = E->getInitializer();
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType());
+
+ EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+
+ return Result;
+}
+
+LValue CodeGenFunction::
+EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(expr->getType()) &&
+ !expr->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+
+ const Expr *condExpr = expr->getCond();
+ bool CondExprBool;
+ if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
+ if (!CondExprBool) std::swap(live, dead);
+
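+    // A dead arm that contains a label may still be the target of a goto,
+    // so it can only be skipped when it is label-free.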
+ if (!ContainsLabel(dead))
+ return EmitLValue(live);
+ }
+
+ llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
+
+ // Any temporaries created here are conditional.
+ EmitBlock(lhsBlock);
+ eval.begin(*this);
+ LValue lhs = EmitLValue(expr->getTrueExpr());
+ eval.end(*this);
+
+ if (!lhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+
+ lhsBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(contBlock);
+
+ // Any temporaries created here are conditional.
+ EmitBlock(rhsBlock);
+ eval.begin(*this);
+ LValue rhs = EmitLValue(expr->getFalseExpr());
+ eval.end(*this);
+ if (!rhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+ rhsBlock = Builder.GetInsertBlock();
+
+ EmitBlock(contBlock);
+
+ llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
+ "cond-lvalue");
+ phi->addIncoming(lhs.getAddress(), lhsBlock);
+ phi->addIncoming(rhs.getAddress(), rhsBlock);
+ return MakeAddrLValue(phi, expr->getType());
+}
+
+/// EmitCastLValue - Casts are never lvalues unless the cast is a
+/// dynamic_cast. If the cast is a dynamic_cast, we can have the usual lvalue
+/// result; otherwise, if a cast is needed by the code generator in an lvalue
+/// context, it must mean that we need the address of an aggregate in order
+/// to access one of its fields. This can happen for all the reasons that
+/// casts are permitted with aggregate results, including noop aggregate
+/// casts and casts from scalar to union.
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ case CK_ToVoid:
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+ case CK_Dependent:
+ llvm_unreachable("dependent cast kind in IR gen!");
+
+ // These two casts are currently treated as no-ops, although they could
+ // potentially be real operations depending on the target's ABI.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ if (!E->getSubExpr()->Classify(getContext()).isPRValue()
+ || E->getType()->isRecordType())
+ return EmitLValue(E->getSubExpr());
+ // Fall through to synthesize a temporary.
+
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_NullToPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject: {
+ // These casts only produce lvalues when we're binding a reference to a
+ // temporary realized from a (converted) pure rvalue. Emit the expression
+ // as a value, copy it into a temporary, and return an lvalue referring to
+ // that temporary.
+ llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
+ EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
+ return MakeAddrLValue(V, E->getType());
+ }
+
+ case CK_Dynamic: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = LV.getAddress();
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
+ return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ }
+
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ return EmitLValue(E->getSubExpr());
+
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getSubExpr()->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *This = LV.getAddress();
+
+ // Perform the derived-to-base conversion
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ return MakeAddrLValue(Base, E->getType());
+ }
+ case CK_ToUnion:
+ return EmitAggExprToLValue(E);
+ case CK_BaseToDerived: {
+ const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the base-to-derived conversion
+ llvm::Value *Derived =
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ return MakeAddrLValue(Derived, E->getType());
+ }
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
+ return MakeAddrLValue(V, E->getType());
+ }
+ case CK_ObjCObjectLValueCast: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType ToType = getContext().getLValueReferenceType(E->getType());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(ToType));
+ return MakeAddrLValue(V, E->getType());
+ }
+ }
+
+ llvm_unreachable("Unhandled lvalue cast kind?");
+}
+
+LValue CodeGenFunction::EmitNullInitializationLValue(
+ const CXXScalarValueInitExpr *E) {
+ QualType Ty = E->getType();
+ LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
+ EmitNullInitialization(LV.getAddress(), Ty);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+ return getOpaqueLValueMapping(e);
+}
+
+LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+RValue CodeGenFunction::EmitRValueForField(LValue LV,
+ const FieldDecl *FD) {
+ QualType FT = FD->getType();
+ LValue FieldLV = EmitLValueForField(LV, FD);
+ if (FT->isAnyComplexType())
+ return RValue::getComplex(
+ LoadComplexFromAddr(FieldLV.getAddress(),
+ FieldLV.isVolatileQualified()));
+ else if (CodeGenFunction::hasAggregateLLVMType(FT))
+ return FieldLV.asAggregateRValue();
+
+ return EmitLoadOfLValue(FieldLV);
+}
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, E->getLocStart());
+
+ // Builtins never have block type.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E, ReturnValue);
+
+ if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE, ReturnValue);
+
+ if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
+ return EmitCUDAKernelCallExpr(CE, ReturnValue);
+
+ const Decl *TargetDecl = E->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (unsigned builtinID = FD->getBuiltinID())
+ return EmitBuiltinExpr(FD, builtinID, E);
+ }
+
+ if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+ if (const CXXPseudoDestructorExpr *PseudoDtor
+ = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ QualType DestroyedType = PseudoDtor->getDestroyedType();
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ DestroyedType->isObjCLifetimeType() &&
+ (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = PseudoDtor->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (PseudoDtor->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ PseudoDtor->getDestroyedType().isVolatileQualified()),
+ /*precise*/ true);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ }
+
+ return RValue::get(0);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+}
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BO_Comma) {
+ EmitIgnoredExpr(E->getLHS());
+ EnsureInsertPoint();
+ return EmitLValue(E->getRHS());
+ }
+
+ if (E->getOpcode() == BO_PtrMemD ||
+ E->getOpcode() == BO_PtrMemI)
+ return EmitPointerToDataMemberBinaryExpr(E);
+
+ assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+ // Note that in all of these cases, __block variables need the RHS
+ // evaluated first just in case the variable gets moved by the RHS.
+
+ if (!hasAggregateLLVMType(E->getType())) {
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return EmitARCStoreStrong(E, /*ignored*/ false).first;
+
+ case Qualifiers::OCL_Autoreleasing:
+ return EmitARCStoreAutoreleasing(E).first;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ break;
+ }
+
+ RValue RV = EmitAnyExpr(E->getRHS());
+ LValue LV = EmitLValue(E->getLHS());
+ EmitStoreThroughLValue(RV, LV);
+ return LV;
+ }
+
+ if (E->getType()->isAnyComplexType())
+ return EmitComplexAssignmentLValue(E);
+
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
+ && "binding l-value to type which needs a temporary");
+ AggValueSlot Slot = CreateAggTemp(E->getType());
+ EmitCXXConstructExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
+ return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ Slot.setExternallyDestructed();
+ EmitAggExpr(E->getSubExpr(), Slot);
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ EmitLambdaExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ RValue RV = EmitObjCMessageExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+
+ assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
+ llvm::Value *V =
+ CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
+ return MakeAddrLValue(V, E->getType());
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = 0;
+ const Expr *BaseExpr = E->getBase();
+ Qualifiers BaseQuals;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ ObjectTy = BaseExpr->getType()->getPointeeType();
+ BaseQuals = ObjectTy.getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ ObjectTy = BaseExpr->getType();
+ BaseQuals = ObjectTy.getQualifiers();
+ }
+
+ LValue LV =
+ EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+ BaseQuals.getCVRQualifiers());
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+  // Can only get an l-value for a statement expression returning an
+  // aggregate type.
+ RValue RV = EmitAnyExprToTemp(E);
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+}
+
+RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl) {
+ // Get the actual function type. The callee type will always be a pointer to
+ // function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ CalleeType = getContext().getCanonicalType(CalleeType);
+
+ const FunctionType *FnType
+ = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+
+ CallArgList Args;
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FnType);
+
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type
+ // that does not include a prototype, [the default argument
+ // promotions are performed]. If the number of arguments does not
+ // equal the number of parameters, the behavior is undefined. If
+ // the function is defined with a type that includes a prototype,
+ // and either the prototype ends with an ellipsis (, ...) or the
+ // types of the arguments after promotion are not compatible with
+ // the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a
+ // prototype, and the types of the arguments after promotion are
+ // not compatible with those of the parameters after promotion,
+ // the behavior is undefined [except in some trivial cases].
+ // That is, in the general case, we should assume that a call
+ // through an unprototyped function type works like a *non-variadic*
+ // call. The way we make this work is to cast to the exact type
+ // of the promoted arguments.
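+  // Illustrative example (not from the original source): for
+  //   void f();        /* unprototyped in C */
+  //   f(1, 2.0);
+  // the callee is bitcast to 'void (i32, double)*', the exact type of the
+  // promoted arguments.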
+ if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
+ CalleeTy = CalleeTy->getPointerTo();
+ Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
+ }
+
+ return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
+}
+
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+ llvm::Value *BaseV;
+ if (E->getOpcode() == BO_PtrMemI)
+ BaseV = EmitScalarExpr(E->getLHS());
+ else
+ BaseV = EmitLValue(E->getLHS()).getAddress();
+
+ llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
+
+ const MemberPointerType *MPT
+ = E->getRHS()->getType()->getAs<MemberPointerType>();
+
+ llvm::Value *AddV =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
+
+ return MakeAddrLValue(AddV, MPT->getPointeeType());
+}
+
+static void
+EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+ llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+ uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
+ // Note that cmpxchg only supports specifying one ordering and
+ // doesn't support weak cmpxchg, at least at the moment.
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
+ LoadVal2->setAlignment(Align);
+ llvm::AtomicCmpXchgInst *CXI =
+ CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
+ CXI->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
+ StoreVal1->setAlignment(Align);
+ llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
+ CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
+ Load->setAtomic(Order);
+ Load->setAlignment(Size);
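+    // (EmitAtomicExpr only reaches this path when Size == Align; mismatches
+    // take the library-call fallback, so Size also serves as the alignment.)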
+ Load->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
+ StoreDest->setAlignment(Align);
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
+ assert(!Dest && "Store does not return a value");
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
+ Store->setAtomic(Order);
+ Store->setAlignment(Size);
+ Store->setVolatile(E->isVolatile());
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
+ }
+
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::AtomicRMWInst *RMWI =
+ CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+ RMWI->setVolatile(E->isVolatile());
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
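+  // E.g. (illustrative) __atomic_add_fetch lowers to an 'atomicrmw add',
+  // which yields the old value, plus a plain add to recover the new one;
+  // nand additionally needs the trailing 'not' below.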
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
+ StoreDest->setAlignment(Align);
+}
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static llvm::Value *
+EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
+ llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+ CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+ return DeclPtr;
+}
+
+static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
+ llvm::Value *Dest) {
+ if (Ty->isAnyComplexType())
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
+ if (CGF.hasAggregateLLVMType(Ty))
+ return RValue::getAggregate(Dest);
+ return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
+}
+
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+ QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
+ CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+ uint64_t Size = sizeChars.getQuantity();
+ CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
+ unsigned Align = alignChars.getQuantity();
+ unsigned MaxInlineWidth =
+ getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+ llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
+ Ptr = EmitScalarExpr(E->getPtr());
+
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ assert(!Dest && "Init does not return a value");
+ if (!hasAggregateLLVMType(E->getVal1()->getType())) {
+ QualType PointeeType
+ = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
+ EmitScalarInit(EmitScalarExpr(E->getVal1()),
+ LValue::MakeAddr(Ptr, PointeeType, alignChars,
+ getContext()));
+ } else if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
+ AtomicTy.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(E->getVal1(), Slot);
+ }
+ return RValue::get(0);
+ }
+
+ Order = EmitScalarExpr(E->getOrder());
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ Dest = EmitScalarExpr(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitScalarExpr(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
+ OrderFail = EmitScalarExpr(E->getOrderFail());
+ // Evaluate and discard the 'weak' argument.
+ if (E->getNumSubExprs() == 6)
+ EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
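+      // Illustrative example (assumed): for '_Atomic(int *) p;',
+      //   __c11_atomic_fetch_add(&p, 1, memory_order_seq_cst)
+      // must advance p by sizeof(int) bytes, so the 1 is multiplied by
+      // sizeof(int) here before the underlying atomicrmw.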
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
+ }
+
+ if (!E->getType()->isVoidType() && !Dest)
+ Dest = CreateMemTemp(E->getType(), ".atomicdst");
+
+ // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+ if (UseLibcall) {
+
+    CallArgList Args;
+ // Size is always the first parameter
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ // Atomic address is always the second parameter
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
+ getContext().VoidPtrTy);
+
+ const char* LibCallName;
+ QualType RetTy = getContext().VoidTy;
+ switch (E->getOp()) {
+    // There is only one libcall for compare and exchange, because there is
+    // no optimization benefit possible from a libcall version of a weak
+    // compare and exchange.
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure)
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ LibCallName = "__atomic_compare_exchange";
+ RetTy = getContext().BoolTy;
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+ Order = OrderFail;
+ break;
+ // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
+ // int order)
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ LibCallName = "__atomic_exchange";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ LibCallName = "__atomic_store";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_load(size_t size, void *mem, void *return, int order)
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ LibCallName = "__atomic_load";
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+#if 0
+ // These are only defined for 1-16 byte integers. It is not clear what
+ // their semantics would be on anything else...
+ case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
+ case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
+ case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
+ case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
+ case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
+#endif
+ default: return EmitUnsupportedRValue(E, "atomic library call");
+ }
+ // order is always the last parameter
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ if (E->isCmpXChg())
+ return Res;
+ if (E->getType()->isVoidType())
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), Dest);
+ }
+
+ llvm::Type *IPtrTy =
+ llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
+ llvm::Value *OrigDest = Dest;
+ Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
+ if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
+ if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
+ if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::SequentiallyConsistent);
+ break;
+ default: // invalid order
+ // We should not ever get here normally, but it's hard to
+ // enforce that in general.
+ break;
+ }
+ if (E->getType()->isVoidType())
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), OrigDest);
+ }
+
+ // Long case, when Order isn't obviously constant.
+
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
+ // Create all the relevant BB's
+ llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
+ *AcqRelBB = 0, *SeqCstBB = 0;
+ MonotonicBB = createBasicBlock("monotonic", CurFn);
+ if (!IsStore)
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ if (!IsLoad)
+ ReleaseBB = createBasicBlock("release", CurFn);
+ if (!IsLoad && !IsStore)
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ // Create the switch for the split
+ // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+ // doesn't matter unless someone is crazy enough to use something that
+ // doesn't fold to a constant for the ordering.
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+ // Emit all the different atomics
+ Builder.SetInsertPoint(MonotonicBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Monotonic);
+ Builder.CreateBr(ContBB);
+ if (!IsStore) {
+ Builder.SetInsertPoint(AcquireBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Acquire);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+ }
+ if (!IsLoad) {
+ Builder.SetInsertPoint(ReleaseBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Release);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+ }
+ if (!IsLoad && !IsStore) {
+ Builder.SetInsertPoint(AcqRelBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::AcquireRelease);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+ }
+ Builder.SetInsertPoint(SeqCstBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::SequentiallyConsistent);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ // Cleanup and return
+ Builder.SetInsertPoint(ContBB);
+ if (E->getType()->isVoidType())
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), OrigDest);
+}
+
+void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
+ assert(Val->getType()->isFPOrFPVectorTy());
+ if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
+ return;
+
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
+
+ cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
+}
+
+namespace {
+ struct LValueOrRValue {
+ LValue LV;
+ RValue RV;
+ };
+}
+
+static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E,
+ bool forLValue,
+ AggValueSlot slot) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression, if any.
+ const Expr *resultExpr = E->getResultExpr();
+ LValueOrRValue result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+
+ // If this is the result expression, we may need to evaluate
+ // directly into the slot.
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+ if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
+ !ov->getType()->isAnyComplexType()) {
+ CGF.EmitAggExpr(ov->getSourceExpr(), slot);
+
+ LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ opaqueData = OVMA::bind(CGF, ov, LV);
+ result.RV = slot.asRValue();
+
+ // Otherwise, emit as normal.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+
+ // If this is the result, also evaluate the result now.
+ if (ov == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(ov);
+ else
+ result.RV = CGF.EmitAnyExpr(ov, slot);
+ }
+ }
+
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(semantic);
+ else
+ result.RV = CGF.EmitAnyExpr(semantic, slot);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
+RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
+ AggValueSlot slot) {
+ return emitPseudoObjectExpr(*this, E, false, slot).RV;
+}
+
+LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
+ return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
+}
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..7b0e0f5
--- /dev/null
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,1343 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ AggValueSlot Dest;
+ bool IgnoreResult;
+
+ /// We want to use 'dest' as the return slot except under two
+ /// conditions:
+ /// - The destination slot requires garbage collection, so we
+ /// need to use the GC API.
+ /// - The destination slot is potentially aliased.
+ bool shouldUseDestForReturnSlot() const {
+ return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
+ }
+
+ ReturnValueSlot getReturnValueSlot() const {
+ if (!shouldUseDestForReturnSlot())
+ return ReturnValueSlot();
+
+ return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
+ }
+
+ AggValueSlot EnsureSlot(QualType T) {
+ if (!Dest.isIgnored()) return Dest;
+ return CGF.CreateAggTemp(T, "agg.tmp.ensured");
+ }
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
+ bool ignore)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+ IgnoreResult(ignore) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+  /// represents an lvalue, this method emits the address of the lvalue,
+  /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
+ unsigned Alignment = 0);
+
+ void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+
+ void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
+ void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E);
+
+ AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
+ if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
+ return AggValueSlot::NeedsGCBarriers;
+ return AggValueSlot::DoesNotNeedGCBarriers;
+ }
+
+ bool TypeRequiresGCollection(QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ Visit(GE->getResultExpr());
+ }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // For aggregates, we should always be able to emit the variable
+ // as an l-value unless it's a reference. This is due to the fact
+ // that we can't actually ever see a normal l2r conversion on an
+ // aggregate in C++, and in C there's no language standard
+ // actively preventing us from listing variables in the captures
+ // list of a block.
+ if (E->getDecl()->getType()->isReferenceType()) {
+ if (CodeGenFunction::ConstantEmission result
+ = CGF.tryEmitAsConstant(E)) {
+ EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+ return;
+ }
+ }
+
+ EmitAggLoadOfLValue(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCastExpr(CastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
+ void VisitChooseExpr(const ChooseExpr *CE);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitLambdaExpr(LambdaExpr *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ if (E->isGLValue()) {
+ LValue LV = CGF.EmitPseudoObjectLValue(E);
+ return EmitFinalDestCopy(E, LV);
+ }
+
+ CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
+ }
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address);
+ // case Expr::ChooseExprClass:
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+ void VisitAtomicExpr(AtomicExpr *E) {
+ CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+/// \brief True if the given aggregate type requires special GC API calls.
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+ // Only record types have members that might require garbage collection.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy) return false;
+
+ // Don't mess with non-trivial C++ types.
+ RecordDecl *Record = RecordTy->getDecl();
+ if (isa<CXXRecordDecl>(Record) &&
+ (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
+ !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+ return false;
+
+ // Check whether the type has an object member.
+ return Record->hasObjectMember();
+}
+
+/// \brief Perform the final move to DestPtr if for some reason
+/// getReturnValueSlot() didn't use it directly.
+///
+/// The idea is that you do something like this:
+/// RValue Result = EmitSomething(..., getReturnValueSlot());
+/// EmitMoveFromReturnSlot(E, Result);
+///
+/// If nothing interferes, this will cause the result to be emitted
+/// directly into the return value slot. Otherwise, a final move
+/// will be performed.
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+ if (shouldUseDestForReturnSlot()) {
+ // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
+ // The possibility of undef rvalues complicates that a lot,
+ // though, so we can't really assert.
+ return;
+ }
+
+  // Otherwise, do a final copy.
+ assert(Dest.getAddr() != Src.getAggregateAddr());
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ CGF.getContext().getTypeInfoInChars(E->getType());
+ CharUnits Alignment = std::min(TypeInfo.second, Dest.getAlignment());
+ EmitFinalDestCopy(E, Src, /*Ignore*/ true, Alignment.getQuantity());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
+ unsigned Alignment) {
+ assert(Src.isAggregate() && "value must be aggregate value!");
+
+ // If Dest is ignored, then we're evaluating an aggregate expression
+ // in a context (like an expression statement) that doesn't care
+ // about the result. C says that an lvalue-to-rvalue conversion is
+ // performed in these cases; C++ says that it is not. In either
+ // case, we don't actually need to do anything unless the value is
+ // volatile.
+ if (Dest.isIgnored()) {
+ if (!Src.isVolatileQualified() ||
+ CGF.CGM.getLangOpts().CPlusPlus ||
+ (IgnoreResult && Ignore))
+ return;
+
+ // If the source is volatile, we must read from it; to do that, we need
+ // some place to put it.
+ Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
+ }
+
+ if (Dest.requiresGCollection()) {
+ CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
+ llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+ Dest.getAddr(),
+ Src.getAggregateAddr(),
+ SizeVal);
+ return;
+ }
+ // If the result of the assignment is used, copy the LHS there also.
+ // FIXME: Pass VolatileDest as well. I think we also need to merge volatile
+ // from the source as well, as we can't eliminate it if either operand
+  // is volatile, unless the copy has volatile for both source and destination.
+ CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
+ Dest.isVolatile()|Src.isVolatileQualified(),
+ Alignment);
+}
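+
+// Illustration of the ignored-volatile path above (a sketch; 'S' and 'f'
+// are hypothetical). Per the C rule stated in the comment, an expression
+// statement naming a volatile aggregate must still read it, so we
+// materialize an "agg.tmp" slot and copy into it:
+//
+//   volatile struct S { int i; } s;
+//   void f(void) { s; }   // C: emits a volatile copy; C++: emits nothing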
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+ assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+ CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
+ EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+}
+
+static QualType GetStdInitializerListElementType(QualType T) {
+ // Just assume that this is really std::initializer_list.
+ ClassTemplateSpecializationDecl *specialization =
+ cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
+ return specialization->getTemplateArgs()[0].getAsType();
+}
+
+/// \brief Prepare cleanup for the temporary array.
+static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
+ QualType arrayType,
+ llvm::Value *addr,
+ const InitListExpr *initList) {
+ QualType::DestructionKind dtorKind = arrayType.isDestructedType();
+ if (!dtorKind)
+ return; // Type doesn't need destroying.
+ if (dtorKind != QualType::DK_cxx_destructor) {
+ CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
+ return;
+ }
+
+ CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
+ CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
+ /*EHCleanup=*/true);
+}
+
+/// \brief Emit the initializer for a std::initializer_list initialized with a
+/// real initializer list.
+void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
+ InitListExpr *initList) {
+ // We emit an array containing the elements, then have the init list point
+ // at the array.
+ ASTContext &ctx = CGF.getContext();
+ unsigned numInits = initList->getNumInits();
+ QualType element = GetStdInitializerListElementType(initList->getType());
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal,0);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(array);
+ llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
+ alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
+ alloc->setName(".initlist.");
+
+ EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
+
+ // FIXME: The diagnostics are somewhat out of place here.
+ RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ QualType elementPtr = ctx.getPointerType(element.withConst());
+
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue DestLV = CGF.MakeNaturalAlignAddrLValue(destPtr, initList->getType());
+ LValue start = CGF.EmitLValueForFieldInitialization(DestLV, *field);
+ llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
+ ++field;
+
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue endOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *field);
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+    llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc, numInits,
+                                                    "arrayend");
+    CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
+  } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
+ } else {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ if (!Dest.isExternallyDestructed())
+ EmitStdInitializerListCleanup(CGF, array, alloc, initList);
+}
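+
+// Lowering sketch for the function above (the std::initializer_list member
+// names are illustrative; the code only requires a 'const E *' start
+// pointer field followed by either an end pointer or a size field):
+//
+//   std::initializer_list<int> il = {1, 2, 3};
+//
+// becomes, roughly:
+//
+//   const int backing[3] = {1, 2, 3};  // the ".initlist." temporary
+//   il.<field 0> = &backing[0];        // start pointer
+//   il.<field 1> = &backing[3];        // end pointer (or the size, 3)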
+
+/// \brief Emit initialization of an array from an initializer list.
+void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E) {
+ uint64_t NumInitElements = E->getNumInits();
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ assert(NumInitElements <= NumArrayElements);
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ llvm::AllocaInst *endOfInit = 0;
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ "arrayinit.endOfInit");
+ cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // If these are nested std::initializer_list inits, do them directly,
+ // because they are conceptually the same "location".
+ InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
+ if (initList && initList->initializesStdInitializerList()) {
+ EmitStdInitializerList(element, initList);
+ } else {
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
+ }
+
+ // Check whether there's a non-trivial array-fill expression.
+ // Note that this will be a CXXConstructExpr even if the element
+ // type is an array (or array of array, etc.) of class type.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = true;
+ if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
+ assert(cons->getConstructor()->isDefaultConstructor());
+ hasTrivialFiller = cons->getConstructor()->isTrivial();
+ }
+
+ // Any remaining elements need to be zero-initialized, possibly
+  // using the filler expression.  We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
+ else
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
+ }
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
+}
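+
+// The filler loop emitted above corresponds to this C-level sketch, with
+// names taken from the IR labels (illustrative only):
+//
+//   element = begin + NumInitElements;                 // arrayinit.start
+//   do {                                               // arrayinit.body
+//     *element++ = filler-or-zero;                     // arrayinit.next
+//   } while (element != begin + NumArrayElements);     // arrayinit.done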
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
+ Visit(E->GetTemporaryExpr());
+}
+
+void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+}
+
+void
+AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ if (E->getType().isPODType(CGF.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ // FIXME: This is a band-aid; the real problem appears to be in our handling
+ // of assignments, where we store directly into the LHS without checking
+ // whether anything in the RHS aliases.
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitAggExpr(E->getInitializer(), Slot);
+}
+
+
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ switch (E->getCastKind()) {
+ case CK_Dynamic: {
+ assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ // FIXME: Do we also need to handle property references here?
+ if (LV.isSimple())
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
+ else
+ CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
+
+ if (!Dest.isIgnored())
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ break;
+ }
+
+ case CK_ToUnion: {
+ if (Dest.isIgnored()) break;
+
+ // GCC union extension
+ QualType Ty = E->getSubExpr()->getType();
+ QualType PtrTy = CGF.getContext().getPointerType(Ty);
+ llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
+ CGF.ConvertType(PtrTy));
+ EmitInitializationToLValue(E->getSubExpr(),
+ CGF.MakeAddrLValue(CastPtr, Ty));
+ break;
+ }
+
+ case CK_DerivedToBase:
+ case CK_BaseToDerived:
+ case CK_UncheckedDerivedToBase: {
+ llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
+ "should have been unpacked before we got here");
+ }
+
+ case CK_LValueToRValue: // hope for downstream optimization
+ case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+ break;
+
+ case CK_LValueBitCast:
+ llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ llvm_unreachable("cast kind invalid for aggregate types");
+ }
+}
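+
+// Example of the CK_ToUnion path above (a GCC extension): only the member
+// matching the operand's type is initialized, through a casted pointer.
+//
+//   union U { int i; float f; };
+//   union U u = (union U)42;   // stores 42 through an 'int *' into 'u'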
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
+ EmitMoveFromReturnSlot(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
+ EmitMoveFromReturnSlot(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ Visit(E->getRHS());
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
+ VisitPointerToDataMemberBinaryOperator(E);
+ else
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
+ const BinaryOperator *E) {
+ LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasAttr<BlocksAttr>() &&
+ E->getRHS()->HasSideEffects(CGF.getContext())) {
+ // When __block variable on LHS, the RHS must be evaluated first
+ // as it may change the 'forwarding' field via call to Block_copy.
+ LValue RHS = CGF.EmitLValue(E->getRHS());
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+ Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+ EmitFinalDestCopy(E, RHS, true);
+ return;
+ }
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
+ EmitFinalDestCopy(E, LHS, true);
+}
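+
+// Illustrative source pattern for the __block special case above (helper
+// names are hypothetical):
+//
+//   __block struct S s;
+//   void (^b)(void) = ^{ touch(&s); };
+//   s = consume(b);   // consume() may Block_copy(b), moving 's' to the
+//                     // heap via 'forwarding'; hence RHS before LHS.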
+
+void AggExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ // Save whether the destination's lifetime is externally managed.
+ bool isExternallyDestructed = Dest.isExternallyDestructed();
+
+ eval.begin(CGF);
+ CGF.EmitBlock(LHSBlock);
+ Visit(E->getTrueExpr());
+ eval.end(CGF);
+
+ assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
+ CGF.Builder.CreateBr(ContBlock);
+
+ // If the result of an agg expression is unused, then the emission
+ // of the LHS might need to create a destination slot. That's fine
+ // with us, and we can safely emit the RHS into the same slot, but
+ // we shouldn't claim that it's already being destructed.
+ Dest.setExternallyDestructed(isExternallyDestructed);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ Visit(E->getFalseExpr());
+ eval.end(CGF);
+
+ CGF.EmitBlock(ContBlock);
+}
+
+void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
+ Visit(CE->getChosenSubExpr(CGF.getContext()));
+}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+ return;
+ }
+
+ EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ // Ensure that we have a slot, but if we already do, remember
+ // whether it was externally destructed.
+ bool wasExternallyDestructed = Dest.isExternallyDestructed();
+ Dest = EnsureSlot(E->getType());
+
+ // We're going to push a destructor if there isn't already one.
+ Dest.setExternallyDestructed();
+
+ Visit(E->getSubExpr());
+
+ // Push that destructor we promised.
+ if (!wasExternallyDestructed)
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitCXXConstructExpr(E, Slot);
+}
+
+void
+AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitLambdaExpr(E, Slot);
+}
+
+void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+}
+
+void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+}
+
+/// isSimpleZero - If emitting this value will obviously just cause a store of
+/// zero to memory, return true. This can return false if uncertain, so it just
+/// handles simple cases.
+static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // 0
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return IL->getValue() == 0;
+ // +0.0
+ if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
+ return FL->getValue().isPosZero();
+ // int()
+ if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ return true;
+ // (int*)0 - Null pointer expressions.
+ if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
+ return ICE->getCastKind() == CK_NullToPointer;
+ // '\0'
+ if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
+ return CL->getValue() == 0;
+
+ // Otherwise, hard case: conservatively return false.
+ return false;
+}
+
+
+void
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+ QualType type = LV.getType();
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
+ // Storing "i32 0" to a zero'd memory location is a noop.
+ } else if (isa<ImplicitValueInitExpr>(E)) {
+ EmitNullInitializationToLValue(LV);
+ } else if (type->isReferenceType()) {
+ RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+ CGF.EmitStoreThroughLValue(RV, LV);
+ } else if (type->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
+ } else if (CGF.hasAggregateLLVMType(type)) {
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ Dest.isZeroed()));
+ } else if (LV.isSimple()) {
+ CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
+ } else {
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
+ }
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
+ QualType type = lv.getType();
+
+ // If the destination slot is already zeroed out before the aggregate is
+ // copied into it, we don't have to emit any zeros here.
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
+ return;
+
+ if (!CGF.hasAggregateLLVMType(type)) {
+ // For non-aggregates, we can store zero.
+ llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
+ // Note that the following is not equivalent to
+ // EmitStoreThroughBitfieldLValue for ARC types.
+ if (lv.isBitField()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
+ } else {
+ assert(lv.isSimple());
+ CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
+ }
+ } else {
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
+ }
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Assess perf here? Figure out what cases are worth optimizing here
+ // (Length of globals? Chunks of zeroed-out space?).
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C, "");
+ EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ if (E->initializesStdInitializerList()) {
+ EmitStdInitializerList(Dest.getAddr(), E);
+ return;
+ }
+
+ AggValueSlot Dest = EnsureSlot(E->getType());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
+ Dest.getAlignment());
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ if (E->isStringLiteralInit())
+ return Visit(E->getInit(0));
+
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
+
+ llvm::PointerType *APType =
+ cast<llvm::PointerType>(Dest.getAddr()->getType());
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+
+ EmitArrayInit(Dest.getAddr(), AType, elementType, E);
+ return;
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+  // to the appropriate value.  This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
+
+ if (record->isUnion()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+    // Make sure that it's really an empty union and not a failure of
+    // semantic analysis.
+ for (RecordDecl::field_iterator Field = record->field_begin(),
+ FieldEnd = record->field_end();
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
+ } else {
+ // Default-initialize to null.
+ EmitNullInitializationToLValue(FieldLoc);
+ }
+
+ return;
+ }
+
+ // We'll need to enter cleanup scopes in case any of the member
+ // initializers throw an exception.
+ SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ llvm::Instruction *cleanupDominator = 0;
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ unsigned curInitIndex = 0;
+ for (RecordDecl::field_iterator field = record->field_begin(),
+ fieldEnd = record->field_end();
+ field != fieldEnd; ++field) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
+ break;
+
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitfield())
+ continue;
+
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == NumInitElements && Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ break;
+
+
+ LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
+    // We never generate write-barriers for initialized fields.
+ LV.setNonGC(true);
+
+ if (curInitIndex < NumInitElements) {
+ // Store the initializer into the field.
+ EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
+ } else {
+      // We're out of initializers; default-initialize to null.
+ EmitNullInitializationToLValue(LV);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ bool pushedCleanup = false;
+ if (QualType::DestructionKind dtorKind
+ = field->getType().isDestructedType()) {
+ assert(LV.isSimple());
+ if (CGF.needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator)
+ cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
+
+ CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
+ CGF.getDestroyer(dtorKind), false);
+ cleanups.push_back(CGF.EHStack.stable_begin());
+ pushedCleanup = true;
+ }
+ }
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (!pushedCleanup && LV.isSimple())
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
+ }
+
+ // Deactivate all the partial cleanups in reverse order, which
+ // generally means popping them.
+ for (unsigned i = cleanups.size(); i != 0; --i)
+ CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
+
+ // Destroy the placeholder if we made one.
+ if (cleanupDominator)
+ cleanupDominator->eraseFromParent();
+}
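+
+// Exception-safety sketch for the struct path above (types assumed):
+//
+//   struct P { std::string a, b; };
+//   P p = { f(), g() };
+//
+// After 'a' is initialized from f(), an EH cleanup for 'a' is pushed; if
+// g() throws, that cleanup destroys 'a'. Once every field is initialized,
+// the cleanups are deactivated and the placeholder dominator is erased.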
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
+/// non-zero bytes that will be stored when outputting the initializer for the
+/// specified initializer expression.
+static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // 0 and 0.0 won't require any non-zero stores!
+ if (isSimpleZero(E, CGF)) return CharUnits::Zero();
+
+  // If this is an initlist expr, sum up the sizes of the (present)
+ // elements. If this is something weird, assume the whole thing is non-zero.
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+ if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+ return CGF.getContext().getTypeSizeInChars(E->getType());
+
+ // InitListExprs for structs have to be handled carefully. If there are
+ // reference members, we need to consider the size of the reference, not the
+ // referencee. InitListExprs for unions and arrays can't have references.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (!RT->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
+
+ unsigned ILEElement = 0;
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member or run out of
+ // InitListExpr elements.
+ if (Field->getType()->isIncompleteArrayType() ||
+ ILEElement == ILE->getNumInits())
+ break;
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ const Expr *E = ILE->getInit(ILEElement++);
+
+ // Reference values are always non-null and have the width of a pointer.
+ if (Field->getType()->isReferenceType())
+ NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
+ CGF.getContext().getTargetInfo().getPointerWidth(0));
+ else
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
+ }
+
+ return NumNonZeroBytes;
+ }
+ }
+
+
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
+ return NumNonZeroBytes;
+}
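+
+// Worked example for the estimate above (assuming a 4-byte int):
+//
+//   struct T { int a; int pad[8]; };
+//   struct T t = { 1 };
+//
+// returns 4: only the explicit non-zero initializer for 'a' counts; the
+// implicit zero fill of 'pad' contributes nothing.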
+
+/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
+/// zeros in it, emit a memset and avoid storing the individual zeros.
+///
+static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
+ CodeGenFunction &CGF) {
+ // If the slot is already known to be zeroed, nothing to do. Don't mess with
+ // volatile stores.
+ if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
+
+ // C++ objects with a user-declared constructor don't need zero'ing.
+ if (CGF.getContext().getLangOpts().CPlusPlus)
+ if (const RecordType *RT = CGF.getContext()
+ .getBaseElementType(E->getType())->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasUserDeclaredConstructor())
+ return;
+ }
+
+  // If the type is 16 bytes or smaller, prefer individual stores over memset.
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ CGF.getContext().getTypeInfoInChars(E->getType());
+ if (TypeInfo.first <= CharUnits::fromQuantity(16))
+ return;
+
+  // Check to see if over 3/4 of the initializer bytes are known to be zero.
+  // If so, we prefer to emit a memset followed by individual stores for the
+  // rest.
+ CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+ if (NumNonZeroBytes*4 > TypeInfo.first)
+ return;
+
+ // Okay, it seems like a good idea to use an initial memset, emit the call.
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
+ CharUnits Align = TypeInfo.second;
+
+ llvm::Value *Loc = Slot.getAddr();
+
+ Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
+
+ // Tell the AggExprEmitter that the slot is known zero.
+ Slot.setZeroed();
+}
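+
+// Worked example of the heuristic (sizes assumed): given
+//
+//   struct Big { char buf[60]; int x; } v = { {0}, 1 };
+//
+// the type is 64 bytes (> 16) and only 4 bytes are non-zero; since
+// 4 * 4 = 16 <= 64, we emit one 64-byte memset(0) and then a single
+// store for 'x'.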
+
+
+
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type.  The result is computed into the given slot, which must have an
+/// address unless it is ignored; if the slot is ignored, the value of the
+/// aggregate expression is not needed.
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
+ bool IgnoreResult) {
+ assert(E && hasAggregateLLVMType(E->getType()) &&
+ "Invalid aggregate expression to emit");
+ assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
+ "slot has bits but no address");
+
+ // Optimize the slot if possible.
+ CheckAggExprForMemSetUse(Slot, E, *this);
+
+ AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
+}
+
+LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
+ assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
+ llvm::Value *Temp = CreateMemTemp(E->getType());
+ LValue LV = MakeAddrLValue(Temp, E->getType());
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ return LV;
+}
+
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+ llvm::Value *SrcPtr, QualType Ty,
+ bool isVolatile, unsigned Alignment) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ if (getContext().getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ assert((Record->hasTrivialCopyConstructor() ||
+ Record->hasTrivialCopyAssignment() ||
+ Record->hasTrivialMoveConstructor() ||
+ Record->hasTrivialMoveAssignment()) &&
+ "Trying to aggregate-copy a type without a trivial copy "
+ "constructor or assignment operator");
+ // Ignore empty classes in C++.
+ if (Record->isEmpty())
+ return;
+ }
+ }
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+  // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
+
+ // Get size and alignment info for this aggregate.
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ getContext().getTypeInfoInChars(Ty);
+
+ if (!Alignment)
+ Alignment = TypeInfo.second.getQuantity();
+
+ // FIXME: Handle variable sized types.
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+ // we need to use a different call here. We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+
+ llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ llvm::Type *DBP =
+ llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP);
+
+ llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::Type *SBP =
+ llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
+
+ // Don't do any of the memmove_collectable tests if GC isn't set.
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // fall through
+ } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ RecordDecl *Record = RecordTy->getDecl();
+ if (Record->hasObjectMember()) {
+ CharUnits size = TypeInfo.first;
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ } else if (Ty->isArrayType()) {
+ QualType BaseType = getContext().getBaseElementType(Ty);
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ if (RecordTy->getDecl()->hasObjectMember()) {
+ CharUnits size = TypeInfo.first;
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(SizeTy, size.getQuantity());
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ }
+ }
+
+ Builder.CreateMemCpy(DestPtr, SrcPtr,
+ llvm::ConstantInt::get(IntPtrTy,
+ TypeInfo.first.getQuantity()),
+ Alignment, isVolatile);
+}
+
+void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
+ const Expr *init) {
+ const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
+ if (cleanups)
+ init = cleanups->getSubExpr();
+
+ if (isa<InitListExpr>(init) &&
+ cast<InitListExpr>(init)->initializesStdInitializerList()) {
+ // We initialized this std::initializer_list with an initializer list.
+ // A backing array was created. Push a cleanup for it.
+ EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
+ }
+}
+
+static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
+ llvm::Value *arrayStart,
+ const InitListExpr *init) {
+ // Check if there are any recursive cleanups to do, i.e. if we have
+ // std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
+ // then we need to destroy the inner array as well.
+ for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
+ const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
+ if (!subInit || !subInit->initializesStdInitializerList())
+ continue;
+
+ // This one needs to be destroyed. Get the address of the std::init_list.
+ llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
+ llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
+ "std.initlist");
+ CGF.EmitStdInitializerListCleanup(loc, subInit);
+ }
+}
+
+void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init) {
+ ASTContext &ctx = getContext();
+ QualType element = GetStdInitializerListElementType(init->getType());
+ unsigned numInits = init->getNumInits();
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array =ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
+ QualType arrayPtr = ctx.getPointerType(array);
+ llvm::Type *arrayPtrType = ConvertType(arrayPtr);
+
+ // lvalue is the location of a std::initializer_list, which as its first
+ // element has a pointer to the array we want to destroy.
+ llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
+ llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
+
+ ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
+
+ llvm::Value *arrayAddress =
+ Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
+ ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
+}
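+
+// Nested cleanup sketch for the recursion above:
+//
+//   std::initializer_list<std::initializer_list<int>> ll = {{1}, {2, 3}};
+//
+// Each inner list has its own backing array; the recursive walk pushes
+// cleanups for those inner arrays before the cleanup for the outer array,
+// so the outer array is destroyed first when the scope unwinds.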
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
new file mode 100644
index 0000000..c69c883
--- /dev/null
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -0,0 +1,1833 @@
+//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CodeGenFunction.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGDebugInfo.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), MD->getThisType(getContext()));
+
+ // If there is a VTT parameter, emit it.
+ if (VTT) {
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ Args.add(RValue::get(VTT), T);
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
+
+ // And the rest of the call args.
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
+ FPT->getExtInfo(),
+ required),
+ Callee, ReturnValue, Args, MD);
+}
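+
+// Arrangement sketch for the above: a member call 'obj.m(a, b)' is lowered
+// like a free function call with 'this' prepended, plus a VTT argument for
+// constructors/destructors of classes with virtual bases:
+//
+//   m(&obj, a, b);   // conceptual argument order; the real ABI lowering
+//                    // is applied by arrangeFunctionCall/EmitCall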
+
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
+/// expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
+ const Expr *Base,
+ const CXXMethodDecl *MD) {
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOpts().AppleKext)
+ return false;
+
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+  //   struct A { virtual void f(); };
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+  // If the member function is marked 'final', we know that it can't be
+  // overridden, and we can therefore devirtualize calls to it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+  // Similarly, if the class itself is marked 'final', it can't be derived
+  // from, and we can therefore devirtualize the member function call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
+ Base = skipNoOpCastsAndParens(Base);
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // This is a record decl. We know the type and can devirtualize it.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
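+
+// Further illustration of the rules above:
+//
+//   struct A { virtual void f(); };
+//   void g() {
+//     A a;
+//     a.f();    // 'a' is a DeclRefExpr of record type: direct call
+//     A().f();  // temporary object: direct call
+//     A *p = &a;
+//     p->f();   // pointer to a non-final type: stays virtual
+//   }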
+
+// Note: This function also emits constructor calls to support an MSVC
+// extension allowing explicit constructor function calls.
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+ ReturnValueSlot ReturnValue) {
+ const Expr *callee = CE->getCallee()->IgnoreParens();
+
+ if (isa<BinaryOperator>(callee))
+ return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
+
+ const MemberExpr *ME = cast<MemberExpr>(callee);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI && CGM.getCodeGenOpts().LimitDebugInfo
+ && !isa<CallExpr>(ME->getBase())) {
+ QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
+    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ MD->getParent()->getLocation());
+ }
+ }
+
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee,
+ ReturnValue, CE->arg_begin(), CE->arg_end());
+ }
+
+ // Compute the object pointer.
+ llvm::Value *This;
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else
+ This = EmitLValue(ME->getBase()).getAddress();
+
+ if (MD->isTrivial()) {
+ if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
+ return RValue::get(0);
+
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
+ // We don't like to generate the trivial copy/move assignment operator
+ // when it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
+ // Trivial move and copy ctor are the same.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
+ CE->arg_begin(), CE->arg_end());
+ return RValue::get(This);
+ }
+ llvm_unreachable("unknown trivial member function");
+ }
+
+ // Compute the function type we're calling.
+ const CGFunctionInfo *FInfo = 0;
+ if (isa<CXXDestructorDecl>(MD))
+ FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ else if (isa<CXXConstructorDecl>(MD))
+ FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
+ cast<CXXConstructorDecl>(MD),
+ Ctor_Complete);
+ else
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
+
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier() &&
+                        !canDevirtualizeMemberFunctionCalls(getContext(),
+                                                            ME->getBase(), MD);
+ llvm::Value *Callee;
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (UseVirtualCall) {
+ Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
+ } else {
+ if (getContext().getLangOpts().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ }
+ } else if (const CXXConstructorDecl *Ctor =
+ dyn_cast<CXXConstructorDecl>(MD)) {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
+ } else if (UseVirtualCall) {
+ Callee = BuildVirtualCall(MD, This, Ty);
+ } else {
+ if (getContext().getLangOpts().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+ }
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ CE->arg_begin(), CE->arg_end());
+}
+
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BinaryOperator *BO =
+ cast<BinaryOperator>(E->getCallee()->IgnoreParens());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->castAs<MemberPointerType>();
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
+ // Emit the 'this' pointer.
+ llvm::Value *This;
+
+ if (BO->getOpcode() == BO_PtrMemI)
+ This = EmitScalarExpr(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
+
+ // Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *Callee =
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), ThisType);
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
+ ReturnValue, Args);
+}
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This = LV.getAddress();
+
+ if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
+ MD->isTrivial()) {
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
+ }
+
+ llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ E->arg_begin() + 1, E->arg_end());
+}
+
+RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
+}
+
+static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ const CXXRecordDecl *Base) {
+ if (Base->isEmpty())
+ return;
+
+ DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
+
+ const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
+ CharUnits Size = Layout.getNonVirtualSize();
+ CharUnits Align = Layout.getNonVirtualAlign();
+
+ llvm::Value *SizeVal = CGF.CGM.getSize(Size);
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
+ // TODO: isZeroInitializable can be over-conservative in the case where a
+ // virtual base contains a member pointer.
+ if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
+ llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, Twine());
+ NullVariable->setAlignment(Align.getQuantity());
+ llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
+ Align.getQuantity());
+}
+
+void
+CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+ AggValueSlot Dest) {
+ assert(!Dest.isIgnored() && "Must have a destination!");
+ const CXXConstructorDecl *CD = E->getConstructor();
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now, unless destination is
+ // already zeroed.
+ if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ assert(0 && "Delegating constructor should not need zeroing");
+ case CXXConstructExpr::CK_Complete:
+ EmitNullInitialization(Dest.getAddr(), E->getType());
+ break;
+ case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructExpr::CK_NonVirtualBase:
+ EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
+ break;
+ }
+ }
+
+ // If this is a call to a trivial default constructor, do nothing.
+ if (CD->isTrivial() && CD->isDefaultConstructor())
+ return;
+
+ // Elide the constructor if we're constructing from a temporary.
+ // The temporary check is required because Sema sets this on NRVO
+ // returns.
+ if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
+ assert(getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(0)->getType()));
+ if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
+ EmitAggExpr(E->getArg(0), Dest);
+ return;
+ }
+ }
+
+ if (const ConstantArrayType *arrayType
+ = getContext().getAsConstantArrayType(E->getType())) {
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
+ E->arg_begin(), E->arg_end());
+ } else {
+ CXXCtorType Type = Ctor_Complete;
+ bool ForVirtualBase = false;
+
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ // We should be emitting a constructor; GlobalDecl will assert this
+ Type = CurGD.getCtorType();
+ break;
+
+ case CXXConstructExpr::CK_Complete:
+ Type = Ctor_Complete;
+ break;
+
+ case CXXConstructExpr::CK_VirtualBase:
+ ForVirtualBase = true;
+ // fall-through
+
+ case CXXConstructExpr::CK_NonVirtualBase:
+ Type = Ctor_Base;
+ }
+
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
+ E->arg_begin(), E->arg_end());
+ }
+}
+
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
+ llvm::Value *Src,
+ const Expr *Exp) {
+ if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
+ Exp = E->getSubExpr();
+ assert(isa<CXXConstructExpr>(Exp) &&
+ "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
+ const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+ const CXXConstructorDecl *CD = E->getConstructor();
+ RunCleanupsScope Scope(*this);
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+ // FIXME. Do I still need this for a copy ctor synthesis?
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+ assert(!getContext().getAsConstantArrayType(E->getType())
+ && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
+ E->arg_begin(), E->arg_end());
+}
+
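+ // An array cookie is the allocation prefix in which the ABI records the
+ // element count so that delete[] can later run the right number of
+ // destructor calls; CGCXXABI::GetArrayCookieSize reports how much padding
+ // that requires.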
+static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
+ const CXXNewExpr *E) {
+ if (!E->isArray())
+ return CharUnits::Zero();
+
+ // No cookie is required if the operator new[] being used is the
+ // reserved placement operator new[].
+ if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
+ return CharUnits::Zero();
+
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
+}
+
+static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
+ const CXXNewExpr *e,
+ unsigned minElements,
+ llvm::Value *&numElements,
+ llvm::Value *&sizeWithoutCookie) {
+ QualType type = e->getAllocatedType();
+
+ if (!e->isArray()) {
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ sizeWithoutCookie
+ = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
+ return sizeWithoutCookie;
+ }
+
+ // The width of size_t.
+ unsigned sizeWidth = CGF.SizeTy->getBitWidth();
+
+ // Figure out the cookie size.
+ llvm::APInt cookieSize(sizeWidth,
+ CalculateCookiePadding(CGF, e).getQuantity());
+
+ // Emit the array size expression.
+ // We multiply the size of all dimensions for NumElements.
+ // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
+ numElements = CGF.EmitScalarExpr(e->getArraySize());
+ assert(isa<llvm::IntegerType>(numElements->getType()));
+
+ // The number of elements can have an arbitrary integer type;
+ // essentially, we need to multiply it by a constant factor, add a
+ // cookie size, and verify that the result is representable as a
+ // size_t. That's just a gloss, though, and it's wrong in one
+ // important way: if the count is negative, it's an error even if
+ // the cookie size would bring the total size >= 0.
+ bool isSigned
+ = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
+ llvm::IntegerType *numElementsType
+ = cast<llvm::IntegerType>(numElements->getType());
+ unsigned numElementsWidth = numElementsType->getBitWidth();
+
+ // Compute the constant factor.
+ llvm::APInt arraySizeMultiplier(sizeWidth, 1);
+ while (const ConstantArrayType *CAT
+ = CGF.getContext().getAsConstantArrayType(type)) {
+ type = CAT->getElementType();
+ arraySizeMultiplier *= CAT->getSize();
+ }
+
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
+ typeSizeMultiplier *= arraySizeMultiplier;
+
+ // This will be a size_t.
+ llvm::Value *size;
+
+ // If someone is doing 'new int[42]' there is no need to do a dynamic check.
+ // Don't bloat the -O0 code.
+ if (llvm::ConstantInt *numElementsC =
+ dyn_cast<llvm::ConstantInt>(numElements)) {
+ const llvm::APInt &count = numElementsC->getValue();
+
+ bool hasAnyOverflow = false;
+
+ // If 'count' was a negative number, it's an overflow.
+ if (isSigned && count.isNegative())
+ hasAnyOverflow = true;
+
+ // We want to do all this arithmetic in size_t. If numElements is
+ // wider than that, check whether it's already too big, and if so,
+ // overflow.
+ else if (numElementsWidth > sizeWidth &&
+ numElementsWidth - sizeWidth > count.countLeadingZeros())
+ hasAnyOverflow = true;
+
+ // Okay, compute a count at the right width.
+ llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
+
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers. If we do, that's treated like an overflow.
+ if (adjustedCount.ult(minElements))
+ hasAnyOverflow = true;
+
+ // Scale numElements by that. This might overflow, but we don't
+ // care because it only overflows if allocationSize does, too, and
+ // if that overflows then we shouldn't use this.
+ numElements = llvm::ConstantInt::get(CGF.SizeTy,
+ adjustedCount * arraySizeMultiplier);
+
+ // Compute the size before cookie, and track whether it overflowed.
+ bool overflow;
+ llvm::APInt allocationSize
+ = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
+ hasAnyOverflow |= overflow;
+
+ // Add in the cookie, and check whether it's overflowed.
+ if (cookieSize != 0) {
+ // Save the current size without a cookie. This shouldn't be
+ // used if there was overflow.
+ sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
+ }
+
+ // On overflow, produce a -1 so operator new will fail.
+ if (hasAnyOverflow) {
+ size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
+ } else {
+ size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+ }
+
+ // Otherwise, we might need to use the overflow intrinsics.
+ } else {
+ // There are up to five conditions we need to test for:
+ // 1) if isSigned, we need to check whether numElements is negative;
+ // 2) if numElementsWidth > sizeWidth, we need to check whether
+ // numElements is larger than something representable in size_t;
+ // 3) if minElements > 0, we need to check whether numElements is smaller
+ // than that;
+ // 4) we need to compute
+ // sizeWithoutCookie := numElements * typeSizeMultiplier
+ // and check whether it overflows; and
+ // 5) if we need a cookie, we need to compute
+ // size := sizeWithoutCookie + cookieSize
+ // and check whether it overflows.
+
+ llvm::Value *hasOverflow = 0;
+
+ // If numElementsWidth > sizeWidth, then one way or another, we're
+ // going to have to do a comparison for (2), and this happens to
+ // take care of (1), too.
+ if (numElementsWidth > sizeWidth) {
+ llvm::APInt threshold(numElementsWidth, 1);
+ threshold <<= sizeWidth;
+
+ llvm::Value *thresholdV
+ = llvm::ConstantInt::get(numElementsType, threshold);
+
+ hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
+ numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
+
+ // Otherwise, if we're signed, we want to sext up to size_t.
+ } else if (isSigned) {
+ if (numElementsWidth < sizeWidth)
+ numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
+
+ // If there's a non-1 type size multiplier, then we can do the
+ // signedness check at the same time as we do the multiply
+ // because a negative number times anything will cause an
+ // unsigned overflow. Otherwise, we have to do it here. But at least
+ // in this case, we can subsume the >= minElements check.
+ if (typeSizeMultiplier == 1)
+ hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+
+ // Otherwise, zext up to size_t if necessary.
+ } else if (numElementsWidth < sizeWidth) {
+ numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
+ }
+
+ assert(numElements->getType() == CGF.SizeTy);
+
+ if (minElements) {
+ // Don't allow allocation of fewer elements than we have initializers.
+ if (!hasOverflow) {
+ hasOverflow = CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+ } else if (numElementsWidth > sizeWidth) {
+ // The other existing overflow subsumes this check.
+ // We do an unsigned comparison, since any signed value < -1 is
+ // taken care of either above or below.
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow,
+ CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+ }
+ }
+
+ size = numElements;
+
+ // Multiply by the type size if necessary. This multiplier
+ // includes all the factors for nested arrays.
+ //
+ // This step also causes numElements to be scaled up by the
+ // nested-array factor if necessary. Overflow on this computation
+ // can be ignored because the result shouldn't be used if
+ // allocation fails.
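+ // (llvm.umul.with.overflow yields a {product, did-overflow} pair; the
+ // overflow bit is folded into hasOverflow below.)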
+ if (typeSizeMultiplier != 1) {
+ llvm::Value *umul_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
+
+ llvm::Value *tsmV =
+ llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
+ llvm::Value *result =
+ CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+
+ // Also scale up numElements by the array size multiplier.
+ if (arraySizeMultiplier != 1) {
+ // If the base element type size is 1, then we can re-use the
+ // multiply we just did.
+ if (typeSize.isOne()) {
+ assert(arraySizeMultiplier == typeSizeMultiplier);
+ numElements = size;
+
+ // Otherwise we need a separate multiply.
+ } else {
+ llvm::Value *asmV =
+ llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
+ numElements = CGF.Builder.CreateMul(numElements, asmV);
+ }
+ }
+ } else {
+ // numElements doesn't need to be scaled.
+ assert(arraySizeMultiplier == 1);
+ }
+
+ // Add in the cookie size if necessary.
+ if (cookieSize != 0) {
+ sizeWithoutCookie = size;
+
+ llvm::Value *uadd_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
+
+ llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
+ llvm::Value *result =
+ CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+ }
+
+ // If we had any possibility of dynamic overflow, make a select to
+ // overwrite 'size' with an all-ones value, which should cause
+ // operator new to throw.
+ if (hasOverflow)
+ size = CGF.Builder.CreateSelect(hasOverflow,
+ llvm::Constant::getAllOnesValue(CGF.SizeTy),
+ size);
+ }
+
+ if (cookieSize == 0)
+ sizeWithoutCookie = size;
+ else
+ assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
+
+ return size;
+}
+
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
+ QualType AllocType, llvm::Value *NewPtr) {
+
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
+ Alignment),
+ false);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else {
+ AggValueSlot Slot
+ = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ CGF.EmitAggExpr(Init, Slot);
+
+ CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
+ }
+}
+
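+ // Initializes the elements of a new[]'d array: explicit initializers from
+ // a braced list first, then a loop (with a phi'd cursor) that fills the
+ // remainder with the array filler, under a partial-destruction EH cleanup.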
+void
+CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
+ QualType elementType,
+ llvm::Value *beginPtr,
+ llvm::Value *numElements) {
+ if (!E->hasInitializer())
+ return; // We have a POD type.
+
+ llvm::Value *explicitPtr = beginPtr;
+ // Find the end of the array, hoisted out of the loop.
+ llvm::Value *endPtr =
+ Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
+
+ unsigned initializerElements = 0;
+
+ const Expr *Init = E->getInitializer();
+ llvm::AllocaInst *endOfInit = 0;
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ // If the initializer is an initializer list, first do the explicit elements.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ initializerElements = ILE->getNumInits();
+
+ // Enter a partial-destruction cleanup if necessary.
+ if (needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
+ cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
+ pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ }
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
+ StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
+ explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
+ }
+
+ // The remaining elements are filled with the array filler expression.
+ Init = ILE->getArrayFiller();
+ }
+
+ // Create the continuation block.
+ llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
+
+ // If the number of elements isn't constant, we have to now check if there is
+ // anything left to initialize.
+ if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
+ // If all elements have already been initialized, skip the whole loop.
+ if (constNum->getZExtValue() <= initializerElements) {
+ // If there was a cleanup, deactivate it.
+ if (cleanupDominator)
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ return;
+ }
+ } else {
+ llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
+ "array.isempty");
+ Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
+ EmitBlock(nonEmptyBB);
+ }
+
+ // Enter the loop.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
+
+ EmitBlock(loopBB);
+
+ // Set up the current-element phi.
+ llvm::PHINode *curPtr =
+ Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
+ curPtr->addIncoming(explicitPtr, entryBB);
+
+ // Store the new cleanup position for irregular cleanups.
+ if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
+
+ // Enter a partial-destruction cleanup if necessary.
+ if (!cleanupDominator && needsEHCleanup(dtorKind)) {
+ pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
+ }
+
+ // Emit the initializer into this element.
+ StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
+
+ // Leave the cleanup if we entered one.
+ if (cleanupDominator) {
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
+ // Advance to the next element.
+ llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
+
+ // Check whether we've gotten to the end of the array and, if so,
+ // exit the loop.
+ llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
+ Builder.CreateCondBr(isEnd, contBB, loopBB);
+ curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
+
+ EmitBlock(contBB);
+}
+
+static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
+ llvm::Value *NewPtr, llvm::Value *Size) {
+ CGF.EmitCastToVoidPtr(NewPtr);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
+ CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
+ Alignment.getQuantity(), false);
+}
+
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ QualType ElementType,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie) {
+ const Expr *Init = E->getInitializer();
+ if (E->isArray()) {
+ if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
+ CXXConstructorDecl *Ctor = CCE->getConstructor();
+ bool RequiresZeroInitialization = false;
+ if (Ctor->isTrivial()) {
+ // If the new expression did not specify value-initialization, then there
+ // is no initialization.
+ if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
+ return;
+
+ if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
+ return;
+ }
+
+ RequiresZeroInitialization = true;
+ }
+
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ CCE->arg_begin(), CCE->arg_end(),
+ RequiresZeroInitialization);
+ return;
+ } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
+ CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
+ return;
+ }
+ CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
+ return;
+ }
+
+ if (!Init)
+ return;
+
+ StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
+}
+
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression.
+ class CallDeleteDuringNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *Ptr;
+ llvm::Value *AllocSize;
+
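+ // The saved placement arguments live in trailing storage directly after
+ // this object; EHScopeStack allocates the extra bytes reported by
+ // getExtraSize().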
+ RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(RValue);
+ }
+
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ llvm::Value *AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, RValue Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.add(RValue::get(Ptr), *AI++);
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2)
+ DeleteArgs.add(RValue::get(AllocSize), *AI++);
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I)
+ DeleteArgs.add(getPlacementArgs()[I], *AI++);
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression when the new expression is
+ /// conditional.
+ class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ DominatingValue<RValue>::saved_type Ptr;
+ DominatingValue<RValue>::saved_type AllocSize;
+
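+ // Inside a conditional branch the raw IR values may not dominate the
+ // cleanup, so everything is captured as DominatingValue saved_types and
+ // restored when the cleanup is emitted.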
+ DominatingValue<RValue>::saved_type *getPlacementArgs() {
+ return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ }
+
+ CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ DominatingValue<RValue>::saved_type Ptr,
+ DominatingValue<RValue>::saved_type AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.add(Ptr.restore(CGF), *AI++);
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2) {
+ RValue RV = AllocSize.restore(CGF);
+ DeleteArgs.add(RV, *AI++);
+ }
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+ RValue RV = getPlacementArgs()[I].restore(CGF);
+ DeleteArgs.add(RV, *AI++);
+ }
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+}
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *AllocSize,
+ const CallArgList &NewArgs) {
+ // If we're not inside a conditional branch, then the cleanup will
+ // dominate and we can do the easier (and more efficient) thing.
+ if (!CGF.isInConditionalBranch()) {
+ CallDeleteDuringNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr, AllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
+
+ return;
+ }
+
+ // Otherwise, we need to save all this stuff.
+ DominatingValue<RValue>::saved_type SavedNewPtr =
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
+ DominatingValue<RValue>::saved_type SavedAllocSize =
+ DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+
+ CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I,
+ DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
+
+ CGF.initFullExprCleanup();
+}
+
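+ // Lowers a new-expression end to end: compute the (overflow-checked)
+ // allocation size, call operator new, null-check the result if the
+ // allocator is non-throwing, write the array cookie if needed, run the
+ // initializer under an operator-delete cleanup, and return the adjusted
+ // pointer.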
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ // The element type being allocated.
+ QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
+
+ // 1. Build a call to the allocation function.
+ FunctionDecl *allocator = E->getOperatorNew();
+ const FunctionProtoType *allocatorType =
+ allocator->getType()->castAs<FunctionProtoType>();
+
+ CallArgList allocatorArgs;
+
+ // The allocation size is the first argument.
+ QualType sizeType = getContext().getSizeType();
+
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers.
+ unsigned minElements = 0;
+ if (E->isArray() && E->hasInitializer()) {
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ minElements = ILE->getNumInits();
+ }
+
+ llvm::Value *numElements = 0;
+ llvm::Value *allocSizeWithoutCookie = 0;
+ llvm::Value *allocSize =
+ EmitCXXNewAllocSize(*this, E, minElements, numElements,
+ allocSizeWithoutCookie);
+
+ allocatorArgs.add(RValue::get(allocSize), sizeType);
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
+ ++i, ++placementArg) {
+ QualType argType = allocatorType->getArgType(i);
+
+ assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
+ placementArg->getType()) &&
+ "type mismatch in call argument!");
+
+ EmitCallArg(allocatorArgs, *placementArg, argType);
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((placementArg == E->placement_arg_end() ||
+ allocatorType->isVariadic()) &&
+ "Extra arguments to non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
+ placementArg != placementArgsEnd; ++placementArg) {
+ EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
+ }
+
+ // Emit the allocation call. If the allocator is a global placement
+ // operator, just "inline" it directly.
+ RValue RV;
+ if (allocator->isReservedGlobalPlacementOperator()) {
+ assert(allocatorArgs.size() == 2);
+ RV = allocatorArgs[1].RV;
+ // TODO: kill any unnecessary computations done for the size
+ // argument.
+ } else {
+ RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
+ allocatorType),
+ CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
+ allocatorArgs, allocator);
+ }
+
+ // Emit a null check on the allocation result if the allocation
+ // function is allowed to return null (because it has a non-throwing
+ // exception spec; for this part, we inline
+ // CXXNewExpr::shouldNullCheckAllocation()) and we have an
+ // interesting initializer.
+ bool nullCheck = allocatorType->isNothrow(getContext()) &&
+ (!allocType.isPODType(getContext()) || E->hasInitializer());
+
+ llvm::BasicBlock *nullCheckBB = 0;
+ llvm::BasicBlock *contBB = 0;
+
+ llvm::Value *allocation = RV.getScalarVal();
+ unsigned AS =
+ cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
+
+ // The null-check means that the initializer is conditionally
+ // evaluated.
+ ConditionalEvaluation conditional(*this);
+
+ if (nullCheck) {
+ conditional.begin(*this);
+
+ nullCheckBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
+ contBB = createBasicBlock("new.cont");
+
+ llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
+ Builder.CreateCondBr(isNull, contBB, notNullBB);
+ EmitBlock(notNullBB);
+ }
+
+ // If there's an operator delete, enter a cleanup to call it if an
+ // exception is thrown.
+ EHScopeStack::stable_iterator operatorDeleteCleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ if (E->getOperatorDelete() &&
+ !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ operatorDeleteCleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
+ }
+
+ assert((allocSize == allocSizeWithoutCookie) ==
+ CalculateCookiePadding(*this, E).isZero());
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(E->isArray());
+ allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
+ numElements,
+ E, allocType);
+ }
+
+ llvm::Type *elementPtrTy
+ = ConvertTypeForMem(allocType)->getPointerTo(AS);
+ llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
+
+ EmitNewInitializer(*this, E, allocType, result, numElements,
+ allocSizeWithoutCookie);
+ if (E->isArray()) {
+ // NewPtr is a pointer to the base element type. If we're
+ // allocating an array of arrays, we'll need to cast back to the
+ // array pointer type.
+ llvm::Type *resultType = ConvertTypeForMem(E->getType());
+ if (result->getType() != resultType)
+ result = Builder.CreateBitCast(result, resultType);
+ }
+
+ // Deactivate the 'operator delete' cleanup if we finished
+ // initialization.
+ if (operatorDeleteCleanup.isValid()) {
+ DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
+ if (nullCheck) {
+ conditional.end(*this);
+
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ EmitBlock(contBB);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
+ PHI->addIncoming(result, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
+ nullCheckBB);
+
+ result = PHI;
+ }
+
+ return result;
+}
+
+void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
+ llvm::Value *Ptr,
+ QualType DeleteTy) {
+ assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+
+ const FunctionProtoType *DeleteFTy =
+ DeleteFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList DeleteArgs;
+
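+ // A two-argument usual deallocation function has the form
+ //   void operator delete(void *, size_t);
+ // in which case we also pass the size of the deleted object.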
+ // Check if we need to pass the size to the delete operator.
+ llvm::Value *Size = 0;
+ QualType SizeTy;
+ if (DeleteFTy->getNumArgs() == 2) {
+ SizeTy = DeleteFTy->getArgType(1);
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ Size = llvm::ConstantInt::get(ConvertType(SizeTy),
+ DeleteTypeSize.getQuantity());
+ }
+
+ QualType ArgTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+ DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
+
+ if (Size)
+ DeleteArgs.add(RValue::get(Size), SizeTy);
+
+ // Emit the call to delete.
+ EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
+ CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
+ DeleteArgs, DeleteFD);
+}
+
+namespace {
+ /// Calls the given 'operator delete' on a single object.
+ struct CallObjectDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ QualType ElementType;
+
+ CallObjectDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ QualType ElementType)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+ }
+ };
+}
+
+/// Emit the code for deleting a single object.
+static void EmitObjectDelete(CodeGenFunction &CGF,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ bool UseGlobalDelete) {
+ // Find the destructor for the type, if applicable. If the
+ // destructor is virtual, we'll just emit the vcall and return.
+ const CXXDestructorDecl *Dtor = 0;
+ if (const RecordType *RT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
+ Dtor = RD->getDestructor();
+
+ if (Dtor->isVirtual()) {
+ if (UseGlobalDelete) {
+ // If we're supposed to call the global delete, make sure we do so
+ // even if the destructor throws.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete,
+ ElementType);
+ }
+
+ llvm::Type *Ty =
+ CGF.getTypes().GetFunctionType(
+ CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
+
+ llvm::Value *Callee
+ = CGF.BuildVirtualCall(Dtor,
+ UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
+ Ptr, Ty);
+ CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+ 0, 0);
+
+ if (UseGlobalDelete) {
+ CGF.PopCleanupBlock();
+ }
+
+ return;
+ }
+ }
+ }
+
+ // Make sure that we call delete even if the dtor throws.
+ // This doesn't have to be a conditional cleanup because we're going
+ // to pop it off in a second.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete, ElementType);
+
+ if (Dtor)
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Ptr);
+ else if (CGF.getLangOpts().ObjCAutoRefCount &&
+ ElementType->isObjCLifetimeType()) {
+ switch (ElementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ // Load the pointer value.
+ llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
+ ElementType.isVolatileQualified());
+
+ CGF.EmitARCRelease(PtrValue, /*precise*/ true);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCDestroyWeak(Ptr);
+ break;
+ }
+ }
+
+ CGF.PopCleanupBlock();
+}
+
+namespace {
+ /// Calls the given 'operator delete' on an array of objects.
+ struct CallArrayDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *NumElements;
+ QualType ElementType;
+ CharUnits CookieSize;
+
+ CallArrayDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *NumElements,
+ QualType ElementType,
+ CharUnits CookieSize)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
+ ElementType(ElementType), CookieSize(CookieSize) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const FunctionProtoType *DeleteFTy =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
+
+ CallArgList Args;
+
+ // Pass the pointer as the first argument.
+ QualType VoidPtrTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr
+ = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
+ Args.add(RValue::get(DeletePtr), VoidPtrTy);
+
+ // Pass the original requested size as the second argument.
+ if (DeleteFTy->getNumArgs() == 2) {
+ QualType size_t = DeleteFTy->getArgType(1);
+ llvm::IntegerType *SizeTy
+ = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
+
+ CharUnits ElementTypeSize =
+ CGF.CGM.getContext().getTypeSizeInChars(ElementType);
+
+ // The size of an element, multiplied by the number of elements.
+ llvm::Value *Size
+ = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
+ Size = CGF.Builder.CreateMul(Size, NumElements);
+
+ // Plus the size of the cookie if applicable.
+ if (!CookieSize.isZero()) {
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
+ }
+
+ Args.add(RValue::get(Size), size_t);
+ }
+
+ // Emit the call to delete.
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), Args, OperatorDelete);
+ }
+ };
+}
+
+/// Emit the code for deleting an array of objects.
+static void EmitArrayDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *E,
+ llvm::Value *deletedPtr,
+ QualType elementType) {
+ llvm::Value *numElements = 0;
+ llvm::Value *allocatedPtr = 0;
+ CharUnits cookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
+ numElements, allocatedPtr, cookieSize);
+
+ assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
+
+ // Make sure that we call delete even if one of the dtors throws.
+ const FunctionDecl *operatorDelete = E->getOperatorDelete();
+ CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
+ allocatedPtr, operatorDelete,
+ numElements, elementType,
+ cookieSize);
+
+ // Destroy the elements.
+ if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
+ assert(numElements && "no element count for a type with a destructor!");
+
+ llvm::Value *arrayEnd =
+ CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
+
+ // Note that it is legal to allocate a zero-length array, and we
+ // can never fold the check away because the length should always
+ // come from a cookie.
+ CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
+ CGF.getDestroyer(dtorKind),
+ /*checkZeroLength*/ true,
+ CGF.needsEHCleanup(dtorKind));
+ }
+
+ // Pop the cleanup block.
+ CGF.PopCleanupBlock();
+}
+
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+
+ // Get at the argument before we performed the implicit conversion
+ // to void*.
+ const Expr *Arg = E->getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ llvm::Value *Ptr = EmitScalarExpr(Arg);
+
+ // Null check the pointer.
+ llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+ llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
+
+ Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+ EmitBlock(DeleteNotNull);
+
+ // We might be deleting a pointer to array. If so, GEP down to the
+ // first non-array element.
+ // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+ if (DeleteTy->isConstantArrayType()) {
+ llvm::Value *Zero = Builder.getInt32(0);
+ SmallVector<llvm::Value*,8> GEP;
+
+ GEP.push_back(Zero); // point at the outermost array
+
+ // For each layer of array type we're pointing at:
+ while (const ConstantArrayType *Arr
+ = getContext().getAsConstantArrayType(DeleteTy)) {
+ // 1. Unpeel the array type.
+ DeleteTy = Arr->getElementType();
+
+ // 2. GEP to the first element of the array.
+ GEP.push_back(Zero);
+ }
+
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
+ }
+
+ assert(ConvertTypeForMem(DeleteTy) ==
+ cast<llvm::PointerType>(Ptr->getType())->getElementType());
+
+ if (E->isArrayForm()) {
+ EmitArrayDelete(*this, E, Ptr, DeleteTy);
+ } else {
+ EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
+ E->isGlobalDelete());
+ }
+
+ EmitBlock(DeleteEnd);
+}
+
+static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_typeid();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+}
+
+static void EmitBadTypeidCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadTypeidFn(CGF);
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
+ const Expr *E,
+ llvm::Type *StdTypeInfoPtrTy) {
+ // Get the vtable pointer.
+ llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
+
+ // C++ [expr.typeid]p2:
+ // If the glvalue expression is obtained by applying the unary * operator to
+ // a pointer and the pointer is a null pointer value, the typeid expression
+ // throws the std::bad_typeid exception.
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
+ if (UO->getOpcode() == UO_Deref) {
+ llvm::BasicBlock *BadTypeidBlock =
+ CGF.createBasicBlock("typeid.bad_typeid");
+ llvm::BasicBlock *EndBlock =
+ CGF.createBasicBlock("typeid.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
+ CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
+
+ CGF.EmitBlock(BadTypeidBlock);
+ EmitBadTypeidCall(CGF);
+ CGF.EmitBlock(EndBlock);
+ }
+ }
+
+ llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
+ StdTypeInfoPtrTy->getPointerTo());
+
+ // Load the type info.
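+ // (In the Itanium ABI the type_info pointer sits in the vtable slot
+ // immediately before the address point, hence the -1 index below.)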
+ Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ return CGF.Builder.CreateLoad(Value);
+}
+
+llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ llvm::Type *StdTypeInfoPtrTy =
+ ConvertType(E->getType())->getPointerTo();
+
+ if (E->isTypeOperand()) {
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
+ return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
+ }
+
+ // C++ [expr.typeid]p2:
+ // When typeid is applied to a glvalue expression whose type is a
+ // polymorphic class type, the result refers to a std::type_info object
+ // representing the type of the most derived object (that is, the dynamic
+ // type) to which the glvalue refers.
+ if (E->getExprOperand()->isGLValue()) {
+ if (const RecordType *RT =
+ E->getExprOperand()->getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isPolymorphic())
+ return EmitTypeidFromVTable(*this, E->getExprOperand(),
+ StdTypeInfoPtrTy);
+ }
+ }
+
+ QualType OperandTy = E->getExprOperand()->getType();
+ return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
+ StdTypeInfoPtrTy);
+}
+
+static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
+ // void *__dynamic_cast(const void *sub,
+ // const abi::__class_type_info *src,
+ // const abi::__class_type_info *dst,
+ // std::ptrdiff_t src2dst_offset);
+
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
+}
+
+static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_cast();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
+}
+
+static void EmitBadCastCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadCastFn(CGF);
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+static llvm::Value *
+EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ QualType SrcTy, QualType DestTy,
+ llvm::BasicBlock *CastEnd) {
+ llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+
+ if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
+ if (PTy->getPointeeType()->isVoidType()) {
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void," then the result is a pointer to the
+ // most derived object pointed to by v.
+
+ // Get the vtable pointer.
+ llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
+
+ // Get the offset-to-top from the vtable.
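+ // (Itanium ABI: offset-to-top lives two slots before the address point;
+ // adding it to the object pointer yields the most-derived object.)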
+ llvm::Value *OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
+
+ // Finally, add the offset to the pointer.
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
+
+ return CGF.Builder.CreateBitCast(Value, DestLTy);
+ }
+ }
+
+ QualType SrcRecordTy;
+ QualType DestRecordTy;
+
+ if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
+ SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
+ DestRecordTy = DestPTy->getPointeeType();
+ } else {
+ SrcRecordTy = SrcTy;
+ DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
+ }
+
+ assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
+ assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
+
+ llvm::Value *SrcRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
+ llvm::Value *DestRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
+
+ // FIXME: Actually compute a hint here.
+ llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
+
+ // Emit the call to __dynamic_cast.
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
+ SrcRTTI, DestRTTI, OffsetHint);
+ Value = CGF.Builder.CreateBitCast(Value, DestLTy);
+
+ // C++ [expr.dynamic.cast]p9:
+ // A failed cast to reference type throws std::bad_cast.
+ if (DestTy->isReferenceType()) {
+ llvm::BasicBlock *BadCastBlock =
+ CGF.createBasicBlock("dynamic_cast.bad_cast");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
+ CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
+
+ CGF.EmitBlock(BadCastBlock);
+ EmitBadCastCall(CGF);
+ }
+
+ return Value;
+}
+
+static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
+ QualType DestTy) {
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ if (DestTy->isPointerType())
+ return llvm::Constant::getNullValue(DestLTy);
+
+ // C++ [expr.dynamic.cast]p9:
+ // A failed cast to reference type throws std::bad_cast.
+ EmitBadCastCall(CGF);
+
+ CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
+ return llvm::UndefValue::get(DestLTy);
+}
+
+llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
+ const CXXDynamicCastExpr *DCE) {
+ QualType DestTy = DCE->getTypeAsWritten();
+
+ if (DCE->isAlwaysNull())
+ return EmitDynamicCastToNull(*this, DestTy);
+
+ QualType SrcTy = DCE->getSubExpr()->getType();
+
+ // C++ [expr.dynamic.cast]p4:
+ // If the value of v is a null pointer value in the pointer case, the result
+ // is the null pointer value of type T.
+ bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
+
+ if (ShouldNullCheckSrcValue) {
+ CastNull = createBasicBlock("dynamic_cast.null");
+ CastNotNull = createBasicBlock("dynamic_cast.notnull");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ EmitBranch(CastEnd);
+
+ EmitBlock(CastNull);
+ EmitBranch(CastEnd);
+ }
+
+ EmitBlock(CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
+ RunCleanupsScope Scope(*this);
+ LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
+ Slot.getAlignment());
+
+ CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
+ for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
+ i != e; ++i, ++CurField) {
+ // Emit initialization
+
+ LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
+}
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..0233745
--- /dev/null
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,839 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
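+ // A complex value is modelled as a (real, imag) pair of scalar values;
+ // the emitter below walks the expression tree producing such pairs.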
+namespace {
+class ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ // True if we should ignore the value of the real or imaginary part,
+ // respectively.
+ bool IgnoreReal;
+ bool IgnoreImag;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+
+ /// EmitLoadOfLValue - Given an expression with complex type that represents
+ /// an l-value, this method emits the address of the l-value, then loads
+ /// and returns the result.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(CGF.EmitLValue(E));
+ }
+
+ ComplexPairTy EmitLoadOfLValue(LValue LV) {
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+ }
+
+ /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+ /// the real and imaginary pieces.
+ ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+ /// EmitStoreThroughLValue - Given an l-value of complex type, store
+ /// a complex number into it.
+ void EmitStoreThroughLValue(ComplexPairTy Val, LValue LV) {
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
+ }
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+ /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy Visit(Expr *E) {
+ return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
+ }
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ llvm_unreachable("Stmt can't have complex result type!");
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+ ComplexPairTy
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+
+ llvm::ConstantStruct *pair =
+ cast<llvm::ConstantStruct>(result.getValue());
+ return ComplexPairTy(pair->getOperand(0), pair->getOperand(1));
+ }
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getComplexVal();
+ }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+ return CGF.getOpaqueRValueMapping(E).getComplexVal();
+ }
+
+ ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getComplexVal();
+ }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+ }
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitUnaryPlus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ return Visit(E->getSubExpr());
+ }
+  ComplexPairTy VisitUnaryMinus(const UnaryOperator *E);
+  ComplexPairTy VisitUnaryNot(const UnaryOperator *E);
+  // LNot, Real, Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null =
+ llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &),
+ ComplexPairTy &Val);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+
+ LValue EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val);
+  ComplexPairTy VisitBinAssign(const BinaryOperator *E);
+  ComplexPairTy VisitBinComma(const BinaryOperator *E);
+
+ ComplexPairTy
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+
+ ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getComplexVal();
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+/// the real and imaginary parts, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::Value *Real=0, *Imag=0;
+
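+  // Even when one half of the result is unused, a volatile source must still
+  // be accessed; IgnoreReal/IgnoreImag only elide loads from non-volatile
+  // operands.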
+ if (!IgnoreReal || isVolatile) {
+ llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
+ SrcPtr->getName() + ".realp");
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+ }
+
+ if (!IgnoreImag || isVolatile) {
+ llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
+ SrcPtr->getName() + ".imagp");
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+ }
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = SrcType->getAs<ComplexType>()->getElementType();
+ DestType = DestType->getAs<ComplexType>()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ QualType DestTy) {
+ switch (CK) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
+ // Atomic to non-atomic casts may be more than a no-op for some platforms and
+ // for some types.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ case CK_UserDefinedConversion:
+ return Visit(Op);
+
+ case CK_LValueBitCast: {
+ llvm::Value *V = CGF.EmitLValue(Op).getAddress();
+ V = Builder.CreateBitCast(V,
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
+ }
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+
+ llvm::Value *ResR, *ResI;
+ if (Op.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFNeg(Op.first, "neg.r");
+ ResI = Builder.CreateFNeg(Op.second, "neg.i");
+ } else {
+ ResR = Builder.CreateNeg(Op.first, "neg.r");
+ ResI = Builder.CreateNeg(Op.second, "neg.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ // ~(a+ib) = a + i*-b
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI;
+ if (Op.second->getType()->isFloatingPointTy())
+ ResI = Builder.CreateFNeg(Op.second, "conj.i");
+ else
+ ResI = Builder.CreateNeg(Op.second, "conj.i");
+
+ return ComplexPairTy(Op.first, ResI);
+}
+
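+// Complex addition is componentwise: (a+ib) + (c+id) = (a+c) + i(b+d).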
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ } else {
+ ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ } else {
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
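+// (a+ib) * (c+id) = (ac - bd) + i(bc + ad).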
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+ using llvm::Value;
+ Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+    Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second, "mul.rr");
+ ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
+ } else {
+ Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+    Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second, "mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
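+// Complex division below uses the textbook formula. Note that the (cc+dd)
+// denominator can overflow or underflow even when the true quotient is
+// representable, so this is not a numerically robust implementation.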
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+ llvm::Value *DSTr, *DSTi;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
+
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
+ } else {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
+
+ if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6);
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6);
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ BinOpInfo Ops;
+ Ops.LHS = Visit(E->getLHS());
+ Ops.RHS = Visit(E->getRHS());
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+LValue ComplexExprEmitter::
+EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
+ ComplexPairTy &Val) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ QualType LHSTy = E->getLHS()->getType();
+
+ BinOpInfo OpInfo;
+
+  // Load the RHS and LHS operands.
+  // __block variables need the RHS evaluated first; evaluating it first also
+  // tends to improve codegen a little.
+ OpInfo.Ty = E->getComputationResultType();
+
+ // The RHS should have been converted to the computation type.
+ assert(OpInfo.Ty->isAnyComplexType());
+ assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
+ E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Load from the l-value.
+ ComplexPairTy LHSComplexPair = EmitLoadOfLValue(LHS);
+
+ OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result back to the LHS type.
+ Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ Val = Result;
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreThroughLValue(Result, LHS);
+
+ return LHS;
+}
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ ComplexPairTy Val;
+ LValue LV = EmitCompoundAssignLValue(E, Func, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+}
+
+LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val) {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
+ "Invalid assignment");
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+
+ // Emit the RHS. __block variables need the RHS evaluated first.
+ Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreThroughLValue(Val, LHS);
+
+ return LHS;
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ ComplexPairTy Val;
+ LValue LV = EmitBinAssignLValue(E, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ return Visit(E->getRHS());
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
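+  // Branch on the condition, evaluate each arm in its own block, and merge
+  // the (real, imag) pairs in the continuation block with a pair of PHIs.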
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(LHSBlock);
+ ComplexPairTy LHS = Visit(E->getTrueExpr());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+ eval.end(CGF);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ ComplexPairTy RHS = Visit(E->getFalseExpr());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.r");
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.i");
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreReal();
+ (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+ Ignore = TestAndClearIgnoreImag();
+ (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+
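+  // An init list for a complex supplies (real, imag) as two scalars, a
+  // single initializer of complex type, or nothing at all, which
+  // zero-initializes both parts.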
+ if (E->getNumInits() == 2) {
+ llvm::Value *Real = CGF.EmitScalarExpr(E->getInit(0));
+ llvm::Value *Imag = CGF.EmitScalarExpr(E->getInit(1));
+ return ComplexPairTy(Real, Imag);
+ } else if (E->getNumInits() == 1) {
+ return Visit(E->getInit(0));
+ }
+
+  // Empty init list initializes to null.
+ assert(E->getNumInits() == 0 && "Unexpected number of inits");
+ QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
+  llvm::Type *LTy = CGF.ConvertType(Ty);
+  llvm::Value *zeroConstant = llvm::Constant::getNullValue(LTy);
+  return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+  // FIXME: Volatility.
+ return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning the result.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ bool SrcIsVolatile) {
+ return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
+
+LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
+ assert(E->getOpcode() == BO_Assign);
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
+
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+  ComplexPairTy (ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
+ switch (E->getOpcode()) {
+ case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
+ case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
+ case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
+ case BO_AddAssign: Op = &ComplexExprEmitter::EmitBinAdd; break;
+
+ default:
+ llvm_unreachable("unexpected complex compound assignment");
+ }
+
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+}
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..bc9f9ef
--- /dev/null
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,1496 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
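+/// ConstStructBuilder - Builds the LLVM constant for a struct or union
+/// initializer by appending field constants at increasing byte offsets,
+/// inserting undef padding for the gaps and falling back to a packed LLVM
+/// struct when a field's natural alignment would overshoot its offset in
+/// the record.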
+class ConstStructBuilder {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+
+ bool Packed;
+ CharUnits NextFieldOffsetInChars;
+ CharUnits LLVMStructAlignment;
+ SmallVector<llvm::Constant *, 32> Elements;
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE);
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ const APValue &Value, QualType ValTy);
+
+private:
+ ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+ : CGM(CGM), CGF(CGF), Packed(false),
+ NextFieldOffsetInChars(CharUnits::Zero()),
+ LLVMStructAlignment(CharUnits::One()) { }
+
+ void AppendVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
+
+ void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
+
+ void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr);
+
+ void AppendPadding(CharUnits PadSize);
+
+ void AppendTailPadding(CharUnits RecordSize);
+
+ void ConvertStructToPacked();
+
+ bool Build(InitListExpr *ILE);
+ void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
+ llvm::Constant *VTable, const CXXRecordDecl *VTableClass,
+ CharUnits BaseOffset);
+ llvm::Constant *Finalize(QualType Ty);
+
+ CharUnits getAlignment(const llvm::Constant *C) const {
+ if (Packed) return CharUnits::One();
+ return CharUnits::fromQuantity(
+ CGM.getTargetData().getABITypeAlignment(C->getType()));
+ }
+
+ CharUnits getSizeInChars(const llvm::Constant *C) const {
+ return CharUnits::fromQuantity(
+ CGM.getTargetData().getTypeAllocSize(C->getType()));
+ }
+};
+
+void ConstStructBuilder::AppendVTablePointer(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ // Find the appropriate vtable within the vtable group.
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
+ llvm::Value *Indices[] = {
+ llvm::ConstantInt::get(CGM.Int64Ty, 0),
+ llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
+ };
+ llvm::Constant *VTableAddressPoint =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices);
+
+ // Add the vtable at the start of the object.
+ AppendBytes(Base.getBaseOffset(), VTableAddressPoint);
+}
+
+void ConstStructBuilder::
+AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ const ASTContext &Context = CGM.getContext();
+
+ CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
+
+ AppendBytes(FieldOffsetInChars, InitCst);
+}
+
+void ConstStructBuilder::
+AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
+
+ assert(NextFieldOffsetInChars <= FieldOffsetInChars
+ && "Field offset mismatch!");
+
+ CharUnits FieldAlignment = getAlignment(InitCst);
+
+ // Round up the field offset to the alignment of the field type.
+ CharUnits AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
+
+ if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
+ }
+
+ if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
+ "Did not add enough padding!");
+
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
+ }
+
+ // Add the field.
+ Elements.push_back(InitCst);
+ NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
+ getSizeInChars(InitCst);
+
+ if (Packed)
+ assert(LLVMStructAlignment == CharUnits::One() &&
+ "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+}
+
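+/// AppendBitField - Append the value of a bit-field one byte at a time:
+/// bits that share a byte with the previous element are OR'd into it, any
+/// whole bytes are appended directly, and the remaining bits fill a final
+/// byte, shifted according to the target's endianness.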
+void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset,
+ llvm::ConstantInt *CI) {
+ const ASTContext &Context = CGM.getContext();
+ const uint64_t CharWidth = Context.getCharWidth();
+ uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset > NextFieldOffsetInBits) {
+ // We need to add padding.
+ CharUnits PadSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
+ Context.getTargetInfo().getCharAlign()));
+
+ AppendPadding(PadSize);
+ }
+
+ uint64_t FieldSize = Field->getBitWidthValue(Context);
+
+ llvm::APInt FieldValue = CI->getValue();
+
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue = FieldValue.zext(FieldSize);
+
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue = FieldValue.trunc(FieldSize);
+
+ NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset < NextFieldOffsetInBits) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
+
+ unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
+
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
+
+ llvm::APInt Tmp = FieldValue;
+
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue = FieldValue.trunc(NewFieldWidth);
+ } else {
+ Tmp = Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
+ }
+ }
+
+ Tmp = Tmp.zext(CharWidth);
+ if (CGM.getTargetData().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
+ }
+
+ // 'or' in the bits that go into the previous byte.
+ llvm::Value *LastElt = Elements.back();
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
+ Tmp |= Val->getValue();
+ else {
+ assert(isa<llvm::UndefValue>(LastElt));
+ // If there is an undef field that we're adding to, it can either be a
+ // scalar undef (in which case, we just replace it with our field) or it
+ // is an array. If it is an array, we have to pull one byte off the
+ // array so that the other undef bytes stay around.
+ if (!isa<llvm::IntegerType>(LastElt->getType())) {
+        // The undef padding will be a multibyte array; create a new, smaller
+        // padding and then a hole for our i8 to get plopped into.
+ assert(isa<llvm::ArrayType>(LastElt->getType()) &&
+ "Expected array padding of undefs");
+ llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ assert(AT->getElementType()->isIntegerTy(CharWidth) &&
+ AT->getNumElements() != 0 &&
+ "Expected non-empty array padding of undefs");
+
+ // Remove the padding array.
+ NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
+ Elements.pop_back();
+
+ // Add the padding back in two chunks.
+ AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
+ AppendPadding(CharUnits::One());
+ assert(isa<llvm::UndefValue>(Elements.back()) &&
+ Elements.back()->getType()->isIntegerTy(CharWidth) &&
+ "Padding addition didn't work right");
+ }
+ }
+
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+ if (FitsCompletelyInPreviousByte)
+ return;
+ }
+
+ while (FieldValue.getBitWidth() > CharWidth) {
+ llvm::APInt Tmp;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ // We want the high bits.
+ Tmp =
+ FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue.trunc(CharWidth);
+
+ FieldValue = FieldValue.lshr(CharWidth);
+ }
+
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ ++NextFieldOffsetInChars;
+
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
+ }
+
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= CharWidth &&
+ "Should not have more than a byte left!");
+
+ if (FieldValue.getBitWidth() < CharWidth) {
+ if (CGM.getTargetData().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
+
+ FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
+ } else
+ FieldValue = FieldValue.zext(CharWidth);
+ }
+
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ ++NextFieldOffsetInChars;
+}
+
+void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
+ if (PadSize.isZero())
+ return;
+
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (PadSize > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
+
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == CharUnits::One() &&
+ "Padding must have 1 byte alignment!");
+
+ NextFieldOffsetInChars += getSizeInChars(C);
+}
+
+void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
+ assert(NextFieldOffsetInChars <= RecordSize &&
+ "Size mismatch!");
+
+ AppendPadding(RecordSize - NextFieldOffsetInChars);
+}
+
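+/// ConvertStructToPacked - Rebuild the element list with explicit undef
+/// padding so that every element keeps its current offset once the struct
+/// is marked packed (i.e. has alignment 1).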
+void ConstStructBuilder::ConvertStructToPacked() {
+ SmallVector<llvm::Constant *, 16> PackedElements;
+ CharUnits ElementOffsetInChars = CharUnits::Zero();
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
+
+ CharUnits ElementAlign = CharUnits::fromQuantity(
+ CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CharUnits AlignedElementOffsetInChars =
+ ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
+
+ if (AlignedElementOffsetInChars > ElementOffsetInChars) {
+ // We need some padding.
+ CharUnits NumChars =
+ AlignedElementOffsetInChars - ElementOffsetInChars;
+
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (NumChars > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
+
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInChars += getSizeInChars(Padding);
+ }
+
+ PackedElements.push_back(C);
+ ElementOffsetInChars += getSizeInChars(C);
+ }
+
+ assert(ElementOffsetInChars == NextFieldOffsetInChars &&
+ "Packing the struct changed its size!");
+
+ Elements.swap(PackedElements);
+ LLVMStructAlignment = CharUnits::One();
+ Packed = true;
+}
+
+bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ if (ILE->initializesStdInitializerList()) {
+ //CGM.ErrorUnsupported(ILE, "global std::initializer_list");
+ return false;
+ }
+
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields, they just affect layout.
+ if (Field->isUnnamedBitfield()) {
+ LastFD = (*Field);
+ continue;
+ }
+
+ // Get the initializer. A struct can include fields without initializers,
+ // we just use explicit null values for them.
+ llvm::Constant *EltInit;
+ if (ElementNo < ILE->getNumInits())
+ EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
+ Field->getType(), CGF);
+ else
+ EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!EltInit)
+ return false;
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+
+ return true;
+}
+
+namespace {
+struct BaseInfo {
+ BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
+ : Decl(Decl), Offset(Offset), Index(Index) {
+ }
+
+ const CXXRecordDecl *Decl;
+ CharUnits Offset;
+ unsigned Index;
+
+ bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
+};
+}
+
+void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
+ bool IsPrimaryBase, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ CharUnits Offset) {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Add a vtable pointer, if we need one and it hasn't already been added.
+ if (CD->isDynamicClass() && !IsPrimaryBase)
+ AppendVTablePointer(BaseSubobject(CD, Offset), VTable, VTableClass);
+
+ // Accumulate and sort bases, in order to visit them in address order, which
+ // may not be the same as declaration order.
+ llvm::SmallVector<BaseInfo, 8> Bases;
+ Bases.reserve(CD->getNumBases());
+ unsigned BaseNo = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
+ BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
+ assert(!Base->isVirtual() && "should not have virtual bases here");
+ const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
+ Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
+ }
+ std::stable_sort(Bases.begin(), Bases.end());
+
+ for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
+ BaseInfo &Base = Bases[I];
+
+ bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
+ Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+ VTable, VTableClass, Offset + Base.Offset);
+ }
+ }
+
+ unsigned FieldNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && Val.getUnionField() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields, they just affect layout.
+ if (Field->isUnnamedBitfield()) {
+ LastFD = (*Field);
+ continue;
+ }
+
+ // Emit the value of the initializer.
+ const APValue &FieldValue =
+ RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
+ llvm::Constant *EltInit =
+ CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
+ assert(EltInit && "EmitConstantValue can't fail");
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+}
+
+llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
+ RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ CharUnits LayoutSizeInChars = Layout.getSize();
+
+ if (NextFieldOffsetInChars > LayoutSizeInChars) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ } else {
+ // Append tail padding if necessary.
+ AppendTailPadding(LayoutSizeInChars);
+
+ CharUnits LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInChars <= LayoutSizeInChars &&
+ LLVMSizeInChars > LayoutSizeInChars) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
+ "Converting to packed did not help!");
+ }
+
+ assert(LayoutSizeInChars == NextFieldOffsetInChars &&
+ "Tail padding mismatch!");
+ }
+
+ // Pick the type to use. If the type is layout identical to the ConvertType
+ // type then use it, otherwise use whatever the builder produced for us.
+ llvm::StructType *STy =
+ llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
+ Elements, Packed);
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
+ if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
+ if (ValSTy->isLayoutIdentical(STy))
+ STy = ValSTy;
+ }
+
+ llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
+
+ assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
+ getSizeInChars(Result) && "Size mismatch!");
+
+ return Result;
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ return Builder.Finalize(ILE->getType());
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const APValue &Val,
+ QualType ValTy) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ llvm::Constant *VTable = 0;
+ if (CD && CD->isDynamicClass())
+ VTable = CGM.getVTables().GetAddrOfVTable(CD);
+
+ Builder.Build(Val, RD, false, VTable, CD, CharUnits::Zero());
+
+ return Builder.Finalize(ValTy);
+}
+
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
+/// This class only needs to handle two cases:
+/// 1) Literals (this is used by APValue emission to emit literals).
+/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
+/// constant fold these types).
+class ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+ llvm::LLVMContext &VMContext;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return 0;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
+ llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ Expr *subExpr = E->getSubExpr();
+ llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
+ if (!C) return 0;
+
+ llvm::Type *destType = ConvertType(E->getType());
+
+ switch (E->getCastKind()) {
+ case CK_ToUnion: {
+ // GCC cast to union extension
+ assert(E->getType()->isUnionType() &&
+ "Destination type is not union type!");
+
+ // Build a struct with the union sub-element as the first member,
+ // and padded to the appropriate size
+ SmallVector<llvm::Constant*, 2> Elts;
+ SmallVector<llvm::Type*, 2> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
+
+ assert(CurSize <= TotalSize && "Union size mismatch!");
+ if (unsigned NumPadBytes = TotalSize - CurSize) {
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (NumPadBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+ Elts.push_back(llvm::UndefValue::get(Ty));
+ Types.push_back(Ty);
+ }
+
+ llvm::StructType* STy =
+ llvm::StructType::get(C->getType()->getContext(), Types, false);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
+
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ return C;
+
+ case CK_Dependent: llvm_unreachable("saw dependent cast!");
+
+ case CK_ReinterpretMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
+
+ // These will never be supported.
+ case CK_ObjCObjectLValueCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return 0;
+
+ // These don't need to be handled here because Evaluate knows how to
+ // evaluate them in the cases where they can be folded.
+ case CK_BitCast:
+ case CK_ToVoid:
+ case CK_Dynamic:
+ case CK_LValueBitCast:
+ case CK_NullToMemberPointer:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_MemberPointerToBoolean:
+ case CK_VectorSplat:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_NullToPointer:
+ case CK_IntegralCast:
+ case CK_IntegralToPointer:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ return 0;
+ }
+ llvm_unreachable("Invalid CastKind");
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ return Visit(E->GetTemporaryExpr());
+ }
+
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ if (ILE->isStringLiteralInit())
+ return Visit(ILE->getInit(0));
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumInitElements = ILE->getNumInits();
+ unsigned NumElements = AType->getNumElements();
+
+    // Initializing an array requires us to automatically
+    // initialize any elements that have not been initialized explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumInitableElts + NumElements);
+
+ bool RewriteType = false;
+ for (unsigned i = 0; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ llvm::Constant *fillC;
+ if (Expr *filler = ILE->getArrayFiller())
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ else
+ fillC = llvm::Constant::getNullValue(ElemTy);
+ if (!fillC)
+ return 0;
+ RewriteType |= (fillC->getType() != ElemTy);
+ Elts.resize(NumElements, fillC);
+
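+    // If any element's IR type differs from the array's element type, an
+    // llvm::ConstantArray cannot hold them; emit an equivalent anonymous
+    // struct instead.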
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumInitableElts + NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isRecordType())
+ return EmitStructInitialization(ILE);
+
+ if (ILE->getType()->isUnionType())
+ return EmitUnionInitialization(ILE);
+
+ return 0;
+ }
+
+ llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (!E->getConstructor()->isTrivial())
+ return 0;
+
+ QualType Ty = E->getType();
+
+ // FIXME: We should not have to call getBaseElementType here.
+ const RecordType *RT =
+ CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class doesn't have a trivial destructor, we can't emit it as a
+ // constant expr.
+ if (!RD->hasTrivialDestructor())
+ return 0;
+
+ // Only copy and default constructors can be trivial.
+
+ if (E->getNumArgs()) {
+ assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(E->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
+
+ Expr *Arg = E->getArg(0);
+ assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ return Visit(Arg);
+ }
+
+ return CGM.EmitNullConstant(Ty);
+ }
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ return CGM.GetConstantArrayFromStringLiteral(E);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
+ }
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
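+  /// EmitLValue - Emit the constant address of the given lvalue base: either
+  /// a declaration with a constant address (function, global variable,
+  /// static local) or one of the expression forms whose address is constant
+  /// (string and compound literals, labels, global blocks, RTTI, ...).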
+ llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
+ if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
+ if (Decl->hasAttr<WeakRefAttr>())
+ return CGM.GetWeakRefReference(Decl);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return CGM.GetAddrOfFunction(FD);
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return CGM.GetAddrOfGlobalVar(VD);
+ else if (VD->isLocalVarDecl()) {
+ assert(CGF && "Can't access static local vars without CGF");
+ return CGF->GetAddrOfStaticLocalVar(VD);
+ }
+ }
+ }
+ return 0;
+ }
+
+ Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
+ CLE->getType(), CGF);
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", 0, false,
+ CGM.getContext().getTargetAddressSpace(E->getType()));
+ return C;
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(SL->getString());
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
+ if (CGF) {
+ LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ return cast<llvm::Constant>(Res.getAddress());
+ } else if (Type == PredefinedExpr::PrettyFunction) {
+ return CGM.GetAddrOfConstantCString("top level", ".tmp");
+ }
+
+ return CGM.GetAddrOfConstantCString("", ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ llvm::Constant *Ptr =
+ CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ unsigned builtin = CE->isBuiltinCall();
+ if (builtin !=
+ Builtin::BI__builtin___CFStringMakeConstantString &&
+ builtin !=
+ Builtin::BI__builtin___NSStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ if (builtin ==
+ Builtin::BI__builtin___NSStringMakeConstantString) {
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
+ }
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ }
+ case Expr::CXXTypeidExprClass: {
+ CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
+ QualType T;
+ if (Typeid->isTypeOperand())
+ T = Typeid->getTypeOperand();
+ else
+ T = Typeid->getExprOperand()->getType();
+ return CGM.GetAddrOfRTTIDescriptor(T);
+ }
+ }
+
+ return 0;
+ }
+};
+
+} // end anonymous namespace.
+
+llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
+ CodeGenFunction *CGF) {
+ if (const APValue *Value = D.evaluateValue())
+ return EmitConstantValueForMemory(*Value, D.getType(), CGF);
+
+ // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
+ // reference is a constant expression, and the reference binds to a temporary,
+ // then constant initialization is performed. ConstExprEmitter will
+ // incorrectly emit a prvalue constant in this case, and the calling code
+ // interprets that as the (pointer) value of the reference, rather than the
+ // desired value of the referee.
+ if (D.getType()->isReferenceType())
+ return 0;
+
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->EvaluateAsRValue(Result, Context);
+
+ llvm::Constant *C = 0;
+ if (Success && !Result.HasSideEffects)
+ C = EmitConstantValue(Result.Val, DestType, CGF);
+ else
+ C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ llvm_unreachable("Constant expressions should be initialized.");
+ case APValue::LValue: {
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
+
+ llvm::Constant *C;
+ if (APValue::LValueBase LVBase = Value.getLValueBase()) {
+ // An array can be represented as an lvalue referring to the base.
+ if (isa<llvm::ArrayType>(DestTy)) {
+ assert(Offset->isNullValue() && "offset on array initializer");
+ return ConstExprEmitter(*this, CGF).Visit(
+ const_cast<Expr*>(LVBase.get<const Expr*>()));
+ }
+
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
+
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
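+        // The offset is in bytes (CharUnits), so do the pointer arithmetic
+        // on i8* and cast the result back to the original pointer type.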
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+ // If the types don't match this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int:
+ return llvm::ConstantInt::get(VMContext, Value.getInt());
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Float: {
+ const llvm::APFloat &Init = Value.getFloat();
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
+ return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ else
+ return llvm::ConstantFP::get(VMContext, Init);
+ }
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Vector: {
+ SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Value.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ const APValue &Elt = Value.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ }
+ return llvm::ConstantVector::get(Inits);
+ }
+ case APValue::AddrLabelDiff: {
+ const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
+ const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
+ llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
+ llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
+
+ // Compute difference
+ llvm::Type *ResultType = getTypes().ConvertType(DestType);
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
+ llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
+
+ // LLVM is a bit sensitive about the exact format of the
+ // address-of-label difference; make sure to truncate after
+ // the subtraction.
+ return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
+ }
+ case APValue::Struct:
+ case APValue::Union:
+ return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
+ case APValue::Array: {
+ const ArrayType *CAT = Context.getAsArrayType(DestType);
+ unsigned NumElements = Value.getArraySize();
+ unsigned NumInitElts = Value.getArrayInitializedElts();
+
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
+
+ // Emit array filler, if there is one.
+ llvm::Constant *Filler = 0;
+ if (Value.hasArrayFiller())
+ Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
+ CAT->getElementType(), CGF);
+
+ // Emit initializer elements.
+ llvm::Type *CommonElementType = 0;
+ for (unsigned I = 0; I < NumElements; ++I) {
+ llvm::Constant *C = Filler;
+ if (I < NumInitElts)
+ C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
+ CAT->getElementType(), CGF);
+ if (I == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = 0;
+ Elts.push_back(C);
+ }
+
+ if (!CommonElementType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+ case APValue::MemberPointer:
+ return getCXXABI().EmitMemberPointer(Value, DestType);
+ }
+ llvm_unreachable("Unknown APValue kind");
+}
+
+llvm::Constant *
+CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
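+  // Bool constants are emitted as i1, but their in-memory type is wider
+  // (typically i8), so widen the constant with a zext.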
+ if (C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
+ assert(E->isFileScope() && "not a file-scope compound literal expr");
+ return ConstExprEmitter(*this, 0).EmitLValue(E);
+}
+
+llvm::Constant *
+CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
+ // Member pointer constants always have a very particular form.
+ const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
+ const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
+
+ // A member function pointer.
+ if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
+ return getCXXABI().EmitMemberPointer(method);
+
+ // Otherwise, a member data pointer.
+ uint64_t fieldOffset = getContext().getFieldOffset(decl);
+ CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
+ return getCXXABI().EmitMemberDataPointer(type, chars);
+}
+
+static void
+FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
+ SmallVectorImpl<llvm::Constant *> &Elements,
+ uint64_t StartOffset) {
+ assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
+ "StartOffset not byte aligned!");
+
+ if (CGM.getTypes().isZeroInitializable(T))
+ return;
+
+ if (const ConstantArrayType *CAT =
+ CGM.getContext().getAsConstantArrayType(T)) {
+ QualType ElementTy = CAT->getElementType();
+ uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
+
+ for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
+ FillInNullDataMemberPointers(CGM, ElementTy, Elements,
+ StartOffset + I * ElementSize);
+ }
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // Ignore virtual bases.
+ continue;
+ }
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (CGM.getTypes().isZeroInitializable(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
+ FillInNullDataMemberPointers(CGM, I->getType(),
+ Elements, StartOffset + BaseOffset);
+ }
+
+ // Visit all fields.
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ QualType FieldType = I->getType();
+
+ if (CGM.getTypes().isZeroInitializable(FieldType))
+ continue;
+
+ uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
+ FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
+ }
+ } else {
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
+ CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
+
+ // FIXME: hardcodes Itanium member pointer representation!
+ llvm::Constant *NegativeOne =
+ llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
+
+ // Fill in the null data member pointer.
+ for (CharUnits I = StartIndex; I != EndIndex; ++I)
+ Elements[I.getQuantity()] = NegativeOne;
+ }
+}
+
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ llvm::Type *baseType,
+ const CXXRecordDecl *base);
+
+static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
+ const CXXRecordDecl *record,
+ bool asCompleteObject) {
+ const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
+ llvm::StructType *structure =
+ (asCompleteObject ? layout.getLLVMType()
+ : layout.getBaseSubobjectLLVMType());
+
+ unsigned numElements = structure->getNumElements();
+ std::vector<llvm::Constant *> elements(numElements);
+
+ // Fill in all the bases.
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
+
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+
+ // Fill in all the fields.
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end(); I != E; ++I) {
+ const FieldDecl *field = *I;
+
+ // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
+ // will fill in later.)
+ if (!field->isBitField()) {
+ unsigned fieldIndex = layout.getLLVMFieldNo(field);
+ elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+ }
+
+ // For unions, stop after the first named field.
+ if (record->isUnion() && field->getDeclName())
+ break;
+ }
+
+ // Fill in the virtual bases, if we're working with the complete object.
+ if (asCompleteObject) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getVirtualBaseIndex(base);
+
+ // We might have already laid this field out.
+ if (elements[fieldIndex]) continue;
+
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != numElements; ++i) {
+ if (!elements[i])
+ elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(structure, elements);
+}
+
+/// Emit the null constant for a base subobject.
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ llvm::Type *baseType,
+ const CXXRecordDecl *base) {
+ const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
+
+ // Just zero out bases that don't have any pointer to data members.
+ if (baseLayout.isZeroInitializableAsBase())
+ return llvm::Constant::getNullValue(baseType);
+
+ // If the base type is a struct, we can just use its null constant.
+ if (isa<llvm::StructType>(baseType)) {
+ return EmitNullConstant(CGM, base, /*complete*/ false);
+ }
+
+ // Otherwise, some bases are represented as arrays of i8 if the size
+ // of the base is smaller than its corresponding LLVM type. Figure
+ // out how many elements this base array has.
+ llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
+ unsigned numBaseElements = baseArrayType->getNumElements();
+
+ // Fill in null data member pointers.
+ SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
+ FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
+ baseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (numBaseElements) {
+ llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
+ for (unsigned i = 0; i != numBaseElements; ++i) {
+ if (!baseElements[i])
+ baseElements[i] = i8_zero;
+ }
+ }
+
+ return llvm::ConstantArray::get(baseArrayType, baseElements);
+}
+
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (getTypes().isZeroInitializable(T))
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+ llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+
+ QualType ElementTy = CAT->getElementType();
+
+ llvm::Constant *Element = EmitNullConstant(ElementTy);
+ unsigned NumElements = CAT->getSize().getZExtValue();
+
+ if (Element->isNullValue())
+ return llvm::ConstantAggregateZero::get(ATy);
+
+ SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
+ return llvm::ConstantArray::get(ATy, Array);
+ }
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return ::EmitNullConstant(*this, RD, /*complete object*/ true);
+ }
+
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
+}
+
+llvm::Constant *
+CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
+ return ::EmitNullConstant(*this, Record, false);
+}
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..18891f7
--- /dev/null
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,2857 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+  const Expr *E;      // Entire expr, for error reporting. May not be a binop.
+};
+
+static bool MustVisitNullValue(const Expr *E) {
+ // If a null pointer expression's type is the C++0x nullptr_t, then
+ // it's not necessarily a simple constant and it must be evaluated
+ // for its potential side effects.
+ return E->getType()->isNullPtrType();
+}
+
+class ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+ llvm::LLVMContext &VMContext;
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+ VMContext(cgf.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreResultAssign() {
+ bool I = IgnoreResultAssign;
+ IgnoreResultAssign = false;
+ return I;
+ }
+
+ llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+
+ Value *EmitLoadOfLValue(LValue LV) {
+ return CGF.EmitLoadOfLValue(LV).getScalarVal();
+ }
+
+  /// EmitLoadOfLValue - Given an expression with a scalar type that
+  /// represents an l-value, this method emits the address of the l-value,
+  /// then loads and returns the result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(EmitCheckedLValue(E));
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+ Value *EmitConversionToBool(Value *Src, QualType DstTy);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy);
+
+ /// EmitNullValue - Emit a value that corresponds to null for the given type.
+ Value *EmitNullValue(QualType Ty);
+
+ /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
+ Value *EmitFloatToBoolConversion(Value *V) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
+ return Builder.CreateFCmpUNE(V, Zero, "tobool");
+ }
+
+ /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
+ Value *EmitPointerToBoolConversion(Value *V) {
+ Value *Zero = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(V->getType()));
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
+
+ Value *EmitIntToBoolConversion(Value *V) {
+ // Because of the type rules of C, we often end up computing a
+ // logical value, then zero extending it to int, then wanting it
+ // as a logical value again. Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
+ if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ return Builder.CreateIsNotNull(V, "tobool");
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *Visit(Expr *E) {
+ return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
+ }
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ llvm_unreachable("Stmt can't have complex result type!");
+ }
+ Value *VisitExpr(Expr *S);
+
+ Value *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+ Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
+ Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Builder.getInt(E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(VMContext, E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitOffsetOfExpr(OffsetOfExpr *E);
+ Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+ return Builder.CreateBitCast(V, ConvertType(E->getType()));
+ }
+
+ Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
+ }
+
+ Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getScalarVal();
+ }
+
+ Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+
+ // Otherwise, assume the mapping is the scalar directly.
+ return CGF.getOpaqueRValueMapping(E).getScalarVal();
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+ return result.getValue();
+ }
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getMethodDecl() &&
+ E->getMethodDecl()->getResultType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ LValue LV = CGF.EmitObjCIsaExpr(E);
+ Value *V = CGF.EmitLoadOfLValue(LV).getScalarVal();
+ return V;
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitMemberExpr(MemberExpr *E);
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitInitListExpr(InitListExpr *E);
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return CGF.CGM.EmitNullConstant(E->getType());
+ }
+ Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ if (E->getType()->isVariablyModifiedType())
+ CGF.EmitVariablyModifiedType(E->getType());
+ return VisitCastExpr(E);
+ }
+ Value *VisitCastExpr(CastExpr *E);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getScalarVal();
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ // Unary Operators.
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, true);
+ }
+
+ llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal,
+ bool IsInc);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ if (isa<MemberPointerType>(E->getType())) // never sugared
+ return CGF.CGM.getMemberPointerConstant(E);
+
+ return EmitLValue(E->getSubExpr()).getAddress();
+ }
+ Value *VisitUnaryDeref(const UnaryOperator *E) {
+ if (E->getType()->isVoidType())
+ return Visit(E->getSubExpr()); // the actual value should be unused
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+    // Clearing the ignored-result flag here differs from gcc, most likely
+    // due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // C++
+ Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+ Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ CGF.EmitCXXDeleteExpr(E);
+ return 0;
+ }
+ Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return Builder.getInt1(E->getValue());
+ }
+
+ Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+
+ Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
+ }
+
+ Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
+ Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ CGF.EmitScalarExpr(E->getBase());
+ return 0;
+ }
+
+ Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+
+ Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ CGF.EmitCXXThrowExpr(E);
+ return 0;
+ }
+
+ Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Builder.getInt1(E->getValue());
+ }
+
+ // Binary Operators.
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ bool isTrapvOverflowBehavior() {
+ return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
+ == LangOptions::SOB_Trapping;
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+  // Emit the overflow BB when the -ftrapv option is enabled.
+ void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
+ Builder.SetInsertPoint(overflowBB);
+ llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
+ Builder.CreateCall(Trap);
+ Builder.CreateUnreachable();
+ }
+ // Check for undefined division and modulus behaviors.
+ void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
+                                                  llvm::Value *Zero, bool isDiv);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
+ Value *&Result);
+
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
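+  // HANDLEBINOP(Add), for example, expands to VisitBinAdd, which applies
+  // EmitAdd to EmitBinOps(E), and VisitBinAddAssign, which funnels the
+  // compound assignment through EmitCompoundAssign with EmitAdd.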
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
+ HANDLEBINOP(Rem)
+ HANDLEBINOP(Add)
+ HANDLEBINOP(Sub)
+ HANDLEBINOP(Shl)
+ HANDLEBINOP(Shr)
+ HANDLEBINOP(And)
+ HANDLEBINOP(Xor)
+ HANDLEBINOP(Or)
+#undef HANDLEBINOP
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc);
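+  // VISITCOMP(LT, ...) expands to VisitBinLT, which picks the unsigned,
+  // signed, or floating-point compare opcode appropriate to the operand
+  // types.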
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+ Value *VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ return CGF.EmitObjCNumericLiteral(E);
+ }
+ Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ return CGF.EmitObjCArrayLiteral(E);
+ }
+ Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ return CGF.EmitObjCDictionaryLiteral(E);
+ }
+ Value *VisitAsTypeExpr(AsTypeExpr *CE);
+ Value *VisitAtomicExpr(AtomicExpr *AE);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType())
+ return EmitFloatToBoolConversion(Src);
+
+ if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ if (isa<llvm::IntegerType>(Src->getType()))
+ return EmitIntToBoolConversion(Src);
+
+ assert(isa<llvm::PointerType>(Src->getType()));
+ return EmitPointerToBoolConversion(Src);
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return 0;
+
+ llvm::Type *SrcTy = Src->getType();
+
+ // Floating casts might be a bit special: if we're doing casts to / from half
+ // FP, we should go via special intrinsics.
+ if (SrcType->isHalfType()) {
+ Src = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), Src);
+ SrcType = CGF.getContext().FloatTy;
+ SrcTy = CGF.FloatTy;
+ }
+
+  // Handle conversions to bool first; they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (SrcTy == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted to/from
+ // other pointers and integers. Check for pointer types in terms of LLVM, as
+ // some native types (like Obj-C id) may map to a pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(SrcTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(SrcTy)) {
+    // Must be a ptr to int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
+ if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
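+    // Roughly, splatting an i32 %x into <4 x i32> emits:
+    //   %v = insertelement <4 x i32> undef, i32 %x, i32 0
+    //   %splat = shufflevector <4 x i32> %v, <4 x i32> undef,
+    //                          <4 x i32> zeroinitializer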
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = Builder.getInt32(0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
+
+ // Splat the element across to all elements
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements,
+ Builder.getInt32(0));
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(SrcTy) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ Value *Res = NULL;
+ llvm::Type *ResTy = DstTy;
+
+ // Cast to half via float
+ if (DstType->isHalfType())
+ DstTy = CGF.FloatTy;
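+  // (No direct cast to half is emitted at this point; we compute in float
+  // and narrow with the convert_to_fp16 intrinsic at the end.)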
+
+ if (isa<llvm::IntegerType>(SrcTy)) {
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ if (isa<llvm::IntegerType>(DstTy))
+ Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ Res = Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateUIToFP(Src, DstTy, "conv");
+ } else if (isa<llvm::IntegerType>(DstTy)) {
+ assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstType->isSignedIntegerOrEnumerationType())
+ Res = Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPToUI(Src, DstTy, "conv");
+ } else {
+ assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
+ "Unknown real conversion");
+ if (DstTy->getTypeID() < SrcTy->getTypeID())
+ Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPExt(Src, DstTy, "conv");
+ }
+
+ if (DstTy != ResTy) {
+ assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
+ Res = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16), Res);
+ }
+
+ return Res;
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy) {
+ // Get the source element type.
+ SrcTy = SrcTy->getAs<ComplexType>()->getElementType();
+
+  // Handle conversions to bool first; they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+  // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy);
+}
+
+Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+
+ return llvm::Constant::getNullValue(ConvertType(Ty));
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return 0;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ // Vector Mask Case
+ if (E->getNumSubExprs() == 2 ||
+ (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
+ Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+ Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+ Value *Mask;
+
+ llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ unsigned LHSElts = LTy->getNumElements();
+
+ if (E->getNumSubExprs() == 3) {
+ Mask = CGF.EmitScalarExpr(E->getExpr(2));
+
+ // Shuffle LHS & RHS into one input vector.
+ SmallVector<llvm::Constant*, 32> concat;
+ for (unsigned i = 0; i != LHSElts; ++i) {
+ concat.push_back(Builder.getInt32(2*i));
+ concat.push_back(Builder.getInt32(2*i+1));
+ }
+
+ Value* CV = llvm::ConstantVector::get(concat);
+ LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
+ LHSElts *= 2;
+ } else {
+ Mask = RHS;
+ }
+
+ llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::Constant* EltMask;
+
+ // Treat vec3 like vec4.
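+    // Each shuffle index is masked down to the next power-of-two length so
+    // it stays in range; e.g. LHSElts == 4 gives EltMask == 3, and a vec3
+    // operand is masked as if it were vec4.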
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+2))-1);
+ else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+1))-1);
+ else
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts))-1);
+
+ // Mask off the high bits of each shuffle index.
+ Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
+ EltMask);
+ Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
+
+ // newv = undef
+ // mask = mask & maskbits
+ // for each elt
+ // n = extract mask i
+ // x = extract val n
+ // newv = insert newv, x, i
+ llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
+ Value* NewV = llvm::UndefValue::get(RTy);
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+ Value *IIndx = Builder.getInt32(i);
+ Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
+ Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
+
+      // Handle vec3 specially, since the index will be off by one for the RHS.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
+ Value *cmpIndx, *newIndx;
+ cmpIndx = Builder.CreateICmpUGT(Indx, Builder.getInt32(3),
+ "cmp_shuf_idx");
+ newIndx = Builder.CreateSub(Indx, Builder.getInt32(1), "shuf_idx_adj");
+ Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
+ }
+ Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
+ NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
+ }
+ return NewV;
+ }
+
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+
+  // Handle vec3 specially, since the index will be off by one for the RHS.
+ llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
+ if (VTy->getNumElements() == 3 && Idx > 3)
+ Idx -= 1;
+ indices.push_back(Builder.getInt32(Idx));
+ }
+
+ Value *SV = llvm::ConstantVector::get(indices);
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
+Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
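+    // The member access folds to a constant, but the base expression must
+    // still be evaluated for its side effects.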
+ if (E->isArrow())
+ CGF.EmitScalarExpr(E->getBase());
+ else
+ EmitLValue(E->getBase());
+ return Builder.getInt(Value);
+ }
+
+  // Emit debug info for the aggregate now, if it was delayed to reduce
+  // debug info size.
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
+ QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
+ if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ M->getParent()->getLocation());
+ }
+ return EmitLoadOfLValue(E);
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue contexts. For most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerOrEnumerationType();
+ Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
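+// getMaskElt - Return the mask element of the shuffle SVI at index Idx,
+// adjusted by Off; an undefined (-1) mask entry is returned as undef.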
+static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off, llvm::Type *I32Ty) {
+ int MV = SVI->getMaskValue(Idx);
+ if (MV == -1)
+ return llvm::UndefValue::get(I32Ty);
+ return llvm::ConstantInt::get(I32Ty, Off+MV);
+}
+
+Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ if (!VType) {
+ if (NumInitElements == 0) {
+ // C++11 value-initialization for the scalar.
+ return EmitNullValue(E->getType());
+ }
+ // We have a scalar in braces. Just use the first element.
+ return Visit(E->getInit(0));
+ }
+
+ unsigned ResElts = VType->getNumElements();
+
+ // Loop over initializers collecting the Value for each, and remembering
+  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
+ // us to fold the shuffle for the swizzle into the shuffle for the vector
+ // initializer, since LLVM optimizers generally do not want to touch
+ // shuffles.
+ unsigned CurIdx = 0;
+ bool VIsUndefShuffle = false;
+ llvm::Value *V = llvm::UndefValue::get(VType);
+ for (unsigned i = 0; i != NumInitElements; ++i) {
+ Expr *IE = E->getInit(i);
+ Value *Init = Visit(IE);
+ SmallVector<llvm::Constant*, 16> Args;
+
+ llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+
+ // Handle scalar elements. If the scalar initializer is actually one
+ // element of a different vector of the same width, use shuffle instead of
+ // extract+insert.
+ if (!VVT) {
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
+
+ if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+ llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
+ Value *LHS = 0, *RHS = 0;
+ if (CurIdx == 0) {
+ // insert into undef -> shuffle (src, undef)
+ Args.push_back(C);
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = EI->getVectorOperand();
+ RHS = V;
+ VIsUndefShuffle = true;
+ } else if (VIsUndefShuffle) {
+ // insert into undefshuffle && size match -> shuffle (v, src)
+ llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+ Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+ RHS = EI->getVectorOperand();
+ VIsUndefShuffle = false;
+ }
+ if (!Args.empty()) {
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ ++CurIdx;
+ continue;
+ }
+ }
+ }
+ V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
+ "vecinit");
+ VIsUndefShuffle = false;
+ ++CurIdx;
+ continue;
+ }
+
+ unsigned InitElts = VVT->getNumElements();
+
+ // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
+ // input is the same width as the vector being constructed, generate an
+ // optimized shuffle of the swizzle input into the result.
+ unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
+ Value *SVOp = SVI->getOperand(0);
+ llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+
+ if (OpTy->getNumElements() == ResElts) {
+ for (unsigned j = 0; j != CurIdx; ++j) {
+ // If the current vector initializer is a shuffle with undef, merge
+ // this shuffle directly into it.
+ if (VIsUndefShuffle) {
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
+ CGF.Int32Ty));
+ } else {
+ Args.push_back(Builder.getInt32(j));
+ }
+ }
+ for (unsigned j = 0, je = InitElts; j != je; ++j)
+ Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ if (VIsUndefShuffle)
+ V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+
+ Init = SVOp;
+ }
+ }
+
+ // Extend init to result vector length, and then shuffle its contribution
+ // to the vector initializer into V.
+ if (Args.empty()) {
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(Builder.getInt32(j));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
+ Mask, "vext");
+
+ Args.clear();
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(Builder.getInt32(j));
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(Builder.getInt32(j+Offset));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ }
+
+ // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+ // merging subsequent shuffles into this one.
+ if (CurIdx == 0)
+ std::swap(V, Init);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+ CurIdx += InitElts;
+ }
+
+  // FIXME: evaluate codegen vs. shuffling against constant null vector.
+  llvm::Type *EltTy = VType->getElementType();
+
+  // Emit remaining default initializers.
+  for (/* CurIdx already set */; CurIdx < ResElts; ++CurIdx) {
+ Value *Idx = Builder.getInt32(CurIdx);
+ llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ }
+ return V;
+}
+
+static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+
+ if (CE->getCastKind() == CK_UncheckedDerivedToBase)
+ return false;
+
+ if (isa<CXXThisExpr>(E)) {
+ // We always assume that 'this' is never null.
+ return false;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ // And that glvalue casts are never null.
+ if (ICE->getValueKind() != VK_RValue)
+ return false;
+ }
+
+ return true;
+}
+
+// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
+ Expr *E = CE->getSubExpr();
+ QualType DestTy = CE->getType();
+ CastKind Kind = CE->getCastKind();
+
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+ // Since almost all cast kinds apply to scalars, this switch doesn't have
+ // a default case, so the compiler will warn on a missing case. The cases
+ // are in the same order as in the CastKind enum.
+ switch (Kind) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
+ case CK_LValueBitCast:
+ case CK_ObjCObjectLValueCast: {
+ Value *V = EmitLValue(E).getAddress();
+ V = Builder.CreateBitCast(V,
+ ConvertType(CGF.getContext().getPointerType(DestTy)));
+ return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy));
+ }
+
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateBitCast(Src, ConvertType(DestTy));
+ }
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_BaseToDerived: {
+ const CXXRecordDecl *DerivedClassDecl =
+ DestTy->getCXXRecordDeclForPointerType();
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CK_Dynamic: {
+ Value *V = Visit(const_cast<Expr*>(E));
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
+ return CGF.EmitDynamicCast(V, DCE);
+ }
+
+ case CK_ArrayToPointerDecay: {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+ assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+ ->getElementType()) &&
+ "Expected pointer to array");
+ V = Builder.CreateStructGEP(V, 0, "arraydecay");
+ }
+
+ // Make sure the array decay ends up being the right type. This matters if
+ // the array type was of an incomplete type.
+ return CGF.Builder.CreateBitCast(V, ConvertType(CE->getType()));
+ }
+ case CK_FunctionToPointerDecay:
+ return EmitLValue(E).getAddress();
+
+ case CK_NullToPointer:
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ return llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(ConvertType(DestTy)));
+
+ case CK_NullToMemberPointer: {
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
+
+ case CK_ReinterpretMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer: {
+ Value *Src = Visit(E);
+
+ // Note that the AST doesn't distinguish between checked and
+ // unchecked member pointer conversions, so we always have to
+ // implement checked conversions here. This is inefficient when
+ // actual control flow may be required in order to perform the
+ // check, which it is for data member pointers (but not member
+ // function pointers on Itanium and ARM).
+ return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
+ }
+
+ case CK_ARCProduceObject:
+ return CGF.EmitARCRetainScalarExpr(E);
+ case CK_ARCConsumeObject:
+ return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
+ case CK_ARCReclaimReturnedObject: {
+ llvm::Value *value = Visit(E);
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+ return CGF.EmitObjCConsumeObject(E->getType(), value);
+ }
+ case CK_ARCExtendBlockObject:
+ return CGF.EmitARCExtendBlockObject(E);
+
+ case CK_CopyAndAutoreleaseBlockObject:
+ return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
+
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_ConstructorConversion:
+ case CK_ToUnion:
+ llvm_unreachable("scalar cast to non-scalar value");
+
+ case CK_LValueToRValue:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_IntegralToPointer: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+
+ return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ }
+ case CK_PointerToIntegral:
+ assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
+ return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
+
+ case CK_ToVoid: {
+ CGF.EmitIgnoredExpr(E);
+ return 0;
+ }
+ case CK_VectorSplat: {
+ llvm::Type *DstTy = ConvertType(DestTy);
+ Value *Elt = Visit(const_cast<Expr*>(E));
+ Elt = EmitScalarConversion(Elt, E->getType(),
+ DestTy->getAs<VectorType>()->getElementType());
+
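+    // E.g. (a hypothetical OpenCL-style source) "float4 v = (float4)x;" is
+    // emitted as an insertelement into lane 0 followed by a zero-mask
+    // shufflevector, built below.
+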
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = Builder.getInt32(0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
+
+ // Splat the element across to all elements
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ llvm::Constant *Zero = Builder.getInt32(0);
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements, Zero);
+    return Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ }
+
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+ case CK_IntegralToBoolean:
+ return EmitIntToBoolConversion(Visit(E));
+ case CK_PointerToBoolean:
+ return EmitPointerToBoolConversion(Visit(E));
+ case CK_FloatingToBoolean:
+ return EmitFloatToBoolConversion(Visit(E));
+ case CK_MemberPointerToBoolean: {
+ llvm::Value *MemPtr = Visit(E);
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
+ }
+
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ return CGF.EmitComplexExpr(E, false, true).first;
+
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
+
+ // TODO: kill this function off, inline appropriate case here
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ }
+
+ }
+
+ llvm_unreachable("unknown scalar cast");
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType())
+ .getScalarVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+llvm::Value *ScalarExprEmitter::
+EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal, bool IsInc) {
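+  // For reference (a non-normative note): the default (SOB_Undefined) emits
+  // "add nsw"; -fwrapv (SOB_Defined) emits a plain wrapping "add"; and
+  // -ftrapv (SOB_Trapping) emits an overflow-checked add that traps or calls
+  // the overflow handler.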
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Add;
+ BinOp.E = E;
+ return EmitOverflowCheckedBinOp(BinOp);
+ }
+ llvm_unreachable("Unknown SignedOverflowBehaviorTy");
+}
+
+llvm::Value *
+ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType type = E->getSubExpr()->getType();
+ llvm::Value *value = EmitLoadOfLValue(LV);
+ llvm::Value *input = value;
+ llvm::PHINode *atomicPHI = 0;
+
+ int amount = (isInc ? 1 : -1);
+
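+  // For an _Atomic operand this forms a compare-exchange loop; an informal
+  // IR sketch (not verbatim output):
+  //   atomic_op:
+  //     %cur  = phi [ %orig, %start ], [ %old, %atomic_op ]
+  //     %new  = <%cur incremented or decremented>
+  //     %old  = cmpxchg %addr, %cur, %new seq_cst
+  //     br (%old == %cur), %atomic_cont, %atomic_op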
+ if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(value->getType(), 2);
+ atomicPHI->addIncoming(value, startBB);
+ type = atomicTy->getValueType();
+ value = atomicPHI;
+ }
+
+ // Special case of integer increment that we have to check first: bool++.
+ // Due to promotion rules, we get:
+ // bool++ -> bool = bool + 1
+ // -> bool = (int)bool + 1
+ // -> bool = ((int)bool + 1 != 0)
+  // An interesting aspect of this is that the result of the increment is
+  // always true; decrement does not have this property.
+ if (isInc && type->isBooleanType()) {
+ value = Builder.getTrue();
+
+ // Most common case by far: integer increment.
+ } else if (type->isIntegerType()) {
+
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ // Note that signed integer inc/dec with width less than int can't
+ // overflow because of promotion rules; we're just eliding a few steps here.
+ if (type->isSignedIntegerOrEnumerationType() &&
+ value->getType()->getPrimitiveSizeInBits() >=
+ CGF.IntTy->getBitWidth())
+ value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
+ else
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+
+ // Next most common: pointer increment.
+ } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType type = ptr->getPointeeType();
+
+ // VLA types don't have constant size.
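+    // E.g. (hypothetical source) "int (*p)[n]; ++p;" must advance the
+    // pointer by n ints, where n is only known at run time.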
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(type)) {
+ llvm::Value *numElts = CGF.getVLASize(vla).first;
+ if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, numElts, "vla.inc");
+ else
+ value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
+
+ // Arithmetic on function pointers (!) is just +-1.
+ } else if (type->isFunctionType()) {
+ llvm::Value *amt = Builder.getInt32(amount);
+
+ value = CGF.EmitCastToVoidPtr(value);
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.funcptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // For everything else, we can just do a simple increment.
+ } else {
+ llvm::Value *amt = Builder.getInt32(amount);
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.ptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+ }
+
+ // Vector increment/decrement.
+ } else if (type->isVectorType()) {
+ if (type->hasIntegerRepresentation()) {
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ } else {
+ value = Builder.CreateFAdd(
+ value,
+ llvm::ConstantFP::get(value->getType(), amount),
+ isInc ? "inc" : "dec");
+ }
+
+ // Floating point.
+ } else if (type->isRealFloatingType()) {
+ // Add the inc/dec to the real part.
+ llvm::Value *amt;
+
+ if (type->isHalfType()) {
+ // Another special case: half FP increment should be done via float
+ value =
+ Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16),
+ input);
+ }
+
+ if (value->getType()->isFloatTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(amount)));
+ else if (value->getType()->isDoubleTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(amount)));
+ else {
+ llvm::APFloat F(static_cast<float>(amount));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ amt = llvm::ConstantFP::get(VMContext, F);
+ }
+ value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
+
+ if (type->isHalfType())
+ value =
+ Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16),
+ value);
+
+ // Objective-C pointer types.
+ } else {
+ const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
+ value = CGF.EmitCastToVoidPtr(value);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
+ if (!isInc) size = -size;
+ llvm::Value *sizeValue =
+ llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
+
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = Builder.CreateBitCast(value, input->getType());
+ }
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+ value, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return isPre ? value : input;
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? value : input;
+}
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // Emit unary minus with EmitSub so we handle overflow cases etc.
+ BinOpInfo BinOp;
+ BinOp.RHS = Visit(E->getSubExpr());
+
+ if (BinOp.RHS->getType()->isFPOrFPVectorTy())
+ BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
+ else
+ BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Sub;
+ BinOp.E = E;
+ return EmitSub(BinOp);
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+
+ // Perform vector logical not on comparison with zero vector.
+ if (E->getType()->isExtVectorType()) {
+ Value *Oper = Visit(E->getSubExpr());
+ Value *Zero = llvm::Constant::getNullValue(Oper->getType());
+ Value *Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+ }
+
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
+
+Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ // Try folding the offsetof to a constant.
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext()))
+ return Builder.getInt(Value);
+
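+  // Otherwise emit it piecewise; e.g. (a hypothetical source)
+  //   __builtin_offsetof(struct S, a[i].f)
+  // adds i times the array element size and then the byte offset of f.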
+ // Loop over the components of the offsetof to compute the value.
+ unsigned n = E->getNumComponents();
+ llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
+ QualType CurrentType = E->getTypeSourceInfo()->getType();
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
+ llvm::Value *Offset = 0;
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array: {
+ // Compute the index
+ Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
+ llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
+ bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
+ Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
+
+ // Save the element type
+ CurrentType =
+ CGF.getContext().getAsArrayType(CurrentType)->getElementType();
+
+ // Compute the element size
+ llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
+ CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
+
+ // Multiply out to compute the result
+ Offset = Builder.CreateMul(Idx, ElemSize);
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Compute the index of the field in its parent.
+ unsigned i = 0;
+ // FIXME: It would be nice if we didn't have to loop here!
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == MemberDecl)
+ break;
+ }
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+
+ // Compute the offset to the field
+ int64_t OffsetInt = RL.getFieldOffset(i) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+
+ // Save the element type.
+ CurrentType = MemberDecl->getType();
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfExpr::OffsetOfNode::Base: {
+ if (ON.getBase()->isVirtual()) {
+ CGF.ErrorUnsupported(E, "virtual base in offsetof");
+ continue;
+ }
+
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Save the element type.
+ CurrentType = ON.getBase()->getType();
+
+ // Compute the offset to the base.
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+ break;
+ }
+ }
+ Result = Builder.CreateAdd(Result, Offset);
+ }
+ return Result;
+}
+
+/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
+/// of the argument of the sizeof/alignof expression as an integer.
+Value *
+ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->getKind() == UETT_SizeOf) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVariablyModifiedType(TypeToSize);
+ } else {
+ // C99 6.5.3.4p2: If the argument is an expression of type
+ // VLA, it is evaluated.
+ CGF.EmitIgnoredExpr(E->getArgumentExpr());
+ }
+
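+      // E.g. (hypothetical) "sizeof(int[n][4])" is emitted as
+      // n * sizeof(int[4]): numElts covers the VLA dimensions and eltSize
+      // the remaining non-VLA element type.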
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = CGF.getVLASize(VAT);
+
+ llvm::Value *size = numElts;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
+
+ return size;
+ }
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the constant
+ // folding logic so we don't have to duplicate it here.
+ return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (E->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, false, true).first;
+ }
+
+ return Visit(Op);
+}
+
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (Op->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, true, false).second;
+ }
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (Op->isGLValue())
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.Opcode = E->getOpcode();
+ Result.E = E;
+ return Result;
+}
+
+LValue ScalarExprEmitter::EmitCompoundAssignLValue(
+ const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
+ Value *&Result) {
+ QualType LHSTy = E->getLHS()->getType();
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType()) {
+ // This needs to go through the complex expression emitter, but it's a tad
+ // complicated to do that... I'm leaving it out for now. (Note that we do
+ // actually need the imaginary part of the RHS for multiplication and
+ // division.)
+ CGF.ErrorUnsupported(E, "complex compound assignment");
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ return LValue();
+ }
+
+ // Emit the RHS first. __block variables need to have the rhs evaluated
+ // first, plus this should improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.Opcode = E->getOpcode();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV);
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
+ llvm::PHINode *atomicPHI = 0;
+ if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ // FIXME: For floating point types, we should be saving and restoring the
+ // floating point environment in the loop.
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+ atomicPHI->addIncoming(OpInfo.LHS, startBB);
+ OpInfo.Ty = atomicTy->getValueType();
+ OpInfo.LHS = atomicPHI;
+ }
+
+ // Expand the binary operator.
+ Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+ Result, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return LHSLV;
+ }
+
+ // Store the result value into the LHS lvalue. Bit-fields are handled
+ // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after the
+ // assignment...'.
+ if (LHSLV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
+
+ return LHSLV;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ Value *RHS;
+ LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return 0;
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS);
+}
+
+void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
+ const BinOpInfo &Ops,
+ llvm::Value *Zero, bool isDiv) {
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB =
+ CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+
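+  // Signed types have two trapping cases: a zero divisor and INT_MIN / -1,
+  // whose quotient is unrepresentable (e.g. -2147483648 / -1 for 32-bit
+  // int); unsigned types only trap on a zero divisor.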
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::Value *IntMin =
+ Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
+ llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
+
+ llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
+ llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
+ llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
+ Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
+ overflowBB, contBB);
+ } else {
+ CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
+ overflowBB, contBB);
+ }
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(contBB);
+}
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
+ else if (Ops.Ty->isRealFloatingType()) {
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
+ llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
+ CGF.CurFn);
+ CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
+ overflowBB, DivCont);
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(DivCont);
+ }
+ }
+ if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+ llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (CGF.getContext().getLangOpts().OpenCL) {
+ // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
+ llvm::Type *ValTy = Val->getType();
+ if (ValTy->isFloatTy() ||
+ (isa<llvm::VectorType>(ValTy) &&
+ cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
+ CGF.SetFPAccuracy(Val, 2.5);
+ }
+ return Val;
+ }
+ else if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ switch (Ops.Opcode) {
+ case BO_Add:
+ case BO_AddAssign:
+ OpID = 1;
+ IID = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case BO_Sub:
+ case BO_SubAssign:
+ OpID = 2;
+ IID = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case BO_Mul:
+ case BO_MulAssign:
+ OpID = 3;
+ IID = llvm::Intrinsic::smul_with_overflow;
+ break;
+ default:
+ llvm_unreachable("Unsupported operation for overflow detection");
+ }
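+  // Encode the operation in the high bits of the ID passed to the handler;
+  // the low bit is presumably a signedness flag (only signed overflow is
+  // checked on this path).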
+ OpID <<= 1;
+ OpID |= 1;
+
+ llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
+
+ Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::Function::iterator insertPt = initialBB;
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+  // If no overflow handler is specified, handle overflow with llvm.trap.
+ const std::string *handlerName =
+ &CGF.getContext().getLangOpts().OverflowHandler;
+ if (handlerName->empty()) {
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(continueBB);
+ return result;
+ }
+
+ // If an overflow handler is set, then we want to call it and then use its
+ // result, if it returns.
+ Builder.SetInsertPoint(overflowBB);
+
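+  // The handler is assumed to have a C signature along the lines of
+  //   long long handler(long long lhs, long long rhs, char op, char width);
+  // (a shape inferred from the call built below, not a documented ABI).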
+ // Get the overflow handler.
+ llvm::Type *Int8Ty = CGF.Int8Ty;
+ llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
+ llvm::FunctionType *handlerTy =
+ llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
+ llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
+
+ // Sign extend the args to 64-bit, so that we can use the same handler for
+ // all types of overflow.
+ llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
+ llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
+
+ // Call the handler with the two arguments, the operation, and the size of
+ // the result.
+ llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs,
+ Builder.getInt8(OpID),
+ Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ // Truncate the result back to the desired size.
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+ Builder.CreateBr(continueBB);
+
+ Builder.SetInsertPoint(continueBB);
+ llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
+}
+
+/// Emit pointer + index arithmetic.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinOpInfo &op,
+ bool isSubtraction) {
+ // Must have binary (not unary) expr here. Unary pointer
+ // increment/decrement doesn't use this path.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+
+ Value *pointer = op.LHS;
+ Expr *pointerOperand = expr->getLHS();
+ Value *index = op.RHS;
+ Expr *indexOperand = expr->getRHS();
+
+ // In a subtraction, the LHS is always the pointer.
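+  // E.g. (hypothetical) for "i + p" with int i and int *p, the operands are
+  // swapped below so that the code that follows always sees (pointer, index).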
+ if (!isSubtraction && !pointer->getType()->isPointerTy()) {
+ std::swap(pointer, index);
+ std::swap(pointerOperand, indexOperand);
+ }
+
+ unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
+ if (width != CGF.PointerWidthInBits) {
+ // Zero-extend or sign-extend the pointer value according to
+ // whether the index is signed or not.
+ bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+ index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ "idx.ext");
+ }
+
+ // If this is subtraction, negate the index.
+ if (isSubtraction)
+ index = CGF.Builder.CreateNeg(index, "idx.neg");
+
+ const PointerType *pointerType
+ = pointerOperand->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ QualType objectType = pointerOperand->getType()
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize
+ = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+
+ index = CGF.Builder.CreateMul(index, objectSize);
+
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ QualType elementType = pointerType->getPointeeType();
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = CGF.getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (CGF.getLangOpts().isSignedOverflowDefined()) {
+ index = CGF.Builder.CreateMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ } else {
+ index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ }
+ return pointer;
+ }
+
+  // Explicitly handle GNU void* and function pointer arithmetic extensions.
+  // The GNU void* casts amount to no-ops since our void* type is i8*, but
+  // this is future-proof.
+ if (elementType->isVoidType() || elementType->isFunctionType()) {
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+
+ return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+}
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
+ if (op.LHS->getType()->isPointerTy() ||
+ op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+}
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
+ // The LHS is always a pointer if either side is.
+ if (!op.LHS->getType()->isPointerTy()) {
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ }
+
+ // If the RHS is not a pointer, then we have normal pointer
+ // arithmetic.
+ if (!op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
+
+ // Otherwise, this is a pointer subtraction.
+
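+  // E.g. (hypothetical) "p - q" with int pointers computes
+  // (ptrtoint p) - (ptrtoint q) and then divides by sizeof(int).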
+ // Do the raw subtraction part.
+ llvm::Value *LHS
+ = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
+ llvm::Value *RHS
+ = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
+ Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Okay, figure out the element size.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+ QualType elementType = expr->getLHS()->getType()->getPointeeType();
+
+ llvm::Value *divisor = 0;
+
+ // For a variable-length array, this is going to be non-constant.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ llvm::Value *numElements;
+ llvm::tie(numElements, elementType) = CGF.getVLASize(vla);
+
+ divisor = numElements;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
+ if (!eltSize.isOne())
+ divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
+
+  // For everything else, we can just compute it, safe in the
+ // assumption that Sema won't let anything through that we can't
+ // safely compute the size of.
+ } else {
+ CharUnits elementSize;
+ // Handle GCC extension for pointer arithmetic on void* and
+ // function pointer types.
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elementSize = CharUnits::One();
+ else
+ elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+
+ // Don't even emit the divide for element size of 1.
+ if (elementSize.isOne())
+ return diffInChars;
+
+ divisor = CGF.CGM.getSize(elementSize);
+ }
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
+ return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
+}
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
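+  // With -fcatch-undefined-behavior, guard against shift amounts that are
+  // not less than the bit width (e.g. a 32-bit int shifted by 32), which
+  // would be undefined; out-of-range amounts branch to a trap block.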
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
+
+enum IntrinsicType { VCMPEQ, VCMPGT };
+// Return the corresponding comparison intrinsic for the given element type.
+static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
+ BuiltinType::Kind ElemKind) {
+ switch (ElemKind) {
+ default: llvm_unreachable("unexpected element type");
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
+ case BuiltinType::UShort:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
+ case BuiltinType::Short:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
+ case BuiltinType::Float:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
+ }
+}
+
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
+ assert(E->getOpcode() == BO_EQ ||
+ E->getOpcode() == BO_NE);
+ Value *LHS = CGF.EmitScalarExpr(E->getLHS());
+ Value *RHS = CGF.EmitScalarExpr(E->getRHS());
+ Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
+ CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
+ } else if (!LHSTy->isAnyComplexType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+    // On AltiVec, a vector comparison used in a scalar context yields a
+    // numeric result, so we use predicate intrinsics that compare two
+    // vectors and return 0 or 1.
+ if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
+      // Constants for mapping CR6 register bits to the predicate result.
+ enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
+
+ llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
+
+      // In several cases the vector argument order must be reversed.
+ Value *FirstVecArg = LHS,
+ *SecondVecArg = RHS;
+
+ QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
+ const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
+ BuiltinType::Kind ElementKind = BTy->getKind();
+
+ switch(E->getOpcode()) {
+ default: llvm_unreachable("is not a comparison operation");
+ case BO_EQ:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_NE:
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_LT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ break;
+ case BO_GT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ break;
+ case BO_LE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ }
+ break;
+ case BO_GE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ break;
+ }
+
+ Value *CR6Param = Builder.getInt32(CR6);
+ llvm::Function *F = CGF.CGM.getIntrinsic(ID);
+ Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+ }
+
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->hasSignedIntegerRepresentation()) {
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ }
+
+ // If this is a vector comparison, sign extend the result to the appropriate
+ // vector integer type and return it (don't convert to bool).
+ if (LHSTy->isVectorType())
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+ CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+ QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BO_EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BO_NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ Value *RHS;
+ LValue LHS;
+
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ llvm::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm::tie(LHS,RHS) = CGF.EmitARCStoreAutoreleasing(E);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+ break;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // __block variables need to have the rhs evaluated first, plus
+ // this should improve codegen just a little.
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+ }
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return 0;
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS);
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+
+ // Perform vector logical and on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *And = Builder.CreateAnd(LHS, RHS);
+ return Builder.CreateSExt(And, Zero->getType(), "sext");
+ }
+
+ llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (LHSCondVal) { // If we have 1 && X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(ResTy);
+ }
+
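+  // Otherwise emit the usual short-circuit CFG; an informal sketch of the
+  // blocks created below:
+  //   entry:    br %lhs, label %land.rhs, label %land.end
+  //   land.rhs: %rhs = ...           ; br label %land.end
+  //   land.end: %r = phi i1 [ false, %entry ], [ %rhs, %land.rhs ]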
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+  // Any edges into the ContBlock at this point come from the first condition
+  // (there may be an indeterminate number of them). All of these values will
+  // be false. Start setting up the PHI node in the Cont Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
+ "", ContBlock);
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ eval.end(CGF);
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ if (CGF.getDebugInfo())
+    // There is no need to emit a line number for an unconditional branch.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+
+ // Perform vector logical or on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *Or = Builder.CreateOr(LHS, RHS);
+ return Builder.CreateSExt(Or, Zero->getType(), "sext");
+ }
+
+ llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (!LHSCondVal) { // If we have 0 || X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(ResTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+  // Any edges into the ContBlock at this point come from the first condition
+  // (there may be an indeterminate number of them). All of these values will
+  // be true. Start setting up the PHI node in the Cont Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
+ "", ContBlock);
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
+
+ eval.begin(CGF);
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ eval.end(CGF);
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+ CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // Anything that is an integer or floating point constant is fine.
+ if (E->isConstantInitializer(CGF.getContext(), false))
+ return true;
+
+ // Non-volatile automatic variables too, to get "cond ? X : Y" where
+ // X and Y are local variables.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && !(CGF.getContext()
+ .getCanonicalType(VD->getType())
+ .isVolatileQualified()))
+ return true;
+
+ return false;
+}
+
+
+Value *ScalarExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ Expr *condExpr = E->getCond();
+ Expr *lhsExpr = E->getTrueExpr();
+ Expr *rhsExpr = E->getFalseExpr();
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ bool CondExprBool;
+ if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ Expr *live = lhsExpr, *dead = rhsExpr;
+ if (!CondExprBool) std::swap(live, dead);
+
+ // If the dead side doesn't have labels we need, just emit the Live part.
+ if (!CGF.ContainsLabel(dead)) {
+ Value *Result = Visit(live);
+
+ // If the live part is a throw expression, it acts like it has a void
+ // type, so evaluating it returns a null Value*. However, a conditional
+ // with non-void type must return a non-null Value*.
+ if (!Result && !E->getType()->isVoidType())
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+
+ return Result;
+ }
+ }
+
+  // OpenCL: if the condition is a vector, treat it like OpenCL's select()
+  // builtin, where each lane's sign bit chooses between the corresponding
+  // lanes of the two operands.
+ if (CGF.getContext().getLangOpts().OpenCL
+ && condExpr->getType()->isVectorType()) {
+ llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+
+ llvm::Type *condType = ConvertType(condExpr->getType());
+ llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+
+ unsigned numElem = vecTy->getNumElements();
+ llvm::Type *elemType = vecTy->getElementType();
+
+ llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
+ llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
+ llvm::Value *tmp = Builder.CreateSExt(TestMSB,
+ llvm::VectorType::get(elemType,
+ numElem),
+ "sext");
+ llvm::Value *tmp2 = Builder.CreateNot(tmp);
+
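+    // The lanewise select is computed bitwise as (mask & lhs) | (~mask & rhs),
+    // where mask is each condition lane's sign bit sign-extended to the full
+    // lane width.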
+ // Cast float to int to perform ANDs if necessary.
+ llvm::Value *RHSTmp = RHS;
+ llvm::Value *LHSTmp = LHS;
+ bool wasCast = false;
+ llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ if (rhsVTy->getElementType()->isFloatTy()) {
+ RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
+ LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
+ wasCast = true;
+ }
+
+ llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
+ llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
+ llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
+ if (wasCast)
+ tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
+
+ return tmp5;
+ }
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+ if (!LHS) {
+ // If the conditional has void type, make sure we return a null Value*.
+ assert(!RHS && "LHS and RHS types must match");
+ return 0;
+ }
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+ eval.begin(CGF);
+ Value *LHS = Visit(lhsExpr);
+ eval.end(CGF);
+
+ LHSBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+ eval.begin(CGF);
+ Value *RHS = Visit(rhsExpr);
+ eval.end(CGF);
+
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBlock);
+
+ // If the LHS or RHS is a throw expression, it will be legitimately null.
+ if (!LHS)
+ return RHS;
+ if (!RHS)
+ return LHS;
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr)
+ return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+ // FIXME Volatility.
+ return Builder.CreateLoad(ArgPtr);
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
+ return CGF.EmitBlockLiteral(block);
+}
+
+Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
+ Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
+ llvm::Type *DstTy = ConvertType(E->getType());
+
+ // Going from vec4->vec3 or vec3->vec4 is a special case and requires
+ // a shuffle vector instead of a bitcast.
+ llvm::Type *SrcTy = Src->getType();
+ if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
+ unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
+ if ((numElementsDst == 3 && numElementsSrc == 4)
+ || (numElementsDst == 4 && numElementsSrc == 3)) {
+
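+      // E.g. (hypothetical OpenCL) "as_float3(v4)" shuffles lanes {0,1,2},
+      // while "as_float4(v3)" appends an undef fourth lane via the mask
+      // built below.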
+ // In the case of going from int4->float3, a bitcast is needed before
+ // doing a shuffle.
+ llvm::Type *srcElemTy =
+ cast<llvm::VectorType>(SrcTy)->getElementType();
+ llvm::Type *dstElemTy =
+ cast<llvm::VectorType>(DstTy)->getElementType();
+
+ if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
+ || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
+        // Bitcast the source to a vector with the destination element type,
+        // keeping the source element count, so the shuffle operand types match.
+ llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
+ numElementsSrc);
+
+ Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
+ }
+
+ llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
+
+ SmallVector<llvm::Constant*, 3> Args;
+ Args.push_back(Builder.getInt32(0));
+ Args.push_back(Builder.getInt32(1));
+ Args.push_back(Builder.getInt32(2));
+
+ if (numElementsDst == 4)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+
+ return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
+ }
+ }
+
+ return Builder.CreateBitCast(Src, DstTy, "astype");
+}
+
+Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getScalarVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type, returning the result. If IgnoreResultAssign is set, the caller
+/// promises to ignore the result of a top-level assignment, which may then be
+/// emitted without producing a value.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && !hasAggregateLLVMType(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ if (isa<CXXDefaultArgExpr>(E))
+ disableDebugInfo();
+ Value *V = ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr*>(E));
+ if (isa<CXXDefaultArgExpr>(E))
+ enableDebugInfo();
+ return V;
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy) {
+ assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy) {
+ assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+ DstTy);
+}
+
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
+
+LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
+ llvm::Value *V;
+ // object->isa or (*object).isa
+ // Generate code as for: *(Class*)object
+ // build Class* type
+ llvm::Type *ClassPtrTy = ConvertType(E->getType());
+
+ Expr *BaseExpr = E->getBase();
+ if (BaseExpr->isRValue()) {
+ V = CreateMemTemp(E->getType(), "resval");
+ llvm::Value *Src = EmitScalarExpr(BaseExpr);
+ Builder.CreateStore(Src, V);
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(
+ MakeNaturalAlignAddrLValue(V, E->getType()));
+ } else {
+ if (E->isArrow())
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
+ else
+ V = EmitLValue(BaseExpr).getAddress();
+ }
+
+ // build Class* type
+ ClassPtrTy = ClassPtrTy->getPointerTo();
+ V = Builder.CreateBitCast(V, ClassPtrTy);
+ return MakeNaturalAlignAddrLValue(V, E->getType());
+}
+
+
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
+ const CompoundAssignOperator *E) {
+ ScalarExprEmitter Scalar(*this);
+ Value *Result = 0;
+ switch (E->getOpcode()) {
+#define COMPOUND_OP(Op) \
+ case BO_##Op##Assign: \
+ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
+ Result)
+ COMPOUND_OP(Mul);
+ COMPOUND_OP(Div);
+ COMPOUND_OP(Rem);
+ COMPOUND_OP(Add);
+ COMPOUND_OP(Sub);
+ COMPOUND_OP(Shl);
+ COMPOUND_OP(Shr);
+ COMPOUND_OP(And);
+ COMPOUND_OP(Xor);
+ COMPOUND_OP(Or);
+#undef COMPOUND_OP
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_Assign:
+ case BO_Comma:
+ llvm_unreachable("Not valid compound assignment operators");
+ }
+
+ llvm_unreachable("Unhandled compound assignment operator");
+}
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..d0aa0f5
--- /dev/null
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,2974 @@
+//===---- CGObjC.cpp - Emit LLVM Code for Objective-C --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
+using namespace clang;
+using namespace CodeGen;
+
+typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result);
+
+/// Given the address of a variable of pointer type, find the correct
+/// null to store into it.
+static llvm::Constant *getNullForVariable(llvm::Value *addr) {
+ llvm::Type *type =
+ cast<llvm::PointerType>(addr->getType())->getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
+}
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(E->getString());
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// EmitObjCNumericLiteral - This routine generates code for
+/// the appropriate +[NSNumber numberWith<Type>:] method.
+///
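+/// For illustration: a literal such as '@42' is lowered to a class message
+/// send of the form [NSNumber numberWithInt:42], using the method that
+/// Sema attached to the ObjCNumericLiteral node.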
+llvm::Value *
+CodeGenFunction::EmitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ // Generate the correct selector for this literal's concrete type.
+ const Expr *NL = E->getNumber();
+ // Get the method.
+ const ObjCMethodDecl *Method = E->getObjCNumericLiteralMethod();
+ assert(Method && "NSNumber method is null");
+ Selector Sel = Method->getSelector();
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ QualType ResultType = E->getType(); // should be NSNumber *
+ const ObjCObjectPointerType *InterfacePointerType =
+ ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *NSNumberDecl =
+ InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, NSNumberDecl);
+
+ const ParmVarDecl *argDecl = *Method->param_begin();
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ RValue RV = EmitAnyExpr(NL);
+ CallArgList Args;
+ Args.add(RV, ArgQT);
+
+ RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ ResultType, Sel, Receiver, Args,
+ NSNumberDecl, Method);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects) {
+ ASTContext &Context = CGM.getContext();
+ const ObjCDictionaryLiteral *DLE = 0;
+ const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
+ if (!ALE)
+ DLE = cast<ObjCDictionaryLiteral>(E);
+
+ // Compute the type of the array we're initializing.
+ uint64_t NumElements =
+ ALE ? ALE->getNumElements() : DLE->getNumElements();
+ llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
+ NumElements);
+ QualType ElementType = Context.getObjCIdType().withConst();
+ QualType ElementArrayType
+ = Context.getConstantArrayType(ElementType, APNumElements,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+
+ // Allocate the temporary array(s).
+ llvm::Value *Objects = CreateMemTemp(ElementArrayType, "objects");
+ llvm::Value *Keys = 0;
+ if (DLE)
+ Keys = CreateMemTemp(ElementArrayType, "keys");
+
+  // Perform the actual initialization of the array(s).
+ for (uint64_t i = 0; i < NumElements; i++) {
+ if (ALE) {
+ // Emit the initializer.
+ const Expr *Rhs = ALE->getElement(i);
+ LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Rhs->getType()),
+ Context);
+ EmitScalarInit(Rhs, /*D=*/0, LV, /*capturedByInit=*/false);
+ } else {
+ // Emit the key initializer.
+ const Expr *Key = DLE->getKeyValueElement(i).Key;
+ LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i),
+ ElementType,
+ Context.getTypeAlignInChars(Key->getType()),
+ Context);
+ EmitScalarInit(Key, /*D=*/0, KeyLV, /*capturedByInit=*/false);
+
+ // Emit the value initializer.
+ const Expr *Value = DLE->getKeyValueElement(i).Value;
+ LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Value->getType()),
+ Context);
+ EmitScalarInit(Value, /*D=*/0, ValueLV, /*capturedByInit=*/false);
+ }
+ }
+
+ // Generate the argument list.
+ CallArgList Args;
+ ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
+ const ParmVarDecl *argDecl = *PI++;
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Objects), ArgQT);
+ if (DLE) {
+ argDecl = *PI++;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Keys), ArgQT);
+ }
+ argDecl = *PI;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ llvm::Value *Count =
+ llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
+ Args.add(RValue::get(Count), ArgQT);
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ Selector Sel = MethodWithObjects->getSelector();
+ QualType ResultType = E->getType();
+ const ObjCObjectPointerType *InterfacePointerType
+ = ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *Class
+ = InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, Class);
+
+ // Generate the message send.
+ RValue result
+ = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ MethodWithObjects->getResultType(),
+ Sel,
+ Receiver, Args, Class,
+ MethodWithObjects);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
+ const ObjCDictionaryLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+/// \brief Adjust the type of the result of an Objective-C message send
+/// expression when the method has a related result type.
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result) {
+ if (!Method)
+ return Result;
+
+ if (!Method->hasRelatedResultType() ||
+ CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
+ !Result.isScalar())
+ return Result;
+
+ // We have applied a related result type. Cast the rvalue appropriately.
+ return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
+ CGF.ConvertType(E->getType())));
+}
+
+/// Decide whether to extend the lifetime of the receiver of a
+/// returns-inner-pointer message.
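+///
+/// For illustration: under ARC, in an expression like
+///   const char *p = [[obj data] bytes];
+/// the receiver of -bytes is retained and autoreleased so the inner
+/// pointer remains valid, unless the receiver has precise lifetime.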
+static bool
+shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
+ switch (message->getReceiverKind()) {
+
+ // For a normal instance message, we should extend unless the
+ // receiver is loaded from a variable with precise lifetime.
+ case ObjCMessageExpr::Instance: {
+ const Expr *receiver = message->getInstanceReceiver();
+ const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
+ if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
+ receiver = ice->getSubExpr()->IgnoreParens();
+
+ // Only __strong variables.
+ if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return true;
+
+ // All ivars and fields have precise lifetime.
+ if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
+ return false;
+
+ // Otherwise, check for variables.
+ const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
+ if (!declRef) return true;
+ const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
+ if (!var) return true;
+
+ // All variables have precise lifetime except local variables with
+ // automatic storage duration that aren't specially marked.
+ return (var->hasLocalStorage() &&
+ !var->hasAttr<ObjCPreciseLifetimeAttr>());
+ }
+
+ case ObjCMessageExpr::Class:
+ case ObjCMessageExpr::SuperClass:
+ // It's never necessary for class objects.
+ return false;
+
+ case ObjCMessageExpr::SuperInstance:
+ // We generally assume that 'self' lives throughout a method call.
+ return false;
+ }
+
+ llvm_unreachable("invalid receiver kind");
+}
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
+
+ bool isDelegateInit = E->isDelegateInitCall();
+
+ const ObjCMethodDecl *method = E->getMethodDecl();
+
+ // We don't retain the receiver in delegate init calls, and this is
+ // safe because the receiver value is always loaded from 'self',
+ // which we zero out. We don't want to Block_copy block receivers,
+ // though.
+ bool retainSelf =
+ (!isDelegateInit &&
+ CGM.getLangOpts().ObjCAutoRefCount &&
+ method &&
+ method->hasAttr<NSConsumesSelfAttr>());
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ ObjCInterfaceDecl *OID = 0;
+ // Find the receiver
+ QualType ReceiverType;
+ llvm::Value *Receiver = 0;
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ ReceiverType = E->getInstanceReceiver()->getType();
+ if (retainSelf) {
+ TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
+ E->getInstanceReceiver());
+ Receiver = ter.getPointer();
+ if (ter.getInt()) retainSelf = false;
+ } else
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class: {
+ ReceiverType = E->getClassReceiver();
+ const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
+ assert(ObjTy && "Invalid Objective-C class message send");
+ OID = ObjTy->getInterface();
+ assert(OID && "Invalid Objective-C class message send");
+ Receiver = Runtime.GetClass(Builder, OID);
+ isClassMessage = true;
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ ReceiverType = E->getSuperType();
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ ReceiverType = E->getSuperType();
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ isClassMessage = true;
+ break;
+ }
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
+
+ // In ARC, we sometimes want to "extend the lifetime"
+ // (i.e. retain+autorelease) of receivers of returns-inner-pointer
+ // messages.
+ if (getLangOpts().ObjCAutoRefCount && method &&
+ method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
+ shouldExtendReceiverForInnerPointerMessage(E))
+ Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
+
+ QualType ResultType =
+ method ? method->getResultType() : E->getType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
+
+ // For delegate init calls in ARC, do an unsafe store of null into
+ // self. This represents the call taking direct ownership of that
+ // value. We have to do this after emitting the other call
+ // arguments because they might also reference self, but we don't
+ // have to worry about any of them modifying self because that would
+ // be an undefined read and write of an object in unordered
+ // expressions.
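+  // For illustration: the canonical pattern this handles is
+  //   self = [super init];
+  // where the call consumes the current value of 'self'.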
+ if (isDelegateInit) {
+ assert(getLangOpts().ObjCAutoRefCount &&
+ "delegate init calls should only be marked in ARC");
+
+ // Do an unsafe store of null into self.
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ assert(selfAddr && "no self entry for a delegate init call?");
+
+ Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
+ }
+
+ RValue result;
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args,
+ method);
+ } else {
+ result = Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(),
+ Receiver, Args, OID,
+ method);
+ }
+
+ // For delegate init calls in ARC, implicitly store the result of
+ // the call back into self. This takes ownership of the value.
+ if (isDelegateInit) {
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ llvm::Value *newSelf = result.getScalarVal();
+
+ // The delegate return type isn't necessarily a matching type; in
+ // fact, it's quite likely to be 'id'.
+ llvm::Type *selfTy =
+ cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ newSelf = Builder.CreateBitCast(newSelf, selfTy);
+
+ Builder.CreateStore(newSelf, selfAddr);
+ }
+
+ return AdjustRelatedResultType(*this, E, method, result);
+}
+
+namespace {
+struct FinishARCDealloc : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+
+ const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ if (!iface->getSuperClass()) return;
+
+ bool isCategory = isa<ObjCCategoryImplDecl>(impl);
+
+ // Call [super dealloc] if we have a superclass.
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ CallArgList args;
+ CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
+ CGF.getContext().VoidTy,
+ method->getSelector(),
+ iface,
+ isCategory,
+ self,
+ /*is class msg*/ false,
+ args,
+ method);
+ }
+};
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ SourceLocation StartLoc) {
+ FunctionArgList args;
+ // Check if we should generate debug info for this method.
+ if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ args.push_back(OMD->getSelfDecl());
+ args.push_back(OMD->getCmdDecl());
+
+ for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI)
+ args.push_back(*PI);
+
+ CurGD = OMD;
+
+ StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
+
+ // In ARC, certain methods get an extra cleanup.
+ if (CGM.getLangOpts().ObjCAutoRefCount &&
+ OMD->isInstanceMethod() &&
+ OMD->getSelector().isUnarySelector()) {
+ const IdentifierInfo *ident =
+ OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (ident->isStr("dealloc"))
+ EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
+ }
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue, QualType type);
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
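+///
+/// For illustration: a method such as
+///   - (int)foo:(id)arg;
+/// becomes an LLVM function whose first two parameters are the implicit
+/// 'self' and '_cmd', followed by 'arg'.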
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
+ EmitStmt(OMD->getBody());
+ FinishFunction(OMD->getBodyRBrace());
+}
+
+/// emitStructGetterCall - Call the runtime function to load a property
+/// into the return value slot.
+static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ bool isAtomic, bool hasStrong) {
+ ASTContext &Context = CGF.getContext();
+
+ llvm::Value *src =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
+ ivar, 0).getAddress();
+
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList args;
+
+ llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
+ args.add(RValue::get(dest), Context.VoidPtrTy);
+
+ src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
+ args.add(RValue::get(src), Context.VoidPtrTy);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
+ args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
+ args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
+ args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
+
+ llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ fn, ReturnValueSlot(), args);
+}
+
+/// Determine whether the given architecture supports unaligned atomic
+/// accesses. They don't have to be fast, just faster than a function
+/// call and a mutex.
+static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
+ // FIXME: Allow unaligned atomic load/store on x86. (It is not
+ // currently supported by the backend.)
+  return false;
+}
+
+/// Return the maximum size that permits atomic accesses for the given
+/// architecture.
+static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
+ llvm::Triple::ArchType arch) {
+ // ARM has 8-byte atomic accesses, but it's not clear whether we
+ // want to rely on them here.
+
+ // In the default case, just assume that any size up to a pointer is
+ // fine given adequate alignment.
+ return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
+}
+
+namespace {
+ class PropertyImplStrategy {
+ public:
+ enum StrategyKind {
+ /// The 'native' strategy is to use the architecture's provided
+ /// reads and writes.
+ Native,
+
+ /// Use objc_setProperty and objc_getProperty.
+ GetSetProperty,
+
+ /// Use objc_setProperty for the setter, but use expression
+ /// evaluation for the getter.
+ SetPropertyAndExpressionGet,
+
+ /// Use objc_copyStruct.
+ CopyStruct,
+
+ /// The 'expression' strategy is to emit normal assignment or
+ /// lvalue-to-rvalue expressions.
+ Expression
+ };
+
+ StrategyKind getKind() const { return StrategyKind(Kind); }
+
+ bool hasStrongMember() const { return HasStrong; }
+ bool isAtomic() const { return IsAtomic; }
+ bool isCopy() const { return IsCopy; }
+
+ CharUnits getIvarSize() const { return IvarSize; }
+ CharUnits getIvarAlignment() const { return IvarAlignment; }
+
+ PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl);
+
+ private:
+ unsigned Kind : 8;
+ unsigned IsAtomic : 1;
+ unsigned IsCopy : 1;
+ unsigned HasStrong : 1;
+
+ CharUnits IvarSize;
+ CharUnits IvarAlignment;
+ };
+}
+
+/// Pick an implementation strategy for the given property synthesis.
+PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
+
+ IsCopy = (setterKind == ObjCPropertyDecl::Copy);
+ IsAtomic = prop->isAtomic();
+ HasStrong = false; // doesn't matter here.
+
+ // Evaluate the ivar's size and alignment.
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ QualType ivarType = ivar->getType();
+ llvm::tie(IvarSize, IvarAlignment)
+ = CGM.getContext().getTypeInfoInChars(ivarType);
+
+ // If we have a copy property, we always have to use getProperty/setProperty.
+ // TODO: we could actually use setProperty and an expression for non-atomics.
+ if (IsCopy) {
+ Kind = GetSetProperty;
+ return;
+ }
+
+ // Handle retain.
+ if (setterKind == ObjCPropertyDecl::Retain) {
+ // In GC-only, there's nothing special that needs to be done.
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // fallthrough
+
+ // In ARC, if the property is non-atomic, use expression emission,
+ // which translates to objc_storeStrong. This isn't required, but
+ // it's slightly nicer.
+ } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
+ Kind = Expression;
+ return;
+
+ // Otherwise, we need to at least use setProperty. However, if
+ // the property isn't atomic, we can use normal expression
+ // emission for the getter.
+ } else if (!IsAtomic) {
+ Kind = SetPropertyAndExpressionGet;
+ return;
+
+ // Otherwise, we have to use both setProperty and getProperty.
+ } else {
+ Kind = GetSetProperty;
+ return;
+ }
+ }
+
+ // If we're not atomic, just use expression accesses.
+ if (!IsAtomic) {
+ Kind = Expression;
+ return;
+ }
+
+ // Properties on bitfield ivars need to be emitted using expression
+ // accesses even if they're nominally atomic.
+ if (ivar->isBitField()) {
+ Kind = Expression;
+ return;
+ }
+
+ // GC-qualified or ARC-qualified ivars need to be emitted as
+ // expressions. This actually works out to being atomic anyway,
+ // except for ARC __strong, but that should trigger the above code.
+ if (ivarType.hasNonTrivialObjCLifetime() ||
+ (CGM.getLangOpts().getGC() &&
+ CGM.getContext().getObjCGCAttrKind(ivarType))) {
+ Kind = Expression;
+ return;
+ }
+
+ // Compute whether the ivar has strong members.
+ if (CGM.getLangOpts().getGC())
+ if (const RecordType *recordType = ivarType->getAs<RecordType>())
+ HasStrong = recordType->getDecl()->hasObjectMember();
+
+ // We can never access structs with object members with a native
+ // access, because we need to use write barriers. This is what
+ // objc_copyStruct is for.
+ if (HasStrong) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, this is target-dependent and based on the size and
+ // alignment of the ivar.
+
+ // If the size of the ivar is not a power of two, give up. We don't
+ // want to get into the business of doing compare-and-swaps.
+ if (!IvarSize.isPowerOfTwo()) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ llvm::Triple::ArchType arch =
+ CGM.getContext().getTargetInfo().getTriple().getArch();
+
+ // Most architectures require memory to fit within a single cache
+ // line, so the alignment has to be at least the size of the access.
+ // Otherwise we have to grab a lock.
+ if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // If the ivar's size exceeds the architecture's maximum atomic
+ // access size, we have to use CopyStruct.
+ if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, we can use native loads and stores.
+ Kind = Native;
+}
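+
+// For illustration (hypothetical declarations, assuming a 64-bit target):
+//   @property (copy) NSString *s;          -> GetSetProperty
+//   @property (nonatomic, strong) id x;    -> Expression (under ARC)
+//   @property (nonatomic, retain) id y;    -> SetPropertyAndExpressionGet (MRC)
+//   @property (atomic, assign) double d;   -> Native (8 bytes, power of two)
+//   @property (atomic) BigStruct s;        -> CopyStruct (exceeds atomic size)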
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicGetterCopyHelperFunction(PID);
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+
+ generateObjCGetterBody(IMP, PID, AtomicHelperFn);
+
+ FinishFunction();
+}
+
+static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
+ const Expr *getter = propImpl->getGetterCXXConstructor();
+ if (!getter) return true;
+
+  // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // If the property has a reference type, we might just be binding a
+ // reference, in which case the result will be a gl-value. We should
+ // treat this as a non-trivial operation.
+ if (getter->isGLValue())
+ return false;
+
+ // If we selected a trivial copy-constructor, we're okay.
+ if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
+ return (construct->getConstructor()->isTrivial());
+
+ // The constructor might require cleanups (in which case it's never
+ // trivial).
+ assert(isa<ExprWithCleanups>(getter));
+ return false;
+}
+
+/// emitCPPObjectAtomicGetterCall - Call the runtime function to
+/// copy the ivar into the return slot.
+static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
+ llvm::Value *returnAddr,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
+ // AtomicHelperFn);
+ CallArgList args;
+
+  // The first argument is the return slot.
+ args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
+
+ // The 2nd argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
+void
+CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ // If there's a non-trivial 'get' expression, we just have to emit that.
+ if (!hasTrivialGetExpr(propImpl)) {
+ if (!AtomicHelperFn) {
+ ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
+ /*nrvo*/ 0);
+ EmitReturnStmt(ret);
+    } else {
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue,
+ ivar, AtomicHelperFn);
+ }
+ return;
+ }
+
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ QualType propType = prop->getType();
+ ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
+
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+
+ // Pick an implementation strategy.
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Perform an atomic load. This does not impose ordering constraints.
+ llvm::Value *ivarAddr = LV.getAddress();
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
+ load->setAlignment(strategy.getIvarAlignment().getQuantity());
+ load->setAtomic(llvm::Unordered);
+
+ // Store that value into the return address. Doing this with a
+ // bitcast is likely to produce some pretty ugly IR, but it's not
+ // the *most* terrible thing in the world.
+ Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));
+
+ // Make sure we don't do an autorelease.
+ AutoreleaseResult = false;
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty: {
+ llvm::Value *getPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+ if (!getPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ llvm::Value *cmd =
+ Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
+ llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ RValue RV = EmitCall(getTypes().arrangeFunctionCall(propType, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ getPropertyFn, ReturnValueSlot(), args);
+
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects so we don't need to worry about complex or
+ // aggregates.
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ getTypes().ConvertType(propType)));
+
+ EmitReturnOfRValue(RV, propType);
+
+ // objc_getProperty does an autorelease, so we should suppress ours.
+ AutoreleaseResult = false;
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructGetterCall(*this, ivar, strategy.isAtomic(),
+ strategy.hasStrongMember());
+ return;
+
+ case PropertyImplStrategy::Expression:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ QualType ivarType = ivar->getType();
+ if (ivarType->isAnyComplexType()) {
+ ComplexPairTy pair = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+ StoreComplexToAddr(pair, ReturnValue, LV.isVolatileQualified());
+ } else if (hasAggregateLLVMType(ivarType)) {
+ // The return value slot is guaranteed to not be aliased, but
+ // that's not necessarily the same as "on the stack", so
+ // we still potentially need objc_memmove_collectable.
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
+ } else {
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress();
+ } else {
+ // We want to load and autoreleaseReturnValue ARC __weak ivars.
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
+
+ // Otherwise we want to do a simple load, suppressing the
+ // final autorelease.
+ } else {
+ value = EmitLoadOfLValue(LV).getScalarVal();
+ AutoreleaseResult = false;
+ }
+
+ value = Builder.CreateBitCast(value, ConvertType(propType));
+ }
+
+ EmitReturnOfRValue(RValue::get(value), propType);
+ }
+ return;
+ }
+
+ }
+ llvm_unreachable("bad @property implementation strategy!");
+}
+
+/// emitStructSetterCall - Call the runtime function to store the value
+/// from the first formal parameter into the given ivar.
+static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0)
+ .getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // The third argument is the sizeof the type.
+ llvm::Value *size =
+ CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
+ args.add(RValue::get(size), CGF.getContext().getSizeType());
+
+ // The fourth argument is the 'isAtomic' flag.
+ args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
+
+ // The fifth argument is the 'hasStrong' flag.
+ // FIXME: should this really always be false?
+ args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
+
+ llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyStructFn, ReturnValueSlot(), args);
+}
+
+/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
+/// the value from the first formal parameter into the given ivar, using
+/// the C++ API for atomic C++ objects with non-trivial copy assignment.
+static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
+ ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
+static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
+ Expr *setter = PID->getSetterCXXAssignment();
+ if (!setter) return true;
+
+  // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // An operator call is trivial if the function it calls is trivial.
+ // This also implies that there's nothing non-trivial going on with
+ // the arguments, because operator= can only be trivial if it's a
+ // synthesized assignment operator and therefore both parameters are
+ // references.
+ if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
+ if (const FunctionDecl *callee
+ = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
+ if (callee->isTrivial())
+ return true;
+ return false;
+ }
+
+ assert(isa<ExprWithCleanups>(setter));
+ return false;
+}
+
+static bool UseOptimizedSetter(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ return false;
+ const TargetInfo &Target = CGM.getContext().getTargetInfo();
+
+ if (Target.getPlatformName() != "macosx")
+ return false;
+
+ return Target.getPlatformMinVersion() >= VersionTuple(10, 8);
+}
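+
+// For illustration: when UseOptimizedSetter() holds, the specialized
+// runtime entry points objc_setProperty_atomic, objc_setProperty_nonatomic
+// and their _copy variants (first shipped with the OS X 10.8 runtime) are
+// used in place of the generic objc_setProperty.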
+
+void
+CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
+
+ // Just use the setter expression if Sema gave us one and it's
+ // non-trivial.
+ if (!hasTrivialSetExpr(propImpl)) {
+ if (!AtomicHelperFn)
+ // If non-atomic, assignment is called directly.
+ EmitStmt(propImpl->getSetterCXXAssignment());
+ else
+ // If atomic, assignment is called via a locking api.
+ emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
+ AtomicHelperFn);
+ return;
+ }
+
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
+
+ LValue ivarLValue =
+ EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
+ llvm::Value *ivarAddr = ivarLValue.getAddress();
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Cast both arguments to the chosen operation type.
+ argAddr = Builder.CreateBitCast(argAddr, bitcastType);
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+
+ // This bitcast load is likely to cause some nasty IR.
+ llvm::Value *load = Builder.CreateLoad(argAddr);
+
+ // Perform an atomic store. There are no memory ordering requirements.
+ llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
+ store->setAlignment(strategy.getIvarAlignment().getQuantity());
+ store->setAtomic(llvm::Unordered);
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+
+ llvm::Value *setOptimizedPropertyFn = 0;
+ llvm::Value *setPropertyFn = 0;
+ if (UseOptimizedSetter(CGM)) {
+      // Deployment target is OS X 10.8 or later, and GC is off.
+ setOptimizedPropertyFn =
+ CGM.getObjCRuntime()
+ .GetOptimizedPropertySetFunction(strategy.isAtomic(),
+ strategy.isCopy());
+ if (!setOptimizedPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
+ return;
+ }
+    } else {
+ setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
+ if (!setPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
+ return;
+ }
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ llvm::Value *cmd =
+ Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
+ llvm::Value *self =
+ Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+ llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
+ arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ if (setOptimizedPropertyFn) {
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setOptimizedPropertyFn, ReturnValueSlot(), args);
+ } else {
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+ args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
+ getContext().BoolTy);
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setPropertyFn, ReturnValueSlot(), args);
+ }
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructSetterCall(*this, setterMethod, ivar);
+ return;
+
+ case PropertyImplStrategy::Expression:
+ break;
+ }
+
+ // Otherwise, fake up some ASTs and emit a normal assignment.
+ ValueDecl *selfDecl = setterMethod->getSelfDecl();
+ DeclRefExpr self(selfDecl, false, selfDecl->getType(),
+ VK_LValue, SourceLocation());
+ ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
+ selfDecl->getType(), CK_LValueToRValue, &self,
+ VK_RValue);
+ ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
+ SourceLocation(), &selfLoad, true, true);
+
+ ParmVarDecl *argDecl = *setterMethod->param_begin();
+ QualType argType = argDecl->getType().getNonReferenceType();
+ DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
+ ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
+ argType.getUnqualifiedType(), CK_LValueToRValue,
+ &arg, VK_RValue);
+
+ // The property type can differ from the ivar type in some situations with
+  // Objective-C pointer types; we can always bitcast the RHS in these cases.
+ // The following absurdity is just to ensure well-formed IR.
+ CastKind argCK = CK_NoOp;
+ if (ivarRef.getType()->isObjCObjectPointerType()) {
+ if (argLoad.getType()->isObjCObjectPointerType())
+ argCK = CK_BitCast;
+ else if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BlockPointerToObjCPointerCast;
+ else
+ argCK = CK_CPointerToObjCPointerCast;
+ } else if (ivarRef.getType()->isBlockPointerType()) {
+ if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BitCast;
+ else
+ argCK = CK_AnyPointerToBlockPointerCast;
+ } else if (ivarRef.getType()->isPointerType()) {
+ argCK = CK_BitCast;
+ }
+ ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
+ ivarRef.getType(), argCK, &argLoad,
+ VK_RValue);
+ Expr *finalArg = &argLoad;
+ if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
+ argLoad.getType()))
+ finalArg = &argCast;
+
+ BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
+ ivarRef.getType(), VK_RValue, OK_Ordinary,
+ SourceLocation());
+ EmitStmt(&assign);
+}
+
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicSetterCopyHelperFunction(PID);
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+
+ generateObjCSetterBody(IMP, PID, AtomicHelperFn);
+
+ FinishFunction();
+}
+
+namespace {
+ struct DestroyIvar : EHScopeStack::Cleanup {
+ private:
+ llvm::Value *addr;
+ const ObjCIvarDecl *ivar;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+ public:
+ DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), ivar(ivar), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
+ }
+ };
+}
+
+/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
+static void destroyARCStrongWithStore(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *null = getNullForVariable(addr);
+ CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+}
+
+static void emitCXXDestructMethod(CodeGenFunction &CGF,
+ ObjCImplementationDecl *impl) {
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ QualType type = ivar->getType();
+
+ // Check whether the ivar is a destructible type.
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ // Use a call to objc_storeStrong to destroy strong ivars, for the
+ // general benefit of the tools.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = destroyARCStrongWithStore;
+
+ // Otherwise use the default for the destruction kind.
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
+
+ CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
+ cleanupKind & EHCleanup);
+ }
+
+ assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
+}
+
+void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD,
+ bool ctor) {
+ MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
+
+ // Emit .cxx_construct.
+ if (ctor) {
+ // Suppress the final autorelease in ARC.
+ AutoreleaseResult = false;
+
+ SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
+ for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
+ E = IMP->init_end(); B != E; ++B) {
+ CXXCtorInitializer *IvarInit = (*B);
+ FieldDecl *Field = IvarInit->getAnyMember();
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ EmitAggExpr(IvarInit->getInit(),
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+ // constructor returns 'self'.
+ CodeGenTypes &Types = CGM.getTypes();
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+
+ // Emit .cxx_destruct.
+ } else {
+ emitCXXDestructMethod(*this, IMP);
+ }
+ FinishFunction();
+}
+
+bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
+ it++; it++;
+ const ABIArgInfo &AI = it->info;
+  // FIXME: Is this check sufficient?
+ return (AI.getKind() == ABIArgInfo::Indirect);
+}
+
+bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ return false;
+ if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
+ return FDTTy->getDecl()->hasObjectMember();
+ return false;
+}
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+ getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
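+
+  // For illustration: the code below lowers a statement such as
+  //   for (id x in collection) { ... }
+  // into repeated sends of countByEnumeratingWithState:objects:count:
+  // that refill a 16-slot on-stack buffer, with a mutation check on each
+  // element.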
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // The local variable comes into scope immediately.
+ AutoVarEmission variable = AutoVarEmission::invalid();
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
+ variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
+
+ JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
+
+ // Fast enumeration state.
+ QualType StateTy = CGM.getObjCFastEnumerationStateType();
+ llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ EmitNullInitialization(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Fetch the countByEnumeratingWithState:objects:count: selector.
+ IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ Selector FastEnumSel =
+ CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+
+ // Emit the collection pointer. In ARC, we do a retain.
+ llvm::Value *Collection;
+ if (getLangOpts().ObjCAutoRefCount) {
+ Collection = EmitARCRetainScalarExpr(S.getCollection());
+
+ // Enter a cleanup to do the release.
+ EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
+ } else {
+ Collection = EmitScalarExpr(S.getCollection());
+ }
+
+ // The 'continue' label needs to appear within the cleanup for the
+ // collection object.
+ JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
+
+ // Send it our message:
+ CallArgList Args;
+
+ // The first argument is a temporary of the enumeration-state type.
+ Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
+
+ // The second argument is a temporary array with space for NumItems
+ // pointers. We'll actually be loading elements from the array
+ // pointer written into the control state; this buffer is so that
+ // collections that *aren't* backed by arrays can still queue up
+ // batches of elements.
+ Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
+
+ // The third argument is the capacity of that temporary array.
+ llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.add(RValue::get(Count), getContext().UnsignedLongTy);
+
+ // Start the enumeration.
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ // The initial number of objects that were returned in the buffer.
+ llvm::Value *initialBufferLimit = CountRV.getScalarVal();
+
+ llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
+ llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
+
+ llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ // If the limit pointer was zero to begin with, the collection is
+ // empty; skip all this.
+ Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
+ EmptyBB, LoopInitBB);
+
+ // Otherwise, initialize the loop.
+ EmitBlock(LoopInitBB);
+
+ // Save the initial mutations value. This is the value at an
+ // address that was written into the state object by
+ // countByEnumeratingWithState:objects:count:.
+ llvm::Value *StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ "mutationsptr");
+
+ llvm::Value *initialMutations =
+ Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
+
+ // Start looping. This is the point we return to whenever we have a
+ // fresh, non-empty batch of objects.
+ llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
+ EmitBlock(LoopBodyBB);
+
+ // The current index into the buffer.
+ llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
+ index->addIncoming(zero, LoopInitBB);
+
+ // The current buffer size.
+ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
+ count->addIncoming(initialBufferLimit, LoopInitBB);
+
+ // Check whether the mutations value has changed from where it was
+ // at start. StateMutationsPtr should actually be invariant between
+ // refreshes.
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ llvm::Value *currentMutations
+ = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+ llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
+ llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
+ WasNotMutatedBB, WasMutatedBB);
+
+ // If so, call the enumeration-mutation function.
+ EmitBlock(WasMutatedBB);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()));
+ CallArgList Args2;
+ Args2.add(RValue::get(V), getContext().getObjCIdType());
+ // FIXME: We shouldn't need to get the function info here, the runtime already
+ // should have computed it to build the function.
+ EmitCall(CGM.getTypes().arrangeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ EnumerationMutationFn, ReturnValueSlot(), Args2);
+
+ // Otherwise, or if the mutation function returns, just continue.
+ EmitBlock(WasNotMutatedBB);
+
+ // Initialize the element variable.
+ RunCleanupsScope elementVariableScope(*this);
+ bool elementIsVariable;
+ LValue elementLValue;
+ QualType elementType;
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ // Initialize the variable, in case it's a __block variable or something.
+ EmitAutoVarInit(variable);
+
+ const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
+ VK_LValue, SourceLocation());
+ elementLValue = EmitLValue(&tempDRE);
+ elementType = D->getType();
+ elementIsVariable = true;
+
+ if (D->isARCPseudoStrong())
+ elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ } else {
+ elementLValue = LValue(); // suppress warning
+ elementType = cast<Expr>(S.getElement())->getType();
+ elementIsVariable = false;
+ }
+ llvm::Type *convertedElementType = ConvertType(elementType);
+
+ // Fetch the buffer out of the enumeration state.
+ // TODO: this pointer should actually be invariant between
+ // refreshes, which would help us do certain loop optimizations.
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+ llvm::Value *EnumStateItems =
+ Builder.CreateLoad(StateItemsPtr, "stateitems");
+
+ // Fetch the value at the current index from the buffer.
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
+
+ // Cast that value to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
+ "currentitem");
+
+ // Make sure we have an l-value. Yes, this gets evaluated every
+ // time through the loop.
+ if (!elementIsVariable) {
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
+ } else {
+ EmitScalarInit(CurrentItem, elementLValue);
+ }
+
+ // If we do have an element variable, this assignment is the end of
+ // its initialization.
+ if (elementIsVariable)
+ EmitAutoVarCleanups(variable);
+
+ // Perform the loop body, setting up break and continue labels.
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+ {
+ RunCleanupsScope Scope(*this);
+ EmitStmt(S.getBody());
+ }
+ BreakContinueStack.pop_back();
+
+ // Destroy the element variable now.
+ elementVariableScope.ForceCleanup();
+
+ // Check whether there are more elements.
+ EmitBlock(AfterBody.getBlock());
+
+ llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
+
+ // First we check in the local buffer.
+ llvm::Value *indexPlusOne
+ = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
+
+ // If we haven't overrun the buffer yet, we can continue.
+ Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
+ LoopBodyBB, FetchMoreBB);
+
+ index->addIncoming(indexPlusOne, AfterBody.getBlock());
+ count->addIncoming(count, AfterBody.getBlock());
+
+ // Otherwise, we have to fetch more elements.
+ EmitBlock(FetchMoreBB);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ // If we got a zero count, we're done.
+ llvm::Value *refetchCount = CountRV.getScalarVal();
+
+ // (note that the message send might split FetchMoreBB)
+ index->addIncoming(zero, Builder.GetInsertBlock());
+ count->addIncoming(refetchCount, Builder.GetInsertBlock());
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
+ EmptyBB, LoopBodyBB);
+
+ // No more elements.
+ EmitBlock(EmptyBB);
+
+ if (!elementIsVariable) {
+ // If the element was not a declaration, set it to be null.
+
+ llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(null), elementLValue);
+ }
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Leave the cleanup we entered in ARC.
+ if (getLangOpts().ObjCAutoRefCount)
+ PopCleanupBlock();
+
+ EmitBlock(LoopEnd.getBlock());
+}
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S) {
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
+}
+
+/// Produce the code for a CK_ARCProduceObject. Just does a
+/// primitive retain.
+llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetain(type, value);
+}
+
+namespace {
+ struct CallObjCRelease : EHScopeStack::Cleanup {
+ CallObjCRelease(llvm::Value *object) : object(object) {}
+ llvm::Value *object;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitARCRelease(object, /*precise*/ true);
+ }
+ };
+}
+
+/// Produce the code for a CK_ARCConsumeObject. Does a primitive
+/// release at the end of the full-expression.
+llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
+ llvm::Value *object) {
+ // If we're in a conditional branch, we need to make the cleanup
+ // conditional.
+ pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
+ return object;
+}
+
+llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetainAutorelease(type, value);
+}
+
+
+static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
+ llvm::FunctionType *type,
+ StringRef fnName) {
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+
+ // In -fobjc-no-arc-runtime, emit weak references to the runtime
+ // support library.
+ if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ f->setLinkage(llvm::Function::ExternalWeakLinkage);
+
+ return fn;
+}
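+
+// For example (a sketch, assuming a target whose runtime is not known to
+// have ARC support), the resulting declaration is weakly linked:
+//   declare extern_weak i8* @objc_retain(i8*)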
+
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id'.
+ llvm::Type *origType = value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
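+
+// As an illustrative sketch, calling emitARCValueOperation on a value of
+// type %T* with fnName "objc_retain" emits roughly:
+//   %0 = bitcast %T* %value to i8*
+//   %1 = call i8* @objc_retain(i8* %0)      ; marked nounwind
+//   %2 = bitcast i8* %1 to %T*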
+
+/// Perform an operation having the following signature:
+/// i8* (i8**)
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id*'.
+ llvm::Type *origType = addr->getType();
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+
+ // Cast the result back to a dereference of the original type.
+ llvm::Value *result = call;
+ if (origType != CGF.Int8PtrPtrTy)
+ result = CGF.Builder.CreateBitCast(result,
+ cast<llvm::PointerType>(origType)->getElementType());
+
+ return result;
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**, i8*)
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ StringRef fnName,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ if (!fn) {
+ llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
+
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ llvm::Type *origType = value->getType();
+
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
+ result->setDoesNotThrow();
+
+ if (ignored) return 0;
+
+ return CGF.Builder.CreateBitCast(result, origType);
+}
+
+/// Perform an operation having the following signature:
+/// void (i8**, i8**)
+static void emitARCCopyOperation(CodeGenFunction &CGF,
+ llvm::Value *dst,
+ llvm::Value *src,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ assert(dst->getType() == src->getType());
+
+ if (!fn) {
+ std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
+ src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
+ result->setDoesNotThrow();
+}
+
+/// Produce the code to do a retain. Based on the type, calls one of:
+/// call i8* @objc_retain(i8* %value)
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
+ if (type->isBlockPointerType())
+ return EmitARCRetainBlock(value, /*mandatory*/ false);
+ else
+ return EmitARCRetainNonBlock(value);
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* @objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retain,
+ "objc_retain");
+}
+
+/// Retain the given block, with _Block_copy semantics.
+/// call i8* @objc_retainBlock(i8* %value)
+///
+/// \param mandatory - If false, emit the call with metadata
+/// indicating that it's okay for the optimizer to eliminate this call
+/// if it can prove that the block never escapes except down the stack.
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
+ bool mandatory) {
+ llvm::Value *result
+ = emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+
+ // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
+ // tell the optimizer that it doesn't need to do this copy if the
+ // block doesn't escape, where being passed as an argument doesn't
+ // count as escaping.
+ if (!mandatory && isa<llvm::Instruction>(result)) {
+ llvm::CallInst *call
+ = cast<llvm::CallInst>(result->stripPointerCasts());
+ assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
+
+ SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.arc.copy_on_escape",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+
+ return result;
+}
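+
+// Sketch of the non-mandatory case: the emitted call carries an empty
+// metadata node so the optimizer may drop it for non-escaping blocks:
+//   %1 = call i8* @objc_retainBlock(i8* %0), !clang.arc.copy_on_escape !0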
+
+/// Retain the given object which is the result of a function call.
+/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+///
+/// Yes, this function name is one character away from a different
+/// call with completely different semantics.
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
+ // Fetch the void(void) inline asm which marks that we're going to
+ // retain the autoreleased return value.
+ llvm::InlineAsm *&marker
+ = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
+ if (!marker) {
+ StringRef assembly
+ = CGM.getTargetCodeGenInfo()
+ .getARCRetainAutoreleasedReturnValueMarker();
+
+ // If we have an empty assembly string, there's nothing to do.
+ if (assembly.empty()) {
+
+ // Otherwise, at -O0, build an inline asm that we're going to call
+ // in a moment.
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::FunctionType *type =
+ llvm::FunctionType::get(VoidTy, /*variadic*/false);
+
+ marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
+
+ // If we're at -O1 and above, we don't want to litter the code
+ // with this marker yet, so leave a breadcrumb for the ARC
+ // optimizer to pick up.
+ } else {
+ llvm::NamedMDNode *metadata =
+ CGM.getModule().getOrInsertNamedMetadata(
+ "clang.arc.retainAutoreleasedReturnValueMarker");
+ assert(metadata->getNumOperands() <= 1);
+ if (metadata->getNumOperands() == 0) {
+ llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
+ metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
+ }
+ }
+ }
+
+ // Call the marker asm if we made one, which we do only at -O0.
+ if (marker) Builder.CreateCall(marker);
+
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
+ "objc_retainAutoreleasedReturnValue");
+}
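+
+// Sketch of the -O0 output, assuming a target whose marker string is
+// "mov\09r7, r7" (the actual string is entirely target-defined):
+//   call void asm sideeffect "mov\09r7, r7", ""()
+//   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)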
+
+/// Release the given object.
+/// call void @objc_release(i8* %value)
+void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ if (!precise) {
+ SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+}
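+
+// Sketch of an imprecise release, which the ARC optimizer is allowed to
+// move somewhat freely relative to uses of interior pointers:
+//   call void @objc_release(i8* %0), !clang.imprecise_release !0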
+
+/// Store into a strong object. Always calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
+ if (!fn) {
+ llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ }
+
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
+
+ Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
+
+ if (ignored) return 0;
+ return value;
+}
+
+/// Store into a strong object. Sometimes calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// Other times, breaks it down into components.
+llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
+ llvm::Value *newValue,
+ bool ignored) {
+ QualType type = dst.getType();
+ bool isBlock = type->isBlockPointerType();
+
+ // Use a store barrier at -O0 unless this is a block type or the
+ // lvalue is inadequately aligned.
+ if (shouldUseFusedARCCalls() &&
+ !isBlock &&
+ (dst.getAlignment().isZero() ||
+ dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
+ }
+
+ // Otherwise, split it out.
+
+ // Retain the new value.
+ newValue = EmitARCRetain(type, newValue);
+
+ // Read the old value.
+ llvm::Value *oldValue = EmitLoadOfScalar(dst);
+
+ // Store. We do this before the release so that any deallocs won't
+ // see the old value.
+ EmitStoreOfScalar(newValue, dst);
+
+ // Finally, release the old value.
+ EmitARCRelease(oldValue, /*precise*/ false);
+
+ return newValue;
+}
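+
+// Sketch of the split form above (bitcasts omitted):
+//   %new = call i8* @objc_retain(i8* %newValue)
+//   %old = load i8** %addr
+//   store i8* %new, i8** %addr
+//   call void @objc_release(i8* %old), !clang.imprecise_release !0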
+
+/// Autorelease the given object.
+/// call i8* @objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autorelease,
+ "objc_autorelease");
+}
+
+/// Autorelease the given object.
+/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
+ "objc_autoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
+ "objc_retainAutoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+/// or
+/// %retain = call i8* @objc_retainBlock(i8* %value)
+/// call i8* @objc_autorelease(i8* %retain)
+llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
+ llvm::Value *value) {
+ if (!type->isBlockPointerType())
+ return EmitARCRetainAutoreleaseNonBlock(value);
+
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ llvm::Type *origType = value->getType();
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+ value = EmitARCRetainBlock(value, /*mandatory*/ true);
+ value = EmitARCAutorelease(value);
+ return Builder.CreateBitCast(value, origType);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutorelease,
+ "objc_retainAutorelease");
+}
+
+/// i8* @objc_loadWeak(i8** %addr)
+/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeak,
+ "objc_loadWeak");
+}
+
+/// i8* @objc_loadWeakRetained(i8** %addr)
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeakRetained,
+ "objc_loadWeakRetained");
+}
+
+/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// Returns %value.
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ return emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_storeWeak,
+ "objc_storeWeak", ignored);
+}
+
+/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// Returns %value. %addr is known to not have a current weak entry.
+/// Essentially equivalent to:
+/// *addr = nil; objc_storeWeak(addr, value);
+void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+ // If we're initializing to null, just write null to memory; no need
+ // to get the runtime involved. But don't do this if optimization
+ // is enabled, because accounting for this would make the optimizer
+ // much more complicated.
+ if (isa<llvm::ConstantPointerNull>(value) &&
+ CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ Builder.CreateStore(value, addr);
+ return;
+ }
+
+ emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_initWeak,
+ "objc_initWeak", /*ignored*/ true);
+}
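+
+// Sketch of the two paths: at -O0, "__weak id w = nil;" becomes a plain
+//   store i8* null, i8** %w
+// while any other initializer calls into the runtime:
+//   call i8* @objc_initWeak(i8** %w, i8* %value)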
+
+/// void @objc_destroyWeak(i8** %addr)
+/// Essentially objc_storeWeak(addr, nil).
+void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ }
+
+ // Cast the argument to 'id*'.
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+
+ llvm::CallInst *call = Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+}
+
+/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Leaves %src pointing to nothing.
+/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
+void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_moveWeak,
+ "objc_moveWeak");
+}
+
+/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Essentially
+/// objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
+void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_copyWeak,
+ "objc_copyWeak");
+}
+
+/// Produce the code to do an objc_autoreleasepool_push.
+/// call i8* @objc_autoreleasePoolPush(void)
+llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn);
+ call->setDoesNotThrow();
+
+ return call;
+}
+
+/// Produce the code to pop an autorelease pool.
+/// call void @objc_autoreleasePoolPop(i8* %ptr)
+void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
+ assert(value->getType() == Int8PtrTy);
+
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+
+ // We don't want to use a weak import here; instead we should not
+ // fall into this path.
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+}
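+
+// Typical pairing of the push and pop emitted by these two functions
+// (sketch):
+//   %token = call i8* @objc_autoreleasePoolPush()
+//   ...
+//   call void @objc_autoreleasePoolPop(i8* %token)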
+
+/// Produce the code to do an MRR version of objc_autoreleasepool_push,
+/// which is: [[NSAutoreleasePool alloc] init];
+/// where alloc is declared as + (id)alloc in the NSAutoreleasePool class,
+/// and init is declared as - (id)init in its NSObject superclass.
+///
+llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
+ // [NSAutoreleasePool alloc]
+ IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ Selector AllocSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ RValue AllocRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ AllocSel, Receiver, Args);
+
+ // [Receiver init]
+ Receiver = AllocRV.getScalarVal();
+ II = &CGM.getContext().Idents.get("init");
+ Selector InitSel = getContext().Selectors.getSelector(0, &II);
+ RValue InitRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ InitSel, Receiver, Args);
+ return InitRV.getScalarVal();
+}
+
+/// Produce the code to pop an MRR autorelease pool.
+/// [tmp drain];
+void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ Selector DrainSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, DrainSel, Arg, Args);
+}
+
+void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ true);
+}
+
+void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ false);
+}
+
+void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ CGF.EmitARCDestroyWeak(addr);
+}
+
+namespace {
+ struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCAutoreleasePoolPop(Token);
+ }
+ };
+ struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCMRRAutoreleasePoolPop(Token);
+ }
+ };
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
+ else
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
+ false);
+
+ case Qualifiers::OCL_Weak:
+ return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
+ true);
+ }
+
+ llvm_unreachable("impossible lifetime!");
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ const Expr *e) {
+ e = e->IgnoreParens();
+ QualType type = e->getType();
+
+ // If we're loading retained from a __strong xvalue, we can avoid
+ // an extra retain/release pair by zeroing out the source of this
+ // "move" operation.
+ if (e->isXValue() &&
+ !type.isConstQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Emit the lvalue.
+ LValue lv = CGF.EmitLValue(e);
+
+ // Load the object pointer.
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
+
+ // Set the source pointer to NULL.
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
+
+ return TryEmitResult(result, true);
+ }
+
+ // As a very special optimization, in ARC++, if the l-value is the
+ // result of a non-volatile assignment, do a simple retain of the
+ // result of the call to objc_storeWeak instead of reloading.
+ if (CGF.getLangOpts().CPlusPlus &&
+ !type.isVolatileQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ isa<BinaryOperator>(e) &&
+ cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
+ return TryEmitResult(CGF.EmitScalarExpr(e), false);
+
+ return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
+}
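+
+// A source-level sketch of the xvalue case above in ARC++:
+//   __strong id a = ...;
+//   id b = std::move(a);  // load from a, store nil back; no retain/release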
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value);
+
+/// Given that the given expression is some sort of call (which does
+/// not return retained), emit a retain following it.
+static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
+ llvm::Value *value = CGF.EmitScalarExpr(e);
+ return emitARCRetainAfterCall(CGF, value);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value) {
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain immediately following the call.
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+ } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain at the beginning of the normal destination block.
+ llvm::BasicBlock *BB = invoke->getNormalDest();
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+
+ // Bitcasts can arise because of related-result returns. Rewrite
+ // the operand.
+ } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ llvm::Value *operand = bitcast->getOperand(0);
+ operand = emitARCRetainAfterCall(CGF, operand);
+ bitcast->setOperand(0, operand);
+ return bitcast;
+
+ // Generic fall-back case.
+ } else {
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ return CGF.EmitARCRetainNonBlock(value);
+ }
+}
+
+/// Determine whether it might be important to emit a separate
+/// objc_retain_block on the result of the given expression, or
+/// whether it's okay to just emit it in a +1 context.
+static bool shouldEmitSeparateBlockRetain(const Expr *e) {
+ assert(e->getType()->isBlockPointerType());
+ e = e->IgnoreParens();
+
+ // For future goodness, emit block expressions directly in +1
+ // contexts if we can.
+ if (isa<BlockExpr>(e))
+ return false;
+
+ if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ // Emitting these operations in +1 contexts is goodness.
+ case CK_LValueToRValue:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCProduceObject:
+ return false;
+
+ // These operations preserve a block type.
+ case CK_NoOp:
+ case CK_BitCast:
+ return shouldEmitSeparateBlockRetain(cast->getSubExpr());
+
+ // These operations are known to be bad (or haven't been considered).
+ case CK_AnyPointerToBlockPointerCast:
+ default:
+ return true;
+ }
+ }
+
+ return true;
+}
+
+/// Try to emit a PseudoObjectExpr at +1.
+///
+/// This massively duplicates emitPseudoObjectRValue.
+static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression.
+ const Expr *resultExpr = E->getResultExpr();
+ assert(resultExpr);
+ TryEmitResult result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+
+ // If this semantic is the result of the pseudo-object
+ // expression, try to evaluate the source as +1.
+ if (ov == resultExpr) {
+ assert(!OVMA::shouldBindAsLValue(ov));
+ result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
+ opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
+
+ // Otherwise, just bind it.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+ }
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ result = tryEmitARCRetainScalarExpr(CGF, semantic);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ // Look through cleanups.
+ if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ CGF.enterFullExpression(cleanups);
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+ return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
+ }
+
+ // The desired result type, if it differs from the type of the
+ // ultimate opaque expression.
+ llvm::Type *resultType = 0;
+
+ while (true) {
+ e = e->IgnoreParens();
+
+ // There's a break at the end of this if-chain; anything
+ // that wants to keep looping has to explicitly continue.
+ if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ // No-op casts don't change the type, so we just ignore them.
+ case CK_NoOp:
+ e = ce->getSubExpr();
+ continue;
+
+ case CK_LValueToRValue: {
+ TryEmitResult loadResult
+ = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
+ if (resultType) {
+ llvm::Value *value = loadResult.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ loadResult.setPointer(value);
+ }
+ return loadResult;
+ }
+
+ // These casts can change the type, so remember that and
+ // soldier on. We only need to remember the outermost such
+ // cast, though.
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast:
+ if (!resultType)
+ resultType = CGF.ConvertType(ce->getType());
+ e = ce->getSubExpr();
+ assert(e->getType()->hasPointerRepresentation());
+ continue;
+
+ // For consumptions, just emit the subexpression and thus elide
+ // the retain/release pair.
+ case CK_ARCConsumeObject: {
+ llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Block extends are net +0. Naively, we could just recurse on
+ // the subexpression, but actually we need to ensure that the
+ // value is copied as a block, so there's a little filter here.
+ case CK_ARCExtendBlockObject: {
+ llvm::Value *result; // will be a +0 value
+
+ // If we can't safely assume the sub-expression will produce a
+ // block-copied value, emit the sub-expression at +0.
+ if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
+ result = CGF.EmitScalarExpr(ce->getSubExpr());
+
+ // Otherwise, try to emit the sub-expression at +1 recursively.
+ } else {
+ TryEmitResult subresult
+ = tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
+ result = subresult.getPointer();
+
+ // If that produced a retained value, just use that,
+ // possibly casting down.
+ if (subresult.getInt()) {
+ if (resultType)
+ result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Otherwise it's +0.
+ }
+
+ // Retain the object as a block, then cast down.
+ result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // For reclaims, emit the subexpression as a retained call and
+ // skip the consumption.
+ case CK_ARCReclaimReturnedObject: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ default:
+ break;
+ }
+
+ // Skip __extension__.
+ } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_Extension) {
+ e = op->getSubExpr();
+ continue;
+ }
+
+ // For calls and message sends, use the retained-call logic.
+ // Delegate inits are a special case in that they're the only
+ // returns-retained expression that *isn't* surrounded by
+ // a consume.
+ } else if (isa<CallExpr>(e) ||
+ (isa<ObjCMessageExpr>(e) &&
+ !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
+ llvm::Value *result = emitARCRetainCall(CGF, e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+
+ // Look through pseudo-object expressions.
+ } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ TryEmitResult result
+ = tryEmitARCRetainPseudoObject(CGF, pseudo);
+ if (resultType) {
+ llvm::Value *value = result.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ result.setPointer(value);
+ }
+ return result;
+ }
+
+ // Conservatively halt the search at any other expression kind.
+ break;
+ }
+
+ // We didn't find an obvious production, so emit what we've got and
+ // tell the caller that we didn't manage to retain.
+ llvm::Value *result = CGF.EmitScalarExpr(e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, false);
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = CGF.EmitARCRetain(type, value);
+ return value;
+}
+
+/// EmitARCRetainScalarExpr - Semantically equivalent to
+/// EmitARCRetain(e->getType(), EmitScalarExpr(e)), but making a
+/// best-effort attempt to peephole expressions that naturally produce
+/// retained objects.
+llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = EmitARCRetain(e->getType(), value);
+ return value;
+}
+
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (result.getInt())
+ value = EmitARCAutorelease(value);
+ else
+ value = EmitARCRetainAutorelease(e->getType(), value);
+ return value;
+}
+
+llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
+ llvm::Value *result;
+ bool doRetain;
+
+ if (shouldEmitSeparateBlockRetain(e)) {
+ result = EmitScalarExpr(e);
+ doRetain = true;
+ } else {
+ TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
+ result = subresult.getPointer();
+ doRetain = !subresult.getInt();
+ }
+
+ if (doRetain)
+ result = EmitARCRetainBlock(result, /*mandatory*/ true);
+ return EmitObjCConsumeObject(e->getType(), result);
+}
+
+llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
+ // In ARC, retain and autorelease the expression.
+ if (getLangOpts().ObjCAutoRefCount) {
+ // Do so before running any cleanups for the full-expression.
+ // tryEmitARCRetainScalarExpr does make an effort to do things
+ // inside cleanups, but there are crazy cases like
+ // @throw A().foo;
+ // where a full retain+autorelease is required and would
+ // otherwise happen after the destructor for the temporary.
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr)) {
+ enterFullExpression(ewc);
+ expr = ewc->getSubExpr();
+ }
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+ return EmitARCRetainAutoreleaseScalarExpr(expr);
+ }
+
+  // Otherwise, use the normal scalar-expression emission. The
+  // exception machinery doesn't do anything special with the
+  // exception (such as retaining it), so there's no safety benefit to
+  // running cleanups only after the throw has started, and doing so
+  // tends to produce substantially inferior code when it matters.
+ return EmitScalarExpr(expr);
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
+ bool ignored) {
+ // Evaluate the RHS first.
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
+ llvm::Value *value = result.getPointer();
+
+ bool hasImmediateRetain = result.getInt();
+
+ // If we didn't emit a retained object, and the l-value is of block
+ // type, then we need to emit the block-retain immediately in case
+ // it invalidates the l-value.
+ if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
+ value = EmitARCRetainBlock(value, /*mandatory*/ false);
+ hasImmediateRetain = true;
+ }
+
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ // If the RHS was emitted retained, expand this.
+ if (hasImmediateRetain) {
+ llvm::Value *oldValue =
+ EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ } else {
+ value = EmitARCStoreStrong(lvalue, value, ignored);
+ }
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
+ llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ EmitStoreOfScalar(value, lvalue);
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
+ const ObjCAutoreleasePoolStmt &ARPS) {
+ const Stmt *subStmt = ARPS.getSubStmt();
+ const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+ if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
+ } else {
+ llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
+ }
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end(); I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
+}
+
+/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+/// make sure it survives garbage collection until this point.
+void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
+  // We just use an empty inline assembly statement.
+  llvm::FunctionType *extenderType
+    = llvm::FunctionType::get(VoidTy, VoidPtrTy, /*isVarArg=*/false);
+ llvm::Value *extender
+ = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
+
+ object = Builder.CreateBitCast(object, VoidPtrTy);
+ Builder.CreateCall(extender, object)->setDoesNotThrow();
+}
+
+/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
+/// a non-trivial copy-assignment operator, produce the following helper:
+/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
+///
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+  // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ QualType Ty = PID->getPropertyIvarDecl()->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
+    return 0;
+  llvm::Constant *HelperFn = 0;
+ if (hasTrivialSetExpr(PID))
+ return 0;
+ assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
+ if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
+ return HelperFn;
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__assign_helper_atomic_property_",
+ &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ Expr *Args[2] = { &DST, &SRC };
+ CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
+ CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
+ Args, 2, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation());
+
+ EmitStmt(&TheCall);
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
+
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+  // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ QualType Ty = PD->getType();
+ if (!Ty->isRecordType())
+ return 0;
+  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
+    return 0;
+  llvm::Constant *HelperFn = 0;
+
+ if (hasTrivialGetExpr(PID))
+ return 0;
+ assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
+ if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
+ return HelperFn;
+
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_atomic_property_", &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ CXXConstructExpr *CXXConstExpr =
+ cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
+
+ SmallVector<Expr*, 4> ConstructorArgs;
+ ConstructorArgs.push_back(&SRC);
+ CXXConstructExpr::arg_iterator A = CXXConstExpr->arg_begin();
+ ++A;
+
+ for (CXXConstructExpr::arg_iterator AEnd = CXXConstExpr->arg_end();
+ A != AEnd; ++A)
+ ConstructorArgs.push_back(*A);
+
+ CXXConstructExpr *TheCXXConstructExpr =
+ CXXConstructExpr::Create(C, Ty, SourceLocation(),
+ CXXConstExpr->getConstructor(),
+ CXXConstExpr->isElidable(),
+ &ConstructorArgs[0], ConstructorArgs.size(),
+ CXXConstExpr->hadMultipleCandidates(),
+ CXXConstExpr->isListInitialization(),
+ CXXConstExpr->requiresZeroInitialization(),
+ CXXConstExpr->getConstructionKind(),
+ SourceRange());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+
+ RValue DV = EmitAnyExpr(&DstExpr);
+ CharUnits Alignment
+ = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ EmitAggExpr(TheCXXConstructExpr,
+ AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
+
+llvm::Value *
+CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
+ // Get selectors for retain/autorelease.
+ IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+ Selector CopySelector =
+ getContext().Selectors.getNullarySelector(CopyID);
+ IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+ Selector AutoreleaseSelector =
+ getContext().Selectors.getNullarySelector(AutoreleaseID);
+
+ // Emit calls to retain/autorelease.
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Val = Block;
+ RValue Result;
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, CopySelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, AutoreleaseSelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ return Val;
+}
+
+
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..db0bd95
--- /dev/null
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,2671 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <cstdarg>
+
+
+using namespace clang;
+using namespace CodeGen;
+
+
+namespace {
+/// Class that lazily initialises the runtime function. Avoids inserting the
+/// types and the function declaration into a module if they're not used, and
+/// avoids constructing the type more than once if it's used more than once.
+class LazyRuntimeFunction {
+ CodeGenModule *CGM;
+ std::vector<llvm::Type*> ArgTys;
+ const char *FunctionName;
+ llvm::Constant *Function;
+ public:
+ /// Constructor leaves this class uninitialized, because it is intended to
+ /// be used as a field in another class and not all of the types that are
+ /// used as arguments will necessarily be available at construction time.
+ LazyRuntimeFunction() : CGM(0), FunctionName(0), Function(0) {}
+
+ /// Initialises the lazy function with the name, return type, and the types
+ /// of the arguments.
+ END_WITH_NULL
+ void init(CodeGenModule *Mod, const char *name,
+ llvm::Type *RetTy, ...) {
+    CGM = Mod;
+ FunctionName = name;
+ Function = 0;
+ ArgTys.clear();
+ va_list Args;
+ va_start(Args, RetTy);
+ while (llvm::Type *ArgTy = va_arg(Args, llvm::Type*))
+ ArgTys.push_back(ArgTy);
+ va_end(Args);
+    // Push the return type onto the end so we can pop it off easily
+ ArgTys.push_back(RetTy);
+ }
+  /// Overloaded cast operator that allows the class to be implicitly cast to
+  /// an LLVM constant.
+ operator llvm::Constant*() {
+ if (!Function) {
+ if (0 == FunctionName) return 0;
+ // We put the return type on the end of the vector, so pop it back off
+ llvm::Type *RetTy = ArgTys.back();
+ ArgTys.pop_back();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
+ Function =
+ cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName));
+ // We won't need to use the types again, so we may as well clean up the
+ // vector now
+ ArgTys.resize(0);
+ }
+ return Function;
+ }
+ operator llvm::Function*() {
+ return cast<llvm::Function>((llvm::Constant*)*this);
+ }
+
+};
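+
+// A usage sketch (assuming types such as IMPTy, IdTy, and SelectorTy set up
+// elsewhere in this file): declare a field, initialise it once with the
+// argument types, and the implicit conversion emits the declaration only
+// when the function is actually referenced:
+//   LazyRuntimeFunction MsgLookupFn;
+//   MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+//   llvm::Constant *lookup = MsgLookupFn;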
+
+
+/// GNU Objective-C runtime code generation. This class implements the parts of
+/// Objective-C support that are specific to the GNU family of runtimes (GCC and
+/// GNUstep).
+class CGObjCGNU : public CGObjCRuntime {
+protected:
+ /// The LLVM module into which output is inserted
+ llvm::Module &TheModule;
+  /// struct objc_super. Used for sending messages to super. This structure
+ /// contains the receiver (object) and the expected class.
+ llvm::StructType *ObjCSuperTy;
+ /// struct objc_super*. The type of the argument to the superclass message
+ /// lookup functions.
+ llvm::PointerType *PtrToObjCSuperTy;
+  /// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
+  /// SEL is included somewhere, in which case it will be whatever type is
+  /// declared in that header, most likely {i8*, i8*}.
+ llvm::PointerType *SelectorTy;
+ /// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
+ /// places where it's used
+ llvm::IntegerType *Int8Ty;
+ /// Pointer to i8 - LLVM type of char*, for all of the places where the
+ /// runtime needs to deal with C strings.
+ llvm::PointerType *PtrToInt8Ty;
+ /// Instance Method Pointer type. This is a pointer to a function that takes,
+ /// at a minimum, an object and a selector, and is the generic type for
+ /// Objective-C methods. Due to differences between variadic / non-variadic
+ /// calling conventions, it must always be cast to the correct type before
+ /// actually being used.
+ llvm::PointerType *IMPTy;
+ /// Type of an untyped Objective-C object. Clang treats id as a built-in type
+ /// when compiling Objective-C code, so this may be an opaque pointer (i8*),
+ /// but if the runtime header declaring it is included then it may be a
+ /// pointer to a structure.
+ llvm::PointerType *IdTy;
+ /// Pointer to a pointer to an Objective-C object. Used in the new ABI
+ /// message lookup function and some GC-related functions.
+ llvm::PointerType *PtrToIdTy;
+ /// The clang type of id. Used when using the clang CGCall infrastructure to
+ /// call Objective-C methods.
+ CanQualType ASTIdTy;
+ /// LLVM type for C int type.
+ llvm::IntegerType *IntTy;
+ /// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is
+ /// used in the code to document the difference between i8* meaning a pointer
+ /// to a C string and i8* meaning a pointer to some opaque type.
+ llvm::PointerType *PtrTy;
+ /// LLVM type for C long type. The runtime uses this in a lot of places where
+ /// it should be using intptr_t, but we can't fix this without breaking
+ /// compatibility with GCC...
+ llvm::IntegerType *LongTy;
+ /// LLVM type for C size_t. Used in various runtime data structures.
+ llvm::IntegerType *SizeTy;
+ /// LLVM type for C intptr_t.
+ llvm::IntegerType *IntPtrTy;
+ /// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
+ llvm::IntegerType *PtrDiffTy;
+ /// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance
+ /// variables.
+ llvm::PointerType *PtrToIntTy;
+ /// LLVM type for Objective-C BOOL type.
+ llvm::Type *BoolTy;
+ /// 32-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int32Ty;
+ /// 64-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int64Ty;
+ /// Metadata kind used to tie method lookups to message sends. The GNUstep
+ /// runtime provides some LLVM passes that can use this to do things like
+ /// automatic IMP caching and speculative inlining.
+ unsigned msgSendMDKind;
+  /// Helper function that generates a constant string and returns a pointer to
+  /// the start of the string. The result of this function can be used anywhere
+  /// the C code specifies const char*.
+ llvm::Constant *MakeConstantString(const std::string &Str,
+ const std::string &Name="") {
+ llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros);
+ }
+ /// Emits a linkonce_odr string, whose name is the prefix followed by the
+ /// string value. This allows the linker to combine the strings between
+ /// different modules. Used for EH typeinfo names, selector strings, and a
+ /// few other things.
+ llvm::Constant *ExportUniqueString(const std::string &Str,
+ const std::string prefix) {
+ std::string name = prefix + Str;
+ llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros);
+ }
+ /// Generates a global structure, initialized by the elements in the vector.
+ /// The element types must match the types of the structure elements in the
+ /// first argument.
+ llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ }
+  /// Generates a global array. The vector must contain the same number of
+  /// elements as the array type declares, of the type specified as the array
+  /// element type.
+ llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ }
+ /// Generates a global array, inferring the array type from the specified
+ /// element type and the size of the initialiser.
+ llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
+ return MakeGlobal(ArrayTy, V, Name, linkage);
+ }
+ /// Ensures that the value has the required type, by inserting a bitcast if
+ /// required. This function lets us avoid inserting bitcasts that are
+ /// redundant.
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, llvm::Type *Ty){
+ if (V->getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ /// Null pointer value. Mainly used as a terminator in various arrays.
+ llvm::Constant *NULLPtr;
+ /// LLVM context.
+ llvm::LLVMContext &VMContext;
+private:
+ /// Placeholder for the class. Lots of things refer to the class before we've
+ /// actually emitted it. We use this alias as a placeholder, and then replace
+ /// it with a pointer to the class structure before finally emitting the
+ /// module.
+ llvm::GlobalAlias *ClassPtrAlias;
+  /// Placeholder for the metaclass. Lots of things refer to the metaclass
+  /// before we've actually emitted it. We use this alias as a placeholder, and
+  /// then replace it with a pointer to the metaclass structure before finally
+  /// emitting the module.
+ llvm::GlobalAlias *MetaClassPtrAlias;
+  /// All of the classes that have been generated for this compilation unit.
+  std::vector<llvm::Constant*> Classes;
+  /// All of the categories that have been generated for this compilation unit.
+  std::vector<llvm::Constant*> Categories;
+  /// All of the Objective-C constant strings that have been generated for this
+  /// compilation unit.
+  std::vector<llvm::Constant*> ConstantStrings;
+ /// Map from string values to Objective-C constant strings in the output.
+ /// Used to prevent emitting Objective-C strings more than once. This should
+ /// not be required at all - CodeGenModule should manage this list.
+ llvm::StringMap<llvm::Constant*> ObjCStrings;
+ /// All of the protocols that have been declared.
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ /// For each variant of a selector, we store the type encoding and a
+ /// placeholder value. For an untyped selector, the type will be the empty
+ /// string. Selector references are all done via the module's selector table,
+ /// so we create an alias as a placeholder and then replace it with the real
+ /// value later.
+ typedef std::pair<std::string, llvm::GlobalAlias*> TypedSelector;
+ /// Type of the selector map. This is roughly equivalent to the structure
+ /// used in the GNUstep runtime, which maintains a list of all of the valid
+ /// types for a selector in a table.
+ typedef llvm::DenseMap<Selector, SmallVector<TypedSelector, 2> >
+ SelectorMap;
+ /// A map from selectors to selector types. This allows us to emit all
+ /// selectors of the same name and type together.
+ SelectorMap SelectorTable;
+
+  /// Selectors related to memory management. When compiling in GC-only mode,
+  /// message sends using these selectors are stripped out.
+ Selector RetainSel, ReleaseSel, AutoreleaseSel;
+ /// Runtime functions used for memory management in GC mode. Note that clang
+ /// supports code generation for calling these functions, but neither GNU
+ /// runtime actually supports this API properly yet.
+ LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
+ WeakAssignFn, GlobalAssignFn;
+
+ typedef std::pair<std::string, std::string> ClassAliasPair;
+ /// All classes that have aliases set for them.
+ std::vector<ClassAliasPair> ClassAliases;
+
+protected:
+ /// Function used for throwing Objective-C exceptions.
+ LazyRuntimeFunction ExceptionThrowFn;
+  /// Function used for rethrowing exceptions, used at the end of @finally or
+  /// @synchronized blocks.
+  LazyRuntimeFunction ExceptionReThrowFn;
+  /// Function called when entering a catch block. This is required for
+  /// differentiating Objective-C exceptions from foreign exceptions.
+  LazyRuntimeFunction EnterCatchFn;
+  /// Function called when exiting from a catch block. Used to do exception
+  /// cleanup.
+  LazyRuntimeFunction ExitCatchFn;
+  /// Function called when entering a @synchronized block. Acquires the lock.
+  LazyRuntimeFunction SyncEnterFn;
+  /// Function called when exiting a @synchronized block. Releases the lock.
+  LazyRuntimeFunction SyncExitFn;
+
+private:
+
+  /// Function called if fast enumeration detects that the collection is
+  /// modified during enumeration.
+ LazyRuntimeFunction EnumerationMutationFn;
+ /// Function for implementing synthesized property getters that return an
+ /// object.
+ LazyRuntimeFunction GetPropertyFn;
+ /// Function for implementing synthesized property setters that return an
+ /// object.
+ LazyRuntimeFunction SetPropertyFn;
+ /// Function used for non-object declared property getters.
+ LazyRuntimeFunction GetStructPropertyFn;
+ /// Function used for non-object declared property setters.
+ LazyRuntimeFunction SetStructPropertyFn;
+
+ /// The version of the runtime that this class targets. Must match the
+ /// version in the runtime.
+ int RuntimeVersion;
+ /// The version of the protocol class. Used to differentiate between ObjC1
+  /// and ObjC2 protocols. Objective-C 1 protocols cannot contain optional
+  /// components and cannot contain declared properties. We always emit
+ /// Objective-C 2 property structures, but we have to pretend that they're
+ /// Objective-C 1 property structures when targeting the GCC runtime or it
+ /// will abort.
+ const int ProtocolVersion;
+private:
+ /// Generates an instance variable list structure. This is a structure
+ /// containing a size and an array of structures containing instance variable
+ /// metadata. This is used purely for introspection in the fragile ABI. In
+ /// the non-fragile ABI, it's used for instance variable fixup.
+ llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets);
+ /// Generates a method list structure. This is a structure containing a size
+ /// and an array of structures containing method metadata.
+ ///
+ /// This structure is used by both classes and categories, and contains a next
+ /// pointer allowing them to be chained together in a linked list.
+ llvm::Constant *GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList);
+ /// Emits an empty protocol. This is used for @protocol() where no protocol
+ /// is found. The runtime will (hopefully) fix up the pointer to refer to the
+ /// real protocol.
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ /// Generates a list of property metadata structures. This follows the same
+ /// pattern as method and instance variable metadata lists.
+ llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ /// Generates a list of referenced protocols. Classes, categories, and
+ /// protocols all use this structure.
+ llvm::Constant *GenerateProtocolList(ArrayRef<std::string> Protocols);
+ /// To ensure that all protocols are seen by the runtime, we add a category on
+ /// a class defined in the runtime, declaring no methods, but adopting the
+ /// protocols. This is a horribly ugly hack, but it allows us to collect all
+ /// of the protocols without changing the ABI.
+ void GenerateProtocolHolderCategory(void);
+ /// Generates a class structure.
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
+ bool isMeta=false);
+ /// Generates a method list. This is used by protocols to define the required
+ /// and optional methods.
+ llvm::Constant *GenerateProtocolMethodList(
+ ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes);
+ /// Returns a selector with the specified type encoding. An empty string is
+ /// used to return an untyped selector (with the types field set to NULL).
+ llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ const std::string &TypeEncoding, bool lval);
+ /// Returns the variable used to store the offset of an instance variable.
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+ /// Emits a reference to a class. This allows the linker to object if there
+ /// is no class of the matching name.
+ void EmitClassRef(const std::string &className);
+  /// Emits a pointer to the named class.
+ llvm::Value *GetClassNamed(CGBuilderTy &Builder, const std::string &Name,
+ bool isWeak);
+protected:
+ /// Looks up the method for sending a message to the specified object. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must be
+ /// overridden in subclasses.
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) = 0;
+ /// Looks up the method for sending a message to a superclass. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must
+ /// be overridden in subclasses.
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) = 0;
+ /// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+ /// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+ /// bits set to their values, LSB first, while larger ones are stored in a
+  /// structure of this form:
+ ///
+ /// struct { int32_t length; int32_t values[length]; };
+ ///
+ /// The values in the array are stored in host-endian format, with the least
+ /// significant bit being assumed to come first in the bitfield. Therefore,
+ /// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
+  /// while a bitfield with the 63rd bit set will be (1<<63) | 1.
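+  /// As a small worked example (illustrative): the three-bit bitmap {1, 0, 1}
+  /// is encoded inline as (1<<1) | (1<<3) | 1 = 0xb, since payload bit i is
+  /// stored at bit i+1, above the low marker bit.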
+ llvm::Constant *MakeBitField(ArrayRef<bool> bits);
+public:
+ CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion);
+
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
+
+ virtual RValue
+ GenerateMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+ virtual RValue
+ GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method);
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD);
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+ virtual llvm::Function *ModuleInitFunction();
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
+ virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
+ virtual llvm::Constant *GetGetStructFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ virtual void EmitThrowStmt(CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false);
+ virtual void EmitObjCIvarAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size);
+ virtual LValue EmitObjCValueForIvar(CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return NULLPtr;
+ }
+
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+ return 0;
+ }
+};
+/// Class representing the legacy GCC Objective-C ABI. This is the default when
+/// -fobjc-nonfragile-abi is not specified.
+///
+/// The GCC ABI target actually generates code that is approximately compatible
+/// with the new GNUstep runtime ABI, but refrains from using any features that
+/// would not work with the GCC runtime. For example, clang always generates
+/// the extended form of the class structure, and the extra fields are simply
+/// ignored by GCC libobjc.
+class CGObjCGCC : public CGObjCGNU {
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+protected:
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *args[] = {
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
+ llvm::CallSite imp = CGF.EmitCallOrInvoke(MsgLookupFn, args);
+ imp->setMetadata(msgSendMDKind, node);
+ return imp.getInstruction();
+ }
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy), cmd};
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs);
+ }
+ public:
+ CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+ }
+};
+/// Class used when targeting the new GNUstep runtime ABI.
+class CGObjCGNUstep : public CGObjCGNU {
+ /// The slot lookup function. Returns a pointer to a cacheable structure
+ /// that contains (among other things) the IMP.
+ LazyRuntimeFunction SlotLookupFn;
+ /// The GNUstep ABI superclass message lookup function. Takes a pointer to
+ /// a structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the slot for the corresponding method. Superclass
+ /// message lookup rarely changes, so this is a good caching opportunity.
+ LazyRuntimeFunction SlotLookupSuperFn;
+  /// Type of a slot structure pointer. This is returned by the various
+  /// lookup functions.
+ llvm::Type *SlotTy;
+ protected:
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Function *LookupFn = SlotLookupFn;
+
+ // Store the receiver on the stack so that we can reload it later
+ llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ LookupFn->setDoesNotCapture(1);
+
+ llvm::Value *args[] = {
+ EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy) };
+ llvm::CallSite slot = CGF.EmitCallOrInvoke(LookupFn, args);
+ slot.setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
+ // Load the imp from the slot
+ llvm::Value *imp =
+ Builder.CreateLoad(Builder.CreateStructGEP(slot.getInstruction(), 4));
+
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ Receiver = Builder.CreateLoad(ReceiverPtr, true);
+ return imp;
+ }
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+
+ llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs);
+ slot->setOnlyReadsMemory();
+
+ return Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ }
+ public:
+ CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
+ llvm::StructType *SlotStructTy = llvm::StructType::get(PtrTy,
+ PtrTy, PtrTy, IntTy, IMPTy, NULL);
+ SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
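+    // SlotStructTy corresponds roughly to libobjc2's slot structure (a
+    // sketch):
+    //   struct objc_slot { Class owner; Class cachedFor; const char *types;
+    //                      int version; IMP method; };
+    // The lookup functions return a pointer to this; LookupIMP loads the IMP
+    // (field index 4) from it.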
+ // Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
+ SelectorTy, IdTy, NULL);
+ // Slot_t objc_msg_lookup_super(struct objc_super*, SEL);
+ SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+    // If we're in ObjC++ mode, then we want to use the C++ exception handling
+    // runtime functions, so that Objective-C and C++ exceptions interoperate.
+ if (CGM.getLangOpts().CPlusPlus) {
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ // void *__cxa_begin_catch(void *e)
+ EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
+ // void __cxa_end_catch(void)
+ ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, NULL);
+ // void _Unwind_Resume_or_Rethrow(void*)
+ ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy, PtrTy, NULL);
+ }
+ }
+};
+
+} // end anonymous namespace
+
+
+/// Emits a reference to a dummy variable which is emitted with each class.
+/// This ensures that a linker error will be generated when trying to link
+/// together modules where a referenced class is not defined.
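+/// For a class named NSObject, on a 64-bit target, the emitted IR is roughly:
+///   @__objc_class_name_NSObject = external global i64
+///   @__objc_class_ref_NSObject = weak constant i64* @__objc_class_name_NSObject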
+void CGObjCGNU::EmitClassRef(const std::string &className) {
+ std::string symbolRef = "__objc_class_ref_" + className;
+ // Don't emit two copies of the same symbol
+ if (TheModule.getGlobalVariable(symbolRef))
+ return;
+ std::string symbolName = "__objc_class_name_" + className;
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
+ if (!ClassSymbol) {
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, 0, symbolName);
+ }
+ new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+ llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
+}
+
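+/// Returns the symbol used for a method body. For example, the instance
+/// method -foo: in category Bar of class Baz becomes _i_Baz_Bar_foo_; class
+/// methods use the _c_ prefix.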
+static std::string SymbolNameForMethod(const StringRef &ClassName,
+ const StringRef &CategoryName, const Selector MethodName,
+ bool isClassMethod) {
+ std::string MethodNameColonStripped = MethodName.getAsString();
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped).str();
+}
+
+CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion)
+ : CGObjCRuntime(cgm), TheModule(CGM.getModule()),
+ VMContext(cgm.getLLVMContext()), ClassPtrAlias(0), MetaClassPtrAlias(0),
+ RuntimeVersion(runtimeABIVersion), ProtocolVersion(protocolClassVersion) {
+
+ msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+
+ CodeGenTypes &Types = CGM.getTypes();
+ IntTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().LongTy));
+ SizeTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().getSizeType()));
+ PtrDiffTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().getPointerDiffType()));
+ BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ // Get the selector Type.
+ QualType selTy = CGM.getContext().getObjCSelType();
+ if (QualType() == selTy) {
+ SelectorTy = PtrToInt8Ty;
+ } else {
+ SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ }
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ Int64Ty = llvm::Type::getInt64Ty(VMContext);
+
+ IntPtrTy =
+ TheModule.getPointerSize() == llvm::Module::Pointer32 ? Int32Ty : Int64Ty;
+
+ // Object type
+ QualType UnqualIdTy = CGM.getContext().getObjCIdType();
+ ASTIdTy = CanQualType();
+ if (UnqualIdTy != QualType()) {
+ ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy);
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ } else {
+ IdTy = PtrToInt8Ty;
+ }
+ PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+
+ ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, NULL);
+ PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
+
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+
+ // void objc_exception_throw(id);
+ ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
+ ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
+ // int objc_sync_enter(id);
+ SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy, NULL);
+ // int objc_sync_exit(id);
+ SyncExitFn.init(&CGM, "objc_sync_exit", IntTy, IdTy, NULL);
+
+ // void objc_enumerationMutation (id)
+ EnumerationMutationFn.init(&CGM, "objc_enumerationMutation", VoidTy,
+ IdTy, NULL);
+
+ // id objc_getProperty(id, SEL, ptrdiff_t, BOOL)
+ GetPropertyFn.init(&CGM, "objc_getProperty", IdTy, IdTy, SelectorTy,
+ PtrDiffTy, BoolTy, NULL);
+ // void objc_setProperty(id, SEL, ptrdiff_t, id, BOOL, BOOL)
+ SetPropertyFn.init(&CGM, "objc_setProperty", VoidTy, IdTy, SelectorTy,
+ PtrDiffTy, IdTy, BoolTy, BoolTy, NULL);
+  // void objc_getPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ GetStructPropertyFn.init(&CGM, "objc_getPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, NULL);
+ // void objc_setPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ SetStructPropertyFn.init(&CGM, "objc_setPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, NULL);
+
+ // IMP type
+ llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
+ IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
+ true));
+
+ const LangOptions &Opts = CGM.getLangOpts();
+ if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
+ RuntimeVersion = 10;
+
+ // Don't bother initialising the GC stuff unless we're compiling in GC mode
+ if (Opts.getGC() != LangOptions::NonGC) {
+    // This is a bit of a hack. We should sort this out by having a proper
+    // CGObjCGNUstep subclass for GC, but we may want to really support the old
+    // ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now.
+ // Get selectors needed in GC mode
+ RetainSel = GetNullarySelector("retain", CGM.getContext());
+ ReleaseSel = GetNullarySelector("release", CGM.getContext());
+ AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext());
+
+ // Get functions needed in GC mode
+
+ // id objc_assign_ivar(id, id, ptrdiff_t);
+ IvarAssignFn.init(&CGM, "objc_assign_ivar", IdTy, IdTy, IdTy, PtrDiffTy,
+ NULL);
+ // id objc_assign_strongCast (id, id*)
+ StrongCastAssignFn.init(&CGM, "objc_assign_strongCast", IdTy, IdTy,
+ PtrToIdTy, NULL);
+ // id objc_assign_global(id, id*);
+ GlobalAssignFn.init(&CGM, "objc_assign_global", IdTy, IdTy, PtrToIdTy,
+ NULL);
+ // id objc_assign_weak(id, id*);
+ WeakAssignFn.init(&CGM, "objc_assign_weak", IdTy, IdTy, PtrToIdTy, NULL);
+ // id objc_read_weak(id*);
+ WeakReadFn.init(&CGM, "objc_read_weak", IdTy, PtrToIdTy, NULL);
+ // void *objc_memmove_collectable(void*, void *, size_t);
+ MemMoveFn.init(&CGM, "objc_memmove_collectable", PtrTy, PtrTy, PtrTy,
+ SizeTy, NULL);
+ }
+}
+
+llvm::Value *CGObjCGNU::GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name,
+ bool isWeak) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(Name);
+ // With the incompatible ABI, this will need to be replaced with a direct
+ // reference to the class symbol. For the compatible nonfragile ABI we are
+ // still performing this lookup at run time but emitting the symbol for the
+ // class externally so that we can make the switch later.
+ //
+ // Libobjc2 contains an LLVM pass that replaces calls to objc_lookup_class
+ // with memoized versions or with static references if it's safe to do so.
+ if (!isWeak)
+ EmitClassRef(Name);
+ ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
+ "objc_lookup_class");
+ return Builder.CreateCall(ClassLookupFn, ClassName);
+}
+
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ return GetClassNamed(Builder, OID->getNameAsString(), OID->isWeakImported());
+}
+llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ return GetClassNamed(Builder, "NSAutoreleasePool", false);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ const std::string &TypeEncoding, bool lval) {
+
+ SmallVector<TypedSelector, 2> &Types = SelectorTable[Sel];
+ llvm::GlobalAlias *SelValue = 0;
+
+
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+ if (i->first == TypeEncoding) {
+ SelValue = i->second;
+ break;
+ }
+ }
+ if (0 == SelValue) {
+ SelValue = new llvm::GlobalAlias(SelectorTy,
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_"+Sel.getAsString(), NULL,
+ &TheModule);
+ Types.push_back(TypedSelector(TypeEncoding, SelValue));
+ }
+
+ if (lval) {
+ llvm::Value *tmp = Builder.CreateAlloca(SelValue->getType());
+ Builder.CreateStore(SelValue, tmp);
+ return tmp;
+ }
+ return SelValue;
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return GetSelector(Builder, Sel, std::string(), lval);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ return GetSelector(Builder, Method->getSelector(), SelTypes, false);
+}
+
+llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
+ if (!CGM.getLangOpts().CPlusPlus) {
+ if (T->isObjCIdType()
+ || T->isObjCQualifiedIdType()) {
+ // With the old ABI, there was only one kind of catchall, which broke
+ // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
+ // a pointer indicating object catchalls, and NULL to indicate real
+ // catchalls
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
+ return MakeConstantString("@id");
+ } else {
+ return 0;
+ }
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ return MakeConstantString(IDecl->getIdentifier()->getName());
+ }
+ // For Objective-C++, we want to provide the ability to catch both C++ and
+ // Objective-C objects in the same function.
+
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("__objc_id_type_info");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), PtrToInt8Ty,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "__objc_id_type_info");
+ return llvm::ConstantExpr::getBitCast(IDEHType, PtrToInt8Ty);
+ }
+
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ std::string className = IT->getDecl()->getIdentifier()->getName();
+
+ std::string typeinfoName = "__objc_eh_typeinfo_" + className;
+
+ // Return the existing typeinfo if it exists
+ llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
+ if (typeinfo)
+ return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty);
+
+ // Otherwise create it.
+
+ // vtable for gnustep::libobjc::__objc_class_type_info
+  // It's quite ugly to hard-code this. Ideally we'd generate it using the
+  // host platform's name mangling.
+ const char *vtableName = "_ZTVN7gnustep7libobjc22__objc_class_type_infoE";
+ llvm::Constant *Vtable = TheModule.getGlobalVariable(vtableName);
+ if (!Vtable) {
+ Vtable = new llvm::GlobalVariable(TheModule, PtrToInt8Ty, true,
+ llvm::GlobalValue::ExternalLinkage, 0, vtableName);
+ }
+ llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2);
+ Vtable = llvm::ConstantExpr::getGetElementPtr(Vtable, Two);
+ Vtable = llvm::ConstantExpr::getBitCast(Vtable, PtrToInt8Ty);
+
+ llvm::Constant *typeName =
+ ExportUniqueString(className, "__objc_eh_typename_");
+
+ std::vector<llvm::Constant*> fields;
+ fields.push_back(Vtable);
+ fields.push_back(typeName);
+ llvm::Constant *TI =
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ NULL), fields, "__objc_eh_typeinfo_" + className,
+ llvm::GlobalValue::LinkOnceODRLinkage);
+ return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
+}
+
+/// Generate an NSConstantString object.
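+/// The emitted object is laid out roughly as
+///   struct { Class isa; const char *str; int length; };
+/// so @"foo" becomes { <the NXConstantString class>, "foo", 3 } unless another
+/// constant string class is selected.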
+llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+
+ std::string Str = SL->getString().str();
+
+ // Look for an existing one
+ llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+ if (old != ObjCStrings.end())
+ return old->getValue();
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ std::string Sym = "_OBJC_CLASS_";
+ Sym += StringClass;
+
+ llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
+
+ if (!isa)
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
+ llvm::GlobalValue::ExternalWeakLinkage, 0, Sym);
+ else if (isa->getType() != PtrToIdTy)
+ isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(isa);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, NULL),
+ Ivars, ".objc_str");
+ ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+ ObjCStrings[Str] = ObjCStr;
+ ConstantStrings.push_back(ObjCStr);
+ return ObjCStr;
+}
+
+/// Generates a message send where the super is the receiver. This is a
+/// message send to self with special delivery semantics indicating which
+/// class's method should be called.
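+///
+/// The structure passed to the superclass lookup function is equivalent to
+///   struct objc_super { id receiver; Class super_class; };
+/// (see ObjCSuperTy).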
+RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ llvm::Value *cmd = GetSelector(Builder, Sel);
+
+
+ CallArgList ActualArgs;
+
+ ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ llvm::Value *ReceiverClass = 0;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = 0;
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_class");
+ }
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+    // Set up global aliases for the metaclass or class pointer if they do not
+    // already exist. These are forward references which will be set to
+ // pointers to the class and metaclass structure created for the runtime
+ // load function. To send a message to super, we look up the value of the
+ // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(
+ llvm::StructType::get(IdTy, IdTy, NULL)));
+ // Get the superclass pointer
+ ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass = Builder.CreateLoad(ReceiverClass);
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(
+ Receiver->getType(), IdTy, NULL);
+ llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
+
+ Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
+ Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
+
+ ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
+
+ // Get the IMP
+ llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd);
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+ return msgRet;
+}
+
+/// Generate code for a message send expression.
+RValue
+CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Strip out message sends to retain / release in GC mode
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ // If the return type is something that goes in an integer register, the
+ // runtime will handle 0 returns. For other cases, we fill in the 0 value
+ // ourselves.
+ //
+ // The language spec says the result of this kind of message send is
+ // undefined, but lots of people seem to have forgotten to read that
+ // paragraph and insist on sending messages to nil that have structure
+ // returns. With GCC, this generates a random return value (whatever happens
+ // to be on the stack / in those registers at the time) on most platforms,
+ // and generates an illegal instruction trap on SPARC. With LLVM it corrupts
+ // the stack.
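+  //
+  // For example, for a method returning struct { double a, b; }, a message to
+  // nil skips the send entirely and produces a zero-filled temporary instead.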
+ bool isPointerSizedReturn = (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType());
+
+ llvm::BasicBlock *startBB = 0;
+ llvm::BasicBlock *messageBB = 0;
+ llvm::BasicBlock *continueBB = 0;
+
+ if (!isPointerSizedReturn) {
+ startBB = Builder.GetInsertBlock();
+ messageBB = CGF.createBasicBlock("msgSend");
+ continueBB = CGF.createBasicBlock("continue");
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+ Builder.CreateCondBr(isNil, continueBB, messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(Builder, Method);
+ else
+ cmd = GetSelector(Builder, Sel);
+ cmd = EnforceType(Builder, cmd, SelectorTy);
+ Receiver = EnforceType(Builder, Receiver, IdTy);
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class!=0)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ CallArgList ActualArgs;
+ ActualArgs.add(RValue::get(Receiver), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ // Get the IMP to call
+ llvm::Value *imp;
+
+  // If we have non-legacy dispatch specified, we try using the objc_msgSend()
+  // functions. These are not supported on all platforms (or all runtimes on a
+  // given platform), so we default to the legacy lookup and only emit them
+  // when another dispatch method is explicitly requested.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ imp = LookupIMP(CGF, Receiver, cmd, node);
+ break;
+ case CodeGenOptions::Mixed:
+ case CodeGenOptions::NonLegacy:
+ if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_fpret");
+ } else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_stret");
+ } else {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend");
+ }
+ }
+
+ // Reset the receiver in case the lookup modified it
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+
+
+ if (!isPointerSizedReturn) {
+ messageBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ CGF.EmitBlock(continueBB);
+ if (msgRet.isScalar()) {
+ llvm::Value *v = msgRet.getScalarVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
+ msgRet = RValue::get(phi);
+ } else if (msgRet.isAggregate()) {
+ llvm::Value *v = msgRet.getAggregateAddr();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::AllocaInst *NullVal =
+ CGF.CreateTempAlloca(RetTy->getElementType(), "null");
+ CGF.InitTempAlloca(NullVal,
+ llvm::Constant::getNullValue(RetTy->getElementType()));
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(NullVal, startBB);
+ msgRet = RValue::getAggregate(phi);
+ } else /* isComplex() */ {
+ std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
+ phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
+ startBB);
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType(), 2);
+ phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
+ startBB);
+ msgRet = RValue::getComplex(phi, phi2);
+ }
+ }
+ return msgRet;
+}
+
+/// Generates a MethodList. Used in the construction of objc_class and
+/// objc_category structures.
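+/// The emitted layout is, in C terms (a sketch):
+///   struct objc_method_list {
+///     struct objc_method_list *next;
+///     int count;
+///     struct { const char *sel; const char *types; IMP imp; } methods[];
+///   };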
+llvm::Constant *CGObjCGNU::
+GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList) {
+ if (MethodSels.empty())
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(
+    PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+    PtrToInt8Ty, // Method types
+    IMPTy, // Method pointer
+    NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i],
+ isClassMethodList));
+ assert(Method && "Can't generate metadata for method that doesn't exist");
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ IMPTy);
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext);
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
+ ObjCMethodListTy->setBody(
+ NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ NULL);
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+  // The count must have the same type as the IntTy field declared above.
+  Methods.push_back(llvm::ConstantInt::get(IntTy, MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
+
+/// Generates an IvarList. Used in the construction of an objc_class.
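+/// The emitted layout is, in C terms (a sketch):
+///   struct objc_ivar_list {
+///     int count;
+///     struct { const char *name; const char *type; int offset; } ivars[];
+///   };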
+llvm::Constant *CGObjCGNU::
+GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets) {
+ if (IvarNames.size() == 0)
+ return NULLPtr;
+  // Get the ivar structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ NULL);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(IvarNames[i]);
+ Elements.push_back(IvarTypes[i]);
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+  // Array of ivar structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
+ ObjCIvarArrayTy,
+ NULL);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
+ bool isMeta) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ //
+  // Fields marked New ABI are part of the GNUstep runtime. We emit them
+  // anyway; the classes will still work with the GNU runtime, where the
+  // extra fields are simply ignored.
+ llvm::StructType *ClassTy = llvm::StructType::get(
+ PtrToInt8Ty, // isa
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+      // These are all filled in by the runtime, so we pretend they are
+      // plain opaque pointers here.
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ // New ABI:
+ LongTy, // abi_version
+ IvarOffsets->getType(), // ivar_offsets
+ Properties->getType(), // properties
+ IntPtrTy, // strong_pointers
+ IntPtrTy, // weak_pointers
+ NULL);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ if (isMeta) {
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ClassTy) /
+ CGM.getContext().getCharWidth()));
+ } else
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, 1));
+ Elements.push_back(IvarOffsets);
+ Elements.push_back(Properties);
+ Elements.push_back(StrongIvarBitmap);
+ Elements.push_back(WeakIvarBitmap);
+ // Create an instance of the structure
+ // This is now an externally visible symbol, so that we can speed up class
+ // messages in the next ABI. We may already have some weak references to
+ // this, so check and fix them properly.
+ std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
+ std::string(Name));
+ llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
+ llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
+ if (ClassRef) {
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->getType()));
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
+ }
+ return Class;
+}
+
+llvm::Constant *CGObjCGNU::
+GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodTypes[i]);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
+ IntTy, ObjCMethodArrayTy, NULL);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+}
+
+// Create the protocol list structure used in classes, categories and so on
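+// Layout, in C terms (a sketch):
+//   struct objc_protocol_list { void *next; size_t count; Protocol *list[]; };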
+llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+    PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ SizeTy,
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *protocol = 0;
+ llvm::StringMap<llvm::Constant*>::iterator value =
+ ExistingProtocols.find(*iter);
+ if (value == ExistingProtocols.end()) {
+ protocol = GenerateEmptyProtocol(*iter);
+ } else {
+ protocol = value->getValue();
+ }
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
+ PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+  // The count must match the SizeTy field declared in ProtocolListTy.
+  Elements.push_back(llvm::ConstantInt::get(SizeTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+}
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ SmallVector<std::string, 0> EmptyStringVector;
+ SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *MethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
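+  // (Here the magic number is ProtocolVersion: 2 when targeting the GCC
+  // runtime, 3 for the GNUstep runtime.)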
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ SmallVector<std::string, 16> Protocols;
+ for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+ E = PD->protocol_end(); PI != E; ++PI)
+ Protocols.push_back((*PI)->getNameAsString());
+ SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+ for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
+ E = PD->instmeth_end(); iter != E; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptionalInstanceMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+    } else {
+      InstanceMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+    }
+ }
+ // Collect information about class methods:
+ SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
+ iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
+ iter != endIter ; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptionalClassMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+    } else {
+      ClassMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+    }
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ llvm::Constant *OptionalInstanceMethodList =
+ GenerateProtocolMethodList(OptionalInstanceMethodNames,
+ OptionalInstanceMethodTypes);
+ llvm::Constant *OptionalClassMethodList =
+ GenerateProtocolMethodList(OptionalClassMethodNames,
+ OptionalClassMethodTypes);
+
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ // The isSynthesized value is always set to 0 in a protocol. It exists to
+ // simplify the runtime library by allowing it to use the same data
+ // structures for protocol metadata everywhere.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+ std::vector<llvm::Constant*> OptionalProperties;
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (ObjCContainerDecl::prop_iterator
+ iter = PD->prop_begin(), endIter = PD->prop_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+ OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ } else {
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ }
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
+ llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+ PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+ PropertyListInit, ".objc_property_list");
+
+ llvm::Constant *OptionalPropertyArray =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+ OptionalProperties.size()) , OptionalProperties);
+ llvm::Constant* OptionalPropertyListInitFields[] = {
+ llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+ OptionalPropertyArray };
+
+ llvm::Constant *OptionalPropertyListInit =
+ llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
+ llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+ OptionalPropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+ ".objc_property_list");
+
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ OptionalInstanceMethodList->getType(),
+ OptionalClassMethodList->getType(),
+ PropertyList->getType(),
+ OptionalPropertyList->getType(),
+ NULL);
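+ // As a C sketch, this corresponds to (libobjc2 layout; field names are
+ // illustrative):
+ //
+ //   struct objc_protocol {
+ //     id isa; // actually a protocol-version magic number, see below
+ //     const char *name;
+ //     struct objc_protocol_list *protocols;
+ //     struct objc_method_description_list *instance_methods;
+ //     struct objc_method_description_list *class_methods;
+ //     struct objc_method_description_list *optional_instance_methods;
+ //     struct objc_method_description_list *optional_class_methods;
+ //     struct objc_property_list *properties;
+ //     struct objc_property_list *optional_properties;
+ //   };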
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ Elements.push_back(OptionalInstanceMethodList);
+ Elements.push_back(OptionalClassMethodList);
+ Elements.push_back(PropertyList);
+ Elements.push_back(OptionalPropertyList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ ".objc_protocol"), IdTy);
+}
+
+void CGObjCGNU::GenerateProtocolHolderCategory(void) {
+ // Collect information about instance methods
+ SmallVector<Selector, 1> MethodSels;
+ SmallVector<llvm::Constant*, 1> MethodTypes;
+
+ std::vector<llvm::Constant*> Elements;
+ const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+ const std::string CategoryName = "AnotherHack";
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ // Protocol list
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+ ExistingProtocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+ PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ SizeTy,
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> ProtocolElements;
+ for (llvm::StringMapIterator<llvm::Constant*> iter =
+ ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+ PtrTy);
+ ProtocolElements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ ProtocolElements);
+ ProtocolElements.clear();
+ ProtocolElements.push_back(NULLPtr);
+ ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+ ExistingProtocols.size()));
+ ProtocolElements.push_back(ProtocolArray);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+ ProtocolElements, ".objc_protocol_list"), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+/// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+/// bits set to their values, LSB first, while larger ones are stored in a
+/// structure of this form:
+///
+/// struct { int32_t length; int32_t values[length]; };
+///
+/// The values in the array are stored in host-endian format, with the least
+/// significant bit being assumed to come first in the bitfield. Therefore, a
+/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while
+/// a bitfield with the 63rd bit set will be (1ULL<<63) | 1.
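+///
+/// For example, a 3-bit bitfield {1, 0, 1} fits inline and is encoded as
+/// (1<<1) | (1<<3) | 1 == 0xB.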
+llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
+ int bitCount = bits.size();
+ int ptrBits =
+ (TheModule.getPointerSize() == llvm::Module::Pointer32) ? 32 : 64;
+ if (bitCount < ptrBits) {
+ uint64_t val = 1;
+ for (int i=0 ; i<bitCount ; ++i) {
+ if (bits[i]) val |= 1ULL<<(i+1);
+ }
+ return llvm::ConstantInt::get(IntPtrTy, val);
+ }
+ llvm::SmallVector<llvm::Constant*, 8> values;
+ int v=0;
+ while (v < bitCount) {
+ int32_t word = 0;
+ for (int i=0 ; (i<32) && (v<bitCount) ; ++i) {
+ if (bits[v]) word |= 1<<i;
+ v++;
+ }
+ values.push_back(llvm::ConstantInt::get(Int32Ty, word));
+ }
+ llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size());
+ llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values);
+ llvm::Constant *fields[2] = {
+ llvm::ConstantInt::get(Int32Ty, values.size()),
+ array };
+ llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
+ NULL), fields);
+ llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
+ return ptr;
+}
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ iter = OCD->instmeth_begin(), endIter = OCD->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect information about class methods
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ SmallVector<std::string, 16> Protocols;
+ const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
+ const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ ASTContext &Context = CGM.getContext();
+ // Property metadata: name, attributes, isSynthesized, getter name, getter
+ // types, setter name, setter types.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyImplDecl *propertyImpl = *iter;
+ bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(getter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(setter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ llvm::ArrayType *PropertyArrayTy =
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+ Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
+ return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, PropertyListInit,
+ ".objc_property_list");
+}
+
+void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
+ // Get the class declaration for which the alias is specified.
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OAD->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ std::string AliasName = OAD->getNameAsString();
+ ClassAliases.push_back(ClassAliasPair(ClassName,AliasName));
+}
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl) {
+ SuperClassName = SuperClassDecl->getNameAsString();
+ EmitClassRef(SuperClassName);
+ }
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ // Emit the symbol that is used to generate linker errors if this class is
+ // referenced in other modules but not declared.
+ std::string classSymbolName = "__objc_class_name_" + ClassName;
+ if (llvm::GlobalVariable *symbol =
+ TheModule.getGlobalVariable(classSymbolName)) {
+ symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
+ } else {
+ new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+ classSymbolName);
+ }
+
+ // Get the size of instances.
+ int instanceSize =
+ Context.getASTObjCImplementationLayout(OID).getSize().getQuantity();
+
+ // Collect information about instance variables.
+ SmallVector<llvm::Constant*, 16> IvarNames;
+ SmallVector<llvm::Constant*, 16> IvarTypes;
+ SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ std::vector<llvm::Constant*> IvarOffsetValues;
+ SmallVector<bool, 16> WeakIvars;
+ SmallVector<bool, 16> StrongIvars;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
+ // For non-fragile ivars, set the instance size to 0 - {the size of just this
+ // class}. The runtime will then set this to the correct value on load.
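+ // For example, if the superclass occupies 8 bytes and this class adds 4
+ // bytes of ivars, instanceSize becomes 0 - (12 - 8) = -4, and the runtime
+ // recomputes 4 + sizeof(superclass as found at load time).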
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
+
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ // Store the name
+ IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType(IVD->getType(), TypeStr);
+ IvarTypes.push_back(MakeConstantString(TypeStr));
+ // Get the offset
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
+ uint64_t Offset = BaseOffset;
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ Offset = BaseOffset - superInstanceSize;
+ }
+ llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
+ // Create the direct offset value
+ std::string OffsetName = "__objc_ivar_offset_value_" + ClassName + "." +
+ IVD->getNameAsString();
+ llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
+ if (OffsetVar) {
+ OffsetVar->setInitializer(OffsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ OffsetValue, OffsetName);
+ IvarOffsets.push_back(OffsetValue);
+ IvarOffsetValues.push_back(OffsetVar);
+ Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
+ switch (lt) {
+ case Qualifiers::OCL_Strong:
+ StrongIvars.push_back(true);
+ WeakIvars.push_back(false);
+ break;
+ case Qualifiers::OCL_Weak:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(true);
+ break;
+ default:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(false);
+ }
+ }
+ llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
+ llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
+ llvm::GlobalVariable *IvarOffsetArray =
+ MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
+
+
+ // Collect information about instance methods
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCImplementationDecl::instmeth_iterator
+ iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+ InstanceMethodTypes);
+
+
+ // Collect information about class methods
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCImplementationDecl::classmeth_iterator
+ iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ SmallVector<std::string, 16> Protocols;
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // we emit a symbol containing the offset for each ivar in the class. This
+ // allows code compiled for the non-Fragile ABI to inherit from code compiled
+ // for the legacy ABI, without causing problems. The converse is also
+ // possible, but causes all ivar accesses to be fragile.
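+ //
+ // For example, a class Foo with an ivar bar gets a pointer symbol
+ // __objc_ivar_offset_Foo.bar (set up in the loop below) alongside the
+ // __objc_ivar_offset_value_Foo.bar variable emitted above.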
+
+ // Offset pointer for getting at the correct field in the ivar list when
+ // setting up the alias. These are: The base address for the global, the
+ // ivar array (second field), the ivar in this list (set for each ivar), and
+ // the offset (third field in ivar structure)
+ llvm::Type *IndexTy = Int32Ty;
+ llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+ llvm::ConstantInt::get(IndexTy, 1), 0,
+ llvm::ConstantInt::get(IndexTy, 2) };
+
+ unsigned ivarIndex = 0;
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ + IVD->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex);
+ // Get the correct ivar field
+ llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+ IvarList, offsetPointerIndexes);
+ // Get the existing variable, if one exists.
+ llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+ if (offset) {
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ (void) offset; // Silence dead store warning.
+ }
+ ++ivarIndex;
+ }
+ llvm::Constant *ZeroPtr = llvm::ConstantInt::get(IntPtrTy, 0);
+ //Generate metaclass for class methods
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr,
+ NULLPtr, NULLPtr, ZeroPtr, ZeroPtr, true);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
+ ClassName.c_str(), 0,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+ Properties, StrongIvarBitmap, WeakIvarBitmap);
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias->eraseFromParent();
+ ClassPtrAlias = 0;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias->eraseFromParent();
+ MetaClassPtrAlias = 0;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
+
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+ // Don't emit an ObjC load function if no Objective-C metadata has been
+ // generated.
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && SelectorTable.empty())
+ return NULL;
+
+ // Add all referenced protocols to a category.
+ GenerateProtocolHolderCategory();
+
+ llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ llvm::Type *SelStructPtrTy = SelectorTy;
+ if (SelStructTy == 0) {
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ }
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (ConstantStrings.size()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ Elements.push_back(MakeConstantString(StringClass,
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy =
+ llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
+ llvm::Type::getInt16Ty(VMContext),
+ llvm::Type::getInt16Ty(VMContext),
+ ClassListTy, NULL);
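+ // As a C sketch, this matches the GCC runtime's symbol table (field names
+ // are illustrative):
+ //
+ //   struct objc_symtab {
+ //     long sel_ref_cnt;
+ //     struct objc_selector *refs;
+ //     unsigned short cls_def_cnt;
+ //     unsigned short cat_def_cnt;
+ //     void *defs[]; // classes, then categories, then statics, then NULL
+ //   };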
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ std::vector<llvm::GlobalAlias*> SelectorAliases;
+ for (SelectorMap::iterator iter = SelectorTable.begin(),
+ iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) {
+
+ std::string SelNameStr = iter->first.getAsString();
+ llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
+
+ SmallVectorImpl<TypedSelector> &Types = iter->second;
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+
+ llvm::Constant *SelectorTypeEncoding = NULLPtr;
+ if (!i->first.empty())
+ SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types");
+
+ Elements.push_back(SelName);
+ Elements.push_back(SelectorTypeEncoding);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Store the selector alias for later replacement
+ SelectorAliases.push_back(i->second);
+ }
+ }
+ unsigned SelectorCount = Selectors.size();
+ // NULL-terminate the selector list. This should not actually be required,
+ // because the selector list has a length field. Unfortunately, the GCC
+ // runtime decides to ignore the length field and expects a NULL terminator,
+ // and GCC cooperates with this by always setting the length to 0.
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
+ llvm::Constant *SelectorList = MakeGlobalArray(SelStructTy, Selectors,
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ for (unsigned int i=0 ; i<SelectorCount ; i++) {
+
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(Int32Ty, i), Zeros[0]};
+ // FIXME: We're generating redundant loads and stores here!
+ llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(SelectorList,
+ makeArrayRef(Idxs, 2));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
+ SelectorAliases[i]->replaceAllUsesWith(SelPtr);
+ SelectorAliases[i]->eraseFromParent();
+ }
+
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
+ (RuntimeVersion >= 10) ? IntTy : NULL, NULL);
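+ // As a C sketch (field names are illustrative):
+ //
+ //   struct objc_module {
+ //     long version;     // RuntimeVersion
+ //     long size;        // sizeof(struct objc_module)
+ //     const char *name; // path to the source file
+ //     struct objc_symtab *symtab;
+ //     int gc_mode;      // only present when RuntimeVersion >= 10
+ //   };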
+ Elements.clear();
+ // Runtime version, used for ABI compatibility checking.
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ // sizeof(ModuleTy)
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy) /
+ CGM.getContext().getCharWidth()));
+
+ // The path to the source file where this module was declared
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ std::string path =
+ std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
+ Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
+ Elements.push_back(SymTab);
+
+ if (RuntimeVersion >= 10)
+ switch (CGM.getLangOpts().getGC()) {
+ case LangOptions::GCOnly:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
+ break;
+ case LangOptions::NonGC:
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ else
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ break;
+ case LangOptions::HybridGC:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ break;
+ }
+
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy Builder(VMContext);
+ Builder.SetInsertPoint(EntryBB);
+
+ llvm::FunctionType *FT =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+
+ if (!ClassAliases.empty()) {
+ llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
+ llvm::FunctionType *RegisterAliasTy =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ ArgTypes, false);
+ llvm::Function *RegisterAlias = llvm::Function::Create(
+ RegisterAliasTy,
+ llvm::GlobalValue::ExternalWeakLinkage, "class_registerAlias_np",
+ &TheModule);
+ llvm::BasicBlock *AliasBB =
+ llvm::BasicBlock::Create(VMContext, "alias", LoadFunction);
+ llvm::BasicBlock *NoAliasBB =
+ llvm::BasicBlock::Create(VMContext, "no_alias", LoadFunction);
+
+ // Branch based on whether the runtime provided class_registerAlias_np()
+ llvm::Value *HasRegisterAlias = Builder.CreateICmpNE(RegisterAlias,
+ llvm::Constant::getNullValue(RegisterAlias->getType()));
+ Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB);
+
+ // The true branch (has alias registration function):
+ Builder.SetInsertPoint(AliasBB);
+ // Emit alias registration calls:
+ for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
+ iter != ClassAliases.end(); ++iter) {
+ llvm::Constant *TheClass =
+ TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
+ true);
+ if (0 != TheClass) {
+ TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
+ Builder.CreateCall2(RegisterAlias, TheClass,
+ MakeConstantString(iter->second));
+ }
+ }
+ // Jump to end:
+ Builder.CreateBr(NoAliasBB);
+
+ // Missing alias registration function, just return from the function:
+ Builder.SetInsertPoint(NoAliasBB);
+ }
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ StringRef CategoryName = OCD ? OCD->getName() : "";
+ StringRef ClassName = CD->getName();
+ Selector MethodName = OMD->getSelector();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
+
+llvm::Constant *CGObjCGNU::GetPropertyGetFunction() {
+ return GetPropertyFn;
+}
+
+llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
+ return SetPropertyFn;
+}
+
+llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::GetGetStructFunction() {
+ return GetStructPropertyFn;
+}
+llvm::Constant *CGObjCGNU::GetSetStructFunction() {
+ return SetStructPropertyFn;
+}
+llvm::Constant *CGObjCGNU::GetCppAtomicObjectFunction() {
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+ return EnumerationMutationFn;
+}
+
+void CGObjCGNU::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ EmitAtSynchronizedStmt(CGF, S, SyncEnterFn, SyncExitFn);
+}
+
+
+void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Unlike the Apple non-fragile runtimes, which also use
+ // unwind-based zero-cost exceptions, the GNU Objective-C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by __objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
+ //
+ // In Objective-C++ mode, we actually emit something equivalent to the C++
+ // exception handler.
+ EmitTryCatchStmt(CGF, S, EnterCatchFn, ExitCatchFn, ExceptionReThrowFn);
+}
+
+void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
+
+ // Note: This may have to be an invoke, if we want to support constructs like:
+ // @try {
+ // @throw(obj);
+ // }
+ // @catch(id) ...
+ //
+ // This is effectively turning @throw into an incredibly-expensive goto, but
+ // it may happen as a result of inlining followed by missed optimizations, or
+ // as a result of stupidity.
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB,
+ ExceptionAsObject);
+ }
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ CGBuilderTy B = CGF.Builder;
+ AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
+ return B.CreateCall(WeakReadFn, AddrWeakObj);
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(WeakAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ if (!threadlocal)
+ B.CreateCall2(GlobalAssignFn, src, dst);
+ else
+ // FIXME: Add a threadlocal assign API.
+ llvm_unreachable("EmitObjCGlobalAssign - Thread Local API NYI");
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, IdTy);
+ B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(StrongCastAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) {
+ CGBuilderTy B = CGF.Builder;
+ DestPtr = EnforceType(B, DestPtr, PtrTy);
+ SrcPtr = EnforceType(B, SrcPtr, PtrTy);
+
+ B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size);
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer) {
+ // This will cause a run-time crash if we accidentally use it. A value of
+ // 0 would seem more sensible, but will silently overwrite the isa pointer
+ // causing a great deal of confusion.
+ uint64_t Offset = -1;
+ // We can't call ComputeIvarBaseOffset() here if we have the
+ // implementation, because it will create an invalid ASTRecordLayout object
+ // that we are then stuck with forever, so we only initialize the ivar
+ // offset variable with a guess if we only have the interface. The
+ // initializer will be reset later anyway, when we are generating the class
+ // description.
+ if (!CGM.getContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl *>(ID)))
+ Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+
+ llvm::ConstantInt *OffsetGuess = llvm::ConstantInt::get(Int32Ty, Offset,
+ /*isSigned*/true);
+ // Don't emit the guess in non-PIC code because the linker will not be able
+ // to replace it with the real version for a library. In non-PIC code you
+ // must compile with the fragile ABI if you want to use ivars from a
+ // GCC-compiled class.
+ if (CGM.getLangOpts().PICLevel || CGM.getLangOpts().PIELevel) {
+ llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+ Int32Ty, false,
+ llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ IvarOffsetGV, Name);
+ } else {
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+ }
+ return IvarOffsetPointer;
+}
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ for (const ObjCIvarDecl *next = OID->all_declared_ivar_begin(); next;
+ next = next->getNextIvar()) {
+ if (OIVD == next)
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return 0;
+}
+
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ if (RuntimeVersion < 10)
+ return CGF.Builder.CreateZExtOrBitCast(
+ CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ PtrDiffTy);
+ std::string name = "__objc_ivar_offset_value_" +
+ Interface->getNameAsString() +"." + Ivar->getNameAsString();
+ llvm::Value *Offset = TheModule.getGlobalVariable(name);
+ if (!Offset)
+ Offset = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ llvm::Constant::getNullValue(IntTy), name);
+ Offset = CGF.Builder.CreateLoad(Offset);
+ if (Offset->getType() != PtrDiffTy)
+ Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
+ return Offset;
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true);
+}
+
+CGObjCRuntime *
+clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().ObjCNonFragileABI)
+ return new CGObjCGNUstep(CGM);
+ return new CGObjCGCC(CGM);
+}
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..e5246f1
--- /dev/null
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,6373 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
+#include "llvm/InlineAsm.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+// FIXME: We should find a nicer way to make the labels for metadata, string
+// concatenation is lame.
+
+class ObjCCommonTypesHelper {
+protected:
+ llvm::LLVMContext &VMContext;
+
+private:
+ // The types of these functions don't really matter because we
+ // should always bitcast before calling them.
+
+ /// id objc_msgSend (id, SEL, ...)
+ ///
+ /// The default messenger, used for sends whose ABI is unchanged from
+ /// the all-integer/pointer case.
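+ ///
+ /// For example, a send such as [obj foo:x] is lowered roughly as
+ /// ((id (*)(id, SEL, int))objc_msgSend)(obj, @selector(foo:), x)
+ /// (illustrative lowering, not emitted verbatim).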
+ llvm::Constant *getMessageSendFn() const {
+ // Add the non-lazy-bind attribute, since objc_msgSend is likely to
+ // be called a lot.
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend",
+ llvm::Attribute::NonLazyBind);
+ }
+
+ /// void objc_msgSend_stret (id, SEL, ...)
+ ///
+ /// The messenger used when the return value is an aggregate returned
+ /// by indirect reference in the first argument, and therefore the
+ /// self and selector parameters are shifted over by one.
+ llvm::Constant *getMessageSendStretFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
+ params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ /// [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned on the x87
+ /// floating-point stack; without a special entrypoint, the nil case
+ /// would be unbalanced.
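+ ///
+ /// For example, "double d = [(id)nil doubleValue];" must still leave the
+ /// x87 stack balanced, so the fpret entrypoint produces a zero result for
+ /// a nil receiver rather than skipping the return entirely.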
+ llvm::Constant *getMessageSendFpretFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy,
+ params, true),
+ "objc_msgSend_fpret");
+
+ }
+
+ /// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned in two values on the
+ /// x87 floating point stack; without a special entrypoint, the nil case
+ /// would be unbalanced. Only used on 64-bit X86.
+ llvm::Constant *getMessageSendFp2retFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
+ llvm::Type *resultType =
+ llvm::StructType::get(longDoubleType, longDoubleType, NULL);
+
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
+ params, true),
+ "objc_msgSend_fp2ret");
+ }
+
+ /// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ ///
+ /// The messenger used for super calls, which have different dispatch
+ /// semantics. The class passed is the superclass of the current
+ /// class.
+ llvm::Constant *getMessageSendSuperFn() const {
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper");
+ }
+
+ /// id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ ///
+ /// A slightly different messenger used for super calls. The class
+ /// passed is the current class.
+ llvm::Constant *getMessageSendSuperFn2() const {
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2");
+ }
+
+ /// void objc_msgSendSuper_stret(void *stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// The messenger used for super calls which return an aggregate indirectly.
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ /// void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// objc_msgSendSuper_stret with the super2 semantics.
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ llvm::Type *SelectorPtrTy;
+
+private:
+ /// ProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ llvm::Type *ExternalProtocolPtrTy;
+
+public:
+ llvm::Type *getExternalProtocolPtrTy() {
+ if (!ExternalProtocolPtrTy) {
+ // FIXME: It would be nice to unify this with the opaque type, so that the
+ // IR comes out a bit cleaner.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+ }
+
+ return ExternalProtocolPtrTy;
+ }
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ llvm::Type *PropertyListPtrTy;
+
+ // MethodTy - LLVM type for struct objc_method.
+ llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(IdType, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ SmallVector<CanQualType,6> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
+
+ llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty_atomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_atomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ const char *name;
+ if (atomic && copy)
+ name = "objc_setProperty_atomic_copy";
+ else if (atomic && !copy)
+ name = "objc_setProperty_atomic";
+ else if (!atomic && copy)
+ name = "objc_setProperty_nonatomic_copy";
+ else
+ name = "objc_setProperty_nonatomic";
+
+ return CGM.CreateRuntimeFunction(FTy, name);
+ }
+
+ llvm::Constant *getCopyStructFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_copyStruct (void *, const void *, size_t, bool, bool)
+ SmallVector<CanQualType,5> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
+ }
+
+ /// This routine declares and returns address of:
+ /// void objc_copyCppObjectAtomic(
+ /// void *dest, const void *src,
+ /// void (*copyHelper) (void *dest, const void *source));
+ llvm::Constant *getCppAtomicObjectFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper);
+ SmallVector<CanQualType,3> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
+ }
+
+ llvm::Constant *getEnumerationMutationFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ SmallVector<CanQualType,1> Params;
+ Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
+ llvm::Constant *getGcAssignThreadLocalFn() {
+ // id objc_assign_threadlocal(id src, id * dest)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *, ptrdiff_t)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
+ CGM.PtrDiffTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
+ llvm::Constant *GcMemmoveCollectableFn() {
+ // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+ // id objc_assign_strongCast(id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+ /// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
+ llvm::Constant *getExceptionRethrowFn() {
+ // void objc_exception_rethrow(void)
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
+ }
+
+  /// SyncEnterFn - LLVM objc_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // void objc_sync_enter (id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+  /// SyncExitFn - LLVM objc_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // void objc_sync_exit (id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
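+  // The "2" accessors are used by the non-fragile ABI (ObjCABI == 2).
+  // They differ only for super sends: objc_msgSendSuper2 expects the
+  // current class in the objc_super structure rather than its
+  // superclass, leaving the superclass lookup to the runtime.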
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFp2retFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn();
+ }
+
+ llvm::Constant *getSendFp2RetFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCCommonTypesHelper(){}
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of various types used during ObjC generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ llvm::StructType *ProtocolExtensionTy;
+  /// ProtocolExtensionPtrTy - LLVM type for struct
+ /// objc_protocol_extension *.
+ llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ llvm::Type *MethodDescriptionListPtrTy;
+  /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+ llvm::StructType *ProtocolListTy;
+  /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list *.
+ llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ llvm::Type *ClassExtensionPtrTy;
+  /// IvarTy - LLVM type for struct objc_ivar.
+ llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ llvm::Type *ExceptionDataTy;
+
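+  // The fragile runtime implements @try/@catch/@finally with
+  // setjmp/longjmp: objc_exception_try_enter pushes a handler,
+  // _setjmp captures the restart context, and the thrown object is
+  // recovered with objc_exception_extract and tested against a class
+  // with objc_exception_match.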
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, false),
+ "objc_exception_extract");
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.Int32Ty, params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ // This is specifically the prototype for x86.
+ llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
+ params, false),
+ "_setjmp",
+ llvm::Attribute::ReturnsTwice);
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCTypesHelper() {}
+};
+
+/// ObjCNonFragileABITypesHelper - encapsulates all types needed by
+/// Objective-C's modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ llvm::Type *MethodListnfABIPtrTy;
+
+  // ProtocolnfABITy - LLVM for struct _protocol_t
+ llvm::StructType *ProtocolnfABITy;
+
+  // ProtocolnfABIPtrTy - LLVM for struct _protocol_t*
+ llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ llvm::StructType *IvarListnfABITy;
+
+  // IvarListnfABIPtrTy - LLVM for struct _ivar_list_t*
+ llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ llvm::StructType *CategorynfABITy;
+
+ // New types for nonfragile abi messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // MessengerTy - Type of the messenger (shown as IMP above)
+ llvm::FunctionType *MessengerTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ llvm::Type *SuperMessageRefPtrTy;
+
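+  // Vtable ("fixup") dispatch: for selected selectors the call site
+  // references a message_ref_t that pairs a messenger with the
+  // selector. The messenger initially points at one of the *_fixup
+  // entry points below, which the runtime may rewrite to a faster
+  // routine on first use.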
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, false),
+ "objc_end_catch");
+
+ }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ llvm::Type *params[] = { Int8PtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ params, false),
+ "objc_begin_catch");
+ }
+
+ llvm::StructType *EHTypeTy;
+ llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCNonFragileABITypesHelper(){}
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ // FIXME - accessibility
+ class GC_IVAR {
+ public:
+ unsigned ivar_bytepos;
+ unsigned ivar_size;
+ GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
+ : ivar_bytepos(bytepos), ivar_size(size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const GC_IVAR &b) const {
+ return ivar_bytepos < b.ivar_bytepos;
+ }
+ };
+
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
+
+protected:
+ llvm::LLVMContext &VMContext;
+ // FIXME! May not be needing this after all.
+ unsigned ObjCABI;
+
+ // gc ivar layout bitmap calculation helper caches.
+ SmallVector<GC_IVAR, 16> SkipIvars;
+ SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ llvm::SetVector<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ llvm::SetVector<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+  /// DefinedCategoryNames - list of category names in the form Class_Category.
+ llvm::SetVector<std::string> DefinedCategoryNames;
+
+ /// MethodVarTypes - uniqued method type signatures. We have to use
+  /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+  /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ SmallVectorImpl<char> &NameOut);
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+
+ /// GetMethodVarType - Return a unique constant for the given
+ /// method's type encoding string. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended = false);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+  /// GetClassName - Return a unique constant for the given class
+  /// name. The return value has type char *.
+ llvm::Constant *GetClassName(IdentifierInfo *Ident);
+
+ llvm::Function *GetMethodDefinition(const ObjCMethodDecl *MD);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ bool ForStrongLayout);
+
+ llvm::Constant *BuildIvarLayoutBitmap(std::string &BitMap);
+
+ void BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+ void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitProtocolMethodTypes - Generate the array of extended method type
+ /// strings. The return value has type Int8PtrPtrTy.
+ llvm::Constant *EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+  /// PushProtocolProperties - Push the protocol's properties onto the
+  /// input stack.
+ void PushProtocolProperties(
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the "llvm.used" list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or 0.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
+ CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
+
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=0);
+
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo);
+
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+
+ /// EmitModuleInfo - Another marker encoding module level
+ /// information.
+ void EmitModuleInfo();
+
+  /// EmitModuleSymbols - Emit module symbols, the list of defined
+ /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
+ /// EmitSuperClassRef - Emits reference to class's main metadata class.
+ llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+  /// EmitMetaClassRef - Emit a forward reference to the class structure
+ /// for the metaclass of the given interface. The return value has
+ /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ ArrayRef<llvm::Constant*> Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+ /// - TypeName: The name for the type containing the methods.
+ /// - IsProtocol: True iff these methods are for a protocol.
+  /// - ClassMethods: True iff these are class methods.
+ /// - Required: When true, only "required" methods are
+ /// listed. Similarly, when false only "optional" methods are
+ /// listed. For classes this should always be true.
+ /// - begin, end: The method list to output.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
+
+public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
+ virtual llvm::Constant *GetGetStructFunction();
+ virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *size);
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+ llvm_unreachable("CGObjCMac::GetClassGlobal");
+ }
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// VTableDispatchMethods - List of methods for which we generate
+ /// vtable-based message dispatch.
+ llvm::DenseSet<Selector> VTableDispatchMethods;
+
+ /// DefinedMetaClasses - List of defined meta-classes.
+ std::vector<llvm::GlobalValue*> DefinedMetaClasses;
+
+ /// isVTableDispatchedSelector - Returns true if SEL is a
+ /// vtable-based selector.
+ bool isVTableDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitVTableMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+  /// EmitMetaClassRef - Return a Value* holding the address of the
+  /// _class_t metadata for the given interface's metaclass.
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue = false)
+ { return EmitSelector(Builder, Sel, lvalue); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method)
+ { return EmitSelector(Builder, Method->getSelector()); }
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+ }
+
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+ }
+
+ virtual llvm::Constant *GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+ virtual llvm::Constant *GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+ virtual llvm::Constant *GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+ }
+
+ virtual llvm::Constant *EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *size);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+/// A helper class for performing the null-initialization of a return
+/// value.
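+///
+/// Messaging nil must produce a zero result. When the result is
+/// returned indirectly in memory, or consumed arguments must be
+/// released under ARC, the send is guarded by an explicit null check
+/// on the receiver and the zeroing happens on the null path.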
+struct NullReturnState {
+ llvm::BasicBlock *NullBB;
+ llvm::BasicBlock *callBB;
+ NullReturnState() : NullBB(0), callBB(0) {}
+
+ void init(CodeGenFunction &CGF, llvm::Value *receiver) {
+ // Make blocks for the null-init and call edges.
+ NullBB = CGF.createBasicBlock("msgSend.nullinit");
+ callBB = CGF.createBasicBlock("msgSend.call");
+
+ // Check for a null receiver and, if there is one, jump to the
+ // null-init test.
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(receiver);
+ CGF.Builder.CreateCondBr(isNull, NullBB, callBB);
+
+ // Otherwise, start performing the call.
+ CGF.EmitBlock(callBB);
+ }
+
+ RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ if (!NullBB) return result;
+
+ llvm::Value *NullInitPtr = 0;
+ if (result.isScalar() && !resultType->isVoidType()) {
+ NullInitPtr = CGF.CreateTempAlloca(result.getScalarVal()->getType());
+ CGF.Builder.CreateStore(result.getScalarVal(), NullInitPtr);
+ }
+
+ // Finish the call path.
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("msgSend.cont");
+ if (CGF.HaveInsertPoint()) CGF.Builder.CreateBr(contBB);
+
+ // Emit the null-init block and perform the null-initialization there.
+ CGF.EmitBlock(NullBB);
+
+ // Release consumed arguments along the null-receiver path.
+ if (Method) {
+ CallArgList::const_iterator I = CallArgs.begin();
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i, ++I) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ RValue RV = I->RV;
+ assert(RV.isScalar() &&
+ "NullReturnState::complete - arg not on object");
+ CGF.EmitARCRelease(RV.getScalarVal(), true);
+ }
+ }
+ }
+
+ if (result.isScalar()) {
+ if (NullInitPtr)
+ CGF.EmitNullInitialization(NullInitPtr, resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return NullInitPtr ? RValue::get(CGF.Builder.CreateLoad(NullInitPtr))
+ : result;
+ }
+
+ if (!resultType->isAnyComplexType()) {
+ assert(result.isAggregate() && "null init of non-aggregate result?");
+ CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return result;
+ }
+
+    // _Complex type
+    // FIXME: It would now be easy to handle any other scalar type whose
+    // result is returned in memory due to ABI limitations.
+ CGF.EmitBlock(contBB);
+ CodeGenFunction::ComplexPairTy CallCV = result.getComplexVal();
+ llvm::Type *MemberType = CallCV.first->getType();
+ llvm::Constant *ZeroCV = llvm::Constant::getNullValue(MemberType);
+ // Create phi instruction for scalar complex value.
+ llvm::PHINode *PHIReal = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIReal->addIncoming(ZeroCV, NullBB);
+ PHIReal->addIncoming(CallCV.first, callBB);
+ llvm::PHINode *PHIImag = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIImag->addIncoming(ZeroCV, NullBB);
+ PHIImag->addIncoming(CallCV.second, callBB);
+ return RValue::getComplex(PHIReal, PHIImag);
+ }
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Helper routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
+ llvm::Constant *C,
+ unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs);
+}
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return EmitSelector(Builder, Sel, lval);
+}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder,
+                                    const ObjCMethodDecl *Method) {
+ return EmitSelector(Builder, Method->getSelector());
+}
+
+llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getObjCIdRedefinitionType(), /*ForEH=*/true);
+ }
+ if (T->isObjCClassType() ||
+ T->isObjCQualifiedClassType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getObjCClassRedefinitionType(), /*ForEH=*/true);
+ }
+ if (T->isObjCObjectPointerType())
+ return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
+
+ llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+    const int *isa; // points to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+/// ...or generate a constant NSString object.
+/*
+ struct __builtin_NSString {
+    const int *isa; // points to __NSConstantStringClassReference
+ const char *str;
+ unsigned int length;
+ };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ const StringLiteral *SL) {
+ return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
+ CGM.GetAddrOfConstantCFString(SL) :
+ CGM.GetAddrOfConstantString(SL));
+}
+
+enum {
+ kCFTaggedObjectID_Integer = (1 << 1) + 1
+};
+
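+/*
+  The (receiver, class) pair built below matches the runtime's:
+
+  struct objc_super {
+    id receiver;
+    Class super_class; // the class at which to start the method search
+  };
+*/
+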
+/// Generates a message send where 'super' is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+      // A message sent to 'super' in a class method defined in a category
+      // implementation requires odd treatment: we must retrieve the
+      // _metaclass_ for the current class, pointed at by the class's
+      // "isa" pointer. The following assumes that "isa" is the first
+      // ivar in a class (which it must be).
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else {
+ llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+ llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ Target = Super;
+ }
+ }
+ else if (isCategoryImpl)
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ else {
+ llvm::Value *ClassPtr = EmitSuperClassRef(Class);
+ ClassPtr = CGF.Builder.CreateStructGEP(ClassPtr, 1);
+ Target = CGF.Builder.CreateLoad(ClassPtr);
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
+}
+
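+/*
+  In the fragile ABI a send such as [receiver foo:bar] lowers to
+  (roughly):
+
+    objc_msgSend(receiver, @selector(foo:), bar)
+
+  with the _stret / _fpret / _fp2ret entry points substituted according
+  to the result type (see EmitMessageSend).
+*/
+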
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
+}
+
+CodeGen::RValue
+CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy);
+ ActualArgs.add(RValue::get(Arg0), Arg0Ty);
+ ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ // If we're calling a method, use the formal signature.
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ if (Method)
+ assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
+ CGM.getContext().getCanonicalType(ResultType) &&
+ "Result type mismatch!");
+
+ NullReturnState nullReturn;
+
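+  // Select the messenger: _stret when the result is returned indirectly
+  // through a hidden sret pointer, and _fpret / _fp2ret when the ABI
+  // returns the value in floating-point (or complex) registers that a
+  // nil send must still zero correctly.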
+ llvm::Constant *Fn = NULL;
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ if (!IsSuper) nullReturn.init(CGF, Arg0);
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFP2Ret(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper)
+ : ObjCTypes.getSendFp2retFn(IsSuper);
+ } else {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
+
+  bool requiresNullCheck = false;
+  if (CGM.getLangOpts().ObjCAutoRefCount && Method)
+    for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+         e = Method->param_end(); i != e; ++i) {
+      const ParmVarDecl *ParamDecl = (*i);
+      if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+        if (!nullReturn.NullBB)
+          nullReturn.init(CGF, Arg0);
+        requiresNullCheck = true;
+        break;
+      }
+    }
+
+  Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
+  RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs);
+  return nullReturn.complete(CGF, rvalue, ResultType, CallArgs,
+                             requiresNullCheck ? Method : 0);
+}
+
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return Qualifiers::Strong;
+
+ if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
+ return Qualifiers::Weak;
+
+ // check for __unsafe_unretained
+ if (FQT.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
+ return Qualifiers::GCNone;
+
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::Strong;
+
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return Qualifiers::GCNone;
+}
+
+llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
+ return nullPtr;
+
+ bool hasUnion = false;
+ SkipIvars.clear();
+ IvarsInfo.clear();
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+
+  // __isa is the first field in the block descriptor; by the runtime's
+  // convention it must be assumed to be GC'able.
+ IvarsInfo.push_back(GC_IVAR(0, 1));
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getTargetData().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ // Walk the captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ uint64_t fieldOffset = layout->getElementOffset(capture.getIndex());
+
+ // __block variables are passed by their descriptor address.
+ if (ci->isByRef()) {
+ IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1));
+ continue;
+ }
+
+    assert(!type->isArrayType() && "array variable should not be captured");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildAggrIvarRecordLayout(record, fieldOffset, true, hasUnion);
+ continue;
+ }
+
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
+ unsigned fieldSize = CGM.getContext().getTypeSize(type);
+
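+    // Strong captures are recorded in words (they must be scanned);
+    // skipped captures are recorded in bytes.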
+ if (GCAttr == Qualifiers::Strong)
+ IvarsInfo.push_back(GC_IVAR(fieldOffset,
+ fieldSize / WordSizeInBits));
+ else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
+ SkipIvars.push_back(GC_IVAR(fieldOffset,
+ fieldSize / ByteSizeInBits));
+ }
+
+ if (IvarsInfo.empty())
+ return nullPtr;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
+ llvm::array_pod_sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n block variable layout for block: ");
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+
+ return C;
+}
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.getExternalProtocolPtrTy());
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+ // If we have generated a forward reference to this protocol, emit
+ // it now. Otherwise do nothing, the protocol objects are lazily
+ // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+  // APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+  struct _objc_protocol {
+    struct _objc_protocol_extension *isa;
+    char *protocol_name;
+    struct _objc_protocol_list *protocol_list;
+    struct _objc__method_prototype_list *instance_methods;
+    struct _objc__method_prototype_list *class_methods;
+  };
+
+  See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[] = {
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getIdentifier()),
+ EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end()),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods)
+ };
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+
+ Protocols[PD->getIdentifier()] = Entry;
+ }
+ CGM.AddUsedGlobal(Entry);
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this necessary? Why only for protocols?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
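+
+// Illustration (not emitted code): a protocol that is referenced but never
+// defined in this translation unit, e.g.
+//
+//   @protocol Logging;            // forward declaration only
+//   void use(id <Logging> obj);
+//
+// reaches GetOrEmitProtocolRef without a definition, so only the
+// null-initialized forward reference above is created; FinishModule later
+// fills in empty contents for such protocols.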
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ const char ** extendedMethodTypes;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods),
+ EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(), 0, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("\01L_OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)
+ };
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue() && Values[4]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getName(),
+ Init,
+ 0, 0, true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+ // Just return null for empty protocol lists
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ llvm::Constant *Values[3];
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
+ ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ 4, false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
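+
+// Sketch (illustration only): for a declaration such as
+//
+//   @protocol P <A, B> @end
+//
+// the emitted list is conceptually
+//
+//   { .next = NULL /* runtime only */, .count = 2,
+//     .list = { &A, &B, NULL } }
+//
+// where 'count' excludes the trailing null terminator.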
+
+void CGObjCCommonMac::
+PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
+ E = PROTO->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+ for (ObjCContainerDecl::prop_iterator I = PROTO->prop_begin(),
+ E = PROTO->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ if (!PropertySet.insert(PD->getIdentifier()))
+ continue;
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ }
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ llvm::SmallVector<llvm::Constant*, 16> Properties;
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
+ E = OCD->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ PropertySet.insert(PD->getIdentifier());
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+ Prop));
+ }
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ P = OID->all_referenced_protocol_begin(),
+ E = OID->all_referenced_protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
+ for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(),
+ E = CD->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+ unsigned PropertySize =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ llvm::Constant *Values[3];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
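+
+// Example (attribute encoding assumed typical, for illustration only):
+//
+//   @property (nonatomic, copy) NSString *name;
+//
+// contributes one {name, attributes} pair along the lines of
+//
+//   { "name", "T@\"NSString\",C,N,V_name" }
+//
+// using the declared-property type-encoding scheme.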
+
+llvm::Constant *
+CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ // Return null for empty list.
+ if (MethodTypes.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrPtrTy);
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ MethodTypes.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" : 0,
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
+}
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ llvm::Constant *Desc[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD)
+ };
+ if (!Desc[1])
+ return 0;
+
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *
+CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ llvm::Constant *Values[2];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
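+
+// Example (encodings are target-dependent; shown for 32-bit, illustration
+// only): a protocol method
+//
+//   - (void)ping;
+//
+// contributes a descriptor roughly like { @selector(ping), "v8@0:4" },
+// i.e. the selector plus the method's type encoding.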
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+*/
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+  // FIXME: This is poor design; the OCD should have a pointer to the category
+ // decl. Additionally, note that Category can be null for the @implementation
+ // w/o an @interface case. Sema should just create one for us as it does for
+ // @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+
+ SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
+ << OCD->getName();
+
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ llvm::Constant *Values[7];
+ Values[0] = GetClassName(OCD->getIdentifier());
+ Values[1] = GetClassName(Interface->getIdentifier());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] =
+ EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] =
+ EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ 4, true);
+ DefinedCategories.push_back(GV);
+ DefinedCategoryNames.insert(ExtName.str());
+  // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
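+
+// For example (illustration only), given
+//
+//   @implementation NSString (Utils) ... @end
+//
+// ExtName is "NSString_Utils", and the category metadata is emitted as
+// \01L_OBJC_CATEGORY_NSString_Utils in __OBJC,__category.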
+
+// FIXME: Get from somewhere?
+enum ClassFlags {
+ eClassFlags_Factory = 0x00001,
+ eClassFlags_Meta = 0x00002,
+  // <rdar://5142207>
+ eClassFlags_HasCXXStructors = 0x02000,
+ eClassFlags_Hidden = 0x20000,
+ eClassFlags_ABI2_Hidden = 0x00010,
+  eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdar://4923634>
+};
+
+/*
+ struct _objc_class {
+ Class isa;
+ Class super_class;
+ const char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct _objc_cache *cache;
+ struct _objc_protocol_list *protocols;
+  // Objective-C 1.0 extensions (<rdar://4585769>)
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+
+ See EmitClassExtension();
+*/
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
+ unsigned Flags = eClassFlags_Factory;
+ if (ID->hasCXXStructors())
+ Flags |= eClassFlags_HasCXXStructors;
+ unsigned Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
+
+ // FIXME: Set CXX-structors flag.
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
+ Flags |= eClassFlags_Hidden;
+
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ llvm::Constant *Values[12];
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildIvarLayout(ID, true);
+ Values[11] = EmitClassExtension(ID);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+ std::string Name("\01L_OBJC_CLASS_");
+ Name += ClassName;
+ const char *Section = "__OBJC,__class,regular,no_dead_strip";
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ GV->setSection(Section);
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+ }
+ else
+ GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ DefinedClasses.push_back(GV);
+  // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ ArrayRef<llvm::Constant*> Methods) {
+ unsigned Flags = eClassFlags_Meta;
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
+ Flags |= eClassFlags_Hidden;
+
+ llvm::Constant *Values[12];
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip",
+ Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("\01L_OBJC_METACLASS_");
+ Name += ID->getNameAsCString();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name);
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+
+ return GV;
+}
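+
+// Sketch of the result (illustration only): for
+//
+//   @interface Root @end
+//   @interface Child : Root @end
+//
+// the Child metaclass is emitted with isa and super_class both holding the
+// class *names* ("Root" here); the runtime patches these to the actual
+// metaclass pointers at load time.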
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+  // FIXME: Should we look these up somewhere other than the module? It's a
+  // bit silly since we only generate these while processing an
+  // implementation, so exactly one pointer would work if we knew when we
+  // entered/exited an implementation block.
+
+ // Check for an existing forward reference.
+  // A metaclass with internal linkage may previously have been defined;
+  // pass 'true' as the second argument so it is returned.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+ } else {
+ // Generate as an external reference to keep a consistent
+ // module. This will be patched up when we emit the metaclass.
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward class metadata reference has incorrect type.");
+ return GV;
+ } else {
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+/*
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ llvm::Constant *Values[3];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildIvarLayout(ID, false);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
+ Init, "__OBJC,__class_ext,regular,no_dead_strip",
+ 4, true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
+*/
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars;
+
+ // When emitting the root class GCC emits ivar entries for the
+ // actual class structure. It is not clear if we need to follow this
+  // behavior; for now let's try to get away with not doing it. If so,
+ // the cleanest solution would be to make up an ObjCInterfaceDecl
+ // for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ llvm::Constant *Ivar[] = {
+ GetMethodVarName(IVD->getIdentifier()),
+ GetMethodVarType(IVD),
+ llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD))
+ };
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ llvm::Constant *Values[2];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__class_vars,regular,no_dead_strip",
+ 4, true);
+ else
+ GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
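+
+// Example (offsets assume the 32-bit fragile ABI, illustration only): for
+//
+//   @interface Foo : NSObject { int x; }
+//
+// the list holds one entry roughly like { "x", "i", 4 }: the ivar name, its
+// type encoding, and its byte offset (here 4, past NSObject's isa pointer).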
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return 0;
+
+ llvm::Constant *Method[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ llvm::Constant *Values[3];
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed) {
+ llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Ty, false,
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ if (Section)
+ GV->setSection(Section);
+ if (Align)
+ GV->setAlignment(Align);
+ if (AddToUsed)
+ CGM.AddUsedGlobal(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+ return NULL;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+}
+
+llvm::Constant *CGObjCMac::GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+llvm::Constant *CGObjCMac::GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+
+llvm::Constant *CGObjCMac::GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+namespace {
+ struct PerformFragileFinally : EHScopeStack::Cleanup {
+ const Stmt &S;
+ llvm::Value *SyncArgSlot;
+ llvm::Value *CallTryExitVar;
+ llvm::Value *ExceptionData;
+ ObjCTypesHelper &ObjCTypes;
+ PerformFragileFinally(const Stmt *S,
+ llvm::Value *SyncArgSlot,
+ llvm::Value *CallTryExitVar,
+ llvm::Value *ExceptionData,
+ ObjCTypesHelper *ObjCTypes)
+ : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
+ ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isa<ObjCAtTryStmt>(S)) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt()) {
+ // Save the current cleanup destination in case there's
+ // control flow inside the finally statement.
+ llvm::Value *CurCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot());
+
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateStore(CurCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ } else {
+ // Currently, the end of the cleanup must always exist.
+ CGF.EnsureInsertPoint();
+ }
+ }
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ llvm::Value *SyncArg = CGF.Builder.CreateLoad(SyncArgSlot);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
+ };
+
+ class FragileHazards {
+ CodeGenFunction &CGF;
+ SmallVector<llvm::Value*, 20> Locals;
+ llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry;
+
+ llvm::InlineAsm *ReadHazard;
+ llvm::InlineAsm *WriteHazard;
+
+ llvm::FunctionType *GetAsmFnType();
+
+ void collectLocals();
+ void emitReadHazard(CGBuilderTy &Builder);
+
+ public:
+ FragileHazards(CodeGenFunction &CGF);
+
+ void emitWriteHazard();
+ void emitHazardsInNewBlocks();
+ };
+}
+
+/// Create the fragile-ABI read and write hazards based on the current
+/// state of the function, which is presumed to be immediately prior
+/// to a @try block. These hazards are used to maintain correct
+/// semantics in the face of optimization and the fragile ABI's
+/// cavalier use of setjmp/longjmp.
+FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
+ collectLocals();
+
+ if (Locals.empty()) return;
+
+ // Collect all the blocks in the function.
+ for (llvm::Function::iterator
+ I = CGF.CurFn->begin(), E = CGF.CurFn->end(); I != E; ++I)
+ BlocksBeforeTry.insert(&*I);
+
+ llvm::FunctionType *AsmFnTy = GetAsmFnType();
+
+ // Create a read hazard for the allocas. This inhibits dead-store
+ // optimizations and forces the values to memory. This hazard is
+ // inserted before any 'throwing' calls in the protected scope to
+ // reflect the possibility that the variables might be read from the
+ // catch block if the call throws.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "*m";
+ }
+
+ ReadHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+
+ // Create a write hazard for the allocas. This inhibits folding
+ // loads across the hazard. This hazard is inserted at the
+ // beginning of the catch path to reflect the possibility that the
+ // variables might have been written within the protected scope.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "=*m";
+ }
+
+ WriteHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+}
+
+/// Emit a write hazard at the current location.
+void FragileHazards::emitWriteHazard() {
+ if (Locals.empty()) return;
+
+ CGF.Builder.CreateCall(WriteHazard, Locals)->setDoesNotThrow();
+}
+
+void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
+ assert(!Locals.empty());
+ Builder.CreateCall(ReadHazard, Locals)->setDoesNotThrow();
+}
+
+/// Emit read hazards in all the protected blocks, i.e. all the blocks
+/// which have been inserted since the beginning of the try.
+void FragileHazards::emitHazardsInNewBlocks() {
+ if (Locals.empty()) return;
+
+ CGBuilderTy Builder(CGF.getLLVMContext());
+
+ // Iterate through all blocks, skipping those prior to the try.
+ for (llvm::Function::iterator
+ FI = CGF.CurFn->begin(), FE = CGF.CurFn->end(); FI != FE; ++FI) {
+ llvm::BasicBlock &BB = *FI;
+ if (BlocksBeforeTry.count(&BB)) continue;
+
+ // Walk through all the calls in the block.
+ for (llvm::BasicBlock::iterator
+ BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) {
+ llvm::Instruction &I = *BI;
+
+      // Only non-intrinsic calls and invokes are interesting here:
+      // they are the only instructions that can possibly call longjmp.
+ if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue;
+ if (isa<llvm::IntrinsicInst>(I))
+ continue;
+
+ // Ignore call sites marked nounwind. This may be questionable,
+ // since 'nounwind' doesn't necessarily mean 'does not call longjmp'.
+ llvm::CallSite CS(&I);
+ if (CS.doesNotThrow()) continue;
+
+ // Insert a read hazard before the call. This will ensure that
+ // any writes to the locals are performed before making the
+ // call. If the call throws, then this is sufficient to
+ // guarantee correctness as long as it doesn't also write to any
+ // locals.
+ Builder.SetInsertPoint(&BB, BI);
+ emitReadHazard(Builder);
+ }
+ }
+}
+
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
+ if (V) S.insert(V);
+}
+
+void FragileHazards::collectLocals() {
+ // Compute a set of allocas to ignore.
+ llvm::DenseSet<llvm::Value*> AllocasToIgnore;
+ addIfPresent(AllocasToIgnore, CGF.ReturnValue);
+ addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest);
+
+ // Collect all the allocas currently in the function. This is
+ // probably way too aggressive.
+ llvm::BasicBlock &Entry = CGF.CurFn->getEntryBlock();
+ for (llvm::BasicBlock::iterator
+ I = Entry.begin(), E = Entry.end(); I != E; ++I)
+ if (isa<llvm::AllocaInst>(*I) && !AllocasToIgnore.count(&*I))
+ Locals.push_back(&*I);
+}
+
+llvm::FunctionType *FragileHazards::GetAsmFnType() {
+ SmallVector<llvm::Type *, 16> tys(Locals.size());
+ for (unsigned i = 0, e = Locals.size(); i != e; ++i)
+ tys[i] = Locals[i]->getType();
+ return llvm::FunctionType::get(CGF.VoidTy, tys, false);
+}
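+
+// Minimal sketch of a hazard (assumed IR shape, illustration only): with two
+// locals %x and %y, the read hazard is roughly
+//
+//   call void asm sideeffect "", "*m,*m"(i32* %x, i32* %y)
+//
+// and the write hazard uses "=*m,=*m". The empty asm does nothing at run
+// time; the indirect memory constraints simply force the locals to live in
+// memory across the setjmp region.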
+
+/*
+
+ Objective-C setjmp-longjmp (sjlj) Exception Handling
+ --
+
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+ There's no reason an implementation couldn't use a light-weight
+ setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ optimized correctness, but is not currently being done; this is
+ being tracked as rdar://problem/8160285
+
+ The basic framework for a @try-catch-finally is as follows:
+ {
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+ finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+ finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+ finally_end:
+ }
+
+ This framework differs slightly from the one gcc uses, in that gcc
+ uses _rethrow to determine if objc_exception_try_exit should be called
+ and if the object should be rethrown. This breaks in the face of
+ throwing nil and introduces unnecessary branches.
+
+ We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+ Rethrows and Jumps-Through-Finally
+ --
+
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
+*/
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can't avoid a temp here because we need the
+ // value to be preserved. If the backend ever does liveness
+ // correctly after setjmp, this will be unnecessary.
+ llvm::Value *SyncArgSlot = 0;
+ if (!isTry) {
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
+
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg");
+ CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
+ }
+
+ // Allocate memory for the setjmp buffer. This needs to be kept
+ // live throughout the try and catch blocks.
+ llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ "exceptiondata.ptr");
+
+ // Create the fragile hazards. Note that this will not capture any
+ // of the allocas required for exception processing, but will
+ // capture the current basic block (which extends all the way to the
+ // setjmp call) as "before the @try".
+ FragileHazards Hazards(CGF);
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ // The setjmp-safety rule here is that we should always store to this
+ // variable in a place that dominates the branch through the cleanup
+ // without passing through any setjmps.
+ llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ "_call_try_exit");
+
+ // A slot containing the exception to rethrow. Only needed when we
+ // have both a @catch and a @finally.
+ llvm::Value *PropagatingExnVar = 0;
+
+ // Push a normal cleanup to leave the try scope.
+ CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
+ SyncArgSlot,
+ CallTryExitVar,
+ ExceptionData,
+ &ObjCTypes);
+
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer =
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, "setjmp_buffer");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryHandler, TryBlock);
+
+ // Emit the protected block.
+ CGF.EmitBlock(TryBlock);
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ CGBuilderTy::InsertPoint TryFallthroughIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the exception handler block.
+ CGF.EmitBlock(TryHandler);
+
+ // Don't optimize loads of the in-scope locals across this point.
+ Hazards.emitWriteHazard();
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ Caught->setDoesNotThrow();
+
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
+ const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
+
+ bool HasFinally = (AtTryStmt->getFinallyStmt() != 0);
+
+ llvm::BasicBlock *CatchBlock = 0;
+ llvm::BasicBlock *CatchHandler = 0;
+ if (HasFinally) {
+ // Save the currently-propagating exception before
+ // objc_exception_try_enter clears the exception slot.
+ PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ "propagating_exception");
+ CGF.Builder.CreateStore(Caught, PropagatingExnVar);
+
+ // Enter a new exception try block (in case a @catch block
+ // throws an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+
+ CatchBlock = CGF.createBasicBlock("catch");
+ CatchHandler = CGF.createBasicBlock("catch_for_catch");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(HasFinally), CallTryExitVar);
+
+    // Handle the catch list. As a special case, we check whether everything
+    // is matched and, if so, avoid generating code for falling off the end.
+ bool AllMatched = false;
+ for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
+
+ const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const ObjCObjectPointerType *OPT = 0;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
+
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
+ AllMatched = true;
+ }
+
+ // If this is a catch-all, we don't need to test anything.
+ if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ if (CatchParam) {
+ CGF.EmitAutoVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
+ CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(OPT && "Unexpected non-object pointer type in @catch");
+ const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
+ ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
+ assert(IDecl && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
+
+ llvm::CallInst *Match =
+ CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
+ Class, Caught, "match");
+ Match->setDoesNotThrow();
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // Initialize the catch variable.
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught,
+ CGF.ConvertType(CatchParam->getType()));
+ CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // If nothing wanted anything to do with the caught exception,
+ // kill the extract call.
+ if (Caught->use_empty())
+ Caught->eraseFromParent();
+
+ if (!AllMatched)
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (HasFinally) {
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+
+ // In theory we might now need a write hazard, but actually it's
+ // unnecessary because there's no local-accessing code between
+ // the try's write hazard and here.
+ //Hazards.emitWriteHazard();
+
+ // Extract the new exception and save it to the
+ // propagating-exception slot.
+ assert(PropagatingExnVar);
+ llvm::CallInst *NewCaught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ NewCaught->setDoesNotThrow();
+ CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
+
+ // Don't pop the catch handler; the throw already did.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Insert read hazards as required in the new blocks.
+ Hazards.emitHazardsInNewBlocks();
+
+ // Pop the cleanup.
+ CGF.Builder.restoreIP(TryFallthroughIP);
+ if (CGF.HaveInsertPoint())
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.getBlock(), true);
+
+ // Emit the rethrow block.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(FinallyRethrow.getBlock(), true);
+ if (CGF.HaveInsertPoint()) {
+ // If we have a propagating-exception variable, check it.
+ llvm::Value *PropagatingExn;
+ if (PropagatingExnVar) {
+ PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
+
+ // Otherwise, just look in the buffer for the exception to throw.
+ } else {
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
+ PropagatingExn = Caught;
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), PropagatingExn)
+ ->setDoesNotThrow();
+ CGF.Builder.CreateUnreachable();
+ }
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading the value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
+ ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
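+
+// Sketch (illustration only): under GC,
+//
+//   __weak id w = ...;
+//   id x = w;
+//
+// loads x through 'objc_read_weak((id *)&w)' rather than a plain load.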
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
+ return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+                          src, dst, "strongassign");
+ return;
+}
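+
+// Summary sketch of the GC write barriers above (runtime entry-point names
+// as implied by the getter functions; illustration only):
+//
+//   objc_assign_weak(src, &dst)         // __weak lvalues
+//   objc_assign_global(src, &dst)       // globals
+//   objc_assign_threadlocal(src, &dst)  // thread-local globals
+//   objc_assign_ivar(src, &dst, off)    // instance variables
+//   objc_assign_strongCast(src, &dst)   // __strong casts
+//
+// Non-pointer payloads of at most 8 bytes are first coerced through an
+// integer to a pointer, as in the functions above.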
+
+void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, size);
+ return;
+}
+
+/// EmitObjCValueForIvar - Code gen for an ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdar://4810609&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0),
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+  eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set?
+
+ // A flag indicating that the module has no instances of a @synthesize of a
+ // superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4)
+};
+
+void CGObjCCommonMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ const char *Section = (ObjCABI == 1) ?
+ "__OBJC, __image_info,regular" :
+ "__DATA, __objc_imageinfo, regular, no_dead_strip";
+
+ // Generate module-level named metadata to convey this information to the
+ // linker and code-gen.
+ llvm::Module &Mod = CGM.getModule();
+
+ // Add the ObjC ABI version to the module flags.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Version", ObjCABI);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version",
+ version);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
+ llvm::MDString::get(VMContext,Section));
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // Non-GC overrides those files which specify GC.
+ Mod.addModuleFlag(llvm::Module::Override,
+ "Objective-C Garbage Collection", (uint32_t)0);
+ } else {
+ // Add the ObjC garbage collection value.
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ eImageInfo_GarbageCollected);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // Add the ObjC GC Only value.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C GC Only",
+ eImageInfo_GCOnly);
+
+ // Require that GC be specified and set to eImageInfo_GarbageCollected.
+ llvm::Value *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ eImageInfo_GarbageCollected)
+ };
+ Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
+ llvm::MDNode::get(VMContext, Ops));
+ }
+ }
+}
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
+ llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
+ // This used to be the filename, now it is unused. <rdr://4327263>
+ GetClassName(&CGM.getContext().Idents.get("")),
+ EmitModuleSymbols()
+ };
+ CreateMetadataVar("\01L_OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ 4, true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+ llvm::Constant *Values[5];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ Symbols.size()),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+ "__OBJC,__symbols,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
+llvm::Value *CGObjCMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ LazySymbols.insert(II);
+
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(II),
+ ObjCTypes.ClassPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry);
+}
+
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ if (lvalue)
+ return Entry;
+ return Builder.CreateLoad(Entry);
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_classname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
+ I = MethodDefinitions.find(MD);
+ if (I != MethodDefinitions.end())
+ return I->second;
+
+ return NULL;
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos,
+ bool ForStrongLayout,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME - Use iterator.
+ SmallVector<const FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
+ ForStrongLayout, HasUnion);
+}
+
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ uint64_t MaxUnionIvarSize = 0;
+ uint64_t MaxSkippedUnionIvarSize = 0;
+ const FieldDecl *MaxField = 0;
+ const FieldDecl *MaxSkippedField = 0;
+ const FieldDecl *LastFieldBitfieldOrUnnamed = 0;
+ uint64_t MaxFieldOffset = 0;
+ uint64_t MaxSkippedFieldOffset = 0;
+ uint64_t LastBitfieldOrUnnamedOffset = 0;
+ uint64_t FirstFieldDelta = 0;
+
+ if (RecFields.empty())
+ return;
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ if (!RD && CGM.getLangOpts().ObjCAutoRefCount) {
+ const FieldDecl *FirstField = RecFields[0];
+ FirstFieldDelta =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
+ }
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ const FieldDecl *Field = RecFields[i];
+ uint64_t FieldOffset;
+ if (RD) {
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta;
+ } else
+ FieldOffset =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta;
+
+    // Skip over unnamed fields or bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfieldOrUnnamed = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+      const ConstantArrayType *CArray =
+        dyn_cast_or_null<ConstantArrayType>(Array);
+      assert(CArray && "only array with known element size is supported");
+      uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType() && ElCount) {
+ int OldIndex = IvarsInfo.size() - 1;
+ int OldSkIndex = SkipIvars.size() -1;
+
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = IvarsInfo.size() - 1,
+ FirstSkIndex = SkipIvars.size() - 1 ;ElIx < ElCount; ElIx++) {
+ uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+ IvarsInfo[i].ivar_size));
+ for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+ SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+ SkipIvars[i].ivar_size));
+ }
+ continue;
+ }
+ }
+    // At this point, we are done with records/unions and arrays thereof.
+    // For other arrays we are down to their element type.
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+ unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+ if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+ || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+ if (IsUnion) {
+ uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+ if (UnionIvarSize > MaxUnionIvarSize) {
+ MaxUnionIvarSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / WordSizeInBits));
+ }
+ } else if ((ForStrongLayout &&
+ (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak))
+ || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) {
+ if (IsUnion) {
+        // FIXME: Why the asymmetry? We divide by word size in bits on the
+        // other side.
+ uint64_t UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+ MaxSkippedUnionIvarSize = UnionIvarSize;
+ MaxSkippedField = Field;
+ MaxSkippedFieldOffset = FieldOffset;
+ }
+ } else {
+        // FIXME: Why the asymmetry? We divide by byte size in bits here.
+ SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+ }
+
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update skip info.
+ uint64_t BitFieldSize
+ = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() &&"Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ unsigned FieldSize
+ = CGM.getContext().getTypeSize(LastFieldBitfieldOrUnnamed->getType());
+ SkipIvars.push_back(GC_IVAR(BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+
+ if (MaxField)
+ IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+ MaxUnionIvarSize));
+ if (MaxSkippedField)
+ SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+ MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayoutBitmap - This routine is the workhorse that does all
+/// the computation and returns the layout bitmap (for ivars or blocks) in
+/// the given BitMap string argument. It reads two containers, IvarsInfo
+/// and SkipIvars, which are assumed to have been filled in already by the
+/// caller.
+llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
+ unsigned int WordsToScan, WordsToSkip;
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+
+ // Build the string of skip/scan nibbles
+ SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ unsigned int WordSize =
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+  // Words to skip before the first scanned ivar, then the initial scan run.
+  WordsToSkip = IvarsInfo[0].ivar_bytepos / WordSize;
+  WordsToScan = IvarsInfo[0].ivar_size;
+ for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+ unsigned int TailPrevGCObjC =
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+ // consecutive 'scanned' object pointers.
+ WordsToScan += IvarsInfo[i].ivar_size;
+ } else {
+      // Skip over GC'able object pointers which overlap each other.
+ if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+ continue;
+ // Must skip over 1 or more words. We save current skip/scan values
+ // and start a new pair.
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+
+ // Skip the hole.
+ SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[i].ivar_size;
+ }
+ }
+ if (WordsToScan > 0) {
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+ }
+
+ if (!SkipIvars.empty()) {
+ unsigned int LastIndex = SkipIvars.size()-1;
+ int LastByteSkipped =
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ LastIndex = IvarsInfo.size()-1;
+ int LastByteScanned =
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
+ // Compute number of bytes to skip at the tail end of the last ivar scanned.
+ if (LastByteSkipped > LastByteScanned) {
+ unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+ SKIP_SCAN SkScan;
+ SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ }
+ }
+  // Mini-optimization of nibbles such that a 0xM0 followed by a 0x0N is
+  // produced as 0xMN.
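+  // For example (illustrative), {skip=2, scan=0} followed by {skip=0,
+  // scan=3} collapses to {skip=2, scan=3}, which is later emitted as the
+  // single byte 0x23.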
+ int SkipScan = SkipScanIvars.size()-1;
+ for (int i = 0; i <= SkipScan; i++) {
+ if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+ && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+ // 0xM0 followed by 0x0N detected.
+ SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+ for (int j = i+1; j < SkipScan; j++)
+ SkipScanIvars[j] = SkipScanIvars[j+1];
+ --SkipScan;
+ }
+ }
+
+ // Generate the string.
+ for (int i = 0; i <= SkipScan; i++) {
+ unsigned char byte;
+ unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+ unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+ unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
+ unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
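+    // For example (illustrative): skip=17, scan=1 splits into skip_big=1,
+    // skip_small=2, and scan_small=1, and is emitted as 0xf0 followed by
+    // 0x21.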
+
+ // first skip big.
+ for (unsigned int ix = 0; ix < skip_big; ix++)
+ BitMap += (unsigned char)(0xf0);
+
+ // next (skip small, scan)
+ if (skip_small) {
+ byte = skip_small << 4;
+ if (scan_big > 0) {
+ byte |= 0xf;
+ --scan_big;
+ } else if (scan_small) {
+ byte |= scan_small;
+ scan_small = 0;
+ }
+ BitMap += byte;
+ }
+ // next scan big
+ for (unsigned int ix = 0; ix < scan_big; ix++)
+ BitMap += (unsigned char)(0x0f);
+ // last scan small
+ if (scan_small) {
+ byte = scan_small;
+ BitMap += byte;
+ }
+ }
+ // null terminate string.
+ unsigned char zero = 0;
+ BitMap += zero;
+
+ llvm::GlobalVariable * Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap,false),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_classname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+/// BuildIvarLayout - Builds ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map displays which words in the ivar list must be skipped
+/// and which must be scanned by GC (see below). The string is built of
+/// bytes. Each byte is divided into two nibbles (4 bits each). The left
+/// nibble is the count of words to skip and the right nibble is the count
+/// of words to scan, so each nibble represents up to 15 words to skip or
+/// scan. Skipping the rest is represented by a 0x00 byte, which also ends
+/// the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+///    - id, Class
+///    - object *
+///    - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+///    - __weak anything
+///
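+/// For example (illustrative): a class whose instance data is one scalar
+/// word followed by two __strong object pointers yields the strong layout
+/// "\x12\x00" -- skip 1 word, scan 2 words, then the terminating 0x00.
+///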
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ SmallVector<const FieldDecl*, 32> RecFields;
+ if (CGM.getLangOpts().ObjCAutoRefCount) {
+ for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ RecFields.push_back(cast<FieldDecl>(IVD));
+ }
+ else {
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+
+ // FIXME: This is not ideal; we shouldn't have to do this copy.
+ RecFields.append(Ivars.begin(), Ivars.end());
+ }
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+  // Sort on byte position in case we encountered a union nested in
+  // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getName().data());
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+ return C;
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string in "Sel.getAsString()"
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methtype,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended) {
+ std::string TypeStr;
+ if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
+ return 0;
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methtype,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
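+/// GetNameForMethod - Compute the symbol name for a method definition,
+/// e.g. "\01-[NSArray(MyCategory) count]" for a hypothetical category
+/// method (the class and category names here are illustrative).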
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ SmallVectorImpl<char> &Name) {
+ llvm::raw_svector_ostream OS(Name);
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
+ << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+ OS << '(' << *CID << ')';
+ OS << ' ' << D->getSelector().getAsString() << ']';
+}
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
+ if (I->second->hasInitializer())
+ continue;
+
+ llvm::Constant *Values[5];
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(I->first);
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ I->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+ I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ CGM.AddUsedGlobal(I->second);
+ }
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+ //
+ // FIXME: It would be nice if we had an LLVM construct for this.
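+  //
+  // For a module that defines Foo and lazily references Bar, this emits
+  // roughly (illustrative):
+  //   .objc_class_name_Foo=0
+  //   .globl .objc_class_name_Foo
+  //   .lazy_reference .objc_class_name_Bar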
+ if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ SmallString<256> Asm;
+ Asm += CGM.getModule().getModuleInlineAsm();
+ if (!Asm.empty() && Asm.back() != '\n')
+ Asm += '\n';
+
+ llvm::raw_svector_ostream OS(Asm);
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); I != e; ++I)
+ OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+ e = LazySymbols.end(); I != e; ++I) {
+ OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+ }
+
+ for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
+ OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
+ << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
+ }
+
+ CGM.getModule().setModuleInlineAsm(OS.str());
+ }
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+ : VMContext(cgm.getLLVMContext()), CGM(cgm), ExternalProtocolPtrTy(0)
+{
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = CGM.Int8PtrTy;
+ Int8PtrPtrTy = CGM.Int8PtrPtrTy;
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.getObjCIdType(), 0, 0, false, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.getObjCClassType(), 0, 0, false, false));
+ RD->completeDefinition();
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::create("struct._prop_t",
+ Int8PtrTy, Int8PtrTy, NULL);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy =
+ llvm::StructType::create("struct._prop_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(PropertyTy, 0), NULL);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::create("struct._objc_method",
+ SelectorPtrTy, Int8PtrTy, Int8PtrTy,
+ NULL);
+
+ // struct _objc_cache *
+ CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::create("struct._objc_method_description",
+ SelectorPtrTy, Int8PtrTy, NULL);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy =
+ llvm::StructType::create("struct._objc_method_description_list",
+ IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),NULL);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // const char ** extendedMethodTypes;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::create("struct._objc_protocol_extension",
+ IntTy, MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy, PropertyListPtrTy,
+ Int8PtrPtrTy, NULL);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ ProtocolTy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol");
+
+ ProtocolListTy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
+ ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTy, 0),
+ NULL);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTy),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::create("struct._objc_ivar",
+ Int8PtrTy, Int8PtrTy, IntTy, NULL);
+
+ // struct _objc_ivar_list *
+ IvarListTy =
+ llvm::StructType::create(VMContext, "struct._objc_ivar_list");
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy =
+ llvm::StructType::create(VMContext, "struct._objc_method_list");
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::create("struct._objc_class_extension",
+ IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ ClassTy = llvm::StructType::create(VMContext, "struct._objc_class");
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy),
+ llvm::PointerType::getUnqual(ClassTy),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+ // struct _objc_category {
+ // char *category_name;
+ // char *class_name;
+ // struct _objc_method_list *instance_method;
+ // struct _objc_method_list *class_method;
+ // uint32_t size; // sizeof(struct _objc_category)
+ // struct _objc_property_list *instance_properties;// category's @property
+ // }
+ CategoryTy =
+ llvm::StructType::create("struct._objc_category",
+ Int8PtrTy, Int8PtrTy, MethodListPtrTy,
+ MethodListPtrTy, ProtocolListPtrTy,
+ IntTy, PropertyListPtrTy, NULL);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy =
+ llvm::StructType::create("struct._objc_symtab",
+ LongTy, SelectorPtrTy, ShortTy, ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0), NULL);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::create("struct._objc_module",
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
+
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(CGM.Int8PtrTy, 4);
+
+ ExceptionDataTy =
+ llvm::StructType::create("struct._objc_exception_data",
+ llvm::ArrayType::get(CGM.Int32Ty,SetJmpBufferSize),
+ StackPtrTy, NULL);
+
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy =
+ llvm::StructType::create("struct.__method_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(MethodTy, 0), NULL);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // const char ** extendedMethodTypes;
+ // }
+
+ // Holder for struct _protocol_list_t *
+ ProtocolListnfABITy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
+
+ ProtocolnfABITy =
+ llvm::StructType::create("struct._protocol_t", ObjectPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListnfABITy),
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
+ NULL);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy->setBody(LongTy,
+ llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
+ NULL);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned long int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy =
+ llvm::StructType::create("struct._ivar_t",
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
+
+ // struct _ivar_list_t {
+ // uint32 entsize; // sizeof(struct _ivar_t)
+ // uint32 count;
+  //   struct _ivar_t list[count];
+ // }
+ IvarListnfABITy =
+ llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(IvarnfABITy, 0), NULL);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+ // FIXME. Add 'reserved' field in 64bit abi mode!
+ ClassRonfABITy = llvm::StructType::create("struct._class_ro_t",
+ IntTy, IntTy, IntTy, Int8PtrTy,
+ Int8PtrTy, MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy, PropertyListPtrTy, NULL);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false)
+ ->getPointerTo();
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ ClassnfABITy = llvm::StructType::create(VMContext, "struct._class_t");
+ ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy),
+ llvm::PointerType::getUnqual(ClassnfABITy),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ NULL);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::create("struct._category_t",
+ Int8PtrTy, ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
+
+ // New types for nonfragile abi messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.VoidPtrTy, 0, 0, false, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.getObjCSelType(), 0, 0, false, false));
+ RD->completeDefinition();
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy =
+ llvm::StructType::create("struct._super_message_ref_t",
+ ImpnfABITy, SelectorPtrTy, NULL);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy =
+ llvm::StructType::create("struct._objc_typeinfo",
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy, ClassnfABIPtrTy, NULL);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
+ return NULL;
+}
+
+void CGObjCNonFragileABIMac::
+AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant *Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ Symbols.size()),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ SymbolName);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(SectionName);
+ CGM.AddUsedGlobal(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+  // The nonfragile ABI has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+ AddModuleClassList(DefinedClasses,
+ "\01L_OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+
+ for (unsigned i = 0, e = DefinedClasses.size(); i < e; i++) {
+ llvm::GlobalValue *IMPLGV = DefinedClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ for (unsigned i = 0, e = DefinedMetaClasses.size(); i < e; i++) {
+ llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ AddModuleClassList(DefinedNonLazyClasses,
+ "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories,
+ "\01L_OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories,
+ "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ EmitImageInfo();
+}
+
+/// isVTableDispatchedSelector - Returns true if SEL is in the list of
+/// VTableDispatchMethods; false otherwise. What this means is that
+/// except for the 19 selectors in the list, we generate a 32bit-style
+/// message dispatch call for all the rest.
+bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
+ // At various points we've experimented with using vtable-based
+ // dispatch for all methods.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ return false;
+ case CodeGenOptions::NonLegacy:
+ return true;
+ case CodeGenOptions::Mixed:
+ break;
+ }
+
+  // In mixed mode, see whether this selector is in the white-list of things
+  // which must use the new dispatch convention. We lazily build a dense set
+  // for this.
+ if (VTableDispatchMethods.empty()) {
+ VTableDispatchMethods.insert(GetNullarySelector("alloc"));
+ VTableDispatchMethods.insert(GetNullarySelector("class"));
+ VTableDispatchMethods.insert(GetNullarySelector("self"));
+ VTableDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ VTableDispatchMethods.insert(GetNullarySelector("length"));
+ VTableDispatchMethods.insert(GetNullarySelector("count"));
+
+ // These are vtable-based if GC is disabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+ VTableDispatchMethods.insert(GetNullarySelector("retain"));
+ VTableDispatchMethods.insert(GetNullarySelector("release"));
+ VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
+ }
+
+ VTableDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ VTableDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ VTableDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqual"));
+
+ // These are vtable-based if GC is enabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
+ VTableDispatchMethods.insert(GetNullarySelector("hash"));
+ VTableDispatchMethods.insert(GetUnarySelector("addObject"));
+
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ VTableDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+ }
+
+ return VTableDispatchMethods.count(Sel);
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
+};
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ llvm::Constant *Values[10]; // 11 for 64bit targets!
+
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ flags |= CLS_COMPILED_BY_ARC;
+
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+  // FIXME: For 64-bit targets, add 0 here.
+ Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, true);
+ Values[ 4] = GetClassName(ID->getIdentifier());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & CLS_META) {
+ MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ } else {
+ MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getName(),
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end());
+
+ if (flags & CLS_META)
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ else
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, false);
+ if (flags & CLS_META)
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ else
+ Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ (flags & CLS_META) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
+ CLASS_RO_GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+ return CLASS_RO_GV;
+
+}
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName for:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+ std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ llvm::Constant *Values[] = {
+ IsAGV,
+ SuperClassGV,
+ ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
+ ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
+ ClassRoGV // &CLASS_RO_GV
+ };
+ if (!Values[1])
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really instance end.
+ InstanceSize = RL.getDataSize().getQuantity();
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / CGM.getContext().getCharWidth();
+}
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_cache");
+
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable");
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = CLS_META;
+ std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
+ std::string ObjCClassName(getClassSymbolPrefix());
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ bool classIsHidden =
+ ID->getClassInterface()->getVisibility() == HiddenVisibility;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->hasCXXStructors())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= CLS_ROOT;
+ SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
+ IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+ } else {
+ // Has a root. Current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
+ if (Root->isWeakImported())
+ IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ // work on super class metadata symbol.
+ std::string SuperClassName =
+ ObjCMetaClassName +
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(SuperClassName);
+ if (ID->getClassInterface()->getSuperClass()->isWeakImported())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ std::string TClassName = ObjCMetaClassName + ClassName;
+ llvm::GlobalVariable *MetaTClass =
+ BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedMetaClasses.push_back(MetaTClass);
+
+ // Metadata for the class
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->hasCXXStructors())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
+ flags |= CLS_EXCEPTION;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= CLS_ROOT;
+ SuperClassGV = 0;
+ } else {
+ // Has a root. Current class is not a root.
+    std::string SuperClassName =
+      ID->getClassInterface()->getSuperClass()->getNameAsString();
+    SuperClassGV = GetClassGlobal(ObjCClassName + SuperClassName);
+ if (ID->getClassInterface()->getSuperClass()->isWeakImported())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName + ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedClasses.push_back(ClassMD);
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & CLS_EXCEPTION)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+  // Make sure method definition entries are all cleared for the next
+  // implementation.
+ MethodDefinitions.clear();
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1,
+/// which will hold the address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+
+  // This routine is called for @protocol only. So, we must build the
+  // definition of the protocol's meta-data (not a reference to it!).
+  //
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.getExternalProtocolPtrTy());
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getName();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return Builder.CreateLoad(PTGV);
+ PTGV = new llvm::GlobalVariable(
+ CGM.getModule(),
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName);
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Builder.CreateLoad(PTGV);
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ std::string ExtCatName(Prefix + Interface->getNameAsString()+
+ "_$_" + OCD->getNameAsString());
+ std::string ExtClassName(getClassSymbolPrefix() +
+ Interface->getNameAsString());
+
+ llvm::Constant *Values[6];
+ Values[0] = GetClassName(OCD->getIdentifier());
+ // meta-class entry symbol
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
+ if (Interface->isWeakImported())
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName(Prefix);
+ MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
+ "_$_" + OCD->getNameAsString();
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[2] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString();
+ Methods.clear();
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[3] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
+ << OCD->getName();
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getName() + "_$_"
+ + Category->getName(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ ExtCatName);
+ GCATV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ CGM.AddUsedGlobal(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return 0;
+
+ llvm::Constant *Method[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
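+///
+/// For example (an illustrative sketch, not the exact IR), a
+/// two-method list is emitted as the constant:
+/// @code
+/// { sizeof(struct _objc_method), 2,
+///   { { sel1, types1, imp1 }, { sel2, types2, imp2 } } }
+/// @endcode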
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ llvm::Constant *Values[3];
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
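+/// The variable is named OBJC_IVAR_$_<class>.<ivar> (see below); e.g. a
+/// hypothetical ivar "bar" in class Foo yields OBJC_IVAR_$_Foo.bar.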
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+ std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+ '.' + Ivar->getNameAsString();
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.LongTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ return IvarOffsetGV;
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+ Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
+
+ // FIXME: This matches gcc, but shouldn't the visibility be set on the use as
+ // well (i.e., in ObjCIvarOffsetVariable)?
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ ID->getVisibility() == HiddenVisibility)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_ivar");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+///   struct _ivar_t list[count];
+/// }
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars;
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ llvm::Constant *Ivar[5];
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
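+ // The ABI records the alignment as a log2 value: e.g. an 8-byte
+ // aligned ivar is stored as 3.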
+ Align = llvm::Log2_32(Align);
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+ // NOTE. The size of a bit-field ivar does not match gcc's, because of
+ // the way each compiler treats bit-fields specially. But we are told
+ // that 'size' is ignored by the runtime for bit-field ivars, so it
+ // does not matter. If it ever does, there is enough info here to get
+ // the bit-field right!
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+
+ llvm::Constant *Values[3];
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Prefix + OID->getName());
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[11];
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
+ 0, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ + PD->getName(),
+ MethodTypesExt, ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Init,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+
+ Protocols[PD->getIdentifier()] = Entry;
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(Entry);
+
+ // Use this protocol meta-data to build protocol list table in section
+ // __DATA, __objc_protolist
+ llvm::GlobalVariable *PTGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Entry,
+ "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
+ PTGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+///   struct _protocol_t *list[protocol_count];
+/// }
+/// @endcode
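+/// The emitted array is additionally null-terminated; protocol_count
+/// does not include the terminator.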
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ SmallString<256> TmpName;
+ Name.toVector(TmpName);
+ llvm::GlobalVariable *GV =
+ CGM.getModule().getGlobalVariable(TmpName.str(), true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ llvm::Constant *Values[2];
+ Values[0] =
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name);
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
+
+/// GetMethodDescriptionConstant - This routine builds the following meta-data:
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ llvm::Constant *Desc[3];
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ if (!Desc[1])
+ return 0;
+
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code Gen for nonfragile ivar reference.
+/// This code gen amounts to generating code for:
+/// @code
+/// (type *)((char *)base + OBJC_IVAR_$_<class>.<ivar>)
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
+ if (llvm::LoadInst *LI = dyn_cast<llvm::LoadInst>(Offset))
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ Offset);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
+}
+
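+/// Append the selector to the buffer, writing an underscore wherever
+/// the selector would have a colon: e.g. setObject:forKey: becomes
+/// "setObject_forKey_", while a unary selector such as "count" is
+/// appended unchanged.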
+static void appendSelectorForMessageRefTable(std::string &buffer,
+ Selector selector) {
+ if (selector.isUnarySelector()) {
+ buffer += selector.getNameForSlot(0);
+ return;
+ }
+
+ for (unsigned i = 0, e = selector.getNumArgs(); i != e; ++i) {
+ buffer += selector.getNameForSlot(i);
+ buffer += '_';
+ }
+}
+
+/// Emit a "v-table" message send. We emit a weak hidden-visibility
+/// struct, initially containing the selector pointer and a pointer to
+/// a "fixup" variant of the appropriate objc_msgSend. To call, we
+/// load and call the function pointer, passing the address of the
+/// struct as the second parameter. The runtime determines whether
+/// the selector is currently emitted using vtable dispatch; if so, it
+/// substitutes a stub function which simply tail-calls through the
+/// appropriate vtable slot, and if not, it substitutes a stub function
+/// which tail-calls objc_msgSend. Both stubs adjust the selector
+/// argument to correctly point to the selector.
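+///
+/// A sketch of the emitted data and call sequence (the struct and
+/// symbol names here are illustrative):
+/// @code
+/// struct _message_ref_t { IMP messenger; SEL name; };
+/// // at the call site:
+/// //   struct _message_ref_t *ref = &l_objc_msgSend_fixup_<sel>;
+/// //   result = ref->messenger(receiver, ref, args...);
+/// @endcode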
+RValue
+CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot returnSlot,
+ QualType resultType,
+ Selector selector,
+ llvm::Value *arg0,
+ QualType arg0Type,
+ bool isSuper,
+ const CallArgList &formalArgs,
+ const ObjCMethodDecl *method) {
+ // Compute the actual arguments.
+ CallArgList args;
+
+ // First argument: the receiver / super-call structure.
+ if (!isSuper)
+ arg0 = CGF.Builder.CreateBitCast(arg0, ObjCTypes.ObjectPtrTy);
+ args.add(RValue::get(arg0), arg0Type);
+
+ // Second argument: a pointer to the message ref structure. Leave
+ // the actual argument value blank for now.
+ args.add(RValue::get(0), ObjCTypes.MessageRefCPtrTy);
+
+ args.insert(args.end(), formalArgs.begin(), formalArgs.end());
+
+ MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
+
+ NullReturnState nullReturn;
+
+ // Find the function to call and the mangled name for the message
+ // ref structure. Using a different mangled name wouldn't actually
+ // be a problem; it would just be a waste.
+ //
+ // The runtime currently never uses vtable dispatch for anything
+ // except normal, non-super message-sends.
+ // FIXME: don't use this for that.
+ llvm::Constant *fn = 0;
+ std::string messageRefName("\01l_");
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ messageRefName += "objc_msgSendSuper2_stret_fixup";
+ } else {
+ nullReturn.init(CGF, arg0);
+ fn = ObjCTypes.getMessageSendStretFixupFn();
+ messageRefName += "objc_msgSend_stret_fixup";
+ }
+ } else if (!isSuper && CGM.ReturnTypeUsesFPRet(resultType)) {
+ fn = ObjCTypes.getMessageSendFpretFixupFn();
+ messageRefName += "objc_msgSend_fpret_fixup";
+ } else {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ messageRefName += "objc_msgSendSuper2_fixup";
+ } else {
+ fn = ObjCTypes.getMessageSendFixupFn();
+ messageRefName += "objc_msgSend_fixup";
+ }
+ }
+ assert(fn && "CGObjCNonFragileABIMac::EmitVTableMessageSend");
+ messageRefName += '_';
+
+ // Append the selector name, except use underscores anywhere we
+ // would have used colons.
+ appendSelectorForMessageRefTable(messageRefName, selector);
+
+ llvm::GlobalVariable *messageRef
+ = CGM.getModule().getGlobalVariable(messageRefName);
+ if (!messageRef) {
+ // Build the message ref structure.
+ llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
+ messageRef = new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ init,
+ messageRefName);
+ messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ messageRef->setAlignment(16);
+ messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && method)
+ for (ObjCMethodDecl::param_const_iterator i = method->param_begin(),
+ e = method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
+ llvm::Value *mref =
+ CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
+
+ // Update the message ref argument.
+ args[1].RV = RValue::get(mref);
+
+ // Load the function to call from the message ref table.
+ llvm::Value *callee = CGF.Builder.CreateStructGEP(mref, 0);
+ callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
+
+ callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
+
+ RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
+ return nullReturn.complete(CGF, result, resultType, formalArgs,
+ requiresnullCheck ? method : 0);
+}
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return isVTableDispatchedSelector(Sel)
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
+}
+
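+/// GetClassGlobal - Return the _class_t global with the given mangled
+/// name, creating an external declaration if one does not exist yet.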
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV) {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ 0, Name);
+ }
+
+ return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + II->getName().str());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
+ CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry);
+}
+
+/// EmitMetaClassRef - Return a Value * of the address of _class_t
+/// meta-data
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (Entry)
+ return Builder.CreateLoad(Entry);
+
+ std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ MetaClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+
+ return Builder.CreateLoad(Entry);
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ if (ID->isWeakImported()) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+
+ return EmitClassRef(Builder, ID);
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // Message sent to 'super' in a class method defined in
+ // a category implementation.
+ Target = EmitClassRef(CGF.Builder, Class);
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else
+ Target = EmitMetaClassRef(CGF.Builder, Class);
+ } else
+ Target = EmitSuperClassRef(CGF.Builder, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ return (isVTableDispatchedSelector(Sel))
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
+}
+
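+/// EmitSelector - Return the selector value, loaded from a uniqued
+/// \01L_OBJC_SELECTOR_REFERENCES_ global in the __objc_selrefs section
+/// (or the global's address itself when an lvalue is requested).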
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+ Selector Sel, bool lval) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ if (lval)
+ return Entry;
+ llvm::LoadInst* LI = Builder.CreateLoad(Entry);
+
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
+ return LI;
+}
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src,
+ llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
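+/// EmitGCMemmoveCollectable - Code gen for moving a collectable
+/// aggregate under GC:
+/// objc_memmove_collectable (void *dst, const void *src, size_t size)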
+void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, Size);
+ return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
+ return;
+}
+
+void
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ EmitAtSynchronizedStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getSyncEnterFn()),
+ cast<llvm::Function>(ObjCTypes.getSyncExitFn()));
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetEHType(QualType T) {
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id");
+ return IDEHType;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ return GetInterfaceEHType(IT->getDecl(), false);
+}
+
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ EmitTryCatchStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getObjCBeginCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getObjCEndCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getExceptionRethrowFn()));
+}
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
+ .setDoesNotReturn();
+ } else {
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn())
+ .setDoesNotReturn();
+ }
+
+ CGF.Builder.CreateUnreachable();
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+ // If we don't need a definition, return the entry if found or check
+ // if we use an external reference.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+ return Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, VTableName);
+
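+ // Index two pointer-sized entries into the vtable, past the
+ // offset-to-top and RTTI slots, so the pointer lands on the address
+ // point of an Itanium-style vtable.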
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
+
+ llvm::Constant *Values[] = {
+ llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx),
+ GetClassName(ID->getIdentifier()),
+ GetClassGlobal(ClassName)
+ };
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ if (CGM.getLangOpts().getVisibilityMode() == HiddenVisibility)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.EHTypeTy));
+
+ if (ForDefinition) {
+ Entry->setSection("__DATA,__objc_const");
+ Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ if (CGM.getLangOpts().ObjCNonFragileABI)
+ return new CGObjCNonFragileABIMac(CGM);
+ return new CGObjCMac(CGM);
+}
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
new file mode 100644
index 0000000..9370096
--- /dev/null
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -0,0 +1,374 @@
+//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This abstract class defines the interface for Objective-C runtime-specific
+// code generation. It provides some concrete helper methods for functionality
+// shared between all (or most) of the Objective-C runtimes supported by clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+
+ // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
+ // in here; it should never be necessary because that should be the lexical
+ // decl context for the ivar.
+
+ // If we have an implementation (and the ivar is in it), look up the
+ // offset in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && declaresSameEntity(ID->getClassInterface(), Container))
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+
+ // Compute field index.
+ //
+ // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
+ // implemented. This should be fixed to get the information from the layout
+ // directly.
+ unsigned Index = 0;
+
+ for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ if (Ivar == IVD)
+ break;
+ ++Index;
+ }
+ assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
+
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ llvm::Type *I8Ptr = CGF.Int8PtrTy;
+ QualType IvarTy = Ivar->getType();
+ llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ if (!Ivar->isBitField()) {
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+ return LV;
+ }
+
+ // We need to compute an access strategy for this bit-field. We are given the
+ // offset to the first byte in the bit-field, the sub-byte offset is taken
+ // from the original layout. We reuse the normal bit-field access strategy by
+ // treating this as an access to a struct where the bit-field is in byte 0,
+ // and adjust the containing type size as appropriate.
+ //
+ // FIXME: Note that currently we make a very conservative estimate of the
+ // alignment of the bit-field, because (a) it is not clear what guarantees the
+ // runtime makes to us, and (b) we don't have a way to specify that the struct
+ // is at an alignment plus offset.
+ //
+ //
+ // Note, there is a subtle invariant here: this bit-field path is only
+ // valid for non-synthesized ivars, but we may be called for synthesized
+ // ivars. However, a synthesized ivar can never be a bit-field, so this
+ // is safe.
+ const ASTRecordLayout &RL =
+ CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
+ uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
+ uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
+ uint64_t ContainingTypeAlign = CGF.CGM.getContext().getTargetInfo().getCharAlign();
+ uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
+ uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
+
+ // Allocate a new CGBitFieldInfo object to describe this access.
+ //
+ // FIXME: This is incredibly wasteful, these should be uniqued or part of some
+ // layout object. However, this is blocked on other cleanups to the
+ // Objective-C code, so for now we just live with allocating a bunch of these
+ // objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ ContainingTypeSize, ContainingTypeAlign));
+
+ return LValue::MakeBitfield(V, *Info,
+ IvarTy.withCVRQualifiers(CVRQualifiers));
+}
+
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+
+ struct CallObjCEndCatch : EHScopeStack::Cleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn);
+ }
+ };
+}
+
+
+void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo.enter(CGF, Finally->getFinallyBody(),
+ beginCatchFn, endCatchFn, exceptionRethrowFn);
+
+ SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
+ if (!CatchDecl) {
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
+ break;
+ }
+
+ Handler.TypeInfo = GetEHType(CatchDecl->getType());
+ }
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ }
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
+
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.popCatchScope();
+
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.getExceptionFromSlot();
+
+ // Enter the catch.
+ llvm::Value *Exn = RawExn;
+ if (beginCatchFn) {
+ Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
+ cast<llvm::CallInst>(Exn)->setDoesNotThrow();
+ }
+
+ CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
+
+ if (endCatchFn) {
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+
+ CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ endCatchFn);
+ }
+
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+
+ llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
+
+ switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, CatchParamAddr);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
+ break;
+ }
+ }
+
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Leave any cleanups associated with the catch.
+ cleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
+
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ FinallyInfo.exit(CGF);
+
+ if (Cont.isValid())
+ CGF.EmitBlock(Cont.getBlock());
+}
+
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
+void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn) {
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+
+ // Evaluate the lock operand. This is guaranteed to dominate the
+ // ARC release and lock-release cleanups.
+ const Expr *lockExpr = S.getSynchExpr();
+ llvm::Value *lock;
+ if (CGF.getLangOpts().ObjCAutoRefCount) {
+ lock = CGF.EmitARCRetainScalarExpr(lockExpr);
+ lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
+ } else {
+ lock = CGF.EmitScalarExpr(lockExpr);
+ }
+ lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);
+
+ // Acquire the lock.
+ CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();
+
+ // Register an all-paths cleanup to release the lock.
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);
+
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
+}
+
+/// Compute the pointer-to-function type to which a message send
+/// should be cast in order to correctly call the given method
+/// with the given arguments.
+///
+/// \param method - may be null
+/// \param resultType - the result type to use if there's no method
+/// \param callArgs - the actual arguments, including implicit ones
+CGObjCRuntime::MessageSendInfo
+CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs) {
+ // If there's a method, use information from that.
+ if (method) {
+ const CGFunctionInfo &signature =
+ CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
+
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(signature)->getPointerTo();
+
+ // If that's not variadic, there's no need to recompute the ABI
+ // arrangement.
+ if (!signature.isVariadic())
+ return MessageSendInfo(signature, signatureType);
+
+ // Otherwise, there is.
+ FunctionType::ExtInfo einfo = signature.getExtInfo();
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
+
+ return MessageSendInfo(argsInfo, signatureType);
+ }
+
+ // There's no method; just use a default CC.
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ // Derive the signature to call from that.
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
+ return MessageSendInfo(argsInfo, signatureType);
+}
diff --git a/clang/lib/CodeGen/CGObjCRuntime.h b/clang/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..ccf4d4d
--- /dev/null
+++ b/clang/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,286 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBJCRUNTIME_H
+#define CLANG_CODEGEN_OBJCRUNTIME_H
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "clang/AST/DeclObjC.h"
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+ class BlockDeclRefExpr;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CGBlockInfo;
+
+// FIXME: Several methods should be pure virtual but aren't, to avoid
+// breaking partially-implemented subclasses.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+protected:
+ CodeGen::CodeGenModule &CGM;
+ CGObjCRuntime(CodeGen::CodeGenModule &CGM) : CGM(CGM) {}
+
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+ /// bit-fields is carefully coordinated by these two; use caution!
+ ///
+ /// The latter overload is suitable for computing the offset of a
+ /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+ /// Emits a try / catch statement. This function is intended to be called by
+ /// subclasses, and provides a generic mechanism for generating these, which
+ /// should be usable by all runtimes. The caller must provide the functions to
+ /// call when entering and exiting a @catch() block, and the function used to
+ /// rethrow exceptions. If the begin and end catch functions are NULL, then
+ /// the function assumes that the EH personality function provides the
+ /// thrown object directly.
+ void EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn);
+ /// Emits an @synchronized() statement, using the syncEnterFn and syncExitFn
+ /// arguments as the functions called to lock and unlock the object. This
+ /// function can be called by subclasses that use zero-cost exception
+ /// handling.
+ void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values. The
+ /// return value should have the LLVM type for pointer-to
+ /// ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ Selector Sel, bool lval=false) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Get the type constant to catch for the given ObjC pointer type.
+ /// This is used externally to implement catching ObjC types in C++.
+ /// Runtimes which don't support this should add the appropriate
+ /// error to Sema.
+ virtual llvm::Constant *GetEHType(QualType T) = 0;
+
+ /// Generate a constant string object.
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+ /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Register a class alias.
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) = 0;
+
+ /// Generate an Objective-C message send operation.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class = 0,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// @protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+ // FIXME: Currently this just generates the Function definition, but really this
+ // should also be generating the loads of the parameters, as the runtime
+ // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+ /// Return the runtime function for optimized setting properties.
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) = 0;
+
+ // API for atomic copying of qualified aggregates in getter.
+ virtual llvm::Constant *GetGetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in setter.
+ virtual llvm::Constant *GetSetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates with non-trivial copy
+ // assignment (c++) in setter/getter.
+ virtual llvm::Constant *GetCppAtomicObjectFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) = 0;
+
+
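+ /// Return a reference to the NSAutoreleasePool class. The default
+ /// implementation fails: runtimes that support @autoreleasepool must
+ /// override this.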
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ llvm_unreachable("autoreleasepool unsupported in this ABI");
+ }
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) = 0;
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
+
+ struct MessageSendInfo {
+ const CGFunctionInfo &CallInfo;
+ llvm::PointerType *MessengerType;
+
+ MessageSendInfo(const CGFunctionInfo &callInfo,
+ llvm::PointerType *messengerType)
+ : CallInfo(callInfo), MessengerType(messengerType) {}
+ };
+
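+ /// getMessageSendInfo - Compute the call info (CGFunctionInfo) and the
+ /// messenger function pointer type for a message send with the given
+ /// method, result type, and argument list.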
+ MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs);
+};
+
+/// Creates an instance of an Objective-C runtime class.
+// TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+}
+}
+#endif
diff --git a/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/clang/lib/CodeGen/CGOpenCLRuntime.cpp
new file mode 100644
index 0000000..3a0e116
--- /dev/null
+++ b/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -0,0 +1,28 @@
+//===----- CGOpenCLRuntime.cpp - Interface to OpenCL Runtimes -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenCLRuntime.h"
+#include "CodeGenFunction.h"
+#include "llvm/GlobalValue.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGOpenCLRuntime::~CGOpenCLRuntime() {}
+
+void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D) {
+ return CGF.EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+}
diff --git a/clang/lib/CodeGen/CGOpenCLRuntime.h b/clang/lib/CodeGen/CGOpenCLRuntime.h
new file mode 100644
index 0000000..9a8430f
--- /dev/null
+++ b/clang/lib/CodeGen/CGOpenCLRuntime.h
@@ -0,0 +1,46 @@
+//===----- CGOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OPENCLRUNTIME_H
+#define CLANG_CODEGEN_OPENCLRUNTIME_H
+
+namespace clang {
+
+class VarDecl;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+
+class CGOpenCLRuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGOpenCLRuntime();
+
+ /// Emit the IR required for a work-group-local variable declaration, and add
+ /// an entry to CGF's LocalDeclMap for D. The base class does this using
+ /// CodeGenFunction::EmitStaticVarDecl to emit an internal global for D.
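+ ///
+ /// For example (illustrative), a kernel-scope declaration such as
+ /// __local int scratch[64];
+ /// is emitted by this default implementation as an internal global,
+ /// much like a function-local static in C.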
+ virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D);
+};
+
+}
+}
+
+#endif
diff --git a/clang/lib/CodeGen/CGRTTI.cpp b/clang/lib/CodeGen/CGRTTI.cpp
new file mode 100644
index 0000000..19973b4
--- /dev/null
+++ b/clang/lib/CodeGen/CGRTTI.cpp
@@ -0,0 +1,1015 @@
+//===--- CGCXXRTTI.cpp - Emit LLVM Code for C++ RTTI descriptors ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of RTTI descriptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CGObjCRuntime.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class RTTIBuilder {
+ CodeGenModule &CGM; // Per-module state.
+ llvm::LLVMContext &VMContext;
+
+ /// Fields - The fields of the RTTI descriptor currently being built.
+ SmallVector<llvm::Constant *, 16> Fields;
+
+ /// GetAddrOfTypeName - Returns the mangled type name of the given type.
+ llvm::GlobalVariable *
+ GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
+
+ /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
+ /// descriptor of the given type.
+ llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
+
+ /// BuildVTablePointer - Build the vtable pointer for the given type.
+ void BuildVTablePointer(const Type *Ty);
+
+ /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+ /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+ void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+ /// classes with bases that do not satisfy the abi::__si_class_type_info
+ /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+ void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
+ /// for pointer types.
+ void BuildPointerTypeInfo(QualType PointeeTy);
+
+ /// BuildObjCObjectTypeInfo - Build the appropriate kind of
+ /// type_info for an object type.
+ void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
+
+ /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+ /// struct, used for member pointer types.
+ void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+
+public:
+ RTTIBuilder(CodeGenModule &CGM) : CGM(CGM),
+ VMContext(CGM.getModule().getContext()) { }
+
+ // Pointer type info flags.
+ enum {
+ /// PTI_Const - Type has const qualifier.
+ PTI_Const = 0x1,
+
+ /// PTI_Volatile - Type has volatile qualifier.
+ PTI_Volatile = 0x2,
+
+ /// PTI_Restrict - Type has restrict qualifier.
+ PTI_Restrict = 0x4,
+
+ /// PTI_Incomplete - Type is incomplete.
+ PTI_Incomplete = 0x8,
+
+ /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+ /// (in pointer to member).
+ PTI_ContainingClassIncomplete = 0x10
+ };
+
+ // VMI type info flags.
+ enum {
+ /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+ VMI_NonDiamondRepeat = 0x1,
+
+ /// VMI_DiamondShaped - Class is diamond shaped.
+ VMI_DiamondShaped = 0x2
+ };
+
+ // Base class type info flags.
+ enum {
+ /// BCTI_Virtual - Base class is virtual.
+ BCTI_Virtual = 0x1,
+
+ /// BCTI_Public - Base class is public.
+ BCTI_Public = 0x2
+ };
+
+ /// BuildTypeInfo - Build the RTTI type info struct for the given type.
+ ///
+ /// \param Force - true to force the creation of this RTTI value
+ /// \param ForEH - true if this is for exception handling
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
+};
+}
+
+llvm::GlobalVariable *
+RTTIBuilder::GetAddrOfTypeName(QualType Ty,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ // We know that the mangled name of the type starts at index 4 of the
+ // mangled name of the typename, so we can just index into it in order to
+ // get the mangled name of the type.
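+ // (For example, the type name symbol for "int" is "_ZTSi"; dropping the
+ // four-character "_ZTS" prefix leaves the bare mangling "i".)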
+ llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
+ Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
+
+ GV->setInitializer(Init);
+
+ return GV;
+}
+
+llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
+ // Mangle the RTTI name.
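+ // (For a class X this is the Itanium symbol "_ZTI1X" -- illustrative.)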
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ // Look for an existing global.
+ llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
+
+ if (!GV) {
+ // Create a new global variable.
+ GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
+ /*Constant=*/true,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+}
+
+/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+ // Itanium C++ ABI 2.9.2:
+ // Basic type information (e.g. for "int", "bool", etc.) will be kept in
+ // the run-time support library. Specifically, the run-time support
+ // library should contain type_info objects for the types X, X* and
+ // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+ // unsigned char, signed char, short, unsigned short, int, unsigned int,
+ // long, unsigned long, long long, unsigned long long, float, double,
+ // long double, char16_t, char32_t, and the IEEE 754r decimal and
+ // half-precision floating point types.
+ switch (Ty->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::NullPtr:
+ case BuiltinType::Bool:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return true;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("asking for RRTI for a placeholder type!");
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ llvm_unreachable("FIXME: Objective-C types are unsupported!");
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+ if (!BuiltinTy)
+ return false;
+
+ // Check the qualifiers.
+ Qualifiers Quals = PointeeTy.getQualifiers();
+ Quals.removeConst();
+
+ if (!Quals.empty())
+ return false;
+
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
+ // Type info for builtin types is defined in the standard library.
+ if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+ // Type info for some pointer types to builtin types is defined in the
+ // standard library.
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return TypeInfoIsInStandardLibrary(PointerTy);
+
+ return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, and that we should not emit the type
+/// information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
+ ASTContext &Context = CGM.getContext();
+
+ // If RTTI is disabled, don't consider key functions.
+ if (!Context.getLangOpts().RTTI) return false;
+
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!RD->hasDefinition())
+ return false;
+
+ if (!RD->isDynamicClass())
+ return false;
+
+ return !CGM.getVTables().ShouldEmitVTableInThisTU(RD);
+ }
+
+ return false;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+ return !RecordTy->getDecl()->isCompleteDefinition();
+}
+
+/// ContainsIncompleteClassType - Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+/// * The given type is an incomplete class type.
+/// * The given type is a pointer type whose pointee type contains an
+/// incomplete class type.
+/// * The given type is a member pointer type whose class is an incomplete
+/// class type.
+/// * The given type is a member pointer type whose pointee type contains an
+/// incomplete class type.
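+///
+/// For example (illustrative), given only "class X;", both "X**" and
+/// "int X::*" contain an incomplete class type.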
+static bool ContainsIncompleteClassType(QualType Ty) {
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ if (IsIncompleteClassType(RecordTy))
+ return true;
+ }
+
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+ if (const MemberPointerType *MemberPointerTy =
+ dyn_cast<MemberPointerType>(Ty)) {
+ // Check if the class type is incomplete.
+ const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+ if (IsIncompleteClassType(ClassType))
+ return true;
+
+ return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+ }
+
+ return false;
+}
+
+/// getTypeInfoLinkage - Return the linkage that the type info and type info
+/// name constants should have for the given type.
+static llvm::GlobalVariable::LinkageTypes
+getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
+ // Itanium C++ ABI 2.9.5p7:
+ // In addition, it and all of the intermediate abi::__pointer_type_info
+ // structs in the chain down to the abi::__class_type_info for the
+ // incomplete class type must be prevented from resolving to the
+ // corresponding type_info structs for the complete class type, possibly
+ // by making them local static objects. Finally, a dummy class RTTI is
+ // generated for the incomplete type that will not resolve to the final
+ // complete class RTTI (because the latter need not exist), possibly by
+ // making it a local static object.
+ if (ContainsIncompleteClassType(Ty))
+ return llvm::GlobalValue::InternalLinkage;
+
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
+
+ case ExternalLinkage:
+ if (!CGM.getLangOpts().RTTI) {
+ // RTTI is not enabled, which means that this type info struct is going
+ // to be used for exception handling. Give it linkonce_odr linkage.
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->hasAttr<WeakAttr>())
+ return llvm::GlobalValue::WeakODRLinkage;
+ if (RD->isDynamicClass())
+ return CGM.getVTableLinkage(RD);
+ }
+
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ llvm_unreachable("Invalid linkage!");
+}
+
+// CanUseSingleInheritance - Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to Itanium C++ ABI, 2.95p6b.
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+ // Check the number of bases.
+ if (RD->getNumBases() != 1)
+ return false;
+
+ // Get the base.
+ CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+ // Check that the base is not virtual.
+ if (Base->isVirtual())
+ return false;
+
+ // Check that the base is public.
+ if (Base->getAccessSpecifier() != AS_public)
+ return false;
+
+ // Check that the class is dynamic iff the base is.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseDecl->isEmpty() &&
+ BaseDecl->isDynamicClass() != RD->isDynamicClass())
+ return false;
+
+ return true;
+}
+
+void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
+ // abi::__class_type_info.
+ static const char * const ClassTypeInfo =
+ "_ZTVN10__cxxabiv117__class_type_infoE";
+ // abi::__si_class_type_info.
+ static const char * const SIClassTypeInfo =
+ "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ // abi::__vmi_class_type_info.
+ static const char * const VMIClassTypeInfo =
+ "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+ const char *VTableName = 0;
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::Builtin:
+ // GCC treats vector and complex types as fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::Atomic:
+ // FIXME: GCC treats block pointers as fundamental types?!
+ case Type::BlockPointer:
+ // abi::__fundamental_type_info.
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // abi::__array_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // abi::__function_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ break;
+
+ case Type::Enum:
+ // abi::__enum_type_info.
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ VTableName = ClassTypeInfo;
+ } else if (CanUseSingleInheritance(RD)) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = VMIClassTypeInfo;
+ }
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ // Ignore protocol qualifiers.
+ Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+ // Handle id and Class.
+ if (isa<BuiltinType>(Ty)) {
+ VTableName = ClassTypeInfo;
+ break;
+ }
+
+ assert(isa<ObjCInterfaceType>(Ty));
+ // Fall through.
+
+ case Type::ObjCInterface:
+ if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = ClassTypeInfo;
+ }
+ break;
+
+ case Type::ObjCObjectPointer:
+ case Type::Pointer:
+ // abi::__pointer_type_info.
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ break;
+
+ case Type::MemberPointer:
+ // abi::__pointer_to_member_type_info.
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ break;
+ }
+
+ llvm::Constant *VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+
+ llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ // The vtable address point is 2: it skips the offset-to-top and RTTI
+ // fields that precede the virtual function pointers in an Itanium vtable.
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
+
+ Fields.push_back(VTable);
+}
+
+// maybeUpdateRTTILinkage - Will update the linkage of the RTTI data structures
+// from available_externally to the correct linkage if necessary. An example of
+// this is:
+//
+// struct A {
+// virtual void f();
+// };
+//
+// const std::type_info &g() {
+// return typeid(A);
+// }
+//
+// void A::f() { }
+//
+// When we're generating the typeid(A) expression, we do not yet know that
+// A's key function is defined in this translation unit, so we will give the
+// typeinfo and typename structures available_externally linkage. When A::f
+// forces the vtable to be generated, we need to change the linkage of the
+// typeinfo and typename structs, otherwise we'll end up with undefined
+// externals when linking.
+static void
+maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
+ QualType Ty) {
+ // We're only interested in globals with available_externally linkage.
+ if (!GV->hasAvailableExternallyLinkage())
+ return;
+
+ // Get the real linkage for the type.
+ llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // If variable is supposed to have available_externally linkage, we don't
+ // need to do anything.
+ if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
+ return;
+
+ // Update the typeinfo linkage.
+ GV->setLinkage(Linkage);
+
+ // Get the typename global.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *TypeNameGV = CGM.getModule().getNamedGlobal(Name);
+
+ assert(TypeNameGV->hasAvailableExternallyLinkage() &&
+ "Type name has different linkage from type info!");
+
+ // And update its linkage.
+ TypeNameGV->setLinkage(Linkage);
+}
+
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
+ // We want to operate on the canonical type.
+ Ty = CGM.getContext().getCanonicalType(Ty);
+
+ // Check if we've already emitted an RTTI descriptor for this type.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
+ if (OldGV && !OldGV->isDeclaration()) {
+ maybeUpdateRTTILinkage(CGM, OldGV, Ty);
+
+ return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
+ }
+
+ // Check if there is already an external RTTI descriptor for this type.
+ bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
+ if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
+ return GetAddrOfExternalRTTIDescriptor(Ty);
+
+ // Emit standard-library type info with external linkage.
+ llvm::GlobalVariable::LinkageTypes Linkage;
+ if (IsStdLib)
+ Linkage = llvm::GlobalValue::ExternalLinkage;
+ else
+ Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // Add the vtable pointer.
+ BuildVTablePointer(cast<Type>(Ty));
+
+ // And the name.
+ llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
+
+ Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy));
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ // GCC treats vector types as fundamental types.
+ case Type::Builtin:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::BlockPointer:
+ // Itanium C++ ABI 2.9.5p4:
+ // abi::__fundamental_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__array_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__function_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Enum:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__enum_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ // We don't need to emit any fields.
+ break;
+ }
+
+ if (CanUseSingleInheritance(RD))
+ BuildSIClassTypeInfo(RD);
+ else
+ BuildVMIClassTypeInfo(RD);
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
+ break;
+
+ case Type::ObjCObjectPointer:
+ BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::Pointer:
+ BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::MemberPointer:
+ BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
+ break;
+
+ case Type::Atomic:
+ // No fields, at least for the moment.
+ break;
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ /*Constant=*/true, Linkage, Init, Name);
+
+ // If there's already an old global variable, replace it with the new one.
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+
+ // GCC only relies on the uniqueness of the type names, not the
+ // type_infos themselves, so we can emit these as hidden symbols.
+ // But don't do this if we're worried about strict visibility
+ // compatibility.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForRTTI);
+ CGM.setTypeVisibility(TypeName, RD, CodeGenModule::TVK_ForRTTIName);
+ } else {
+ Visibility TypeInfoVisibility = DefaultVisibility;
+ if (CGM.getCodeGenOpts().HiddenWeakVTables &&
+ Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+ TypeInfoVisibility = HiddenVisibility;
+
+ // The type name should have the same visibility as the type itself.
+ Visibility ExplicitVisibility = Ty->getVisibility();
+ TypeName->setVisibility(CodeGenModule::
+ GetLLVMVisibility(ExplicitVisibility));
+
+ TypeInfoVisibility = minVisibility(TypeInfoVisibility, Ty->getVisibility());
+ GV->setVisibility(CodeGenModule::GetLLVMVisibility(TypeInfoVisibility));
+ }
+
+ GV->setUnnamedAddr(true);
+
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+}
+
+/// ComputeQualifierFlags - Compute the pointer type info flags from the
+/// given qualifier.
+static unsigned ComputeQualifierFlags(Qualifiers Quals) {
+ unsigned Flags = 0;
+
+ if (Quals.hasConst())
+ Flags |= RTTIBuilder::PTI_Const;
+ if (Quals.hasVolatile())
+ Flags |= RTTIBuilder::PTI_Volatile;
+ if (Quals.hasRestrict())
+ Flags |= RTTIBuilder::PTI_Restrict;
+
+ return Flags;
+}
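+
+// For example (illustrative), for a pointee of type "const volatile int",
+// ComputeQualifierFlags returns PTI_Const | PTI_Volatile, i.e. 0x3.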
+
+/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
+/// for the given Objective-C object type.
+void RTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
+ // Drop qualifiers.
+ const Type *T = OT->getBaseType().getTypePtr();
+ assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
+
+ // The builtin types are abi::__class_type_infos and don't require
+ // extra fields.
+ if (isa<BuiltinType>(T)) return;
+
+ ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
+ ObjCInterfaceDecl *Super = Class->getSuperClass();
+
+ // Root classes are also __class_type_info.
+ if (!Super) return;
+
+ QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
+
+ // Everything else is single inheritance.
+ llvm::Constant *BaseTypeInfo = RTTIBuilder(CGM).BuildTypeInfo(SuperTy);
+ Fields.push_back(BaseTypeInfo);
+}
+
+/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
+ // Itanium C++ ABI 2.9.5p6b:
+ // It adds to abi::__class_type_info a single member pointing to the
+ // type_info structure for the base type,
+ llvm::Constant *BaseTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(RD->bases_begin()->getType());
+ Fields.push_back(BaseTypeInfo);
+}
+
+namespace {
+ /// SeenBases - Contains virtual and non-virtual bases seen when traversing
+ /// a class hierarchy.
+ struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+ };
+}
+
+/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
+/// abi::__vmi_class_type_info.
+///
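+/// For example (illustrative), the classic diamond sets VMI_DiamondShaped:
+///
+///   struct A { virtual ~A(); };
+///   struct B : virtual A { };
+///   struct C : virtual A { };
+///   struct D : B, C { }; // A is reached twice as a virtual base
+///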
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
+ SeenBases &Bases) {
+
+ unsigned Flags = 0;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual()) {
+ if (Bases.VirtualBases.count(BaseDecl)) {
+ // If this virtual base has been seen before, then the class is diamond
+ // shaped.
+ Flags |= RTTIBuilder::VMI_DiamondShaped;
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the virtual base as seen.
+ Bases.VirtualBases.insert(BaseDecl);
+ }
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl)) {
+ // If this non-virtual base has been seen before, then the class has non-
+ // diamond shaped repeated inheritance.
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+ } else {
+ if (Bases.VirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the non-virtual base as seen.
+ Bases.NonVirtualBases.insert(BaseDecl);
+ }
+ }
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = BaseDecl->bases_begin(),
+ E = BaseDecl->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+ unsigned Flags = 0;
+ SeenBases Bases;
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
+
+/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __flags is a word with flags describing details about the class
+ // structure, which may be referenced by using the __flags_masks
+ // enumeration. These flags refer to both direct and indirect bases.
+ unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_count is a word with the number of direct proper base class
+ // descriptions that follow.
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
+
+ if (!RD->getNumBases())
+ return;
+
+ llvm::Type *LongLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy);
+
+ // Now add the base class descriptions.
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_info[] is an array of base class descriptions -- one for every
+ // direct proper base. Each description is of the type:
+ //
+ // struct abi::__base_class_type_info {
+ // public:
+ // const __class_type_info *__base_type;
+ // long __offset_flags;
+ //
+ // enum __offset_flags_masks {
+ // __virtual_mask = 0x1,
+ // __public_mask = 0x2,
+ // __offset_shift = 8
+ // };
+ // };
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = I;
+
+ // The __base_type member points to the RTTI for the base type.
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType()));
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ int64_t OffsetFlags = 0;
+
+ // All but the lower 8 bits of __offset_flags are a signed offset.
+ // For a non-virtual base, this is the offset in the object of the base
+ // subobject. For a virtual base, this is the offset in the virtual table of
+ // the virtual base offset for the virtual base referenced (negative).
+ CharUnits Offset;
+ if (Base->isVirtual())
+ Offset =
+ CGM.getVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ Offset = Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ OffsetFlags = Offset.getQuantity() << 8;
+
+ // The low-order byte of __offset_flags contains flags, as given by the
+ // masks from the enumeration __offset_flags_masks.
+ if (Base->isVirtual())
+ OffsetFlags |= BCTI_Virtual;
+ if (Base->getAccessSpecifier() == AS_public)
+ OffsetFlags |= BCTI_Public;
+
+ Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+ }
+}
+
+/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
+/// used for pointer types.
+void RTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+}
+
+/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+/// struct, used for member pointer types.
+void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ const RecordType *ClassType = cast<RecordType>(Ty->getClass());
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ if (IsIncompleteClassType(ClassType))
+ Flags |= PTI_ContainingClassIncomplete;
+
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+
+ // Itanium C++ ABI 2.9.5p9:
+ // __context is a pointer to an abi::__class_type_info corresponding to the
+ // class type containing the member pointed to
+ // (e.g., the "A" in "int A::*").
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(QualType(ClassType, 0)));
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
+ bool ForEH) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!ForEH && !getContext().getLangOpts().RTTI)
+ return llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (ForEH && Ty->isObjCObjectPointerType() && !LangOpts.NeXTRuntime)
+ return ObjCRuntime->GetEHType(Ty);
+
+ return RTTIBuilder(*this).BuildTypeInfo(Ty);
+}
+
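+// EmitFundamentalRTTIDescriptor - Emit the type_info for a fundamental type,
+// along with the "X*" and "const X*" variants that Itanium C++ ABI 2.9.2
+// expects the runtime library to provide.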
+void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
+ QualType PointerType = Context.getPointerType(Type);
+ QualType PointerTypeConst = Context.getPointerType(Type.withConst());
+ RTTIBuilder(*this).BuildTypeInfo(Type, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerType, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptors() {
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.NullPtrTy,
+ Context.BoolTy, Context.WCharTy,
+ Context.CharTy, Context.UnsignedCharTy,
+ Context.SignedCharTy, Context.ShortTy,
+ Context.UnsignedShortTy, Context.IntTy,
+ Context.UnsignedIntTy, Context.LongTy,
+ Context.UnsignedLongTy, Context.LongLongTy,
+ Context.UnsignedLongLongTy, Context.FloatTy,
+ Context.DoubleTy, Context.LongDoubleTy,
+ Context.Char16Ty, Context.Char32Ty };
+ for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
+ EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
+}
diff --git a/clang/lib/CodeGen/CGRecordLayout.h b/clang/lib/CodeGen/CGRecordLayout.h
new file mode 100644
index 0000000..25a0a50
--- /dev/null
+++ b/clang/lib/CodeGen/CGRecordLayout.h
@@ -0,0 +1,281 @@
+//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
+#define CLANG_CODEGEN_CGRECORDLAYOUT_H
+
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DerivedTypes.h"
+
+namespace llvm {
+ class StructType;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// \brief Helper object for describing how to generate the code for access to a
+/// bit-field.
+///
+/// This structure is intended to describe the "policy" of how the bit-field
+/// should be accessed, which may be target, language, or ABI dependent.
+class CGBitFieldInfo {
+public:
+ /// Descriptor for a single component of a bit-field access. The entire
+ /// bit-field is constituted of a bitwise OR of all of the individual
+ /// components.
+ ///
+ /// Each component describes an accessed value, which is how the component
+ /// should be transferred to/from memory, and a target placement, which is how
+ /// that component fits into the constituted bit-field. The pseudo-IR for a
+ /// load is:
+ ///
+ /// %0 = gep %base, 0, FieldIndex
+ /// %1 = gep (i8*) %0, FieldByteOffset
+ /// %2 = (i(AccessWidth) *) %1
+ /// %3 = load %2, align AccessAlignment
+ /// %4 = shr %3, FieldBitStart
+ ///
+ /// and the composed bit-field is formed as the boolean OR of all accesses,
+ /// masked to TargetBitWidth bits and shifted to TargetBitOffset.
+ struct AccessInfo {
+ /// Offset of the field to load in the LLVM structure, if any.
+ unsigned FieldIndex;
+
+ /// Byte offset from the field address, if any. This should generally be
+ /// unused as the cleanest IR comes from having a well-constructed LLVM type
+ /// with proper GEP instructions, but sometimes its use is required, for
+ /// example if an access is intended to straddle an LLVM field boundary.
+ CharUnits FieldByteOffset;
+
+ /// Bit offset in the accessed value to use. The width is implied by \see
+ /// TargetBitWidth.
+ unsigned FieldBitStart;
+
+ /// Bit width of the memory access to perform.
+ unsigned AccessWidth;
+
+ /// The alignment of the memory access, or 0 if the default alignment should
+ /// be used.
+ //
+ // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
+ // thing when it generates the code, if avoiding align directives is
+ // desired.
+ CharUnits AccessAlignment;
+
+ /// Offset for the target value.
+ unsigned TargetBitOffset;
+
+ /// Number of bits in the access that are destined for the bit-field.
+ unsigned TargetBitWidth;
+ };
+
+private:
+ /// The components to use to access the bit-field. We may need up to three
+ /// separate components to support up to i64 bit-field access (4 + 2 + 1 byte
+ /// accesses).
+ //
+ // FIXME: De-hardcode this, just allocate following the struct.
+ AccessInfo Components[3];
+
+ /// The total size of the bit-field, in bits.
+ unsigned Size;
+
+ /// The number of access components to use.
+ unsigned NumComponents;
+
+ /// Whether the bit-field is signed.
+ bool IsSigned : 1;
+
+public:
+ CGBitFieldInfo(unsigned Size, unsigned NumComponents, AccessInfo *_Components,
+ bool IsSigned) : Size(Size), NumComponents(NumComponents),
+ IsSigned(IsSigned) {
+ assert(NumComponents <= 3 && "invalid number of components!");
+ for (unsigned i = 0; i != NumComponents; ++i)
+ Components[i] = _Components[i];
+
+ // Check some invariants.
+ unsigned AccessedSize = 0;
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ AccessedSize += AI.TargetBitWidth;
+
+ // We shouldn't try to load 0 bits.
+ assert(AI.TargetBitWidth > 0);
+
+ // We can't load more bits than we accessed.
+ assert(AI.FieldBitStart + AI.TargetBitWidth <= AI.AccessWidth);
+
+ // We shouldn't put any bits outside the result size.
+ assert(AI.TargetBitWidth + AI.TargetBitOffset <= Size);
+ }
+
+ // Check that the total number of target bits matches the total bit-field
+ // size.
+ assert(AccessedSize == Size && "Total size does not match accessed size!");
+ }
+
+public:
+ /// \brief Check whether this bit-field access is signed (i.e., should be
+ /// sign-extended on loads).
+ bool isSigned() const { return IsSigned; }
+
+ /// \brief Get the size of the bit-field, in bits.
+ unsigned getSize() const { return Size; }
+
+ /// @name Component Access
+ /// @{
+
+ unsigned getNumComponents() const { return NumComponents; }
+
+ const AccessInfo &getComponent(unsigned Index) const {
+ assert(Index < getNumComponents() && "Invalid access!");
+ return Components[Index];
+ }
+
+ /// @}
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size).
+ static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize);
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size). The field decl should be known to be contained within a type of at
+ /// least the given size and with the given alignment.
+ static CGBitFieldInfo MakeInfo(CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign);
+};
+
+/// CGRecordLayout - This class handles struct and union layout info while
+/// lowering AST types to LLVM types.
+///
+/// These layout objects are only created on demand as IR generation requires.
+class CGRecordLayout {
+ friend class CodeGenTypes;
+
+ CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
+ void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+
+private:
+ /// The LLVM type corresponding to this record layout; used when
+ /// laying it out as a complete object.
+ llvm::StructType *CompleteObjectType;
+
+ /// The LLVM type for the non-virtual part of this record layout;
+ /// used when laying it out as a base subobject.
+ llvm::StructType *BaseSubobjectType;
+
+ /// Map from (non-bit-field) struct field to the corresponding llvm struct
+ /// type field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// Map from (bit-field) struct field to the corresponding llvm struct type
+ /// field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
+ // map for both virtual and non virtual bases.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> CompleteObjectVirtualBases;
+
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a complete object, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializable : 1;
+
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a base subobject, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializableAsBase : 1;
+
+public:
+ CGRecordLayout(llvm::StructType *CompleteObjectType,
+ llvm::StructType *BaseSubobjectType,
+ bool IsZeroInitializable,
+ bool IsZeroInitializableAsBase)
+ : CompleteObjectType(CompleteObjectType),
+ BaseSubobjectType(BaseSubobjectType),
+ IsZeroInitializable(IsZeroInitializable),
+ IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
+
+ /// \brief Return the "complete object" LLVM type associated with
+ /// this record.
+ llvm::StructType *getLLVMType() const {
+ return CompleteObjectType;
+ }
+
+ /// \brief Return the "base subobject" LLVM type associated with
+ /// this record.
+ llvm::StructType *getBaseSubobjectLLVMType() const {
+ return BaseSubobjectType;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer.
+ bool isZeroInitializable() const {
+ return IsZeroInitializable;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer when considered as a base subobject.
+ bool isZeroInitializableAsBase() const {
+ return IsZeroInitializableAsBase;
+ }
+
+ /// \brief Return llvm::StructType element number that corresponds to the
+ /// field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD) const {
+ assert(!FD->isBitField() && "Invalid call for bit-field decl!");
+ assert(FieldInfo.count(FD) && "Invalid field for record!");
+ return FieldInfo.lookup(FD);
+ }
+
+ unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
+ assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBases.lookup(RD);
+ }
+
+ /// \brief Return the LLVM field index corresponding to the given
+ /// virtual base. Only valid when operating on the complete object.
+ unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
+ assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
+ return CompleteObjectVirtualBases.lookup(base);
+ }
+
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
+ assert(FD->isBitField() && "Invalid call for non bit-field decl!");
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
+ it = BitFields.find(FD);
+ assert(it != BitFields.end() && "Unable to find bitfield info");
+ return it->second;
+ }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
new file mode 100644
index 0000000..1193e97
--- /dev/null
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -0,0 +1,1170 @@
+//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Builder implementation for CGRecordLayout objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGRecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CodeGenTypes.h"
+#include "CGCXXABI.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class CGRecordLayoutBuilder {
+public:
+ /// FieldTypes - Holds the LLVM types that the struct is created from.
+ ///
+ SmallVector<llvm::Type *, 16> FieldTypes;
+
+ /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
+ /// of the struct. For example, consider:
+ ///
+ /// struct A { int i; };
+ /// struct B { void *v; };
+ /// struct C : virtual A, B { };
+ ///
+ /// The LLVM type of C will be
+ /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
+ ///
+ /// And the LLVM type of the non-virtual base struct will be
+ /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
+ ///
+ /// This only gets initialized if the base subobject type is
+ /// different from the complete-object type.
+ llvm::StructType *BaseSubobjectType;
+
+ /// FieldInfo - Holds a field and its corresponding LLVM field number.
+ llvm::DenseMap<const FieldDecl *, unsigned> Fields;
+
+ /// BitFieldInfo - Holds location and size information about a bit field.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
+ /// laying out virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;
+
+ /// IsZeroInitializable - Whether this struct can be C++
+ /// zero-initialized with an LLVM zeroinitializer.
+ bool IsZeroInitializable;
+ bool IsZeroInitializableAsBase;
+
+ /// Packed - Whether the resulting LLVM struct will be packed or not.
+ bool Packed;
+
+ /// IsMsStruct - Whether ms_struct is in effect or not.
+ bool IsMsStruct;
+
+private:
+ CodeGenTypes &Types;
+
+ /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
+ /// last base laid out. Used so that we can replace the last laid out base
+ /// type with an i8 array if needed.
+ struct LastLaidOutBaseInfo {
+ CharUnits Offset;
+ CharUnits NonVirtualSize;
+
+ bool isValid() const { return !NonVirtualSize.isZero(); }
+ void invalidate() { NonVirtualSize = CharUnits::Zero(); }
+
+ } LastLaidOutBase;
+
+ /// Alignment - Contains the alignment of the RecordDecl.
+ CharUnits Alignment;
+
+ /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
+ /// this will have the number of bits still available in the field.
+ char BitsAvailableInLastField;
+
+ /// NextFieldOffset - Holds the next field offset.
+ CharUnits NextFieldOffset;
+
+ /// LayoutUnionField - Will layout a field in a union and return the type
+ /// that the field will have.
+ llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutUnion - Will layout a union RecordDecl.
+ void LayoutUnion(const RecordDecl *D);
+
+ /// LayoutFields - try to layout all fields in the record decl.
+ /// Returns false if the operation failed because the struct is not packed.
+ bool LayoutFields(const RecordDecl *D);
+
+ /// Layout a single base, virtual or non-virtual
+ bool LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBase - layout a single virtual base.
+ bool LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBases - layout the virtual bases of a record decl.
+ bool LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
+ /// like MSVC.
+ bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutNonVirtualBase - layout a single non-virtual base.
+ bool LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
+
+ /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
+ bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
+ bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);
+
+ /// LayoutField - layout a single field. Returns false if the operation failed
+ /// because the current struct is not packed.
+ bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// LayoutBitField - layout a single bit field.
+ void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// AppendField - Appends a field with the given offset and type.
+ void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);
+
+ /// AppendPadding - Appends enough padding bytes so that the total
+ /// struct size is a multiple of the field alignment.
+ void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
+
+ /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
+ /// tail padding of a previous base. If this happens, the type of the previous
+ /// base needs to be changed to an array of i8. Returns true if the last
+ /// laid out base was resized.
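+  /// For illustration (hypothetical types): with "struct A { int i; char c;
+  /// A(); };" and "struct B : A { char x; };" under the Itanium ABI, B::x is
+  /// placed in A's tail padding, so the base field for A must shrink from the
+  /// full A type to a [5 x i8] array before x is appended.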
+ bool ResizeLastBaseFieldIfNecessary(CharUnits offset);
+
+ /// getByteArrayType - Returns a byte array type with the given number of
+ /// elements.
+ llvm::Type *getByteArrayType(CharUnits NumBytes);
+
+ /// AppendBytes - Append a given number of bytes to the record.
+ void AppendBytes(CharUnits numBytes);
+
+ /// AppendTailPadding - Append enough tail padding so that the type will have
+ /// the passed size.
+ void AppendTailPadding(CharUnits RecordSize);
+
+ CharUnits getTypeAlignment(llvm::Type *Ty) const;
+
+ /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
+ /// LLVM element types.
+ CharUnits getAlignmentAsLLVMStruct() const;
+
+ /// CheckZeroInitializable - Check if the given type contains a pointer
+ /// to data member.
+ void CheckZeroInitializable(QualType T);
+
+public:
+ CGRecordLayoutBuilder(CodeGenTypes &Types)
+ : BaseSubobjectType(0),
+ IsZeroInitializable(true), IsZeroInitializableAsBase(true),
+ Packed(false), IsMsStruct(false),
+ Types(Types), BitsAvailableInLastField(0) { }
+
+ /// Layout - Will layout a RecordDecl.
+ void Layout(const RecordDecl *D);
+};
+
+}
+
+void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
+ Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
+ Packed = D->hasAttr<PackedAttr>();
+
+ IsMsStruct = D->hasAttr<MsStructAttr>();
+
+ if (D->isUnion()) {
+ LayoutUnion(D);
+ return;
+ }
+
+ if (LayoutFields(D))
+ return;
+
+ // We weren't able to layout the struct. Try again with a packed struct
+ Packed = true;
+ LastLaidOutBase.invalidate();
+ NextFieldOffset = CharUnits::Zero();
+ FieldTypes.clear();
+ Fields.clear();
+ BitFields.clear();
+ NonVirtualBases.clear();
+ VirtualBases.clear();
+
+ LayoutFields(D);
+}
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign) {
+ llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
+ CharUnits TypeSizeInBytes =
+ CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
+ uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
+
+ bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
+
+ if (FieldSize > TypeSizeInBits) {
+ // We have a wide bit-field. The extra bits are only used for padding, so
+ // if we have a bitfield of type T, with size N:
+ //
+ // T t : N;
+ //
+ // We can just assume that it's:
+ //
+ // T t : sizeof(T);
+ //
+ FieldSize = TypeSizeInBits;
+ }
+
+  // On big-endian machines the first fields are in higher bit positions,
+  // so reverse the bit offset. The byte offsets are reversed back later.
+ if (Types.getTargetData().isBigEndian()) {
+ FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
+ }
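+  // For example (illustrative): in a 32-bit container, a field occupying bits
+  // [0, 8) from the little-endian point of view maps to bits [24, 32) here.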
+
+ // Compute the access components. The policy we use is to start by attempting
+ // to access using the width of the bit-field type itself and to always access
+ // at aligned indices of that type. If such an access would fail because it
+ // extends past the bound of the type, then we reduce size to the next smaller
+ // power of two and retry. The current algorithm assumes pow2 sized types,
+ // although this is easy to fix.
+ //
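+  // Illustration (hypothetical field, register-sized widening off): for
+  // "int x : 10;" at bit offset 16 in a 32-bit container, the first attempt
+  // is a 32-bit access at bit 0, which fits, so a single component covers
+  // all 10 field bits.
+  //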
+ assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
+ CGBitFieldInfo::AccessInfo Components[3];
+ unsigned NumComponents = 0;
+ unsigned AccessedTargetBits = 0; // The number of target bits accessed.
+ unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+
+ // If requested, widen the initial bit-field access to be register sized. The
+ // theory is that this is most likely to allow multiple accesses into the same
+ // structure to be coalesced, and that the backend should be smart enough to
+ // narrow the store if no coalescing is ever done.
+ //
+  // The subsequent code handles aligning these accesses to common boundaries
+  // and guarantees that we do not access past the end of the structure.
+ if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
+ if (AccessWidth < Types.getTarget().getRegisterWidth())
+ AccessWidth = Types.getTarget().getRegisterWidth();
+ }
+
+ // Round down from the field offset to find the first access position that is
+ // at an aligned offset of the initial access type.
+ uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+
+ // Adjust initial access size to fit within record.
+ while (AccessWidth > Types.getTarget().getCharWidth() &&
+ AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ AccessWidth >>= 1;
+ AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+ }
+
+ while (AccessedTargetBits < FieldSize) {
+ // Check that we can access using a type of this size, without reading off
+ // the end of the structure. This can occur with packed structures and
+ // -fno-bitfield-type-align, for example.
+ if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ // If so, reduce access size to the next smaller power-of-two and retry.
+ AccessWidth >>= 1;
+ assert(AccessWidth >= Types.getTarget().getCharWidth()
+ && "Cannot access under byte size!");
+ continue;
+ }
+
+ // Otherwise, add an access component.
+
+ // First, compute the bits inside this access which are part of the
+ // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
+ // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
+ // in the target that we are reading.
+ assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
+ assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
+ uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
+ uint64_t AccessBitsInFieldSize =
+ std::min(AccessWidth + AccessStart,
+ FieldOffset + FieldSize) - AccessBitsInFieldStart;
+
+ assert(NumComponents < 3 && "Unexpected number of components!");
+ CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
+ AI.FieldIndex = 0;
+ // FIXME: We still follow the old access pattern of only using the field
+ // byte offset. We should switch this once we fix the struct layout to be
+ // pretty.
+
+  // On big-endian machines we reversed the bit offset because the first
+  // fields are in higher bits. But this also reverses the bytes, so fix
+  // that here by reversing the byte offset on big-endian machines.
+ if (Types.getTargetData().isBigEndian()) {
+ AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
+ ContainingTypeSizeInBits - AccessStart - AccessWidth);
+ } else {
+ AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
+ }
+ AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
+ AI.AccessWidth = AccessWidth;
+ AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
+ llvm::MinAlign(ContainingTypeAlign, AccessStart));
+ AI.TargetBitOffset = AccessedTargetBits;
+ AI.TargetBitWidth = AccessBitsInFieldSize;
+
+ AccessStart += AccessWidth;
+ AccessedTargetBits += AI.TargetBitWidth;
+ }
+
+ assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
+ return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
+}
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
+ uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
+ unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
+
+ return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
+ ContainingTypeAlign);
+}
+
+void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
+ uint64_t fieldOffset) {
+ uint64_t fieldSize = D->getBitWidthValue(Types.getContext());
+
+ if (fieldSize == 0)
+ return;
+
+ uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+ CharUnits numBytesToAppend;
+ unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();
+
+ if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
+ assert(fieldOffset % charAlign == 0 &&
+ "Field offset not aligned correctly");
+
+ CharUnits fieldOffsetInCharUnits =
+ Types.getContext().toCharUnitsFromBits(fieldOffset);
+
+ // Try to resize the last base field.
+ if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
+ nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+ }
+
+ if (fieldOffset < nextFieldOffsetInBits) {
+ assert(BitsAvailableInLastField && "Bitfield size mismatch!");
+ assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");
+
+ // The bitfield begins in the previous bit-field.
+ numBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
+ charAlign));
+ } else {
+ assert(fieldOffset % charAlign == 0 &&
+ "Field offset not aligned correctly");
+
+ // Append padding if necessary.
+ AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
+ CharUnits::One());
+
+ numBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(fieldSize, charAlign));
+
+ assert(!numBytesToAppend.isZero() && "No bytes to append!");
+ }
+
+ // Add the bit field info.
+ BitFields.insert(std::make_pair(D,
+ CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));
+
+ AppendBytes(numBytesToAppend);
+
+ BitsAvailableInLastField =
+ Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
+}
+
+bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ uint64_t fieldOffset) {
+ // If the field is packed, then we need a packed struct.
+ if (!Packed && D->hasAttr<PackedAttr>())
+ return false;
+
+ if (D->isBitField()) {
+ // We must use packed structs for unnamed bit fields since they
+ // don't affect the struct alignment.
+ if (!Packed && !D->getDeclName())
+ return false;
+
+ LayoutBitField(D, fieldOffset);
+ return true;
+ }
+
+ CheckZeroInitializable(D->getType());
+
+ assert(fieldOffset % Types.getTarget().getCharWidth() == 0
+ && "field offset is not on a byte boundary!");
+ CharUnits fieldOffsetInBytes
+ = Types.getContext().toCharUnitsFromBits(fieldOffset);
+
+ llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
+ CharUnits typeAlignment = getTypeAlignment(Ty);
+
+  // If the type alignment is larger than the struct alignment, we must use
+ // a packed struct.
+ if (typeAlignment > Alignment) {
+ assert(!Packed && "Alignment is wrong even with packed struct!");
+ return false;
+ }
+
+ if (!Packed) {
+ if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
+ return false;
+ }
+ }
+ }
+
+ // Round up the field offset to the alignment of the field type.
+ CharUnits alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
+
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
+ // Try to resize the last base field.
+ if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
+ alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
+ }
+ }
+
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
+ assert(!Packed && "Could not place field even with packed struct!");
+ return false;
+ }
+
+ AppendPadding(fieldOffsetInBytes, typeAlignment);
+
+ // Now append the field.
+ Fields[D] = FieldTypes.size();
+ AppendField(fieldOffsetInBytes, Ty);
+
+ LastLaidOutBase.invalidate();
+ return true;
+}
+
+llvm::Type *
+CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout) {
+ if (Field->isBitField()) {
+ uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());
+
+ // Ignore zero sized bit fields.
+ if (FieldSize == 0)
+ return 0;
+
+ llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldSize,
+ Types.getContext().getTargetInfo().getCharAlign()));
+
+ if (NumBytesToAppend > CharUnits::One())
+ FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
+
+ // Add the bit field info.
+ BitFields.insert(std::make_pair(Field,
+ CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
+ return FieldTy;
+ }
+
+ // This is a regular union field.
+ Fields[Field] = 0;
+ return Types.ConvertTypeForMem(Field->getType());
+}
+
+void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
+ assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
+
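+  // Illustration (hypothetical union): for "union U { int i; char c[8]; };"
+  // the int is chosen as the representative field (align 4 beats align 1),
+  // giving { i32 } plus 4 bytes of tail padding up to the union's size of 8.
+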
+ const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
+
+ llvm::Type *unionType = 0;
+ CharUnits unionSize = CharUnits::Zero();
+ CharUnits unionAlign = CharUnits::Zero();
+
+ bool hasOnlyZeroSizedBitFields = true;
+ bool checkedFirstFieldZeroInit = false;
+
+ unsigned fieldNo = 0;
+ for (RecordDecl::field_iterator field = D->field_begin(),
+ fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
+ assert(layout.getFieldOffset(fieldNo) == 0 &&
+ "Union field offset did not start at the beginning of record!");
+ llvm::Type *fieldType = LayoutUnionField(*field, layout);
+
+ if (!fieldType)
+ continue;
+
+ if (field->getDeclName() && !checkedFirstFieldZeroInit) {
+ CheckZeroInitializable(field->getType());
+ checkedFirstFieldZeroInit = true;
+ }
+
+ hasOnlyZeroSizedBitFields = false;
+
+ CharUnits fieldAlign = CharUnits::fromQuantity(
+ Types.getTargetData().getABITypeAlignment(fieldType));
+ CharUnits fieldSize = CharUnits::fromQuantity(
+ Types.getTargetData().getTypeAllocSize(fieldType));
+
+ if (fieldAlign < unionAlign)
+ continue;
+
+ if (fieldAlign > unionAlign || fieldSize > unionSize) {
+ unionType = fieldType;
+ unionAlign = fieldAlign;
+ unionSize = fieldSize;
+ }
+ }
+
+ // Now add our field.
+ if (unionType) {
+ AppendField(CharUnits::Zero(), unionType);
+
+ if (getTypeAlignment(unionType) > layout.getAlignment()) {
+ // We need a packed struct.
+ Packed = true;
+ unionAlign = CharUnits::One();
+ }
+ }
+ if (unionAlign.isZero()) {
+ (void)hasOnlyZeroSizedBitFields;
+ assert(hasOnlyZeroSizedBitFields &&
+ "0-align record did not have all zero-sized bit-fields!");
+ unionAlign = CharUnits::One();
+ }
+
+ // Append tail padding.
+ CharUnits recordSize = layout.getSize();
+ if (recordSize > unionSize)
+ AppendPadding(recordSize, unionAlign);
+}
+
+bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset) {
+ ResizeLastBaseFieldIfNecessary(baseOffset);
+
+ AppendPadding(baseOffset, CharUnits::One());
+
+ const ASTRecordLayout &baseASTLayout
+ = Types.getContext().getASTRecordLayout(base);
+
+ LastLaidOutBase.Offset = NextFieldOffset;
+ LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
+
+ llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ if (getTypeAlignment(subobjectType) > Alignment)
+ return false;
+
+ AppendField(baseOffset, subobjectType);
+ return true;
+}
+
+bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return true;
+
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializableAsBase) {
+ assert(IsZeroInitializable &&
+ "class zero-initializable as base but not as complete object");
+
+ IsZeroInitializable = IsZeroInitializableAsBase =
+ baseLayout.isZeroInitializableAsBase();
+ }
+
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
+ NonVirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return true;
+
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializable)
+ IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
+
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
+ VirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ if (!RD->getNumVBases())
+ return true;
+
+ // The vbases list is uniqued and ordered by a depth-first
+ // traversal, which is what we need here.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
+ }
+ return true;
+}
+
+/// LayoutVirtualBases - layout the virtual bases of a record decl.
+bool
+CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We only want to lay out virtual bases that aren't indirect primary bases
+ // of some other base.
+ if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
+ // Only lay out the base once.
+ if (!LaidOutVirtualBases.insert(BaseDecl))
+ continue;
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ if (!LayoutVirtualBases(BaseDecl, Layout))
+ return false;
+ }
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // If we have a primary base, lay it out first.
+ if (PrimaryBase) {
+ if (!Layout.isPrimaryBaseVirtual()) {
+ if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
+ } else {
+ if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
+ }
+
+ // Otherwise, add a vtable / vf-table if the layout says to do so.
+ } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
+ ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
+ : RD->isDynamicClass()) {
+ llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffset.isZero() &&
+ "VTable pointer must come first!");
+ AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
+ }
+
+ // Layout the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We've already laid out the primary base.
+ if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
+ continue;
+
+ if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
+ return false;
+ }
+
+ // Add a vb-table pointer if the layout insists.
+ if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
+ CharUnits VBPtrOffset = Layout.getVBPtrOffset();
+ llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
+ AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
+ AppendField(VBPtrOffset, Vbptr);
+ }
+
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
+
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ // First check if we can use the same fields as for the complete class.
+ CharUnits RecordSize = Layout.getSize();
+ if (AlignedNonVirtualTypeSize == RecordSize)
+ return true;
+
+ // Check if we need padding.
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
+
+ if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
+ assert(!Packed && "cannot layout even as packed struct");
+ return false; // Needs packing.
+ }
+
+ bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
+ if (needsPadding) {
+ CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
+ FieldTypes.push_back(getByteArrayType(NumBytes));
+ }
+
+ BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
+ FieldTypes, "", Packed);
+ Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
+
+ // Pull the padding back off.
+ if (needsPadding)
+ FieldTypes.pop_back();
+
+ return true;
+}
+
+bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ assert(!D->isUnion() && "Can't call LayoutFields on a union!");
+ assert(!Alignment.isZero() && "Did not set alignment!");
+
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
+ if (RD)
+ if (!LayoutNonVirtualBases(RD, Layout))
+ return false;
+
+ unsigned FieldNo = 0;
+ const FieldDecl *LastFD = 0;
+
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
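+      // e.g. with ms_struct in effect, in "int a; int : 0; int b;" the
+      // ":0" following the non-bitfield "a" does not affect layout.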
+ const FieldDecl *FD = (*Field);
+ if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = FD;
+ }
+
+ if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
+ assert(!Packed &&
+ "Could not layout fields even with a packed LLVM struct!");
+ return false;
+ }
+ }
+
+ if (RD) {
+ // We've laid out the non-virtual bases and the fields, now compute the
+ // non-virtual base field types.
+ if (!ComputeNonVirtualBaseType(RD)) {
+ assert(!Packed && "Could not layout even with a packed LLVM struct!");
+ return false;
+ }
+
+ // Lay out the virtual bases. The MS ABI uses a different
+ // algorithm here due to the lack of primary virtual bases.
+ if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+ if (Layout.isPrimaryBaseVirtual())
+ IndirectPrimaryBases.insert(Layout.getPrimaryBase());
+
+ if (!LayoutVirtualBases(RD, Layout))
+ return false;
+ } else {
+ if (!MSLayoutVirtualBases(RD, Layout))
+ return false;
+ }
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ return true;
+}
+
+void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
+ ResizeLastBaseFieldIfNecessary(RecordSize);
+
+ assert(NextFieldOffset <= RecordSize && "Size mismatch!");
+
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
+
+ if (AlignedNextFieldOffset == RecordSize) {
+ // We don't need any padding.
+ return;
+ }
+
+ CharUnits NumPadBytes = RecordSize - NextFieldOffset;
+ AppendBytes(NumPadBytes);
+}
+
+void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
+ llvm::Type *fieldType) {
+ CharUnits fieldSize =
+ CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
+
+ FieldTypes.push_back(fieldType);
+
+ NextFieldOffset = fieldOffset + fieldSize;
+ BitsAvailableInLastField = 0;
+}
+
+void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
+ CharUnits fieldAlignment) {
+ assert(NextFieldOffset <= fieldOffset &&
+ "Incorrect field layout!");
+
+ // Do nothing if we're already at the right offset.
+ if (fieldOffset == NextFieldOffset) return;
+
+ // If we're not emitting a packed LLVM type, try to avoid adding
+ // unnecessary padding fields.
+ if (!Packed) {
+ // Round up the field offset to the alignment of the field type.
+ CharUnits alignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(fieldAlignment);
+ assert(alignedNextFieldOffset <= fieldOffset);
+
+ // If that's the right offset, we're done.
+ if (alignedNextFieldOffset == fieldOffset) return;
+ }
+
+ // Otherwise we need explicit padding.
+ CharUnits padding = fieldOffset - NextFieldOffset;
+ AppendBytes(padding);
+}
+
+bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
+ // Check if we have a base to resize.
+ if (!LastLaidOutBase.isValid())
+ return false;
+
+ // This offset does not overlap with the tail padding.
+ if (offset >= NextFieldOffset)
+ return false;
+
+ // Restore the field offset and append an i8 array instead.
+ FieldTypes.pop_back();
+ NextFieldOffset = LastLaidOutBase.Offset;
+ AppendBytes(LastLaidOutBase.NonVirtualSize);
+ LastLaidOutBase.invalidate();
+
+ return true;
+}
+
+llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
+ assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
+
+ llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ if (numBytes > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
+
+ return Ty;
+}
+
+void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
+ if (numBytes.isZero())
+ return;
+
+ // Append the padding field
+ AppendField(NextFieldOffset, getByteArrayType(numBytes));
+}
+
+CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
+ if (Packed)
+ return CharUnits::One();
+
+ return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
+}
+
+CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
+ if (Packed)
+ return CharUnits::One();
+
+ CharUnits maxAlignment = CharUnits::One();
+ for (size_t i = 0; i != FieldTypes.size(); ++i)
+ maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
+
+ return maxAlignment;
+}
+
+/// Merge in whether a field of the given type is zero-initializable.
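+/// For example, under the Itanium C++ ABI a null pointer to data member
+/// (e.g. "int S::*") is represented as -1 rather than 0, so a record
+/// containing one cannot be zero-initialized with an LLVM zeroinitializer.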
+void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
+ // This record already contains a member pointer.
+ if (!IsZeroInitializableAsBase)
+ return;
+
+ // Can only have member pointers if we're compiling C++.
+ if (!Types.getContext().getLangOpts().CPlusPlus)
+ return;
+
+ const Type *elementType = T->getBaseElementTypeUnsafe();
+
+ if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
+ if (!Types.getCXXABI().isZeroInitializable(MPT))
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+ if (!Layout.isZeroInitializable())
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ }
+}
+
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty) {
+ CGRecordLayoutBuilder Builder(*this);
+
+ Builder.Layout(D);
+
+ Ty->setBody(Builder.FieldTypes, Builder.Packed);
+
+ // If we're in C++, compute the base subobject type.
+ llvm::StructType *BaseTy = 0;
+ if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
+ BaseTy = Builder.BaseSubobjectType;
+ if (!BaseTy) BaseTy = Ty;
+ }
+
+ CGRecordLayout *RL =
+ new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
+ Builder.IsZeroInitializableAsBase);
+
+ RL->NonVirtualBases.swap(Builder.NonVirtualBases);
+ RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
+
+ // Add all the field numbers.
+ RL->FieldInfo.swap(Builder.Fields);
+
+ // Add bitfield info.
+ RL->BitFields.swap(Builder.BitFields);
+
+ // Dump the layout, if requested.
+ if (getContext().getLangOpts().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::errs() << "Record: ";
+ D->dump();
+ llvm::errs() << "\nLayout: ";
+ RL->dump();
+ }
+
+#ifndef NDEBUG
+ // Verify that the computed LLVM struct size matches the AST layout size.
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
+
+ uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
+ assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ "Type size mismatch!");
+
+ if (BaseTy) {
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ uint64_t AlignedNonVirtualTypeSizeInBits =
+ getContext().toBits(AlignedNonVirtualTypeSize);
+
+ assert(AlignedNonVirtualTypeSizeInBits ==
+ getTargetData().getTypeAllocSizeInBits(BaseTy) &&
+ "Type size mismatch!");
+ }
+
+ // Verify that the LLVM and AST field offsets agree.
+ llvm::StructType *ST =
+ dyn_cast<llvm::StructType>(RL->getLLVMType());
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+
+ const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
+ RecordDecl::field_iterator it = D->field_begin();
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = D->hasAttr<MsStructAttr>();
+ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
+ const FieldDecl *FD = *it;
+
+ // For non-bit-fields, just check that the LLVM struct offset matches the
+ // AST offset.
+ if (!FD->isBitField()) {
+ unsigned FieldNo = RL->getLLVMFieldNo(FD);
+ assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
+ "Invalid field offset!");
+ LastFD = FD;
+ continue;
+ }
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
+ --i;
+ continue;
+ }
+ LastFD = FD;
+ }
+
+ // Ignore unnamed bit-fields.
+ if (!FD->getDeclName()) {
+ LastFD = FD;
+ continue;
+ }
+
+ const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Verify that every component access is within the structure.
+ uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
+ uint64_t AccessBitOffset = FieldOffset +
+ getContext().toBits(AI.FieldByteOffset);
+ assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
+ "Invalid bit-field access (out of range)!");
+ }
+ }
+#endif
+
+ return RL;
+}
+
+void CGRecordLayout::print(raw_ostream &OS) const {
+ OS << "<CGRecordLayout\n";
+ OS << " LLVMType:" << *CompleteObjectType << "\n";
+ if (BaseSubobjectType)
+ OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
+ OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
+ OS << " BitFields:[\n";
+
+ // Print bit-field infos in declaration order.
+ std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
+ for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
+ it = BitFields.begin(), ie = BitFields.end();
+ it != ie; ++it) {
+ const RecordDecl *RD = it->first->getParent();
+ unsigned Index = 0;
+ for (RecordDecl::field_iterator
+ it2 = RD->field_begin(); *it2 != it->first; ++it2)
+ ++Index;
+ BFIs.push_back(std::make_pair(Index, &it->second));
+ }
+ llvm::array_pod_sort(BFIs.begin(), BFIs.end());
+ for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
+ OS.indent(4);
+ BFIs[i].second->print(OS);
+ OS << "\n";
+ }
+
+ OS << "]>\n";
+}
+
+void CGRecordLayout::dump() const {
+ print(llvm::errs());
+}
+
+void CGBitFieldInfo::print(raw_ostream &OS) const {
+ OS << "<CGBitFieldInfo";
+ OS << " Size:" << Size;
+ OS << " IsSigned:" << IsSigned << "\n";
+
+ OS.indent(4 + strlen("<CGBitFieldInfo"));
+ OS << " NumComponents:" << getNumComponents();
+ OS << " Components: [";
+ if (getNumComponents()) {
+ OS << "\n";
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ OS.indent(8);
+ OS << "<AccessInfo"
+ << " FieldIndex:" << AI.FieldIndex
+ << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
+ << " FieldBitStart:" << AI.FieldBitStart
+ << " AccessWidth:" << AI.AccessWidth << "\n";
+ OS.indent(8 + strlen("<AccessInfo"));
+ OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
+ << " TargetBitOffset:" << AI.TargetBitOffset
+ << " TargetBitWidth:" << AI.TargetBitWidth
+ << ">\n";
+ }
+ OS.indent(4);
+ }
+ OS << "]>";
+}
+
+void CGBitFieldInfo::dump() const {
+ print(llvm::errs());
+}
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..a1d0789
--- /dev/null
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1683 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "TargetInfo.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ SourceLocation Loc;
+ if (isa<DeclStmt>(S))
+ Loc = S->getLocEnd();
+ else
+ Loc = S->getLocStart();
+ DI->EmitLocation(Builder, Loc);
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+
+ // These statements have their own debug info handling.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // Check if we are generating unreachable code.
+ if (!HaveInsertPoint()) {
+ // If so, and the statement doesn't contain a label, then we do not need to
+ // generate actual code. This is safe because (1) the current point is
+ // unreachable, so we don't need to execute the code, and (2) we've already
+ // handled the statements which update internal data structures (like the
+ // local variable map) which could be used by subsequent statements.
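+    // For example, statements after an unconditional "return;" are dead, but
+    // a label among them may still be the target of a later goto.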
+ if (!ContainsLabel(S)) {
+ // Verify that any decl statements were handled as simple, they may be in
+ // scope of subsequent reachable statements.
+ assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
+ return;
+ }
+
+ // Otherwise, make a new block to hold the code.
+ EnsureInsertPoint();
+ }
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ case Stmt::SEHExceptStmtClass:
+ case Stmt::SEHFinallyStmtClass:
+ case Stmt::MSDependentExistsStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::AttributedStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ // Remember the block we came in on.
+ llvm::BasicBlock *incoming = Builder.GetInsertBlock();
+ assert(incoming && "expression emission must have an insertion point");
+
+ EmitIgnoredExpr(cast<Expr>(S));
+
+ llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
+ assert(outgoing && "expression emission cleared block!");
+
+ // The expression emitters assume (reasonably!) that the insertion
+ // point is always set. To maintain that, the call-emission code
+ // for noreturn functions has to enter a new block with no
+ // predecessors. We want to kill that block and mark the current
+ // insertion point unreachable in the common case of a call like
+ // "exit();". Since expression emission doesn't otherwise create
+ // blocks with no predecessors, we can just test for that.
+ // However, we must be careful not to do this to our incoming
+ // block, because *statement* emission does sometimes create
+ // reachable blocks which will have no predecessors until later in
+ // the function. This occurs with, e.g., labels that are not
+ // reachable by fallthrough.
+ if (incoming != outgoing && outgoing->use_empty()) {
+ outgoing->eraseFromParent();
+ Builder.ClearInsertionPoint();
+ }
+ break;
+ }
+
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ llvm_unreachable(
+ "@catch statements should be handled by EmitObjCAtTryStmt");
+ case Stmt::ObjCAtFinallyStmtClass:
+ llvm_unreachable(
+ "@finally statements should be handled by EmitObjCAtTryStmt");
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
+ break;
+
+ case Stmt::CXXTryStmtClass:
+ EmitCXXTryStmt(cast<CXXTryStmt>(*S));
+ break;
+ case Stmt::CXXForRangeStmtClass:
+    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
+    break;
+ case Stmt::SEHTryStmtClass:
+ // FIXME Not yet implemented
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::AttributedStmtClass:
+ EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
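+/// For example, with the GNU statement-expression extension,
+/// "int x = ({ f(); 42; });" evaluates the compound and yields 42.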
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ AggValueSlot AggSlot) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ // Keep track of the current cleanup stack depth, including debug scopes.
+ LexicalScope Scope(*this, S.getSourceRange());
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ RValue RV;
+ if (!GetLast)
+ RV = RValue::get(0);
+ else {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(LS->getDecl());
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
+ }
+
+ return RV;
+}
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+  // If there is a cleanup stack, then it isn't worth trying to
+ // simplify this block (we would need to remove it from the scope map
+ // and cleanup entry).
+ if (!EHStack.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // Place the block after the current block, if possible, or else at
+ // the end of the function.
+ if (CurBB && CurBB->getParent())
+ CurFn->getBasicBlockList().insertAfter(CurBB, BB);
+ else
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
+ bool inserted = false;
+ for (llvm::BasicBlock::use_iterator
+ i = block->use_begin(), e = block->use_end(); i != e; ++i) {
+ if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
+ CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
+ inserted = true;
+ break;
+ }
+ }
+
+ if (!inserted)
+ CurFn->getBasicBlockList().push_back(block);
+
+ Builder.SetInsertPoint(block);
+}
+
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
+ if (Dest.isValid()) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest = JumpDest(createBasicBlock(D->getName()),
+ EHScopeStack::stable_iterator::invalid(),
+ NextCleanupDestIndex++);
+ return Dest;
+}
+
+void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.isValid()) {
+ Dest = getJumpDestInCurrentScope(D->getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
+ Dest = JumpDest(Dest.getBlock(),
+ EHStack.stable_begin(),
+ Dest.getDestIndex());
+
+ ResolveBranchFixups(Dest.getBlock());
+ }
+
+ EmitBlock(Dest.getBlock());
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S.getDecl());
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
+}
+
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ if (const LabelDecl *Target = S.getConstantTarget()) {
+ EmitBranchThroughCleanup(getJumpDestForLabel(Target));
+ return;
+ }
+
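+  // Otherwise this is a computed goto (GNU extension), e.g. "goto *p;" where
+  // p was assigned a label address via "p = &&label;".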
+ // Ensure that we have an i8* for our PHI node.
+ llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+ Int8PtrTy, "addr");
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+
+ // Get the basic block for the indirect goto.
+ llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
+
+ // The first instruction in the block has to be the PHI for the switch dest,
+ // add an entry for this branch.
+ cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
+
+ EmitBranch(IndGotoBB);
+}
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
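+  // For example, "if (0) { ... } else { ... }" emits only the else arm,
+  // unless the skipped arm contains a label that might be jumped to.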
+ bool CondConstant;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen();
+ const Stmt *Skipped = S.getElse();
+ if (!CondConstant) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
+ if (!ContainsLabel(Skipped)) {
+ if (Executed) {
+ RunCleanupsScope ExecutedScope(*this);
+ EmitStmt(Executed);
+ }
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ {
+ RunCleanupsScope ThenScope(*this);
+ EmitStmt(S.getThen());
+ }
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ // There is no need to emit line number for unconditional branch.
+ if (getDebugInfo())
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ EmitBlock(ElseBlock);
+ {
+ RunCleanupsScope ElseScope(*this);
+ EmitStmt(Else);
+ }
+ // There is no need to emit line number for unconditional branch.
+ if (getDebugInfo())
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ EmitBranch(ContBlock);
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.getBlock());
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
+
+ // C++ [stmt.while]p2:
+ // When the condition of a while statement is a declaration, the
+ // scope of the variable that is declared extends from its point
+ // of declaration (3.3.2) to the end of the while statement.
+ // [...]
+ // The object created in a condition is destroyed and created
+ // with each iteration of the loop.
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+  // while(1) is common; avoid extra exit blocks, but be sure
+  // to correctly handle break/continue.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
+
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitBlock(LoopBody);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
+
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.getBlock());
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock(), true);
+
+  // If we skipped emitting the conditional branch, the LoopHeader block
+  // is typically just a branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.getBlock());
+}
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
+
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ EmitBlock(LoopBody);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(LoopCond.getBlock());
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+  // Evaluate the conditional in the do..while condition block.
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock());
+
+  // If we skipped emitting the conditional branch, the DoCond block
+  // is typically just a branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopCond.getBlock());
+}
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.getBlock();
+ EmitBlock(CondBlock);
+
+ // Create a cleanup scope for the condition variable cleanups.
+ RunCleanupsScope ConditionScope(*this);
+
+ llvm::Value *BoolCondVal = 0;
+ if (S.getCond()) {
+ // If the for statement has a condition scope, emit the local variable
+ // declaration.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (S.getConditionVariable()) {
+ EmitAutoVarDecl(*S.getConditionVariable());
+ }
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+  // condition), and that block will become our continue block.
+ if (S.getInc())
+ Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the body, in case it is not
+ // a compound statement.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+ }
+
+ BreakContinueStack.pop_back();
+
+ ConditionScope.ForceCleanup();
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
+void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // Evaluate the first pieces before the loop.
+ EmitStmt(S.getRangeStmt());
+ EmitStmt(S.getBeginEndStmt());
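+  // At this point the desugared range declarations have been emitted:
+  // roughly "auto && __range = range-init;" plus the __begin and __end
+  // iterator variables (C++11 [stmt.ranged]).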
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ EmitBlock(CondBlock);
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // The loop body, consisting of the specified body and the loop variable.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // The body is executed if the expression, contextually converted
+ // to bool, is true.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+
+ // Create a block for the increment. In case of a 'continue', we jump there.
+ JumpDest Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the loop variable and body.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getLoopVarStmt());
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+
+ BreakContinueStack.pop_back();
+
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
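+
+// Illustration: per C++11 [stmt.ranged], the statement
+//   for (decl : range-expr) body;
+// behaves approximately like
+//   auto && __range = range-expr;
+//   for (auto __begin = begin-expr, __end = end-expr;
+//        __begin != __end; ++__begin) {
+//     decl = *__begin;
+//     body;
+//   }
+// which is why the range and begin/end declarations are emitted before the
+// condition block, and the loop variable inside the body's cleanup scope.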
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ } else {
+ StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+  // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
+ !Target.useGlobalsForAutomaticVariables()) {
+ // Apply the named return value optimization for this return statement,
+ // which means doing nothing: the appropriate result has already been
+ // constructed into the NRVO variable.
+
+    // If there is an NRVO flag for this variable, set it to 1 to indicate
+ // that the cleanup code should not destroy the variable.
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+ Builder.CreateStore(Builder.getTrue(), NRVOFlag);
+ } else if (!ReturnValue) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (RV == 0) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
+ Builder.CreateStore(Result.getScalarVal(), ReturnValue);
+ } else if (!hasAggregateLLVMType(RV->getType())) {
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ } else if (RV->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(RV, ReturnValue, false);
+ } else {
+ CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+
+ EmitBranchThroughCleanup(ReturnBlock);
+}
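+
+// NRVO illustration: in a function like
+//   X f() { X x; /* ... */ return x; }
+// 'x' has already been constructed directly in the return slot, so the
+// return statement above emits no copy; it only sets the NRVO flag (when one
+// exists) so the cleanup code skips destroying the returned object.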
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+ // As long as debug info is modeled with instructions, we have to ensure we
+ // have a place to insert here and write the stop point here.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
+ I != E; ++I)
+ EmitDecl(**I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ JumpDest Block = BreakContinueStack.back().ContinueBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+/// EmitCaseStmtRange - If case statement range is not too big then
+/// add multiple cases to switch instruction, one for each value within
+/// the range. If range is too big then emit "if" condition check.
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+ SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
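+
+// Illustration: for the GNU extension 'case 10 ... 40:' the range is 30,
+// below the hardcoded limit of 64, so 31 individual cases are added to the
+// switch. A wide range like 'case 10 ... 10000:' instead emits the unsigned
+// check (cond - 10) <= 9990 as a block chained in front of the switch's
+// default destination.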
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ // If there is no enclosing switch instance that we're aware of, then this
+ // case statement and its block can be elided. This situation only happens
+ // when we've constant-folded the switch, are emitting the constant case,
+ // and part of the constant case includes another case statement. For
+ // instance: switch (4) { case 4: do { case 5: } while (1); }
+ if (!SwitchInsn) {
+ EmitStmt(S.getSubStmt());
+ return;
+ }
+
+ // Handle case ranges.
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
+
+ // If the body of the case is just a 'break', and if there was no fallthrough,
+ // try to not emit an empty block.
+ if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+
+ // Only do this optimization if there are no cleanups that need emitting.
+ if (isObviouslyBranchWithoutCleanups(Block)) {
+ SwitchInsn->addCase(CaseVal, Block.getBlock());
+
+ // If there was a fallthrough into this case, make sure to redirect it to
+ // the end of the switch as well.
+ if (Builder.GetInsertBlock()) {
+ Builder.CreateBr(Block.getBlock());
+ Builder.ClearInsertionPoint();
+ }
+ return;
+ }
+ }
+
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ SwitchInsn->addCase(CaseVal, CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, i.e.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+ // that falls through to the next case which is IR intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+ // Otherwise, iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == 0) {
+ CurCase = NextCase;
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
+ SwitchInsn->addCase(CaseVal, CaseDest);
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+ // Normal default recursion for non-cases.
+ EmitStmt(CurCase->getSubStmt());
+}
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+ EmitBlock(DefaultBlock);
+ EmitStmt(S.getSubStmt());
+}
+
+/// CollectStatementsForCase - Given the body of a 'switch' statement and a
+/// constant value that is being switched on, see if we can dead code eliminate
+/// the body of the switch to a simple series of statements to emit. Basically,
+/// on a switch (5) we want to find these statements:
+/// case 5:
+/// printf(...); <--
+/// ++i; <--
+/// break;
+///
+/// and add them to the ResultStmts vector. If it is unsafe to do this
+/// transformation (for example, one of the elided statements contains a label
+/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
+/// should include statements after it (e.g. the printf() line is a substmt of
+/// the case) then return CSFC_FallThrough. If we handled it and found a break
+/// statement, then return CSFC_Success.
+///
+/// If Case is non-null, then we are looking for the specified case, checking
+/// that nothing we jump over contains labels. If Case is null, then we found
+/// the case and are looking for the break.
+///
+/// If the recursive walk actually finds our Case, then we set FoundCase to
+/// true.
+///
+enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
+static CSFC_Result CollectStatementsForCase(const Stmt *S,
+ const SwitchCase *Case,
+ bool &FoundCase,
+ SmallVectorImpl<const Stmt*> &ResultStmts) {
+ // If this is a null statement, just succeed.
+ if (S == 0)
+ return Case ? CSFC_Success : CSFC_FallThrough;
+
+ // If this is the switchcase (case 4: or default) that we're looking for, then
+ // we're in business. Just add the substatement.
+ if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
+ if (S == Case) {
+ FoundCase = true;
+ return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
+ ResultStmts);
+ }
+
+ // Otherwise, this is some other case or default statement, just ignore it.
+ return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
+ ResultStmts);
+ }
+
+ // If we are in the live part of the code and we found our break statement,
+ // return a success!
+ if (Case == 0 && isa<BreakStmt>(S))
+ return CSFC_Success;
+
+  // If this is a compound statement, then it might contain the SwitchCase,
+  // the break, or neither.
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ // Handle this as two cases: we might be looking for the SwitchCase (if so
+ // the skipped statements must be skippable) or we might already have it.
+ CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
+ if (Case) {
+ // Keep track of whether we see a skipped declaration. The code could be
+ // using the declaration even if it is skipped, so we can't optimize out
+ // the decl if the kept statements might refer to it.
+ bool HadSkippedDecl = false;
+
+ // If we're looking for the case, just see if we can skip each of the
+ // substatements.
+ for (; Case && I != E; ++I) {
+ HadSkippedDecl |= isa<DeclStmt>(*I);
+
+ switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_Success:
+          // A successful result means that either 1) the statement doesn't
+          // have the case and is skippable, or 2) it does contain the case
+          // value and also contains the break to exit the switch. In the
+          // latter case, we just verify the rest of the statements are
+          // elidable.
+ if (FoundCase) {
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ break;
+ case CSFC_FallThrough:
+          // If we have a fallthrough condition, then we must have found the
+          // case and started to include statements.  Consider the rest of the
+ // statements in the compound statement as candidates for inclusion.
+ assert(FoundCase && "Didn't find case but returned fallthrough?");
+ // We recursively found Case, so we're not looking for it anymore.
+ Case = 0;
+
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+ break;
+ }
+ }
+ }
+
+ // If we have statements in our range, then we know that the statements are
+ // live and need to be added to the set of statements we're tracking.
+ for (; I != E; ++I) {
+ switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_FallThrough:
+        // A fallthrough result means that the statement was simple and was
+        // added to ResultStmts; keep adding the statements that follow.
+ break;
+ case CSFC_Success:
+ // A successful result means that we found the break statement and
+ // stopped statement inclusion. We just ensure that any leftover stmts
+ // are skippable and return success ourselves.
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ }
+
+ return Case ? CSFC_Success : CSFC_FallThrough;
+ }
+
+ // Okay, this is some other statement that we don't handle explicitly, like a
+ // for statement or increment etc. If we are skipping over this statement,
+ // just verify it doesn't have labels, which would make it invalid to elide.
+ if (Case) {
+ if (CodeGenFunction::ContainsLabel(S, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+
+ // Otherwise, we want to include this statement. Everything is cool with that
+ // so long as it doesn't contain a break out of the switch we're in.
+ if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
+
+ // Otherwise, everything is great. Include the statement and tell the caller
+ // that we fall through and include the next statement as well.
+ ResultStmts.push_back(S);
+ return CSFC_FallThrough;
+}
+
+/// FindCaseStatementsForValue - Find the case statement being jumped to and
+/// then invoke CollectStatementsForCase to find the list of statements to emit
+/// for a switch on constant. See the comment above CollectStatementsForCase
+/// for more details.
+static bool FindCaseStatementsForValue(const SwitchStmt &S,
+ const llvm::APInt &ConstantCondValue,
+ SmallVectorImpl<const Stmt*> &ResultStmts,
+ ASTContext &C) {
+ // First step, find the switch case that is being branched to. We can do this
+ // efficiently by scanning the SwitchCase list.
+ const SwitchCase *Case = S.getSwitchCaseList();
+ const DefaultStmt *DefaultCase = 0;
+
+ for (; Case; Case = Case->getNextSwitchCase()) {
+ // It's either a default or case. Just remember the default statement in
+ // case we're not jumping to any numbered cases.
+ if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
+ DefaultCase = DS;
+ continue;
+ }
+
+ // Check to see if this case is the one we're looking for.
+ const CaseStmt *CS = cast<CaseStmt>(Case);
+ // Don't handle case ranges yet.
+ if (CS->getRHS()) return false;
+
+ // If we found our case, remember it as 'case'.
+ if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
+ break;
+ }
+
+ // If we didn't find a matching case, we use a default if it exists, or we
+ // elide the whole switch body!
+ if (Case == 0) {
+ // It is safe to elide the body of the switch if it doesn't contain labels
+ // etc. If it is safe, return successfully with an empty ResultStmts list.
+ if (DefaultCase == 0)
+ return !CodeGenFunction::ContainsLabel(&S);
+ Case = DefaultCase;
+ }
+
+ // Ok, we know which case is being jumped to, try to collect all the
+ // statements that follow it. This can fail for a variety of reasons. Also,
+ // check to see that the recursive walk actually found our case statement.
+ // Insane cases like this can fail to find it in the recursive walk since we
+ // don't handle every stmt kind:
+ // switch (4) {
+ // while (1) {
+ // case 4: ...
+ bool FoundCase = false;
+ return CollectStatementsForCase(S.getBody(), Case, FoundCase,
+ ResultStmts) != CSFC_Failure &&
+ FoundCase;
+}
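+
+// Illustration of the constant-folding path: for
+//   switch (4) { case 4: f(); break; default: g(); }
+// this function locates 'case 4:' and CollectStatementsForCase reduces the
+// body to just { f(); }, so the switch below is emitted as a plain call to
+// f() with no switch instruction at all.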
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+ // See if we can constant fold the condition of the switch and therefore only
+ // emit the live case statement (if any) of the switch.
+ llvm::APInt ConstantCondValue;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
+ SmallVector<const Stmt*, 4> CaseStmts;
+ if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
+ getContext())) {
+ RunCleanupsScope ExecutedScope(*this);
+
+      // At this point, we are no longer "within" a switch instance, so we
+      // temporarily clear SwitchInsn to ensure that any embedded case
+      // statements are not emitted.
+ SwitchInsn = 0;
+
+ // Okay, we can dead code eliminate everything except this case. Emit the
+ // specified series of statements and we're good.
+ for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
+ EmitStmt(CaseStmts[i]);
+
+ // Now we want to restore the saved switch instance so that nested
+      // switches continue to function properly.
+ SwitchInsn = SavedSwitchInsn;
+
+ return;
+ }
+ }
+
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Create basic block to hold stuff that comes after switch
+ // statement. We also need to create a default block now so that
+ // explicit case ranges tests can have a place to jump to on
+ // failure.
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+  // All break statements jump to the switch exit. If BreakContinueStack is
+  // non-empty then reuse the enclosing loop's ContinueBlock.
+ JumpDest OuterContinue;
+ if (!BreakContinueStack.empty())
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
+
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setDefaultDest(CaseRangeBlock);
+
+ // If a default was never emitted:
+ if (!DefaultBlock->getParent()) {
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
+ delete DefaultBlock;
+ }
+ }
+
+ ConditionScope.ForceCleanup();
+
+ // Emit continuation.
+ EmitBlock(SwitchExit.getBlock(), true);
+
+ SwitchInsn = SavedSwitchInsn;
+ CaseRangeBlock = SavedCRBlock;
+}
+
+static std::string
+SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
+ SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ std::string Result;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ Result += Target.convertConstraint(Constraint);
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+    case '=': // We'll see this and the following in multi-alternative constraints.
+ case '+':
+ break;
+ case ',':
+ Result += "|";
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint,
+ &(*OutCons)[0],
+ OutCons->size(), Index);
+ assert(result && "Could not resolve symbolic name"); (void)result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
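+
+// Examples of the conversion performed here: "g" becomes "imr"; the GCC
+// multi-alternative separator ',' becomes LLVM's '|'; modifiers such as '='
+// and '+' are dropped (the caller re-adds '=' for outputs); and a symbolic
+// reference like "[result]" is resolved to its numeric operand index.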
+
+/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, CodeGenModule &CGM,
+ const AsmStmt &Stmt) {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint;
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint;
+ if (Variable->getStorageClass() != SC_Register)
+ return Constraint;
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint;
+ StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo Info(Constraint, "");
+ if (Target.validateOutputConstraint(Info) &&
+ !Info.allowsRegister()) {
+ CGM.ErrorUnsupported(&Stmt, "__asm__");
+ return Constraint;
+ }
+ // Canonicalize the register here before returning it.
+ Register = Target.getNormalizedGCCRegisterName(Register);
+ return "{" + Register.str() + "}";
+}
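+
+// Illustration: with the GCC register-variable extension
+//   register int val asm("eax");
+//   asm("..." : "=r"(val));
+// the "r" constraint for 'val' is narrowed to "{eax}" so that the backend
+// allocates exactly that register.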
+
+llvm::Value*
+CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
+ Arg = EmitLoadOfLValue(InputValue).getScalarVal();
+ } else {
+ llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
+ Ty));
+ } else {
+ Arg = InputValue.getAddress();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ Arg = InputValue.getAddress();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ if (Info.allowsRegister() || !Info.allowsMemory())
+ if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType()))
+ return EmitScalarExpr(InputExpr);
+
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
+}
+
+/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
+/// asm call instruction. The !srcloc MDNode contains a list of constant
+/// integers which are the source locations of the start of each line in the
+/// asm.
+static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
+ CodeGenFunction &CGF) {
+ SmallVector<llvm::Value *, 8> Locs;
+ // Add the location of the first line to the MDNode.
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ Str->getLocStart().getRawEncoding()));
+ StringRef StrVal = Str->getString();
+ if (!StrVal.empty()) {
+ const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
+ const LangOptions &LangOpts = CGF.CGM.getLangOpts();
+
+ // Add the location of the start of each subsequent line of the asm to the
+ // MDNode.
+ for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
+ if (StrVal[i] != '\n') continue;
+ SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
+ CGF.Target);
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ LineLoc.getRawEncoding()));
+ }
+ }
+
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
+}
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+ // Assemble the pieces into the final asm string.
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
+
+ // Get all the output and input constraints together.
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+ S.getOutputName(i));
+ bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
+ assert(IsValid && "Failed to parse output constraint");
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+ S.getInputName(i));
+ bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(), Info);
+ assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<llvm::Type *> ResultRegTypes;
+ std::vector<llvm::Type *> ResultTruncRegTypes;
+ std::vector<llvm::Type*> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<llvm::Type*> InOutArgTypes;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
+ Target, CGM, S);
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputType = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputType) < InputSize) {
+ // Form the asm to return the value as a larger integer or fp type.
+ ResultRegTypes.back() = ConvertType(InputTy);
+ }
+ }
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ ResultRegTypes.back()))
+ ResultRegTypes.back() = AdjTy;
+ } else {
+ ArgTypes.push_back(Dest.getAddress()->getType());
+ Args.push_back(Dest.getAddress());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
+ InOutConstraints);
+
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ &OutputConstraintInfos);
+
+ InputConstraint =
+ AddVariableConstraints(InputConstraint,
+ *InputExpr->IgnoreParenNoopCasts(getContext()),
+ Target, CGM, S);
+
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputType = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputType) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
+ llvm::Type *OutputTy = ConvertType(OutputType);
+ if (isa<llvm::IntegerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, OutputTy);
+ else if (isa<llvm::PointerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, IntPtrTy);
+ else {
+ assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
+ Arg = Builder.CreateFPExt(Arg, OutputTy);
+ }
+ }
+ }
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ StringRef Clobber = S.getClobber(i)->getString();
+
+ if (Clobber != "memory" && Clobber != "cc")
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+
+ if (i != 0 || NumConstraints != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = VoidTy;
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
+ S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+      // Truncate the integer result to the right size; note that TruncTy
+      // can be a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
+ }
+}
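+
+// End-to-end sketch: a statement like
+//   asm volatile("addl %1, %0" : "+r"(x) : "r"(y));
+// is emitted as roughly
+//   %0 = call i32 asm sideeffect "addl $1, $0", "=r,r,0"(i32 %y, i32 %x)
+// (the tied "+r" output becomes "=r" plus the trailing "0" input), followed
+// by a store of %0 back into x and the !srcloc metadata attached above.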
diff --git a/clang/lib/CodeGen/CGVTT.cpp b/clang/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..98be872
--- /dev/null
+++ b/clang/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,192 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of VTTs (vtable tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/VTTBuilder.h"
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *
+GetAddrOfVTTVTable(CodeGenVTables &CGVT, const CXXRecordDecl *MostDerivedClass,
+ const VTTVTable &VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+ if (VTable.getBase() == MostDerivedClass) {
+ assert(VTable.getBaseOffset().isZero() &&
+ "Most derived class vtable must have a zero offset!");
+ // This is a regular vtable.
+ return CGVT.GetAddrOfVTable(MostDerivedClass);
+ }
+
+ return CGVT.GenerateConstructionVTable(MostDerivedClass,
+ VTable.getBaseSubobject(),
+ VTable.isVirtual(),
+ Linkage,
+ AddressPoints);
+}
+
+void
+CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
+
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty;
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ SmallVector<llvm::Constant *, 8> VTables;
+ SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
+ for (const VTTVTable *i = Builder.getVTTVTables().begin(),
+ *e = Builder.getVTTVTables().end(); i != e; ++i) {
+ VTableAddressPoints.push_back(VTableAddressPointsMapTy());
+ VTables.push_back(GetAddrOfVTTVTable(*this, RD, *i, Linkage,
+ VTableAddressPoints.back()));
+ }
+
+ SmallVector<llvm::Constant *, 8> VTTComponents;
+ for (const VTTComponent *i = Builder.getVTTComponents().begin(),
+ *e = Builder.getVTTComponents().end(); i != e; ++i) {
+ const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex];
+ llvm::Constant *VTable = VTables[i->VTableIndex];
+ uint64_t AddressPoint;
+ if (VTTVT.getBase() == RD) {
+ // Just get the address point for the regular vtable.
+ AddressPoint = VTContext.getVTableLayout(RD)
+ .getAddressPoint(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
+ } else {
+ AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
+ }
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(Int64Ty, 0),
+ llvm::ConstantInt::get(Int64Ty, AddressPoint)
+ };
+
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs);
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+
+ VTTComponents.push_back(Init);
+ }
+
+ llvm::Constant *Init = llvm::ConstantArray::get(ArrayType, VTTComponents);
+
+ VTT->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTT->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTT, RD, CodeGenModule::TVK_ForVTT);
+}
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
+ assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
+
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ // This will also defer the definition of the VTT.
+ (void) GetAddrOfVTable(RD);
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // If we don't have any virtual bases, just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
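+
+// Illustration: given
+//   struct A { };  struct B : virtual A { B(); };
+// B's base-object constructor (Ctor_Base) takes a VTT parameter so it can
+// install the correct construction vtables when B is constructed as a base
+// of a more derived class; B's complete-object constructor does not.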
+
+uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
+
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassSubobjectPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
+
+uint64_t
+CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ SecondaryVirtualPointerIndicesMapTy::iterator I =
+ SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+
+ if (I != SecondaryVirtualPointerIndices.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ // Insert all secondary vpointer indices.
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSecondaryVirtualPointerIndices().begin(),
+ E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
+ std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
+ std::make_pair(RD, I->first);
+
+ SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
+ }
+
+ I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+ assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
+
+ return I->second;
+}
+
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
new file mode 100644
index 0000000..17a0537
--- /dev/null
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -0,0 +1,733 @@
+//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
+ : CGM(CGM), VTContext(CGM.getContext()) { }
+
+bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
+  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
+
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return false;
+
+ const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
+ if (!KeyFunction)
+ return true;
+
+ // Itanium C++ ABI, 5.2.6 Instantiated Templates:
+ // An instantiation of a class template requires:
+ // - In the object where instantiated, the virtual table...
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return true;
+
+ // If we're building with optimization, we always emit VTables since that
+ // allows for virtual function calls to be devirtualized.
+ // (We don't want to do this in -fapple-kext mode however).
+ if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOpts().AppleKext)
+ return true;
+
+ return KeyFunction->hasBody();
+}
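+
+// Illustration: for
+//   struct S { virtual void f(); };
+// the key function is S::f(), so at -O0 the vtable for S is emitted only in
+// the translation unit that provides the body of S::f().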
+
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Compute the mangled name.
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
+ getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
+ Thunk.This, Out);
+ else
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
+ Out.flush();
+
+ llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
+}
+
+static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ int64_t NonVirtualAdjustment,
+ int64_t VirtualAdjustment) {
+ if (!NonVirtualAdjustment && !VirtualAdjustment)
+ return Ptr;
+
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
+ llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+
+ if (NonVirtualAdjustment) {
+ // Do the non-virtual adjustment.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
+ if (VirtualAdjustment) {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ // Do the virtual adjustment.
+ llvm::Value *VTablePtrPtr =
+ CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
+
+ llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+
+ OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+
+ // Load the adjustment offset from the vtable.
+ llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+
+ // Adjust our pointer.
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ }
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(V, Ptr->getType());
+}
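+
+// Illustration: a thunk's 'this' adjustment across a virtual base passes a
+// (negative) vcall-offset offset as VirtualAdjustment; the actual delta is
+// loaded from the object's vtable at run time because it depends on the
+// most-derived object's layout.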
+
+static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk, llvm::Function *Fn) {
+ CGM.setGlobalVisibility(Fn, MD);
+
+ if (!CGM.getCodeGenOpts().HiddenWeakVTables)
+ return;
+
+ // If the thunk has weak/linkonce linkage, but the function must be
+ // emitted in every translation unit that references it, then we can
+ // emit its thunks with hidden visibility, since its thunks must be
+ // emitted when the function is.
+
+ // This follows CodeGenModule::setTypeVisibility; see the comments
+ // there for explanation.
+
+ if ((Fn->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage &&
+ Fn->getLinkage() != llvm::GlobalVariable::WeakODRLinkage) ||
+ Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ if (MD->getExplicitVisibility())
+ return;
+
+ switch (MD->getTemplateSpecializationKind()) {
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ case TSK_Undeclared:
+ break;
+
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CGM.getCodeGenOpts().HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's an explicit definition, and that definition is
+ // out-of-line, then we can't assume that all users will have a
+ // definition to emit.
+ const FunctionDecl *Def = 0;
+ if (MD->hasBody(Def) && Def->isOutOfLine())
+ return;
+
+ Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
+}
+
+#ifndef NDEBUG
+static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
+ const ABIArgInfo &infoR, CanQualType typeR) {
+ return (infoL.getKind() == infoR.getKind() &&
+ (typeL == typeR ||
+ (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
+ (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
+}
+#endif
+
+static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
+ QualType ResultType, RValue RV,
+ const ThunkInfo &Thunk) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = 0;
+ llvm::BasicBlock *AdjustNotNull = 0;
+ llvm::BasicBlock *AdjustEnd = 0;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
+
+ if (NullCheckValue) {
+ AdjustNull = CGF.createBasicBlock("adjust.null");
+ AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
+ AdjustEnd = CGF.createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
+ CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ CGF.EmitBlock(AdjustNotNull);
+ }
+
+ ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
+ Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ if (NullCheckValue) {
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustNull);
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
+ }
+
+ return RValue::get(ReturnValue);
+}
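+
+// Illustration: with a covariant return such as
+//   struct B { virtual B *clone(); };
+//   struct D : virtual B { D *clone(); };
+// the thunk for D::clone in B's vtable must convert the returned D* to B*.
+// The null check above keeps a null D* as a null B* instead of offsetting it.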
+
+// This function does roughly the same thing as GenerateThunk, but in a
+// very different way, so that va_start and va_end work correctly.
+// FIXME: This function assumes "this" is the first non-sret LLVM argument of
+// a function, and that there is an alloca built in the entry block
+// for all accesses to "this".
+// FIXME: This function assumes there is only one "ret" statement per function.
+// FIXME: Cloning isn't correct in the presence of indirect goto!
+// FIXME: This implementation of thunks bloats codesize by duplicating the
+// function definition. There are alternatives:
+// 1. Add some sort of stub support to LLVM for cases where we can
+// do a this adjustment, then a sibcall.
+// 2. We could transform the definition to take a va_list instead of an
+// actual variable argument list, then have the thunks (including a
+// no-op thunk for the regular definition) call va_start/va_end.
+// There's a bit of per-call overhead for this solution, but it's
+// better for codesize if the definition is long.
+void CodeGenFunction::GenerateVarArgsThunk(
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the original function
+ assert(FnInfo.isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ llvm::Function *BaseFn = cast<llvm::Function>(Callee);
+
+ // Clone to thunk.
+ llvm::Function *NewFn = llvm::CloneFunction(BaseFn);
+ CGM.getModule().getFunctionList().push_back(NewFn);
+ Fn->replaceAllUsesWith(NewFn);
+ NewFn->takeName(Fn);
+ Fn->eraseFromParent();
+ Fn = NewFn;
+
+ // "Initialize" CGF (minimally).
+ CurFn = Fn;
+
+ // Get the "this" value
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
+ ++AI;
+
+ // Find the first store of "this", which will be to the alloca associated
+ // with "this".
+ llvm::Value *ThisPtr = &*AI;
+ llvm::BasicBlock *EntryBB = Fn->begin();
+ llvm::Instruction *ThisStore = 0;
+ for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
+ I != E; I++) {
+ if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
+ ThisStore = cast<llvm::StoreInst>(I);
+ break;
+ }
+ }
+ assert(ThisStore && "Store of this should be in entry block?");
+ // Adjust "this", if necessary.
+ Builder.SetInsertPoint(ThisStore);
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, ThisPtr,
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+ ThisStore->setOperand(0, AdjustedThisPtr);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Fix up the returned value, if necessary.
+ for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
+ llvm::Instruction *T = I->getTerminator();
+ if (isa<llvm::ReturnInst>(T)) {
+ RValue RV = RValue::get(T->getOperand(0));
+ T->eraseFromParent();
+ Builder.SetInsertPoint(&*I);
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+ Builder.CreateRet(RV.getScalarVal());
+ break;
+ }
+ }
+ }
+}
+
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+ QualType ThisType = MD->getThisType(getContext());
+
+ FunctionArgList FunctionArgs;
+
+ // FIXME: It would be nice if more of this code could be shared with
+ // CodeGenFunction::GenerateCode.
+
+ // Create the implicit 'this' parameter declaration.
+ CurGD = GD;
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+
+ FunctionArgs.push_back(Param);
+ }
+
+ StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
+ SourceLocation());
+
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ CXXThisValue = CXXABIThisValue;
+
+ // Adjust the 'this' pointer if necessary.
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, LoadCXXThis(),
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+
+ CallArgList CallArgs;
+
+ // Add our adjusted 'this' pointer.
+ CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ // Get our callee.
+ llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+
+#ifndef NDEBUG
+ const CGFunctionInfo &CallFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
+ CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
+ CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
+ assert(similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
+ FnInfo.getReturnInfo(), FnInfo.getReturnType()));
+ assert(CallFnInfo.arg_size() == FnInfo.arg_size());
+ for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
+ assert(similar(CallFnInfo.arg_begin()[i].info,
+ CallFnInfo.arg_begin()[i].type,
+ FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
+#endif
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
+
+ if (!Thunk.Return.isEmpty())
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+
+ if (!ResultType->isVoidType() && Slot.isNull())
+ CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
+
+ FinishFunction();
+
+ // Set the right linkage.
+ CGM.setFunctionLinkage(MD, Fn);
+
+ // Set the right visibility.
+ setThunkVisibility(CGM, MD, Thunk, Fn);
+}
+
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage)
+{
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
+
+ // FIXME: re-use FnInfo in this computation.
+ llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ // There's already a declaration with the same name, check if it has the same
+ // type or if we need to replace it.
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
+ CGM.getTypes().GetFunctionTypeForVTable(GD)) {
+ llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldThunkFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // Remove the name from the old thunk function and get a new thunk.
+ OldThunkFn->setName(StringRef());
+ Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // If needed, replace the old thunk with a bitcast.
+ if (!OldThunkFn->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Remove the old thunk.
+ OldThunkFn->eraseFromParent();
+ }
+
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+
+ if (!ThunkFn->isDeclaration()) {
+ if (UseAvailableExternallyLinkage) {
+ // There is already a thunk emitted for this function, do nothing.
+ return;
+ }
+
+ // If a function has a body, it should have available_externally linkage.
+ assert(ThunkFn->hasAvailableExternallyLinkage() &&
+ "Function should have available_externally linkage!");
+
+ // Change the linkage.
+ CGM.setFunctionLinkage(cast<CXXMethodDecl>(GD.getDecl()), ThunkFn);
+ return;
+ }
+
+ if (ThunkFn->isVarArg()) {
+ // Varargs thunks are special; we can't just generate a call because
+ // we can't copy the varargs. Our implementation is rather expensive
+ // at the moment, so don't generate the thunk unless we have to.
+ // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
+ if (!UseAvailableExternallyLinkage)
+ CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
+ } else {
+ // Normal thunk body generation.
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
+ }
+
+ if (UseAvailableExternallyLinkage)
+ ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
+}
+
+void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ // We only want to do this when building with optimizations.
+ if (!CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // We can't emit thunks for member functions with incomplete types.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ if (!CGM.getTypes().isFuncTypeConvertible(
+ cast<FunctionType>(MD->getType().getTypePtr())))
+ return;
+
+ EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
+}
+
+void CodeGenVTables::EmitThunks(GlobalDecl GD)
+{
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return;
+
+ const VTableContext::ThunkInfoVectorTy *ThunkInfoVector =
+ VTContext.getThunkInfo(MD);
+ if (!ThunkInfoVector)
+ return;
+
+ for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
+ EmitThunk(GD, (*ThunkInfoVector)[I],
+ /*UseAvailableExternallyLinkage=*/false);
+}
+
+llvm::Constant *
+CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
+ const VTableComponent *Components,
+ unsigned NumComponents,
+ const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks) {
+ SmallVector<llvm::Constant *, 64> Inits;
+
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
+
+ llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ QualType ClassType = CGM.getContext().getTagDeclType(RD);
+ llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);
+
+ unsigned NextVTableThunkIndex = 0;
+
+ llvm::Constant* PureVirtualFn = 0;
+
+ for (unsigned I = 0; I != NumComponents; ++I) {
+ VTableComponent Component = Components[I];
+
+ llvm::Constant *Init = 0;
+
+ switch (Component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVCallOffset().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_VBaseOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVBaseOffset().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_OffsetToTop:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getOffsetToTop().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_RTTI:
+ Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ break;
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VTableComponent::CK_FunctionPointer:
+ GD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
+ break;
+ }
+
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ // We have a pure virtual member function.
+ if (!PureVirtualFn) {
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ PureVirtualFn =
+ CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ Int8PtrTy);
+ }
+
+ Init = PureVirtualFn;
+ } else {
+ // Check if we should use a thunk.
+ if (NextVTableThunkIndex < NumVTableThunks &&
+ VTableThunks[NextVTableThunkIndex].first == I) {
+ const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
+
+ MaybeEmitThunkAvailableExternally(GD, Thunk);
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
+
+ NextVTableThunkIndex++;
+ } else {
+ llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
+
+ Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ }
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer:
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+ };
+
+ Inits.push_back(Init);
+ }
+
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
+ return llvm::ConstantArray::get(ArrayType, Inits);
+}
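+
+// For reference, the component kinds handled above are laid out in the
+// Itanium ABI order. A sketch for a class with a virtual base (each entry
+// is a ptrdiff_t or pointer stored as i8*):
+//
+//   struct B { virtual void f(); };
+//   struct D : virtual B { void f(); };
+//
+//   // vtable group for D, roughly:
+//   //   [ vbase offset to B ]   CK_VBaseOffset
+//   //   [ offset to top     ]   CK_OffsetToTop
+//   //   [ RTTI for D        ]   CK_RTTI
+//   //   [ &D::f (or thunk)  ]   CK_FunctionPointer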
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *&VTable = VTables[RD];
+ if (VTable)
+ return VTable;
+
+ // We may need to generate a definition for this vtable.
+ if (ShouldEmitVTableInThisTU(RD))
+ CGM.DeferredVTables.push_back(RD);
+
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy,
+ VTContext.getVTableLayout(RD).getNumVTableComponents());
+
+ VTable =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ VTable->setUnnamedAddr(true);
+ return VTable;
+}
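+
+// Note: mangleCXXVTable produces the Itanium "_ZTV<type>" symbol, e.g.
+// "_ZTV1S" for 'struct S'; construction vtables below use "_ZTC..." instead.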
+
+void
+CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(RD,
+ VTLayout.vtable_component_begin(),
+ VTLayout.getNumVTableComponents(),
+ VTLayout.vtable_thunk_begin(),
+ VTLayout.getNumVTableThunks());
+ VTable->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTable->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
+}
+
+llvm::GlobalVariable *
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy& AddressPoints) {
+ OwningPtr<VTableLayout> VTLayout(
+ VTContext.createConstructionVTableLayout(Base.getBase(),
+ Base.getBaseOffset(),
+ BaseIsVirtual, RD));
+
+ // Add the address points.
+ AddressPoints = VTLayout->getAddressPoints();
+
+ // Get the mangled construction vtable name.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().
+ mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
+ Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());
+
+ // Create the variable that will hold the construction vtable.
+ llvm::GlobalVariable *VTable =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForConstructionVTable);
+
+ // V-tables are always unnamed_addr.
+ VTable->setUnnamedAddr(true);
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(Base.getBase(),
+ VTLayout->vtable_component_begin(),
+ VTLayout->getNumVTableComponents(),
+ VTLayout->vtable_thunk_begin(),
+ VTLayout->getNumVTableThunks());
+ VTable->setInitializer(Init);
+
+ return VTable;
+}
+
+void
+CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *VTable = GetAddrOfVTable(RD);
+ if (VTable->hasInitializer())
+ return;
+
+ EmitVTableDefinition(VTable, Linkage, RD);
+
+ if (RD->getNumVBases()) {
+ llvm::GlobalVariable *VTT = GetAddrOfVTT(RD);
+ EmitVTTDefinition(VTT, Linkage, RD);
+ }
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = RD->getDeclContext();
+ if (RD->getIdentifier() &&
+ RD->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit())
+ CGM.EmitFundamentalRTTIDescriptors();
+}
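+
+// The __fundamental_type_info check above matches the C++ ABI library's
+// definition, which looks roughly like this (a sketch, not the exact
+// libsupc++/libc++abi source):
+//
+//   namespace __cxxabiv1 {
+//     class __fundamental_type_info : public std::type_info {
+//     public:
+//       virtual ~__fundamental_type_info();
+//     };
+//   }
+//
+// Defining its virtual destructor (the key function) triggers emission of
+// the type_info objects for all fundamental types.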
diff --git a/clang/lib/CodeGen/CGVTables.h b/clang/lib/CodeGen/CGVTables.h
new file mode 100644
index 0000000..828330e
--- /dev/null
+++ b/clang/lib/CodeGen/CGVTables.h
@@ -0,0 +1,141 @@
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVTABLE_H
+#define CLANG_CODEGEN_CGVTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/GlobalVariable.h"
+#include "clang/Basic/ABI.h"
+#include "clang/AST/BaseSubobject.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
+
+namespace clang {
+ class CXXRecordDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+class CodeGenVTables {
+ CodeGenModule &CGM;
+
+ VTableContext VTContext;
+
+ /// VTables - All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
+
+ /// VTableAddressPointsMapTy - Address points for a single vtable.
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+
+ /// SubVTTIndicies - Contains indices into the various sub-VTTs.
+ SubVTTIndiciesMapTy SubVTTIndicies;
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
+ SecondaryVirtualPointerIndicesMapTy;
+
+ /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
+ /// indices.
+ SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+
+ /// EmitThunk - Emit a single thunk.
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage);
+
+ /// MaybeEmitThunkAvailableExternally - Try to emit the given thunk with
+ /// available_externally linkage to allow for inlining of thunks.
+ /// This will be done iff optimizations are enabled and the member function
+ /// doesn't contain any incomplete types.
+ void MaybeEmitThunkAvailableExternally(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// CreateVTableInitializer - Create a vtable initializer for the given record
+ /// decl.
+ /// \param Components - The vtable components; this is really an array of
+ /// VTableComponents.
+ llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD,
+ const VTableComponent *Components,
+ unsigned NumComponents,
+ const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks);
+
+public:
+ CodeGenVTables(CodeGenModule &CGM);
+
+ VTableContext &getVTableContext() { return VTContext; }
+
+ /// \brief True if the VTable of this record must be emitted in the
+ /// translation unit.
+ bool ShouldEmitVTableInThisTU(const CXXRecordDecl *RD);
+
+ /// needsVTTParameter - Return whether the given global decl needs a VTT
+ /// parameter, which it does if it's a base constructor or destructor with
+ /// virtual bases.
+ static bool needsVTTParameter(GlobalDecl GD);
+
+ /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
+ /// given record decl.
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
+
+ /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
+ /// virtual pointer for the given subobject is located.
+ uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base);
+
+ /// getAddressPoint - Get the address point of the given subobject in the
+ /// class decl.
+ uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
+
+ /// GetAddrOfVTable - Get the address of the vtable for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTable(const CXXRecordDecl *RD);
+
+ /// EmitVTableDefinition - Emit the definition of the given vtable.
+ void EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// GenerateConstructionVTable - Generate a construction vtable for the given
+ /// base subobject.
+ llvm::GlobalVariable *
+ GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy& AddressPoints);
+
+
+ /// GetAddrOfVTT - Get the address of the VTT for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD);
+
+ /// EmitVTTDefinition - Emit the definition of the given VTT.
+ void EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
+ /// GenerateClassData - Generate all the class data required to be generated
+ /// upon definition of a KeyFunction. This includes the vtable, the
+ /// rtti data structure and the VTT.
+ ///
+ /// \param Linkage - The desired linkage of the vtable, the RTTI and the VTT.
+ void GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+#endif
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..ac704e7
--- /dev/null
+++ b/clang/lib/CodeGen/CGValue.h
@@ -0,0 +1,451 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
+
+namespace llvm {
+ class Constant;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class AggValueSlot;
+ class CGBitFieldInfo;
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+ enum Flavor { Scalar, Complex, Aggregate };
+
+ // Stores first value and flavor.
+ llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
+ // Stores second value and volatility.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+
+public:
+ bool isScalar() const { return V1.getInt() == Scalar; }
+ bool isComplex() const { return V1.getInt() == Complex; }
+ bool isAggregate() const { return V1.getInt() == Aggregate; }
+
+ bool isVolatileQualified() const { return V2.getInt(); }
+
+ /// getScalarVal() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1.getPointer();
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::make_pair(V1.getPointer(), V2.getPointer());
+ }
+
+ /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+ llvm::Value *getAggregateAddr() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1.getPointer();
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Scalar);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1.setPointer(V1);
+ ER.V2.setPointer(V2);
+ ER.V1.setInt(Complex);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ return getComplex(C.first, C.second);
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Aggregate);
+ ER.V2.setInt(Volatile);
+ return ER;
+ }
+};
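+
+// Illustrative use of RValue; Scalar, Real, Imag and Addr are placeholder
+// llvm::Value pointers:
+//
+//   RValue RV = RValue::get(Scalar);            // simple SSA value
+//   if (RV.isScalar()) (void) RV.getScalarVal();
+//   RValue CV = RValue::getComplex(Real, Imag); // pair for _Complex
+//   RValue AV = RValue::getAggregate(Addr);     // address of an aggregate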
+
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bitrange.
+class LValue {
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*.
+ BitField, // This is a bitfield l-value, use getBitField*.
+ ExtVectorElt // This is an extended vector subset, use getExtVector*.
+ } LVType;
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ const CGBitFieldInfo *BitFieldInfo;
+ };
+
+ QualType Type;
+
+ // 'const' is unused here
+ Qualifiers Quals;
+
+ // The alignment to use when accessing this lvalue. (For vector elements,
+ // this is the alignment of the whole vector.)
+ unsigned short Alignment;
+
+ // objective-c's ivar
+ bool Ivar:1;
+
+ // objective-c's ivar is an array
+ bool ObjIsArray:1;
+
+ // LValue is non-gc'able for any reason, including being a parameter or local
+ // variable.
+ bool NonGC: 1;
+
+ // Lvalue is a global reference of an objective-c object
+ bool GlobalObjCRef : 1;
+
+ // Lvalue is a thread local reference
+ bool ThreadLocalRef : 1;
+
+ Expr *BaseIvarExp;
+
+ /// TBAAInfo - TBAA information to attach to dereferences of this LValue.
+ llvm::MDNode *TBAAInfo;
+
+private:
+ void Initialize(QualType Type, Qualifiers Quals,
+ CharUnits Alignment = CharUnits(),
+ llvm::MDNode *TBAAInfo = 0) {
+ this->Type = Type;
+ this->Quals = Quals;
+ this->Alignment = Alignment.getQuantity();
+ assert(this->Alignment == Alignment.getQuantity() &&
+ "Alignment exceeds allowed max!");
+
+ // Initialize Objective-C flags.
+ this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->ThreadLocalRef = false;
+ this->BaseIvarExp = 0;
+ this->TBAAInfo = TBAAInfo;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitField() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+
+ bool isVolatileQualified() const { return Quals.hasVolatile(); }
+ bool isRestrictQualified() const { return Quals.hasRestrict(); }
+ unsigned getVRQualifiers() const {
+ return Quals.getCVRQualifiers() & ~Qualifiers::Const;
+ }
+
+ QualType getType() const { return Type; }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ void setObjCIvar(bool Value) { Ivar = Value; }
+
+ bool isObjCArray() const { return ObjIsArray; }
+ void setObjCArray(bool Value) { ObjIsArray = Value; }
+
+ bool isNonGC () const { return NonGC; }
+ void setNonGC(bool Value) { NonGC = Value; }
+
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ void setGlobalObjCRef(bool Value) { GlobalObjCRef = Value; }
+
+ bool isThreadLocalRef() const { return ThreadLocalRef; }
+ void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
+
+ bool isObjCWeak() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Weak;
+ }
+ bool isObjCStrong() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Strong;
+ }
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
+
+ Expr *getBaseIvarExp() const { return BaseIvarExp; }
+ void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+
+ llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
+ void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
+
+ const Qualifiers &getQuals() const { return Quals; }
+ Qualifiers &getQuals() { return Quals; }
+
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
+ void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+
+ // simple lvalue
+ llvm::Value *getAddress() const { assert(isSimple()); return V; }
+ void setAddress(llvm::Value *address) {
+ assert(isSimple());
+ V = address;
+ }
+
+ // vector elt lvalue
+ llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+
+ // extended vector elements.
+ llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+
+ // bitfield lvalue
+ llvm::Value *getBitFieldBaseAddr() const {
+ assert(isBitField());
+ return V;
+ }
+ const CGBitFieldInfo &getBitFieldInfo() const {
+ assert(isBitField());
+ return *BitFieldInfo;
+ }
+
+ static LValue MakeAddr(llvm::Value *address, QualType type,
+ CharUnits alignment, ASTContext &Context,
+ llvm::MDNode *TBAAInfo = 0) {
+ Qualifiers qs = type.getQualifiers();
+ qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
+
+ LValue R;
+ R.LVType = Simple;
+ R.V = address;
+ R.Initialize(type, qs, alignment, TBAAInfo);
+ return R;
+ }
+
+ static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
+ QualType type, CharUnits Alignment) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = Vec;
+ R.VectorIdx = Idx;
+ R.Initialize(type, type.getQualifiers(), Alignment);
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
+ QualType type, CharUnits Alignment) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = Vec;
+ R.VectorElts = Elts;
+ R.Initialize(type, type.getQualifiers(), Alignment);
+ return R;
+ }
+
+ /// \brief Create a new object to represent a bit-field access.
+ ///
+ /// \param BaseValue - The base address of the structure containing the
+ /// bit-field.
+ /// \param Info - The information describing how to perform the bit-field
+ /// access.
+ static LValue MakeBitfield(llvm::Value *BaseValue,
+ const CGBitFieldInfo &Info,
+ QualType type) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = BaseValue;
+ R.BitFieldInfo = &Info;
+ R.Initialize(type, type.getQualifiers());
+ return R;
+ }
+
+ RValue asAggregateRValue() const {
+ // FIXME: Alignment
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
+ }
+};
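+
+// Illustrative use of LValue; Addr, T, Align and Ctx are placeholders for a
+// pointer value, a QualType, a CharUnits alignment and the ASTContext:
+//
+//   LValue LV = LValue::MakeAddr(Addr, T, Align, Ctx);
+//   if (LV.isSimple() && !LV.isVolatileQualified())
+//     llvm::Value *P = LV.getAddress();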
+
+/// An aggregate value slot.
+class AggValueSlot {
+ /// The address.
+ llvm::Value *Addr;
+
+ // Qualifiers
+ Qualifiers Quals;
+
+ unsigned short Alignment;
+
+ /// DestructedFlag - This is set to true if some external code is
+ /// responsible for setting up a destructor for the slot. Otherwise
+ /// the code which constructs it should push the appropriate cleanup.
+ bool DestructedFlag : 1;
+
+ /// ObjCGCFlag - This is set to true if writing to the memory in the
+ /// slot might require calling an appropriate Objective-C GC
+ /// barrier. The exact interaction here is unnecessarily mysterious.
+ bool ObjCGCFlag : 1;
+
+ /// ZeroedFlag - This is set to true if the memory in the slot is
+ /// known to be zero before the assignment into it. This means that
+ /// zero fields don't need to be set.
+ bool ZeroedFlag : 1;
+
+ /// AliasedFlag - This is set to true if the slot might be aliased
+ /// and it's not undefined behavior to access it through such an
+ /// alias. Note that it's always undefined behavior to access a C++
+ /// object that's under construction through an alias derived from
+ /// outside the construction process.
+ ///
+ /// This flag controls whether calls that produce the aggregate
+ /// value may be evaluated directly into the slot, or whether they
+ /// must be evaluated into an unaliased temporary and then memcpy'ed
+ /// over. Since it's invalid in general to memcpy a non-POD C++
+ /// object, it's important that this flag never be set when
+ /// evaluating an expression which constructs such an object.
+ bool AliasedFlag : 1;
+
+public:
+ enum IsAliased_t { IsNotAliased, IsAliased };
+ enum IsDestructed_t { IsNotDestructed, IsDestructed };
+ enum IsZeroed_t { IsNotZeroed, IsZeroed };
+ enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
+
+ /// ignored - Returns an aggregate value slot indicating that the
+ /// aggregate value is being ignored.
+ static AggValueSlot ignored() {
+ return forAddr(0, CharUnits(), Qualifiers(), IsNotDestructed,
+ DoesNotNeedGCBarriers, IsNotAliased);
+ }
+
+ /// forAddr - Make a slot for an aggregate value.
+ ///
+ /// \param quals - The qualifiers that dictate how the slot should
+ /// be initialized. Only 'volatile' and the Objective-C lifetime
+ /// qualifiers matter.
+ ///
+ /// \param isDestructed - true if something else is responsible
+ /// for calling destructors on this object
+ /// \param needsGC - true if the slot is potentially located
+ /// somewhere that ObjC GC calls should be emitted for
+ static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
+ Qualifiers quals,
+ IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
+ AggValueSlot AV;
+ AV.Addr = addr;
+ AV.Alignment = align.getQuantity();
+ AV.Quals = quals;
+ AV.DestructedFlag = isDestructed;
+ AV.ObjCGCFlag = needsGC;
+ AV.ZeroedFlag = isZeroed;
+ AV.AliasedFlag = isAliased;
+ return AV;
+ }
+
+ static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
+ return forAddr(LV.getAddress(), LV.getAlignment(),
+ LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
+ }
+
+ IsDestructed_t isExternallyDestructed() const {
+ return IsDestructed_t(DestructedFlag);
+ }
+ void setExternallyDestructed(bool destructed = true) {
+ DestructedFlag = destructed;
+ }
+
+ Qualifiers getQualifiers() const { return Quals; }
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ NeedsGCBarriers_t requiresGCollection() const {
+ return NeedsGCBarriers_t(ObjCGCFlag);
+ }
+
+ llvm::Value *getAddr() const {
+ return Addr;
+ }
+
+ bool isIgnored() const {
+ return Addr == 0;
+ }
+
+ CharUnits getAlignment() const {
+ return CharUnits::fromQuantity(Alignment);
+ }
+
+ IsAliased_t isPotentiallyAliased() const {
+ return IsAliased_t(AliasedFlag);
+ }
+
+ // FIXME: Alignment?
+ RValue asRValue() const {
+ return RValue::getAggregate(getAddr(), isVolatile());
+ }
+
+ void setZeroed(bool V = true) { ZeroedFlag = V; }
+ IsZeroed_t isZeroed() const {
+ return IsZeroed_t(ZeroedFlag);
+ }
+};
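+
+// Illustrative construction of a slot whose destructor is managed externally
+// and which cannot be aliased; Addr and Align are placeholders:
+//
+//   AggValueSlot Slot =
+//       AggValueSlot::forAddr(Addr, Align, Qualifiers(),
+//                             AggValueSlot::IsDestructed,
+//                             AggValueSlot::DoesNotNeedGCBarriers,
+//                             AggValueSlot::IsNotAliased);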
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt
new file mode 100644
index 0000000..7b1dbce
--- /dev/null
+++ b/clang/lib/CodeGen/CMakeLists.txt
@@ -0,0 +1,56 @@
+set(LLVM_LINK_COMPONENTS
+ asmparser
+ bitreader
+ bitwriter
+ instrumentation
+ ipo
+ linker
+ vectorize
+ )
+
+set(LLVM_USED_LIBS clangBasic clangAST clangFrontend)
+
+add_clang_library(clangCodeGen
+ BackendUtil.cpp
+ CGBlocks.cpp
+ CGBuiltin.cpp
+ CGCall.cpp
+ CGClass.cpp
+ CGCUDANV.cpp
+ CGCUDARuntime.cpp
+ CGCXX.cpp
+ CGCXXABI.cpp
+ CGCleanup.cpp
+ CGDebugInfo.cpp
+ CGDecl.cpp
+ CGDeclCXX.cpp
+ CGException.cpp
+ CGExpr.cpp
+ CGExprAgg.cpp
+ CGExprComplex.cpp
+ CGExprConstant.cpp
+ CGExprCXX.cpp
+ CGExprScalar.cpp
+ CGObjC.cpp
+ CGObjCGNU.cpp
+ CGObjCMac.cpp
+ CGObjCRuntime.cpp
+ CGOpenCLRuntime.cpp
+ CGRecordLayoutBuilder.cpp
+ CGRTTI.cpp
+ CGStmt.cpp
+ CGVTables.cpp
+ CGVTT.cpp
+ CodeGenAction.cpp
+ CodeGenFunction.cpp
+ CodeGenModule.cpp
+ CodeGenTBAA.cpp
+ CodeGenTypes.cpp
+ ItaniumCXXABI.cpp
+ MicrosoftCXXABI.cpp
+ ModuleBuilder.cpp
+ TargetInfo.cpp
+ )
+
+add_dependencies(clangCodeGen ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
new file mode 100644
index 0000000..dd32167
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -0,0 +1,448 @@
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Linker.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Timer.h"
+using namespace clang;
+using namespace llvm;
+
+namespace clang {
+ class BackendConsumer : public ASTConsumer {
+ virtual void anchor();
+ DiagnosticsEngine &Diags;
+ BackendAction Action;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ raw_ostream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+
+ OwningPtr<CodeGenerator> Gen;
+
+ OwningPtr<llvm::Module> TheModule, LinkModule;
+
+ public:
+ BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags,
+ const CodeGenOptions &compopts,
+ const TargetOptions &targetopts,
+ const LangOptions &langopts,
+ bool TimePasses,
+ const std::string &infile,
+ llvm::Module *LinkModule,
+ raw_ostream *OS,
+ LLVMContext &C) :
+ Diags(_Diags),
+ Action(action),
+ CodeGenOpts(compopts),
+ TargetOpts(targetopts),
+ LangOpts(langopts),
+ AsmOutStream(OS),
+ LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
+ LinkModule(LinkModule) {
+ llvm::TimePassesIsEnabled = TimePasses;
+ }
+
+ llvm::Module *takeModule() { return TheModule.take(); }
+ llvm::Module *takeLinkModule() { return LinkModule.take(); }
+
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Gen->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ virtual void Initialize(ASTContext &Ctx) {
+ Context = &Ctx;
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule.reset(Gen->GetModule());
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+
+ return true;
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &C) {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule)
+ return;
+
+ // Make sure IR generation is happy with the module. This is released by
+ // the module provider.
+ llvm::Module *M = Gen->ReleaseModule();
+ if (!M) {
+ // The module has been released by IR gen on failures, do not double
+ // free.
+ TheModule.take();
+ return;
+ }
+
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
+
+ // Link LinkModule into this module if present, preserving its validity.
+ if (LinkModule) {
+ std::string ErrorMsg;
+ if (Linker::LinkModules(M, LinkModule.get(), Linker::PreserveSource,
+ &ErrorMsg)) {
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << LinkModule->getModuleIdentifier() << ErrorMsg;
+ return;
+ }
+ }
+
+ // Install an inline asm handler so that diagnostics get printed through
+ // our diagnostics hooks.
+ LLVMContext &Ctx = TheModule->getContext();
+ LLVMContext::InlineAsmDiagHandlerTy OldHandler =
+ Ctx.getInlineAsmDiagnosticHandler();
+ void *OldContext = Ctx.getInlineAsmDiagnosticContext();
+ Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
+
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
+ TheModule.get(), Action, AsmOutStream);
+
+ Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+ }
+
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ Gen->HandleVTable(RD, DefinitionRequired);
+ }
+
+ static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
+ unsigned LocCookie) {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
+ ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
+ }
+
+ void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
+ SourceLocation LocCookie);
+ };
+
+ void BackendConsumer::anchor() {}
+}
+
+/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
+/// buffer to be a valid FullSourceLoc.
+static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
+ SourceManager &CSM) {
+ // Get both the clang and llvm source managers. The location is relative to
+ // a memory buffer that the LLVM Source Manager is handling, we need to add
+ // a copy to the Clang source manager.
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+
+ // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
+ // already owns its one and clang::SourceManager wants to own its one.
+ const MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+
+ // Create the copy and transfer ownership to clang::SourceManager.
+ llvm::MemoryBuffer *CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
+ FileID FID = CSM.createFileIDForMemBuffer(CBuf);
+
+ // Translate the offset into the file.
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ SourceLocation NewLoc =
+ CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset);
+ return FullSourceLoc(NewLoc, CSM);
+}
+
+
+/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
+/// error parsing inline asm. The SMDiagnostic indicates the error relative to
+/// the temporary memory buffer that the inline asm parser has set up.
+void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
+ SourceLocation LocCookie) {
+ // There are a couple of different kinds of errors we could get here. First,
+ // we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ StringRef Message = D.getMessage();
+ if (Message.startswith("error: "))
+ Message = Message.substr(7);
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+
+ // If this problem has clang-level source location information, report the
+ // issue as being an error in the source with a note showing the instantiated
+ // code.
+ if (LocCookie.isValid()) {
+ Diags.Report(LocCookie, diag::err_fe_inline_asm).AddString(Message);
+
+ if (D.getLoc().isValid()) {
+ DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ // Convert the SMDiagnostic ranges into SourceRange and attach them
+ // to the diagnostic.
+ for (unsigned i = 0, e = D.getRanges().size(); i != e; ++i) {
+ std::pair<unsigned, unsigned> Range = D.getRanges()[i];
+ unsigned Column = D.getColumnNo();
+ B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
+ Loc.getLocWithOffset(Range.second - Column));
+ }
+ }
+ return;
+ }
+
+ // Otherwise, report the backend error as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the error, it just gets no
+ // location info.
+ Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
+}
+
+//
+
+CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
+ : Act(_Act), LinkModule(0),
+ VMContext(_VMContext ? _VMContext : new LLVMContext),
+ OwnsVMContext(!_VMContext) {}
+
+CodeGenAction::~CodeGenAction() {
+ TheModule.reset();
+ if (OwnsVMContext)
+ delete VMContext;
+}
+
+bool CodeGenAction::hasIRSupport() const { return true; }
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // If we were given a link module, release consumer's ownership of it.
+ if (LinkModule)
+ BEConsumer->takeLinkModule();
+
+ // Steal the module from the consumer.
+ TheModule.reset(BEConsumer->takeModule());
+}
+
+llvm::Module *CodeGenAction::takeModule() {
+ return TheModule.take();
+}
+
+llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
+ OwnsVMContext = false;
+ return VMContext;
+}
+
+static raw_ostream *GetOutputStream(CompilerInstance &CI,
+ StringRef InFile,
+ BackendAction Action) {
+ switch (Action) {
+ case Backend_EmitAssembly:
+ return CI.createDefaultOutputFile(false, InFile, "s");
+ case Backend_EmitLL:
+ return CI.createDefaultOutputFile(false, InFile, "ll");
+ case Backend_EmitBC:
+ return CI.createDefaultOutputFile(true, InFile, "bc");
+ case Backend_EmitNothing:
+ return 0;
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ return CI.createDefaultOutputFile(true, InFile, "o");
+ }
+
+ llvm_unreachable("Invalid action!");
+}
+
+ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ if (BA != Backend_EmitNothing && !OS)
+ return 0;
+
+ llvm::Module *LinkModuleToUse = LinkModule;
+
+ // If we were not given a link module, and the user requested that one be
+ // loaded from bitcode, do so now.
+ const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile;
+ if (!LinkModuleToUse && !LinkBCFile.empty()) {
+ std::string ErrorStr;
+
+ llvm::MemoryBuffer *BCBuf =
+ CI.getFileManager().getBufferForFile(LinkBCFile, &ErrorStr);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+
+ LinkModuleToUse = getLazyBitcodeModule(BCBuf, *VMContext, &ErrorStr);
+ if (!LinkModuleToUse) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+ }
+
+ BEConsumer =
+ new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile,
+ LinkModuleToUse, OS.take(), *VMContext);
+ return BEConsumer;
+}
+
+void CodeGenAction::ExecuteAction() {
+ // If this is an IR file, we have to treat it specially.
+ if (getCurrentFileKind() == IK_LLVM_IR) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
+
+ bool Invalid;
+ SourceManager &SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *MainFile = SM.getBuffer(SM.getMainFileID(),
+ &Invalid);
+ if (Invalid)
+ return;
+
+ // FIXME: This is stupid, IRReader shouldn't take ownership.
+ llvm::MemoryBuffer *MainFileCopy =
+ llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
+ getCurrentFile().c_str());
+
+ llvm::SMDiagnostic Err;
+ TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
+ if (!TheModule) {
+ // Translate from the diagnostic info to the SourceManager location.
+ SourceLocation Loc = SM.translateFileLineCol(
+ SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
+ Err.getColumnNo() + 1);
+
+ // Get a custom diagnostic for the error. We strip off a leading
+ // diagnostic code if there is one.
+ StringRef Msg = Err.getMessage();
+ if (Msg.startswith("error: "))
+ Msg = Msg.substr(7);
+
+ // Escape '%', which is interpreted as a format character.
+ llvm::SmallString<128> EscapedMessage;
+ for (unsigned i = 0, e = Msg.size(); i != e; ++i) {
+ if (Msg[i] == '%')
+ EscapedMessage += '%';
+ EscapedMessage += Msg[i];
+ }
+
+ unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
+ DiagnosticsEngine::Error, EscapedMessage);
+
+ CI.getDiagnostics().Report(Loc, DiagID);
+ return;
+ }
+
+ EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), CI.getLangOpts(),
+ TheModule.get(),
+ BA, OS);
+ return;
+ }
+
+ // Otherwise follow the normal AST path.
+ this->ASTFrontendAction::ExecuteAction();
+}
+
+//
+
+void EmitAssemblyAction::anchor() { }
+EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitAssembly, _VMContext) {}
+
+void EmitBCAction::anchor() { }
+EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitBC, _VMContext) {}
+
+void EmitLLVMAction::anchor() { }
+EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitLL, _VMContext) {}
+
+void EmitLLVMOnlyAction::anchor() { }
+EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitNothing, _VMContext) {}
+
+void EmitCodeGenOnlyAction::anchor() { }
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitMCNull, _VMContext) {}
+
+void EmitObjAction::anchor() { }
+EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitObj, _VMContext) {}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..2939062
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,1149 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/MDBuilder.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+ : CodeGenTypeCache(cgm), CGM(cgm),
+ Target(CGM.getContext().getTargetInfo()),
+ Builder(cgm.getModule().getContext()),
+ AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
+ LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
+ FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
+ DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
+ IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
+ CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
+ CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
+ TerminateHandler(0), TrapBB(0) {
+
+ CatchUndefined = getContext().getLangOpts().CatchUndefined;
+ CGM.getCXXABI().getMangleContext().startNewFunction();
+}
+
+CodeGenFunction::~CodeGenFunction() {
+ // If there are any unclaimed block infos, go ahead and destroy them
+ // now. This can happen if IR-gen gets clever and skips evaluating
+ // something.
+ if (FirstBlockInfo)
+ destroyBlockInfos(FirstBlockInfo);
+}
+
+
+llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
+ switch (type.getCanonicalType()->getTypeClass()) {
+#define TYPE(name, parent)
+#define ABSTRACT_TYPE(name, parent)
+#define NON_CANONICAL_TYPE(name, parent) case Type::name:
+#define DEPENDENT_TYPE(name, parent) case Type::name:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("non-canonical or dependent type in IR-generation");
+
+ case Type::Builtin:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::ObjCObjectPointer:
+ return false;
+
+ // Complexes, arrays, records, and Objective-C objects.
+ case Type::Complex:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::Record:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ return true;
+
+ // In IRGen, atomic types are just the underlying type
+ case Type::Atomic:
+ return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
+ }
+ llvm_unreachable("unknown type kind!");
+}
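+
+// For example: 'int', 'int *' and enum types are not aggregate for IRGen,
+// while '_Complex double', 'int[4]', class/struct types and Objective-C
+// objects are.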
+
+void CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if it is empty or there are no
+ // explicit jumps to the return block.
+ if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
+ ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.getBlock();
+ } else
+ EmitBlock(ReturnBlock.getBlock());
+ return;
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock.getBlock()->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.getBlock()) {
+ // Reset insertion point, including debug location, and delete the branch.
+ Builder.SetCurrentDebugLocation(BI->getDebugLoc());
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock.getBlock();
+ return;
+ }
+ }
+
+ // FIXME: We are at an unreachable point, there is no reason to emit the block
+ // unless it has uses. However, we still need a place to put the debug
+ // region.end for now.
+
+ EmitBlock(ReturnBlock.getBlock());
+}
+
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
+}
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ if (EHStack.stable_begin() != PrologueCleanupDepth)
+ PopCleanupBlocks(PrologueCleanupDepth);
+
+ // Emit function epilog (to return).
+ EmitReturnBlock();
+
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(EndLoc);
+ DI->EmitFunctionEnd(Builder);
+ }
+
+ EmitFunctionEpilog(*CurFnInfo);
+ EmitEndEHSpec(CurCodeDecl);
+
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
+ // If someone did an indirect goto, emit the indirect goto block at the end of
+ // the function.
+ if (IndirectBranch) {
+ EmitBlock(IndirectBranch->getParent());
+ Builder.ClearInsertionPoint();
+ }
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = 0;
+ Ptr->eraseFromParent();
+
+ // If someone took the address of a label but never did an indirect goto, we
+ // made a zero entry PHI node, which is illegal, zap it now.
+ if (IndirectBranch) {
+ llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+ if (PN->getNumIncomingValues() == 0) {
+ PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+ }
+
+ EmitIfUsed(*this, EHResumeBlock);
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
+
+ if (CGM.getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+}
+
+/// ShouldInstrumentFunction - Return true if the current function should be
+/// instrumented with __cyg_profile_func_* calls
+bool CodeGenFunction::ShouldInstrumentFunction() {
+ if (!CGM.getCodeGenOpts().InstrumentFunctions)
+ return false;
+ if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ return false;
+ return true;
+}
+
+/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+/// instrumentation function with the current function and the call site, if
+/// function instrumentation is enabled.
+void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
+ // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
+ llvm::PointerType *PointerTy = Int8PtrTy;
+ llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
+ llvm::FunctionType *FunctionTy =
+ llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
+
+ llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
+ llvm::CallInst *CallSite = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
+ llvm::ConstantInt::get(Int32Ty, 0),
+ "callsite");
+
+ Builder.CreateCall2(F,
+ llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
+ CallSite);
+}
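+
+// These hooks follow the -finstrument-functions contract; a user-supplied
+// pair would be declared like this (sketch):
+//
+//   extern "C" void __cyg_profile_func_enter(void *this_fn, void *call_site);
+//   extern "C" void __cyg_profile_func_exit(void *this_fn, void *call_site);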
+
+void CodeGenFunction::EmitMCountInstrumentation() {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
+ Target.getMCountName());
+ Builder.CreateCall(MCountFn);
+}
+
+void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc) {
+ const Decl *D = GD.getDecl();
+
+ DidCallStackSave = false;
+ CurCodeDecl = CurFuncDecl = D;
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ CurFnInfo = &FnInfo;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ // Pass inline keyword to optimizer if it appears explicitly on any
+ // declaration.
+ if (!CGM.getCodeGenOpts().NoInline)
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+ RE = FD->redecls_end(); RI != RE; ++RI)
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
+
+ if (getContext().getLangOpts().OpenCL) {
+ // Add metadata for a kernel function.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ llvm::LLVMContext &Context = getLLVMContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+
+ llvm::Value *Op = Fn;
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
+ }
+ }
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+ // Create a marker to make it easy to insert allocas into the entry block
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = getJumpDestInCurrentScope("return");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ // Emit subprogram debug descriptor.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ unsigned NumArgs = 0;
+ QualType *ArgsArray = new QualType[Args.size()];
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ ArgsArray[NumArgs++] = (*i)->getType();
+ }
+
+ QualType FnType =
+ getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
+ FunctionProtoType::ExtProtoInfo());
+
+ delete[] ArgsArray;
+
+ DI->setLocation(StartLoc);
+ DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
+ }
+
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+ if (CGM.getCodeGenOpts().InstrumentForProfiling)
+ EmitMCountInstrumentation();
+
+ if (RetTy->isVoidType()) {
+ // Void type; nothing to return.
+ ReturnValue = 0;
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType())) {
+ // Indirect aggregate return; emit returned value directly into sret slot.
+ // This reduces code size, and affects correctness in C++.
+ ReturnValue = CurFn->arg_begin();
+ } else {
+ ReturnValue = CreateIRTemp(RetTy, "retval");
+
+ // Tell the epilog emitter to autorelease the result. We do this
+ // now so that various specialized functions can suppress it
+ // during their IR-generation.
+ if (getLangOpts().ObjCAutoRefCount &&
+ !CurFnInfo->isReturnsRetained() &&
+ RetTy->isObjCRetainableType())
+ AutoreleaseResult = true;
+ }
+
+ EmitStartEHSpec(CurCodeDecl);
+
+ PrologueCleanupDepth = EHStack.stable_begin();
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call) {
+ // We're in a lambda; figure out the captures.
+ MD->getParent()->getCaptureFields(LambdaCaptureFields,
+ LambdaThisCaptureField);
+ if (LambdaThisCaptureField) {
+ // If this lambda captures this, load it.
+ QualType LambdaTagType =
+ getContext().getTagDeclType(LambdaThisCaptureField->getParent());
+ LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
+ LambdaTagType);
+ LValue ThisLValue = EmitLValueForField(LambdaLV,
+ LambdaThisCaptureField);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
+ }
+ } else {
+ // Not in a lambda; just use 'this' from the method.
+ // FIXME: Should we generate a new load for each use of 'this'? The
+ // fast register allocator would be happier...
+ CXXThisValue = CXXABIThisValue;
+ }
+ }
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ QualType Ty = (*i)->getType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
+ // Emit a location at the end of the prologue.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, StartLoc);
+}
+
+void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
+ assert(FD->getBody());
+ EmitStmt(FD->getBody());
+}
+
+/// Tries to mark the given function nounwind based on the
+/// non-existence of any throwing calls within it. We believe this is
+/// lightweight enough to do at -O0.
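+/// For example (illustrative only): a function like
+///   int add(int a, int b) { return a + b; }
+/// lowers to straight-line code with no calls or resumes, so the emitted
+/// llvm::Function can safely be marked nounwind even at -O0.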
+static void TryMarkNoThrow(llvm::Function *F) {
+ // LLVM treats 'nounwind' on a function as part of the type, so we
+ // can't do this on functions that may be overridden.
+ if (F->mayBeOverridden()) return;
+
+ for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
+ for (llvm::BasicBlock::iterator
+ BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
+ if (!Call->doesNotThrow())
+ return;
+ } else if (isa<llvm::ResumeInst>(&*BI)) {
+ return;
+ }
+ F->setDoesNotThrow(true);
+}
+
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ // Check if we should generate debug info for this function.
+ if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ FunctionArgList Args;
+ QualType ResTy = FD->getResultType();
+
+ CurGD = GD;
+ if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(FD->getParamDecl(i));
+
+ SourceRange BodyRange;
+ if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
+
+ // Emit the standard function prologue.
+ StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());
+
+ // Generate the body of the function.
+ if (isa<CXXDestructorDecl>(FD))
+ EmitDestructorBody(Args);
+ else if (isa<CXXConstructorDecl>(FD))
+ EmitConstructorBody(Args);
+ else if (getContext().getLangOpts().CUDA &&
+ !CGM.getCodeGenOpts().CUDAIsDevice &&
+ FD->hasAttr<CUDAGlobalAttr>())
+ CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
+ else if (isa<CXXConversionDecl>(FD) &&
+ cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
+ // The lambda conversion to block pointer is special; the semantics can't be
+ // expressed in the AST, so IRGen needs to special-case it.
+ EmitLambdaToBlockPointerBody(Args);
+ } else if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
+ // The lambda "__invoke" function is special, because it forwards or
+ // clones the body of the function call operator (but is actually static).
+ EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
+ } else
+ EmitFunctionBody(Args);
+
+ // Emit the standard function epilogue.
+ FinishFunction(BodyRange.getEnd());
+
+ // If we haven't marked the function nothrow through other means, do
+ // a quick pass now to see if we can.
+ if (!CurFn->doesNotThrow())
+ TryMarkNoThrow(CurFn);
+}
+
+/// ContainsLabel - Return true if the statement contains a label in it. If
+/// this statement is not executed normally, then the absence of a label means
+/// that we can just remove its code entirely.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ //
+ // TODO: If anyone cared, we could track __label__'s, since we know that you
+ // can't jump to one from outside their declared region.
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (ContainsLabel(*I, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
+
+/// containsBreak - Return true if the statement contains a break out of it.
+/// A switch or loop nested (recursively) inside the statement may freely
+/// contain a break, since such a break cannot escape the statement.
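+///
+/// For example (illustrative only):
+///   containsBreak("if (x) break;")        --> true
+///   containsBreak("while (x) { break; }") --> false (the break is local)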
+bool CodeGenFunction::containsBreak(const Stmt *S) {
+ // Null statement, not a break!
+ if (S == 0) return false;
+
+ // If this is a switch or loop that defines its own break scope, then we can
+ // include it and anything inside of it.
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
+ isa<ForStmt>(S))
+ return false;
+
+ if (isa<BreakStmt>(S))
+ return true;
+
+ // Scan subexpressions for verboten breaks.
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (containsBreak(*I))
+ return true;
+
+ return false;
+}
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds, return true and set the boolean result in ResultBool.
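+///
+/// For example (illustrative only): a condition of "2 > 1" folds to true
+/// and "sizeof(int) == 0" folds to false, while "x > 1" for a non-constant
+/// "x" does not fold, so this returns false and ResultBool is untouched.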
+bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
+ bool &ResultBool) {
+ llvm::APInt ResultInt;
+ if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
+ return false;
+
+ ResultBool = ResultInt.getBoolValue();
+ return true;
+}
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds, return true and set the folded value in ResultInt.
+bool CodeGenFunction::
+ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ llvm::APSInt Int;
+ if (!Cond->EvaluateAsInt(Int, getContext()))
+ return false; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return false; // Contains a label.
+
+ ResultInt = Int;
+ return true;
+}
+
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
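+/// For example, "if (a && b)" is lowered roughly as (an illustrative
+/// sketch; the true/false block names are chosen by the caller):
+///   br(a, label %land.lhs.true, label %if.false)
+/// land.lhs.true:
+///   br(b, label %if.true, label %if.false)
+/// so "b" is only evaluated on the path where "a" was true.
+///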
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock) {
+ Cond = Cond->IgnoreParens();
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BO_LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ ConstantBool) {
+ // br(1 && X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ ConstantBool) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+ EmitBlock(LHSTrue);
+
+ // Any temporaries created here are conditional.
+ eval.begin(*this);
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ eval.end(*this);
+
+ return;
+ }
+
+ if (CondBOp->getOpcode() == BO_LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ !ConstantBool) {
+ // br(0 || X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ !ConstantBool) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+ EmitBlock(LHSFalse);
+
+ // Any temporaries created here are conditional.
+ eval.begin(*this);
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ eval.end(*this);
+
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UO_LNot)
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+
+ ConditionalEvaluation cond(*this);
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+
+ cond.begin(*this);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
+ cond.begin(*this);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
+ return;
+ }
+
+ // Emit the code with the fully general case.
+ llvm::Value *CondV = EvaluateExprAsBool(Cond);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+/// emitNonZeroVLAInit - Emit the "zero" initialization of a
+/// variable-length array whose elements have a non-zero bit-pattern.
+///
+/// \param dest - a pointer to the memory to initialize
+/// \param src - a char* pointing to the bit-pattern for a single
+/// base element of the array
+/// \param sizeInChars - the total size of the VLA, in chars
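+///
+/// Schematically, the emitted loop looks like (an illustrative sketch,
+/// not exact IR):
+///   vla-init.loop:
+///     %vla.cur  = phi i8* [ %vla.begin, %entry ], [ %vla.next, %vla-init.loop ]
+///     memcpy(%vla.cur, %src, <base element size>)
+///     %vla.next = getelementptr inbounds i8* %vla.cur, <base element size>
+///     %done     = icmp eq i8* %vla.next, %vla.end
+///     br i1 %done, label %vla-init.cont, label %vla-init.loop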
+static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *sizeInChars) {
+ std::pair<CharUnits,CharUnits> baseSizeAndAlign
+ = CGF.getContext().getTypeInfoInChars(baseType);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *baseSizeInChars
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+
+ llvm::Type *i8p = Builder.getInt8PtrTy();
+
+ llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin, sizeInChars, "vla.end");
+
+ llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
+
+ // Make a loop over the VLA. C99 guarantees that the VLA element
+ // count must be nonzero.
+ CGF.EmitBlock(loopBB);
+
+ llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
+ cur->addIncoming(begin, originBB);
+
+ // memcpy the individual element bit-pattern.
+ Builder.CreateMemCpy(cur, src, baseSizeInChars,
+ baseSizeAndAlign.second.getQuantity(),
+ /*volatile*/ false);
+
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(cur, baseSizeInChars, "vla.next");
+
+ // Leave if that's the end of the VLA.
+ llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
+ Builder.CreateCondBr(done, contBB, loopBB);
+ cur->addIncoming(next, loopBB);
+
+ CGF.EmitBlock(contBB);
+}
+
+void
+CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+ // Ignore empty classes in C++.
+ if (getContext().getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
+ // Cast the dest ptr to the appropriate i8 pointer type.
+ unsigned DestAS =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP);
+
+ // Get size and alignment info for this aggregate.
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ getContext().getTypeInfoInChars(Ty);
+ CharUnits Size = TypeInfo.first;
+ CharUnits Align = TypeInfo.second;
+
+ llvm::Value *SizeVal;
+ const VariableArrayType *vla;
+
+ // Don't bother emitting a zero-byte memset.
+ if (Size.isZero()) {
+ // But note that getTypeInfo returns 0 for a VLA.
+ if (const VariableArrayType *vlaType =
+ dyn_cast_or_null<VariableArrayType>(
+ getContext().getAsArrayType(Ty))) {
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = getVLASize(vlaType);
+
+ SizeVal = numElts;
+ CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
+ vla = vlaType;
+ } else {
+ return;
+ }
+ } else {
+ SizeVal = CGM.getSize(Size);
+ vla = 0;
+ }
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
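+ // For example (illustrative only): under the Itanium C++ ABI, a null
+ // pointer to data member is represented as the offset -1, so a record
+ // containing an "int S::*" must be "zeroed" by copying from a global
+ // holding that pattern rather than by a memset to 0.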
+ if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ // For a VLA, emit a single element, then splat that over the VLA.
+ if (vla) Ty = getContext().getBaseElementType(vla);
+
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, Twine());
+ llvm::Value *SrcPtr =
+ Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
+
+ if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
+}
+
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
+ // Make sure that there is a block for the indirect goto.
+ if (IndirectBranch == 0)
+ GetIndirectGotoBlock();
+
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
+
+ // Make sure the indirect branch includes all of the address-taken blocks.
+ IndirectBranch->addDestination(BB);
+ return llvm::BlockAddress::get(CurFn, BB);
+}
+
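+/// GetIndirectGotoBlock - All computed gotos in a function are funnelled
+/// through a single shared block. For example (illustrative only):
+///   void f(int i) {
+///     static void *lbls[] = { &&a, &&b };
+///     goto *lbls[i];
+///   a: ...; b: ...;
+///   }
+/// emits one "indirectgoto" block containing a phi of the destination
+/// address and a single indirectbr whose possible targets (%a, %b) are
+/// registered by GetAddrOfLabel above.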
+llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+ // If we already made the indirect branch for indirect goto, return its block.
+ if (IndirectBranch) return IndirectBranch->getParent();
+
+ CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+
+ // Create the PHI node that indirect gotos will add entries to.
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
+ "indirect.goto.dest");
+
+ // Create the indirect branch instruction.
+ IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+ return IndirectBranch->getParent();
+}
+
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
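+///
+/// For example (illustrative only): for "int a[3][5]", addr arrives with
+/// type [3 x [5 x i32]]* and this returns the constant 15 while rewriting
+/// addr to point at the first i32; for "int v[n][4]" it returns n*4, with
+/// "n" taken from the cached VLA size.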
+llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
+ QualType &baseType,
+ llvm::Value *&addr) {
+ const ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that this is the
+ // number of elements in all the VLA dimensions combined, in units of the
+ // first non-VLA element type, not a size in bytes.
+ llvm::Value *numVLAElements = 0;
+ if (isa<VariableArrayType>(arrayType)) {
+ numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
+
+ // Walk into all VLAs. This doesn't require changes to addr,
+ // which has type T* where T is the first non-VLA element type.
+ do {
+ QualType elementType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(elementType);
+
+ // If we only have VLA components, 'addr' requires no adjustment.
+ if (!arrayType) {
+ baseType = elementType;
+ return numVLAElements;
+ }
+ } while (isa<VariableArrayType>(arrayType));
+
+ // We get out here only if we find a constant array type
+ // inside the VLA.
+ }
+
+ // We have some number of constant-length arrays, so addr should
+ // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
+ // down to the first element of addr.
+ SmallVector<llvm::Value*, 8> gepIndices;
+
+ // GEP down to the array type.
+ llvm::ConstantInt *zero = Builder.getInt32(0);
+ gepIndices.push_back(zero);
+
+ // It's more efficient to calculate the count from the LLVM
+ // constant-length arrays than to re-evaluate the array bounds.
+ uint64_t countFromCLAs = 1;
+
+ llvm::ArrayType *llvmArrayType =
+ cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+ while (true) {
+ assert(isa<ConstantArrayType>(arrayType));
+ assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
+ == llvmArrayType->getNumElements());
+
+ gepIndices.push_back(zero);
+ countFromCLAs *= llvmArrayType->getNumElements();
+
+ llvmArrayType =
+ dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
+ if (!llvmArrayType) break;
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert(arrayType && "LLVM and Clang types are out of sync");
+ }
+
+ baseType = arrayType->getElementType();
+
+ // Create the actual GEP.
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+
+ llvm::Value *numElements
+ = llvm::ConstantInt::get(SizeTy, countFromCLAs);
+
+ // If we had any VLA dimensions, factor them in.
+ if (numVLAElements)
+ numElements = Builder.CreateNUWMul(numVLAElements, numElements);
+
+ return numElements;
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(QualType type) {
+ const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLASize(vla);
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(const VariableArrayType *type) {
+ // The number of elements so far; always size_t.
+ llvm::Value *numElements = 0;
+
+ QualType elementType;
+ do {
+ elementType = type->getElementType();
+ llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
+ assert(vlaSize && "no size for VLA!");
+ assert(vlaSize->getType() == SizeTy);
+
+ if (!numElements) {
+ numElements = vlaSize;
+ } else {
+ // It's undefined behavior if this wraps around, so mark it that way.
+ numElements = Builder.CreateNUWMul(numElements, vlaSize);
+ }
+ } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+ return std::pair<llvm::Value*,QualType>(numElements, elementType);
+}
+
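+/// EmitVariablyModifiedType - Walk the given variably modified type and
+/// evaluate (once) the size expression of each VLA level it contains,
+/// caching the results in VLASizeMap. For example (illustrative only),
+/// emitting the type of "p" in
+///   int (*p)[n];
+/// evaluates "n" here, so later emission of sizeof(*p) or p[i] can simply
+/// reuse the cached value.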
+void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVariablyModifiedType!");
+
+ EnsureInsertPoint();
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("unexpected dependent type!");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Pointer:
+ type = cast<PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ // Losing element qualification here is fine.
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (const Expr *size = vat->getSizeExpr()) {
+ // It's possible that we might have emitted this already,
+ // e.g. with a typedef and a pointer to it.
+ llvm::Value *&entry = VLASizeMap[size];
+ if (!entry) {
+ // Always zexting here would be wrong if it weren't
+ // undefined behavior to have a negative bound.
+ entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
+ /*signed*/ false);
+ }
+ }
+ type = vat->getElementType();
+ break;
+ }
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<FunctionType>(ty)->getResultType();
+ break;
+
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::SubstTemplateTypeParm:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
+ case Type::Atomic:
+ type = cast<AtomicType>(ty)->getValueType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
+}
+
+llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+ if (getContext().getBuiltinVaListType()->isArrayType())
+ return EmitScalarExpr(E);
+ return EmitLValue(E).getAddress();
+}
+
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::Constant *Init) {
+ assert(Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
+}
+
+CodeGenFunction::PeepholeProtection
+CodeGenFunction::protectFromPeepholes(RValue rvalue) {
+ // At the moment, the only aggressive peephole we do in IR gen
+ // is trunc(zext) folding, but if we add more, we can easily
+ // extend this protection.
+
+ if (!rvalue.isScalar()) return PeepholeProtection();
+ llvm::Value *value = rvalue.getScalarVal();
+ if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
+
+ // Just make an extra bitcast.
+ assert(HaveInsertPoint());
+ llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
+ Builder.GetInsertBlock());
+
+ PeepholeProtection protection;
+ protection.Inst = inst;
+ return protection;
+}
+
+void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
+ if (!protection.Inst) return;
+
+ // In theory, we could try to duplicate the peepholes now, but whatever.
+ protection.Inst->eraseFromParent();
+}
+
+llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ llvm::StringRef AnnotationStr,
+ SourceLocation Location) {
+ llvm::Value *Args[4] = {
+ AnnotatedVal,
+ Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
+ Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
+ CGM.EmitAnnotationLineNo(Location)
+ };
+ return Builder.CreateCall(AnnotationFn, Args);
+}
+
+void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // FIXME We create a new bitcast for every annotation because that's what
+ // llvm-gcc was doing.
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+ EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
+ Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
+ (*ai)->getAnnotation(), D->getLocation());
+}
+
+llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
+ llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ llvm::Type *VTy = V->getType();
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
+ CGM.Int8PtrTy);
+
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
+ // FIXME Always emit the cast inst so we can differentiate between
+ // annotation on the first field of a struct and annotation on the struct
+ // itself.
+ if (VTy != CGM.Int8PtrTy)
+ V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
+ V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
+ V = Builder.CreateBitCast(V, VTy);
+ }
+
+ return V;
+}
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..83f1e2d
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,2702 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Debug.h"
+#include "CodeGenModule.h"
+#include "CGBuilder.h"
+#include "CGDebugInfo.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class BasicBlock;
+ class LLVMContext;
+ class MDNode;
+ class Module;
+ class SwitchInst;
+ class Twine;
+ class Value;
+ class CallSite;
+}
+
+namespace clang {
+ class ASTContext;
+ class BlockDecl;
+ class CXXDestructorDecl;
+ class CXXForRangeStmt;
+ class CXXTryStmt;
+ class Decl;
+ class LabelDecl;
+ class EnumConstantDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class LabelStmt;
+ class ObjCContainerDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCPropertyImplDecl;
+ class TargetInfo;
+ class TargetCodeGenInfo;
+ class VarDecl;
+ class ObjCForCollectionStmt;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCAutoreleasePoolStmt;
+
+namespace CodeGen {
+ class CodeGenTypes;
+ class CGFunctionInfo;
+ class CGRecordLayout;
+ class CGBlockInfo;
+ class CGCXXABI;
+ class BlockFlags;
+ class BlockFieldFlags;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, InitialBranch points directly to the destination.
+ llvm::BasicBlock *OptimisticBranchBlock;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The destination index value.
+ unsigned DestinationIndex;
+
+ /// The initial branch of the fixup.
+ llvm::BranchInst *InitialBranch;
+};
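+
+/// For example (illustrative only): in
+///   {
+///     A a; // non-trivial destructor => normal cleanup on the stack
+///     goto done; // 'done' not yet emitted: the branch gets a fixup
+///   }
+///   done: ;
+/// when the cleanup for 'a' is popped, the fixup is threaded through the
+/// destructor code and then resolved to the real 'done' block.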
+
+template <class T> struct InvariantValue {
+ typedef T type;
+ typedef T saved_type;
+ static bool needsSaving(type value) { return false; }
+ static saved_type save(CodeGenFunction &CGF, type value) { return value; }
+ static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+ llvm::is_base_of<llvm::Value, T>::value &&
+ !llvm::is_base_of<llvm::Constant, T>::value &&
+ !llvm::is_base_of<llvm::BasicBlock, T>::value>
+struct DominatingPointer;
+template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
+// template <class T> struct DominatingPointer<T,true> at end of file
+
+template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
+
+enum CleanupKind {
+ EHCleanup = 0x1,
+ NormalCleanup = 0x2,
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ InactiveCleanup = 0x4,
+ InactiveEHCleanup = EHCleanup | InactiveCleanup,
+ InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
+ InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator I) const { return Size <= I.Size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
+
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+ public:
+ /// Generation flags.
+ class Flags {
+ enum {
+ F_IsForEH = 0x1,
+ F_IsNormalCleanupKind = 0x2,
+ F_IsEHCleanupKind = 0x4
+ };
+ unsigned flags;
+
+ public:
+ Flags() : flags(0) {}
+
+ /// isForEHCleanup - true if the current emission is for an EH cleanup.
+ bool isForEHCleanup() const { return flags & F_IsForEH; }
+ bool isForNormalCleanup() const { return !isForEHCleanup(); }
+ void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+ bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
+ void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+ /// isEHCleanupKind - true if the cleanup was pushed as an EH
+ /// cleanup.
+ bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+ void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+ };
+
+ // Provide a virtual destructor to suppress a very common warning
+ // that unfortunately cannot be suppressed without this. Cleanups
+ // should not rely on this destructor ever being called.
+ virtual ~Cleanup() {}
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ /// \param flags - a Flags value describing, among other things, whether
+ /// this emission is for an EH cleanup or a normal cleanup.
+ virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
+ };
+
+ /// ConditionalCleanupN stores the saved form of its N parameters,
+ /// then restores them and performs the cleanup.
+ template <class T, class A0>
+ class ConditionalCleanup1 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ A0_saved a0_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ T(a0).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup1(A0_saved a0)
+ : a0_saved(a0) {}
+ };
+
+ template <class T, class A0, class A1>
+ class ConditionalCleanup2 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ T(a0, a1).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup2(A0_saved a0, A1_saved a1)
+ : a0_saved(a0), a1_saved(a1) {}
+ };
+
+ template <class T, class A0, class A1, class A2>
+ class ConditionalCleanup3 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ T(a0, a1, a2).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
+ };
+
+ template <class T, class A0, class A1, class A2, class A3>
+ class ConditionalCleanup4 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ typedef typename DominatingValue<A3>::saved_type A3_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+ A3_saved a3_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
+ T(a0, a1, a2, a3).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
+ };
+
+private:
+ // The implementation for this class is in CGCleanup.h and
+ // CGCleanup.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH scope on the stack.
+ stable_iterator InnermostEHScope;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+
+ void *pushCleanup(CleanupKind K, size_t DataSize);
+
+public:
+ EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
+ InnermostNormalCleanup(stable_end()),
+ InnermostEHScope(stable_end()) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ // Variadic templates would make this not terrible.
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T>
+ void pushCleanup(CleanupKind Kind) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T();
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0>
+ void pushCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3, class A4>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
+ (void) Obj;
+ }
+
+ // Feel free to add more variants of the following:
+
+ /// Push a cleanup with non-constant storage requirements on the
+ /// stack. The cleanup type must provide an additional static method:
+ /// static size_t getExtraSize(size_t);
+ /// The argument to this method will be the value N, which will also
+ /// be passed as the first argument to the constructor.
+ ///
+ /// The data stored in the extra storage must obey the same
+ /// restrictions as normal cleanup member data.
+ ///
+ /// The pointer returned from this method is valid until the cleanup
+ /// stack is modified.
+ template <class T, class A0, class A1, class A2>
+ T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+ return new (Buffer) T(N, a0, a1, a2);
+ }
+
+ /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CGException.cpp.
+ void popCatch();
+
+ /// Push an exception filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exception filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return InnermostEHScope != stable_end();
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
+ stable_iterator getInnermostEHScope() const {
+ return InnermostEHScope;
+ }
+
+ stable_iterator getInnermostActiveEHScope() const;
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+ /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Removes the cleanup pointed to by the given stable_iterator.
+ void removeCleanup(stable_iterator save);
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+
+ /// Clears the branch-fixups list. This should only be called by
+ /// ResolveAllBranchFixups.
+ void clearFixups() { BranchFixups.clear(); }
+};
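+
+// Illustrative usage of EHScopeStack (a sketch; concrete cleanups are
+// normally declared in anonymous namespaces in the various CG*.cpp files):
+//   struct CallFunc : EHScopeStack::Cleanup {
+//     llvm::Value *Arg;
+//     CallFunc(llvm::Value *Arg) : Arg(Arg) {}
+//     void Emit(CodeGenFunction &CGF, Flags flags) {
+//       // ... emit the call, passing Arg ...
+//     }
+//   };
+//   CGF.EHStack.pushCleanup<CallFunc>(NormalAndEHCleanup, arg);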
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public CodeGenTypeCache {
+ CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+
+ friend class CGCXXABI;
+public:
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() : Block(0), ScopeDepth(), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != 0; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+
+ private:
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
+ };
+
+ CodeGenModule &CGM; // Per-module state.
+ const TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ CGBuilderTy Builder;
+
+ /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+ /// This excludes BlockDecls.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// CurGD - The GlobalDecl for the current function being compiled.
+ GlobalDecl CurGD;
+
+ /// PrologueCleanupDepth - The cleanup depth enclosing all the
+ /// cleanups associated with the parameters.
+ EHScopeStack::stable_iterator PrologueCleanupDepth;
+
+ /// ReturnBlock - Unified return block.
+ JumpDest ReturnBlock;
+
+ /// ReturnValue - The temporary alloca to hold the return value. This is null
+ /// iff the function has no return value.
+ llvm::Value *ReturnValue;
+
+ /// AllocaInsertPt - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ bool CatchUndefined;
+
+ /// In ARC, whether we should autorelease the return value.
+ bool AutoreleaseResult;
+
+ const CodeGen::CGBlockInfo *BlockInfo;
+ llvm::Value *BlockPointer;
+
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
+ /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// when the NRVO has been applied to this variable.
+ llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
+
+ EHScopeStack EHStack;
+
+ /// i32s containing the indexes of the cleanup destinations.
+ llvm::AllocaInst *NormalCleanupDest;
+
+ unsigned NextCleanupDestIndex;
+
+ /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
+ CGBlockInfo *FirstBlockInfo;
+
+ /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
+ llvm::BasicBlock *EHResumeBlock;
+
+ /// The exception slot. All landing pads write the current exception pointer
+ /// into this alloca.
+ llvm::Value *ExceptionSlot;
+
+ /// The selector slot. Under the MandatoryCleanup model, all landing pads
+ /// write the current selector value into this alloca.
+ llvm::AllocaInst *EHSelectorSlot;
+
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
+
+ llvm::BasicBlock *getInvokeDestImpl();
+
+ template <class T>
+ typename DominatingValue<T>::saved_type saveValueInCond(T value) {
+ return DominatingValue<T>::save(*this, value);
+ }
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ /// A class controlling the emission of a finally block.
+ class FinallyInfo {
+ /// Where the catchall's edge through the cleanup should go.
+ JumpDest RethrowDest;
+
+ /// A function to call to enter the catch.
+ llvm::Constant *BeginCatchFn;
+
+ /// An i1 variable indicating whether or not the @finally is
+ /// running for an exception.
+ llvm::AllocaInst *ForEHVar;
+
+ /// An i8* variable into which the exception pointer to rethrow
+ /// has been saved.
+ llvm::AllocaInst *SavedExnVar;
+
+ public:
+ void enter(CodeGenFunction &CGF, const Stmt *Finally,
+ llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn);
+ void exit(CodeGenFunction &CGF);
+ };
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0);
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+
+ typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0, a1);
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+
+ typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+
+ typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+ typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
+
+ typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
+ a2_saved, a3_saved);
+ initFullExprCleanup();
+ }
+
+ /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object variant of the given destructor on the object at
+ /// the given address.
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
+ llvm::Value *Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+
+ /// DeactivateCleanupBlock - Deactivates the given cleanup block.
+ /// The block cannot be reactivated. Pops it if it's the top of the
+ /// stack.
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and the
+ /// point at which the cleanup comes into scope.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
+
+ /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
+ /// Cannot be used to resurrect a deactivated cleanup.
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and the
+ /// point at which the cleanup comes into scope.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
+
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ EHScopeStack::stable_iterator CleanupStackDepth;
+ bool OldDidCallStackSave;
+ bool PerformCleanup;
+
+ RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+
+ protected:
+ CodeGenFunction& CGF;
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
+ : PerformCleanup(true), CGF(CGF)
+ {
+ CleanupStackDepth = CGF.EHStack.stable_begin();
+ OldDidCallStackSave = CGF.DidCallStackSave;
+ CGF.DidCallStackSave = false;
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~RunCleanupsScope() {
+ if (PerformCleanup) {
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth);
+ }
+ }
+
+ /// \brief Determine whether this scope requires any cleanups.
+ bool requiresCleanups() const {
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ assert(PerformCleanup && "Already forced cleanup");
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth);
+ PerformCleanup = false;
+ }
+ };
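+
+ // Illustrative usage of RunCleanupsScope (a sketch):
+ // {
+ // RunCleanupsScope Scope(*this);
+ // ... emit statements that may push cleanups ...
+ // } // leaving the scope pops and emits the accumulated cleanups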
+
+ class LexicalScope: protected RunCleanupsScope {
+ SourceRange Range;
+ bool PopDebugStack;
+
+ LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
+ LexicalScope &operator=(const LexicalScope &);
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
+ : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~LexicalScope() {
+ if (PopDebugStack) {
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ }
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ RunCleanupsScope::ForceCleanup();
+ if (CGDebugInfo *DI = CGF.getDebugInfo()) {
+ DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ PopDebugStack = false;
+ }
+ }
+ };
+
+
+ /// PopCleanupBlocks - Takes the old cleanup stack size and emits
+ /// the cleanup blocks that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+
+ void ResolveBranchFixups(llvm::BasicBlock *Target);
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
+ return JumpDest(Target,
+ EHStack.getInnermostNormalCleanup(),
+ NextCleanupDestIndex++);
+ }
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
+ return getJumpDestInCurrentScope(createBasicBlock(Name));
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
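+
+  // Usage sketch (illustrative): a 'break' inside a loop records a
+  // stable JumpDest for the exit block up front, then branches to it
+  // through any intervening cleanups instead of emitting a plain 'br':
+  //
+  //   JumpDest LoopExit = getJumpDestInCurrentScope("loop.exit");
+  //   ...
+  //   EmitBranchThroughCleanup(LoopExit); // runs cleanups, then jumps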
+
+ /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+ /// specified destination obviously has no cleanups to run. 'false' is always
+ /// a conservatively correct answer for this method.
+ bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
+
+ /// popCatchScope - Pops the catch scope at the top of the EHScope
+ /// stack, emitting any required code (other than the catch handlers
+ /// themselves).
+ void popCatchScope();
+
+ llvm::BasicBlock *getEHResumeBlock();
+ llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
+
+ /// An object to manage conditionally-evaluated expressions.
+ class ConditionalEvaluation {
+ llvm::BasicBlock *StartBB;
+
+ public:
+ ConditionalEvaluation(CodeGenFunction &CGF)
+ : StartBB(CGF.Builder.GetInsertBlock()) {}
+
+ void begin(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != this);
+ if (!CGF.OutermostConditional)
+ CGF.OutermostConditional = this;
+ }
+
+ void end(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != 0);
+ if (CGF.OutermostConditional == this)
+ CGF.OutermostConditional = 0;
+ }
+
+ /// Returns a block which will be executed prior to each
+ /// evaluation of the conditional code.
+ llvm::BasicBlock *getStartingBlock() const {
+ return StartBB;
+ }
+ };
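+
+  // Usage sketch (illustrative; 'trueExpr' is a hypothetical operand):
+  // bracket the emission of each conditionally-executed operand so
+  // that temporaries know they must be destroyed conditionally.
+  //
+  //   ConditionalEvaluation eval(*this);
+  //   eval.begin(*this);
+  //   llvm::Value *lhs = EmitScalarExpr(trueExpr);
+  //   eval.end(*this);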
+
+ /// isInConditionalBranch - Return true if we're currently emitting
+ /// one branch or the other of a conditional expression.
+ bool isInConditionalBranch() const { return OutermostConditional != 0; }
+
+ void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
+ assert(isInConditionalBranch());
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ new llvm::StoreInst(value, addr, &block->back());
+ }
+
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CodeGenFunction &CGF;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *SavedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CodeGenFunction &CGF)
+ : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
+ CGF.OutermostConditional = 0;
+ }
+
+ ~StmtExprEvaluation() {
+ CGF.OutermostConditional = SavedOutermostConditional;
+ CGF.EnsureInsertPoint();
+ }
+ };
+
+ /// An object which temporarily prevents a value from being
+ /// destroyed by aggressive peephole optimizations that assume that
+ /// all uses of a value have been realized in the IR.
+ class PeepholeProtection {
+ llvm::Instruction *Inst;
+ friend class CodeGenFunction;
+
+ public:
+ PeepholeProtection() : Inst(0) {}
+ };
+
+ /// A non-RAII class containing all the information about a bound
+ /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
+ /// this which makes individual mappings very simple; using this
+ /// class directly is useful when you have a variable number of
+ /// opaque values or don't want the RAII functionality for some
+ /// reason.
+ class OpaqueValueMappingData {
+ const OpaqueValueExpr *OpaqueValue;
+ bool BoundLValue;
+ CodeGenFunction::PeepholeProtection Protection;
+
+ OpaqueValueMappingData(const OpaqueValueExpr *ov,
+ bool boundLValue)
+ : OpaqueValue(ov), BoundLValue(boundLValue) {}
+ public:
+ OpaqueValueMappingData() : OpaqueValue(0) {}
+
+ static bool shouldBindAsLValue(const Expr *expr) {
+ // gl-values should be bound as l-values for obvious reasons.
+ // Records should be bound as l-values because IR generation
+ // always keeps them in memory. Expressions of function type
+ // act exactly like l-values but are formally required to be
+ // r-values in C.
+ return expr->isGLValue() ||
+ expr->getType()->isRecordType() ||
+ expr->getType()->isFunctionType();
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const Expr *e) {
+ if (shouldBindAsLValue(ov))
+ return bind(CGF, ov, CGF.EmitLValue(e));
+ return bind(CGF, ov, CGF.EmitAnyExpr(e));
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const LValue &lv) {
+ assert(shouldBindAsLValue(ov));
+ CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
+ return OpaqueValueMappingData(ov, true);
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const RValue &rv) {
+ assert(!shouldBindAsLValue(ov));
+ CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
+
+ OpaqueValueMappingData data(ov, false);
+
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ data.Protection = CGF.protectFromPeepholes(rv);
+
+ return data;
+ }
+
+ bool isValid() const { return OpaqueValue != 0; }
+ void clear() { OpaqueValue = 0; }
+
+ void unbind(CodeGenFunction &CGF) {
+ assert(OpaqueValue && "no data to unbind!");
+
+ if (BoundLValue) {
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ } else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ OpaqueValueMappingData Data;
+
+ public:
+ static bool shouldBindAsLValue(const Expr *expr) {
+ return OpaqueValueMappingData::shouldBindAsLValue(expr);
+ }
+
+ /// Build the opaque value mapping for the given conditional
+ /// operator if it's the GNU ?: extension. This is a common
+    /// enough pattern that the convenience constructor is really
+    /// helpful.
+ ///
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *op) : CGF(CGF) {
+ if (isa<ConditionalOperator>(op))
+ // Leave Data empty.
+ return;
+
+ const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
+ Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
+ e->getCommon());
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ LValue lvalue)
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ RValue rvalue)
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
+ }
+
+ void pop() {
+ Data.unbind(CGF);
+ Data.clear();
+ }
+
+ ~OpaqueValueMapping() {
+ if (Data.isValid()) Data.unbind(CGF);
+ }
+ };
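+
+  // Usage sketch (illustrative; 'ov', 'source', and 'exprThatUsesOV'
+  // are hypothetical): bind the opaque value for the duration of an
+  // emission; the mapping is removed when 'binding' leaves scope.
+  //
+  //   OpaqueValueMapping binding(*this, ov, EmitLValue(source));
+  //   LValue result = EmitLValue(exprThatUsesOV);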
+
+  /// getByRefValueLLVMField - Given a __block variable's declaration,
+  /// returns the LLVM field number that holds its value.
+ unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+ /// BuildBlockByrefAddress - Computes address location of the
+ /// variable which is declared as __block.
+ llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V);
+private:
+ CGDebugInfo *DebugInfo;
+ bool DisableDebugInfo;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ /// IndirectBranch - The first time an indirect goto is seen we create a block
+ /// with an indirect branch. Every time we see the address of a label taken,
+ /// we add the label to the indirect goto. Every subsequent indirect goto is
+ /// codegen'd as a jump to the IndirectBranch's basic block.
+ llvm::IndirectBrInst *IndirectBranch;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
+ DeclMapTy LocalDeclMap;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
+
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
+ };
+ SmallVector<BreakContinue, 8> BreakContinueStack;
+
+  /// SwitchInsn - This is the nearest enclosing switch instruction. It is
+  /// null if the current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+  /// CaseRangeBlock - This block holds the condition check for the last
+  /// case statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ /// OpaqueLValues - Keeps track of the current set of opaque value
+ /// expressions.
+ llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
+ llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // We track this by the size expression rather than the type itself because
+  // in certain situations, like a const qualifier applied to a VLA typedef,
+ // multiple VLA types can share the same size expression.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
+
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
+
+  /// CXXABIThisDecl - When generating code for a C++ member function,
+  /// this will hold the implicit 'this' declaration.
+ ImplicitParamDecl *CXXABIThisDecl;
+ llvm::Value *CXXABIThisValue;
+ llvm::Value *CXXThisValue;
+
+ /// CXXVTTDecl - When generating code for a base object constructor or
+ /// base object destructor with virtual bases, this will hold the implicit
+ /// VTT parameter.
+ ImplicitParamDecl *CXXVTTDecl;
+ llvm::Value *CXXVTTValue;
+
+ /// OutermostConditional - Points to the outermost active
+ /// conditional control. This is used so that we know if a
+ /// temporary should be destroyed conditionally.
+ ConditionalEvaluation *OutermostConditional;
+
+  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
+  /// type as well as the field number that contains the actual data.
+ llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
+ unsigned> > ByRefValueInfo;
+
+ llvm::BasicBlock *TerminateLandingPad;
+ llvm::BasicBlock *TerminateHandler;
+ llvm::BasicBlock *TrapBB;
+
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+ ~CodeGenFunction();
+
+ CodeGenTypes &getTypes() const { return CGM.getTypes(); }
+ ASTContext &getContext() const { return CGM.getContext(); }
+ CGDebugInfo *getDebugInfo() {
+ if (DisableDebugInfo)
+ return NULL;
+ return DebugInfo;
+ }
+ void disableDebugInfo() { DisableDebugInfo = true; }
+ void enableDebugInfo() { DisableDebugInfo = false; }
+
+ bool shouldUseFusedARCCalls() {
+ return CGM.getCodeGenOpts().OptimizationLevel == 0;
+ }
+
+ const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
+
+  /// Returns a pointer to the function's exception object and selector slots,
+  /// which are assigned in every landing pad.
+ llvm::Value *getExceptionSlot();
+ llvm::Value *getEHSelectorSlot();
+
+ /// Returns the contents of the function's exception object and selector
+ /// slots.
+ llvm::Value *getExceptionFromSlot();
+ llvm::Value *getSelectorFromSlot();
+
+ llvm::Value *getNormalCleanupDestSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return 0;
+ return getInvokeDestImpl();
+ }
+
+ llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
+
+ //===--------------------------------------------------------------------===//
+ // Cleanups
+ //===--------------------------------------------------------------------===//
+
+ typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
+
+ void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer *destroyer);
+ void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer *destroyer);
+
+ void pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type);
+ void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
+ Destroyer *destroyer, bool useEHCleanupForArray);
+ void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray);
+ llvm::Function *generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray);
+ void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
+ QualType type, Destroyer *destroyer,
+ bool checkZeroLength, bool useEHCleanup);
+
+ Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
+
+ /// Determines whether an EH cleanup is required to destroy a type
+ /// with the given destruction kind.
+ bool needsEHCleanup(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ return false;
+ case QualType::DK_cxx_destructor:
+ case QualType::DK_objc_weak_lifetime:
+ return getLangOpts().Exceptions;
+ case QualType::DK_objc_strong_lifetime:
+ return getLangOpts().Exceptions &&
+ CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
+ }
+ llvm_unreachable("bad destruction kind");
+ }
+
+ CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+ return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
+ }
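+
+  // Usage sketch (illustrative; 'addr' and 'ty' are hypothetical):
+  // push a destructor cleanup only when the type actually needs one.
+  //
+  //   if (QualType::DestructionKind dtorKind = ty.isDestructedType())
+  //     pushDestroy(dtorKind, addr, ty);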
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD,
+ const ObjCContainerDecl *CD,
+ SourceLocation StartLoc);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
+
+ void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD, bool ctor);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
+ bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
+ bool IvarTypeWithAggrGCObjects(QualType Ty);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+ static void destroyBlockInfos(CGBlockInfo *info);
+ llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
+ const CGBlockInfo &Info,
+ llvm::StructType *,
+ llvm::Constant *BlockVarLayout);
+
+ llvm::Function *GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &Info,
+ const Decl *OuterFuncDecl,
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock);
+
+ llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
+
+ void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+
+ class AutoVarEmission;
+
+ void emitByrefStructureInit(const AutoVarEmission &emission);
+ void enterByrefCleanup(const AutoVarEmission &emission);
+
+ llvm::Value *LoadBlockStruct() {
+ assert(BlockPointer && "no block pointer set!");
+ return BlockPointer;
+ }
+
+ void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
+ void AllocateBlockDecl(const DeclRefExpr *E);
+ llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+ llvm::Type *BuildByRefType(const VarDecl *var);
+
+ void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo);
+ void StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc);
+
+ void EmitConstructorBody(FunctionArgList &Args);
+ void EmitDestructorBody(FunctionArgList &Args);
+ void EmitFunctionBody(FunctionArgList &Args);
+
+ void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs);
+ void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
+ void EmitLambdaBlockInvokeBody();
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
+
+ /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+ /// emission when possible.
+ void EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ /// GenerateThunk - Generate a thunk for the given method.
+ void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
+ FunctionArgList &Args);
+
+ void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes);
+
+ /// InitializeVTablePointer - Initialize the vtable pointer of the given
+ /// subobject.
+ ///
+ void InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+ void InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases);
+
+ void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+
+ /// GetVTablePtr - Return the Value of the vtable pointer member pointed
+ /// to by This.
+ llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
+
+ /// EnterDtorCleanups - Enter the cleanups necessary to complete the
+ /// given phase of destruction for a destructor. The end result
+ /// should call destructors on members and base classes in reverse
+ /// order of their construction.
+ void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
+
+ /// ShouldInstrumentFunction - Return true if the current function should be
+  /// instrumented with __cyg_profile_func_* calls.
+ bool ShouldInstrumentFunction();
+
+ /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+ /// instrumentation function with the current function and the call site, if
+ /// function instrumentation is enabled.
+ void EmitFunctionInstrumentation(const char *Fn);
+
+ /// EmitMCountInstrumentation - Emit call to .mcount.
+ void EmitMCountInstrumentation();
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI);
+
+ /// EmitStartEHSpec - Emit the start of the exception spec.
+ void EmitStartEHSpec(const Decl *D);
+
+ /// EmitEndEHSpec - Emit the end of the exception spec.
+ void EmitEndEHSpec(const Decl *D);
+
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
+ llvm::BasicBlock *getTerminateHandler();
+
+ llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertType(QualType T);
+ llvm::Type *ConvertType(const TypeDecl *T) {
+ return ConvertType(getContext().getTypeDeclType(T));
+ }
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+ /// TypeOfSelfObject - Return type of object that this self represents.
+ QualType TypeOfSelfObject();
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(StringRef name = "",
+ llvm::Function *parent = 0,
+ llvm::BasicBlock *before = 0) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
+#else
+ return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
+#endif
+ }
+
+  /// getJumpDestForLabel - Return the JumpDest for the LLVM basic block that
+  /// the specified label maps to.
+ JumpDest getJumpDestForLabel(const LabelDecl *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a branch to
+ /// another basic block, simplify it. This assumes that no other code could
+ /// potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBlockAfterUses - Emit the given block somewhere hopefully
+ /// near its uses, and leave the insertion point in it.
+ void EmitBlockAfterUses(llvm::BasicBlock *BB);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+  /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != 0;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
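+
+  // Usage sketch (illustrative): statement emitters reached with no
+  // insertion point either bail out early or force a (dead) block.
+  //
+  //   if (!HaveInsertPoint())
+  //     return;            // unreachable; skip emission entirely
+  //   // ...or, to emit anyway into a fresh unreachable block:
+  //   EnsureInsertPoint();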
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ LValue MakeAddrLValue(llvm::Value *V, QualType T,
+ CharUnits Alignment = CharUnits()) {
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
+ }
+ LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ CharUnits Alignment;
+ if (!T->isIncompleteType())
+ Alignment = getContext().getTypeAlignInChars(T);
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
+ }
+
+  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block. The caller is responsible for setting an appropriate alignment on
+ /// the alloca.
+ llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca.
+ void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+
+ /// CreateIRTemp - Create a temporary IR object of the given type, with
+  /// appropriate alignment. This routine should only be used when a temporary
+ /// value needs to be stored into an alloca (for example, to avoid explicit
+ /// PHI construction), but the type is the IR type, not the type appropriate
+ /// for storing in memory.
+ llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
+
+ /// CreateMemTemp - Create a temporary memory object of the given type, with
+ /// appropriate alignment.
+ llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
+
+ /// CreateAggTemp - Create a temporary memory object for the given
+ /// aggregate type.
+ AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
+ CharUnits Alignment = getContext().getTypeAlignInChars(T);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
+ T.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ }
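+
+  // Usage sketch (illustrative; 'E' is a hypothetical aggregate
+  // expression): materialize it into a fresh temporary slot.
+  //
+  //   AggValueSlot slot = CreateAggTemp(E->getType(), "agg.tmp");
+  //   EmitAggExpr(E, slot);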
+
+ /// Emit a cast to void* in the appropriate address space.
+ llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
+ void EmitIgnoredExpr(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+  /// any type. The result is returned as an RValue struct. If this is an
+  /// aggregate expression, the AggSlot argument indicates where the result
+  /// should be returned.
+ ///
+ /// \param IgnoreResult - True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E,
+ AggValueSlot AggSlot = AggValueSlot::ignored(),
+ bool IgnoreResult = false);
+
+ // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ // or the value of the expression, depending on how va_list is defined.
+ llvm::Value *EmitVAListRef(const Expr *E);
+
+  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
+  /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E);
+
+ /// EmitAnyExprToMem - Emits the code necessary to evaluate an
+ /// arbitrary expression into the given memory location.
+ void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ Qualifiers Quals, bool IsInitializer);
+
+ /// EmitExprAsInit - Emits the code necessary to initialize a
+ /// location in memory with the given initializer.
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
+
+  /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false,
+ unsigned Alignment = 0);
+
+  /// StartBlock - Start a new block named N. If the insert block is a dummy
+  /// block, reuse it.
+ void StartBlock(const char *N);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+ }
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+ }
+
+ /// getOpaqueLValueMapping - Given an opaque value expression (which
+ /// must be mapped to an l-value), return its mapping.
+ const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
+ it = OpaqueLValues.find(e);
+ assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getOpaqueRValueMapping - Given an opaque value expression (which
+ /// must be mapped to an r-value), return its mapping.
+ const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
+ assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
+ it = OpaqueRValues.find(e);
+ assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
+ llvm::BasicBlock *GetIndirectGotoBlock();
+
+  /// EmitNullInitialization - Generate code to set a value of the given type
+  /// to null. If the type contains data member pointers, they will be
+  /// initialized to -1 in accordance with the Itanium C++ ABI.
+ void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
+
+ // EmitVAArg - Generate code to get an argument from the passed in pointer
+ // and update it accordingly. The return value is a pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+
+ /// emitArrayLength - Compute the length of an array, even if it's a
+ /// VLA, and drill down to the base element type.
+ llvm::Value *emitArrayLength(const ArrayType *arrayType,
+ QualType &baseType,
+ llvm::Value *&addr);
+
+  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
+  /// in the given variably-modified type and store them in the VLASizeMap.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitVariablyModifiedType(QualType Ty);
+
+ /// getVLASize - Returns an LLVM value that corresponds to the size,
+ /// in non-variably-sized elements, of a variable length array type,
+  /// plus the largest non-variably-sized element type. Assumes that
+ /// the type has already been emitted with EmitVariablyModifiedType.
+ std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
+ std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
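+
+  // Usage sketch (illustrative; 'vlaTy' is a hypothetical
+  // variably-modified type): capture the size expressions once,
+  // then query the cached element count.
+  //
+  //   EmitVariablyModifiedType(vlaTy);
+  //   llvm::Value *numElts = getVLASize(vlaTy).first;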
+
+  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+  /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis() {
+ assert(CXXThisValue && "no 'this' value for this function");
+ return CXXThisValue;
+ }
+
+  /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
+  /// that have virtual bases.
+ llvm::Value *LoadCXXVTT() {
+ assert(CXXVTTValue && "no VTT value for this function");
+ return CXXVTTValue;
+ }
+
+  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
+  /// complete class to the given direct base.
+ llvm::Value *
+ GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual);
+
+  /// GetAddressOfBaseClass - This function will add the necessary delta to the
+  /// load of 'this' and return the address of the base class.
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl);
+
+ void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args);
+ // It's important not to confuse this and the previous function. Delegating
+ // constructors are the C++0x feature. The constructor delegate optimization
+  // is used to reduce duplication in the base and complete constructors where
+ // they are substantially the same.
+ void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args);
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
+
+ static Destroyer destroyCXXObject;
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ bool ForVirtualBase, llvm::Value *This);
+
+ void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
+ llvm::Value *NewPtr, llvm::Value *NumElements);
+
+ void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
+ llvm::Value *Ptr);
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+ void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+ void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
+ QualType DeleteTy);
+
+ llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+
+ void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
+ void EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init);
+
+ void EmitCheck(llvm::Value *, unsigned Size);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitDecl - Emit a declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitDecl(const Decl &D);
+
+ /// EmitVarDecl - Emit a local variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitVarDecl(const VarDecl &D);
+
+ void EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
+ void EmitScalarInit(llvm::Value *init, LValue lvalue);
+
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
+ /// EmitAutoVarDecl - Emit an auto variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitAutoVarDecl(const VarDecl &D);
+
+ class AutoVarEmission {
+ friend class CodeGenFunction;
+
+ const VarDecl *Variable;
+
+ /// The alignment of the variable.
+ CharUnits Alignment;
+
+ /// The address of the alloca. Null if the variable was emitted
+ /// as a global constant.
+ llvm::Value *Address;
+
+ llvm::Value *NRVOFlag;
+
+ /// True if the variable is a __block variable.
+ bool IsByRef;
+
+ /// True if the variable is of aggregate type and has a constant
+ /// initializer.
+ bool IsConstantAggregate;
+
+ struct Invalid {};
+ AutoVarEmission(Invalid) : Variable(0) {}
+
+ AutoVarEmission(const VarDecl &variable)
+ : Variable(&variable), Address(0), NRVOFlag(0),
+ IsByRef(false), IsConstantAggregate(false) {}
+
+ bool wasEmittedAsGlobal() const { return Address == 0; }
+
+ public:
+ static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
+
+ /// Returns the address of the object within this declaration.
+ /// Note that this does not chase the forwarding pointer for
+ /// __block decls.
+ llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
+ if (!IsByRef) return Address;
+
+ return CGF.Builder.CreateStructGEP(Address,
+ CGF.getByRefValueLLVMField(Variable),
+ Variable->getNameAsString());
+ }
+ };
+ AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
+ void EmitAutoVarInit(const AutoVarEmission &emission);
+ void EmitAutoVarCleanups(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind);
+
+ void EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
+
+ /// protectFromPeepholes - Protect a value that we're intending to
+ /// store to the side, but which will probably be used later, from
+ /// aggressive peepholing optimizations that might delete it.
+ ///
+ /// Pass the result to unprotectFromPeepholes to declare that
+ /// protection is no longer required.
+ ///
+  /// There's no particular reason why this shouldn't apply to
+  /// l-values; it's just that no existing peepholes work on pointers.
+ PeepholeProtection protectFromPeepholes(RValue rvalue);
+ void unprotectFromPeepholes(PeepholeProtection protection);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ AggValueSlot AVS = AggValueSlot::ignored());
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitAttributedStmt(const AttributedStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+ void EmitWhileStmt(const WhileStmt &S);
+ void EmitDoStmt(const DoStmt &S);
+ void EmitForStmt(const ForStmt &S);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
+
+ llvm::Constant *getUnwindResumeFn();
+ llvm::Constant *getUnwindResumeOrRethrowFn();
+ void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+ void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+
+ void EmitCXXTryStmt(const CXXTryStmt &S);
+ void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
+
+ /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
+ /// checking code to guard against undefined behavior. This is only
+ /// suitable when we know that the address will be used to access the
+ /// object.
+ LValue EmitCheckedLValue(const Expr *E);
+
+ /// EmitToMemory - Change a scalar value from its value
+ /// representation to its in-memory representation.
+ llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitFromMemory - Change a scalar value from its memory
+ /// representation to its value representation.
+ llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation. The l-value must be a simple
+ /// l-value.
+ llvm::Value *EmitLoadOfScalar(LValue lvalue);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+  /// care to appropriately convert from the LLVM value representation
+  /// to the memory representation.
+ void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0, bool isInit=false);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+  /// care to appropriately convert from the LLVM value representation
+  /// to the memory representation. The l-value must be a simple
+ /// l-value. The isInit flag indicates whether this is an initialization.
+ /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
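+
+  // Usage sketch (illustrative; 'lv' and 'one' are hypothetical): a
+  // read-modify-write of a simple scalar l-value goes through the
+  // memory<->value conversions automatically.
+  //
+  //   llvm::Value *oldVal = EmitLoadOfScalar(lv);
+  //   llvm::Value *newVal = Builder.CreateAdd(oldVal, one);
+  //   EmitStoreOfScalar(newVal, lv, /*isInit=*/false);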
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V);
+ RValue EmitLoadOfBitfieldLValue(LValue LV);
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+  /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
+
+  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
+  /// as EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ llvm::Value **Result=0);
+
+ /// Emit an l-value for an assignment (simple or compound) of complex type.
+ LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+ LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
+
+ // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+ LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
+ LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ RValue EmitRValueForField(LValue LV, const FieldDecl *FD);
+
+ class ConstantEmission {
+ llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
+ ConstantEmission(llvm::Constant *C, bool isReference)
+ : ValueAndIsReference(C, isReference) {}
+ public:
+ ConstantEmission() {}
+ static ConstantEmission forReference(llvm::Constant *C) {
+ return ConstantEmission(C, true);
+ }
+ static ConstantEmission forValue(llvm::Constant *C) {
+ return ConstantEmission(C, false);
+ }
+
+ operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
+
+ bool isReference() const { return ValueAndIsReference.getInt(); }
+ LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
+ assert(isReference());
+ return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
+ refExpr->getType());
+ }
+
+ llvm::Constant *getValue() const {
+ assert(!isReference());
+ return ValueAndIsReference.getPointer();
+ }
+ };
+
+ ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+
+ RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
+ AggValueSlot slot = AggValueSlot::ignored());
+ LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForAnonRecordField(llvm::Value* Base,
+ const IndirectFieldDecl* Field,
+ unsigned CVRQualifiers);
+ LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
+
+ /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
+ /// if the Field is a reference, this will return the address of the reference
+ /// and not the address of the value stored in the reference.
+ LValue EmitLValueForFieldInitialization(LValue Base,
+ const FieldDecl* Field);
+
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+ LValue EmitLambdaLValue(const LambdaExpr *E);
+ LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+ LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+ LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
+
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ ///
+ /// \param TargetDecl - If given, the decl of the function in a direct call;
+ /// used to set attributes on the call (noreturn, etc.).
+ RValue EmitCall(const CGFunctionInfo &FnInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &Args,
+ const Decl *TargetDecl = 0,
+ llvm::Instruction **callOrInvoke = 0);
+
+ RValue EmitCall(QualType FnType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl = 0);
+ RValue EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue = ReturnValueSlot());
+
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name = "");
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ const Twine &Name = "");
+
+ llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ llvm::Type *Ty);
+ llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, llvm::Type *Ty);
+ llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty);
+
+ llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD);
+
+ RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+ RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ llvm::Value *This);
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E);
+
+ RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitNeonCall(llvm::Function *F,
+ SmallVectorImpl<llvm::Value*> &O,
+ const char *name,
+ unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
+ bool negateForRightShift);
+
+ llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
+ llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
+ llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
+ llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+
+ /// Retrieves the default cleanup kind for an ARC cleanup.
+ /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
+ CleanupKind getARCCleanupKind() {
+ return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
+ ? NormalAndEHCleanup : NormalCleanup;
+ }
+
+ // ARC primitives.
+ void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
+ void EmitARCDestroyWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
+ llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
+ bool ignored);
+ void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
+ void EmitARCRelease(llvm::Value *value, bool precise);
+ llvm::Value *EmitARCAutorelease(llvm::Value *value);
+ llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
+
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreAutoreleasing(const BinaryOperator *e);
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+
+ llvm::Value *EmitObjCThrowOperand(const Expr *expr);
+
+ llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+
+ llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
+ llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
+ llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+
+ static Destroyer destroyARCStrongImprecise;
+ static Destroyer destroyARCStrongPrecise;
+ static Destroyer destroyARCWeak;
+
+ void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
+ llvm::Value *EmitObjCAutoreleasePoolPush();
+ llvm::Value *EmitObjCMRRAutoreleasePoolPush();
+ void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
+ void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
+
+ /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+ /// expression. Will emit a temporary variable if E is not an LValue.
+  RValue EmitReferenceBindingToExpr(const Expr *E,
+ const NamedDecl *InitializedDecl);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+  llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy);
+
+
+ /// EmitAggExpr - Emit the computation of the specified expression
+ /// of aggregate type. The result is computed into the given slot,
+ /// which may be null to indicate that the value is not needed.
+ void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
+
+ /// EmitAggExprToLValue - Emit the computation of the specified expression of
+ /// aggregate type into a temporary LValue.
+ LValue EmitAggExprToLValue(const Expr *E);
+
+ /// EmitGCMemmoveCollectable - Emit special API for structs with object
+ /// pointers.
+ void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType Ty);
+
+ /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+ /// make sure it survives garbage collection until this point.
+ void EmitExtendGCLifetime(llvm::Value *object);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E,
+ bool IgnoreReal = false,
+ bool IgnoreImag = false);
+
+ /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+ /// of complex type, storing into the specified Value*.
+ void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+
+ /// StoreComplexToAddr - Store a complex number into the specified address.
+ void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+ /// LoadComplexFromAddr - Load a complex number from the specified address.
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
+
+ /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
+ /// a static local variable.
+ llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
+ /// global variable that has already been created for it. If the initializer
+ /// has a different type than GV does, this may free GV and return a different
+ /// one. Otherwise it just returns GV.
+ llvm::GlobalVariable *
+ AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+
+ /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
+ /// variable with global storage.
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ bool PerformInit);
+
+ /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
+ /// with the C++ runtime so that its destructor will be called at exit.
+ void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+ llvm::Constant *DeclPtr);
+
+ /// Emit code in this function to perform a guarded variable
+ /// initialization. Guarded initializations are used when it's not
+ /// possible to prove that an initialization will be done exactly
+ /// once, e.g. with a static local variable or a static data member
+ /// of a class template.
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
+
+ /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+ /// variables.
+ void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls);
+
+ /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
+ /// variables.
+ void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH,
+ llvm::Constant*> > &DtorsAndObjects);
+
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
+ void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
+
+ void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
+ const Expr *Exp);
+
+ void enterFullExpression(const ExprWithCleanups *E) {
+ if (E->getNumObjects() == 0) return;
+ enterNonTrivialFullExpression(E);
+ }
+ void enterNonTrivialFullExpression(const ExprWithCleanups *E);
+
+ void EmitCXXThrowExpr(const CXXThrowExpr *E);
+
+ void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
+
+ RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
+
+ //===--------------------------------------------------------------------===//
+ // Annotations Emission
+ //===--------------------------------------------------------------------===//
+
+ /// Emit an annotation call (intrinsic or builtin).
+ llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ llvm::StringRef AnnotationStr,
+ SourceLocation Location);
+
+ /// Emit local annotations for the local variable V, declared by D.
+ void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
+
+ /// Emit field annotations for the given field & value. Returns the
+ /// annotation result.
+ llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+  /// ContainsLabel - Return true if the statement contains a label in it. If
+  /// this statement is not executed normally, then not containing a label
+  /// means the code can simply be removed.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// containsBreak - Return true if the statement contains a break out of it.
+ /// If the statement (recursively) contains a switch or loop with a break
+ /// inside of it, this is fine.
+ static bool containsBreak(const Stmt *S);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+  /// constant folds, return true and set the boolean result in Result.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+  /// constant folds, return true and set the folded value.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock);
+
+ /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
+ /// generate a branch around the created basic block as necessary.
+ llvm::BasicBlock *getTrapBB();
+
+ /// EmitCallArg - Emit a single call argument.
+ void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
+
+ /// EmitDelegateCallArg - We are performing a delegate call; that
+ /// is, the current function is delegating to another one. Produce
+  /// an r-value suitable for passing the given parameter.
+ void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
+
+ /// SetFPAccuracy - Set the minimum required accuracy of the given floating
+ /// point operation, expressed as the maximum relative error in ulp.
+ void SetFPAccuracy(llvm::Value *Val, float Accuracy);
+
+private:
+ llvm::MDNode *getRangeForLoadFromType(QualType Ty);
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ /// \return The argument following the last expanded function
+ /// argument.
+ llvm::Function::arg_iterator
+ ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ llvm::Function::arg_iterator AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg Args. See
+ /// ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue Src,
+ SmallVector<llvm::Value*, 16> &Args,
+ llvm::FunctionType *IRFuncTy);
+
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+ llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr);
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// argument types of the function being called.
+ template<typename T>
+ void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+
+ // First, use the argument types that the type info knows about
+ if (CallArgTypeInfo) {
+ for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+ E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ QualType ArgType = *I;
+#ifndef NDEBUG
+ QualType ActualArgType = Arg->getType();
+ if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
+ QualType ActualBaseType =
+ ActualArgType->getAs<PointerType>()->getPointeeType();
+ QualType ArgBaseType =
+ ArgType->getAs<PointerType>()->getPointeeType();
+ if (ArgBaseType->isVariableArrayType()) {
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(ActualBaseType)) {
+ if (!VAT->getSizeExpr())
+ ActualArgType = ArgType;
+ }
+ }
+ }
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(ActualArgType).getTypePtr() &&
+ "type mismatch in call argument!");
+#endif
+ EmitCallArg(Args, *Arg, ArgType);
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg)
+ EmitCallArg(Args, *Arg, Arg->getType());
+ }
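+
+  // For illustration, a typical call site passes the callee's
+  // FunctionProtoType so that the known parameter types drive emission
+  // (a sketch; FPT and CE stand in for locals a real caller would have):
+  //
+  //   CallArgList Args;
+  //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());
+  //
+  // Arguments beyond FPT's declared parameters (the variadic tail) fall
+  // through to the second loop above and are emitted with their own types.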
+
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
+
+ void EmitDeclMetadata();
+
+ CodeGenModule::ByrefHelpers *
+ buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission);
+
+ void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
+
+ /// GetPointeeAlignment - Given an expression with a pointer type, find the
+ /// alignment of the type referenced by the pointer. Skip over implicit
+ /// casts.
+ unsigned GetPointeeAlignment(const Expr *Addr);
+
+ /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+ /// the alignment of the type referenced by the pointer. Skip over implicit
+ /// casts. Return the alignment as an llvm::Value.
+ llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
+};
+
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ /// Try to save the given value.
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
+
+ // Otherwise we need an alloca.
+ llvm::Value *alloca =
+ CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
+
+ return saved_type(alloca, true);
+ }
+
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ if (!value.getInt()) return value.getPointer();
+ return CGF.Builder.CreateLoad(value.getPointer());
+ }
+};
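+
+// For illustration, the intended save/restore pattern (a sketch; `value`
+// stands in for an llvm::Value computed before a conditional branch):
+//
+//   DominatingLLVMValue::saved_type saved =
+//       DominatingLLVMValue::save(CGF, value);  // spills to an alloca
+//                                               // only when required
+//   /* ...emit the conditionally-executed code... */
+//   llvm::Value *v = DominatingLLVMValue::restore(CGF, saved);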
+
+/// A partial specialization of DominatingPointer for llvm::Values that
+/// might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ Kind K;
+ saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGExprCXX.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..9a55c08
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,2667 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenTBAA.h"
+#include "CGCall.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGOpenCLRuntime.h"
+#include "TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace CodeGen;
+
+static const char AnnotationSection[] = "llvm.metadata";
+
+static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
+ switch (CGM.getContext().getTargetInfo().getCXXABI()) {
+ case CXXABI_ARM: return *CreateARMCXXABI(CGM);
+ case CXXABI_Itanium: return *CreateItaniumCXXABI(CGM);
+ case CXXABI_Microsoft: return *CreateMicrosoftCXXABI(CGM);
+ }
+
+ llvm_unreachable("invalid C++ ABI kind");
+}
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
+ llvm::Module &M, const llvm::TargetData &TD,
+ DiagnosticsEngine &diags)
+ : Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
+ TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ ABI(createCXXABI(*this)),
+ Types(*this),
+ TBAA(0),
+ VTables(*this), ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
+ DebugInfo(0), ARCData(0), NoObjCARCExceptionsMetadata(0),
+ RRData(0), CFConstantStringClassRef(0),
+ ConstantStringClassRef(0), NSConstantStringType(0),
+ VMContext(M.getContext()),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
+ BlockObjectAssign(0), BlockObjectDispose(0),
+ BlockDescriptorType(0), GenericBlockLiteralType(0) {
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ VoidTy = llvm::Type::getVoidTy(LLVMContext);
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ FloatTy = llvm::Type::getFloatTy(LLVMContext);
+ DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
+ PointerAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+
+ if (LangOpts.ObjC1)
+ createObjCRuntime();
+ if (LangOpts.OpenCL)
+ createOpenCLRuntime();
+ if (LangOpts.CUDA)
+ createCUDARuntime();
+
+ // Enable TBAA unless it's suppressed.
+ if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
+ TBAA = new CodeGenTBAA(Context, VMContext, getLangOpts(),
+ ABI.getMangleContext());
+
+ // If debug info or coverage generation is enabled, create the CGDebugInfo
+ // object.
+ if (CodeGenOpts.DebugInfo || CodeGenOpts.EmitGcovArcs ||
+ CodeGenOpts.EmitGcovNotes)
+ DebugInfo = new CGDebugInfo(*this);
+
+ Block.GlobalUniqueCount = 0;
+
+ if (C.getLangOpts().ObjCAutoRefCount)
+ ARCData = new ARCEntrypoints();
+ RRData = new RREntrypoints();
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete ObjCRuntime;
+ delete OpenCLRuntime;
+ delete CUDARuntime;
+ delete TheTargetCodeGenInfo;
+ delete &ABI;
+ delete TBAA;
+ delete DebugInfo;
+ delete ARCData;
+ delete RRData;
+}
+
+void CodeGenModule::createObjCRuntime() {
+ if (!LangOpts.NeXTRuntime)
+ ObjCRuntime = CreateGNUObjCRuntime(*this);
+ else
+ ObjCRuntime = CreateMacObjCRuntime(*this);
+}
+
+void CodeGenModule::createOpenCLRuntime() {
+ OpenCLRuntime = new CGOpenCLRuntime(*this);
+}
+
+void CodeGenModule::createCUDARuntime() {
+ CUDARuntime = CreateNVCUDARuntime(*this);
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ EmitCXXGlobalInitFunc();
+ EmitCXXGlobalDtorFunc();
+ if (ObjCRuntime)
+ if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitGlobalAnnotations();
+ EmitLLVMUsed();
+
+ SimplifyPersonality();
+
+ if (getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+
+ if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
+ EmitCoverageFile();
+
+ if (DebugInfo)
+ DebugInfo->finalize();
+}
+
+void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfo(QTy);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfoForVTablePtr();
+}
+
+void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo) {
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
+}
+
+bool CodeGenModule::isTargetDarwin() const {
+ return getContext().getTargetInfo().getTriple().isOSDarwin();
+}
+
+void CodeGenModule::Error(SourceLocation loc, StringRef error) {
+ unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, error);
+ getDiags().Report(Context.getFullLoc(loc), diagID);
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
+
+llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
+ return llvm::ConstantInt::get(SizeTy, size.getQuantity());
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ // Set visibility for definitions.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.visibilityExplicit() || !GV->hasAvailableExternallyLinkage())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
+}
+
+/// Set the symbol visibility of type information (vtable and RTTI)
+/// associated with the given type.
+void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
+ const CXXRecordDecl *RD,
+ TypeVisibilityKind TVK) const {
+ setGlobalVisibility(GV, RD);
+
+ if (!CodeGenOpts.HiddenWeakVTables)
+ return;
+
+ // We never want to drop the visibility for RTTI names.
+ if (TVK == TVK_ForRTTIName)
+ return;
+
+ // We want to drop the visibility to hidden for weak type symbols.
+ // This isn't possible if there might be unresolved references
+ // elsewhere that rely on this symbol being visible.
+
+ // This should be kept roughly in sync with setThunkVisibility
+ // in CGVTables.cpp.
+
+ // Preconditions.
+ if (GV->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage ||
+ GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ // Don't override an explicit visibility attribute.
+ if (RD->getExplicitVisibility())
+ return;
+
+ switch (RD->getTemplateSpecializationKind()) {
+ // We have to disable the optimization if this is an EI definition
+ // because there might be EI declarations in other shared objects.
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ // Every use of a non-template class's type information has to emit it.
+ case TSK_Undeclared:
+ break;
+
+ // In theory, implicit instantiations can ignore the possibility of
+ // an explicit instantiation declaration because there necessarily
+ // must be an EI definition somewhere with default visibility. In
+ // practice, it's possible to have an explicit instantiation for
+ // an arbitrary template class, and linkers aren't necessarily able
+ // to deal with mixed-visibility symbols.
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CodeGenOpts.HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's a key function, there may be translation units
+ // that don't have the key function's definition. But ignore
+ // this if we're emitting RTTI under -fno-rtti.
+  if (TVK == TVK_ForRTTI || LangOpts.RTTI) {
+ if (Context.getKeyFunction(RD))
+ return;
+ }
+
+ // Otherwise, drop the visibility to hidden.
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setUnnamedAddr(true);
+}
+
+StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+ StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
+ if (!Str.empty())
+ return Str;
+
+ if (!getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
+ IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "Attempt to mangle unnamed decl.");
+
+ Str = II->getName();
+ return Str;
+ }
+
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
+ getCXXABI().getMangleContext().mangleBlock(BD, Out);
+ else
+ getCXXABI().getMangleContext().mangleName(ND, Out);
+
+ // Allocate space for the mangled name.
+ Out.flush();
+ size_t Length = Buffer.size();
+ char *Name = MangledNamesAllocator.Allocate<char>(Length);
+ std::copy(Buffer.begin(), Buffer.end(), Name);
+
+ Str = StringRef(Name, Length);
+
+ return Str;
+}
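+
+// For example, under the Itanium ABI `void foo(int)` mangles to "_Z3fooi"
+// and a constructor gets a per-variant name such as "_ZN1SC1Ev" for S::S();
+// an extern "C" function takes the early path above and keeps its plain
+// identifier.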
+
+void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ MangleContext &MangleCtx = getCXXABI().getMangleContext();
+ const Decl *D = GD.getDecl();
+ llvm::raw_svector_ostream Out(Buffer.getBuffer());
+ if (D == 0)
+ MangleCtx.mangleGlobalBlock(BD, Out);
+ else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
+ else
+ MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
+}
+
+llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
+ return getModule().getNamedValue(Name);
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()* }.
+ llvm::StructType *CtorStructTy =
+ llvm::StructType::get(Int32Ty, llvm::PointerType::getUnqual(CtorFTy), NULL);
+
+ // Construct the constructor and destructor arrays.
+ SmallVector<llvm::Constant*, 8> Ctors;
+ for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ llvm::Constant *S[] = {
+ llvm::ConstantInt::get(Int32Ty, I->second, false),
+ llvm::ConstantExpr::getBitCast(I->first, CtorPFTy)
+ };
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(TheModule, AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName);
+ }
+}
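+
+// The emitted array looks roughly like this (one entry, default priority):
+//
+//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+//     [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+//
+// where @_GLOBAL__I_a is a typical name for a C++ global-initialization
+// function.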
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
+ GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
+
+ if (Linkage == GVA_Internal)
+ return llvm::Function::InternalLinkage;
+
+ if (D->hasAttr<DLLExportAttr>())
+ return llvm::Function::DLLExportLinkage;
+
+ if (D->hasAttr<WeakAttr>())
+ return llvm::Function::WeakAnyLinkage;
+
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ if (Linkage == GVA_C99Inline)
+ return llvm::Function::AvailableExternallyLinkage;
+
+ // Note that Apple's kernel linker doesn't support symbol
+ // coalescing, so we need to avoid linkonce and weak linkages there.
+ // Normally, this means we just map to internal, but for explicit
+ // instantiations we'll map to external.
+
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+ return !Context.getLangOpts().AppleKext
+ ? llvm::Function::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
+
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ if (Linkage == GVA_ExplicitTemplateInstantiation)
+ return !Context.getLangOpts().AppleKext
+ ? llvm::Function::WeakODRLinkage
+ : llvm::Function::ExternalLinkage;
+
+ // Otherwise, we have strong external linkage.
+ assert(Linkage == GVA_StrongExternal);
+ return llvm::Function::ExternalLinkage;
+}
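+
+// For example:
+//   static void f() {}        // GVA_Internal            -> internal
+//   inline void g() {}        // C99: GVA_C99Inline      -> available_externally
+//                             // C++: GVA_CXXInline      -> linkonce_odr
+//   template void h<int>();   // explicit instantiation  -> weak_odr
+// (with the AppleKext downgrades applied as noted above).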
+
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ unsigned CallingConv;
+ AttributeListType AttributeList;
+ ConstructAttributeList(Info, D, AttributeList, CallingConv);
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.size()));
+ F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+}
+
+/// Determines whether the language options require us to model
+/// unwind exceptions. We treat -fexceptions as mandating this
+/// except under the fragile ObjC ABI with only ObjC exceptions
+/// enabled. This means, for example, that C with -fexceptions
+/// enables this.
+static bool hasUnwindExceptions(const LangOptions &LangOpts) {
+ // If exceptions are completely disabled, obviously this is false.
+ if (!LangOpts.Exceptions) return false;
+
+ // If C++ exceptions are enabled, this is true.
+ if (LangOpts.CXXExceptions) return true;
+
+ // If ObjC exceptions are enabled, this depends on the ABI.
+ if (LangOpts.ObjCExceptions) {
+ if (!LangOpts.ObjCNonFragileABI) return false;
+ }
+
+ return true;
+}
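+
+// For example, plain C built with -fexceptions models unwinding, while
+// Objective-C built with only -fobjc-exceptions under the fragile ABI does
+// not: that ABI implements exceptions with setjmp/longjmp rather than
+// stack unwinding.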
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ if (CodeGenOpts.UnwindTables)
+ F->setHasUWTable();
+
+ if (!hasUnwindExceptions(LangOpts))
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ if (D->hasAttr<NakedAttr>()) {
+ // Naked implies noinline: we should not be inlining such functions.
+ F->addFnAttr(llvm::Attribute::Naked);
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ if (D->hasAttr<NoInlineAttr>())
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // (noinline wins over always_inline, and we can't specify both in IR)
+ if (D->hasAttr<AlwaysInlineAttr>() &&
+ !F->hasFnAttr(llvm::Attribute::NoInline))
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ F->setUnnamedAddr(true);
+
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ F->addFnAttr(llvm::Attribute::StackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ F->addFnAttr(llvm::Attribute::StackProtectReq);
+
+ if (LangOpts.AddressSanitizer) {
+ // When AddressSanitizer is enabled, set AddressSafety attribute
+ // unless __attribute__((no_address_safety_analysis)) is used.
+ if (!D->hasAttr<NoAddressSafetyAnalysisAttr>())
+ F->addFnAttr(llvm::Attribute::AddressSafety);
+ }
+
+ unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
+ if (alignment)
+ F->setAlignment(alignment);
+
+ // C++ ABI requires 2-byte alignment for member functions.
+ if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(2);
+}
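+
+// For example, given:
+//   __attribute__((naked)) void f(void);          // -> naked + noinline
+//   __attribute__((always_inline)) void g(void);  // -> alwaysinline
+//                                                 //    (unless noinline won)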
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ setGlobalVisibility(GV, ND);
+ else
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+
+ if (D->hasAttr<UsedAttr>())
+ AddUsedGlobal(GV);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction) {
+ if (unsigned IID = F->getIntrinsicID()) {
+ // If this is an intrinsic function, set the function's attributes
+ // to the intrinsic's attributes.
+ F->setAttributes(llvm::Intrinsic::getAttributes((llvm::Intrinsic::ID)IID));
+ return;
+ }
+
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ if (FD->hasAttr<DLLImportAttr>()) {
+ F->setLinkage(llvm::Function::DLLImportLinkage);
+ } else if (FD->hasAttr<WeakAttr>() ||
+ FD->isWeakImported()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else {
+ F->setLinkage(llvm::Function::ExternalLinkage);
+
+ NamedDecl::LinkageInfo LV = FD->getLinkageAndVisibility();
+ if (LV.linkage() == ExternalLinkage && LV.visibilityExplicit()) {
+ F->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
+ }
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.push_back(GV);
+}
+
+void CodeGenModule::EmitLLVMUsed() {
+ // Don't create llvm.used if there is no need.
+ if (LLVMUsed.empty())
+ return;
+
+ // Convert LLVMUsed to what ConstantArray needs.
+ SmallVector<llvm::Constant*, 8> UsedArray;
+ UsedArray.resize(LLVMUsed.size());
+ for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
+ Int8PtrTy);
+ }
+
+ if (UsedArray.empty())
+ return;
+ llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), ATy, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray),
+ "llvm.used");
+
+ GV->setSection("llvm.metadata");
+}
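+
+// The result looks roughly like:
+//
+//   @llvm.used = appending global [1 x i8*]
+//     [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"
+//
+// which keeps @f alive through optimization even if nothing else in the
+// module references it.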
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+
+ while (!DeferredDeclsToEmit.empty() || !DeferredVTables.empty()) {
+ if (!DeferredVTables.empty()) {
+ const CXXRecordDecl *RD = DeferredVTables.back();
+ DeferredVTables.pop_back();
+ getVTables().GenerateClassData(getVTableLinkage(RD), RD);
+ continue;
+ }
+
+ GlobalDecl D = DeferredDeclsToEmit.back();
+ DeferredDeclsToEmit.pop_back();
+
+ // Check to see if we've already emitted this. This is necessary
+ // for a couple of reasons: first, decls can end up in the
+ // deferred-decls queue multiple times, and second, decls can end
+ // up with definitions in unusual ways (e.g. by an extern inline
+ // function acquiring a strong function redefinition). Just
+ // ignore these cases.
+ //
+ // TODO: That said, looking this up multiple times is very wasteful.
+ StringRef Name = getMangledName(D);
+ llvm::GlobalValue *CGRef = GetGlobalValue(Name);
+ assert(CGRef && "Deferred decl wasn't referenced?");
+
+ if (!CGRef->isDeclaration())
+ continue;
+
+ // GlobalAlias::isDeclaration() defers to the aliasee, but for our
+ // purposes an alias counts as a definition.
+ if (isa<llvm::GlobalAlias>(CGRef))
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D);
+ }
+}
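+
+// For example, emitting a static function A may reference another static
+// function B for the first time; B's mangled name is materialized, B moves
+// onto DeferredDeclsToEmit, and the next loop iteration emits it, so the
+// worklist converges to a fixed point.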
+
+void CodeGenModule::EmitGlobalAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
+ Annotations[0]->getType(), Annotations.size()), Annotations);
+ llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(),
+ Array->getType(), false, llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations");
+ gv->setSection(AnnotationSection);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationString(llvm::StringRef Str) {
+ llvm::StringMap<llvm::Constant*>::iterator i = AnnotationStrings.find(Str);
+ if (i != AnnotationStrings.end())
+ return i->second;
+
+ // Not found yet, create a new global.
+ llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
+ llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(), s->getType(),
+ true, llvm::GlobalValue::PrivateLinkage, s, ".str");
+ gv->setSection(AnnotationSection);
+ gv->setUnnamedAddr(true);
+ AnnotationStrings[Str] = gv;
+ return gv;
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isValid())
+ return EmitAnnotationString(PLoc.getFilename());
+ return EmitAnnotationString(SM.getBufferName(Loc));
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(L);
+ unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
+ SM.getExpansionLineNumber(L);
+ return llvm::ConstantInt::get(Int32Ty, LineNo);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L) {
+ // Get the globals for file name, annotation, and the line number.
+ llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
+ *UnitGV = EmitAnnotationUnit(L),
+ *LineNoCst = EmitAnnotationLineNo(L);
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
+ LineNoCst
+ };
+ return llvm::ConstantStruct::getAnon(Fields);
+}
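+
+// Each llvm.global.annotations entry is therefore an anonymous struct of
+// the shape { i8* global, i8* annotation, i8* unit-file-name, i32 line }.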
+
+void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
+ llvm::GlobalValue *GV) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // Get the struct elements for these annotations.
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+ Annotations.push_back(EmitAnnotateAttr(GV, *ai, D->getLocation()));
+}
+
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified.
+ if (LangOpts.EmitAllDecls)
+ return false;
+
+ return !getContext().DeclMustBeEmitted(Global);
+}
+
+llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ const AliasAttr *AA = VD->getAttr<AliasAttr>();
+ assert(AA && "No alias?");
+
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+
+ // See if there is already something with the target's name in the module.
+ llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+ if (!Entry) {
+ llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
+ }
+
+ return Aliasee;
+}
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const ValueDecl *Global = cast<ValueDecl>(GD.getDecl());
+
+ // Weak references don't produce any output by themselves.
+ if (Global->hasAttr<WeakRefAttr>())
+ return;
+
+ // If this is an alias definition (which otherwise looks like a declaration)
+ // emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(GD);
+
+ // If this is CUDA, be selective about which declarations we emit.
+ if (LangOpts.CUDA) {
+ if (CodeGenOpts.CUDAIsDevice) {
+ if (!Global->hasAttr<CUDADeviceAttr>() &&
+ !Global->hasAttr<CUDAGlobalAttr>() &&
+ !Global->hasAttr<CUDAConstantAttr>() &&
+ !Global->hasAttr<CUDASharedAttr>())
+ return;
+ } else {
+ if (!Global->hasAttr<CUDAHostAttr>() && (
+ Global->hasAttr<CUDADeviceAttr>() ||
+ Global->hasAttr<CUDAConstantAttr>() ||
+ Global->hasAttr<CUDASharedAttr>()))
+ return;
+ }
+ }
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->doesThisDeclarationHaveABody()) {
+ if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+ return;
+
+ const FunctionDecl *InlineDefinition = 0;
+ FD->getBody(InlineDefinition);
+
+ StringRef MangledName = getMangledName(GD);
+ DeferredDecls.erase(MangledName);
+ EmitGlobalDefinition(InlineDefinition);
+ return;
+ }
+ } else {
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
+ return;
+ }
+
+ // Defer code generation when possible if this is a static definition, inline
+ // function etc. These we only want to emit if they are used.
+ if (!MayDeferGeneration(Global)) {
+ // Emit the definition if it can't be deferred.
+ EmitGlobalDefinition(GD);
+ return;
+ }
+
+ // If we're deferring emission of a C++ variable with an
+ // initializer, remember the order in which it appeared in the file.
+ if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
+ cast<VarDecl>(Global)->hasInit()) {
+ DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
+ CXXGlobalInits.push_back(0);
+ }
+
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ StringRef MangledName = getMangledName(GD);
+ if (GetGlobalValue(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+}
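+
+// For example, an unreferenced `static void helper(void) {}` is deferrable:
+// it parks in DeferredDecls under its mangled name and only moves to
+// DeferredDeclsToEmit (and hence gets emitted) if later-emitted code
+// references that name.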
+
+namespace {
+ struct FunctionIsDirectlyRecursive :
+ public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
+ const StringRef Name;
+ const Builtin::Context &BI;
+ bool Result;
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
+ Name(N), BI(C), Result(false) {
+ }
+ typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
+
+ bool TraverseCallExpr(CallExpr *E) {
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (!FD)
+ return true;
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel()) {
+ Result = true;
+ return false;
+ }
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (!BuiltinID)
+ return true;
+ StringRef BuiltinName = BI.GetName(BuiltinID);
+ if (BuiltinName.startswith("__builtin_") &&
+ Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
+ Result = true;
+ return false;
+ }
+ return true;
+ }
+ };
+}
+
+// isTriviallyRecursive - Check if this function calls another
+// decl that, because of the asm attribute or the other decl being a builtin,
+// ends up pointing to itself.
+bool
+CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
+ StringRef Name;
+ if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
+ // asm labels are a special kind of mangling we have to support.
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return false;
+ Name = Attr->getLabel();
+ } else {
+ Name = FD->getName();
+ }
+
+ FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
+ Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
+ return Walker.Result;
+}
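+
+// For illustration, a sketch of the PR9614 pattern in C:
+//
+//   int wcall(int) __asm__("wide");
+//   extern inline int wide(int c) { return wcall(c); }
+//
+// "wide" is an available-externally style definition whose body calls the
+// symbol "wide" -- itself -- so emitting the inline body would be wrong.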
+
+bool
+CodeGenModule::shouldEmitFunction(const FunctionDecl *F) {
+ if (getFunctionLinkage(F) != llvm::Function::AvailableExternallyLinkage)
+ return true;
+ if (CodeGenOpts.OptimizationLevel == 0 &&
+ !F->hasAttr<AlwaysInlineAttr>())
+ return false;
+ // PR9614. Avoid cases where the source code is lying to us. An available
+ // externally function should have an equivalent function somewhere else,
+ // but a function that calls itself is clearly not equivalent to the real
+ // implementation.
+ // This happens in glibc's btowc and in some configure checks.
+ return !isTriviallyRecursive(F);
+}
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+
+ PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
+ Context.getSourceManager(),
+ "Generating code for declaration");
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // At -O0, don't generate IR for functions with available_externally
+ // linkage.
+ if (!shouldEmitFunction(Function))
+ return;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ // Make sure to emit the definition(s) before we emit the thunks.
+ // This is necessary for the generation of certain thunks.
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
+ EmitCXXConstructor(CD, GD.getCtorType());
+ else if (const CXXDestructorDecl *DD =dyn_cast<CXXDestructorDecl>(Method))
+ EmitCXXDestructor(DD, GD.getDtorType());
+ else
+ EmitGlobalFunctionDefinition(GD);
+
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
+
+ return;
+ }
+
+ return EmitGlobalFunctionDefinition(GD);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return EmitGlobalVarDefinition(VD);
+
+ llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the function when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
+ GlobalDecl D, bool ForVTable,
+ llvm::Attributes ExtraAttrs) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
+ if (FD && !FD->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (Entry->getType()->getElementType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
+ }
+
+ // This function doesn't have a complete type (for example, the return
+ // type is an incomplete struct). Use a fake type instead, and make
+ // sure not to try to set attributes.
+ bool IsIncompleteFunction = false;
+
+ llvm::FunctionType *FTy;
+ if (isa<llvm::FunctionType>(Ty)) {
+ FTy = cast<llvm::FunctionType>(Ty);
+ } else {
+ FTy = llvm::FunctionType::get(VoidTy, false);
+ IsIncompleteFunction = true;
+ }
+
+ llvm::Function *F = llvm::Function::Create(FTy,
+ llvm::Function::ExternalLinkage,
+ MangledName, &getModule());
+ assert(F->getName() == MangledName && "name was uniqued!");
+ if (D.getDecl())
+ SetFunctionAttributes(D, F, IsIncompleteFunction);
+ if (ExtraAttrs != llvm::Attribute::None)
+ F->addFnAttr(ExtraAttrs);
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+
+ // Otherwise, there are cases we have to worry about where we're
+ // using a declaration for which we must emit a definition but where
+ // we might not find a top-level definition:
+ // - member functions defined inline in their classes
+ // - friend functions defined inline in some class
+ // - special member functions with implicit definitions
+ // If we ever change our AST traversal to walk into class methods,
+ // this will be unnecessary.
+ //
+    // We also don't emit a definition for a function if it's going to be
+    // an entry in a vtable, unless it's already marked as used.
+ } else if (getLangOpts().CPlusPlus && D.getDecl()) {
+ // Look for a declaration that's lexically in a record.
+ const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
+ do {
+ if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ if (FD->isImplicit() && !ForVTable) {
+ assert(FD->isUsed() && "Sema didn't mark implicit function as used!");
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ } else if (FD->doesThisDeclarationHaveABody()) {
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ }
+ }
+ FD = FD->getPreviousDecl();
+ } while (FD);
+ }
+
+ // Make sure the result is of the requested type.
+ if (!IsIncompleteFunction) {
+ assert(F->getType()->getElementType() == Ty);
+ return F;
+ }
+
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(F, PTy);
+}
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ llvm::Type *Ty,
+ bool ForVTable) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty)
+ Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
+
+ StringRef MangledName = getMangledName(GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
+ StringRef Name,
+ llvm::Attributes ExtraAttrs) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ ExtraAttrs);
+}
+
+/// isTypeConstant - Determine whether an object of this type can be emitted
+/// as a constant.
+///
+/// If ExcludeCtor is true, the duration when the object's constructor runs
+/// will not be considered. The caller will need to verify that the object is
+/// not written to during its construction.
+bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
+ if (!Ty.isConstant(Context) && !Ty->isReferenceType())
+ return false;
+
+ if (Context.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *Record
+ = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
+ return ExcludeCtor && !Record->hasMutableFields() &&
+ Record->hasTrivialDestructor();
+ }
+
+ return true;
+}
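+
+// For example:
+//   const int x = 42;             // true: can be emitted as a constant
+//   struct S { mutable int m; };
+//   extern const S s;             // false: the mutable field may be written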
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the global when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *Ty,
+ const VarDecl *D,
+ bool UnnamedAddr) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ if (D && !D->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (UnnamedAddr)
+ Entry->setUnnamedAddr(true);
+
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, MangledName, 0,
+ false, Ty->getAddressSpace());
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(isTypeConstant(D->getType(), false));
+
+ // Set linkage and visibility in case we never see a definition.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.linkage() != ExternalLinkage) {
+ // Don't set internal linkage on declarations.
+ } else {
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::DLLImportLinkage);
+ else if (D->hasAttr<WeakAttr>() || D->isWeakImported())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ // Set visibility on a declaration only if it's explicit.
+ if (LV.visibilityExplicit())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
+
+ GV->setThreadLocal(D->isThreadSpecified());
+ }
+
+ return GV;
+}
+
+
+llvm::GlobalVariable *
+CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
+ llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+    // Because of C++ name mangling, the only way we can end up with an
+    // already existing global with the same name is if it has been declared
+    // extern "C".
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ return GV;
+}
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be created with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (Ty == 0)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
+
+ StringRef MangledName = getMangledName(D);
+ return GetOrCreateLLVMGlobal(MangledName, PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name) {
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
+ true);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (MayDeferGeneration(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ StringRef MangledName = getMangledName(D);
+ if (!GetGlobalValue(MangledName)) {
+ DeferredDecls[MangledName] = D;
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
+
+void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
+ if (DefinitionRequired)
+ getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+}
+
+llvm::GlobalVariable::LinkageTypes
+CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
+ if (RD->getLinkage() != ExternalLinkage)
+ return llvm::GlobalVariable::InternalLinkage;
+
+ if (const CXXMethodDecl *KeyFunction
+ = RD->getASTContext().getKeyFunction(RD)) {
+ // If this class has a key function, use that to determine the linkage of
+ // the vtable.
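+ // (Per the Itanium C++ ABI, the key function is roughly the first
+ // non-inline, non-pure virtual member function; the vtable is emitted in
+ // the translation unit that defines it.)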
+ const FunctionDecl *Def = 0;
+ if (KeyFunction->hasBody(Def))
+ KeyFunction = cast<CXXMethodDecl>(Def);
+
+ switch (KeyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ // When compiling with optimizations turned on, we emit all vtables,
+ // even if the key function is not defined in the current translation
+ // unit. If this is the case, use available_externally linkage.
+ if (!Def && CodeGenOpts.OptimizationLevel)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
+
+ if (KeyFunction->isInlined())
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ return llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::WeakODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+ }
+ }
+
+ if (Context.getLangOpts().AppleKext)
+ return llvm::Function::InternalLinkage;
+
+ switch (RD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ case TSK_ExplicitInstantiationDeclaration:
+ return llvm::GlobalVariable::LinkOnceODRLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
+}
+
+CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
+ return Context.toCharUnitsFromBits(
+ TheTargetData.getTypeStoreSizeInBits(Ty));
+}
+
+llvm::Constant *
+CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *rawInit) {
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups;
+ if (const ExprWithCleanups *withCleanups =
+ dyn_cast<ExprWithCleanups>(rawInit)) {
+ cleanups = withCleanups->getObjects();
+ rawInit = withCleanups->getSubExpr();
+ }
+
+ const InitListExpr *init = dyn_cast<InitListExpr>(rawInit);
+ if (!init || !init->initializesStdInitializerList() ||
+ init->getNumInits() == 0)
+ return 0;
+
+ ASTContext &ctx = getContext();
+ unsigned numInits = init->getNumInits();
+ // FIXME: This check is here because we would otherwise silently miscompile
+ // nested global std::initializer_lists. Better would be to have a real
+ // implementation.
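+ // (A nested case would be, e.g.,
+ // "std::initializer_list<std::initializer_list<int>> x = {{1}, {2}};",
+ // which the loop below rejects.)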
+ for (unsigned i = 0; i < numInits; ++i) {
+ const InitListExpr *inner = dyn_cast<InitListExpr>(init->getInit(i));
+ if (inner && inner->initializesStdInitializerList()) {
+ ErrorUnsupported(inner, "nested global std::initializer_list");
+ return 0;
+ }
+ }
+
+ // Synthesize a fake VarDecl for the array and initialize that.
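+ // For example (a sketch): given "std::initializer_list<int> x = {1, 2, 3};",
+ // this emits a static array "x__initlist" of three ints and then points x's
+ // start field (and end or size field) at it.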
+ QualType elementType = init->getInit(0)->getType();
+ llvm::APInt numElements(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType arrayType = ctx.getConstantArrayType(elementType, numElements,
+ ArrayType::Normal, 0);
+
+ IdentifierInfo *name = &ctx.Idents.get(D->getNameAsString() + "__initlist");
+ TypeSourceInfo *sourceInfo = ctx.getTrivialTypeSourceInfo(
+ arrayType, D->getLocation());
+ VarDecl *backingArray = VarDecl::Create(ctx, const_cast<DeclContext*>(
+ D->getDeclContext()),
+ D->getLocStart(), D->getLocation(),
+ name, arrayType, sourceInfo,
+ SC_Static, SC_Static);
+
+ // Now clone the InitListExpr to initialize the array instead.
+ // Incredible hack: we want to use the existing InitListExpr here, so we need
+ // to tell it that it no longer initializes a std::initializer_list.
+ Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(),
+ const_cast<InitListExpr*>(init)->getInits(),
+ init->getNumInits(),
+ init->getRBraceLoc());
+ arrayInit->setType(arrayType);
+
+ if (!cleanups.empty())
+ arrayInit = ExprWithCleanups::Create(ctx, arrayInit, cleanups);
+
+ backingArray->setInit(arrayInit);
+
+ // Emit the definition of the array.
+ EmitGlobalVarDefinition(backingArray);
+
+ // Inspect the initializer list to validate it and determine its type.
+ // FIXME: doing this every time is probably inefficient; caching would be nice
+ RecordDecl *record = init->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ QualType elementPtr = ctx.getPointerType(elementType.withConst());
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ ++field;
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ bool isStartEnd = false;
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+ isStartEnd = true;
+ } else if (!ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+
+ // Now build an APValue representing the std::initializer_list.
+ APValue initListValue(APValue::UninitStruct(), 0, 2);
+ APValue &startField = initListValue.getStructField(0);
+ APValue::LValuePathEntry startOffsetPathEntry;
+ startOffsetPathEntry.ArrayIndex = 0;
+ startField = APValue(APValue::LValueBase(backingArray),
+ CharUnits::fromQuantity(0),
+ llvm::makeArrayRef(startOffsetPathEntry),
+ /*IsOnePastTheEnd=*/false, 0);
+
+ if (isStartEnd) {
+ APValue &endField = initListValue.getStructField(1);
+ APValue::LValuePathEntry endOffsetPathEntry;
+ endOffsetPathEntry.ArrayIndex = numInits;
+ endField = APValue(APValue::LValueBase(backingArray),
+ ctx.getTypeSizeInChars(elementType) * numInits,
+ llvm::makeArrayRef(endOffsetPathEntry),
+ /*IsOnePastTheEnd=*/true, 0);
+ } else {
+ APValue &sizeField = initListValue.getStructField(1);
+ sizeField = APValue(llvm::APSInt(numElements));
+ }
+
+ // Emit the constant for the initializer_list.
+ llvm::Constant *llvmInit =
+ EmitConstantValueForMemory(initListValue, D->getType());
+ assert(llvmInit && "failed to initialize as constant");
+ return llvmInit;
+}
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = 0;
+ QualType ASTTy = D->getType();
+ CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ bool NeedsGlobalCtor = false;
+ bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
+
+ const VarDecl *InitDecl;
+ const Expr *InitExpr = D->getAnyInitializer(InitDecl);
+
+ if (!InitExpr) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+ // exists. A use may still exist, however, so we may still need
+ // to do a RAUW.
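+ //
+ // For example, in C:
+ // int x; // tentative definition
+ // int x; // still tentative; one zero-initialized object is emitted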
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = EmitNullConstant(D->getType());
+ } else {
+ // If this is a std::initializer_list, emit the special initializer.
+ Init = MaybeEmitGlobalStdInitializerListInitializer(D, InitExpr);
+ // An empty init list will perform zero-initialization, which happens
+ // to be exactly what we want.
+ // FIXME: It does so in a global constructor, which is *not* what we
+ // want.
+
+ if (!Init)
+ Init = EmitConstantInit(*InitDecl);
+ if (!Init) {
+ QualType T = InitExpr->getType();
+ if (D->getType()->isReferenceType())
+ T = D->getType();
+
+ if (getLangOpts().CPlusPlus) {
+ Init = EmitNullConstant(T);
+ NeedsGlobalCtor = true;
+ } else {
+ ErrorUnsupported(D, "static initializer");
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ } else {
+ // We don't need an initializer, so remove the entry for the delayed
+ // initializer position (just in case this entry was delayed) if we
+ // also don't need to register a destructor.
+ if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
+ DelayedCXXInitPosition.erase(D);
+ }
+ }
+
+ llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+ // all zero index gep.
+ CE->getOpcode() == llvm::Instruction::GetElementPtr);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (GV == 0 ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() !=
+ getContext().getTargetAddressSpace(ASTTy)) {
+
+ // Move the old entry aside so that we'll create a new one.
+ Entry->setName(StringRef());
+
+ // Make a new global with the correct type; this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, GV);
+
+ GV->setInitializer(Init);
+
+ // If it is safe to mark the global 'constant', do so now.
+ GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
+ isTypeConstant(D->getType(), true));
+
+ GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
+ // Set the llvm linkage type as appropriate.
+ llvm::GlobalValue::LinkageTypes Linkage =
+ GetLLVMLinkageVarDefinition(D, GV);
+ GV->setLinkage(Linkage);
+ if (Linkage == llvm::GlobalVariable::CommonLinkage)
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit the initializer function if necessary.
+ if (NeedsGlobalCtor || NeedsGlobalDtor)
+ EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitGlobalVariable(GV, D);
+}
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
+ GVALinkage Linkage = getContext().GetGVALinkageForVariable(D);
+ if (Linkage == GVA_Internal)
+ return llvm::Function::InternalLinkage;
+ else if (D->hasAttr<DLLImportAttr>())
+ return llvm::Function::DLLImportLinkage;
+ else if (D->hasAttr<DLLExportAttr>())
+ return llvm::Function::DLLExportLinkage;
+ else if (D->hasAttr<WeakAttr>()) {
+ if (GV->isConstant())
+ return llvm::GlobalVariable::WeakODRLinkage;
+ else
+ return llvm::GlobalVariable::WeakAnyLinkage;
+ } else if (Linkage == GVA_TemplateInstantiation ||
+ Linkage == GVA_ExplicitTemplateInstantiation)
+ return llvm::GlobalVariable::WeakODRLinkage;
+ else if (!getLangOpts().CPlusPlus &&
+ ((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
+ D->getAttr<CommonAttr>()) &&
+ !D->hasExternalStorage() && !D->getInit() &&
+ !D->getAttr<SectionAttr>() && !D->isThreadSpecified() &&
+ !D->getAttr<WeakImportAttr>()) {
+ // Thread local vars aren't considered common linkage.
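+ // For example, a file-scope C declaration such as "int x;" (no initializer,
+ // no extern) reaches this point and is given common linkage.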
+ return llvm::GlobalVariable::CommonLinkage;
+ }
+ return llvm::GlobalVariable::ExternalLinkage;
+}
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
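+///
+/// A sketch of the C scenario this handles:
+/// int foo(); // no-prototype declaration
+/// int bar(void) { return foo(); } // call through the proto-less type
+/// int foo() { return 0; } // definition; its IR type differs
+/// The call in bar is rewritten to call the new definition directly instead
+/// of going through a bitcast.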
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
+ if (OldFn == 0) return;
+
+ llvm::Type *NewRetTy = NewFn->getReturnType();
+ SmallVector<llvm::Value*, 4> ArgList;
+
+ for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
+ UI != E; ) {
+ // TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ llvm::Value::use_iterator I = UI++; // Increment before the CI is erased.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*I);
+ if (!CI) continue; // FIXME: when we allow Invoke, just do CallSite CS(*I)
+ llvm::CallSite CS(CI);
+ if (!CI || !CS.isCallee(I)) continue;
+
+ // If the return types don't match exactly, and if the call isn't dead, then
+ // we can't transform this call.
+ if (CI->getType() != NewRetTy && !CI->use_empty())
+ continue;
+
+ // Get the attribute list.
+ llvm::SmallVector<llvm::AttributeWithIndex, 8> AttrVec;
+ llvm::AttrListPtr AttrList = CI->getAttributes();
+
+ // Get any return attributes.
+ llvm::Attributes RAttrs = AttrList.getRetAttributes();
+
+ // Add the return attributes.
+ if (RAttrs)
+ AttrVec.push_back(llvm::AttributeWithIndex::get(0, RAttrs));
+
+ // If the function was passed too few arguments, don't transform. If extra
+ // arguments were passed, we silently drop them. If any of the types
+ // mismatch, we don't transform.
+ unsigned ArgNo = 0;
+ bool DontTransform = false;
+ for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
+ E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
+ if (CS.arg_size() == ArgNo ||
+ CS.getArgument(ArgNo)->getType() != AI->getType()) {
+ DontTransform = true;
+ break;
+ }
+
+ // Add any parameter attributes.
+ if (llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1))
+ AttrVec.push_back(llvm::AttributeWithIndex::get(ArgNo + 1, PAttrs));
+ }
+ if (DontTransform)
+ continue;
+
+ if (llvm::Attributes FnAttrs = AttrList.getFnAttributes())
+ AttrVec.push_back(llvm::AttributeWithIndex::get(~0, FnAttrs));
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList, "", CI);
+ ArgList.clear();
+ if (!NewCall->getType()->isVoidTy())
+ NewCall->takeName(CI);
+ NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec.begin(),
+ AttrVec.end()));
+ NewCall->setCallingConv(CI->getCallingConv());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewCall);
+
+ // Copy debug location attached to CI.
+ if (!CI->getDebugLoc().isUnknown())
+ NewCall->setDebugLoc(CI->getDebugLoc());
+ CI->eraseFromParent();
+ }
+}
+
+void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
+ GetAddrOfGlobalVar(VD);
+}
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+ const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
+
+ // Compute the function info and LLVM type.
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+
+ // Get or create the prototype for the function.
+ llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
+ llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // F is the Function* for the one with the wrong type, we must make a new
+ // Function* and update everything that used F (a declaration) with the new
+ // Function* (which will be a definition).
+ //
+ // This happens if there is a prototype for a function
+ // (e.g. "int f()") and then a definition of a different type
+ // (e.g. "int f(int x)"). Move the old function aside so that it
+ // doesn't interfere with GetAddrOfFunction.
+ OldFn->setName(StringRef());
+ llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
+
+ // If this is an implementation of a function without a prototype, try to
+ // replace any existing uses of the function (which may be calls) with uses
+ // of the new function
+ if (D->getType()->isFunctionNoProtoType()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
+ OldFn->removeDeadConstantUsers();
+ }
+
+ // Replace uses of F with the Function we will endow with a body.
+ if (!Entry->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Ok, delete the old function now, which is dead.
+ OldFn->eraseFromParent();
+
+ Entry = NewFn;
+ }
+
+ // We need to set linkage and visibility on the function before
+ // generating code for it because various parts of IR generation
+ // want to propagate this information down (e.g. to local static
+ // declarations).
+ llvm::Function *Fn = cast<llvm::Function>(Entry);
+ setFunctionLinkage(D, Fn);
+
+ // FIXME: this is redundant with part of SetFunctionDefinitionAttributes
+ setGlobalVisibility(Fn, D);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn, FI);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, Fn);
+}
+
+void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ StringRef MangledName = getMangledName(GD);
+
+ // If there is a definition in the module, then it wins over the alias.
+ // This is dubious, but we allow it to be safe. Just ignore the alias.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+ // Create a reference to the named value. This ensures that it is emitted
+ // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+
+ // Create the new alias itself, but don't set a name yet.
+ llvm::GlobalValue *GA =
+ new llvm::GlobalAlias(Aliasee->getType(),
+ llvm::Function::ExternalLinkage,
+ "", Aliasee, &getModule());
+
+ if (Entry) {
+ assert(Entry->isDeclaration());
+
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+ GA->takeName(Entry);
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ } else {
+ GA->setName(MangledName);
+ }
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<DLLExportAttr>()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // The dllexport attribute is ignored for undefined symbols.
+ if (FD->hasBody())
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ } else {
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ }
+ } else if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakRefAttr>() ||
+ D->isWeakImported()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ SetCommonAttributes(D, GA);
+}
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
+ ArrayRef<llvm::Type*> Tys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
+ Tys);
+}
+
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ bool TargetIsLSB,
+ bool &IsUTF16,
+ unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+
+ // Check for simple case.
+ if (!Literal->containsNonAsciiOrNull()) {
+ StringLength = NumBytes;
+ return Map.GetOrCreateValue(String);
+ }
+
+ // Otherwise, convert the UTF8 literals into a string of shorts.
+ IsUTF16 = true;
+
+ SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
+ const UTF8 *FromPtr = (UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+
+ // ConvertUTF8toUTF16 returns the length in ToPtr.
+ StringLength = ToPtr - &ToBuf[0];
+
+ // Add an explicit null.
+ *ToPtr = 0;
+ return Map.
+ GetOrCreateValue(StringRef(reinterpret_cast<const char *>(ToBuf.data()),
+ (StringLength + 1) * 2));
+}
+
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return Map.GetOrCreateValue(String);
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get __CFConstantStringClassReference.
+ if (!CFConstantStringClassRef) {
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ "__CFConstantStringClassReference");
+ // Decay array -> ptr
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ }
+
+ QualType CFTy = getContext().getCFConstantStringType();
+
+ llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ llvm::Constant *Fields[4];
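+
+ // A sketch of the four fields, mirroring getCFConstantStringType():
+ // const int *isa; // __CFConstantStringClassReference
+ // int flags; // 0x07C8 for ASCII, 0x07D0 for UTF-16
+ // const char *str; // the backing ".str" global
+ // long length; // string length in characters/code units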
+
+ // Class pointer.
+ Fields[0] = CFConstantStringClassRef;
+
+ // Flags.
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
+ llvm::ConstantInt::get(Ty, 0x07C8);
+
+ // String pointer.
+ llvm::Constant *C = 0;
+ if (isUTF16) {
+ ArrayRef<uint16_t> Arr =
+ llvm::makeArrayRef<uint16_t>((uint16_t*)Entry.getKey().data(),
+ Entry.getKey().size() / 2);
+ C = llvm::ConstantDataArray::get(VMContext, Arr);
+ } else {
+ C = llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+ }
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ if (isUTF16)
+ // FIXME: why do utf strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ else
+ // FIXME: With OS X ld 123.2 (xcode 4) and LTO we would get a linker error
+ // when using private linkage. It is not clear if this is a bug in ld
+ // or a reasonable new restriction.
+ Linkage = llvm::GlobalValue::LinkerPrivateLinkage;
+
+ // Note: -fwritable-strings doesn't make the backing store strings of
+ // CFStrings writable. (See <rdar://problem/10657500>)
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
+ Linkage, C, ".str");
+ GV->setUnnamedAddr(true);
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ } else {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+
+ // String.
+ Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+
+ if (isUTF16)
+ // Cast the UTF16 string to the correct type.
+ Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
+
+ // String length.
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_cfstring_");
+ if (const char *Sect = getContext().getTargetInfo().getCFStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+static RecordDecl *
+CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
+ DeclContext *DC, IdentifierInfo *Id) {
+ SourceLocation Loc;
+ if (Ctx.getLangOpts().CPlusPlus)
+ return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+ else
+ return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ if (!ConstantStringClassRef) {
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Constant *GV;
+ if (LangOpts.ObjCNonFragileABI) {
+ std::string str =
+ StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
+ : "OBJC_CLASS_$_" + StringClass;
+ GV = getObjCRuntime().GetClassGlobal(str);
+ // Make sure the result is of the correct type.
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getBitCast(GV, PTy);
+ } else {
+ std::string str =
+ StringClass.empty() ? "_NSConstantStringClassReference"
+ : "_" + StringClass + "ClassReference";
+ llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
+ GV = CreateRuntimeVariable(PTy, str);
+ // Decay array -> ptr
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ }
+ }
+
+ if (!NSConstantStringType) {
+ // Construct the type for a constant NSString.
+ RecordDecl *D = CreateRecordDecl(Context, TTK_Struct,
+ Context.getTranslationUnitDecl(),
+ &Context.Idents.get("__builtin_NSString"));
+ D->startDefinition();
+
+ QualType FieldTypes[3];
+
+ // const int *isa;
+ FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst());
+ // const char *str;
+ FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst());
+ // unsigned int length;
+ FieldTypes[2] = Context.UnsignedIntTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 3; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context, D,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ QualType NSTy = Context.getTagDeclType(D);
+ NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+ }
+
+ llvm::Constant *Fields[3];
+
+ // Class pointer.
+ Fields[0] = ConstantStringClassRef;
+
+ // String pointer.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !LangOpts.WritableStrings;
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ GV->setUnnamedAddr(true);
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
+ Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+
+ // String length.
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_nsstring_");
+ // FIXME. Fix section.
+ if (const char *Sect =
+ LangOpts.ObjCNonFragileABI
+ ? getContext().getTargetInfo().getNSStringNonFragileABISection()
+ : getContext().getTargetInfo().getNSStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+QualType CodeGenModule::getObjCFastEnumerationStateType() {
+ if (ObjCFastEnumerationStateType.isNull()) {
+ RecordDecl *D = CreateRecordDecl(Context, TTK_Struct,
+ Context.getTranslationUnitDecl(),
+ &Context.Idents.get("__objcFastEnumerationState"));
+ D->startDefinition();
+
+ QualType FieldTypes[] = {
+ Context.UnsignedLongTy,
+ Context.getPointerType(Context.getObjCIdType()),
+ Context.getPointerType(Context.UnsignedLongTy),
+ Context.getConstantArrayType(Context.UnsignedLongTy,
+ llvm::APInt(32, 5), ArrayType::Normal, 0)
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context,
+ D,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ }
+
+ return ObjCFastEnumerationStateType;
+}
+
+llvm::Constant *
+CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ if (E->getCharByteWidth() == 1) {
+ SmallString<64> Str(E->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
+ }
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+ // Wide strings have either 2-byte or 4-byte elements.
+ if (ElemTy->getPrimitiveSizeInBits() == 16) {
+ SmallVector<uint16_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+ for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+ }
+
+ assert(ElemTy->getPrimitiveSizeInBits() == 32);
+ SmallVector<uint32_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+ for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+}
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+ CharUnits Align = getContext().getTypeAlignInChars(S->getType());
+ if (S->isAscii() || S->isUTF8()) {
+ SmallString<64> Str(S->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return GetAddrOfConstantString(Str, /*GlobalName*/ 0, Align.getQuantity());
+ }
+
+ // FIXME: the following does not memoize wide strings.
+ llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(),C->getType(),
+ !LangOpts.WritableStrings,
+ llvm::GlobalValue::PrivateLinkage,
+ C,".str");
+
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+ return GetAddrOfConstantCString(Str);
+}
+
+/// GenerateStringLiteral -- Creates storage for a string literal.
+static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
+ bool constant,
+ CodeGenModule &CGM,
+ const char *GlobalName,
+ unsigned Alignment) {
+ // Create Constant for this string literal. Don't add a '\0'.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(CGM.getLLVMContext(), str, false);
+
+ // Create a global variable for this string
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+ GV->setAlignment(Alignment);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. The contents are exactly those of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// LangOpts.WritableStrings.
+///
+/// The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
+ const char *GlobalName,
+ unsigned Alignment) {
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = ".str";
+
+ // Don't share any string literals if strings aren't constant.
+ if (LangOpts.WritableStrings)
+ return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
+
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+ ConstantStringMap.GetOrCreateValue(Str);
+
+ if (llvm::GlobalVariable *GV = Entry.getValue()) {
+ if (Alignment > GV->getAlignment()) {
+ GV->setAlignment(Alignment);
+ }
+ return GV;
+ }
+
+ // Create a global variable for this.
+ llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName,
+ Alignment);
+ Entry.setValue(GV);
+ return GV;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &Str,
+ const char *GlobalName,
+ unsigned Alignment) {
+ StringRef StrWithNull(Str.c_str(), Str.size() + 1);
+ return GetAddrOfConstantString(StrWithNull, GlobalName, Alignment);
+}
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+ ObjCImplementationDecl *D) {
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ // Determine which methods need to be implemented; some may have
+ // been overridden. Note that ::isSynthesized is not the method
+ // we want; that just indicates whether the decl came from a
+ // property. What we want to know is if the method is defined in
+ // this implementation.
+ if (!D->getInstanceMethod(PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
+
+static bool needsDestructMethod(ObjCImplementationDecl *impl) {
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar())
+ if (ivar->getType().isDestructedType())
+ return true;
+
+ return false;
+}
+
+/// EmitObjCIvarInitializations - Emit information for ivar initialization
+/// for an implementation.
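+///
+/// For example, an implementation whose interface declares an ivar of a type
+/// with a non-trivial destructor (say, a C++ std::string) gets a synthesized
+/// .cxx_destruct method; ivar initializers likewise force a .cxx_construct.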
+void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
+ // We might need a .cxx_destruct even if we don't have any ivar initializers.
+ if (needsDestructMethod(D)) {
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod =
+ ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
+ cxxSelector, getContext().VoidTy, 0, D,
+ /*isInstance=*/true, /*isVariadic=*/false,
+ /*isSynthesized=*/true, /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ D->setHasCXXStructors(true);
+ }
+
+ // If the implementation doesn't have any ivar initializers, we don't need
+ // a .cxx_construct.
+ if (D->getNumIvarInitializers() == 0)
+ return;
+
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ // The constructor returns 'self'.
+ ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(),
+ cxxSelector,
+ getContext().getObjCIdType(), 0,
+ D, /*isInstance=*/true,
+ /*isVariadic=*/false,
+ /*isSynthesized=*/true,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(CTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+ D->setHasCXXStructors(true);
+}
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (RecordDecl::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
+ LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // If an error has occurred, stop code generation, but continue
+ // parsing and semantic analysis (to ensure all warnings and errors
+ // are emitted).
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Ignore dependent declarations.
+ if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+
+ EmitGlobal(cast<FunctionDecl>(D));
+ break;
+
+ case Decl::Var:
+ EmitGlobal(cast<VarDecl>(D));
+ break;
+
+ // Indirect fields from global anonymous structs and unions can be
+ // ignored; only the actual variable requires IR gen support.
+ case Decl::IndirectField:
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ // No code generation needed.
+ case Decl::UsingShadow:
+ case Decl::Using:
+ case Decl::UsingDirective:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::NamespaceAlias:
+ case Decl::Block:
+ case Decl::Import:
+ break;
+ case Decl::CXXConstructor:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+
+ EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ if (cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+ EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ case Decl::StaticAssert:
+ // Nothing to do.
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCInterface:
+ break;
+
+ case Decl::ObjCCategory: {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
+ if (CD->IsClassExtension() && CD->hasSynthBitfield())
+ Context.ResetObjCLayout(CD->getClassInterface());
+ break;
+ }
+
+ case Decl::ObjCProtocol: {
+ ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(D);
+ if (Proto->isThisDeclarationADefinition())
+ ObjCRuntime->GenerateProtocol(Proto);
+ break;
+ }
+
+ case Decl::ObjCCategoryImpl:
+ // Categories have properties but don't support @synthesize, so we
+ // can ignore them here.
+ ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ if (LangOpts.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
+ Context.ResetObjCLayout(OMD->getClassInterface());
+ EmitObjCPropertyImplementations(OMD);
+ EmitObjCIvarInitializations(OMD);
+ ObjCRuntime->GenerateClass(OMD);
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(OMD->getClassInterface()),
+ OMD->getLocation());
+
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody())
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+ StringRef AsmString = AD->getAsmString()->getString();
+
+ const std::string &S = getModule().getModuleInlineAsm();
+ if (S.empty())
+ getModule().setModuleInlineAsm(AsmString);
+ else if (*--S.end() == '\n')
+ getModule().setModuleInlineAsm(S + AsmString.str());
+ else
+ getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
+ break;
+ }
+
+ default:
+ // Make sure we handled everything we should, every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ }
+}
+
+/// Turns the given pointer into a constant.
+static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
+ const void *Ptr) {
+ uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
+ llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ return llvm::ConstantInt::get(i64, PtrInt);
+}
+
+static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
+ llvm::NamedMDNode *&GlobalMetadata,
+ GlobalDecl D,
+ llvm::GlobalValue *Addr) {
+ if (!GlobalMetadata)
+ GlobalMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
+
+ // TODO: should we report variant information for ctors/dtors?
+ llvm::Value *Ops[] = {
+ Addr,
+ GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
+ };
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
+}
+
+/// Emits metadata nodes associating all the global values in the
+/// current module with the Decls they came from. This is useful for
+/// projects using IR gen as a subroutine.
+///
+/// Since there's currently no way to associate an MDNode directly
+/// with an llvm::GlobalValue, we create a global named metadata
+/// with the name 'clang.global.decl.ptrs'.
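+///
+/// Each operand is, as a sketch: !{ <the global value>, i64 <Decl address> },
+/// where the i64 is the host pointer to the clang Decl, as produced by
+/// EmitGlobalDeclMetadata above.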
+void CodeGenModule::EmitDeclMetadata() {
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ // Emit metadata for each mangled global name we created.
+ for (llvm::DenseMap<GlobalDecl,StringRef>::iterator
+ I = MangledDeclNames.begin(), E = MangledDeclNames.end();
+ I != E; ++I) {
+ llvm::GlobalValue *Addr = getModule().getNamedValue(I->second);
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I->first, Addr);
+ }
+}
+
+/// Emits metadata nodes for all the local variables in the current
+/// function.
+void CodeGenFunction::EmitDeclMetadata() {
+ if (LocalDeclMap.empty()) return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ // Find the unique metadata ID for this name.
+ unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
+
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ for (llvm::DenseMap<const Decl*, llvm::Value*>::iterator
+ I = LocalDeclMap.begin(), E = LocalDeclMap.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::Value *Addr = I->second;
+
+ if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
+ llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
+ Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, DAddr));
+ } else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
+ GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
+ EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
+ }
+ }
+}
+
+void CodeGenModule::EmitCoverageFile() {
+ if (!getCodeGenOpts().CoverageFile.empty()) {
+ if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) {
+ llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ llvm::MDString *CoverageFile =
+ llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
+ for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
+ llvm::MDNode *CU = CUNode->getOperand(i);
+ llvm::Value *node[] = { CoverageFile, CU };
+ llvm::MDNode *N = llvm::MDNode::get(Ctx, node);
+ GCov->addOperand(N);
+ }
+ }
+ }
+}
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..38f5008
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,986 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/Mangle.h"
+#include "CGVTables.h"
+#include "CodeGenTypes.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ValueHandle.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class ConstantInt;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class LLVMContext;
+}
+
+namespace clang {
+ class TargetCodeGenInfo;
+ class ASTContext;
+ class FunctionDecl;
+ class IdentifierInfo;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCProtocolDecl;
+ class ObjCEncodeExpr;
+ class BlockExpr;
+ class CharUnits;
+ class Decl;
+ class Expr;
+ class Stmt;
+ class InitListExpr;
+ class StringLiteral;
+ class NamedDecl;
+ class ValueDecl;
+ class VarDecl;
+ class LangOptions;
+ class CodeGenOptions;
+ class DiagnosticsEngine;
+ class AnnotateAttr;
+ class CXXDestructorDecl;
+ class MangleBuffer;
+
+namespace CodeGen {
+
+ class CallArgList;
+ class CodeGenFunction;
+ class CodeGenTBAA;
+ class CGCXXABI;
+ class CGDebugInfo;
+ class CGObjCRuntime;
+ class CGOpenCLRuntime;
+ class CGCUDARuntime;
+ class BlockFieldFlags;
+ class FunctionArgList;
+
+ struct OrderGlobalInits {
+ unsigned int priority;
+ unsigned int lex_order;
+ OrderGlobalInits(unsigned int p, unsigned int l)
+ : priority(p), lex_order(l) {}
+
+ bool operator==(const OrderGlobalInits &RHS) const {
+ return priority == RHS.priority &&
+ lex_order == RHS.lex_order;
+ }
+
+ bool operator<(const OrderGlobalInits &RHS) const {
+ if (priority < RHS.priority)
+ return true;
+
+ return priority == RHS.priority && lex_order < RHS.lex_order;
+ }
+ };
+
+ struct CodeGenTypeCache {
+ /// void
+ llvm::Type *VoidTy;
+
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
+
+ /// int
+ llvm::IntegerType *IntTy;
+
+ /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
+ union {
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
+ };
+
+ /// void* in address space 0
+ union {
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The width of a pointer into the generic address space.
+ unsigned char PointerWidthInBits;
+
+ /// The size and alignment of a pointer into the generic address
+ /// space.
+ union {
+ unsigned char PointerAlignInBytes;
+ unsigned char PointerSizeInBytes;
+ };
+ };
+
+struct RREntrypoints {
+ RREntrypoints() { memset(this, 0, sizeof(*this)); }
+ /// void objc_autoreleasePoolPop(void*);
+ llvm::Constant *objc_autoreleasePoolPop;
+
+ /// void *objc_autoreleasePoolPush(void);
+ llvm::Constant *objc_autoreleasePoolPush;
+};
+
+struct ARCEntrypoints {
+ ARCEntrypoints() { memset(this, 0, sizeof(*this)); }
+
+ /// id objc_autorelease(id);
+ llvm::Constant *objc_autorelease;
+
+ /// id objc_autoreleaseReturnValue(id);
+ llvm::Constant *objc_autoreleaseReturnValue;
+
+ /// void objc_copyWeak(id *dest, id *src);
+ llvm::Constant *objc_copyWeak;
+
+ /// void objc_destroyWeak(id*);
+ llvm::Constant *objc_destroyWeak;
+
+ /// id objc_initWeak(id*, id);
+ llvm::Constant *objc_initWeak;
+
+ /// id objc_loadWeak(id*);
+ llvm::Constant *objc_loadWeak;
+
+ /// id objc_loadWeakRetained(id*);
+ llvm::Constant *objc_loadWeakRetained;
+
+ /// void objc_moveWeak(id *dest, id *src);
+ llvm::Constant *objc_moveWeak;
+
+ /// id objc_retain(id);
+ llvm::Constant *objc_retain;
+
+ /// id objc_retainAutorelease(id);
+ llvm::Constant *objc_retainAutorelease;
+
+ /// id objc_retainAutoreleaseReturnValue(id);
+ llvm::Constant *objc_retainAutoreleaseReturnValue;
+
+ /// id objc_retainAutoreleasedReturnValue(id);
+ llvm::Constant *objc_retainAutoreleasedReturnValue;
+
+ /// id objc_retainBlock(id);
+ llvm::Constant *objc_retainBlock;
+
+ /// void objc_release(id);
+ llvm::Constant *objc_release;
+
+ /// id objc_storeStrong(id*, id);
+ llvm::Constant *objc_storeStrong;
+
+ /// id objc_storeWeak(id*, id);
+ llvm::Constant *objc_storeWeak;
+
+ /// A void(void) inline asm to use to mark that the return value of
+ /// a call will be immediately retained.
+ llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
+};
+
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public CodeGenTypeCache {
+ CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+ typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
+
+ ASTContext &Context;
+ const LangOptions &LangOpts;
+ const CodeGenOptions &CodeGenOpts;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+ DiagnosticsEngine &Diags;
+ CGCXXABI &ABI;
+ CodeGenTypes Types;
+ CodeGenTBAA *TBAA;
+
+ /// VTables - Holds information about C++ vtables.
+ CodeGenVTables VTables;
+ friend class CodeGenVTables;
+
+ CGObjCRuntime* ObjCRuntime;
+ CGOpenCLRuntime* OpenCLRuntime;
+ CGCUDARuntime* CUDARuntime;
+ CGDebugInfo* DebugInfo;
+ ARCEntrypoints *ARCData;
+ llvm::MDNode *NoObjCARCExceptionsMetadata;
+ RREntrypoints *RRData;
+
+ // WeakRefReferences - A set of references that have only been seen via
+ // a weakref so far. This is used to drop the weak linkage from the
+ // reference if we ever see a direct reference or a definition.
+ llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences;
+
+ /// DeferredDecls - This contains all the decls which have definitions but
+ /// which are deferred for emission and therefore should only be output if
+ /// they are actually used. If a decl is in this, then it is known to have
+ /// not been referenced yet.
+ llvm::StringMap<GlobalDecl> DeferredDecls;
+
+ /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+ /// that *are* actually referenced. These get code generated when the module
+ /// is done.
+ std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+ /// LLVMUsed - List of global values which are required to be
+ /// present in the object file; bitcast to i8*. This is used for
+ /// forcing visibility of symbols which may otherwise be optimized
+ /// out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+
+ /// GlobalCtors - Store the list of global constructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// GlobalDtors - Store the list of global destructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ /// MangledDeclNames - A map of canonical GlobalDecls to their mangled names.
+ llvm::DenseMap<GlobalDecl, StringRef> MangledDeclNames;
+ llvm::BumpPtrAllocator MangledNamesAllocator;
+
+ /// Global annotations.
+ std::vector<llvm::Constant*> Annotations;
+
+ /// Map used to get unique annotation strings.
+ llvm::StringMap<llvm::Constant*> AnnotationStrings;
+
+ llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+ llvm::StringMap<llvm::GlobalVariable*> ConstantStringMap;
+ llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
+
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap;
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap;
+
+ /// CXXGlobalInits - Global variables with initializers that need to run
+ /// before main.
+ std::vector<llvm::Constant*> CXXGlobalInits;
+
+ /// When a C++ decl with an initializer is deferred, null is
+ /// appended to CXXGlobalInits, and the index of that null is placed
+ /// here so that the initializer will be performed in the correct
+ /// order.
+ llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
+
+ /// PrioritizedCXXGlobalInits - Global variables with initializers whose
+ /// order of initialization is set by the init_priority attribute.
+ SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
+ PrioritizedCXXGlobalInits;
+
+ /// CXXGlobalDtors - Global destructor functions and arguments that need to
+ /// run on termination.
+ std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
+
+ /// @name Cache for Objective-C runtime types
+ /// @{
+
+ /// CFConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *CFConstantStringClassRef;
+
+ /// ConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *ConstantStringClassRef;
+
+ /// \brief The LLVM type corresponding to NSConstantString.
+ llvm::StructType *NSConstantStringType;
+
+ /// \brief The type used to describe the state of a fast enumeration in
+ /// Objective-C's for..in loop.
+ QualType ObjCFastEnumerationStateType;
+
+ /// @}
+
+ /// Lazily create the Objective-C runtime
+ void createObjCRuntime();
+
+ void createOpenCLRuntime();
+ void createCUDARuntime();
+
+ bool isTriviallyRecursive(const FunctionDecl *F);
+ bool shouldEmitFunction(const FunctionDecl *F);
+ llvm::LLVMContext &VMContext;
+
+ /// @name Cache for Blocks Runtime Globals
+ /// @{
+
+ llvm::Constant *NSConcreteGlobalBlock;
+ llvm::Constant *NSConcreteStackBlock;
+
+ llvm::Constant *BlockObjectAssign;
+ llvm::Constant *BlockObjectDispose;
+
+ llvm::Type *BlockDescriptorType;
+ llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ /// @}
+public:
+ CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
+ llvm::Module &M, const llvm::TargetData &TD,
+ DiagnosticsEngine &Diags);
+
+ ~CodeGenModule();
+
+ /// Release - Finalize LLVM code generation.
+ void Release();
+
+ /// getObjCRuntime() - Return a reference to the configured
+ /// Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ if (!ObjCRuntime) createObjCRuntime();
+ return *ObjCRuntime;
+ }
+
+ /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+ /// been configured.
+ bool hasObjCRuntime() { return !!ObjCRuntime; }
+
+ /// getOpenCLRuntime() - Return a reference to the configured OpenCL runtime.
+ CGOpenCLRuntime &getOpenCLRuntime() {
+ assert(OpenCLRuntime != 0);
+ return *OpenCLRuntime;
+ }
+
+ /// getCUDARuntime() - Return a reference to the configured CUDA runtime.
+ CGCUDARuntime &getCUDARuntime() {
+ assert(CUDARuntime != 0);
+ return *CUDARuntime;
+ }
+
+ /// getCXXABI() - Return a reference to the configured C++ ABI.
+ CGCXXABI &getCXXABI() { return ABI; }
+
+ ARCEntrypoints &getARCEntrypoints() const {
+ assert(getLangOpts().ObjCAutoRefCount && ARCData != 0);
+ return *ARCData;
+ }
+
+ RREntrypoints &getRREntrypoints() const {
+ assert(RRData != 0);
+ return *RRData;
+ }
+
+ llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
+ return StaticLocalDeclMap[D];
+ }
+ void setStaticLocalDeclAddress(const VarDecl *D,
+ llvm::Constant *C) {
+ StaticLocalDeclMap[D] = C;
+ }
+
+ llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) {
+ return StaticLocalDeclGuardMap[D];
+ }
+ void setStaticLocalDeclGuardAddress(const VarDecl *D,
+ llvm::GlobalVariable *C) {
+ StaticLocalDeclGuardMap[D] = C;
+ }
+
+ llvm::Constant *getAtomicSetterHelperFnMap(QualType Ty) {
+ return AtomicSetterHelperFnMap[Ty];
+ }
+ void setAtomicSetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicSetterHelperFnMap[Ty] = Fn;
+ }
+
+ llvm::Constant *getAtomicGetterHelperFnMap(QualType Ty) {
+ return AtomicGetterHelperFnMap[Ty];
+ }
+ void setAtomicGetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicGetterHelperFnMap[Ty] = Fn;
+ }
+
+ CGDebugInfo *getModuleDebugInfo() { return DebugInfo; }
+
+ llvm::MDNode *getNoObjCARCExceptionsMetadata() {
+ if (!NoObjCARCExceptionsMetadata)
+ NoObjCARCExceptionsMetadata =
+ llvm::MDNode::get(getLLVMContext(),
+ SmallVector<llvm::Value*,1>());
+ return NoObjCARCExceptionsMetadata;
+ }
+
+ ASTContext &getContext() const { return Context; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ CodeGenVTables &getVTables() { return VTables; }
+ VTableContext &getVTableContext() { return VTables.getVTableContext(); }
+ DiagnosticsEngine &getDiags() const { return Diags; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Context.getTargetInfo(); }
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ const TargetCodeGenInfo &getTargetCodeGenInfo();
+ bool isTargetDarwin() const;
+
+ bool shouldUseTBAA() const { return TBAA != 0; }
+
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
+
+ static void DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo);
+
+ /// getSize - Emit the given number of characters as a value of type size_t.
+ llvm::ConstantInt *getSize(CharUnits numChars);
+
+ /// setGlobalVisibility - Set the visibility for the given LLVM
+ /// GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
+ /// TypeVisibilityKind - The kind of global variable that is passed to
+ /// setTypeVisibility.
+ enum TypeVisibilityKind {
+ TVK_ForVTT,
+ TVK_ForVTable,
+ TVK_ForConstructionVTable,
+ TVK_ForRTTI,
+ TVK_ForRTTIName
+ };
+
+ /// setTypeVisibility - Set the visibility for the given global
+ /// value which holds information about a type.
+ void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
+ TypeVisibilityKind TVK) const;
+
+ static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
+ switch (V) {
+ case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
+ case HiddenVisibility: return llvm::GlobalValue::HiddenVisibility;
+ case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
+ }
+ llvm_unreachable("unknown visibility!");
+ }
+
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ GD.getCtorType());
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXDestructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ GD.getDtorType());
+ else if (isa<FunctionDecl>(GD.getDecl()))
+ return GetAddrOfFunction(GD);
+ else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+ }
+
+ /// CreateOrReplaceCXXRuntimeVariable - Will return a global variable of the given
+ /// type. If a variable with a different type already exists then a new
+ /// variable with the right type will be created and all uses of the old
+ /// variable will be replaced with a bitcast to the new variable.
+ llvm::GlobalVariable *
+ CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+ /// given global variable. If Ty is non-null and if the global doesn't exist,
+ /// then it will be created with the specified type instead of whatever the
+ /// normal requested type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ llvm::Type *Ty = 0);
+
+
+ /// GetAddrOfFunction - Return the address of the given function. If Ty is
+ /// non-null, then this function will use the specified type if it has to
+ /// create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+ llvm::Type *Ty = 0,
+ bool ForVTable = false);
+
+ /// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
+ /// for the given type.
+ llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+
+ /// GetAddrOfThunk - Get the address of the thunk for the given global decl.
+ llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// GetWeakRefReference - Get a reference to the target of VD.
+ llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
+
+ /// GetNonVirtualBaseClassOffset - Returns the offset from a derived class
+ /// to a base class along the given path. Returns null if the offset is 0.
+ llvm::Constant *
+ GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd);
+
+ /// A pair of helper functions for a __block variable.
+ class ByrefHelpers : public llvm::FoldingSetNode {
+ public:
+ llvm::Constant *CopyHelper;
+ llvm::Constant *DisposeHelper;
+
+ /// The alignment of the field. This is important because
+ /// different offsets to the field within the byref struct need to
+ /// have different helper functions.
+ CharUnits Alignment;
+
+ ByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ virtual ~ByrefHelpers();
+
+ void Profile(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Alignment.getQuantity());
+ profileImpl(id);
+ }
+ virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
+
+ virtual bool needsCopy() const { return true; }
+ virtual void emitCopy(CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src) = 0;
+
+ virtual bool needsDispose() const { return true; }
+ virtual void emitDispose(CodeGenFunction &CGF, llvm::Value *field) = 0;
+ };
+
+ llvm::FoldingSet<ByrefHelpers> ByrefHelpersCache;
+
+ /// getUniqueBlockCount - Fetches the global unique block count.
+ int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
+
+ /// getBlockDescriptorType - Fetches the type of a generic block
+ /// descriptor.
+ llvm::Type *getBlockDescriptorType();
+
+ /// getGenericBlockLiteralType - The type of a generic block literal.
+ llvm::Type *getGenericBlockLiteralType();
+
+ /// GetAddrOfGlobalBlock - Gets the address of a block which
+ /// requires no captures.
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantString - Return a pointer to a constant NSString object
+ /// for the given string. Or a user defined String object as defined via
+ /// -fconstant-string-class=class_name option.
+ llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
+
+ /// GetConstantArrayFromStringLiteral - Return a constant array for the given
+ /// string.
+ llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
+
+ /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+ /// for the given string literal.
+ llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+ /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+ /// array for the given ObjCEncodeExpr node.
+ llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+ /// GetAddrOfConstantString - Returns a pointer to a character array
+ /// containing the literal. The contents are exactly those of the given
+ /// string, i.e. it will not be null terminated automatically; see
+ /// GetAddrOfConstantCString. Note that whether the result is actually a
+ /// pointer to an LLVM constant depends on Feature.WriteableStrings.
+ ///
+ /// The result has pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global
+ /// (if one is created).
+ llvm::Constant *GetAddrOfConstantString(StringRef Str,
+ const char *GlobalName=0,
+ unsigned Alignment=1);
+
+ /// GetAddrOfConstantCString - Returns a pointer to a character array
+ /// containing the literal and a terminating '\0' character. The result has
+ /// pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName=0,
+ unsigned Alignment=1);
+
+ /// GetAddrOfConstantCompoundLiteral - Returns a pointer to a constant global
+ /// variable for the given file-scope compound literal expression.
+ llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
+
+ /// \brief Retrieve the record type that describes the state of an
+ /// Objective-C fast enumeration loop (for..in).
+ QualType getObjCFastEnumerationStateType();
+
+ /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType,
+ const CGFunctionInfo *fnInfo = 0);
+
+ /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType,
+ const CGFunctionInfo *fnInfo = 0);
+
+ /// getBuiltinLibFunction - Given a builtin id for a function like
+ /// "__builtin_fabsf", return a Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID);
+
+ llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys =
+ ArrayRef<llvm::Type*>());
+
+ /// EmitTopLevelDecl - Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// HandleCXXStaticMemberVarInstantiation - Tell the consumer that this
+ /// variable has been instantiated.
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+
+ /// AddUsedGlobal - Add a global which should be forced to be
+ /// present in the object file; these are emitted to the llvm.used
+ /// metadata global.
+ void AddUsedGlobal(llvm::GlobalValue *GV);
+
+ /// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
+ /// destructor function.
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
+ }
+
+ /// CreateRuntimeFunction - Create a new runtime function with the specified
+ /// type and name.
+ llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
+ StringRef Name,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
+ /// CreateRuntimeVariable - Create a new runtime global variable with the
+ /// specified type and name.
+ llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name);
+
+ ///@name Custom Blocks Runtime Interfaces
+ ///@{
+
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ llvm::Constant *getBlockObjectAssign();
+ llvm::Constant *getBlockObjectDispose();
+
+ ///@}
+
+ /// UpdateCompletedType - Make sure that this type is translated.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+
+ /// EmitConstantInit - Try to emit the initializer for the given declaration
+ /// as a constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantInit(const VarDecl &D, CodeGenFunction *CGF = 0);
+
+ /// EmitConstantExpr - Try to emit the given expression as a
+ /// constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitConstantValue - Emit the given constant value as a constant, in the
+ /// type's scalar representation.
+ llvm::Constant *EmitConstantValue(const APValue &Value, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitConstantValueForMemory - Emit the given constant value as a constant,
+ /// in the type's memory representation.
+ llvm::Constant *EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitNullConstant - Return the result of value-initializing the given
+ /// type, i.e. a null expression of the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ /// EmitNullConstantForBase - Return a null constant appropriate for
+ /// zero-initializing a base class with the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstantForBase(const CXXRecordDecl *Record);
+
+ /// Error - Emit a general error that something can't be done.
+ void Error(SourceLocation loc, StringRef error);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified decl yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError=false);
+
+ /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+ /// function for the given decl and function info. This applies
+ /// attributes necessary for handling the ABI as well as user
+ /// specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+ /// (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+ /// which only apply to a function definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used
+ /// as a return type.
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+ /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when
+ /// used as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
+
+ /// ReturnTypeUsesFP2Ret - Return true iff the given type uses 'fp2ret' when
+ /// used as a return type.
+ bool ReturnTypeUsesFP2Ret(QualType ResultType);
+
+ /// ConstructAttributeList - Get the LLVM attributes and calling convention to
+ /// use for a particular function type.
+ ///
+ /// \param Info - The function type information.
+ /// \param TargetDecl - The decl these attributes are being constructed
+ /// for. If supplied the attributes applied to this decl may contribute to the
+ /// function attributes and calling convention.
+ /// \param PAL [out] - On return, the attribute list to use.
+ /// \param CallingConv [out] - On return, the LLVM calling convention to use.
+ void ConstructAttributeList(const CGFunctionInfo &Info,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv);
+
+ StringRef getMangledName(GlobalDecl GD);
+ void getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
+
+ llvm::GlobalVariable::LinkageTypes
+ getFunctionLinkage(const FunctionDecl *FD);
+
+ void setFunctionLinkage(const FunctionDecl *FD, llvm::GlobalValue *V) {
+ V->setLinkage(getFunctionLinkage(FD));
+ }
+
+ /// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
+ /// and type information of the given class.
+ llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD);
+
+ /// GetTargetTypeStoreSize - Return the store size, in character units, of
+ /// the given LLVM type.
+ CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const;
+
+ /// GetLLVMLinkageVarDefinition - Returns LLVM linkage for a global
+ /// variable.
+ llvm::GlobalValue::LinkageTypes
+ GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV);
+
+ std::vector<const CXXRecordDecl*> DeferredVTables;
+
+ /// Emit all the global annotations.
+ void EmitGlobalAnnotations();
+
+ /// Emit an annotation string.
+ llvm::Constant *EmitAnnotationString(llvm::StringRef Str);
+
+ /// Emit the annotation's translation unit.
+ llvm::Constant *EmitAnnotationUnit(SourceLocation Loc);
+
+ /// Emit the annotation line number.
+ llvm::Constant *EmitAnnotationLineNo(SourceLocation L);
+
+ /// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+ /// annotation information for a given GlobalValue. The annotation struct is
+ /// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+ /// GlobalValue being annotated. The second field is the constant string
+ /// created from the AnnotateAttr's annotation. The third field is a constant
+ /// string containing the name of the translation unit. The fourth field is
+ /// the line number in the file of the annotated value declaration.
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L);
+
+ /// Add global annotations that are set on D, for the global GV. Those
+ /// annotations are emitted during finalization of the LLVM code.
+ void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV);
+
+private:
+ llvm::GlobalValue *GetGlobalValue(StringRef Ref);
+
+ llvm::Constant *GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
+ GlobalDecl D,
+ bool ForVTable,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
+ llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *PTy,
+ const VarDecl *D,
+ bool UnnamedAddr = false);
+
+ /// SetCommonAttributes - Set attributes which are common to any
+ /// form of a global definition (alias, Objective-C method,
+ /// function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV);
+
+ /// SetFunctionAttributes - Set function attributes for a function
+ /// declaration.
+ void SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction);
+
+ /// EmitGlobal - Emit code for a single global function or var decl. Forward
+ /// declarations are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ void EmitGlobalDefinition(GlobalDecl D);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ llvm::Constant *MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *init);
+ void EmitAliasDefinition(GlobalDecl GD);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+ void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+ /// EmitCXXConstructors - Emit constructors (base, complete) from a
+ /// C++ constructor Decl.
+ void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+ /// EmitCXXConstructor - Emit a single constructor with the given type from
+ /// a C++ constructor Decl.
+ void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
+ /// C++ destructor Decl.
+ void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+ /// EmitCXXDestructor - Emit a single destructor with the given type from
+ /// a C++ destructor Decl.
+ void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ /// EmitCXXGlobalInitFunc - Emit the function that initializes C++ globals.
+ void EmitCXXGlobalInitFunc();
+
+ /// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
+ void EmitCXXGlobalDtorFunc();
+
+ /// EmitCXXGlobalVarDeclInitFunc - Emit the function that initializes the
+ /// specified global (if PerformInit is true) and registers its destructor.
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+ /// suitable for use as an LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+ /// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the
+ /// given type.
+ void EmitFundamentalRTTIDescriptor(QualType Type);
+
+ /// EmitFundamentalRTTIDescriptors - Emit the RTTI descriptors for the
+ /// builtin types.
+ void EmitFundamentalRTTIDescriptors();
+
+ /// EmitDeferred - Emit any needed decls for which code generation
+ /// was deferred.
+ void EmitDeferred(void);
+
+ /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+ /// references to globals which may otherwise be optimized out.
+ void EmitLLVMUsed(void);
+
+ void EmitDeclMetadata();
+
+ /// EmitCoverageFile - Emit the llvm.gcov metadata used to tell LLVM where
+ /// to emit the .gcno and .gcda files in a way that persists in .bc files.
+ void EmitCoverageFile();
+
+ /// MayDeferGeneration - Determine if the given decl can be emitted
+ /// lazily; this is only relevant for definitions. The given decl
+ /// must be either a function or var decl.
+ bool MayDeferGeneration(const ValueDecl *D);
+
+ /// SimplifyPersonality - Check whether we can use a "simpler", more
+ /// core exceptions personality function.
+ void SimplifyPersonality();
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
new file mode 100644
index 0000000..a3cadcf
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -0,0 +1,163 @@
+//===--- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use. Relevant standards text includes:
+//
+// C99 6.5p7
+// C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTBAA.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/Constants.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const LangOptions &Features, MangleContext &MContext)
+ : Context(Ctx), VMContext(VMContext), Features(Features), MContext(MContext),
+ MDHelper(VMContext), Root(0), Char(0) {
+}
+
+CodeGenTBAA::~CodeGenTBAA() {
+}
+
+llvm::MDNode *CodeGenTBAA::getRoot() {
+ // Define the root of the tree. This identifies the tree, so that
+ // if our LLVM IR is linked with LLVM IR from a different front-end
+ // (or a different version of this front-end), their TBAA trees will
+ // remain distinct, and the optimizer will treat them conservatively.
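+ //
+ // Other nodes hang off this root; e.g. for "int" the emitted metadata
+ // looks roughly like this (a sketch of the textual IR):
+ //   !0 = metadata !{metadata !"Simple C/C++ TBAA"}
+ //   !1 = metadata !{metadata !"omnipotent char", metadata !0}
+ //   !2 = metadata !{metadata !"int", metadata !1}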
+ if (!Root)
+ Root = MDHelper.createTBAARoot("Simple C/C++ TBAA");
+
+ return Root;
+}
+
+llvm::MDNode *CodeGenTBAA::getChar() {
+ // Define the root of the tree for user-accessible memory. C and C++
+ // give special powers to char and certain similar types. However,
+ // these special powers only cover user-accessible memory, and don't
+ // include things like vtables.
+ if (!Char)
+ Char = MDHelper.createTBAANode("omnipotent char", getRoot());
+
+ return Char;
+}
+
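+/// TypeHasMayAlias - Check whether QTy, or a typedef it is written through,
+/// carries the may_alias attribute, e.g. (a sketch):
+///   typedef int __attribute__((may_alias)) aliasing_int;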
+static bool TypeHasMayAlias(QualType QTy) {
+ // Tagged types have declarations, and therefore may have attributes.
+ if (const TagType *TTy = dyn_cast<TagType>(QTy))
+ return TTy->getDecl()->hasAttr<MayAliasAttr>();
+
+ // Typedef types have declarations, and therefore may have attributes.
+ if (const TypedefType *TTy = dyn_cast<TypedefType>(QTy)) {
+ if (TTy->getDecl()->hasAttr<MayAliasAttr>())
+ return true;
+ // Also, their underlying types may have relevant attributes.
+ return TypeHasMayAlias(TTy->desugar());
+ }
+
+ return false;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // If the type has the may_alias attribute (even on a typedef), it is
+ // effectively in the general char alias class.
+ if (TypeHasMayAlias(QTy))
+ return getChar();
+
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = MetadataCache[Ty])
+ return N;
+
+ // Handle builtin types.
+ if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
+ switch (BTy->getKind()) {
+ // Character types are special and can alias anything.
+ // In C++, this technically only includes "char" and "unsigned char",
+ // and not "signed char". In C, it includes all three. For now,
+ // the risk of exploiting this detail in C++ seems likely to outweigh
+ // the benefit.
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ return getChar();
+
+ // Unsigned types can alias their corresponding signed types.
+ case BuiltinType::UShort:
+ return getTBAAInfo(Context.ShortTy);
+ case BuiltinType::UInt:
+ return getTBAAInfo(Context.IntTy);
+ case BuiltinType::ULong:
+ return getTBAAInfo(Context.LongTy);
+ case BuiltinType::ULongLong:
+ return getTBAAInfo(Context.LongLongTy);
+ case BuiltinType::UInt128:
+ return getTBAAInfo(Context.Int128Ty);
+
+ // Treat all other builtin types as distinct types. This includes
+ // treating wchar_t, char16_t, and char32_t as distinct from their
+ // "underlying types".
+ default:
+ return MetadataCache[Ty] =
+ MDHelper.createTBAANode(BTy->getName(Features), getChar());
+ }
+ }
+
+ // Handle pointers.
+ // TODO: Implement C++'s type "similarity" and consider dis-"similar"
+ // pointers distinct.
+ if (Ty->isPointerType())
+ return MetadataCache[Ty] = MDHelper.createTBAANode("any pointer",
+ getChar());
+
+ // Enum types are distinct types. In C++ they have "underlying types";
+ // however, those aren't related for TBAA purposes.
+ if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ // In C mode, two anonymous enums are compatible iff their members
+ // are the same -- see C99 6.2.7p1. For now, be conservative. We could
+ // theoretically implement this by combining information about all the
+ // members into a single identifying MDNode.
+ if (!Features.CPlusPlus &&
+ ETy->getDecl()->getTypedefNameForAnonDecl())
+ return MetadataCache[Ty] = getChar();
+
+ // In C++ mode, types have linkage, so we can rely on the ODR and
+ // on their mangled names, if they're external.
+ // TODO: Is there a way to get a program-wide unique name for a
+ // decl with local linkage or no linkage?
+ if (Features.CPlusPlus &&
+ ETy->getDecl()->getLinkage() != ExternalLinkage)
+ return MetadataCache[Ty] = getChar();
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
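+ // For example, "enum E" at namespace scope is identified here by its
+ // RTTI name string, "_ZTS1E" under the Itanium ABI (a sketch; the
+ // exact spelling is ABI-dependent).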
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
+ Out.flush();
+ return MetadataCache[Ty] = MDHelper.createTBAANode(OutName, getChar());
+ }
+
+ // For now, handle any other kind of type conservatively.
+ return MetadataCache[Ty] = getChar();
+}
+
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
+ return MDHelper.createTBAANode("vtable pointer", getRoot());
+}
diff --git a/clang/lib/CodeGen/CodeGenTBAA.h b/clang/lib/CodeGen/CodeGenTBAA.h
new file mode 100644
index 0000000..4a97852
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenTBAA.h
@@ -0,0 +1,80 @@
+//===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTBAA_H
+#define CLANG_CODEGEN_CODEGENTBAA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/MDBuilder.h"
+
+namespace llvm {
+ class LLVMContext;
+ class MDNode;
+}
+
+namespace clang {
+ class ASTContext;
+ class LangOptions;
+ class MangleContext;
+ class QualType;
+ class Type;
+
+namespace CodeGen {
+ class CGRecordLayout;
+
+/// CodeGenTBAA - This class organizes the cross-module state that is used
+/// while generating TBAA metadata for LLVM IR.
+class CodeGenTBAA {
+ ASTContext &Context;
+ llvm::LLVMContext& VMContext;
+ const LangOptions &Features;
+ MangleContext &MContext;
+
+ // MDHelper - Helper for creating metadata.
+ llvm::MDBuilder MDHelper;
+
+ /// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
+ llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+
+ llvm::MDNode *Root;
+ llvm::MDNode *Char;
+
+ /// getRoot - This is the mdnode for the root of the metadata type graph
+ /// for this translation unit.
+ llvm::MDNode *getRoot();
+
+ /// getChar - This is the mdnode for "char", which is special, and any types
+ /// considered to be equivalent to it.
+ llvm::MDNode *getChar();
+
+public:
+ CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const LangOptions &Features,
+ MangleContext &MContext);
+ ~CodeGenTBAA();
+
+ /// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
+ /// of the given type.
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ /// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
+ /// dereference of a vtable pointer.
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..41fd536
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,676 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
+ : Context(CGM.getContext()), Target(Context.getTargetInfo()),
+ TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
+ TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
+ TheCXXABI(CGM.getCXXABI()),
+ CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
+ SkippedLayout = false;
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ I != E; ++I)
+ delete I->second;
+
+ for (llvm::FoldingSet<CGFunctionInfo>::iterator
+ I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+ delete &*I++;
+}
+
+void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
+ llvm::StructType *Ty,
+ StringRef suffix) {
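+ // The resulting IR type names look like "struct.Foo", "union.U", or
+ // "class.N::C", optionally followed by a suffix such as ".base"
+ // (a sketch).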
+ SmallString<256> TypeName;
+ llvm::raw_svector_ostream OS(TypeName);
+ OS << RD->getKindName() << '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (RD->getIdentifier()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (RD->getDeclContext())
+ OS << RD->getQualifiedNameAsString();
+ else
+ RD->printName(OS);
+ } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (TDD->getDeclContext())
+ OS << TDD->getQualifiedNameAsString();
+ else
+ TDD->printName(OS);
+ } else
+ OS << "anon";
+
+ if (!suffix.empty())
+ OS << suffix;
+
+ Ty->setName(OS.str());
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
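+///
+/// For example, on most targets (a sketch):
+///   ConvertType(BoolTy)       yields i1 (the scalar form of _Bool)
+///   ConvertTypeForMem(BoolTy) yields i8 (its usual in-memory form)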
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){
+ llvm::Type *R = ConvertType(T);
+
+ // If this is a non-bool type, don't map it.
+ if (!R->isIntegerTy(1))
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+}
+
+
+/// isRecordLayoutComplete - Return true if the specified type is already
+/// completely laid out.
+bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
+ llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
+ RecordDeclTypes.find(Ty);
+ return I != RecordDeclTypes.end() && !I->second->isOpaque();
+}
+
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
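+///
+/// For example (a sketch):
+///   struct A;
+///   struct B { struct A *p; };  // pointer member: safe, A may stay opaque
+///   struct A { struct B b; };   // by-value member: B must be laid out
+///                               // before A's layout can complete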
+static bool
+isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ // If we have already checked this type (maybe the same type is used by-value
+ // multiple times in multiple structure fields), don't check again.
+ if (!AlreadyChecked.insert(RD)) return true;
+
+ const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
+
+ // If this type is already laid out, converting it is a noop.
+ if (CGT.isRecordLayoutComplete(Key)) return true;
+
+ // If this type is currently being laid out, we can't recursively compile it.
+ if (CGT.isRecordBeingLaidOut(Key))
+ return false;
+
+ // If this type would require laying out bases that are currently being laid
+ // out, don't do it. This includes virtual base classes which get laid out
+ // when a class is translated, even though they aren't embedded by-value into
+ // the class.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
+ E = CRD->bases_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
+ CGT, AlreadyChecked))
+ return false;
+ }
+
+ // If this type would require laying out members that are currently being laid
+ // out, don't do it.
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
+ return false;
+
+ // If there are no problems, let's do it.
+ return true;
+}
+
+/// isSafeToConvert - Return true if it is safe to convert this field type,
+/// which requires the structure elements contained by-value to all be
+/// recursively safe to convert.
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ T = T.getCanonicalType();
+
+ // If this is a record, check it.
+ if (const RecordType *RT = dyn_cast<RecordType>(T))
+ return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
+
+ // If this is an array, check the elements, which are embedded inline.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
+
+ // Otherwise, there is no concern about transforming this. We only care about
+ // things that are contained by-value in a structure that can have another
+ // structure as a member.
+ return true;
+}
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
+ // If no structs are being laid out, we can certainly do this one.
+ if (CGT.noRecordsBeingLaidOut()) return true;
+
+ llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
+ return isSafeToConvert(RD, CGT, AlreadyChecked);
+}
+
+
+/// isFuncTypeArgumentConvertible - Return true if the specified type in a
+/// function argument or result position can be converted to an IR type at this
+/// point. This boils down to being whether it is complete, as well as whether
+/// we've temporarily deferred expanding the type because we're in a recursive
+/// context.
+bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
+ // If this isn't a tagged type, we can convert it!
+ const TagType *TT = Ty->getAs<TagType>();
+ if (TT == 0) return true;
+
+ // Incomplete types cannot be converted.
+ if (TT->isIncompleteType())
+ return false;
+
+ // If this is an enum, then it is always safe to convert.
+ const RecordType *RT = dyn_cast<RecordType>(TT);
+ if (RT == 0) return true;
+
+ // Otherwise, we have to be careful. If it is a struct that we're in the
+ // process of expanding, then we can't convert the function type. That's ok
+ // though because we must be in a pointer context under the struct, so we can
+ // just convert it to a dummy type.
+ //
+ // We decide this by checking whether ConvertRecordDeclType returns us an
+ // opaque type for a struct that we know is defined.
+ return isSafeToConvert(RT->getDecl(), *this);
+}
+
+
+/// Code to verify a given function type is complete, i.e. the return type
+/// and all of the argument types are complete. Also check to see if we are in
+/// a RS_StructPointer context, and if so whether any struct types have been
+/// pended. If so, we don't want to ask the ABI lowering code to handle a type
+/// that cannot be converted to an IR type.
+bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
+ if (!isFuncTypeArgumentConvertible(FT->getResultType()))
+ return false;
+
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
+ if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
+ return false;
+
+ return true;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ // If this is an enum being completed, then we flush all non-struct types from
+ // the cache. This allows function types and other things that may be derived
+ // from the enum to be recomputed.
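+ // For example (a sketch): if "void f(enum E*);" was converted while E
+ // was still incomplete, E was speculatively lowered to i32; if E's
+ // definition later selects a different underlying type, any cached
+ // types derived from that guess are stale and must be flushed.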
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
+ // Only flush the cache if we've actually already converted this type.
+ if (TypeCache.count(ED->getTypeForDecl())) {
+ // Okay, we formed some types based on this. We speculated that the enum
+ // would be lowered to i32, so we only need to flush the cache if this
+ // didn't happen.
+ if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
+ TypeCache.clear();
+ }
+ return;
+ }
+
+ // If we completed a RecordDecl that we previously used and converted to an
+ // anonymous type, then go ahead and complete it now.
+ const RecordDecl *RD = cast<RecordDecl>(TD);
+ if (RD->isDependentType()) return;
+
+ // Only complete it if we converted it already. If we haven't converted it
+ // yet, we'll just do it lazily.
+ if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ ConvertRecordDeclType(RD);
+}
+
+static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEhalf)
+ return llvm::Type::getInt16Ty(VMContext);
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::getFloatTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::getDoubleTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::getFP128Ty(VMContext);
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::getPPC_FP128Ty(VMContext);
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::getX86_FP80Ty(VMContext);
+ llvm_unreachable("Unknown float format!");
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ const Type *Ty = T.getTypePtr();
+
+ // RecordTypes are cached and processed specially.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return ConvertRecordDeclType(RT->getDecl());
+
+ // See if type is already cached.
+ llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
+ // If type is found in map then use it. Otherwise, convert type T.
+ if (TCI != TypeCache.end())
+ return TCI->second;
+
+ // If we don't have it in the cache, convert it now.
+ llvm::Type *ResultType = 0;
+ switch (Ty->getTypeClass()) {
+ case Type::Record: // Handled above.
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical or dependent types aren't possible.");
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty)->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ break;
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ ResultType = llvm::Type::getInt1Ty(getLLVMContext());
+ break;
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ ResultType = llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+ break;
+
+ case BuiltinType::Half:
+ // Half is special: it might be lowered to i16 (and will then be a
+ // storage-only type), or it can be represented as a set of native
+ // operations.
+
+ // FIXME: Ask target which kind of half FP it prefers (storage only vs
+ // native).
+ ResultType = llvm::Type::getInt16Ty(getLLVMContext());
+ break;
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ ResultType = getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T));
+ break;
+
+ case BuiltinType::NullPtr:
+ // Model std::nullptr_t as i8*
+ ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
+ break;
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
+ break;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("Unexpected placeholder builtin type!");
+ }
+ break;
+ }
+ case Type::Complex: {
+ llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
+ ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType *RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+ case Type::Pointer: {
+ const PointerType *PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ if (PointeeType->isVoidTy())
+ PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType *A = cast<VariableArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
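+ // For example, "int vla[n]" is lowered here to plain i32; the dynamic
+ // element count is accounted for separately when the alloca is emitted
+ // (a sketch).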
+ ResultType = ConvertTypeForMem(A->getElementType());
+ break;
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int], unless the element type is not sized. If it is
+ // unsized (e.g. an incomplete struct) just use [0 x i8].
+ ResultType = ConvertTypeForMem(A->getElementType());
+ if (!ResultType->isSized()) {
+ SkippedLayout = true;
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+ ResultType = llvm::ArrayType::get(ResultType, 0);
+ break;
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
+ llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+
+ // Lower arrays of undefined struct type to arrays of i8 just to have a
+ // concrete type.
+ if (!EltTy->isSized()) {
+ SkippedLayout = true;
+ EltTy = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+
+ ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType *VT = cast<VectorType>(Ty);
+ ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ const FunctionType *FT = cast<FunctionType>(Ty);
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+ // cannot lower the function type.
+ if (!isFuncTypeConvertible(FT)) {
+ // This function's type depends on an incomplete tag type.
+ // Return a placeholder type.
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
+ }
+
+ // While we're converting the argument types for a function, we don't want
+ // to recursively convert any pointed-to structs. Converting directly-used
+ // structs is ok though.
+ if (!RecordsBeingLaidOut.insert(Ty)) {
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
+ }
+
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ const CGFunctionInfo *FI;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ FI = &arrangeFunctionType(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
+ FI = &arrangeFunctionType(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
+ }
+
+ // If there is something higher level prodding our CGFunctionInfo, then
+ // don't recurse into it again.
+ if (FunctionsBeingProcessed.count(FI)) {
+
+ ResultType = llvm::StructType::get(getLLVMContext());
+ SkippedLayout = true;
+ } else {
+
+ // Otherwise, we're good to go, go ahead and convert it.
+ ResultType = GetFunctionType(*FI);
+ }
+
+ RecordsBeingLaidOut.erase(Ty);
+
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ break;
+ }
+
+ case Type::ObjCObject:
+ ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
+ break;
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
+ if (!T)
+ T = llvm::StructType::create(getLLVMContext());
+ ResultType = T;
+ break;
+ }
+
+ case Type::ObjCObjectPointer: {
+ // Protocol qualifications do not influence the LLVM type, we just return a
+ // pointer to the underlying interface type. We don't need to worry about
+ // recursive conversion.
+ llvm::Type *T =
+ ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ ResultType = T->getPointerTo();
+ break;
+ }
+
+ case Type::Enum: {
+ const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ if (ED->isCompleteDefinition() || ED->isFixed())
+ return ConvertType(ED->getIntegerType());
+ // Return a placeholder 'i32' type. This can be changed later when the
+ // type is defined (see UpdateCompletedType), but is likely to be the
+ // "right" answer.
+ ResultType = llvm::Type::getInt32Ty(getLLVMContext());
+ break;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(FTy);
+ unsigned AS = Context.getTargetAddressSpace(FTy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+
+ case Type::MemberPointer: {
+ ResultType =
+ getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
+ break;
+ }
+
+ case Type::Atomic: {
+ ResultType = ConvertType(cast<AtomicType>(Ty)->getValueType());
+ break;
+ }
+ }
+
+ assert(ResultType && "Didn't convert a type?");
+
+ TypeCache[Ty] = ResultType;
+ return ResultType;
+}
+
+/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
+  // TagDecls are not necessarily unique; instead, use the (clang) type
+  // connected to the decl.
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
+ llvm::StructType *&Entry = RecordDeclTypes[Key];
+
+ // If we don't have a StructType at all yet, create the forward declaration.
+ if (Entry == 0) {
+ Entry = llvm::StructType::create(getLLVMContext());
+ addRecordTypeName(RD, Entry, "");
+ }
+ llvm::StructType *Ty = Entry;
+
+ // If this is still a forward declaration, or the LLVM type is already
+ // complete, there's nothing more to do.
+ RD = RD->getDefinition();
+ if (RD == 0 || !RD->isCompleteDefinition() || !Ty->isOpaque())
+ return Ty;
+
+ // If converting this type would cause us to infinitely loop, don't do it!
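+  // (For instance, if this record, or a record it contains by value, is
+  // already in RecordsBeingLaidOut, converting it here would recurse; park it
+  // on DeferredRecords and finish it once the outer layout completes.)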
+ if (!isSafeToConvert(RD, *this)) {
+ DeferredRecords.push_back(RD);
+ return Ty;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+ bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
+ assert(InsertResult && "Recursively compiling a struct?");
+
+ // Force conversion of non-virtual base classes recursively.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
+ e = CRD->bases_end(); i != e; ++i) {
+ if (i->isVirtual()) continue;
+
+ ConvertRecordDeclType(i->getType()->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Layout fields.
+ CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
+ CGRecordLayouts[Key] = Layout;
+
+ // We're done laying out this struct.
+ bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
+ assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
+
+ // If this struct blocked a FunctionType conversion, then recompute whatever
+ // was derived from that.
+ // FIXME: This is hugely overconservative.
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ // If we're done converting the outer-most record, then convert any deferred
+ // structs as well.
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+
+ return Ty;
+}
+
+/// getCGRecordLayout - Return record layout info for the given record decl.
+const CGRecordLayout &
+CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
+ const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ if (!Layout) {
+ // Compute the type information.
+ ConvertRecordDeclType(RD);
+
+ // Now try again.
+ Layout = CGRecordLayouts.lookup(Key);
+ }
+
+ assert(Layout && "Unable to find record layout information for type");
+ return *Layout;
+}
+
+bool CodeGenTypes::isZeroInitializable(QualType T) {
+ // No need to check for member pointers when not compiling C++.
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+
+ T = Context.getBaseElementType(T);
+
+ // Records are non-zero-initializable if they contain any
+ // non-zero-initializable subobjects.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return isZeroInitializable(RD);
+ }
+
+ // We have to ask the ABI about member pointers.
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return getCXXABI().isZeroInitializable(MPT);
+
+ // Everything else is okay.
+ return true;
+}
+
+bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
+ return getCGRecordLayout(RD).isZeroInitializable();
+}
diff --git a/clang/lib/CodeGen/CodeGenTypes.h b/clang/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..ba2b3ae
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,254 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "CGCall.h"
+#include "clang/AST/GlobalDecl.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+namespace llvm {
+ class FunctionType;
+ class Module;
+ class TargetData;
+ class Type;
+ class LLVMContext;
+ class StructType;
+}
+
+namespace clang {
+ class ABIInfo;
+ class ASTContext;
+ template <typename> class CanQual;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class CodeGenOptions;
+ class FieldDecl;
+ class FunctionProtoType;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class PointerType;
+ class QualType;
+ class RecordDecl;
+ class TagDecl;
+ class TargetInfo;
+ class Type;
+ typedef CanQual<Type> CanQualType;
+
+namespace CodeGen {
+ class CGCXXABI;
+ class CGRecordLayout;
+ class CodeGenModule;
+ class RequiredArgs;
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+ // Some of this stuff should probably be left on the CGM.
+ ASTContext &Context;
+ const TargetInfo &Target;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ const ABIInfo &TheABIInfo;
+ CGCXXABI &TheCXXABI;
+ const CodeGenOptions &CodeGenOpts;
+ CodeGenModule &CGM;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
+
+  /// CGRecordLayouts - This maps clang record types to their corresponding
+  /// record layout info.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+ /// RecordDeclTypes - This contains the LLVM IR type for any converted
+ /// RecordDecl.
+ llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
+
+ /// FunctionInfos - Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+  /// RecordsBeingLaidOut - This set keeps track of records that we're
+  /// currently converting to an IR type. For example, when converting
+  /// 'struct A { struct B { int x; }; };', both the 'A' and 'B' types are in
+  /// this set while 'x' is being processed.
+ llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
+
+ llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
+
+  /// SkippedLayout - True if we skipped converting a function type because it
+  /// was encountered inside a recursive struct conversion.
+ bool SkippedLayout;
+
+ SmallVector<const RecordDecl *, 8> DeferredRecords;
+
+private:
+  /// TypeCache - This map caches converted types, mapping each clang::Type
+  /// to its corresponding llvm::Type.
+ llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
+
+public:
+ CodeGenTypes(CodeGenModule &CGM);
+ ~CodeGenTypes();
+
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Target; }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ CGCXXABI &getCXXABI() const { return TheCXXABI; }
+ llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ llvm::Type *ConvertType(QualType T);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ llvm::Type *ConvertTypeForMem(QualType T);
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
+
+ llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+
+ /// isFuncTypeConvertible - Utility to check whether a function type can
+ /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
+ /// type).
+ bool isFuncTypeConvertible(const FunctionType *FT);
+ bool isFuncTypeArgumentConvertible(QualType Ty);
+
+  /// GetFunctionTypeForVTable - Get the LLVM function type for use in a
+  /// vtable, given a CXXMethodDecl. If the method has an incomplete return
+  /// type and/or incomplete argument types, this will return the opaque type.
+ llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+  /// arrangeNullaryFunction - Get the function info for a void()
+  /// function with standard CC.
+ const CGFunctionInfo &arrangeNullaryFunction();
+
+ // The arrangement methods are split into three families:
+ // - those meant to drive the signature and prologue/epilogue
+ // of a function declaration or definition,
+ // - those meant for the computation of the LLVM type for an abstract
+ // appearance of a function, and
+ // - those meant for performing the IR-generation of a call.
+ // They differ mainly in how they deal with optional (i.e. variadic)
+ // arguments, as well as unprototyped functions.
+ //
+ // Key points:
+ // - The CGFunctionInfo for emitting a specific call site must include
+ // entries for the optional arguments.
+ // - The function type used at the call site must reflect the formal
+ // signature of the declaration being called, or else the call will
+ // go awry.
+ // - For the most part, unprototyped functions are called by casting to
+ // a formal signature inferred from the specific argument types used
+ // at the call-site. However, some targets (e.g. x86-64) screw with
+ // this for compatibility reasons.
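+  //
+  //   Illustrative (assumed) example: for 'void f();' called as 'f(1, 2.5)',
+  //   the call site is emitted through a cast of 'f' to a function type
+  //   taking (int, double), while the definition of 'f' keeps its own
+  //   signature.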
+
+ const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info,
+ bool isVariadic);
+
+ const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType);
+
+ const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
+ const CGFunctionInfo &arrangeCXXConstructorDeclaration(
+ const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ const CGFunctionInfo &arrangeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty);
+ const CGFunctionInfo &arrangeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP);
+
+ /// Retrieves the ABI information for the given function signature.
+ /// This is the "core" routine to which all the others defer.
+ ///
+ /// \param argTypes - must all actually be canonical as params
+ const CGFunctionInfo &arrangeFunctionType(CanQualType returnType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs args);
+
+ /// \brief Compute a new LLVM record layout object for the given record.
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
+
+ /// addRecordTypeName - Compute a name from the given record decl with an
+ /// optional suffix and name the given LLVM type using it.
+ void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
+ StringRef suffix);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+ llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
+
+  /// GetExpandedTypes - Expand the type \arg type into the LLVM argument
+  /// types it would be passed as, appending them to the provided vector
+  /// \arg expanded. See ABIArgInfo::Expand.
+ void GetExpandedTypes(QualType type,
+ SmallVectorImpl<llvm::Type*> &expanded);
+
+  /// isZeroInitializable - Return whether a type can be
+  /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(QualType T);
+
+  /// isZeroInitializable - Return whether a record type can be
+  /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(const CXXRecordDecl *RD);
+
+ bool isRecordLayoutComplete(const Type *Ty) const;
+ bool noRecordsBeingLaidOut() const {
+ return RecordsBeingLaidOut.empty();
+ }
+ bool isRecordBeingLaidOut(const Type *Ty) const {
+ return RecordsBeingLaidOut.count(Ty);
+ }
+
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..98f67f3
--- /dev/null
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -0,0 +1,1202 @@
+//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
+// in this file generates structures that follow the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include <clang/AST/Mangle.h>
+#include <clang/AST/Type.h>
+#include <llvm/Intrinsics.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Value.h>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class ItaniumCXXABI : public CodeGen::CGCXXABI {
+private:
+ llvm::IntegerType *PtrDiffTy;
+protected:
+ bool IsARM;
+
+ // It's a little silly for us to cache this.
+ llvm::IntegerType *getPtrDiffTy() {
+ if (!PtrDiffTy) {
+ QualType T = getContext().getPointerDiffType();
+ llvm::Type *Ty = CGM.getTypes().ConvertType(T);
+ PtrDiffTy = cast<llvm::IntegerType>(Ty);
+ }
+ return PtrDiffTy;
+ }
+
+ bool NeedsArrayCookie(const CXXNewExpr *expr);
+ bool NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType);
+
+public:
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
+ CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
+
+ bool isZeroInitializable(const MemberPointerType *MPT);
+
+ llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+ llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
+
+ llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+ llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
+ llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+ llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment);
+
+ llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *Addr,
+ const MemberPointerType *MPT);
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
+
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+private:
+ /// \brief Returns true if the given instance method is one of the
+ /// kinds that the ARM ABI says returns 'this'.
+ static bool HasThisReturn(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
+ (isa<CXXConstructorDecl>(MD)));
+ }
+};
+}
+
+CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+ return new ItaniumCXXABI(CGM);
+}
+
+CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
+ return new ARMCXXABI(CGM);
+}
+
+llvm::Type *
+ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ if (MPT->isMemberDataPointer())
+ return getPtrDiffTy();
+ return llvm::StructType::get(getPtrDiffTy(), getPtrDiffTy(), NULL);
+}
+
+/// In the Itanium and ARM ABIs, method pointers have the form:
+/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
+///
+/// In the Itanium ABI:
+/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
+/// - the this-adjustment is (memptr.adj)
+/// - the virtual offset is (memptr.ptr - 1)
+///
+/// In the ARM ABI:
+/// - method pointers are virtual if (memptr.adj & 1) is nonzero
+/// - the this-adjustment is (memptr.adj >> 1)
+/// - the virtual offset is (memptr.ptr)
+/// ARM uses 'adj' for the virtual flag because Thumb functions
+/// may be only single-byte aligned.
+///
+/// If the member is virtual, the adjusted 'this' pointer points
+/// to a vtable pointer from which the virtual offset is applied.
+///
+/// If the member is non-virtual, memptr.ptr is the address of
+/// the function to call.
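+///
+/// Illustrative (assumed) example: given 'struct A { virtual void f();
+/// void g(); };', a typical Itanium target encodes '&A::f' (vtable slot 0)
+/// as { ptr = 0 + 1, adj = 0 } and '&A::g' as { ptr = (ptrdiff_t)&A::g,
+/// adj = 0 }, while ARM encodes '&A::f' as { ptr = 0, adj = 0*2 + 1 }.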
+llvm::Value *
+ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+
+ llvm::IntegerType *ptrdiff = getPtrDiffTy();
+ llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
+
+ llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
+ llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
+ llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
+
+ // Extract memptr.adj, which is in the second field.
+ llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
+
+ // Compute the true adjustment.
+ llvm::Value *Adj = RawAdj;
+ if (IsARM)
+ Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
+
+  // Apply the adjustment and cast back to the original pointer type
+  // for consistency.
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+
+ // Load the function pointer.
+ llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
+ llvm::Value *IsVirtual;
+ if (IsARM)
+ IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
+ else
+ IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
+ IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+
+ // In the virtual path, the adjustment left 'This' pointing to the
+ // vtable of the correct base subobject. The "function pointer" is an
+ // offset within the vtable (+1 for the virtual flag on non-ARM).
+ CGF.EmitBlock(FnVirtual);
+
+ // Cast the adjusted this to a pointer to vtable pointer and load.
+ llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
+ VTable = Builder.CreateLoad(VTable, "memptr.vtable");
+
+ // Apply the offset.
+ llvm::Value *VTableOffset = FnAsInt;
+ if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ VTable = Builder.CreateGEP(VTable, VTableOffset);
+
+ // Load the virtual function to call.
+ VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
+ CGF.EmitBranch(FnEnd);
+
+  // In the non-virtual path, memptr.ptr is simply the integer value of
+  // the function pointer; cast it back to a function pointer.
+ CGF.EmitBlock(FnNonVirtual);
+ llvm::Value *NonVirtualFn =
+ Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+
+ // We're done.
+ CGF.EmitBlock(FnEnd);
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+ return Callee;
+}
+
+/// Compute an l-value by applying the given pointer-to-member to a
+/// base object.
+llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
+
+ // Cast to char*.
+ Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+
+ // Apply the offset, which we assume is non-null.
+ llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the
+ // address space of the base pointer.
+ llvm::Type *PType
+ = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ return Builder.CreateBitCast(Addr, PType);
+}
+
+/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
+/// conversion.
+///
+/// Bitcast conversions are always a no-op under Itanium.
+///
+/// Obligatory offset/adjustment diagram:
+/// <-- offset --> <-- adjustment -->
+/// |--------------------------|----------------------|--------------------|
+/// ^Derived address point ^Base address point ^Member address point
+///
+/// So when converting a base member pointer to a derived member pointer,
+/// we add the offset to the adjustment because the address point has
+/// decreased; and conversely, when converting a derived MP to a base MP
+/// we subtract the offset from the adjustment because the address point
+/// has increased.
+///
+/// The standard forbids (at compile time) conversion to and from
+/// virtual bases, which is why we don't have to consider them here.
+///
+/// The standard forbids (at run time) casting a derived MP to a base
+/// MP when the derived MP does not point to a member of the base.
+/// This is why -1 is a reasonable choice for null data member
+/// pointers.
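+///
+/// Hypothetical example: if 'Base' lies at offset 8 within 'Derived', then
+/// converting an 'int Base::*' holding offset 4 to 'int Derived::*' yields
+/// 4 + 8 = 12, and the reverse conversion subtracts 8 again; the null value
+/// -1 is passed through unchanged by the null check below.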
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
+
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
+
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ llvm::Value *dst;
+ if (isDerivedToBase)
+ dst = Builder.CreateNSWSub(src, adj, "adj");
+ else
+ dst = Builder.CreateNSWAdd(src, adj, "adj");
+
+ // Null check.
+ llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
+ llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
+ return Builder.CreateSelect(isNull, src, dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
+ }
+
+ llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
+ llvm::Value *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
+ else
+ dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
+
+ return Builder.CreateInsertValue(src, dstAdj, 1);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
+
+ // If the adjustment is trivial, we don't need to do anything.
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
+
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ // null maps to null.
+ if (src->isAllOnesValue()) return src;
+
+ if (isDerivedToBase)
+ return llvm::ConstantExpr::getNSWSub(src, adj);
+ else
+ return llvm::ConstantExpr::getNSWAdd(src, adj);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
+ }
+
+ llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
+ else
+ dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
+
+ return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ if (MPT->isMemberDataPointer())
+ return llvm::ConstantInt::get(ptrdiff_t, -1ULL, /*isSigned=*/true);
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
+ llvm::Constant *Values[2] = { Zero, Zero };
+ return llvm::ConstantStruct::getAnon(Values);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ // Itanium C++ ABI 2.3:
+ // A pointer to data member is an offset from the base address of
+ // the class object containing it, represented as a ptrdiff_t
+ return llvm::ConstantInt::get(getPtrDiffTy(), offset.getQuantity());
+}
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return BuildMemberPointer(MD, CharUnits::Zero());
+}
+
+llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment) {
+ assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Get the function pointer (or index if this is a virtual function).
+ llvm::Constant *MemPtr[2];
+ if (MD->isVirtual()) {
+ uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);
+
+ const ASTContext &Context = getContext();
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
+
+ if (IsARM) {
+ // ARM C++ ABI 3.2.1:
+ // This ABI specifies that adj contains twice the this
+ // adjustment, plus 1 if the member function is virtual. The
+ // least significant bit of adj then makes exactly the same
+ // discrimination as the least significant bit of ptr does for
+ // Itanium.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ 2 * ThisAdjustment.getQuantity() + 1);
+ } else {
+ // Itanium C++ ABI 2.3:
+ // For a virtual function, [the pointer field] is 1 plus the
+ // virtual table offset (in bytes) of the function,
+ // represented as a ptrdiff_t.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ ThisAdjustment.getQuantity());
+ }
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (Types.isFuncTypeConvertible(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = ptrdiff_t;
+ }
+ llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
+
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, (IsARM ? 2 : 1) *
+ ThisAdjustment.getQuantity());
+ }
+
+ return llvm::ConstantStruct::getAnon(MemPtr);
+}
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
+ QualType MPType) {
+ const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ if (!MPD)
+ return EmitNullMemberPointer(MPT);
+
+ // Compute the this-adjustment.
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment +=
+ getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
+ return BuildMemberPointer(MD, ThisAdjustment);
+
+ CharUnits FieldOffset =
+ getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
+ return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
+}
+
+/// The comparison algorithm is pretty easy: the member pointers are
+/// the same if they're either bitwise identical *or* both null.
+///
+/// ARM is different here only because null-ness is more complicated.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // Member data pointers are easy because there's a unique null
+ // value, so it just comes down to bitwise equality.
+ if (MPT->isMemberDataPointer())
+ return Builder.CreateICmp(Eq, L, R);
+
+ // For member function pointers, the tautologies are more complex.
+ // The Itanium tautology is:
+ // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
+ // The ARM tautology is:
+ // (L == R) <==> (L.ptr == R.ptr &&
+ // (L.adj == R.adj ||
+ // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
+ // The inequality tautologies have exactly the same structure, except
+ // applying De Morgan's laws.
+
+ llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
+ llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
+
+ // This condition tests whether L.ptr == R.ptr. This must always be
+ // true for equality to hold.
+ llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
+
+ // This condition, together with the assumption that L.ptr == R.ptr,
+ // tests whether the pointers are both null. ARM imposes an extra
+ // condition.
+ llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
+ llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
+
+ // This condition tests whether L.adj == R.adj. If this isn't
+ // true, the pointers are unequal unless they're both null.
+ llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
+ llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
+ llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
+
+ // Null member function pointers on ARM clear the low bit of Adj,
+ // so the zero condition has to check that neither low bit is set.
+ if (IsARM) {
+ llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
+
+ // Compute (l.adj | r.adj) & 1 and test it against zero.
+ llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
+ llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
+ llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
+ "cmp.or.adj");
+ EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
+ }
+
+ // Tie together all our conditions.
+ llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
+ Result = Builder.CreateBinOp(And, PtrEq, Result,
+ Inequality ? "memptr.ne" : "memptr.eq");
+ return Result;
+}
+
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+  // For member data pointers, this is just a check against -1.
+ if (MPT->isMemberDataPointer()) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+ llvm::Value *NegativeOne =
+ llvm::Constant::getAllOnesValue(MemPtr->getType());
+ return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
+ }
+
+ // In Itanium, a member function pointer is not null if 'ptr' is not null.
+ llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
+ llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
+
+ // On ARM, a member function pointer is also non-null if the low bit of 'adj'
+ // (the virtual bit) is set.
+ if (IsARM) {
+ llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
+ llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
+ llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
+ llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
+ "memptr.isvirtual");
+ Result = Builder.CreateOr(Result, IsVirtual);
+ }
+
+ return Result;
+}
+
+/// The Itanium ABI requires non-zero initialization only for data
+/// member pointers, for which '0' is a valid offset.
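+///
+/// For example, for 'struct S { int x; };' the member pointer '&S::x' is the
+/// offset 0, so null data member pointers use -1 instead; null member
+/// function pointers are { 0, 0 } and therefore zero-initialize correctly.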
+bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ return MPT->getPointeeType()->isFunctionType();
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's initializing a
+/// base subobject.
+void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
+void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
+ ResTy = ArgTys[0];
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's destroying a
+/// base subobject.
+void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'
+/// for non-deleting destructors.
+void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);
+
+ if (Type != Dtor_Deleting)
+ ResTy = ArgTys[0];
+}
+
+void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+  // Create the 'this' variable.
+ BuildThisParam(CGF, Params);
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ assert(MD->isInstance());
+
+ // Check if we need a VTT parameter as well.
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ ASTContext &Context = getContext();
+
+ // FIXME: avoid the fake decl
+ QualType T = Context.getPointerType(Context.VoidPtrTy);
+ ImplicitParamDecl *VTTDecl
+ = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
+ &Context.Idents.get("vtt"), T);
+ Params.push_back(VTTDecl);
+ getVTTDecl(CGF) = VTTDecl;
+ }
+}
+
+void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);
+
+ // Return 'this' from certain constructors and destructors.
+ if (HasThisReturn(CGF.CurGD))
+ ResTy = Params[0]->getType();
+}
+
+void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+  // Initialize the 'this' slot.
+ EmitThisParam(CGF);
+
+  // Initialize the 'vtt' slot if needed.
+ if (getVTTDecl(CGF)) {
+ getVTTValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
+ "vtt");
+ }
+}
+
+void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);
+
+  // Initialize the return slot to 'this' at the start of the function.
+ if (HasThisReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+}
+
+void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
+
+ // Destructor thunks in the ARM ABI have indeterminate results.
+ llvm::Type *T =
+ cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
+ RValue Undef = RValue::get(llvm::UndefValue::get(T));
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
+}
+
+/************************** Array allocation cookies **************************/
+
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ QualType AllocatedType = expr->getAllocatedType();
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ AllocatedType->isObjCLifetimeType()) {
+ switch (AllocatedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
+ // Otherwise, if the class has a non-trivial destructor, it always
+ // needs a cookie.
+ const CXXRecordDecl *record =
+ AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return (record && !record->hasTrivialDestructor());
+}
+
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ return elementType.isDestructedType();
+}
+
+CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
+ return CharUnits::Zero();
+
+ // Padding is the maximum of sizeof(size_t) and alignof(elementType)
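+  // For example (assuming a 4-byte size_t and 8-byte-aligned double), a
+  // 'new double[n]' cookie occupies max(4, 8) = 8 bytes, keeping the array
+  // data that follows it suitably aligned.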
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(expr->getAllocatedType()));
+}
+
+llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(expr));
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ QualType SizeTy = Ctx.getSizeType();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
+
+ // The size of the cookie.
+ CharUnits CookieSize =
+ std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+
+ // Compute an offset to the cookie.
+ llvm::Value *CookiePtr = NewPtr;
+ CharUnits CookieOffset = CookieSize - SizeSize;
+ if (!CookieOffset.isZero())
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
+ CookieOffset.getQuantity());
+
+ // Write the number of elements into the appropriate slot.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateBitCast(CookiePtr,
+ CGF.ConvertType(SizeTy)->getPointerTo(AS));
+ CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
+
+void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(expr, ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ CookieSize
+ = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
+
+ CharUnits NumElementsOffset = CookieSize - SizeSize;
+
+ // Compute the allocated pointer.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ llvm::Value *NumElementsPtr = AllocPtr;
+ if (!NumElementsOffset.isZero())
+ NumElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
+ NumElementsOffset.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
+CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
+ return CharUnits::Zero();
+
+ // On ARM, the cookie is always:
+ // struct array_cookie {
+ // std::size_t element_size; // element_size != 0
+ // std::size_t element_count;
+ // };
+ // TODO: what should we do if the allocated type actually wants
+ // greater alignment?
+ return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
+}
+
+llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(expr));
+
+ // NewPtr is a char*.
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
+ llvm::IntegerType *SizeTy =
+ cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
+
+ // The cookie is always at the start of the buffer.
+ llvm::Value *CookiePtr = NewPtr;
+
+ // The first element is the element size.
+ CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
+ llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
+ Ctx.getTypeSizeInChars(ElementType).getQuantity());
+ CGF.Builder.CreateStore(ElementSize, CookiePtr);
+
+ // The second element is the element count.
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
+ CGF.Builder.CreateStore(NumElements, CookiePtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ CharUnits CookieSize = 2 * SizeSize;
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
+
+void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(expr, ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ // The cookie size is always 2 * sizeof(size_t).
+ CookieSize = 2 * SizeSize;
+
+ // The allocated pointer is the input ptr, minus that amount.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ // The number of elements is at offset sizeof(size_t) relative to that.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ SizeSize.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
+/*********************** Static local initialization **************************/
+
+static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // int __cxa_guard_acquire(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
+ GuardPtrTy, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
+ llvm::Attribute::NoUnwind);
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_release(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
+ llvm::Attribute::NoUnwind);
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_abort(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
+ llvm::Attribute::NoUnwind);
+}
+
+namespace {
+ struct CallGuardAbort : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+/// The ARM code here follows the Itanium code closely enough that we
+/// just special-case it at particular places.
+void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *var,
+ bool shouldPerformInit) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // We only need to use thread-safe statics for local variables;
+ // global initialization is always single-threaded.
+ bool threadsafe =
+ (getContext().getLangOpts().ThreadsafeStatics && D.isLocalVarDecl());
+
+ // If we have a global variable with internal linkage and thread-safe statics
+ // are disabled, we can just let the guard variable be of type i8.
+ bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
+
+ llvm::IntegerType *guardTy;
+ if (useInt8GuardVariable) {
+ guardTy = CGF.Int8Ty;
+ } else {
+ // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
+ guardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
+ }
+ llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
+
+ // Create the guard variable if we don't already have it (as we
+ // might if we're double-emitting this function body).
+ llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
+ if (!guard) {
+ // Mangle the name for the guard.
+ SmallString<256> guardName;
+ {
+ llvm::raw_svector_ostream out(guardName);
+ getMangleContext().mangleItaniumGuardVariable(&D, out);
+ out.flush();
+ }
+
+ // Create the guard variable with a zero-initializer.
+ // Just absorb linkage and visibility from the guarded variable.
+ guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
+ false, var->getLinkage(),
+ llvm::ConstantInt::get(guardTy, 0),
+ guardName.str());
+ guard->setVisibility(var->getVisibility());
+
+ CGM.setStaticLocalDeclGuardAddress(&D, guard);
+ }
+
+ // Test whether the variable has completed initialization.
+ llvm::Value *isInitialized;
+
+ // ARM C++ ABI 3.2.3.1:
+ // To support the potential use of initialization guard variables
+ // as semaphores that are the target of ARM SWP and LDREX/STREX
+ // synchronizing instructions we define a static initialization
+  //   guard variable to be a 4-byte aligned, 4-byte word with the
+ // following inline access protocol.
+ // #define INITIALIZED 1
+ // if ((obj_guard & INITIALIZED) != INITIALIZED) {
+ // if (__cxa_guard_acquire(&obj_guard))
+ // ...
+ // }
+ if (IsARM && !useInt8GuardVariable) {
+ llvm::Value *V = Builder.CreateLoad(guard);
+ V = Builder.CreateAnd(V, Builder.getInt32(1));
+ isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+
+ // Itanium C++ ABI 3.3.2:
+ // The following is pseudo-code showing how these functions can be used:
+ // if (obj_guard.first_byte == 0) {
+ // if ( __cxa_guard_acquire (&obj_guard) ) {
+ // try {
+ // ... initialize the object ...;
+ // } catch (...) {
+ // __cxa_guard_abort (&obj_guard);
+ // throw;
+ // }
+ // ... queue object destructor with __cxa_atexit() ...;
+ // __cxa_guard_release (&obj_guard);
+ // }
+ // }
+ } else {
+ // Load the first byte of the guard variable.
+ llvm::LoadInst *LI =
+ Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
+ LI->setAlignment(1);
+
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ //
+ // In LLVM, we do this by marking the load Acquire.
+ if (threadsafe)
+ LI->setAtomic(llvm::Acquire);
+
+ isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
+ }
+
+ llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+
+ // Check if the first byte of the guard variable is zero.
+ Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
+
+ CGF.EmitBlock(InitCheckBlock);
+
+  // If thread-safe statics are enabled, acquire the guard, arranging to
+  // abort it along the exceptional edge if the initializer throws.
+ if (threadsafe) {
+ // Call __cxa_guard_acquire.
+ llvm::Value *V
+ = Builder.CreateCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
+
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
+
+ CGF.EmitBlock(InitBlock);
+ }
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
+
+ if (threadsafe) {
+ // Pop the guard-abort cleanup if we pushed one.
+ CGF.PopCleanupBlock();
+
+ // Call __cxa_guard_release. This cannot throw.
+ Builder.CreateCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
+ } else {
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
+ }
+
+ CGF.EmitBlock(EndBlock);
+}
diff --git a/clang/lib/CodeGen/Makefile b/clang/lib/CodeGen/Makefile
new file mode 100644
index 0000000..6032dff
--- /dev/null
+++ b/clang/lib/CodeGen/Makefile
@@ -0,0 +1,19 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+LIBRARYNAME := clangCodeGen
+
+include $(CLANG_LEVEL)/Makefile
+
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..825e041
--- /dev/null
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -0,0 +1,95 @@
+//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
+// The class in this file generates structures that follow the Microsoft
+// Visual C++ ABI, which is not well documented outside of Microsoft.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class MicrosoftCXXABI : public CGCXXABI {
+public:
+ MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ BuildThisParam(CGF, Params);
+ // TODO: 'for base' flag
+ }
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+ // TODO: 'for base' flag
+ }
+
+ // ==== Notes on array cookies =========
+ //
+ // MSVC seems to only use cookies when the class has a destructor; a
+ // two-argument usual array deallocation function isn't sufficient.
+ //
+ // For example, this code prints "100" and "1":
+ // struct A {
+ // char x;
+ // void *operator new[](size_t sz) {
+ // printf("%u\n", sz);
+ // return malloc(sz);
+ // }
+ // void operator delete[](void *p, size_t sz) {
+ // printf("%u\n", sz);
+ // free(p);
+ // }
+ // };
+ // int main() {
+ // A *p = new A[100];
+ // delete[] p;
+ // }
+ // Whereas it prints "104" and "104" if you give A a destructor.
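+ //
+ // For illustration only (the hook below is unimplemented): if the cookie
+ // is a single size_t holding the element count, stored immediately before
+ // the array, reading it back would look roughly like:
+ //
+ //   void *allocation = ...;                // result of operator new[]
+ //   size_t count = *(size_t *)allocation;  // the cookie
+ //   A *elems = (A *)((char *)allocation + sizeof(size_t));
+ //
+ // That layout matches the "104" above: 100 one-byte elements plus a
+ // 4-byte cookie on a 32-bit target.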
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ CGF.CGM.ErrorUnsupported(expr, "don't know how to handle array cookies "
+ "in the Microsoft C++ ABI");
+ }
+};
+
+}
+
+CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+ return new MicrosoftCXXABI(CGM);
+}
+
diff --git a/clang/lib/CodeGen/ModuleBuilder.cpp b/clang/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..ea2389e
--- /dev/null
+++ b/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,127 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+namespace {
+ class CodeGeneratorImpl : public CodeGenerator {
+ DiagnosticsEngine &Diags;
+ OwningPtr<const llvm::TargetData> TD;
+ ASTContext *Ctx;
+ const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+ protected:
+ OwningPtr<llvm::Module> M;
+ OwningPtr<CodeGen::CodeGenModule> Builder;
+ public:
+ CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
+ const CodeGenOptions &CGO, llvm::LLVMContext& C)
+ : Diags(diags), CodeGenOpts(CGO), M(new llvm::Module(ModuleName, C)) {}
+
+ virtual ~CodeGeneratorImpl() {}
+
+ virtual llvm::Module* GetModule() {
+ return M.get();
+ }
+
+ virtual llvm::Module* ReleaseModule() {
+ return M.take();
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
+ M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->getTargetInfo().getTargetDescription()));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
+ *M, *TD, Diags));
+ }
+
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Builder->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
+ // Make sure to emit all elements of a Decl.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+ return true;
+ }
+
+ /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+ /// (e.g. struct, union, enum, class) is completed. This allows the client
+ /// to hack on the type, which can occur at any point in the file (because
+ /// these can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ Builder->UpdateCompletedType(D);
+
+ // In C++, we may have member functions that need to be emitted at this
+ // point.
+ if (Ctx->getLangOpts().CPlusPlus && !D->isDependentContext()) {
+ for (DeclContext::decl_iterator M = D->decls_begin(),
+ MEnd = D->decls_end();
+ M != MEnd; ++M)
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*M))
+ if (Method->doesThisDeclarationHaveABody() &&
+ (Method->hasAttr<UsedAttr>() ||
+ Method->hasAttr<ConstructorAttr>()))
+ Builder->EmitTopLevelDecl(Method);
+ }
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ if (Diags.hasErrorOccurred()) {
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitVTable(RD, DefinitionRequired);
+ }
+ };
+}
+
+void CodeGenerator::anchor() { }
+
+CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
+ const std::string& ModuleName,
+ const CodeGenOptions &CGO,
+ llvm::LLVMContext& C) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
+}
diff --git a/clang/lib/CodeGen/README.txt b/clang/lib/CodeGen/README.txt
new file mode 100644
index 0000000..e6d6109
--- /dev/null
+++ b/clang/lib/CodeGen/README.txt
@@ -0,0 +1,47 @@
+IRgen optimization opportunities.
+
+//===---------------------------------------------------------------------===//
+
+The common pattern of
+--
+short x; // or char, etc
+(x == 10)
+--
+generates a zext/sext of x which can easily be avoided.
+
+//===---------------------------------------------------------------------===//
+
+Bitfield accesses can be shifted to simplify masking and sign
+extension. For example, if the bitfield width is 8 and it is
+appropriately aligned then it is a lot shorter to just load the char
+directly.
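+
+For example (a sketch, assuming a byte-aligned 8-bit field):
+--
+struct S { int f : 8; };
+int get(struct S *s) { return s->f; }  // could be a char load plus sext
+--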
+
+//===---------------------------------------------------------------------===//
+
+It may be worth avoiding the creation of allocas for formal arguments
+in the common situation where the argument is never written to or has
+its address taken. The idea would be to begin generating code by using
+the argument directly and, if its address is taken or it is stored to,
+then generate the alloca and patch up the existing code.
+
+In theory, the same optimization could be a win for block local
+variables as long as the declaration dominates all statements in the
+block.
+
+NOTE: The main case we care about here is -O0 -g compile-time
+performance, and in that scenario we currently need to emit the
+alloca anyway to get proper debug info. So this is blocked on being
+able to emit debug information which refers to an LLVM temporary,
+not an alloca.
+
+//===---------------------------------------------------------------------===//
+
+We should try to avoid generating basic blocks which only contain
+jumps. At -O0, this penalizes us all the way from IRgen (malloc &
+instruction overhead) down through code generation and assembly time.
+
+On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
+direct branches!
+
+//===---------------------------------------------------------------------===//
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..2b71fdd
--- /dev/null
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,3698 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Type.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array,
+ llvm::Value *Value,
+ unsigned FirstIndex,
+ unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
+ Builder.CreateStore(Value, Cell);
+ }
+}
+
+static bool isAggregateTypeForABI(QualType T) {
+ return CodeGenFunction::hasAggregateLLVMType(T) ||
+ T->isMemberFunctionPointerType();
+}
+
+ABIInfo::~ABIInfo() {}
+
+ASTContext &ABIInfo::getContext() const {
+ return CGT.getContext();
+}
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::TargetData &ABIInfo::getTargetData() const {
+ return CGT.getTargetData();
+}
+
+
+void ABIArgInfo::dump() const {
+ raw_ostream &OS = llvm::errs();
+ OS << "(ABIArgInfo Kind=";
+ switch (TheKind) {
+ case Direct:
+ OS << "Direct Type=";
+ if (llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
+ break;
+ case Extend:
+ OS << "Extend";
+ break;
+ case Ignore:
+ OS << "Ignore";
+ break;
+ case Indirect:
+ OS << "Indirect Align=" << getIndirectAlign()
+ << " ByVal=" << getIndirectByVal()
+ << " Realign=" << getIndirectRealign();
+ break;
+ case Expand:
+ OS << "Expand";
+ break;
+ }
+ OS << ")\n";
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+// If someone can figure out a general rule for this, that would be great.
+// It's probably just doomed to be platform-dependent, though.
+unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
+ // Verified for:
+ // x86-64 FreeBSD, Linux, Darwin
+ // x86-32 FreeBSD, Linux, Darwin
+ // PowerPC Linux, Darwin
+ // ARM Darwin (*not* EABI)
+ return 32;
+}
+
+bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
+ // The following conventions are known to require this to be false:
+ // x86_stdcall
+ // MIPS
+ // For everything else, we just prefer false unless we opt out.
+ return false;
+}
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ // Constant arrays of zero length always count as empty.
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
+ FT = AT->getElementType();
+ }
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ if (isa<CXXRecordDecl>(RT->getDecl()))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isEmptyRecord(Context, i->getType(), true))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i)
+ if (!isEmptyField(Context, *i, AllowArrays))
+ return false;
+ return true;
+}
+
+/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
+/// a non-trivial destructor or a non-trivial copy constructor.
+static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+
+ return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
+}
+
+/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
+/// a record type with either a non-trivial destructor or a non-trivial copy
+/// constructor.
+static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return hasNonTrivialDestructorOrCopyConstructor(RT);
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The type of the single non-empty field, if
+/// it exists.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, i->getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return 0;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(i->getType(), Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // Check for single element.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!isAggregateTypeForABI(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return 0;
+
+ return Found;
+}
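+
+// For illustration, each of these (hypothetical) types is a single-element
+// struct whose element is 'float':
+//   struct S1 { float f; };
+//   struct S2 { S1 inner; };
+//   struct S3 { float f[1]; };
+// whereas 'struct S4 { float f; float g; }' is not (two non-empty fields),
+// and neither is 'struct S5 { float f[2]; }' (the array length is not 1).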
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+ !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
+ !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have the equivalent layout if it were
+/// expanded into separate arguments. If so, we prefer to do the latter to avoid
+/// inhibiting optimizations.
+///
+// FIXME: This predicate is missing many cases; currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // We can only expand (C) structures.
+ //
+ // FIXME: This needs to be generalized to handle classes as well.
+ const RecordDecl *RD = RT->getDecl();
+ if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+ return false;
+
+ uint64_t Size = 0;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems: we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+
+ Size += Context.getTypeSize(FD->getType());
+ }
+
+ // Make sure there are not any holes in the struct.
+ if (Size != Context.getTypeSize(Ty))
+ return false;
+
+ return true;
+}
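+
+// By way of example (illustrative types): 'struct P { int x; float y; }'
+// can be expanded into two separate 32-bit arguments, while
+// 'struct Q { char c; }' cannot (char is not a 32/64-bit basic type) and
+// 'struct R { int x : 5; }' cannot (bit-field).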
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+};
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non trivial destructors/constructors should not be passed
+ // by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+/// UseX86_MMXType - Return true if this is an MMX type that should use the
+/// special x86_mmx type.
+bool UseX86_MMXType(llvm::Type *IRType) {
+ // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
+ // special x86_mmx type.
+ return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+ IRType->getScalarSizeInBits() != 64;
+}
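+
+// Examples (LLVM IR types, for illustration):
+//   <2 x i32>, <4 x i16>, <8 x i8> -> use x86_mmx
+//   <1 x i64>                      -> no (scalar size is 64 bits)
+//   <2 x float>                    -> no (element type is not an integer)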
+
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) {
+ if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
+ return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+ return Ty;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ static const unsigned MinABIStackAlignInBytes = 4;
+
+ bool IsDarwinVectorABI;
+ bool IsSmallStructInRegABI;
+ bool IsMMXDisabled;
+ bool IsWin32FloatStructABI;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
+ unsigned callingConvention);
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+
+ /// \brief Return the alignment to use for the given type on the stack.
+ unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
+public:
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
+ IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ bool d, bool p, bool m, bool w)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
+
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ // Darwin uses different dwarf register numbers for EH.
+ if (CGM.isTargetDarwin()) return 5;
+
+ return 4;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+};
+
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// passed in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context,
+ unsigned callingConvention) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128-bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context,
+ callingConvention);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // FIXME: Traverse bases here too.
+
+ // For thiscall conventions, structures will never be returned in
+ // a register. This is for compatibility with the MSVC ABI
+ if (callingConvention == llvm::CallingConv::X86_ThisCall &&
+ RT->isStructureType()) {
+ return false;
+ }
+
+ // Structure types are returned in a register if all fields would be
+ // returned in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
+ e = RT->getDecl()->field_end(); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context,
+ callingConvention))
+ return false;
+ }
+ return true;
+}
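+
+// A few illustrative cases (hypothetical types):
+//   struct { short a, b; }   // 32 bits, both fields qualify -> register
+//   struct { char a, b, c; } // 24 bits, not register sized  -> memory
+//   struct { int a, b; }     // 64 bits, both fields qualify -> register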
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getDirect(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // If specified, structs and unions are always indirect.
+ if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
+ callingConvention)) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // As a special-case, if the struct is a "single-element" struct, and
+ // the field is of type "float" or "double", return it in a
+ // floating-point register. (MSVC does not apply this special case.)
+ // We apply a similar transformation for pointer types to improve the
+ // quality of the generated IR.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
+ || SeltTy->hasPointerRepresentation())
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ // FIXME: We should be able to narrow this integer in cases with dead
+ // padding.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (isRecordWithSSEVectorType(Context, i->getType()))
+ return true;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ QualType FT = i->getType();
+
+ if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
+ return true;
+
+ if (isRecordWithSSEVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+ unsigned Align) const {
+ // If the alignment is less than or equal to the minimum ABI
+ // alignment, just use the default; the backend will handle this.
+ if (Align <= MinABIStackAlignInBytes)
+ return 0; // Use default alignment.
+
+ // On non-Darwin, the stack type alignment is always 4.
+ if (!IsDarwinVectorABI) {
+ // Set explicit alignment, since we may need to realign the top.
+ return MinABIStackAlignInBytes;
+ }
+
+ // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+ if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
+ return 16;
+
+ return MinABIStackAlignInBytes;
+}
+
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
+ if (!ByVal)
+ return ABIArgInfo::getIndirect(0, false);
+
+ // Compute the byval alignment.
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+ if (StackAlign == 0)
+ return ABIArgInfo::getIndirect(4);
+
+ // If the stack alignment is less than the type alignment, realign the
+ // argument.
+ if (StackAlign < TypeAlign)
+ return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
+ /*Realign=*/true);
+
+ return ABIArgInfo::getIndirect(StackAlign);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return getIndirectResult(Ty, /*ByVal=*/false);
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ if (getContext().getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, getContext()))
+ return ABIArgInfo::getExpand();
+
+ return getIndirectResult(Ty);
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Darwin, some vectors are passed in memory; we handle this by
+ // passing them as an i8/i16/i32/i64.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ if (UseX86_MMXType(IRType)) {
+ if (IsMMXDisabled)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ 64));
+ ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
+ AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
+ return AAI;
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+ // Compute whether the address needs to be aligned.
+ unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
+ Align = getTypeStackAlignInBytes(Ty, Align);
+ Align = std::max(Align, 4U);
+ if (Align > 4) {
+ // addr = (addr + align - 1) & -align;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
+ Addr = CGF.Builder.CreateGEP(Addr, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
+ CGF.Int32Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
+ Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ Addr->getType(),
+ "ap.cur.aligned");
+ }
+
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
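+
+// Conceptually, the sequence above lowers va_arg(ap, T) to the following
+// sketch, where alignUp and roundUp are hypothetical helpers:
+//
+//   char *cur = *(char **)ap;
+//   if (align > 4)
+//     cur = alignUp(cur, align);        // (cur + align - 1) & -align
+//   T *result = (T *)cur;
+//   *(char **)ap = cur + roundUp(sizeof(T), align);  // bump the list pointer
+//   return result;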
+
+void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ // Get the LLVM function.
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ // Now add the 'alignstack' attribute with a value of 16.
+ Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+ }
+ }
+}
+
+bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-7 are the eight integer registers; the order is different
+ // on Darwin (for EH), but the range is the same.
+ // 8 is %eip.
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
+
+ if (CGF.CGM.isTargetDarwin()) {
+ // 12-16 are st(0..4). Not sure why we stop at 4.
+ // These have size 16, which is sizeof(long double) on
+ // platforms with 8-byte alignment for that type.
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
+
+ } else {
+ // 9 is %eflags, which doesn't get a size on Darwin for some
+ // reason.
+ Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
+
+ // 11-16 are st(0..5). Not sure why we stop at 5.
+ // These have size 12, which is sizeof(long double) on
+ // platforms with 4-byte alignment for that type.
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ static Class merge(Class Accum, Class Field);
+
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post merger cleanup, reduces a malformed Hi and Lo pair to
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
+
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+
+ /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
+ /// result such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ///
+ /// \param freeIntRegs - The number of free integer registers remaining
+ /// available.
+ ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned freeIntRegs,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+ bool IsIllegalVectorType(QualType Ty) const;
+
+ /// The 0.98 ABI revision clarified a lot of ambiguities,
+ /// unfortunately in ways that were not always consistent with
+ /// certain previous compilers. In particular, platforms which
+ /// required strict binary compatibility with older versions of GCC
+ /// may need to exempt themselves.
+ bool honorsRevision0_98() const {
+ return !getContext().getTargetInfo().getTriple().isOSDarwin();
+ }
+
+ bool HasAVX;
+
+public:
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
+ ABIInfo(CGT), HasAVX(hasavx) {}
+
+ bool isPassedUsingAVXType(QualType type) const {
+ unsigned neededInt, neededSSE;
+ // The freeIntRegs argument doesn't matter here.
+ ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
+ if (info.isDirect()) {
+ llvm::Type *ty = info.getCoerceToType();
+ if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
+ return (vectorTy->getBitWidth() > 128);
+ }
+ return false;
+ }
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
+class WinX86_64ABIInfo : public ABIInfo {
+
+ ABIArgInfo classify(QualType Ty) const;
+
+public:
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+
+ const X86_64ABIInfo &getABIInfo() const {
+ return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
+ // The default CC on x86-64 sets %al to the number of SSE
+ // registers used, and GCC sets this when calling an unprototyped
+ // function, so we override the default behavior. However, don't do
+ // that when AVX types are involved: the ABI explicitly states it is
+ // undefined, and it doesn't work in practice because of how the ABI
+ // defines varargs anyway.
+ if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
+ bool HasAVXType = false;
+ for (CallArgList::const_iterator
+ it = args.begin(), ie = args.end(); it != ie; ++it) {
+ if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
+ HasAVXType = true;
+ break;
+ }
+ }
+
+ if (!HasAVXType)
+ return true;
+ }
+
+ return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
+ }
+
+};
+
+class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+};
+
+}
+
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ if (Field == Memory)
+ return Memory;
+ if (Accum == NoClass)
+ return Field;
+ if (Accum == Integer || Field == Integer)
+ return Integer;
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ return SSE;
+}
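+
+// As a worked example, take the (hypothetical) type
+// 'struct { int a; float b; }'. Its single eightbyte merges as
+//   merge(merge(NoClass, Integer), SSE) == merge(Integer, SSE) == Integer
+// by rule (d), so the whole struct is passed in a general-purpose register.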
+
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType())
+ Lo = Hi = Integer;
+ else
+ Current = Integer;
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128 || (HasAVX && Size == 256)) {
+ // Arguments of 256 bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256 bits, but since there are no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to match the 64-256 part.
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (ET->isIntegralOrEnumerationType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == getContext().FloatTy)
+ Current = SSE;
+ else if (ET == getContext().DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == getContext().LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+
+ return;
+ }
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
+ if (Size > 128 && EltSize != 256)
+ return;
+
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit wide vector could be used is when the struct
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
+ //
+ if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ Lo = Memory;
+ return;
+ }
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidthValue(getContext());
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ }
+}
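+
+// Some classifications implied by the rules above (x86-64, no AVX;
+// types are illustrative):
+//   struct { long a; double b; }   -> Lo = Integer, Hi = SSE
+//   struct { double a; double b; } -> Lo = SSE,     Hi = SSE
+//   long double                    -> Lo = X87,     Hi = X87Up
+//   _Complex long double           -> Lo = Hi = ComplexX87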
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VecTy);
+ unsigned LargestVector = HasAVX ? 256 : 128;
+ if (Size <= 64 || Size > LargestVector)
+ return true;
+ }
+
+ return false;
+}
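+
+// Illustrative cases:
+//   <2 x i8>    (16 bits)  -> illegal (Size <= 64)
+//   <4 x float> (128 bits) -> legal
+//   <8 x float> (256 bits) -> legal only when HasAVX is set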
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ unsigned freeIntRegs) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ //
+ // This assumption is optimistic, as there could be free registers available
+ // when we need to pass this argument in memory, and LLVM could try to pass
+ // the argument in the free register. This does not seem to happen currently,
+ // but this code would be much safer if we could mark the argument with
+ // 'onstack'. See PR12193.
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Compute the byval alignment. We specify the alignment of the byval in all
+ // cases so that the mid-level optimizer knows the alignment of the byval.
+ unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+ // Attempt to avoid passing indirect results using byval when possible. This
+ // is important for good codegen.
+ //
+ // We do this by coercing the value into a scalar type which the backend can
+ // handle naturally (i.e., without using byval).
+ //
+ // For simplicity, we currently only do this when we have exhausted all of the
+ // free integer registers. Doing this when there are free integer registers
+ // would require more care, as we would have to ensure that the coerced value
+ // did not claim the unused register. That would require either reordering the
+ // arguments to the function (so that any subsequent inreg values came first),
+ // or only doing this optimization when there were no following arguments that
+ // might be inreg.
+ //
+ // We currently expect it to be rare (particularly in well written code) for
+ // arguments to be passed on the stack when there are still free integer
+ // registers available (this would typically imply large structs being passed
+ // by value), so this seems like a fair tradeoff for now.
+ //
+ // We can revisit this if the backend grows support for 'onstack' parameter
+ // attributes. See PR12193.
+ if (freeIntRegs == 0) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // If this type fits in an eightbyte, coerce it into the matching integral
+ // type, which will end up on the stack (with alignment 8).
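+ // For example, once all six integer registers are taken, a struct { long x; }
+ // argument is coerced to i64 and passed on the stack rather than byval.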
+ if (Align == 8 && Size <= 64)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ return ABIArgInfo::getIndirect(Align);
+}
+
+/// GetByteVectorType - The ABI specifies that a value should be passed in a
+/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
+/// vector register.
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+
+ // Wrapper structs that just contain vectors are passed just like vectors;
+ // strip them off if present.
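+ // For example, struct { __m128 v; } is stripped down to the underlying
+ // <4 x float>.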
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ while (STy && STy->getNumElements() == 1) {
+ IRType = STy->getElementType(0);
+ STy = dyn_cast<llvm::StructType>(IRType);
+ }
+
+ // If the preferred type is a 128-bit to 256-bit vector, prefer to pass it.
+ if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
+ llvm::Type *EltTy = VT->getElementType();
+ unsigned BitWidth = VT->getBitWidth();
+ if ((BitWidth >= 128 && BitWidth <= 256) &&
+ (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
+ EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
+ EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
+ EltTy->isIntegerTy(128)))
+ return VT;
+ }
+
+ return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
+}
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to either be off the end of the specified type or be in
+/// alignment padding. The user type specified is known to be at most 128 bits
+/// in size, and to have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
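+///
+/// For example, for struct { short s; } (16 bits), a query over bits [16, 64)
+/// is entirely off the end of the type, so this returns true.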
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+ unsigned EndBit, ASTContext &Context) {
+ // If the bytes being queried are off the end of the type, there is no user
+ // data hiding here. This handles analysis of builtins, vectors and other
+ // types that don't contain interesting padding.
+ unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+ if (TySize <= StartBit)
+ return true;
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+ unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+ // Check each element to see if the element overlaps with the queried range.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // If the element is after the span we care about, then we're done.
+ unsigned EltOffset = i*EltSize;
+ if (EltOffset >= EndBit) break;
+
+ unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
+ if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+ EndBit-EltOffset, Context))
+ return false;
+ }
+ // If it overlaps no elements, then it is safe to process as padding.
+ return true;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // If the base is after the span we care about, ignore it.
+ unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
+ if (BaseOffset >= EndBit) continue;
+
+ unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
+ if (!BitsContainNoUserData(i->getType(), BaseStart,
+ EndBit-BaseOffset, Context))
+ return false;
+ }
+ }
+
+ // Verify that no field has data that overlaps the region of interest. Yes,
+ // this could be sped up a lot by being smarter about queried fields;
+ // however, we're only looking at structs up to 16 bytes, so we don't care
+ // much.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+ // If we found a field after the region we care about, then we're done.
+ if (FieldOffset >= EndBit) break;
+
+ unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
+ if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+ Context))
+ return false;
+ }
+
+ // If nothing in this record overlapped the area of interest, then we're
+ // clean.
+ return true;
+ }
+
+ return false;
+}
+
+/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
+/// float member at the specified offset. For example, {int,{float}} has a
+/// float at offset 4. It is conservatively correct for this routine to return
+/// false.
+static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
+ const llvm::TargetData &TD) {
+ // Base case if we find a float.
+ if (IROffset == 0 && IRType->isFloatTy())
+ return true;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Elt = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(Elt);
+ return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
+ }
+
+ // If this is an array, recurse into the field at the specified offset.
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = TD.getTypeAllocSize(EltTy);
+ IROffset -= IROffset/EltSize*EltSize;
+ return ContainsFloatAtOffset(EltTy, IROffset, TD);
+ }
+
+ return false;
+}
+
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // The only three choices we have are double, <2 x float>, or float. We
+ // pass as float if the last 4 bytes are just padding.  This happens for
+ // structs that contain 3 floats.
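+ // For example, struct { float x, y, z; } ends up passed as
+ // { <2 x float>, float }.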
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
+ SourceOffset*8+64, getContext()))
+ return llvm::Type::getFloatTy(getVMContext());
+
+ // We want to pass as <2 x float> if the LLVM IR type contains a float at
+ // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
+ // case.
+ if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+ return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+
+ return llvm::Type::getDoubleTy(getVMContext());
+}
+
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // If we're dealing with an un-offset LLVM IR type, then it means that we're
+ // returning an 8-byte unit starting with it. See if we can safely use it.
+ if (IROffset == 0) {
+ // Pointers and int64's always fill the 8-byte unit.
+ if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ return IRType;
+
+ // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+ // goodness in the source type is just tail padding. This is allowed to
+ // kick in for struct {double,int} on the int, but not on
+ // struct{double,int,int} because we wouldn't return the second int. We
+ // have to do this analysis on the source type because we can't depend on
+ // unions being lowered a specific way etc.
+ if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+ IRType->isIntegerTy(32)) {
+ unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+ SourceOffset*8+64, getContext()))
+ return IRType;
+ }
+ }
+
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+ if (IROffset < SL->getSizeInBytes()) {
+ unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(FieldIdx);
+
+ return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+ SourceTy, SourceOffset);
+ }
+ }
+
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+ unsigned EltOffset = IROffset/EltSize*EltSize;
+ return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+ SourceOffset);
+ }
+
+ // Okay, we don't have any better idea of what to pass, so we pass this in an
+ // integer register sized to fit the rest of the struct, but no larger.
+ unsigned TySizeInBytes =
+ (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+ assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+ // It is always safe to classify this as an integer type up to i64 that
+ // isn't larger than the structure.
+ return llvm::IntegerType::get(getVMContext(),
+ std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
+
+
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
+ const llvm::TargetData &TD) {
+ // In order to correctly satisfy the ABI, we need the high part to start
+ // at offset 8. If the high and low parts we inferred are both 4-byte types
+ // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+ // the second element at offset 8. Check for this:
+ unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+ unsigned HiAlign = TD.getABITypeAlignment(Hi);
+ unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+ assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+ // To handle this, we have to increase the size of the low part so that the
+ // second element will start at an 8 byte offset. We can't increase the size
+ // of the second element because it might make us access off the end of the
+ // struct.
+ if (HiStart != 8) {
+ // There are only two sorts of types the ABI generation code can produce for
+ // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
+ // Promote these to a larger type.
+ if (Lo->isFloatTy())
+ Lo = llvm::Type::getDoubleTy(Lo->getContext());
+ else {
+ assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
+ Lo = llvm::Type::getInt64Ty(Lo->getContext());
+ }
+ }
+
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
+
+ // Verify that the second element is at an 8-byte offset.
+ assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+ "Invalid x86-64 argument pair!");
+ return Result;
+}
+
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register; leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ RetTy->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
+ NULL);
+ break;
+ }
+
+ llvm::Type *HighPart = 0;
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass:
+ break;
+
+ case Integer:
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is returned in the next available eightbyte chunk of the last used
+ // vector register.
+ //
+ // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = GetByteVectorType(RetTy);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to be passed in the high eightbyte of the result. We do this by
+ // forming a first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(
+ QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
+ const
+{
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register; leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ ++neededInt;
+ return getIndirectResult(Ty, freeIntRegs);
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE: {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ ++neededSSE;
+ break;
+ }
+ }
+
+ llvm::Type *HighPart = 0;
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case NoClass: break;
+
+ case Integer:
+ ++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register. This only happens when 128-bit vectors are passed.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = GetByteVectorType(Ty);
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to be passed in the high eightbyte of the result. We do this by
+ // forming a first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
+ neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
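+ // For example, if only one integer register remains free but a 16-byte
+ // INTEGER-class struct needs two, the whole struct is passed in memory.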
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, freeIntRegs);
+ }
+ }
+}
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
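+ // For example, a 32-byte-aligned type (e.g. __m256 when AVX is enabled)
+ // would have overflow_arg_area rounded up to a 32 byte boundary here.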
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ CGF.Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not, go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+ // register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
+ "Unexpected ABI info for mixed regs");
+ llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, so we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ llvm::Type *DoubleTy = CGF.DoubleTy;
+ llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ llvm::StructType *ST = llvm::StructType::get(DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
+ "vaarg.addr");
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+ return ResAddr;
+}
+
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
+
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
+ RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // FIXME: mingw-w64-gcc emits 128-bit struct as i128
+ if (Size == 128 &&
+ getContext().getTargetInfo().getTriple().getOS()
+ == llvm::Triple::MinGW32)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
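+ // For example, a 4-byte struct is passed directly as i32, while a 3-byte
+ // struct (not a power of two) is passed by reference.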
+ if (Size <= 64 &&
+ (Size & (Size - 1)) == 0)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
+
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect();
+}
+
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ QualType RetTy = FI.getReturnType();
+ FI.getReturnInfo() = classify(RetTy);
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classify(it->type);
+}
+
+llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+// PowerPC-32
+
+namespace {
+class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75: cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
+
+ bool isEABI() const {
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
+ }
+
+private:
+ ABIKind getABIKind() const { return Kind; }
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+
+ const ARMABIInfo &getABIInfo() const {
+ return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 13;
+ }
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-15 are the 16 integer registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
+ return false;
+ }
+
+ unsigned getSizeOfUnwindException() const {
+ if (getABIInfo().isEABI()) return 88;
+ return TargetCodeGenInfo::getSizeOfUnwindException();
+ }
+};
+
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ // Calling convention used by default for this ABI.
+ llvm::CallingConv::ID DefaultCC;
+ if (isEABI())
+ DefaultCC = llvm::CallingConv::ARM_AAPCS;
+ else
+ DefaultCC = llvm::CallingConv::ARM_APCS;
+
+ // If the user did not explicitly ask for a specific calling convention (e.g.
+ // via the pcs attribute), set the effective calling convention if it differs
+ // from the ABI default.
+ switch (getABIKind()) {
+ case APCS:
+ if (DefaultCC != llvm::CallingConv::ARM_APCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ break;
+ case AAPCS:
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ break;
+ case AAPCS_VFP:
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+ break;
+ }
+}
+
+/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
+/// aggregate. If HAMembers is non-null, the number of base elements
+/// contained in the type is returned through it; this is used for the
+/// recursive calls that check aggregate component types.
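+///
+/// For example, struct { float x, y, z, w; } is a homogeneous aggregate of
+/// four floats, while struct { float f; double d; } is not (the base types
+/// differ) and struct { float v[5]; } is not (more than four members).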
+static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ ASTContext &Context,
+ uint64_t *HAMembers = 0) {
+ uint64_t Members = 0;
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
+ return false;
+ Members *= AT->getSize().getZExtValue();
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ Members = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
+ return false;
+
+ Members = (RD->isUnion() ?
+ std::max(Members, FldMembers) : Members + FldMembers);
+ }
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() != BuiltinType::Float &&
+ BT->getKind() != BuiltinType::Double)
+ return false;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = Context.getTypeSize(VT);
+ if (VecSize != 64 && VecSize != 128)
+ return false;
+ } else {
+ return false;
+ }
+
+ // The base type must be the same for all members. Vector types of the
+ // same total size are treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base)
+ Base = TyPtr;
+ if (Base != TyPtr &&
+ (!Base->isVectorType() || !TyPtr->isVectorType() ||
+ Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
+ return false;
+ }
+
+ // Homogeneous Aggregates can have at most 4 members of the base type.
+ if (HAMembers)
+ *HAMembers = Members;
+
+ return (Members > 0 && Members <= 4);
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
+ // Homogeneous Aggregates need to be expanded.
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(Ty, Base, getContext())) {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ return ABIArgInfo::getExpand();
+ }
+ }
+
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ //
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
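+ // For example, a 12-byte struct with 4-byte alignment is passed as
+ // { [3 x i32] }.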
+ llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (getContext().getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ }
+
+ llvm::Type *STy =
+ llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
+ return ABIArgInfo::getDirect(STy);
+}
+
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
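+ //
+ // For example, struct { int x; } is integer-like, but struct { char a, b; }
+ // is not, since b sits at a non-zero offset.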
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+ // Single element and zero sized arrays should be allowed, by the definition
+ // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, we only need to verify they are "integer
+ // like". We still have to disallow a subsequent non-bitfield, for example:
+ // struct { int : 0; int x; }
+ // is non-integer like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+ return ABIArgInfo::getIndirect(0);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Are we following APCS?
+ if (getABIKind() == APCS) {
+ if (isEmptyRecord(getContext(), RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ getContext().getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ // Otherwise return in memory.
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (getABIKind() == AAPCS_VFP) {
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(RetTy, Base, getContext())) {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ // Homogeneous Aggregates are returned directly.
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 32) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 &&
+ "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+//===----------------------------------------------------------------------===//
+// PTX ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class PTXABIInfo : public ABIInfo {
+public:
+ PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+ return ABIArgInfo::getDirect();
+}
+
+ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect();
+}
+
+void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ // Calling convention used by default for this ABI.
+ llvm::CallingConv::ID DefaultCC;
+ const LangOptions &LangOpts = getContext().getLangOpts();
+ if (LangOpts.OpenCL || LangOpts.CUDA) {
+ // If we are in OpenCL or CUDA mode, then default to device functions
+ DefaultCC = llvm::CallingConv::PTX_Device;
+ } else {
+ // If we are in standard C/C++ mode, use the triple to decide on the default
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ if (Env == "device")
+ DefaultCC = llvm::CallingConv::PTX_Device;
+ else
+ DefaultCC = llvm::CallingConv::PTX_Kernel;
+ }
+ FI.setEffectiveCallingConvention(DefaultCC);
+}
+
+llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm_unreachable("PTX does not support varargs");
+}
+
+void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Perform special handling in OpenCL mode
+ if (M.getLangOpts().OpenCL) {
+ // Use OpenCL function attributes to set proper calling conventions
+ // By default, all functions are device functions
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL __kernel functions get a kernel calling convention
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ // And kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+ }
+
+ // Perform special handling in CUDA mode.
+ if (M.getLangOpts().CUDA) {
+ // CUDA __global__ functions get a kernel calling convention. Since
+ // __global__ functions cannot be called from the device, we do not
+ // need to set the noinline attribute.
+ if (FD->getAttr<CUDAGlobalAttr>())
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ }
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// MBlaze ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MBlazeABIInfo : public ABIInfo {
+public:
+ MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // The MBlaze ABI requires all 8- and 16-bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+
+ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M)
+ const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::CallingConv::ID CC = llvm::CallingConv::C;
+ if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
+ CC = llvm::CallingConv::MBLAZE_INTR;
+ else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
+ CC = llvm::CallingConv::MBLAZE_SVOL;
+
+ if (CC != llvm::CallingConv::C) {
+ // Handle 'interrupt_handler' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(CC);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ // Step 3: Emit _interrupt_handler alias.
+ if (CC == llvm::CallingConv::MBLAZE_INTR)
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "_interrupt_handler", GV, &M.getModule());
+}
+
+
+//===----------------------------------------------------------------------===//
+// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+ // Step 2: Add the NoInline attribute.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() + 0xffe0;
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "vector_" + Twine::utohexstr(Num),
+ GV, &M.getModule());
+ }
+ }
+}
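+
+// For illustration: a hypothetical handler
+//   void __attribute__((interrupt(2))) timer_isr(void);
+// gets the MSP430_INTR calling convention plus noinline, and an alias named
+// "vector_ffe2" (2 + 0xffe0, rendered in lower-case hex by Twine::utohexstr)
+// through which the handler can be wired into the interrupt vector table.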
+
+//===----------------------------------------------------------------------===//
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MipsABIInfo : public ABIInfo {
+ bool IsO32;
+ unsigned MinABIStackAlignInBytes;
+ llvm::Type* HandleAggregates(QualType Ty) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
+public:
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+ unsigned SizeOfUnwindException;
+public:
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 29;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+
+ unsigned getSizeOfUnwindException() const {
+ return SizeOfUnwindException;
+ }
+};
+}
+
+// In N32/64, an aligned double-precision floating-point field is passed in
+// a register.
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
+ if (IsO32)
+ return 0;
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType())
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ uint64_t StructSize = getContext().getTypeSize(Ty);
+ assert(!(StructSize % 8) && "Size of structure must be a multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+ SmallVector<llvm::Type*, 8> ArgList;
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = (*i)->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ // This struct/class doesn't have an aligned double field.
+ if (!LastOffset)
+ return 0;
+
+ // Add ((StructSize - LastOffset) / 64) args of type i64.
+ for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
+ ArgList.push_back(I64);
+
+ // If the size of the remainder is not zero, add one more integer type to
+ // ArgList.
+ unsigned R = (StructSize - LastOffset) % 64;
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
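+
+// For illustration, under N32/64 a struct such as
+//   struct S { double d; int i; };   // 16 bytes, 'd' aligned at offset 0
+// walks the loop above as follows: 'd' is an aligned double, so a double is
+// emitted and LastOffset becomes 64; 'i' is not a double, so the trailing 64
+// bits ('i' plus tail padding) are flushed as a single i64. The expected
+// coerced type is therefore
+//   { double, i64 }
+// which keeps 'd' eligible for a floating-point register.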
+
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
+ // Padding is inserted only for N32/64.
+ if (IsO32)
+ return 0;
+
+ assert(Align <= 16 && "Alignment larger than 16 not handled.");
+ return (Align == 16 && Offset & 0xf) ?
+ llvm::IntegerType::get(getVMContext(), 64) : 0;
+}
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize =
+ llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+ Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
+ Offset += TySize;
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Ignore empty aggregates.
+ if (TySize == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Records with non-trivial destructors or copy constructors should not be
+ // passed by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
+ Offset = OrigOffset + 8;
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
+
+ // If we have reached here, aggregates are passed either indirectly via a
+ // byval pointer or directly by coercing to another structure type. In the
+ // latter case, padding is inserted if the offset of the aggregate is
+ // unaligned.
+ llvm::Type *ResType = HandleAggregates(Ty);
+
+ if (!ResType)
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
+}
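+
+// For illustration of the offset bookkeeping above: assume N64 and a call
+//   void f(int a, long double b);   // long double: 16-byte alignment
+// After 'a', Offset is 8. For 'b', Offset is rounded up to 16, and because
+// the original offset (8) was not 16-byte aligned, getPaddingType() supplies
+// an i64 pad so the value itself lands on an even register pair.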
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 2> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+ // 1. The size of the struct/class is no larger than 128-bit.
+ // 2. The struct/class has one or two fields all of which are floating
+ // point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType((*b)->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ RTList.push_back(llvm::IntegerType::get(getVMContext(),
+ std::min(Size, (uint64_t)64)));
+ if (Size > 64)
+ RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
+
+ return llvm::StructType::get(getVMContext(), RTList);
+}
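+
+// For illustration: a struct meeting all three conditions above, e.g.
+//   struct FPair { float f; double d; };   // 128 bits, both fields FP
+// should be returned as { float, double }, i.e. in floating-point registers,
+// whereas struct { float f; int i; } fails condition 2 and falls through to
+// the integer path, coming back as { i64 } for its 64-bit size.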
+
+ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType() || Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Offset);
+}
+
+llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped;
+ unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
+ llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
+
+ if (TypeAlign > MinABIStackAlignInBytes) {
+ llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
+ llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
+ llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
+ llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
+ llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
+ AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
+ }
+ else
+ AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
+ TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
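+
+// For illustration of the realignment above: reading a 16-byte-aligned type
+// when ap.cur sits at an address ending in 0x8 computes, via the Add/And
+// pair,
+//   aligned = (ap.cur + 15) & ~15
+// and then advances ap by the type size rounded up to the (clamped)
+// alignment so the next va_arg starts at a clean slot.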
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This information comes from gcc's implementation, which seems to be
+ // as canonical as it gets.
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
+// Currently subclassed only to implement custom OpenCL C function attribute
+// handling.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ TCETargetCodeGenInfo(CodeGenTypes &CGT)
+ : DefaultTargetCodeGenInfo(CGT) {}
+
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (M.getLangOpts().OpenCL) {
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL C Kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
+
+ // Convert the reqd_work_group_size() attributes to metadata.
+ llvm::LLVMContext &Context = F->getContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
+
+ SmallVector<llvm::Value*, 5> Operands;
+ Operands.push_back(F);
+
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
+
+ // Add a boolean constant operand for "required" (true) or "hint" (false)
+ // for implementing the work_group_size_hint attr later. Currently
+ // always true as the hint is not yet implemented.
+ Operands.push_back(llvm::ConstantInt::getTrue(Context));
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
+ }
+ }
+ }
+}
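+
+// For illustration: a kernel declared (hypothetically) as
+//   __kernel __attribute__((reqd_work_group_size(8, 4, 2))) void k(void);
+// produces one operand list in !opencl.kernel_wg_size_info, roughly
+//   !{void ()* @k, i32 8, i32 4, i32 2, i1 true}
+// with the trailing i1 true marking the size as required rather than a hint.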
+
+}
+
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public ABIInfo {
+public:
+ HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 29;
+ }
+};
+
+}
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 64)
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ // Pass in the smallest viable integer type.
+ else if (Size > 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ else if (Size > 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ else if (Size > 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ else
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+}
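+
+// For illustration: a 6-byte struct (48 bits) takes the Size > 32 branch
+// above and is coerced to i64, while struct { short a, b; } (32 bits)
+// becomes i32; only aggregates larger than 8 bytes go to memory via the
+// byval pointer.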
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
+ return ABIArgInfo::getIndirect(0);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates up to 8 bytes are returned in registers; larger aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+}
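+
+// For illustration: struct { int a, b; } is exactly 64 bits, so it should
+// come back directly as an i64 (a register pair), whereas a 9-byte struct
+// exceeds 64 bits and is returned through the hidden sret pointer instead.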
+
+llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
+ if (TheTargetCodeGenInfo)
+ return *TheTargetCodeGenInfo;
+
+ const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
+ switch (Triple.getArch()) {
+ default:
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ {
+ ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
+
+ if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
+ Kind = ARMABIInfo::APCS;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Kind = ARMABIInfo::AAPCS_VFP;
+
+ return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+ }
+
+ case llvm::Triple::ppc:
+ return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+
+ case llvm::Triple::ptx32:
+ case llvm::Triple::ptx64:
+ return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
+
+ case llvm::Triple::mblaze:
+ return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
+
+ case llvm::Triple::msp430:
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
+
+ case llvm::Triple::tce:
+ return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
+
+ case llvm::Triple::x86: {
+ bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;
+
+ if (Triple.isOSDarwin())
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, true, true, DisableMMX, false));
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Cygwin:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::AuroraUX:
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, false));
+
+ case llvm::Triple::Win32:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, true));
+
+ default:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, false, DisableMMX, false));
+ }
+ }
+
+ case llvm::Triple::x86_64: {
+ bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::Cygwin:
+ return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ default:
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
+ HasAVX));
+ }
+ }
+ case llvm::Triple::hexagon:
+ return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
+ }
+}
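+
+// For illustration of the dispatch above: a module targeting, say,
+// mips64el-unknown-linux-gnu selects MIPSTargetCodeGenInfo with IsO32=false
+// (N32/64 rules, 32-byte _Unwind_Exception), while an architecture not
+// listed falls back to DefaultTargetCodeGenInfo; the result is cached in
+// TheTargetCodeGenInfo, so the switch runs at most once per module.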
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..88b4997
--- /dev/null
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,170 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_TARGETINFO_H
+#define CLANG_CODEGEN_TARGETINFO_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+ class GlobalValue;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+ class ABIInfo;
+ class Decl;
+
+ namespace CodeGen {
+ class CallArgList;
+ class CodeGenModule;
+ class CodeGenFunction;
+ class CGFunctionInfo;
+ }
+
+ /// TargetCodeGenInfo - This class organizes various target-specific
+ /// code-generation issues, such as target-specific attributes, builtins,
+ /// and so on.
+ class TargetCodeGenInfo {
+ ABIInfo *Info;
+ public:
+ // WARNING: Takes ownership of the given ABIInfo.
+ TargetCodeGenInfo(ABIInfo *info = 0) : Info(info) {}
+ virtual ~TargetCodeGenInfo();
+
+ /// getABIInfo() - Returns ABI info helper for the target.
+ const ABIInfo& getABIInfo() const { return *Info; }
+
+ /// SetTargetAttributes - Provides a convenient hook to handle extra
+ /// target-specific attributes for the given global.
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const { }
+
+ /// Determines the size of struct _Unwind_Exception on this platform,
+ /// in 8-bit units. The Itanium ABI defines this as:
+ /// struct _Unwind_Exception {
+ /// uint64 exception_class;
+ /// _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ /// uint64 private_1;
+ /// uint64 private_2;
+ /// };
+ virtual unsigned getSizeOfUnwindException() const;
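+
+ /// For illustration: with the four fields above on an LP64 target this
+ /// comes to 8 + 8 + 8 + 8 = 32 bytes, which is what the base
+ /// implementation is expected to return; targets whose unwinder uses a
+ /// smaller layout override it (e.g. MIPS O32 reports 24 in TargetInfo.cpp).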
+
+ /// Controls whether __builtin_extend_pointer should sign-extend
+ /// pointers to uint64_t or zero-extend them (the default). Has
+ /// no effect for targets:
+ /// - that have 64-bit pointers, or
+ /// - that cannot address through registers larger than pointers, or
+ /// - that implicitly ignore/truncate the top bits when addressing
+ /// through such registers.
+ virtual bool extendPointerWithSExt() const { return false; }
+
+ /// Determines the DWARF register number for the stack pointer, for
+ /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
+ ///
+ /// Returns -1 if the operation is unsupported by this target.
+ virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return -1;
+ }
+
+ /// Initializes the given DWARF EH register-size table, a char*.
+ /// Implements __builtin_init_dwarf_reg_size_table.
+ ///
+ /// Returns true if the operation is unsupported by this target.
+ virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return true;
+ }
+
+ /// Performs the code-generation required to convert a return
+ /// address as stored by the system into the actual address of the
+ /// next instruction that will be executed.
+ ///
+ /// Used by __builtin_extract_return_addr().
+ virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Performs the code-generation required to convert the address
+ /// of an instruction into a return address suitable for storage
+ /// by the system in a return slot.
+ ///
+ /// Used by __builtin_frob_return_addr().
+ virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ virtual llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const {
+ return Ty;
+ }
+
+ /// Retrieve the address of a function to call immediately before
+ /// calling objc_retainAutoreleasedReturnValue. The
+ /// implementation of objc_autoreleaseReturnValue sniffs the
+ /// instruction stream following its return address to decide
+ /// whether it's a call to objc_retainAutoreleasedReturnValue.
+ /// This can be prohibitively expensive, depending on the
+ /// relocation model, and so on some targets it instead sniffs for
+ /// a particular instruction sequence. This function returns
+ /// that instruction sequence in inline assembly, which will be
+ /// empty if none is required.
+ virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "";
+ }
+
+ /// Determine whether a call to an unprototyped function under
+ /// the given calling convention should use the variadic
+ /// convention or the non-variadic convention.
+ ///
+ /// There's a good reason to make a platform's variadic calling
+ /// convention be different from its non-variadic calling
+ /// convention: the non-variadic arguments can be passed in
+ /// registers (better for performance), and the variadic arguments
+ /// can be passed on the stack (also better for performance). If
+ /// this is done, however, unprototyped functions *must* use the
+ /// non-variadic convention, because C99 states that a call
+ /// through an unprototyped function type must succeed if the
+ /// function was defined with a non-variadic prototype with
+ /// compatible parameters. Therefore, splitting the conventions
+ /// makes it impossible to call a variadic function through an
+ /// unprototyped type. Since function prototypes came out in the
+ /// late 1970s, this is probably an acceptable trade-off.
+ /// Nonetheless, not all platforms are willing to make it, and in
+ /// particular x86-64 bends over backwards to make the
+ /// conventions compatible.
+ ///
+ /// The default is false. This is correct whenever:
+ /// - the conventions are exactly the same, because it does not
+ /// matter and the resulting IR will be somewhat prettier in
+ /// certain cases; or
+ /// - the conventions are substantively different in how they pass
+ /// arguments, because in this case using the variadic convention
+ /// will lead to C99 violations.
+ /// It is not necessarily correct when arguments are passed in the
+ /// same way and some out-of-band information is passed for the
+ /// benefit of variadic callees, as is the case for x86-64.
+ /// In this case the ABI should be consulted.
+ virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
+ const FunctionNoProtoType *fnType) const;
+ };
+}
+
+#endif // CLANG_CODEGEN_TARGETINFO_H