path: root/include/llvm/ExecutionEngine/Orc
author     Dimitry Andric <dim@FreeBSD.org>  2015-05-27 18:44:32 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2015-05-27 18:44:32 +0000
commit     5a5ac124e1efaf208671f01c46edb15f29ed2a0b (patch)
tree       a6140557876943cdd800ee997c9317283394b22c /include/llvm/ExecutionEngine/Orc
parent     f03b5bed27d0d2eafd68562ce14f8b5e3f1f0801 (diff)
Diffstat (limited to 'include/llvm/ExecutionEngine/Orc')
-rw-r--r--  include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h  547
-rw-r--r--  include/llvm/ExecutionEngine/Orc/CompileUtils.h            62
-rw-r--r--  include/llvm/ExecutionEngine/Orc/ExecutionUtils.h         182
-rw-r--r--  include/llvm/ExecutionEngine/Orc/IRCompileLayer.h         148
-rw-r--r--  include/llvm/ExecutionEngine/Orc/IRTransformLayer.h       101
-rw-r--r--  include/llvm/ExecutionEngine/Orc/IndirectionUtils.h       295
-rw-r--r--  include/llvm/ExecutionEngine/Orc/JITSymbol.h               77
-rw-r--r--  include/llvm/ExecutionEngine/Orc/LambdaResolver.h          62
-rw-r--r--  include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h      304
-rw-r--r--  include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h     284
-rw-r--r--  include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h        53
11 files changed, 2115 insertions(+), 0 deletions(-)
diff --git a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
new file mode 100644
index 0000000000000..719adbc562c21
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -0,0 +1,547 @@
+//===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// JIT layer for breaking up modules and inserting callbacks to allow
+// individual functions to be compiled on demand.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+
+//#include "CloneSubModule.h"
+#include "IndirectionUtils.h"
+#include "LambdaResolver.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <list>
+#include <set>
+
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief Compile-on-demand layer.
+///
+/// When a module is added to this layer a stub is created for each of its
+/// function definitions. The stubs and other global values are immediately
+/// added to the layer below. When a stub is called it triggers the extraction
+/// of the function body from the original module. The extracted body is then
+/// compiled and executed.
+template <typename BaseLayerT, typename CompileCallbackMgrT>
+class CompileOnDemandLayer {
+private:
+
+ // Utility class for MapValue. Only materializes declarations for global
+ // variables.
+ class GlobalDeclMaterializer : public ValueMaterializer {
+ public:
+ GlobalDeclMaterializer(Module &Dst) : Dst(Dst) {}
+ Value* materializeValueFor(Value *V) final {
+ if (auto *GV = dyn_cast<GlobalVariable>(V))
+ return cloneGlobalVariableDecl(Dst, *GV);
+ else if (auto *F = dyn_cast<Function>(V))
+ return cloneFunctionDecl(Dst, *F);
+ // Else.
+ return nullptr;
+ }
+ private:
+ Module &Dst;
+ };
+
+ typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
+ class UncompiledPartition;
+
+ // Logical module.
+ //
+ // This struct contains the handles for the global values and stubs (which
+  // cover the external symbols of the original module), plus the handles for
+  // each of the extracted partitions. These handles are used for lookup (only
+ // the globals/stubs module is searched) and memory management. The actual
+ // searching and resource management are handled by the LogicalDylib that owns
+ // the LogicalModule.
+ struct LogicalModule {
+ LogicalModule() {}
+
+ LogicalModule(LogicalModule &&Other)
+ : SrcM(std::move(Other.SrcM)),
+ GVsAndStubsHandle(std::move(Other.GVsAndStubsHandle)),
+ ImplHandles(std::move(Other.ImplHandles)) {}
+
+ std::unique_ptr<Module> SrcM;
+ BaseLayerModuleSetHandleT GVsAndStubsHandle;
+ std::vector<BaseLayerModuleSetHandleT> ImplHandles;
+ };
+
+ // Logical dylib.
+ //
+ // This class handles symbol resolution and resource management for a set of
+ // modules that were added together as a logical dylib.
+ //
+ // A logical dylib contains one-or-more LogicalModules plus a set of
+ // UncompiledPartitions. LogicalModules support symbol resolution and resource
+  // management for code that has already been emitted. UncompiledPartitions
+ // represent code that has not yet been compiled.
+ class LogicalDylib {
+ private:
+ friend class UncompiledPartition;
+ typedef std::list<LogicalModule> LogicalModuleList;
+ public:
+
+ typedef unsigned UncompiledPartitionID;
+ typedef typename LogicalModuleList::iterator LMHandle;
+
+ // Construct a logical dylib.
+ LogicalDylib(CompileOnDemandLayer &CODLayer) : CODLayer(CODLayer) { }
+
+ // Delete this logical dylib, release logical module resources.
+ virtual ~LogicalDylib() {
+ releaseLogicalModuleResources();
+ }
+
+ // Get a reference to the containing layer.
+ CompileOnDemandLayer& getCODLayer() { return CODLayer; }
+
+ // Get a reference to the base layer.
+ BaseLayerT& getBaseLayer() { return CODLayer.BaseLayer; }
+
+ // Start a new context for a single logical module.
+ LMHandle createLogicalModule() {
+ LogicalModules.push_back(LogicalModule());
+ return std::prev(LogicalModules.end());
+ }
+
+ // Set the global-values-and-stubs module handle for this logical module.
+ void setGVsAndStubsHandle(LMHandle LMH, BaseLayerModuleSetHandleT H) {
+ LMH->GVsAndStubsHandle = H;
+ }
+
+ // Return the global-values-and-stubs module handle for this logical module.
+ BaseLayerModuleSetHandleT getGVsAndStubsHandle(LMHandle LMH) {
+ return LMH->GVsAndStubsHandle;
+ }
+
+ // Add a handle to a module containing lazy function bodies to the given
+ // logical module.
+ void addToLogicalModule(LMHandle LMH, BaseLayerModuleSetHandleT H) {
+ LMH->ImplHandles.push_back(H);
+ }
+
+ // Create an UncompiledPartition attached to this LogicalDylib.
+ UncompiledPartition& createUncompiledPartition(LMHandle LMH,
+ std::shared_ptr<Module> SrcM);
+
+ // Take ownership of the given UncompiledPartition from the logical dylib.
+ std::unique_ptr<UncompiledPartition>
+ takeUPOwnership(UncompiledPartitionID ID);
+
+ // Look up a symbol in this context.
+ JITSymbol findSymbolInternally(LMHandle LMH, const std::string &Name) {
+ if (auto Symbol = getBaseLayer().findSymbolIn(LMH->GVsAndStubsHandle,
+ Name, false))
+ return Symbol;
+
+ for (auto I = LogicalModules.begin(), E = LogicalModules.end(); I != E;
+ ++I)
+ if (I != LMH)
+ if (auto Symbol = getBaseLayer().findSymbolIn(I->GVsAndStubsHandle,
+ Name, false))
+ return Symbol;
+
+ return nullptr;
+ }
+
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ for (auto &LM : LogicalModules)
+ if (auto Symbol = getBaseLayer().findSymbolIn(LM.GVsAndStubsHandle,
+ Name,
+ ExportedSymbolsOnly))
+ return Symbol;
+ return nullptr;
+ }
+
+ // Find an external symbol (via the user supplied SymbolResolver).
+ virtual RuntimeDyld::SymbolInfo
+ findSymbolExternally(const std::string &Name) const = 0;
+
+ private:
+
+ void releaseLogicalModuleResources() {
+ for (auto I = LogicalModules.begin(), E = LogicalModules.end(); I != E;
+ ++I) {
+ getBaseLayer().removeModuleSet(I->GVsAndStubsHandle);
+ for (auto H : I->ImplHandles)
+ getBaseLayer().removeModuleSet(H);
+ }
+ }
+
+ CompileOnDemandLayer &CODLayer;
+ LogicalModuleList LogicalModules;
+ std::vector<std::unique_ptr<UncompiledPartition>> UncompiledPartitions;
+ };
+
+ template <typename ResolverPtrT>
+ class LogicalDylibImpl : public LogicalDylib {
+ public:
+ LogicalDylibImpl(CompileOnDemandLayer &CODLayer, ResolverPtrT Resolver)
+ : LogicalDylib(CODLayer), Resolver(std::move(Resolver)) {}
+
+ RuntimeDyld::SymbolInfo
+ findSymbolExternally(const std::string &Name) const override {
+ return Resolver->findSymbol(Name);
+ }
+
+ private:
+ ResolverPtrT Resolver;
+ };
+
+ template <typename ResolverPtrT>
+ static std::unique_ptr<LogicalDylib>
+ createLogicalDylib(CompileOnDemandLayer &CODLayer,
+ ResolverPtrT Resolver) {
+ typedef LogicalDylibImpl<ResolverPtrT> Impl;
+ return llvm::make_unique<Impl>(CODLayer, std::move(Resolver));
+ }
+
+ // Uncompiled partition.
+ //
+ // Represents one as-yet uncompiled portion of a module.
+ class UncompiledPartition {
+ public:
+
+ struct PartitionEntry {
+ PartitionEntry(Function *F, TargetAddress CallbackID)
+ : F(F), CallbackID(CallbackID) {}
+ Function *F;
+ TargetAddress CallbackID;
+ };
+
+ typedef std::vector<PartitionEntry> PartitionEntryList;
+
+    // Create an uncompiled partition for functions from SrcM. The partition's
+    // function list is set later via setPartitionEntries.
+ UncompiledPartition(LogicalDylib &LD, typename LogicalDylib::LMHandle LMH,
+ std::shared_ptr<Module> SrcM)
+ : LD(LD), LMH(LMH), SrcM(std::move(SrcM)), ID(~0U) {}
+
+ ~UncompiledPartition() {
+ // FIXME: When we want to support threaded lazy compilation we'll need to
+ // lock the callback manager here.
+ auto &CCMgr = LD.getCODLayer().CompileCallbackMgr;
+ for (auto PEntry : PartitionEntries)
+ CCMgr.releaseCompileCallback(PEntry.CallbackID);
+ }
+
+ // Set the ID for this partition.
+ void setID(typename LogicalDylib::UncompiledPartitionID ID) {
+ this->ID = ID;
+ }
+
+ // Set the function set and callbacks for this partition.
+ void setPartitionEntries(PartitionEntryList PartitionEntries) {
+ this->PartitionEntries = std::move(PartitionEntries);
+ }
+
+ // Handle a compile callback for the function at index FnIdx.
+ TargetAddress compile(unsigned FnIdx) {
+ // Take ownership of self. This will ensure we delete the partition and
+ // free all its resources once we're done compiling.
+ std::unique_ptr<UncompiledPartition> This = LD.takeUPOwnership(ID);
+
+ // Release all other compile callbacks for this partition.
+ // We skip the callback for this function because that's the one that
+ // called us, and the callback manager will already have removed it.
+ auto &CCMgr = LD.getCODLayer().CompileCallbackMgr;
+ for (unsigned I = 0; I < PartitionEntries.size(); ++I)
+ if (I != FnIdx)
+ CCMgr.releaseCompileCallback(PartitionEntries[I].CallbackID);
+
+ // Grab the name of the function being called here.
+ Function *F = PartitionEntries[FnIdx].F;
+ std::string CalledFnName = Mangle(F->getName(), SrcM->getDataLayout());
+
+ // Extract the function and add it to the base layer.
+ auto PartitionImplH = emitPartition();
+ LD.addToLogicalModule(LMH, PartitionImplH);
+
+ // Update body pointers.
+ // FIXME: When we start supporting remote lazy jitting this will need to
+ // be replaced with a user-supplied callback for updating the
+ // remote pointers.
+ TargetAddress CalledAddr = 0;
+ for (unsigned I = 0; I < PartitionEntries.size(); ++I) {
+ auto F = PartitionEntries[I].F;
+ std::string FName(F->getName());
+ auto FnBodySym =
+ LD.getBaseLayer().findSymbolIn(PartitionImplH,
+ Mangle(FName, SrcM->getDataLayout()),
+ false);
+ auto FnPtrSym =
+ LD.getBaseLayer().findSymbolIn(LD.getGVsAndStubsHandle(LMH),
+ Mangle(FName + "$orc_addr",
+ SrcM->getDataLayout()),
+ false);
+ assert(FnBodySym && "Couldn't find function body.");
+ assert(FnPtrSym && "Couldn't find function body pointer.");
+
+ auto FnBodyAddr = FnBodySym.getAddress();
+ void *FnPtrAddr = reinterpret_cast<void*>(
+ static_cast<uintptr_t>(FnPtrSym.getAddress()));
+
+ // If this is the function we're calling record the address so we can
+ // return it from this function.
+ if (I == FnIdx)
+ CalledAddr = FnBodyAddr;
+
+ memcpy(FnPtrAddr, &FnBodyAddr, sizeof(uintptr_t));
+ }
+
+ // Finally, clear the partition structure so we don't try to
+ // double-release the callbacks in the UncompiledPartition destructor.
+ PartitionEntries.clear();
+
+ return CalledAddr;
+ }
+
+ private:
+
+ BaseLayerModuleSetHandleT emitPartition() {
+ // Create the module.
+ std::string NewName(SrcM->getName());
+ for (auto &PEntry : PartitionEntries) {
+ NewName += ".";
+ NewName += PEntry.F->getName();
+ }
+ auto PM = llvm::make_unique<Module>(NewName, SrcM->getContext());
+ PM->setDataLayout(SrcM->getDataLayout());
+ ValueToValueMapTy VMap;
+ GlobalDeclMaterializer GDM(*PM);
+
+ // Create decls in the new module.
+ for (auto &PEntry : PartitionEntries)
+ cloneFunctionDecl(*PM, *PEntry.F, &VMap);
+
+ // Move the function bodies.
+ for (auto &PEntry : PartitionEntries)
+ moveFunctionBody(*PEntry.F, VMap);
+
+ // Create memory manager and symbol resolver.
+ auto MemMgr = llvm::make_unique<SectionMemoryManager>();
+ auto Resolver = createLambdaResolver(
+ [this](const std::string &Name) {
+ if (auto Symbol = LD.findSymbolInternally(LMH, Name))
+ return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
+ Symbol.getFlags());
+ return LD.findSymbolExternally(Name);
+ },
+ [this](const std::string &Name) {
+ if (auto Symbol = LD.findSymbolInternally(LMH, Name))
+ return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
+ Symbol.getFlags());
+ return RuntimeDyld::SymbolInfo(nullptr);
+ });
+ std::vector<std::unique_ptr<Module>> PartMSet;
+ PartMSet.push_back(std::move(PM));
+ return LD.getBaseLayer().addModuleSet(std::move(PartMSet),
+ std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ LogicalDylib &LD;
+ typename LogicalDylib::LMHandle LMH;
+ std::shared_ptr<Module> SrcM;
+ typename LogicalDylib::UncompiledPartitionID ID;
+ PartitionEntryList PartitionEntries;
+ };
+
+ typedef std::list<std::unique_ptr<LogicalDylib>> LogicalDylibList;
+
+public:
+ /// @brief Handle to a set of loaded modules.
+ typedef typename LogicalDylibList::iterator ModuleSetHandleT;
+
+ /// @brief Construct a compile-on-demand layer instance.
+ CompileOnDemandLayer(BaseLayerT &BaseLayer, CompileCallbackMgrT &CallbackMgr)
+ : BaseLayer(BaseLayer), CompileCallbackMgr(CallbackMgr) {}
+
+ /// @brief Add a module to the compile-on-demand layer.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+
+ assert(MemMgr == nullptr &&
+ "User supplied memory managers not supported with COD yet.");
+
+ LogicalDylibs.push_back(createLogicalDylib(*this, std::move(Resolver)));
+
+ // Process each of the modules in this module set.
+ for (auto &M : Ms) {
+ std::vector<std::vector<Function*>> Partitioning;
+ for (auto &F : *M) {
+ if (F.isDeclaration())
+ continue;
+ Partitioning.push_back(std::vector<Function*>());
+ Partitioning.back().push_back(&F);
+ }
+ addLogicalModule(*LogicalDylibs.back(),
+ std::shared_ptr<Module>(std::move(M)),
+ std::move(Partitioning));
+ }
+
+ return std::prev(LogicalDylibs.end());
+ }
+
+ /// @brief Remove the module represented by the given handle.
+ ///
+ /// This will remove all modules in the layers below that were derived from
+ /// the module represented by H.
+ void removeModuleSet(ModuleSetHandleT H) {
+ LogicalDylibs.erase(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of a symbol provided by this layer, or some layer
+ /// below this one.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return (*H)->findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+private:
+
+ void addLogicalModule(LogicalDylib &LD, std::shared_ptr<Module> SrcM,
+ std::vector<std::vector<Function*>> Partitions) {
+
+    // Bump the linkage and rename any anonymous/private members in SrcM to
+ // ensure that everything will resolve properly after we partition SrcM.
+ makeAllSymbolsExternallyAccessible(*SrcM);
+
+ // Create a logical module handle for SrcM within the logical dylib.
+ auto LMH = LD.createLogicalModule();
+
+ // Create the GVs-and-stubs module.
+ auto GVsAndStubsM = llvm::make_unique<Module>(
+ (SrcM->getName() + ".globals_and_stubs").str(),
+ SrcM->getContext());
+ GVsAndStubsM->setDataLayout(SrcM->getDataLayout());
+ ValueToValueMapTy VMap;
+
+ // Process partitions and create stubs.
+ // We create the stubs before copying the global variables as we know the
+ // stubs won't refer to any globals (they only refer to their implementation
+    // pointer), so there are no ordering/value-mapping issues.
+ for (auto& Partition : Partitions) {
+ auto &UP = LD.createUncompiledPartition(LMH, SrcM);
+ typename UncompiledPartition::PartitionEntryList PartitionEntries;
+ for (auto &F : Partition) {
+ assert(!F->isDeclaration() &&
+ "Partition should only contain definitions");
+ unsigned FnIdx = PartitionEntries.size();
+ auto CCI = CompileCallbackMgr.getCompileCallback(SrcM->getContext());
+ PartitionEntries.push_back(
+ typename UncompiledPartition::PartitionEntry(F, CCI.getAddress()));
+ Function *StubF = cloneFunctionDecl(*GVsAndStubsM, *F, &VMap);
+ GlobalVariable *FnBodyPtr =
+ createImplPointer(*StubF->getType(), *StubF->getParent(),
+ StubF->getName() + "$orc_addr",
+ createIRTypedAddress(*StubF->getFunctionType(),
+ CCI.getAddress()));
+ makeStub(*StubF, *FnBodyPtr);
+ CCI.setCompileAction([&UP, FnIdx]() { return UP.compile(FnIdx); });
+ }
+
+ UP.setPartitionEntries(std::move(PartitionEntries));
+ }
+
+ // Now clone the global variable declarations.
+ GlobalDeclMaterializer GDMat(*GVsAndStubsM);
+ for (auto &GV : SrcM->globals())
+ if (!GV.isDeclaration())
+ cloneGlobalVariableDecl(*GVsAndStubsM, GV, &VMap);
+
+ // Then clone the initializers.
+ for (auto &GV : SrcM->globals())
+ if (!GV.isDeclaration())
+ moveGlobalVariableInitializer(GV, VMap, &GDMat);
+
+ // Build a resolver for the stubs module and add it to the base layer.
+ auto GVsAndStubsResolver = createLambdaResolver(
+ [&LD](const std::string &Name) {
+ if (auto Symbol = LD.findSymbol(Name, false))
+ return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
+ Symbol.getFlags());
+ return LD.findSymbolExternally(Name);
+ },
+ [&LD](const std::string &Name) {
+ return RuntimeDyld::SymbolInfo(nullptr);
+ });
+
+ std::vector<std::unique_ptr<Module>> GVsAndStubsMSet;
+ GVsAndStubsMSet.push_back(std::move(GVsAndStubsM));
+ auto GVsAndStubsH =
+ BaseLayer.addModuleSet(std::move(GVsAndStubsMSet),
+ llvm::make_unique<SectionMemoryManager>(),
+ std::move(GVsAndStubsResolver));
+ LD.setGVsAndStubsHandle(LMH, GVsAndStubsH);
+ }
+
+ static std::string Mangle(StringRef Name, const DataLayout &DL) {
+ Mangler M(&DL);
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ M.getNameWithPrefix(MangledNameStream, Name);
+ }
+ return MangledName;
+ }
+
+ BaseLayerT &BaseLayer;
+ CompileCallbackMgrT &CompileCallbackMgr;
+ LogicalDylibList LogicalDylibs;
+};
+
+template <typename BaseLayerT, typename CompileCallbackMgrT>
+typename CompileOnDemandLayer<BaseLayerT, CompileCallbackMgrT>::
+ UncompiledPartition&
+CompileOnDemandLayer<BaseLayerT, CompileCallbackMgrT>::LogicalDylib::
+ createUncompiledPartition(LMHandle LMH, std::shared_ptr<Module> SrcM) {
+ UncompiledPartitions.push_back(
+ llvm::make_unique<UncompiledPartition>(*this, LMH, std::move(SrcM)));
+ UncompiledPartitions.back()->setID(UncompiledPartitions.size() - 1);
+ return *UncompiledPartitions.back();
+}
+
+template <typename BaseLayerT, typename CompileCallbackMgrT>
+std::unique_ptr<typename CompileOnDemandLayer<BaseLayerT, CompileCallbackMgrT>::
+ UncompiledPartition>
+CompileOnDemandLayer<BaseLayerT, CompileCallbackMgrT>::LogicalDylib::
+ takeUPOwnership(UncompiledPartitionID ID) {
+
+ std::swap(UncompiledPartitions[ID], UncompiledPartitions.back());
+ UncompiledPartitions[ID]->setID(ID);
+ auto UP = std::move(UncompiledPartitions.back());
+ UncompiledPartitions.pop_back();
+ return UP;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
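A rough usage sketch (editorial, not part of the patch): stacking a CompileOnDemandLayer on top of an eager IRCompileLayer/ObjectLinkingLayer pipeline. TM, Ctx, M and Resolver are assumed to exist, and OrcX86_64 is assumed to be the target-support class from OrcTargetSupport.h; error handling and name mangling are omitted for brevity.

  ObjectLinkingLayer<> ObjLayer;
  IRCompileLayer<ObjectLinkingLayer<>> CompileLayer(ObjLayer, SimpleCompiler(*TM));
  SectionMemoryManager CallbackMemMgr;
  JITCompileCallbackManager<decltype(CompileLayer), OrcX86_64>
      CCMgr(CompileLayer, CallbackMemMgr, Ctx, /*ErrorHandlerAddress*/ 0,
            /*NumTrampolinesPerBlock*/ 64);
  CompileOnDemandLayer<decltype(CompileLayer), decltype(CCMgr)>
      CODLayer(CompileLayer, CCMgr);

  std::vector<std::unique_ptr<Module>> Ms;
  Ms.push_back(std::move(M));
  // Stubs and globals are emitted now; function bodies compile on first call.
  auto H = CODLayer.addModuleSet(std::move(Ms), nullptr, std::move(Resolver));
  auto MainSym = CODLayer.findSymbolIn(H, "main", /*ExportedSymbolsOnly*/ true);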
diff --git a/include/llvm/ExecutionEngine/Orc/CompileUtils.h b/include/llvm/ExecutionEngine/Orc/CompileUtils.h
new file mode 100644
index 0000000000000..49a1fbadb295f
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -0,0 +1,62 @@
+//===-- CompileUtils.h - Utilities for compiling IR in the JIT --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for compiling IR to object files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+
+#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief Simple compile functor: Takes a single IR module and returns an
+/// ObjectFile.
+class SimpleCompiler {
+public:
+ /// @brief Construct a simple compile functor with the given target.
+ SimpleCompiler(TargetMachine &TM) : TM(TM) {}
+
+ /// @brief Compile a Module to an ObjectFile.
+ object::OwningBinary<object::ObjectFile> operator()(Module &M) const {
+ SmallVector<char, 0> ObjBufferSV;
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ llvm_unreachable("Target does not support MC emission.");
+ PM.run(M);
+ ObjStream.flush();
+ std::unique_ptr<MemoryBuffer> ObjBuffer(
+ new ObjectMemoryBuffer(std::move(ObjBufferSV)));
+ ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
+ object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+ // TODO: Actually report errors helpfully.
+ typedef object::OwningBinary<object::ObjectFile> OwningObj;
+ if (Obj)
+ return OwningObj(std::move(*Obj), std::move(ObjBuffer));
+ return OwningObj(nullptr, nullptr);
+ }
+
+private:
+ TargetMachine &TM;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
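A brief usage sketch (illustrative only; a TargetMachine TM and a Module M are assumed to exist, and the headers above are assumed to be included):

  SimpleCompiler Compile(*TM);
  // Compile M straight to an in-memory object file. The OwningBinary keeps the
  // backing MemoryBuffer alive alongside the ObjectFile.
  object::OwningBinary<object::ObjectFile> ObjAndBuf = Compile(*M);
  if (object::ObjectFile *Obj = ObjAndBuf.getBinary()) {
    // Hand Obj to an object layer or a RuntimeDyld instance here.
  }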
diff --git a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
new file mode 100644
index 0000000000000..c10508cc8a62f
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -0,0 +1,182 @@
+//===-- ExecutionUtils.h - Utilities for executing code in Orc --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for executing code in Orc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+
+#include "JITSymbol.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include <vector>
+
+namespace llvm {
+
+class ConstantArray;
+class GlobalVariable;
+class Function;
+class Module;
+class Value;
+
+namespace orc {
+
+/// @brief This iterator provides a convenient way to iterate over the elements
+/// of an llvm.global_ctors/llvm.global_dtors instance.
+///
+/// The easiest way to get hold of instances of this class is to use the
+/// getConstructors/getDestructors functions.
+class CtorDtorIterator {
+public:
+
+ /// @brief Accessor for an element of the global_ctors/global_dtors array.
+ ///
+ /// This class provides a read-only view of the element with any casts on
+ /// the function stripped away.
+ struct Element {
+ Element(unsigned Priority, const Function *Func, const Value *Data)
+ : Priority(Priority), Func(Func), Data(Data) {}
+
+ unsigned Priority;
+ const Function *Func;
+ const Value *Data;
+ };
+
+ /// @brief Construct an iterator instance. If End is true then this iterator
+ /// acts as the end of the range, otherwise it is the beginning.
+ CtorDtorIterator(const GlobalVariable *GV, bool End);
+
+ /// @brief Test iterators for equality.
+ bool operator==(const CtorDtorIterator &Other) const;
+
+ /// @brief Test iterators for inequality.
+ bool operator!=(const CtorDtorIterator &Other) const;
+
+ /// @brief Pre-increment iterator.
+ CtorDtorIterator& operator++();
+
+ /// @brief Post-increment iterator.
+ CtorDtorIterator operator++(int);
+
+ /// @brief Dereference iterator. The resulting value provides a read-only view
+ /// of this element of the global_ctors/global_dtors list.
+ Element operator*() const;
+
+private:
+ const ConstantArray *InitList;
+ unsigned I;
+};
+
+/// @brief Create an iterator range over the entries of the llvm.global_ctors
+/// array.
+iterator_range<CtorDtorIterator> getConstructors(const Module &M);
+
+/// @brief Create an iterator range over the entries of the llvm.global_dtors
+/// array.
+iterator_range<CtorDtorIterator> getDestructors(const Module &M);
+
+/// @brief Convenience class for recording constructor/destructor names for
+/// later execution.
+template <typename JITLayerT>
+class CtorDtorRunner {
+public:
+
+  /// @brief Construct a CtorDtorRunner for the given list of (already mangled)
+  ///        constructor/destructor names and module set handle.
+ CtorDtorRunner(std::vector<std::string> CtorDtorNames,
+ typename JITLayerT::ModuleSetHandleT H)
+ : CtorDtorNames(std::move(CtorDtorNames)), H(H) {}
+
+ /// @brief Run the recorded constructors/destructors through the given JIT
+ /// layer.
+ bool runViaLayer(JITLayerT &JITLayer) const {
+ typedef void (*CtorDtorTy)();
+
+ bool Error = false;
+ for (const auto &CtorDtorName : CtorDtorNames)
+ if (auto CtorDtorSym = JITLayer.findSymbolIn(H, CtorDtorName, false)) {
+ CtorDtorTy CtorDtor =
+ reinterpret_cast<CtorDtorTy>(
+ static_cast<uintptr_t>(CtorDtorSym.getAddress()));
+ CtorDtor();
+ } else
+ Error = true;
+ return !Error;
+ }
+
+private:
+ std::vector<std::string> CtorDtorNames;
+ typename JITLayerT::ModuleSetHandleT H;
+};
+
+/// @brief Support class for static dtor execution. For hosted (in-process) JITs
+/// only!
+///
+/// If a __cxa_atexit function isn't found C++ programs that use static
+/// destructors will fail to link. However, we don't want to use the host
+/// process's __cxa_atexit, because it will schedule JIT'd destructors to run
+/// after the JIT has been torn down, which is no good. This class makes it easy
+/// to override __cxa_atexit (and the related __dso_handle).
+///
+/// To use, clients should manually call searchOverrides from their symbol
+/// resolver. This should generally be done after attempting symbol resolution
+/// inside the JIT, but before searching the host process's symbol table. When
+/// the client determines that destructors should be run (generally at JIT
+/// teardown or after a return from main), the runDestructors method should be
+/// called.
+class LocalCXXRuntimeOverrides {
+public:
+
+ /// Create a runtime-overrides class.
+ template <typename MangleFtorT>
+ LocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
+ addOverride(Mangle("__dso_handle"), toTargetAddress(&DSOHandleOverride));
+ addOverride(Mangle("__cxa_atexit"), toTargetAddress(&CXAAtExitOverride));
+ }
+
+  /// Search overridden symbols.
+ RuntimeDyld::SymbolInfo searchOverrides(const std::string &Name) {
+ auto I = CXXRuntimeOverrides.find(Name);
+ if (I != CXXRuntimeOverrides.end())
+ return RuntimeDyld::SymbolInfo(I->second, JITSymbolFlags::Exported);
+ return nullptr;
+ }
+
+  /// Run any destructors recorded by the overridden __cxa_atexit function
+ /// (CXAAtExitOverride).
+ void runDestructors();
+
+private:
+
+ template <typename PtrTy>
+ TargetAddress toTargetAddress(PtrTy* P) {
+ return static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(P));
+ }
+
+ void addOverride(const std::string &Name, TargetAddress Addr) {
+ CXXRuntimeOverrides.insert(std::make_pair(Name, Addr));
+ }
+
+ StringMap<TargetAddress> CXXRuntimeOverrides;
+
+ typedef void (*DestructorPtr)(void*);
+ typedef std::pair<DestructorPtr, void*> CXXDestructorDataPair;
+ typedef std::vector<CXXDestructorDataPair> CXXDestructorDataPairList;
+ CXXDestructorDataPairList DSOHandleOverride;
+ static int CXAAtExitOverride(DestructorPtr Destructor, void *Arg,
+ void *DSOHandle);
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
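A hedged sketch of how these utilities are meant to combine with a JIT layer (CompileLayer, the handle H and a Mangle functor are assumed to come from the surrounding JIT setup):

  // Record the mangled names of M's static constructors before adding it.
  std::vector<std::string> CtorNames;
  for (auto Ctor : getConstructors(*M))
    CtorNames.push_back(Mangle(Ctor.Func->getName()));

  CtorDtorRunner<decltype(CompileLayer)> CtorRunner(std::move(CtorNames), H);
  CtorRunner.runViaLayer(CompileLayer);      // run static constructors via the JIT

  // Intercept __dso_handle/__cxa_atexit so JIT'd destructors don't outlive the
  // JIT, then run them explicitly at tear-down.
  LocalCXXRuntimeOverrides CXXOverrides(Mangle);
  // In the symbol resolver: CXXOverrides.searchOverrides(Name);
  CXXOverrides.runDestructors();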
diff --git a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
new file mode 100644
index 0000000000000..637902200786e
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -0,0 +1,148 @@
+//===------ IRCompileLayer.h -- Eagerly compile IR for JIT ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a basic, eagerly compiling layer of the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+
+#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Object/ObjectFile.h"
+#include <memory>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Eager IR compiling layer.
+///
+/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
+/// immediately compiles each IR module to an object file (each IR Module is
+/// compiled separately). The resulting set of object files is then added to
+/// the layer below, which must implement the object layer concept.
+template <typename BaseLayerT> class IRCompileLayer {
+public:
+ typedef std::function<object::OwningBinary<object::ObjectFile>(Module &)>
+ CompileFtor;
+
+private:
+ typedef typename BaseLayerT::ObjSetHandleT ObjSetHandleT;
+
+ typedef std::vector<std::unique_ptr<object::ObjectFile>> OwningObjectVec;
+ typedef std::vector<std::unique_ptr<MemoryBuffer>> OwningBufferVec;
+
+public:
+ /// @brief Handle to a set of compiled modules.
+ typedef ObjSetHandleT ModuleSetHandleT;
+
+ /// @brief Construct an IRCompileLayer with the given BaseLayer, which must
+ /// implement the ObjectLayer concept.
+ IRCompileLayer(BaseLayerT &BaseLayer, CompileFtor Compile)
+ : BaseLayer(BaseLayer), Compile(std::move(Compile)), ObjCache(nullptr) {}
+
+ /// @brief Set an ObjectCache to query before compiling.
+ void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
+
+ /// @brief Compile each module in the given module set, then add the resulting
+ /// set of objects to the base layer along with the memory manager and
+ /// symbol resolver.
+ ///
+ /// @return A handle for the added modules.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ OwningObjectVec Objects;
+ OwningBufferVec Buffers;
+
+ for (const auto &M : Ms) {
+ std::unique_ptr<object::ObjectFile> Object;
+ std::unique_ptr<MemoryBuffer> Buffer;
+
+ if (ObjCache)
+ std::tie(Object, Buffer) = tryToLoadFromObjectCache(*M).takeBinary();
+
+ if (!Object) {
+ std::tie(Object, Buffer) = Compile(*M).takeBinary();
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&*M, Buffer->getMemBufferRef());
+ }
+
+ Objects.push_back(std::move(Object));
+ Buffers.push_back(std::move(Buffer));
+ }
+
+ ModuleSetHandleT H =
+ BaseLayer.addObjectSet(Objects, std::move(MemMgr), std::move(Resolver));
+
+ BaseLayer.takeOwnershipOfBuffers(H, std::move(Buffers));
+
+ return H;
+ }
+
+ /// @brief Remove the module set associated with the handle H.
+ void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeObjectSet(H); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// compiled modules represented by the handle H. This call is
+ /// forwarded to the base layer's implementation.
+ /// @param H The handle for the module set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module set.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+  /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ BaseLayer.emitAndFinalize(H);
+ }
+
+private:
+ object::OwningBinary<object::ObjectFile>
+ tryToLoadFromObjectCache(const Module &M) {
+ std::unique_ptr<MemoryBuffer> ObjBuffer = ObjCache->getObject(&M);
+ if (!ObjBuffer)
+ return object::OwningBinary<object::ObjectFile>();
+
+ ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
+ object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+ if (!Obj)
+ return object::OwningBinary<object::ObjectFile>();
+
+ return object::OwningBinary<object::ObjectFile>(std::move(*Obj),
+ std::move(ObjBuffer));
+ }
+
+ BaseLayerT &BaseLayer;
+ CompileFtor Compile;
+ ObjectCache *ObjCache;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
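A minimal sketch of the eager pipeline this layer provides (TM, M and Resolver are assumptions; ObjectLinkingLayer is the object layer added elsewhere in this commit):

  ObjectLinkingLayer<> ObjLayer;
  IRCompileLayer<ObjectLinkingLayer<>> CompileLayer(ObjLayer, SimpleCompiler(*TM));

  std::vector<std::unique_ptr<Module>> Ms;
  Ms.push_back(std::move(M));
  // Each module is compiled to an object file immediately on addModuleSet.
  auto H = CompileLayer.addModuleSet(std::move(Ms),
                                     llvm::make_unique<SectionMemoryManager>(),
                                     std::move(Resolver));
  auto MainSym = CompileLayer.findSymbolIn(H, "main", /*ExportedSymbolsOnly*/ true);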
diff --git a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
new file mode 100644
index 0000000000000..4dabb9a414940
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -0,0 +1,101 @@
+//===----- IRTransformLayer.h - Run all IR through a functor ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all IR passed in through a user supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+
+#include "JITSymbol.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief IR mutating layer.
+///
+/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
+/// immediately applies the user supplied functor to each module, then adds
+/// the set of transformed modules to the layer below.
+template <typename BaseLayerT, typename TransformFtor>
+class IRTransformLayer {
+public:
+ /// @brief Handle to a set of added modules.
+ typedef typename BaseLayerT::ModuleSetHandleT ModuleSetHandleT;
+
+ /// @brief Construct an IRTransformLayer with the given BaseLayer
+ IRTransformLayer(BaseLayerT &BaseLayer,
+ TransformFtor Transform = TransformFtor())
+ : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+ /// @brief Apply the transform functor to each module in the module set, then
+ /// add the resulting set of modules to the base layer, along with the
+ /// memory manager and symbol resolver.
+ ///
+ /// @return A handle for the added modules.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+
+ for (auto I = Ms.begin(), E = Ms.end(); I != E; ++I)
+ *I = Transform(std::move(*I));
+
+ return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ /// @brief Remove the module set associated with the handle H.
+ void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// modules represented by the handle H. This call is forwarded to the
+ /// base layer's implementation.
+ /// @param H The handle for the module set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module set.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ BaseLayer.emitAndFinalize(H);
+ }
+
+ /// @brief Access the transform functor directly.
+ TransformFtor& getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const version).
+ const TransformFtor& getTransform() const { return Transform; }
+
+private:
+ BaseLayerT &BaseLayer;
+ TransformFtor Transform;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
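An illustrative transform functor (a sketch, not mandated by the header): log each module's identifier as it passes through, then forward it unchanged to the layer below. CompileLayer is assumed to be an existing IR layer, and outs() requires llvm/Support/raw_ostream.h.

  auto LogModule = [](std::unique_ptr<Module> M) {
    outs() << "Adding module: " << M->getModuleIdentifier() << "\n";
    return M;                       // forward the unmodified module
  };
  IRTransformLayer<decltype(CompileLayer), decltype(LogModule)>
      LogLayer(CompileLayer, LogModule);
  // LogLayer.addModuleSet(...) now behaves like CompileLayer.addModuleSet(...),
  // with the logging applied to each module first.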
diff --git a/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
new file mode 100644
index 0000000000000..4b7fc5e84b9c6
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -0,0 +1,295 @@
+//===-- IndirectionUtils.h - Utilities for adding indirections --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for adding indirections and breaking up modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+
+#include "JITSymbol.h"
+#include "LambdaResolver.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <sstream>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Base class for JITLayer independent aspects of
+/// JITCompileCallbackManager.
+class JITCompileCallbackManagerBase {
+public:
+
+ typedef std::function<TargetAddress()> CompileFtor;
+
+ /// @brief Handle to a newly created compile callback. Can be used to get an
+ /// IR constant representing the address of the trampoline, and to set
+ /// the compile action for the callback.
+ class CompileCallbackInfo {
+ public:
+ CompileCallbackInfo(TargetAddress Addr, CompileFtor &Compile)
+ : Addr(Addr), Compile(Compile) {}
+
+ TargetAddress getAddress() const { return Addr; }
+ void setCompileAction(CompileFtor Compile) {
+ this->Compile = std::move(Compile);
+ }
+ private:
+ TargetAddress Addr;
+ CompileFtor &Compile;
+ };
+
+ /// @brief Construct a JITCompileCallbackManagerBase.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ /// @param NumTrampolinesPerBlock Number of trampolines to emit if there is no
+ /// available trampoline when getCompileCallback is
+ /// called.
+ JITCompileCallbackManagerBase(TargetAddress ErrorHandlerAddress,
+ unsigned NumTrampolinesPerBlock)
+ : ErrorHandlerAddress(ErrorHandlerAddress),
+ NumTrampolinesPerBlock(NumTrampolinesPerBlock) {}
+
+ virtual ~JITCompileCallbackManagerBase() {}
+
+ /// @brief Execute the callback for the given trampoline id. Called by the JIT
+ /// to compile functions on demand.
+ TargetAddress executeCompileCallback(TargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ // FIXME: Also raise an error in the Orc error-handler when we finally have
+ // one.
+ if (I == ActiveTrampolines.end())
+ return ErrorHandlerAddress;
+
+ // Found a callback handler. Yank this trampoline out of the active list and
+ // put it back in the available trampolines list, then try to run the
+ // handler's compile and update actions.
+ // Moving the trampoline ID back to the available list first means there's at
+ // least one available trampoline if the compile action triggers a request for
+ // a new one.
+ auto Compile = std::move(I->second);
+ ActiveTrampolines.erase(I);
+ AvailableTrampolines.push_back(TrampolineAddr);
+
+ if (auto Addr = Compile())
+ return Addr;
+
+ return ErrorHandlerAddress;
+ }
+
+ /// @brief Reserve a compile callback.
+ virtual CompileCallbackInfo getCompileCallback(LLVMContext &Context) = 0;
+
+ /// @brief Get a CompileCallbackInfo for an existing callback.
+ CompileCallbackInfo getCompileCallbackInfo(TargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+ return CompileCallbackInfo(I->first, I->second);
+ }
+
+ /// @brief Release a compile callback.
+ ///
+ /// Note: Callbacks are auto-released after they execute. This method should
+ /// only be called to manually release a callback that is not going to
+ /// execute.
+ void releaseCompileCallback(TargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+ ActiveTrampolines.erase(I);
+ AvailableTrampolines.push_back(TrampolineAddr);
+ }
+
+protected:
+ TargetAddress ErrorHandlerAddress;
+ unsigned NumTrampolinesPerBlock;
+
+ typedef std::map<TargetAddress, CompileFtor> TrampolineMapT;
+ TrampolineMapT ActiveTrampolines;
+ std::vector<TargetAddress> AvailableTrampolines;
+};
+
+/// @brief Manage compile callbacks.
+template <typename JITLayerT, typename TargetT>
+class JITCompileCallbackManager : public JITCompileCallbackManagerBase {
+public:
+
+ /// @brief Construct a JITCompileCallbackManager.
+ /// @param JIT JIT layer to emit callback trampolines, etc. into.
+  /// @param Context LLVMContext to use for trampoline & resolver block modules.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ /// @param NumTrampolinesPerBlock Number of trampolines to allocate whenever
+ /// there is no existing callback trampoline.
+ /// (Trampolines are allocated in blocks for
+ /// efficiency.)
+ JITCompileCallbackManager(JITLayerT &JIT, RuntimeDyld::MemoryManager &MemMgr,
+ LLVMContext &Context,
+ TargetAddress ErrorHandlerAddress,
+ unsigned NumTrampolinesPerBlock)
+ : JITCompileCallbackManagerBase(ErrorHandlerAddress,
+ NumTrampolinesPerBlock),
+ JIT(JIT), MemMgr(MemMgr) {
+ emitResolverBlock(Context);
+ }
+
+ /// @brief Get/create a compile callback with the given signature.
+ CompileCallbackInfo getCompileCallback(LLVMContext &Context) final {
+ TargetAddress TrampolineAddr = getAvailableTrampolineAddr(Context);
+ auto &Compile = this->ActiveTrampolines[TrampolineAddr];
+ return CompileCallbackInfo(TrampolineAddr, Compile);
+ }
+
+private:
+
+ std::vector<std::unique_ptr<Module>>
+ SingletonSet(std::unique_ptr<Module> M) {
+ std::vector<std::unique_ptr<Module>> Ms;
+ Ms.push_back(std::move(M));
+ return Ms;
+ }
+
+ void emitResolverBlock(LLVMContext &Context) {
+ std::unique_ptr<Module> M(new Module("resolver_block_module",
+ Context));
+ TargetT::insertResolverBlock(*M, *this);
+ auto NonResolver =
+ createLambdaResolver(
+ [](const std::string &Name) -> RuntimeDyld::SymbolInfo {
+ llvm_unreachable("External symbols in resolver block?");
+ },
+ [](const std::string &Name) -> RuntimeDyld::SymbolInfo {
+ llvm_unreachable("Dylib symbols in resolver block?");
+ });
+ auto H = JIT.addModuleSet(SingletonSet(std::move(M)), &MemMgr,
+ std::move(NonResolver));
+ JIT.emitAndFinalize(H);
+ auto ResolverBlockSymbol =
+ JIT.findSymbolIn(H, TargetT::ResolverBlockName, false);
+ assert(ResolverBlockSymbol && "Failed to insert resolver block");
+ ResolverBlockAddr = ResolverBlockSymbol.getAddress();
+ }
+
+ TargetAddress getAvailableTrampolineAddr(LLVMContext &Context) {
+ if (this->AvailableTrampolines.empty())
+ grow(Context);
+ assert(!this->AvailableTrampolines.empty() &&
+ "Failed to grow available trampolines.");
+ TargetAddress TrampolineAddr = this->AvailableTrampolines.back();
+ this->AvailableTrampolines.pop_back();
+ return TrampolineAddr;
+ }
+
+ void grow(LLVMContext &Context) {
+ assert(this->AvailableTrampolines.empty() && "Growing prematurely?");
+ std::unique_ptr<Module> M(new Module("trampoline_block", Context));
+ auto GetLabelName =
+ TargetT::insertCompileCallbackTrampolines(*M, ResolverBlockAddr,
+ this->NumTrampolinesPerBlock,
+ this->ActiveTrampolines.size());
+ auto NonResolver =
+ createLambdaResolver(
+ [](const std::string &Name) -> RuntimeDyld::SymbolInfo {
+ llvm_unreachable("External symbols in trampoline block?");
+ },
+ [](const std::string &Name) -> RuntimeDyld::SymbolInfo {
+ llvm_unreachable("Dylib symbols in trampoline block?");
+ });
+ auto H = JIT.addModuleSet(SingletonSet(std::move(M)), &MemMgr,
+ std::move(NonResolver));
+ JIT.emitAndFinalize(H);
+ for (unsigned I = 0; I < this->NumTrampolinesPerBlock; ++I) {
+ std::string Name = GetLabelName(I);
+ auto TrampolineSymbol = JIT.findSymbolIn(H, Name, false);
+ assert(TrampolineSymbol && "Failed to emit trampoline.");
+ this->AvailableTrampolines.push_back(TrampolineSymbol.getAddress());
+ }
+ }
+
+ JITLayerT &JIT;
+ RuntimeDyld::MemoryManager &MemMgr;
+ TargetAddress ResolverBlockAddr;
+};
+
+/// @brief Build a function pointer of FunctionType with the given constant
+/// address.
+///
+/// Usage example: Turn a trampoline address into a function pointer constant
+/// for use in a stub.
+Constant* createIRTypedAddress(FunctionType &FT, TargetAddress Addr);
+
+/// @brief Create a function pointer with the given type, name, and initializer
+/// in the given Module.
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer);
+
+/// @brief Turn a function declaration into a stub function that makes an
+/// indirect call using the given function pointer.
+void makeStub(Function &F, GlobalVariable &ImplPointer);
+
+/// @brief Raise linkage types and rename as necessary to ensure that all
+/// symbols are accessible for other modules.
+///
+/// This should be called before partitioning a module to ensure that the
+/// partitions retain access to each other's symbols.
+void makeAllSymbolsExternallyAccessible(Module &M);
+
+/// @brief Clone a function declaration into a new module.
+///
+/// This function can be used as the first step towards creating a callback
+/// stub (see makeStub), or moving a function body (see moveFunctionBody).
+///
+/// If the VMap argument is non-null, a mapping will be added between F and
+/// the new declaration, and between each of F's arguments and the new
+/// declaration's arguments. This map can then be passed in to moveFunctionBody
+/// to move the function body if required. Note: When moving functions between
+/// modules with these utilities, all decls should be cloned (and added to a
+/// single VMap) before any bodies are moved. This will ensure that references
+/// between functions all refer to the versions in the new module.
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move the body of function 'F' to a cloned function declaration in a
+/// different module (See related cloneFunctionDecl).
+///
+/// If the target function declaration is not supplied via the NewF parameter
+/// then it will be looked up via the VMap.
+///
+/// This will delete the body of function 'F' from its original parent module,
+/// but leave its declaration.
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer = nullptr,
+ Function *NewF = nullptr);
+
+/// @brief Clone a global variable declaration into a new module.
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move global variable GV from its parent module to cloned global
+/// declaration in a different module.
+///
+/// If the target global declaration is not supplied via the NewGV parameter
+/// then it will be looked up via the VMap.
+///
+/// This will delete the initializer of GV from its original parent module,
+/// but leave its declaration.
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer = nullptr,
+ GlobalVariable *NewGV = nullptr);
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
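A rough sketch of how the stub utilities above compose (this mirrors what CompileOnDemandLayer::addLogicalModule does internally; StubsM, F, VMap and CallbackAddr are assumptions):

  // Clone F's declaration into the stubs module and turn it into an indirect
  // stub that calls through a per-function implementation pointer.
  Function *StubF = cloneFunctionDecl(StubsM, F, &VMap);
  GlobalVariable *ImplPtr = createImplPointer(
      *StubF->getType(), StubsM, StubF->getName() + "$orc_addr",
      createIRTypedAddress(*StubF->getFunctionType(), CallbackAddr));
  makeStub(*StubF, *ImplPtr);
  // Later, once the real body has been compiled, the JIT overwrites the
  // pointer stored in ImplPtr with the body's address.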
diff --git a/include/llvm/ExecutionEngine/Orc/JITSymbol.h b/include/llvm/ExecutionEngine/Orc/JITSymbol.h
new file mode 100644
index 0000000000000..422a3761837c2
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/JITSymbol.h
@@ -0,0 +1,77 @@
+//===----------- JITSymbol.h - JIT symbol abstraction -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstraction for target process addresses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
+#define LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
+
+#include "llvm/ExecutionEngine/JITSymbolFlags.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <functional>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Represents an address in the target process's address space.
+typedef uint64_t TargetAddress;
+
+/// @brief Represents a symbol in the JIT.
+class JITSymbol : public JITSymbolBase {
+public:
+
+ typedef std::function<TargetAddress()> GetAddressFtor;
+
+ /// @brief Create a 'null' symbol that represents failure to find a symbol
+ /// definition.
+ JITSymbol(std::nullptr_t)
+ : JITSymbolBase(JITSymbolFlags::None), CachedAddr(0) {}
+
+ /// @brief Create a symbol for a definition with a known address.
+ JITSymbol(TargetAddress Addr, JITSymbolFlags Flags)
+ : JITSymbolBase(Flags), CachedAddr(Addr) {}
+
+ /// @brief Create a symbol for a definition that doesn't have a known address
+ /// yet.
+ /// @param GetAddress A functor to materialize a definition (fixing the
+ /// address) on demand.
+ ///
+ /// This constructor allows a JIT layer to provide a reference to a symbol
+ /// definition without actually materializing the definition up front. The
+ /// user can materialize the definition at any time by calling the getAddress
+ /// method.
+ JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
+ : JITSymbolBase(Flags), GetAddress(std::move(GetAddress)), CachedAddr(0) {}
+
+ /// @brief Returns true if the symbol exists, false otherwise.
+ explicit operator bool() const { return CachedAddr || GetAddress; }
+
+ /// @brief Get the address of the symbol in the target address space. Returns
+ /// '0' if the symbol does not exist.
+ TargetAddress getAddress() {
+ if (GetAddress) {
+ CachedAddr = GetAddress();
+ assert(CachedAddr && "Symbol could not be materialized.");
+ GetAddress = nullptr;
+ }
+ return CachedAddr;
+ }
+
+private:
+ GetAddressFtor GetAddress;
+ TargetAddress CachedAddr;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
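A small sketch of the deferred-address case (compileAndGetAddr is a hypothetical helper that materializes the definition and returns its address):

  JITSymbol LazySym([]() -> TargetAddress { return compileAndGetAddr(); },
                    JITSymbolFlags::Exported);
  if (LazySym) {                                 // the symbol exists...
    TargetAddress Addr = LazySym.getAddress();   // ...materialized on demand
    (void)Addr;
  }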
diff --git a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
new file mode 100644
index 0000000000000..faa23658524fd
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -0,0 +1,62 @@
+//===-- LambdaResolver.h - Redirect symbol lookup via a functor -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a RuntimeDyld::SymbolResolver subclass that uses a user-supplied
+// functor for symbol resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+template <typename ExternalLookupFtorT, typename DylibLookupFtorT>
+class LambdaResolver : public RuntimeDyld::SymbolResolver {
+public:
+
+ LambdaResolver(ExternalLookupFtorT ExternalLookupFtor,
+ DylibLookupFtorT DylibLookupFtor)
+ : ExternalLookupFtor(ExternalLookupFtor),
+ DylibLookupFtor(DylibLookupFtor) {}
+
+ RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final {
+ return ExternalLookupFtor(Name);
+ }
+
+ RuntimeDyld::SymbolInfo
+ findSymbolInLogicalDylib(const std::string &Name) final {
+ return DylibLookupFtor(Name);
+ }
+
+private:
+ ExternalLookupFtorT ExternalLookupFtor;
+ DylibLookupFtorT DylibLookupFtor;
+};
+
+template <typename ExternalLookupFtorT,
+ typename DylibLookupFtorT>
+std::unique_ptr<LambdaResolver<ExternalLookupFtorT, DylibLookupFtorT>>
+createLambdaResolver(ExternalLookupFtorT ExternalLookupFtor,
+ DylibLookupFtorT DylibLookupFtor) {
+ typedef LambdaResolver<ExternalLookupFtorT, DylibLookupFtorT> LR;
+ return make_unique<LR>(std::move(ExternalLookupFtor),
+ std::move(DylibLookupFtor));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
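A typical resolver built with this helper (a sketch; the external lookup falls back to the host process via RTDyldMemoryManager::getSymbolAddressInProcess, which is only appropriate for in-process JITs):

  auto Resolver = createLambdaResolver(
      // External symbols: try the host process.
      [](const std::string &Name) {
        if (auto Addr = RTDyldMemoryManager::getSymbolAddressInProcess(Name))
          return RuntimeDyld::SymbolInfo(Addr, JITSymbolFlags::Exported);
        return RuntimeDyld::SymbolInfo(nullptr);
      },
      // Logical-dylib symbols: nothing beyond the JIT'd modules themselves.
      [](const std::string &Name) { return RuntimeDyld::SymbolInfo(nullptr); });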
diff --git a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
new file mode 100644
index 0000000000000..71c83f7e05f62
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -0,0 +1,304 @@
+//===- LazyEmittingLayer.h - Lazily emit IR to lower JIT layers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a lazy-emitting layer for the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+
+#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include <list>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Lazy-emitting IR layer.
+///
+/// This layer accepts sets of LLVM IR Modules (via addModuleSet), but does
+/// not immediately emit them to the layer below. Instead, emission to the base
+/// layer is deferred until the first time the client requests the address
+/// (via JITSymbol::getAddress) for a symbol contained in this layer.
+template <typename BaseLayerT> class LazyEmittingLayer {
+public:
+ typedef typename BaseLayerT::ModuleSetHandleT BaseLayerHandleT;
+
+private:
+ class EmissionDeferredSet {
+ public:
+ EmissionDeferredSet() : EmitState(NotEmitted) {}
+ virtual ~EmissionDeferredSet() {}
+
+ JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
+ switch (EmitState) {
+ case NotEmitted:
+ if (auto GV = searchGVs(Name, ExportedSymbolsOnly)) {
+ // Create a std::string version of Name to capture here - the argument
+ // (a StringRef) may go away before the lambda is executed.
+ // FIXME: Use capture-init when we move to C++14.
+ std::string PName = Name;
+ JITSymbolFlags Flags = JITSymbolBase::flagsFromGlobalValue(*GV);
+ auto GetAddress =
+ [this, ExportedSymbolsOnly, PName, &B]() -> TargetAddress {
+ if (this->EmitState == Emitting)
+ return 0;
+ else if (this->EmitState == NotEmitted) {
+ this->EmitState = Emitting;
+ Handle = this->emitToBaseLayer(B);
+ this->EmitState = Emitted;
+ }
+ auto Sym = B.findSymbolIn(Handle, PName, ExportedSymbolsOnly);
+ return Sym.getAddress();
+ };
+ return JITSymbol(std::move(GetAddress), Flags);
+ } else
+ return nullptr;
+ case Emitting:
+ // Calling "emit" can trigger external symbol lookup (e.g. to check for
+ // pre-existing definitions of common-symbol), but it will never find in
+ // this module that it would not have found already, so return null from
+ // here.
+ return nullptr;
+ case Emitted:
+ return B.findSymbolIn(Handle, Name, ExportedSymbolsOnly);
+ }
+ llvm_unreachable("Invalid emit-state.");
+ }
+
+ void removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
+ if (EmitState != NotEmitted)
+ BaseLayer.removeModuleSet(Handle);
+ }
+
+ void emitAndFinalize(BaseLayerT &BaseLayer) {
+ assert(EmitState != Emitting &&
+ "Cannot emitAndFinalize while already emitting");
+ if (EmitState == NotEmitted) {
+ EmitState = Emitting;
+ Handle = emitToBaseLayer(BaseLayer);
+ EmitState = Emitted;
+ }
+ BaseLayer.emitAndFinalize(Handle);
+ }
+
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ static std::unique_ptr<EmissionDeferredSet>
+ create(BaseLayerT &B, ModuleSetT Ms, MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver);
+
+ protected:
+ virtual const GlobalValue* searchGVs(StringRef Name,
+ bool ExportedSymbolsOnly) const = 0;
+ virtual BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) = 0;
+
+ private:
+ enum { NotEmitted, Emitting, Emitted } EmitState;
+ BaseLayerHandleT Handle;
+ };
+
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ class EmissionDeferredSetImpl : public EmissionDeferredSet {
+ public:
+ EmissionDeferredSetImpl(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver)
+ : Ms(std::move(Ms)), MemMgr(std::move(MemMgr)),
+ Resolver(std::move(Resolver)) {}
+
+ protected:
+
+ const GlobalValue* searchGVs(StringRef Name,
+ bool ExportedSymbolsOnly) const override {
+ // FIXME: We could clean all this up if we had a way to reliably demangle
+ // names: We could just demangle name and search, rather than
+ // mangling everything else.
+
+ // If we have already built the mangled name set then just search it.
+ if (MangledSymbols) {
+ auto VI = MangledSymbols->find(Name);
+ if (VI == MangledSymbols->end())
+ return nullptr;
+ auto GV = VI->second;
+ if (!ExportedSymbolsOnly || GV->hasDefaultVisibility())
+ return GV;
+ return nullptr;
+ }
+
+ // If we haven't built the mangled name set yet, try to build it. As an
+      // optimization this will leave MangledSymbols set to nullptr if we find
+ // Name in the process of building the set.
+ return buildMangledSymbols(Name, ExportedSymbolsOnly);
+ }
+
+ BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) override {
+ // We don't need the mangled names set any more: Once we've emitted this
+ // to the base layer we'll just look for symbols there.
+ MangledSymbols.reset();
+ return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ private:
+ // If the mangled name of the given GlobalValue matches the given search
+ // name (and its visibility conforms to the ExportedSymbolsOnly flag) then
+ // return the symbol. Otherwise, add the mangled name to the Names map and
+ // return nullptr.
+ const GlobalValue* addGlobalValue(StringMap<const GlobalValue*> &Names,
+ const GlobalValue &GV,
+ const Mangler &Mang, StringRef SearchName,
+ bool ExportedSymbolsOnly) const {
+ // Modules don't "provide" decls or common symbols.
+ if (GV.isDeclaration() || GV.hasCommonLinkage())
+ return nullptr;
+
+ // Mangle the GV name.
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mang.getNameWithPrefix(MangledNameStream, &GV, false);
+ }
+
+ // Check whether this is the name we were searching for, and if it is then
+ // bail out early.
+ if (MangledName == SearchName)
+ if (!ExportedSymbolsOnly || GV.hasDefaultVisibility())
+ return &GV;
+
+ // Otherwise add this to the map for later.
+ Names[MangledName] = &GV;
+ return nullptr;
+ }
+
+ // Build the MangledSymbols map. Bails out early (with MangledSymbols left set
+ // to nullptr) if the given SearchName is found while building the map.
+ const GlobalValue* buildMangledSymbols(StringRef SearchName,
+ bool ExportedSymbolsOnly) const {
+ assert(!MangledSymbols && "Mangled symbols map already exists?");
+
+ auto Symbols = llvm::make_unique<StringMap<const GlobalValue*>>();
+
+ for (const auto &M : Ms) {
+ Mangler Mang(&M->getDataLayout());
+
+ for (const auto &V : M->globals())
+ if (auto GV = addGlobalValue(*Symbols, V, Mang, SearchName,
+ ExportedSymbolsOnly))
+ return GV;
+
+ for (const auto &F : *M)
+ if (auto GV = addGlobalValue(*Symbols, F, Mang, SearchName,
+ ExportedSymbolsOnly))
+ return GV;
+ }
+
+ MangledSymbols = std::move(Symbols);
+ return nullptr;
+ }
+
+ ModuleSetT Ms;
+ MemoryManagerPtrT MemMgr;
+ SymbolResolverPtrT Resolver;
+ mutable std::unique_ptr<StringMap<const GlobalValue*>> MangledSymbols;
+ };
+
+ typedef std::list<std::unique_ptr<EmissionDeferredSet>> ModuleSetListT;
+
+ BaseLayerT &BaseLayer;
+ ModuleSetListT ModuleSetList;
+
+public:
+ /// @brief Handle to a set of loaded modules.
+ typedef typename ModuleSetListT::iterator ModuleSetHandleT;
+
+ /// @brief Construct a lazy emitting layer.
+ LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+ /// @brief Add the given set of modules to the lazy emitting layer.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ return ModuleSetList.insert(
+ ModuleSetList.end(),
+ EmissionDeferredSet::create(BaseLayer, std::move(Ms), std::move(MemMgr),
+ std::move(Resolver)));
+ }
+
+ /// @brief Remove the module set represented by the given handle.
+ ///
+  /// This method will free the memory associated with the given module set,
+  /// both in this layer and in the base layer.
+ void removeModuleSet(ModuleSetHandleT H) {
+ (*H)->removeModulesFromBaseLayer(BaseLayer);
+ ModuleSetList.erase(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ // Look for the symbol among existing definitions.
+ if (auto Symbol = BaseLayer.findSymbol(Name, ExportedSymbolsOnly))
+ return Symbol;
+
+ // If not found then search the deferred sets. If any of these contain a
+ // definition of 'Name' then they will return a JITSymbol that will emit
+ // the corresponding module when the symbol address is requested.
+ for (auto &DeferredSet : ModuleSetList)
+ if (auto Symbol = DeferredSet->find(Name, ExportedSymbolsOnly, BaseLayer))
+ return Symbol;
+
+ // If no definition found anywhere return a null symbol.
+ return nullptr;
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// compiled modules represented by the handle H.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return (*H)->find(Name, ExportedSymbolsOnly, BaseLayer);
+ }
+
+  /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ (*H)->emitAndFinalize(BaseLayer);
+ }
+
+};
+
+template <typename BaseLayerT>
+template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+std::unique_ptr<typename LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet>
+LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet::create(
+ BaseLayerT &B, ModuleSetT Ms, MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ typedef EmissionDeferredSetImpl<ModuleSetT, MemoryManagerPtrT, SymbolResolverPtrT>
+ EDS;
+ return llvm::make_unique<EDS>(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
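As a sketch of how this layer is typically composed with the other layers in
this commit (assuming the IRCompileLayer and SimpleCompiler from
IRCompileLayer.h/CompileUtils.h, a TargetMachine *TM, a
std::unique_ptr<Module> M, a Resolver built as in the LambdaResolver example
above, and MangledName, the mangled name of a definition in M):

    ObjectLinkingLayer<> ObjLayer;
    IRCompileLayer<ObjectLinkingLayer<>> CompileLayer(ObjLayer,
                                                      SimpleCompiler(*TM));
    LazyEmittingLayer<decltype(CompileLayer)> LazyLayer(CompileLayer);

    std::vector<std::unique_ptr<Module>> Ms;
    Ms.push_back(std::move(M));
    auto H = LazyLayer.addModuleSet(std::move(Ms),
                                    llvm::make_unique<SectionMemoryManager>(),
                                    std::move(Resolver));

    // Nothing has been handed to the base layer yet; the set is only emitted
    // (and therefore compiled) when an address is first requested.
    if (auto Sym = LazyLayer.findSymbol(MangledName, true))
      TargetAddress Addr = Sym.getAddress();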
diff --git a/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h b/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
new file mode 100644
index 0000000000000..f3094dafae3c3
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
@@ -0,0 +1,284 @@
+//===- ObjectLinkingLayer.h - Add object files to a JIT process -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for the object layer of the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
+
+#include "JITSymbol.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include <list>
+#include <memory>
+
+namespace llvm {
+namespace orc {
+
+class ObjectLinkingLayerBase {
+protected:
+
+ /// @brief Holds a set of objects to be allocated/linked as a unit in the JIT.
+ ///
+ /// An instance of this class will be created for each set of objects added
+  /// via ObjectLinkingLayer::addObjectSet. Deleting the instance (via
+ /// removeObjectSet) frees its memory, removing all symbol definitions that
+ /// had been provided by this instance. Higher level layers are responsible
+ /// for taking any action required to handle the missing symbols.
+ class LinkedObjectSet {
+ LinkedObjectSet(const LinkedObjectSet&) = delete;
+ void operator=(const LinkedObjectSet&) = delete;
+ public:
+ LinkedObjectSet(RuntimeDyld::MemoryManager &MemMgr,
+ RuntimeDyld::SymbolResolver &Resolver)
+ : RTDyld(llvm::make_unique<RuntimeDyld>(MemMgr, Resolver)),
+ State(Raw) {}
+
+ virtual ~LinkedObjectSet() {}
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ addObject(const object::ObjectFile &Obj) {
+ return RTDyld->loadObject(Obj);
+ }
+
+ RuntimeDyld::SymbolInfo getSymbol(StringRef Name) const {
+ return RTDyld->getSymbol(Name);
+ }
+
+ bool NeedsFinalization() const { return (State == Raw); }
+
+ virtual void Finalize() = 0;
+
+ void mapSectionAddress(const void *LocalAddress, TargetAddress TargetAddr) {
+ assert((State != Finalized) &&
+ "Attempting to remap sections for finalized objects.");
+ RTDyld->mapSectionAddress(LocalAddress, TargetAddr);
+ }
+
+ void takeOwnershipOfBuffer(std::unique_ptr<MemoryBuffer> B) {
+ OwnedBuffers.push_back(std::move(B));
+ }
+
+ protected:
+ std::unique_ptr<RuntimeDyld> RTDyld;
+ enum { Raw, Finalizing, Finalized } State;
+
+ // FIXME: This ownership hack only exists because RuntimeDyldELF still
+ // wants to be able to inspect the original object when resolving
+ // relocations. As soon as that can be fixed this should be removed.
+ std::vector<std::unique_ptr<MemoryBuffer>> OwnedBuffers;
+ };
+
+ typedef std::list<std::unique_ptr<LinkedObjectSet>> LinkedObjectSetListT;
+
+public:
+ /// @brief Handle to a set of loaded objects.
+ typedef LinkedObjectSetListT::iterator ObjSetHandleT;
+
+ // Ownership hack.
+ // FIXME: Remove this as soon as RuntimeDyldELF can apply relocations without
+ // referencing the original object.
+ template <typename OwningMBSet>
+ void takeOwnershipOfBuffers(ObjSetHandleT H, OwningMBSet MBs) {
+ for (auto &MB : MBs)
+ (*H)->takeOwnershipOfBuffer(std::move(MB));
+ }
+
+};
+
+/// @brief Default (no-op) action to perform when loading objects.
+class DoNothingOnNotifyLoaded {
+public:
+ template <typename ObjSetT, typename LoadResult>
+ void operator()(ObjectLinkingLayerBase::ObjSetHandleT, const ObjSetT &,
+ const LoadResult &) {}
+};
+
+/// @brief Bare bones object linking layer.
+///
+/// This class is intended to be used as the base layer for a JIT. It allows
+/// object files to be loaded into memory, linked, and the addresses of their
+/// symbols queried. All objects added to this layer can see each other's
+/// symbols.
+template <typename NotifyLoadedFtor = DoNothingOnNotifyLoaded>
+class ObjectLinkingLayer : public ObjectLinkingLayerBase {
+private:
+
+ template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
+ class ConcreteLinkedObjectSet : public LinkedObjectSet {
+ public:
+ ConcreteLinkedObjectSet(MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver)
+ : LinkedObjectSet(*MemMgr, *Resolver), MemMgr(std::move(MemMgr)),
+ Resolver(std::move(Resolver)) { }
+
+ void Finalize() override {
+ State = Finalizing;
+ RTDyld->resolveRelocations();
+ RTDyld->registerEHFrames();
+ MemMgr->finalizeMemory();
+ OwnedBuffers.clear();
+ State = Finalized;
+ }
+
+ private:
+ MemoryManagerPtrT MemMgr;
+ SymbolResolverPtrT Resolver;
+ };
+
+ template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
+ std::unique_ptr<LinkedObjectSet>
+ createLinkedObjectSet(MemoryManagerPtrT MemMgr, SymbolResolverPtrT Resolver) {
+ typedef ConcreteLinkedObjectSet<MemoryManagerPtrT, SymbolResolverPtrT> LOS;
+ return llvm::make_unique<LOS>(std::move(MemMgr), std::move(Resolver));
+ }
+
+public:
+
+ /// @brief LoadedObjectInfo list. Contains a list of owning pointers to
+ /// RuntimeDyld::LoadedObjectInfo instances.
+ typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
+ LoadedObjInfoList;
+
+ /// @brief Functor for receiving finalization notifications.
+ typedef std::function<void(ObjSetHandleT)> NotifyFinalizedFtor;
+
+ /// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
+ /// and NotifyFinalized functors.
+ ObjectLinkingLayer(
+ NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
+ NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
+ : NotifyLoaded(std::move(NotifyLoaded)),
+ NotifyFinalized(std::move(NotifyFinalized)) {}
+
+ /// @brief Add a set of objects (or archives) that will be treated as a unit
+ /// for the purposes of symbol lookup and memory management.
+ ///
+  /// @return A handle that can be used to refer to the loaded objects (for
+  ///         example, to free their memory via removeObjectSet). The
+  ///         LoadedObjInfoList, containing one LoadedObjInfo instance per
+  ///         object, is passed to the NotifyLoaded functor rather than
+  ///         returned.
+ ///
+  /// This version of this method allows the client to pass in a memory
+  /// manager and a symbol resolver that will be used to allocate memory and
+  /// look up external symbol addresses for the given objects.
+ template <typename ObjSetT,
+ typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ObjSetHandleT addObjectSet(const ObjSetT &Objects,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ ObjSetHandleT Handle =
+ LinkedObjSetList.insert(
+ LinkedObjSetList.end(),
+ createLinkedObjectSet(std::move(MemMgr), std::move(Resolver)));
+
+ LinkedObjectSet &LOS = **Handle;
+ LoadedObjInfoList LoadedObjInfos;
+
+ for (auto &Obj : Objects)
+ LoadedObjInfos.push_back(LOS.addObject(*Obj));
+
+ NotifyLoaded(Handle, Objects, LoadedObjInfos);
+
+ return Handle;
+ }
+
+ /// @brief Remove the set of objects associated with handle H.
+ ///
+ /// All memory allocated for the objects will be freed, and the sections and
+ /// symbols they provided will no longer be available. No attempt is made to
+ /// re-emit the missing symbols, and any use of these symbols (directly or
+ /// indirectly) will result in undefined behavior. If dependence tracking is
+ /// required to detect or resolve such issues it should be added at a higher
+ /// layer.
+ void removeObjectSet(ObjSetHandleT H) {
+ // How do we invalidate the symbols in H?
+ LinkedObjSetList.erase(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ for (auto I = LinkedObjSetList.begin(), E = LinkedObjSetList.end(); I != E;
+ ++I)
+ if (auto Symbol = findSymbolIn(I, Name, ExportedSymbolsOnly))
+ return Symbol;
+
+ return nullptr;
+ }
+
+ /// @brief Search for the given named symbol in the context of the set of
+ /// loaded objects represented by the handle H.
+ /// @param H The handle for the object set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given object set.
+ JITSymbol findSymbolIn(ObjSetHandleT H, StringRef Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = (*H)->getSymbol(Name)) {
+ if (Sym.isExported() || !ExportedSymbolsOnly) {
+ auto Addr = Sym.getAddress();
+ auto Flags = Sym.getFlags();
+ if (!(*H)->NeedsFinalization()) {
+ // If this instance has already been finalized then we can just return
+ // the address.
+ return JITSymbol(Addr, Flags);
+ } else {
+ // If this instance needs finalization return a functor that will do
+ // it. The functor still needs to double-check whether finalization is
+ // required, in case someone else finalizes this set before the
+ // functor is called.
+ auto GetAddress =
+ [this, Addr, H]() {
+ if ((*H)->NeedsFinalization()) {
+ (*H)->Finalize();
+ if (NotifyFinalized)
+ NotifyFinalized(H);
+ }
+ return Addr;
+ };
+ return JITSymbol(std::move(GetAddress), Flags);
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ /// @brief Map section addresses for the objects associated with the handle H.
+ void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
+ TargetAddress TargetAddr) {
+ (*H)->mapSectionAddress(LocalAddress, TargetAddr);
+ }
+
+ /// @brief Immediately emit and finalize the object set represented by the
+ /// given handle.
+ /// @param H Handle for object set to emit/finalize.
+ void emitAndFinalize(ObjSetHandleT H) {
+ (*H)->Finalize();
+ if (NotifyFinalized)
+ NotifyFinalized(H);
+ }
+
+private:
+ LinkedObjectSetListT LinkedObjSetList;
+ NotifyLoadedFtor NotifyLoaded;
+ NotifyFinalizedFtor NotifyFinalized;
+};
+
+} // End namespace orc.
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
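A minimal sketch of driving this layer directly, assuming Obj is a
std::unique_ptr<object::ObjectFile> produced elsewhere, Resolver is a
RuntimeDyld::SymbolResolver (e.g. a LambdaResolver), and "foo" names an
exported definition in the object:

    ObjectLinkingLayer<> ObjLayer;

    std::vector<std::unique_ptr<object::ObjectFile>> Objs;
    Objs.push_back(std::move(Obj));

    auto H = ObjLayer.addObjectSet(Objs,
                                   llvm::make_unique<SectionMemoryManager>(),
                                   std::move(Resolver));

    // getAddress() on a not-yet-finalized set resolves relocations, registers
    // EH frames and finalizes memory via Finalize() before returning.
    if (auto Sym = ObjLayer.findSymbolIn(H, "foo", true))
      TargetAddress Addr = Sym.getAddress();

    ObjLayer.removeObjectSet(H); // Frees all memory owned by this set.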
diff --git a/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h b/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h
new file mode 100644
index 0000000000000..309f5a96090ef
--- /dev/null
+++ b/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h
@@ -0,0 +1,53 @@
+//===-- OrcTargetSupport.h - Code to support specific targets --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Target specific code for Orc, e.g. callback assembly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
+
+#include "IndirectionUtils.h"
+
+namespace llvm {
+namespace orc {
+
+class OrcX86_64 {
+public:
+ static const char *ResolverBlockName;
+
+  /// @brief Insert module-level inline callback asm into module M for the
+  /// callbacks managed by the given JITCompileCallbackManagerBase, JCBM.
+ static void insertResolverBlock(Module &M,
+ JITCompileCallbackManagerBase &JCBM);
+
+ /// @brief Get a label name from the given index.
+ typedef std::function<std::string(unsigned)> LabelNameFtor;
+
+ /// @brief Insert the requested number of trampolines into the given module.
+ /// @param M Module to insert the call block into.
+ /// @param NumCalls Number of calls to create in the call block.
+ /// @param StartIndex Optional argument specifying the index suffix to start
+ /// with.
+ /// @return A functor that provides the symbol name for each entry in the call
+ /// block.
+ ///
+ static LabelNameFtor insertCompileCallbackTrampolines(
+ Module &M,
+ TargetAddress TrampolineAddr,
+ unsigned NumCalls,
+ unsigned StartIndex = 0);
+
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
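The expected consumer of these hooks is the compile callback machinery
declared in IndirectionUtils.h. A rough, hypothetical sketch of the call
sequence (Context, CallbackMgr and TrampAddr are all assumed here: CallbackMgr
is some JITCompileCallbackManagerBase implementation and TrampAddr the
TargetAddress passed through as TrampolineAddr):

    LLVMContext Context;
    Module StubsModule("orc_callback_stubs", Context);

    // Emit the shared resolver block, then a batch of numbered trampolines.
    OrcX86_64::insertResolverBlock(StubsModule, CallbackMgr);
    OrcX86_64::LabelNameFtor GetLabel =
        OrcX86_64::insertCompileCallbackTrampolines(StubsModule, TrampAddr,
                                                    /*NumCalls=*/8,
                                                    /*StartIndex=*/0);

    std::string Trampoline0 = GetLabel(0); // Symbol name of the first entry.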