Diffstat (limited to 'llvm/lib/ExecutionEngine')
-rw-r--r--  llvm/lib/ExecutionEngine/ExecutionEngine.cpp  1309
-rw-r--r--  llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp  436
-rw-r--r--  llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp  238
-rw-r--r--  llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp  258
-rw-r--r--  llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h  95
-rw-r--r--  llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h  453
-rw-r--r--  llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h  69
-rw-r--r--  llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c  480
-rw-r--r--  llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h  258
-rw-r--r--  llvm/lib/ExecutionEngine/Interpreter/Execution.cpp  2172
-rw-r--r--  llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp  509
-rw-r--r--  llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp  102
-rw-r--r--  llvm/lib/ExecutionEngine/Interpreter/Interpreter.h  235
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h  81
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp  538
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h  78
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLink.cpp  228
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp  358
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h  247
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp  132
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/MachO.cpp  81
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp  535
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h  269
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp  736
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp  635
-rw-r--r--  llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp  679
-rw-r--r--  llvm/lib/ExecutionEngine/MCJIT/MCJIT.h  343
-rw-r--r--  llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp  188
-rw-r--r--  llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp  267
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp  319
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp  86
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/Core.cpp  2152
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp  308
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp  43
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp  33
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp  374
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp  69
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/LLJIT.cpp  240
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/Layer.cpp  185
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp  210
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/Legacy.cpp  67
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/NullResolver.cpp  37
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp  487
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp  33
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp  983
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp  158
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h  534
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcError.cpp  115
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp  138
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h  501
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp  54
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp  224
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp  307
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/Speculation.cpp  146
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp  64
-rw-r--r--  llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp  503
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp  131
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp  303
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp  1434
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp  82
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h  48
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp  875
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h  74
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp  1960
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h  189
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h  586
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp  382
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h  167
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h  217
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h  315
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h  305
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp  320
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h  67
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h  541
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h  432
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h  251
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h  239
-rw-r--r--  llvm/lib/ExecutionEngine/SectionMemoryManager.cpp  267
-rw-r--r--  llvm/lib/ExecutionEngine/TargetSelect.cpp  103
79 files changed, 29667 insertions, 0 deletions
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
new file mode 100644
index 0000000000000..ee7a7cb60bc98
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -0,0 +1,1309 @@
+//===-- ExecutionEngine.cpp - Common Implementation shared by EEs ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common interface used by the various execution engine
+// subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cmath>
+#include <cstring>
+#include <mutex>
+using namespace llvm;
+
+#define DEBUG_TYPE "jit"
+
+STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
+STATISTIC(NumGlobals, "Number of global vars initialized");
+
+ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
+ std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) = nullptr;
+
+ExecutionEngine *(*ExecutionEngine::OrcMCJITReplacementCtor)(
+ std::string *ErrorStr, std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) = nullptr;
+
+ExecutionEngine *(*ExecutionEngine::InterpCtor)(std::unique_ptr<Module> M,
+                                                std::string *ErrorStr) = nullptr;
+
+void JITEventListener::anchor() {}
+
+void ObjectCache::anchor() {}
+
+void ExecutionEngine::Init(std::unique_ptr<Module> M) {
+ CompilingLazily = false;
+ GVCompilationDisabled = false;
+ SymbolSearchingDisabled = false;
+
+ // IR module verification is enabled by default in debug builds, and disabled
+ // by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+
+ assert(M && "Module is null?");
+ Modules.push_back(std::move(M));
+}
+
+ExecutionEngine::ExecutionEngine(std::unique_ptr<Module> M)
+ : DL(M->getDataLayout()), LazyFunctionCreator(nullptr) {
+ Init(std::move(M));
+}
+
+ExecutionEngine::ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M)
+ : DL(std::move(DL)), LazyFunctionCreator(nullptr) {
+ Init(std::move(M));
+}
+
+ExecutionEngine::~ExecutionEngine() {
+ clearAllGlobalMappings();
+}
+
+namespace {
+/// Helper class which uses a value handle to automatically delete the
+/// memory block when the GlobalVariable is destroyed.
+class GVMemoryBlock final : public CallbackVH {
+ GVMemoryBlock(const GlobalVariable *GV)
+ : CallbackVH(const_cast<GlobalVariable*>(GV)) {}
+
+public:
+  /// Returns the address the GlobalVariable should be written into. The
+  /// GVMemoryBlock object is allocated immediately before that address.
+ static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
+ Type *ElTy = GV->getValueType();
+ size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
+ void *RawMemory = ::operator new(
+ alignTo(sizeof(GVMemoryBlock), TD.getPreferredAlignment(GV)) + GVSize);
+ new(RawMemory) GVMemoryBlock(GV);
+ return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
+ }
+
+ void deleted() override {
+    // We allocated with plain operator new, with the GV's memory hanging off
+    // the end, so a bare 'delete this' would be wrong: run the destructor in
+    // place, then free the raw allocation.
+ this->~GVMemoryBlock();
+ ::operator delete(this);
+ }
+};
+} // anonymous namespace
+
+char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
+ return GVMemoryBlock::Create(GV, getDataLayout());
+}
+
+void ExecutionEngine::addObjectFile(std::unique_ptr<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
+void
+ExecutionEngine::addObjectFile(object::OwningBinary<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
+void ExecutionEngine::addArchive(object::OwningBinary<object::Archive> A) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addArchive.");
+}
+
+bool ExecutionEngine::removeModule(Module *M) {
+ for (auto I = Modules.begin(), E = Modules.end(); I != E; ++I) {
+ Module *Found = I->get();
+ if (Found == M) {
+ I->release();
+ Modules.erase(I);
+ clearGlobalMappingsFromModule(M);
+ return true;
+ }
+ }
+ return false;
+}
+
+Function *ExecutionEngine::FindFunctionNamed(StringRef FnName) {
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+ Function *F = Modules[i]->getFunction(FnName);
+ if (F && !F->isDeclaration())
+ return F;
+ }
+ return nullptr;
+}
+
+GlobalVariable *ExecutionEngine::FindGlobalVariableNamed(StringRef Name, bool AllowInternal) {
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+ GlobalVariable *GV = Modules[i]->getGlobalVariable(Name,AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
+
+uint64_t ExecutionEngineState::RemoveMapping(StringRef Name) {
+ GlobalAddressMapTy::iterator I = GlobalAddressMap.find(Name);
+ uint64_t OldVal;
+
+ // FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
+ // GlobalAddressMap.
+ if (I == GlobalAddressMap.end())
+ OldVal = 0;
+ else {
+ GlobalAddressReverseMap.erase(I->second);
+ OldVal = I->second;
+ GlobalAddressMap.erase(I);
+ }
+
+ return OldVal;
+}
+
+std::string ExecutionEngine::getMangledName(const GlobalValue *GV) {
+ assert(GV->hasName() && "Global must have name.");
+
+ std::lock_guard<sys::Mutex> locked(lock);
+ SmallString<128> FullName;
+
+ const DataLayout &DL =
+ GV->getParent()->getDataLayout().isDefault()
+ ? getDataLayout()
+ : GV->getParent()->getDataLayout();
+
+ Mangler::getNameWithPrefix(FullName, GV->getName(), DL);
+ return FullName.str();
+}
+
+void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ addGlobalMapping(getMangledName(GV), (uint64_t) Addr);
+}
+
+void ExecutionEngine::addGlobalMapping(StringRef Name, uint64_t Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ assert(!Name.empty() && "Empty GlobalMapping symbol name!");
+
+ LLVM_DEBUG(dbgs() << "JIT: Map \'" << Name << "\' to [" << Addr << "]\n";);
+ uint64_t &CurVal = EEState.getGlobalAddressMap()[Name];
+ assert((!CurVal || !Addr) && "GlobalMapping already established!");
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too.
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
+ std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
+ assert((!V.empty() || !Name.empty()) &&
+ "GlobalMapping already established!");
+ V = Name;
+ }
+}
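+
+// Illustrative sketch (not part of the original file): binding an external
+// declaration to a host-process symbol before execution. "EE", "M", and
+// "MyPutchar" are hypothetical client names.
+//
+//   extern "C" int MyPutchar(int C);
+//   Function *Decl = M->getFunction("putchar"); // declaration only
+//   EE->addGlobalMapping(Decl, (void *)&MyPutchar);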
+
+void ExecutionEngine::clearAllGlobalMappings() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ EEState.getGlobalAddressMap().clear();
+ EEState.getGlobalAddressReverseMap().clear();
+}
+
+void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ for (GlobalObject &GO : M->global_objects())
+ EEState.RemoveMapping(getMangledName(&GO));
+}
+
+uint64_t ExecutionEngine::updateGlobalMapping(const GlobalValue *GV,
+ void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return updateGlobalMapping(getMangledName(GV), (uint64_t) Addr);
+}
+
+uint64_t ExecutionEngine::updateGlobalMapping(StringRef Name, uint64_t Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ ExecutionEngineState::GlobalAddressMapTy &Map =
+ EEState.getGlobalAddressMap();
+
+ // Deleting from the mapping?
+ if (!Addr)
+ return EEState.RemoveMapping(Name);
+
+ uint64_t &CurVal = Map[Name];
+ uint64_t OldVal = CurVal;
+
+ if (CurVal && !EEState.getGlobalAddressReverseMap().empty())
+ EEState.getGlobalAddressReverseMap().erase(CurVal);
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too.
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
+ std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
+ assert((!V.empty() || !Name.empty()) &&
+ "GlobalMapping already established!");
+ V = Name;
+ }
+ return OldVal;
+}
+
+uint64_t ExecutionEngine::getAddressToGlobalIfAvailable(StringRef S) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Address = 0;
+ ExecutionEngineState::GlobalAddressMapTy::iterator I =
+ EEState.getGlobalAddressMap().find(S);
+ if (I != EEState.getGlobalAddressMap().end())
+ Address = I->second;
+ return Address;
+}
+
+void *ExecutionEngine::getPointerToGlobalIfAvailable(StringRef S) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ if (void* Address = (void *) getAddressToGlobalIfAvailable(S))
+ return Address;
+ return nullptr;
+}
+
+void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return getPointerToGlobalIfAvailable(getMangledName(GV));
+}
+
+const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // If we haven't computed the reverse mapping yet, do so first.
+ if (EEState.getGlobalAddressReverseMap().empty()) {
+ for (ExecutionEngineState::GlobalAddressMapTy::iterator
+ I = EEState.getGlobalAddressMap().begin(),
+ E = EEState.getGlobalAddressMap().end(); I != E; ++I) {
+ StringRef Name = I->first();
+ uint64_t Addr = I->second;
+ EEState.getGlobalAddressReverseMap().insert(std::make_pair(
+ Addr, Name));
+ }
+ }
+
+ std::map<uint64_t, std::string>::iterator I =
+ EEState.getGlobalAddressReverseMap().find((uint64_t) Addr);
+
+ if (I != EEState.getGlobalAddressReverseMap().end()) {
+ StringRef Name = I->second;
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i)
+ if (GlobalValue *GV = Modules[i]->getNamedValue(Name))
+ return GV;
+ }
+ return nullptr;
+}
+
+namespace {
+class ArgvArray {
+ std::unique_ptr<char[]> Array;
+ std::vector<std::unique_ptr<char[]>> Values;
+public:
+ /// Turn a vector of strings into a nice argv style array of pointers to null
+ /// terminated strings.
+ void *reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv);
+};
+} // anonymous namespace
+void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv) {
+ Values.clear(); // Free the old contents.
+ Values.reserve(InputArgv.size());
+ unsigned PtrSize = EE->getDataLayout().getPointerSize();
+ Array = std::make_unique<char[]>((InputArgv.size()+1)*PtrSize);
+
+ LLVM_DEBUG(dbgs() << "JIT: ARGV = " << (void *)Array.get() << "\n");
+ Type *SBytePtr = Type::getInt8PtrTy(C);
+
+ for (unsigned i = 0; i != InputArgv.size(); ++i) {
+ unsigned Size = InputArgv[i].size()+1;
+ auto Dest = std::make_unique<char[]>(Size);
+ LLVM_DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void *)Dest.get()
+ << "\n");
+
+ std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest.get());
+ Dest[Size-1] = 0;
+
+ // Endian safe: Array[i] = (PointerTy)Dest;
+ EE->StoreValueToMemory(PTOGV(Dest.get()),
+ (GenericValue*)(&Array[i*PtrSize]), SBytePtr);
+ Values.push_back(std::move(Dest));
+ }
+
+ // Null terminate it
+ EE->StoreValueToMemory(PTOGV(nullptr),
+ (GenericValue*)(&Array[InputArgv.size()*PtrSize]),
+ SBytePtr);
+ return Array.get();
+}
+
+void ExecutionEngine::runStaticConstructorsDestructors(Module &module,
+ bool isDtors) {
+ StringRef Name(isDtors ? "llvm.global_dtors" : "llvm.global_ctors");
+ GlobalVariable *GV = module.getNamedGlobal(Name);
+
+ // If this global has internal linkage, or if it has a use, then it must be
+ // an old-style (llvmgcc3) static ctor with __main linked in and in use. If
+ // this is the case, don't execute any of the global ctors, __main will do
+ // it.
+ if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
+
+ // Should be an array of '{ i32, void ()* }' structs. The first value is
+ // the init priority, which we ignore.
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList)
+ return;
+ for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
+ if (!CS) continue;
+
+ Constant *FP = CS->getOperand(1);
+ if (FP->isNullValue())
+      continue; // Found a sentinel value, ignore.
+
+ // Strip off constant expression casts.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+ if (CE->isCast())
+ FP = CE->getOperand(0);
+
+ // Execute the ctor/dtor function!
+ if (Function *F = dyn_cast<Function>(FP))
+ runFunction(F, None);
+
+ // FIXME: It is marginally lame that we just do nothing here if we see an
+ // entry we don't recognize. It might not be unreasonable for the verifier
+ // to not even allow this and just assert here.
+ }
+}
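+
+// For reference, a typical ctor list this routine walks, in textual IR (the
+// two-field form matching the comment above; later IR revisions append an
+// i8* "associated data" field):
+//
+//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+//       [{ i32, void ()* } { i32 65535, void ()* @init }]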
+
+void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
+ // Execute global ctors/dtors for each module in the program.
+ for (std::unique_ptr<Module> &M : Modules)
+ runStaticConstructorsDestructors(*M, isDtors);
+}
+
+#ifndef NDEBUG
+/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
+static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
+ unsigned PtrSize = EE->getDataLayout().getPointerSize();
+ for (unsigned i = 0; i < PtrSize; ++i)
+ if (*(i + (uint8_t*)Loc))
+ return false;
+ return true;
+}
+#endif
+
+int ExecutionEngine::runFunctionAsMain(Function *Fn,
+ const std::vector<std::string> &argv,
+ const char * const * envp) {
+ std::vector<GenericValue> GVArgs;
+ GenericValue GVArgc;
+ GVArgc.IntVal = APInt(32, argv.size());
+
+ // Check main() type
+ unsigned NumArgs = Fn->getFunctionType()->getNumParams();
+ FunctionType *FTy = Fn->getFunctionType();
+ Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
+
+ // Check the argument types.
+ if (NumArgs > 3)
+ report_fatal_error("Invalid number of arguments of main() supplied");
+ if (NumArgs >= 3 && FTy->getParamType(2) != PPInt8Ty)
+ report_fatal_error("Invalid type for third argument of main() supplied");
+ if (NumArgs >= 2 && FTy->getParamType(1) != PPInt8Ty)
+ report_fatal_error("Invalid type for second argument of main() supplied");
+ if (NumArgs >= 1 && !FTy->getParamType(0)->isIntegerTy(32))
+ report_fatal_error("Invalid type for first argument of main() supplied");
+ if (!FTy->getReturnType()->isIntegerTy() &&
+ !FTy->getReturnType()->isVoidTy())
+ report_fatal_error("Invalid return type of main() supplied");
+
+ ArgvArray CArgv;
+ ArgvArray CEnv;
+ if (NumArgs) {
+ GVArgs.push_back(GVArgc); // Arg #0 = argc.
+ if (NumArgs > 1) {
+ // Arg #1 = argv.
+ GVArgs.push_back(PTOGV(CArgv.reset(Fn->getContext(), this, argv)));
+ assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) &&
+ "argv[0] was null after CreateArgv");
+ if (NumArgs > 2) {
+ std::vector<std::string> EnvVars;
+ for (unsigned i = 0; envp[i]; ++i)
+ EnvVars.emplace_back(envp[i]);
+ // Arg #2 = envp.
+ GVArgs.push_back(PTOGV(CEnv.reset(Fn->getContext(), this, EnvVars)));
+ }
+ }
+ }
+
+ return runFunction(Fn, GVArgs).IntVal.getZExtValue();
+}
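+
+// Illustrative sketch (not part of the original file): driving a module's
+// main() through this entry point. "EE" is an existing ExecutionEngine and
+// "envp" a null-terminated environment block.
+//
+//   Function *MainFn = EE->FindFunctionNamed("main");
+//   std::vector<std::string> Args = {"prog", "input.txt"};
+//   int RC = EE->runFunctionAsMain(MainFn, Args, envp);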
+
+EngineBuilder::EngineBuilder() : EngineBuilder(nullptr) {}
+
+EngineBuilder::EngineBuilder(std::unique_ptr<Module> M)
+ : M(std::move(M)), WhichEngine(EngineKind::Either), ErrorStr(nullptr),
+ OptLevel(CodeGenOpt::Default), MemMgr(nullptr), Resolver(nullptr),
+ UseOrcMCJITReplacement(false) {
+// IR module verification is enabled by default in debug builds, and disabled
+// by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+}
+
+EngineBuilder::~EngineBuilder() = default;
+
+EngineBuilder &EngineBuilder::setMCJITMemoryManager(
+ std::unique_ptr<RTDyldMemoryManager> mcjmm) {
+ auto SharedMM = std::shared_ptr<RTDyldMemoryManager>(std::move(mcjmm));
+ MemMgr = SharedMM;
+ Resolver = SharedMM;
+ return *this;
+}
+
+EngineBuilder&
+EngineBuilder::setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM) {
+ MemMgr = std::shared_ptr<MCJITMemoryManager>(std::move(MM));
+ return *this;
+}
+
+EngineBuilder &
+EngineBuilder::setSymbolResolver(std::unique_ptr<LegacyJITSymbolResolver> SR) {
+ Resolver = std::shared_ptr<LegacyJITSymbolResolver>(std::move(SR));
+ return *this;
+}
+
+ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
+ std::unique_ptr<TargetMachine> TheTM(TM); // Take ownership.
+
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ if (sys::DynamicLibrary::LoadLibraryPermanently(nullptr, ErrorStr))
+ return nullptr;
+
+ // If the user specified a memory manager but didn't specify which engine to
+ // create, we assume they only want the JIT, and we fail if they only want
+ // the interpreter.
+ if (MemMgr) {
+ if (WhichEngine & EngineKind::JIT)
+ WhichEngine = EngineKind::JIT;
+ else {
+ if (ErrorStr)
+ *ErrorStr = "Cannot create an interpreter with a memory manager.";
+ return nullptr;
+ }
+ }
+
+ // Unless the interpreter was explicitly selected or the JIT is not linked,
+ // try making a JIT.
+ if ((WhichEngine & EngineKind::JIT) && TheTM) {
+ if (!TM->getTarget().hasJIT()) {
+ errs() << "WARNING: This target JIT is not designed for the host"
+ << " you are running. If bad things happen, please choose"
+ << " a different -march switch.\n";
+ }
+
+ ExecutionEngine *EE = nullptr;
+ if (ExecutionEngine::OrcMCJITReplacementCtor && UseOrcMCJITReplacement) {
+ EE = ExecutionEngine::OrcMCJITReplacementCtor(ErrorStr, std::move(MemMgr),
+ std::move(Resolver),
+ std::move(TheTM));
+ EE->addModule(std::move(M));
+ } else if (ExecutionEngine::MCJITCtor)
+ EE = ExecutionEngine::MCJITCtor(std::move(M), ErrorStr, std::move(MemMgr),
+ std::move(Resolver), std::move(TheTM));
+
+ if (EE) {
+ EE->setVerifyModules(VerifyModules);
+ return EE;
+ }
+ }
+
+ // If we can't make a JIT and we didn't request one specifically, try making
+ // an interpreter instead.
+ if (WhichEngine & EngineKind::Interpreter) {
+ if (ExecutionEngine::InterpCtor)
+ return ExecutionEngine::InterpCtor(std::move(M), ErrorStr);
+ if (ErrorStr)
+ *ErrorStr = "Interpreter has not been linked in.";
+ return nullptr;
+ }
+
+ if ((WhichEngine & EngineKind::JIT) && !ExecutionEngine::MCJITCtor) {
+ if (ErrorStr)
+ *ErrorStr = "JIT has not been linked in.";
+ }
+
+ return nullptr;
+}
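+
+// Illustrative sketch (not part of the original file): a minimal MCJIT setup
+// through EngineBuilder, assuming the native target and its asm printer have
+// been initialized and a module "M" was built elsewhere.
+//
+//   std::string Err;
+//   ExecutionEngine *EE = EngineBuilder(std::move(M))
+//                             .setEngineKind(EngineKind::JIT)
+//                             .setErrorStr(&Err)
+//                             .create();
+//   if (!EE)
+//     errs() << "Failed to create ExecutionEngine: " << Err << "\n";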
+
+void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
+ if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV)))
+ return getPointerToFunction(F);
+
+ std::lock_guard<sys::Mutex> locked(lock);
+ if (void* P = getPointerToGlobalIfAvailable(GV))
+ return P;
+
+ // Global variable might have been added since interpreter started.
+ if (GlobalVariable *GVar =
+ const_cast<GlobalVariable *>(dyn_cast<GlobalVariable>(GV)))
+ EmitGlobalVariable(GVar);
+ else
+ llvm_unreachable("Global hasn't had an address allocated yet!");
+
+ return getPointerToGlobalIfAvailable(GV);
+}
+
+/// Converts a Constant* into a GenericValue, including handling of
+/// ConstantExpr values.
+GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
+  // If it's undefined, return garbage of the right width/shape.
+ if (isa<UndefValue>(C)) {
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ default:
+ break;
+ case Type::IntegerTyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ // Although the value is undefined, we still have to construct an APInt
+ // with the correct bit width.
+ Result.IntVal = APInt(C->getType()->getPrimitiveSizeInBits(), 0);
+ break;
+ case Type::StructTyID: {
+ // if the whole struct is 'undef' just reserve memory for the value.
+ if(StructType *STy = dyn_cast<StructType>(C->getType())) {
+ unsigned int elemNum = STy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ for (unsigned int i = 0; i < elemNum; ++i) {
+ Type *ElemTy = STy->getElementType(i);
+ if (ElemTy->isIntegerTy())
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ else if (ElemTy->isAggregateType()) {
+ const Constant *ElemUndef = UndefValue::get(ElemTy);
+ Result.AggregateVal[i] = getConstantValue(ElemUndef);
+ }
+ }
+ }
+ }
+ break;
+ case Type::VectorTyID:
+ // if the whole vector is 'undef' just reserve memory for the value.
+ auto* VTy = cast<VectorType>(C->getType());
+ Type *ElemTy = VTy->getElementType();
+ unsigned int elemNum = VTy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ if (ElemTy->isIntegerTy())
+ for (unsigned int i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ break;
+ }
+ return Result;
+ }
+
+ // Otherwise, if the value is a ConstantExpr...
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ Constant *Op0 = CE->getOperand(0);
+ switch (CE->getOpcode()) {
+ case Instruction::GetElementPtr: {
+ // Compute the index
+ GenericValue Result = getConstantValue(Op0);
+ APInt Offset(DL.getPointerSizeInBits(), 0);
+ cast<GEPOperator>(CE)->accumulateConstantOffset(DL, Offset);
+
+ char* tmp = (char*) Result.PointerVal;
+ Result = PTOGV(tmp + Offset.getSExtValue());
+ return Result;
+ }
+ case Instruction::Trunc: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.trunc(BitWidth);
+ return GV;
+ }
+ case Instruction::ZExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.zext(BitWidth);
+ return GV;
+ }
+ case Instruction::SExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.sext(BitWidth);
+ return GV;
+ }
+ case Instruction::FPTrunc: {
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.FloatVal = float(GV.DoubleVal);
+ return GV;
+ }
+ case Instruction::FPExt:{
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.DoubleVal = double(GV.FloatVal);
+ return GV;
+ }
+ case Instruction::UIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.roundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.roundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended());
+ (void)apf.convertFromAPInt(GV.IntVal,
+ false,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::SIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.signedRoundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.signedRoundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended());
+ (void)apf.convertFromAPInt(GV.IntVal,
+ true,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::FPToUI: // double->APInt conversion handles sign
+ case Instruction::FPToSI: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ if (Op0->getType()->isFloatTy())
+ GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth);
+ else if (Op0->getType()->isDoubleTy())
+ GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth);
+ else if (Op0->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat(APFloat::x87DoubleExtended(), GV.IntVal);
+ uint64_t v;
+ bool ignored;
+ (void)apf.convertToInteger(makeMutableArrayRef(v), BitWidth,
+ CE->getOpcode()==Instruction::FPToSI,
+ APFloat::rmTowardZero, &ignored);
+ GV.IntVal = v; // endian?
+ }
+ return GV;
+ }
+ case Instruction::PtrToInt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = DL.getTypeSizeInBits(Op0->getType());
+ assert(PtrWidth <= 64 && "Bad pointer width");
+ GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
+ uint32_t IntWidth = DL.getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
+ return GV;
+ }
+ case Instruction::IntToPtr: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = DL.getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
+ assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
+ GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
+ return GV;
+ }
+ case Instruction::BitCast: {
+ GenericValue GV = getConstantValue(Op0);
+ Type* DestTy = CE->getType();
+ switch (Op0->getType()->getTypeID()) {
+ default: llvm_unreachable("Invalid bitcast operand");
+ case Type::IntegerTyID:
+ assert(DestTy->isFloatingPointTy() && "invalid bitcast");
+ if (DestTy->isFloatTy())
+ GV.FloatVal = GV.IntVal.bitsToFloat();
+ else if (DestTy->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.bitsToDouble();
+ break;
+ case Type::FloatTyID:
+ assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
+ GV.IntVal = APInt::floatToBits(GV.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
+ GV.IntVal = APInt::doubleToBits(GV.DoubleVal);
+ break;
+ case Type::PointerTyID:
+ assert(DestTy->isPointerTy() && "Invalid bitcast");
+ break; // getConstantValue(Op0) above already converted it
+ }
+ return GV;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ GenericValue LHS = getConstantValue(Op0);
+ GenericValue RHS = getConstantValue(CE->getOperand(1));
+ GenericValue GV;
+ switch (CE->getOperand(0)->getType()->getTypeID()) {
+ default: llvm_unreachable("Bad add type!");
+ case Type::IntegerTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid integer opcode");
+ case Instruction::Add: GV.IntVal = LHS.IntVal + RHS.IntVal; break;
+ case Instruction::Sub: GV.IntVal = LHS.IntVal - RHS.IntVal; break;
+ case Instruction::Mul: GV.IntVal = LHS.IntVal * RHS.IntVal; break;
+ case Instruction::UDiv:GV.IntVal = LHS.IntVal.udiv(RHS.IntVal); break;
+ case Instruction::SDiv:GV.IntVal = LHS.IntVal.sdiv(RHS.IntVal); break;
+ case Instruction::URem:GV.IntVal = LHS.IntVal.urem(RHS.IntVal); break;
+ case Instruction::SRem:GV.IntVal = LHS.IntVal.srem(RHS.IntVal); break;
+ case Instruction::And: GV.IntVal = LHS.IntVal & RHS.IntVal; break;
+ case Instruction::Or: GV.IntVal = LHS.IntVal | RHS.IntVal; break;
+ case Instruction::Xor: GV.IntVal = LHS.IntVal ^ RHS.IntVal; break;
+ }
+ break;
+ case Type::FloatTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid float opcode");
+ case Instruction::FAdd:
+ GV.FloatVal = LHS.FloatVal + RHS.FloatVal; break;
+ case Instruction::FSub:
+ GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break;
+ case Instruction::FMul:
+ GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break;
+ case Instruction::FDiv:
+ GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
+ case Instruction::FRem:
+ GV.FloatVal = std::fmod(LHS.FloatVal,RHS.FloatVal); break;
+ }
+ break;
+ case Type::DoubleTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid double opcode");
+ case Instruction::FAdd:
+ GV.DoubleVal = LHS.DoubleVal + RHS.DoubleVal; break;
+ case Instruction::FSub:
+ GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break;
+ case Instruction::FMul:
+ GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break;
+ case Instruction::FDiv:
+ GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
+ case Instruction::FRem:
+ GV.DoubleVal = std::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
+ }
+ break;
+ case Type::X86_FP80TyID:
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID: {
+ const fltSemantics &Sem = CE->getOperand(0)->getType()->getFltSemantics();
+ APFloat apfLHS = APFloat(Sem, LHS.IntVal);
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid long double opcode");
+ case Instruction::FAdd:
+ apfLHS.add(APFloat(Sem, RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FSub:
+ apfLHS.subtract(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FMul:
+ apfLHS.multiply(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FDiv:
+ apfLHS.divide(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FRem:
+ apfLHS.mod(APFloat(Sem, RHS.IntVal));
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ }
+ }
+ break;
+ }
+ return GV;
+ }
+ default:
+ break;
+ }
+
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ConstantExpr not handled: " << *CE;
+ report_fatal_error(OS.str());
+ }
+
+ // Otherwise, we have a simple constant.
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ case Type::FloatTyID:
+ Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble();
+ break;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+    Result.IntVal = cast<ConstantFP>(C)->getValueAPF().bitcastToAPInt();
+ break;
+ case Type::IntegerTyID:
+ Result.IntVal = cast<ConstantInt>(C)->getValue();
+ break;
+ case Type::PointerTyID:
+ while (auto *A = dyn_cast<GlobalAlias>(C)) {
+ C = A->getAliasee();
+ }
+ if (isa<ConstantPointerNull>(C))
+ Result.PointerVal = nullptr;
+ else if (const Function *F = dyn_cast<Function>(C))
+ Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
+ else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
+ Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV)));
+ else
+ llvm_unreachable("Unknown constant pointer type!");
+ break;
+ case Type::VectorTyID: {
+ unsigned elemNum;
+ Type* ElemTy;
+ const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
+ const ConstantVector *CV = dyn_cast<ConstantVector>(C);
+ const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(C);
+
+ if (CDV) {
+ elemNum = CDV->getNumElements();
+ ElemTy = CDV->getElementType();
+ } else if (CV || CAZ) {
+ auto* VTy = cast<VectorType>(C->getType());
+ elemNum = VTy->getNumElements();
+ ElemTy = VTy->getElementType();
+ } else {
+ llvm_unreachable("Unknown constant vector type!");
+ }
+
+ Result.AggregateVal.resize(elemNum);
+ // Check if vector holds floats.
+ if(ElemTy->isFloatTy()) {
+ if (CAZ) {
+ GenericValue floatZero;
+ floatZero.FloatVal = 0.f;
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ floatZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].FloatVal = cast<ConstantFP>(
+ CV->getOperand(i))->getValueAPF().convertToFloat();
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].FloatVal = CDV->getElementAsFloat(i);
+
+ break;
+ }
+ // Check if vector holds doubles.
+ if (ElemTy->isDoubleTy()) {
+ if (CAZ) {
+ GenericValue doubleZero;
+ doubleZero.DoubleVal = 0.0;
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ doubleZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].DoubleVal = cast<ConstantFP>(
+ CV->getOperand(i))->getValueAPF().convertToDouble();
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].DoubleVal = CDV->getElementAsDouble(i);
+
+ break;
+ }
+ // Check if vector holds integers.
+ if (ElemTy->isIntegerTy()) {
+ if (CAZ) {
+ GenericValue intZero;
+ intZero.IntVal = APInt(ElemTy->getScalarSizeInBits(), 0ull);
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ intZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].IntVal = cast<ConstantInt>(
+ CV->getOperand(i))->getValue();
+ else {
+ Result.AggregateVal[i].IntVal =
+ APInt(CV->getOperand(i)->getType()->getPrimitiveSizeInBits(), 0);
+ }
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal = APInt(
+ CDV->getElementType()->getPrimitiveSizeInBits(),
+ CDV->getElementAsInteger(i));
+
+ break;
+ }
+ llvm_unreachable("Unknown constant pointer type!");
+ }
+ break;
+
+ default:
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ERROR: Constant unimplemented for type: " << *C->getType();
+ report_fatal_error(OS.str());
+ }
+
+ return Result;
+}
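+
+// Example of a ConstantExpr this function folds, in textual IR (typed-pointer
+// form): a GEP into a global, handled by the GetElementPtr case above via
+// accumulateConstantOffset.
+//
+//   @arr = global [4 x i32] zeroinitializer
+//   @p = global i32* getelementptr inbounds ([4 x i32], [4 x i32]* @arr,
+//                                            i64 0, i64 2)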
+
+void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
+ GenericValue *Ptr, Type *Ty) {
+ const unsigned StoreBytes = getDataLayout().getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ default:
+ dbgs() << "Cannot store value of type " << *Ty << "!\n";
+ break;
+ case Type::IntegerTyID:
+ StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes);
+ break;
+ case Type::FloatTyID:
+ *((float*)Ptr) = Val.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ *((double*)Ptr) = Val.DoubleVal;
+ break;
+ case Type::X86_FP80TyID:
+ memcpy(Ptr, Val.IntVal.getRawData(), 10);
+ break;
+ case Type::PointerTyID:
+ // Ensure 64 bit target pointers are fully initialized on 32 bit hosts.
+ if (StoreBytes != sizeof(PointerTy))
+ memset(&(Ptr->PointerVal), 0, StoreBytes);
+
+ *((PointerTy*)Ptr) = Val.PointerVal;
+ break;
+ case Type::VectorTyID:
+ for (unsigned i = 0; i < Val.AggregateVal.size(); ++i) {
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ *(((double*)Ptr)+i) = Val.AggregateVal[i].DoubleVal;
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ *(((float*)Ptr)+i) = Val.AggregateVal[i].FloatVal;
+ if (cast<VectorType>(Ty)->getElementType()->isIntegerTy()) {
+ unsigned numOfBytes =(Val.AggregateVal[i].IntVal.getBitWidth()+7)/8;
+ StoreIntToMemory(Val.AggregateVal[i].IntVal,
+ (uint8_t*)Ptr + numOfBytes*i, numOfBytes);
+ }
+ }
+ break;
+ }
+
+ if (sys::IsLittleEndianHost != getDataLayout().isLittleEndian())
+ // Host and target are different endian - reverse the stored bytes.
+ std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
+}
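+
+// Worked example of the final byte swap: storing the i32 constant 0x11223344
+// on a little-endian host first writes bytes 44 33 22 11; if the target data
+// layout is big-endian, the reverse above turns them into 11 22 33 44.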
+
+/// FIXME: document
+///
+void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
+ GenericValue *Ptr,
+ Type *Ty) {
+ const unsigned LoadBytes = getDataLayout().getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ // An APInt with all words initially zero.
+ Result.IntVal = APInt(cast<IntegerType>(Ty)->getBitWidth(), 0);
+ LoadIntFromMemory(Result.IntVal, (uint8_t*)Ptr, LoadBytes);
+ break;
+ case Type::FloatTyID:
+ Result.FloatVal = *((float*)Ptr);
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = *((double*)Ptr);
+ break;
+ case Type::PointerTyID:
+ Result.PointerVal = *((PointerTy*)Ptr);
+ break;
+ case Type::X86_FP80TyID: {
+ // This is endian dependent, but it will only work on x86 anyway.
+ // FIXME: Will not trap if loading a signaling NaN.
+ uint64_t y[2];
+ memcpy(y, Ptr, 10);
+ Result.IntVal = APInt(80, y);
+ break;
+ }
+ case Type::VectorTyID: {
+ auto *VT = cast<VectorType>(Ty);
+ Type *ElemT = VT->getElementType();
+ const unsigned numElems = VT->getNumElements();
+ if (ElemT->isFloatTy()) {
+ Result.AggregateVal.resize(numElems);
+ for (unsigned i = 0; i < numElems; ++i)
+ Result.AggregateVal[i].FloatVal = *((float*)Ptr+i);
+ }
+ if (ElemT->isDoubleTy()) {
+ Result.AggregateVal.resize(numElems);
+ for (unsigned i = 0; i < numElems; ++i)
+ Result.AggregateVal[i].DoubleVal = *((double*)Ptr+i);
+ }
+ if (ElemT->isIntegerTy()) {
+ GenericValue intZero;
+ const unsigned elemBitWidth = cast<IntegerType>(ElemT)->getBitWidth();
+ intZero.IntVal = APInt(elemBitWidth, 0);
+ Result.AggregateVal.resize(numElems, intZero);
+ for (unsigned i = 0; i < numElems; ++i)
+ LoadIntFromMemory(Result.AggregateVal[i].IntVal,
+ (uint8_t*)Ptr+((elemBitWidth+7)/8)*i, (elemBitWidth+7)/8);
+ }
+ break;
+ }
+ default:
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "Cannot load value of type " << *Ty << "!";
+ report_fatal_error(OS.str());
+ }
+}
+
+void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
+ LLVM_DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
+ LLVM_DEBUG(Init->dump());
+ if (isa<UndefValue>(Init))
+ return;
+
+ if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
+ unsigned ElementSize =
+ getDataLayout().getTypeAllocSize(CP->getType()->getElementType());
+ for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
+ InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ }
+
+ if (isa<ConstantAggregateZero>(Init)) {
+ memset(Addr, 0, (size_t)getDataLayout().getTypeAllocSize(Init->getType()));
+ return;
+ }
+
+ if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
+ unsigned ElementSize =
+ getDataLayout().getTypeAllocSize(CPA->getType()->getElementType());
+ for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ }
+
+ if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
+ const StructLayout *SL =
+ getDataLayout().getStructLayout(cast<StructType>(CPS->getType()));
+ for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
+ return;
+ }
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(Init)) {
+ // CDS is already laid out in host memory order.
+ StringRef Data = CDS->getRawDataValues();
+ memcpy(Addr, Data.data(), Data.size());
+ return;
+ }
+
+ if (Init->getType()->isFirstClassType()) {
+ GenericValue Val = getConstantValue(Init);
+ StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
+ return;
+ }
+
+ LLVM_DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
+ llvm_unreachable("Unknown constant type to initialize memory with!");
+}
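+
+// Worked example: initializing
+//   @g = global { i32, float } { i32 7, float 1.0 }
+// recurses through the ConstantStruct case above, writing the i32 at struct
+// offset 0 and the float at the offset reported by the StructLayout.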
+
+/// EmitGlobals - Emit all of the global variables to memory, storing their
+/// addresses into GlobalAddress. This must make sure to copy the contents of
+/// their initializers into the memory.
+void ExecutionEngine::emitGlobals() {
+ // Loop over all of the global variables in the program, allocating the memory
+ // to hold them. If there is more than one module, do a prepass over globals
+ // to figure out how the different modules should link together.
+ std::map<std::pair<std::string, Type*>,
+ const GlobalValue*> LinkedGlobalsMap;
+
+ if (Modules.size() != 1) {
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
+ Module &M = *Modules[m];
+ for (const auto &GV : M.globals()) {
+ if (GV.hasLocalLinkage() || GV.isDeclaration() ||
+ GV.hasAppendingLinkage() || !GV.hasName())
+ continue;// Ignore external globals and globals with internal linkage.
+
+ const GlobalValue *&GVEntry =
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())];
+
+ // If this is the first time we've seen this global, it is the canonical
+ // version.
+ if (!GVEntry) {
+ GVEntry = &GV;
+ continue;
+ }
+
+ // If the existing global is strong, never replace it.
+ if (GVEntry->hasExternalLinkage())
+ continue;
+
+ // Otherwise, we know it's linkonce/weak, replace it if this is a strong
+ // symbol. FIXME is this right for common?
+ if (GV.hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
+ GVEntry = &GV;
+ }
+ }
+ }
+
+ std::vector<const GlobalValue*> NonCanonicalGlobals;
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
+ Module &M = *Modules[m];
+ for (const auto &GV : M.globals()) {
+ // In the multi-module case, see what this global maps to.
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry =
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())]) {
+ // If something else is the canonical global, ignore this one.
+ if (GVEntry != &GV) {
+ NonCanonicalGlobals.push_back(&GV);
+ continue;
+ }
+ }
+ }
+
+ if (!GV.isDeclaration()) {
+ addGlobalMapping(&GV, getMemoryForGV(&GV));
+ } else {
+ // External variable reference. Try to use the dynamic loader to
+ // get a pointer to it.
+ if (void *SymAddr =
+ sys::DynamicLibrary::SearchForAddressOfSymbol(GV.getName()))
+ addGlobalMapping(&GV, SymAddr);
+ else {
+ report_fatal_error("Could not resolve external global address: "
+ +GV.getName());
+ }
+ }
+ }
+
+ // If there are multiple modules, map the non-canonical globals to their
+ // canonical location.
+ if (!NonCanonicalGlobals.empty()) {
+ for (unsigned i = 0, e = NonCanonicalGlobals.size(); i != e; ++i) {
+ const GlobalValue *GV = NonCanonicalGlobals[i];
+ const GlobalValue *CGV =
+ LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
+ void *Ptr = getPointerToGlobalIfAvailable(CGV);
+ assert(Ptr && "Canonical global wasn't codegen'd!");
+ addGlobalMapping(GV, Ptr);
+ }
+ }
+
+ // Now that all of the globals are set up in memory, loop through them all
+ // and initialize their contents.
+ for (const auto &GV : M.globals()) {
+ if (!GV.isDeclaration()) {
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry =
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())])
+ if (GVEntry != &GV) // Not the canonical variable.
+ continue;
+ }
+ EmitGlobalVariable(&GV);
+ }
+ }
+ }
+}
+
+// EmitGlobalVariable - This method emits the specified global variable to the
+// address specified in GlobalAddresses, or allocates new memory if it's not
+// already in the map.
+void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
+ void *GA = getPointerToGlobalIfAvailable(GV);
+
+ if (!GA) {
+ // If it's not already specified, allocate memory for the global.
+ GA = getMemoryForGV(GV);
+
+ // If we failed to allocate memory for this global, return.
+ if (!GA) return;
+
+ addGlobalMapping(GV, GA);
+ }
+
+ // Don't initialize if it's thread local, let the client do it.
+ if (!GV->isThreadLocal())
+ InitializeMemory(GV->getInitializer(), GA);
+
+ Type *ElTy = GV->getValueType();
+ size_t GVSize = (size_t)getDataLayout().getTypeAllocSize(ElTy);
+ NumInitBytes += (unsigned)GVSize;
+ ++NumGlobals;
+}
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
new file mode 100644
index 0000000000000..c741fe2b37783
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -0,0 +1,436 @@
+//===-- ExecutionEngineBindings.cpp - C bindings for EEs ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C bindings for the ExecutionEngine library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/CodeGenCWrappers.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cstring>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jit"
+
+// Wrapping the C bindings types.
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
+
+static LLVMTargetMachineRef wrap(const TargetMachine *P) {
+ return
+ reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P));
+}
+
+/*===-- Operations on generic values --------------------------------------===*/
+
+LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
+ unsigned long long N,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->IntVal = APInt(unwrap<IntegerType>(Ty)->getBitWidth(), N, IsSigned);
+ return wrap(GenVal);
+}
+
+LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->PointerVal = P;
+ return wrap(GenVal);
+}
+
+LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef TyRef, double N) {
+ GenericValue *GenVal = new GenericValue();
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ GenVal->FloatVal = N;
+ break;
+ case Type::DoubleTyID:
+ GenVal->DoubleVal = N;
+ break;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ }
+ return wrap(GenVal);
+}
+
+unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef) {
+ return unwrap(GenValRef)->IntVal.getBitWidth();
+}
+
+unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenValRef,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = unwrap(GenValRef);
+ if (IsSigned)
+ return GenVal->IntVal.getSExtValue();
+ else
+ return GenVal->IntVal.getZExtValue();
+}
+
+void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal) {
+ return unwrap(GenVal)->PointerVal;
+}
+
+double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) {
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ return unwrap(GenVal)->FloatVal;
+ case Type::DoubleTyID:
+ return unwrap(GenVal)->DoubleVal;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ }
+}
+
+void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) {
+ delete unwrap(GenVal);
+}
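+
+// Illustrative sketch (not part of the original file): round-tripping an
+// integer through the generic-value API. "Int32Ty" is assumed to be an
+// LLVMTypeRef for i32.
+//
+//   LLVMGenericValueRef V =
+//       LLVMCreateGenericValueOfInt(Int32Ty, 42, /*IsSigned=*/1);
+//   unsigned long long N = LLVMGenericValueToInt(V, /*IsSigned=*/1); // 42
+//   LLVMDisposeGenericValue(V);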
+
+/*===-- Operations on execution engines -----------------------------------===*/
+
+LLVMBool LLVMCreateExecutionEngineForModule(LLVMExecutionEngineRef *OutEE,
+ LLVMModuleRef M,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::Either)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *EE = builder.create()){
+ *OutEE = wrap(EE);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+LLVMBool LLVMCreateInterpreterForModule(LLVMExecutionEngineRef *OutInterp,
+ LLVMModuleRef M,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::Interpreter)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *Interp = builder.create()) {
+ *OutInterp = wrap(Interp);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
+ LLVMModuleRef M,
+ unsigned OptLevel,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOpt::Level)OptLevel);
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions,
+ size_t SizeOfPassedOptions) {
+ LLVMMCJITCompilerOptions options;
+ memset(&options, 0, sizeof(options)); // Most fields are zero by default.
+ options.CodeModel = LLVMCodeModelJITDefault;
+
+ memcpy(PassedOptions, &options,
+ std::min(sizeof(options), SizeOfPassedOptions));
+}
+
+LLVMBool LLVMCreateMCJITCompilerForModule(
+ LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
+ LLVMMCJITCompilerOptions *PassedOptions, size_t SizeOfPassedOptions,
+ char **OutError) {
+ LLVMMCJITCompilerOptions options;
+ // If the user passed a larger sized options struct, then they were compiled
+ // against a newer LLVM. Tell them that something is wrong.
+ if (SizeOfPassedOptions > sizeof(options)) {
+ *OutError = strdup(
+ "Refusing to use options struct that is larger than my own; assuming "
+ "LLVM library mismatch.");
+ return 1;
+ }
+
+ // Defend against the user having an old version of the API by ensuring that
+ // any fields they didn't see are cleared. We must defend against fields being
+ // set to the bitwise equivalent of zero, and assume that this means "do the
+ // default" as if that option hadn't been available.
+ LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
+ memcpy(&options, PassedOptions, SizeOfPassedOptions);
+
+ TargetOptions targetOptions;
+ targetOptions.EnableFastISel = options.EnableFastISel;
+ std::unique_ptr<Module> Mod(unwrap(M));
+
+ if (Mod)
+ // Set function attribute "no-frame-pointer-elim" based on
+ // NoFramePointerElim.
+ for (auto &F : *Mod) {
+ auto Attrs = F.getAttributes();
+ StringRef Value(options.NoFramePointerElim ? "true" : "false");
+ Attrs = Attrs.addAttribute(F.getContext(), AttributeList::FunctionIndex,
+ "no-frame-pointer-elim", Value);
+ F.setAttributes(Attrs);
+ }
+
+ std::string Error;
+ EngineBuilder builder(std::move(Mod));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOpt::Level)options.OptLevel)
+ .setTargetOptions(targetOptions);
+ bool JIT;
+ if (Optional<CodeModel::Model> CM = unwrap(options.CodeModel, JIT))
+ builder.setCodeModel(*CM);
+ if (options.MCJMM)
+ builder.setMCJITMemoryManager(
+ std::unique_ptr<RTDyldMemoryManager>(unwrap(options.MCJMM)));
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
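+
+// Illustrative sketch (not part of the original file): the intended
+// size-negotiation protocol for the options struct, as seen from a C client.
+// "Mod" is an assumed LLVMModuleRef.
+//
+//   struct LLVMMCJITCompilerOptions Opts;
+//   LLVMInitializeMCJITCompilerOptions(&Opts, sizeof(Opts));
+//   Opts.OptLevel = 2;
+//   char *Err = NULL;
+//   LLVMExecutionEngineRef EE;
+//   if (LLVMCreateMCJITCompilerForModule(&EE, Mod, &Opts, sizeof(Opts), &Err))
+//     fprintf(stderr, "%s\n", Err);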
+
+void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE) {
+ delete unwrap(EE);
+}
+
+void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE) {
+ unwrap(EE)->finalizeObject();
+ unwrap(EE)->runStaticConstructorsDestructors(false);
+}
+
+void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) {
+ unwrap(EE)->finalizeObject();
+ unwrap(EE)->runStaticConstructorsDestructors(true);
+}
+
+int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned ArgC, const char * const *ArgV,
+ const char * const *EnvP) {
+ unwrap(EE)->finalizeObject();
+
+ std::vector<std::string> ArgVec(ArgV, ArgV + ArgC);
+ return unwrap(EE)->runFunctionAsMain(unwrap<Function>(F), ArgVec, EnvP);
+}
+
+LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned NumArgs,
+ LLVMGenericValueRef *Args) {
+ unwrap(EE)->finalizeObject();
+
+ std::vector<GenericValue> ArgVec;
+ ArgVec.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ ArgVec.push_back(*unwrap(Args[I]));
+
+ GenericValue *Result = new GenericValue();
+ *Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
+ return wrap(Result);
+}
+
+void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) {
+  // Intentionally a no-op: MCJIT does not support freeing machine code for
+  // individual functions. Kept for C API compatibility.
+}
+
+void LLVMAddModule(LLVMExecutionEngineRef EE, LLVMModuleRef M) {
+ unwrap(EE)->addModule(std::unique_ptr<Module>(unwrap(M)));
+}
+
+LLVMBool LLVMRemoveModule(LLVMExecutionEngineRef EE, LLVMModuleRef M,
+ LLVMModuleRef *OutMod, char **OutError) {
+ Module *Mod = unwrap(M);
+ unwrap(EE)->removeModule(Mod);
+ *OutMod = wrap(Mod);
+ return 0;
+}
+
+LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
+ LLVMValueRef *OutFn) {
+ if (Function *F = unwrap(EE)->FindFunctionNamed(Name)) {
+ *OutFn = wrap(F);
+ return 0;
+ }
+ return 1;
+}
+
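+// Legacy stub retained for API compatibility: recompilation is not
+// supported, so this always returns null.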
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
+ LLVMValueRef Fn) {
+ return nullptr;
+}
+
+LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
+ return wrap(&unwrap(EE)->getDataLayout());
+}
+
+LLVMTargetMachineRef
+LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE) {
+ return wrap(unwrap(EE)->getTargetMachine());
+}
+
+void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
+ void* Addr) {
+ unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
+}
+
+void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
+ unwrap(EE)->finalizeObject();
+
+ return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
+}
+
+uint64_t LLVMGetGlobalValueAddress(LLVMExecutionEngineRef EE, const char *Name) {
+ return unwrap(EE)->getGlobalValueAddress(Name);
+}
+
+uint64_t LLVMGetFunctionAddress(LLVMExecutionEngineRef EE, const char *Name) {
+ return unwrap(EE)->getFunctionAddress(Name);
+}
+
+/*===-- Operations on memory managers -------------------------------------===*/
+
+namespace {
+
+struct SimpleBindingMMFunctions {
+ LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection;
+ LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection;
+ LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory;
+ LLVMMemoryManagerDestroyCallback Destroy;
+};
+
+class SimpleBindingMemoryManager : public RTDyldMemoryManager {
+public:
+ SimpleBindingMemoryManager(const SimpleBindingMMFunctions& Functions,
+ void *Opaque);
+ ~SimpleBindingMemoryManager() override;
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override;
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool isReadOnly) override;
+
+ bool finalizeMemory(std::string *ErrMsg) override;
+
+private:
+ SimpleBindingMMFunctions Functions;
+ void *Opaque;
+};
+
+SimpleBindingMemoryManager::SimpleBindingMemoryManager(
+ const SimpleBindingMMFunctions& Functions,
+ void *Opaque)
+ : Functions(Functions), Opaque(Opaque) {
+ assert(Functions.AllocateCodeSection &&
+ "No AllocateCodeSection function provided!");
+ assert(Functions.AllocateDataSection &&
+ "No AllocateDataSection function provided!");
+ assert(Functions.FinalizeMemory &&
+ "No FinalizeMemory function provided!");
+ assert(Functions.Destroy &&
+ "No Destroy function provided!");
+}
+
+SimpleBindingMemoryManager::~SimpleBindingMemoryManager() {
+ Functions.Destroy(Opaque);
+}
+
+uint8_t *SimpleBindingMemoryManager::allocateCodeSection(
+ uintptr_t Size, unsigned Alignment, unsigned SectionID,
+ StringRef SectionName) {
+ return Functions.AllocateCodeSection(Opaque, Size, Alignment, SectionID,
+ SectionName.str().c_str());
+}
+
+uint8_t *SimpleBindingMemoryManager::allocateDataSection(
+ uintptr_t Size, unsigned Alignment, unsigned SectionID,
+ StringRef SectionName, bool isReadOnly) {
+ return Functions.AllocateDataSection(Opaque, Size, Alignment, SectionID,
+ SectionName.str().c_str(),
+ isReadOnly);
+}
+
+bool SimpleBindingMemoryManager::finalizeMemory(std::string *ErrMsg) {
+ char *errMsgCString = nullptr;
+ bool result = Functions.FinalizeMemory(Opaque, &errMsgCString);
+ assert((result || !errMsgCString) &&
+ "Did not expect an error message if FinalizeMemory succeeded");
+ if (errMsgCString) {
+ if (ErrMsg)
+ *ErrMsg = errMsgCString;
+ free(errMsgCString);
+ }
+ return result;
+}
+
+} // anonymous namespace
+
+LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
+ void *Opaque,
+ LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
+ LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
+ LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
+ LLVMMemoryManagerDestroyCallback Destroy) {
+
+ if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
+ !Destroy)
+ return nullptr;
+
+ SimpleBindingMMFunctions functions;
+ functions.AllocateCodeSection = AllocateCodeSection;
+ functions.AllocateDataSection = AllocateDataSection;
+ functions.FinalizeMemory = FinalizeMemory;
+ functions.Destroy = Destroy;
+ return wrap(new SimpleBindingMemoryManager(functions, Opaque));
+}
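+
+// Sketch of a client-side callback set (hypothetical code; `arena`, `Arena`,
+// `arenaAlloc`, and `arenaFree` are assumptions, not part of this API):
+//
+//   static uint8_t *allocCode(void *Opaque, uintptr_t Size, unsigned Align,
+//                             unsigned SectionID, const char *SectionName) {
+//     return arenaAlloc((Arena *)Opaque, Size, Align, /*Executable=*/true);
+//   }
+//   static uint8_t *allocData(void *Opaque, uintptr_t Size, unsigned Align,
+//                             unsigned SectionID, const char *SectionName,
+//                             LLVMBool IsReadOnly) {
+//     return arenaAlloc((Arena *)Opaque, Size, Align, /*Executable=*/false);
+//   }
+//   static LLVMBool finalize(void *Opaque, char **ErrMsg) { return 0; }
+//   static void destroy(void *Opaque) { arenaFree((Arena *)Opaque); }
+//
+//   LLVMMCJITMemoryManagerRef MM = LLVMCreateSimpleMCJITMemoryManager(
+//       arena, allocCode, allocData, finalize, destroy);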
+
+void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM) {
+ delete unwrap(MM);
+}
+
+/*===-- JIT Event Listener functions -------------------------------------===*/
+
+
+#if !LLVM_USE_INTEL_JITEVENTS
+LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void)
+{
+ return nullptr;
+}
+#endif
+
+#if !LLVM_USE_OPROFILE
+LLVMJITEventListenerRef LLVMCreateOProfileJITEventListener(void)
+{
+ return nullptr;
+}
+#endif
+
+#if !LLVM_USE_PERF
+LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void)
+{
+ return nullptr;
+}
+#endif
diff --git a/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp b/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
new file mode 100644
index 0000000000000..7ed025fbb481e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
@@ -0,0 +1,238 @@
+//===----- GDBRegistrationListener.cpp - Registers objects with GDB -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include <mutex>
+
+using namespace llvm;
+using namespace llvm::object;
+
+// This must be kept in sync with gdb/gdb/jit.h.
+extern "C" {
+
+ typedef enum {
+ JIT_NOACTION = 0,
+ JIT_REGISTER_FN,
+ JIT_UNREGISTER_FN
+ } jit_actions_t;
+
+ struct jit_code_entry {
+ struct jit_code_entry *next_entry;
+ struct jit_code_entry *prev_entry;
+ const char *symfile_addr;
+ uint64_t symfile_size;
+ };
+
+ struct jit_descriptor {
+ uint32_t version;
+ // This should be jit_actions_t, but we want to be specific about the
+ // bit-width.
+ uint32_t action_flag;
+ struct jit_code_entry *relevant_entry;
+ struct jit_code_entry *first_entry;
+ };
+
+ // We put information about the JITed function in this global, which the
+ // debugger reads. Make sure to specify the version statically, because the
+ // debugger checks the version before we can set it during runtime.
+ struct jit_descriptor __jit_debug_descriptor = { 1, 0, nullptr, nullptr };
+
+  // Debuggers put a breakpoint in this function.
+ LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
+ // The noinline and the asm prevent calls to this function from being
+ // optimized out.
+#if !defined(_MSC_VER)
+ asm volatile("":::"memory");
+#endif
+ }
+
+}
+
+namespace {
+
+struct RegisteredObjectInfo {
+ RegisteredObjectInfo() {}
+
+ RegisteredObjectInfo(std::size_t Size, jit_code_entry *Entry,
+ OwningBinary<ObjectFile> Obj)
+ : Size(Size), Entry(Entry), Obj(std::move(Obj)) {}
+
+ std::size_t Size;
+ jit_code_entry *Entry;
+ OwningBinary<ObjectFile> Obj;
+};
+
+// Buffer for an in-memory object file in executable memory
+typedef llvm::DenseMap<JITEventListener::ObjectKey, RegisteredObjectInfo>
+ RegisteredObjectBufferMap;
+
+/// Global access point for the JIT debugging interface designed for use with a
+/// singleton toolbox. Handles thread-safe registration and deregistration of
+/// object files that are in executable memory managed by the client of this
+/// class.
+class GDBJITRegistrationListener : public JITEventListener {
+ /// A map of in-memory object files that have been registered with the
+ /// JIT interface.
+ RegisteredObjectBufferMap ObjectBufferMap;
+
+public:
+ /// Instantiates the JIT service.
+ GDBJITRegistrationListener() : ObjectBufferMap() {}
+
+ /// Unregisters each object that was previously registered and releases all
+ /// internal resources.
+ ~GDBJITRegistrationListener() override;
+
+  /// Creates an entry in the JIT registry for @p Obj, which must contain an
+  /// object file in executable memory with any debug information for the
+  /// debugger.
+ void notifyObjectLoaded(ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+
+  /// Removes the internal registration for the object keyed by @p K and
+  /// frees the associated resources.
+ void notifyFreeingObject(ObjectKey K) override;
+
+private:
+  /// Deregister the debug info for the given object file from the debugger
+  /// and delete any temporary copies. This private method does not remove
+  /// the entry from the map so that it can be called while iterating over
+  /// the map.
+};
+
+/// Lock used to serialize all jit registration events, since they
+/// modify global variables.
+ManagedStatic<sys::Mutex> JITDebugLock;
+
+/// Do the registration.
+void NotifyDebugger(jit_code_entry* JITCodeEntry) {
+ __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+
+ // Insert this entry at the head of the list.
+ JITCodeEntry->prev_entry = nullptr;
+ jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
+ JITCodeEntry->next_entry = NextEntry;
+ if (NextEntry) {
+ NextEntry->prev_entry = JITCodeEntry;
+ }
+ __jit_debug_descriptor.first_entry = JITCodeEntry;
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+ __jit_debug_register_code();
+}
+
+GDBJITRegistrationListener::~GDBJITRegistrationListener() {
+ // Free all registered object files.
+ std::lock_guard<llvm::sys::Mutex> locked(*JITDebugLock);
+ for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(),
+ E = ObjectBufferMap.end();
+ I != E; ++I) {
+ // Call the private method that doesn't update the map so our iterator
+ // doesn't break.
+ deregisterObjectInternal(I);
+ }
+ ObjectBufferMap.clear();
+}
+
+void GDBJITRegistrationListener::notifyObjectLoaded(
+ ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+
+ OwningBinary<ObjectFile> DebugObj = L.getObjectForDebug(Obj);
+
+ // Bail out if debug objects aren't supported.
+ if (!DebugObj.getBinary())
+ return;
+
+  const char *Buffer =
+      DebugObj.getBinary()->getMemoryBufferRef().getBufferStart();
+  size_t Size = DebugObj.getBinary()->getMemoryBufferRef().getBufferSize();
+
+ std::lock_guard<llvm::sys::Mutex> locked(*JITDebugLock);
+ assert(ObjectBufferMap.find(K) == ObjectBufferMap.end() &&
+ "Second attempt to perform debug registration.");
+  // Note: operator new reports allocation failure by itself (it never
+  // returns null), so the entry can be filled in unconditionally.
+  jit_code_entry* JITCodeEntry = new jit_code_entry();
+
+  JITCodeEntry->symfile_addr = Buffer;
+  JITCodeEntry->symfile_size = Size;
+
+  ObjectBufferMap[K] =
+      RegisteredObjectInfo(Size, JITCodeEntry, std::move(DebugObj));
+  NotifyDebugger(JITCodeEntry);
+}
+
+void GDBJITRegistrationListener::notifyFreeingObject(ObjectKey K) {
+ std::lock_guard<llvm::sys::Mutex> locked(*JITDebugLock);
+ RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(K);
+
+ if (I != ObjectBufferMap.end()) {
+ deregisterObjectInternal(I);
+ ObjectBufferMap.erase(I);
+ }
+}
+
+void GDBJITRegistrationListener::deregisterObjectInternal(
+ RegisteredObjectBufferMap::iterator I) {
+
+ jit_code_entry*& JITCodeEntry = I->second.Entry;
+
+ // Do the unregistration.
+ {
+ __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
+
+ // Remove the jit_code_entry from the linked list.
+ jit_code_entry* PrevEntry = JITCodeEntry->prev_entry;
+ jit_code_entry* NextEntry = JITCodeEntry->next_entry;
+
+ if (NextEntry) {
+ NextEntry->prev_entry = PrevEntry;
+ }
+ if (PrevEntry) {
+ PrevEntry->next_entry = NextEntry;
+ }
+ else {
+ assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
+ __jit_debug_descriptor.first_entry = NextEntry;
+ }
+
+ // Tell the debugger which entry we removed, and unregister the code.
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+ __jit_debug_register_code();
+ }
+
+ delete JITCodeEntry;
+ JITCodeEntry = nullptr;
+}
+
+llvm::ManagedStatic<GDBJITRegistrationListener> GDBRegListener;
+
+} // end namespace
+
+namespace llvm {
+
+JITEventListener* JITEventListener::createGDBRegistrationListener() {
+ return &*GDBRegListener;
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreateGDBRegistrationListener(void)
+{
+ return wrap(JITEventListener::createGDBRegistrationListener());
+}
diff --git a/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
new file mode 100644
index 0000000000000..1ebc820a8b49d
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -0,0 +1,258 @@
+//===-- IntelJITEventListener.cpp - Tell Intel profiler about JITed code --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object to tell Intel(R) VTune(TM)
+// Amplifier XE 2011 about JITted functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IntelJITEventsWrapper.h"
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "amplifier-jit-event-listener"
+
+namespace {
+
+class IntelJITEventListener : public JITEventListener {
+ typedef DenseMap<void*, unsigned int> MethodIDMap;
+
+ std::unique_ptr<IntelJITEventsWrapper> Wrapper;
+ MethodIDMap MethodIDs;
+
+ typedef SmallVector<const void *, 64> MethodAddressVector;
+ typedef DenseMap<const void *, MethodAddressVector> ObjectMap;
+
+ ObjectMap LoadedObjectMap;
+ std::map<ObjectKey, OwningBinary<ObjectFile>> DebugObjects;
+
+public:
+ IntelJITEventListener(IntelJITEventsWrapper* libraryWrapper) {
+ Wrapper.reset(libraryWrapper);
+ }
+
+ ~IntelJITEventListener() {
+ }
+
+ void notifyObjectLoaded(ObjectKey Key, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+
+ void notifyFreeingObject(ObjectKey Key) override;
+};
+
+static LineNumberInfo DILineInfoToIntelJITFormat(uintptr_t StartAddress,
+ uintptr_t Address,
+ DILineInfo Line) {
+ LineNumberInfo Result;
+
+ Result.Offset = Address - StartAddress;
+ Result.LineNumber = Line.Line;
+
+ return Result;
+}
+
+static iJIT_Method_Load FunctionDescToIntelJITFormat(
+ IntelJITEventsWrapper& Wrapper,
+ const char* FnName,
+ uintptr_t FnStart,
+ size_t FnSize) {
+ iJIT_Method_Load Result;
+ memset(&Result, 0, sizeof(iJIT_Method_Load));
+
+ Result.method_id = Wrapper.iJIT_GetNewMethodID();
+ Result.method_name = const_cast<char*>(FnName);
+ Result.method_load_address = reinterpret_cast<void*>(FnStart);
+ Result.method_size = FnSize;
+
+ Result.class_id = 0;
+ Result.class_file_name = NULL;
+ Result.user_data = NULL;
+ Result.user_data_size = 0;
+ Result.env = iJDE_JittingAPI;
+
+ return Result;
+}
+
+void IntelJITEventListener::notifyObjectLoaded(
+ ObjectKey Key, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+
+ OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+ const ObjectFile *DebugObj = DebugObjOwner.getBinary();
+ if (!DebugObj)
+ return;
+
+ // Get the address of the object image for use as a unique identifier
+ const void* ObjData = DebugObj->getData().data();
+ std::unique_ptr<DIContext> Context = DWARFContext::create(*DebugObj);
+ MethodAddressVector Functions;
+
+ // Use symbol info to iterate functions in the object.
+  for (const std::pair<SymbolRef, uint64_t> &P :
+       computeSymbolSizes(*DebugObj)) {
+ SymbolRef Sym = P.first;
+ std::vector<LineNumberInfo> LineInfo;
+ std::string SourceFileName;
+
+ Expected<SymbolRef::Type> SymTypeOrErr = Sym.getType();
+ if (!SymTypeOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(SymTypeOrErr.takeError());
+ continue;
+ }
+ SymbolRef::Type SymType = *SymTypeOrErr;
+ if (SymType != SymbolRef::ST_Function)
+ continue;
+
+ Expected<StringRef> Name = Sym.getName();
+ if (!Name) {
+ // TODO: Actually report errors helpfully.
+ consumeError(Name.takeError());
+ continue;
+ }
+
+ Expected<uint64_t> AddrOrErr = Sym.getAddress();
+ if (!AddrOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(AddrOrErr.takeError());
+ continue;
+ }
+ uint64_t Addr = *AddrOrErr;
+ uint64_t Size = P.second;
+
+ auto SecOrErr = Sym.getSection();
+ if (!SecOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(SecOrErr.takeError());
+ continue;
+ }
+ object::section_iterator Sec = *SecOrErr;
+ if (Sec == Obj.section_end())
+ continue;
+ uint64_t Index = Sec->getIndex();
+
+ // Record this address in a local vector
+ Functions.push_back((void*)Addr);
+
+ // Build the function loaded notification message
+ iJIT_Method_Load FunctionMessage =
+ FunctionDescToIntelJITFormat(*Wrapper, Name->data(), Addr, Size);
+ DILineInfoTable Lines =
+ Context->getLineInfoForAddressRange({Addr, Index}, Size);
+ DILineInfoTable::iterator Begin = Lines.begin();
+ DILineInfoTable::iterator End = Lines.end();
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ LineInfo.push_back(
+ DILineInfoToIntelJITFormat((uintptr_t)Addr, It->first, It->second));
+ }
+    if (LineInfo.empty()) {
+      FunctionMessage.source_file_name = NULL;
+      FunctionMessage.line_number_size = 0;
+      FunctionMessage.line_number_table = NULL;
+ } else {
+ // Source line information for the address range is provided as
+ // a code offset for the start of the corresponding sub-range and
+ // a source line. JIT API treats offsets in LineNumberInfo structures
+ // as the end of the corresponding code region. The start of the code
+ // is taken from the previous element. Need to shift the elements.
+
+ LineNumberInfo last = LineInfo.back();
+ last.Offset = FunctionMessage.method_size;
+ LineInfo.push_back(last);
+ for (size_t i = LineInfo.size() - 2; i > 0; --i)
+ LineInfo[i].LineNumber = LineInfo[i - 1].LineNumber;
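+
+      // Worked example (illustrative): offsets {0, 8, 16} with lines
+      // {10, 11, 12} and method_size 24 become {0->10, 8->10, 16->11,
+      // 24->12}; each offset now marks the end of the region that carries
+      // the previous entry's source line.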
+
+ SourceFileName = Lines.front().second.FileName;
+ FunctionMessage.source_file_name =
+ const_cast<char *>(SourceFileName.c_str());
+ FunctionMessage.line_number_size = LineInfo.size();
+ FunctionMessage.line_number_table = &*LineInfo.begin();
+ }
+
+ Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ &FunctionMessage);
+ MethodIDs[(void*)Addr] = FunctionMessage.method_id;
+ }
+
+ // To support object unload notification, we need to keep a list of
+ // registered function addresses for each loaded object. We will
+ // use the MethodIDs map to get the registered ID for each function.
+ LoadedObjectMap[ObjData] = Functions;
+ DebugObjects[Key] = std::move(DebugObjOwner);
+}
+
+void IntelJITEventListener::notifyFreeingObject(ObjectKey Key) {
+ // This object may not have been registered with the listener. If it wasn't,
+ // bail out.
+ if (DebugObjects.find(Key) == DebugObjects.end())
+ return;
+
+ // Get the address of the object image for use as a unique identifier
+ const ObjectFile &DebugObj = *DebugObjects[Key].getBinary();
+ const void* ObjData = DebugObj.getData().data();
+
+ // Get the object's function list from LoadedObjectMap
+ ObjectMap::iterator OI = LoadedObjectMap.find(ObjData);
+ if (OI == LoadedObjectMap.end())
+ return;
+ MethodAddressVector& Functions = OI->second;
+
+ // Walk the function list, unregistering each function
+ for (MethodAddressVector::iterator FI = Functions.begin(),
+ FE = Functions.end();
+ FI != FE;
+ ++FI) {
+ void* FnStart = const_cast<void*>(*FI);
+ MethodIDMap::iterator MI = MethodIDs.find(FnStart);
+ if (MI != MethodIDs.end()) {
+ Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+ &MI->second);
+ MethodIDs.erase(MI);
+ }
+ }
+
+ // Erase the object from LoadedObjectMap
+ LoadedObjectMap.erase(OI);
+ DebugObjects.erase(Key);
+}
+
+} // anonymous namespace.
+
+namespace llvm {
+JITEventListener *JITEventListener::createIntelJITEventListener() {
+ return new IntelJITEventListener(new IntelJITEventsWrapper);
+}
+
+// for testing
+JITEventListener *JITEventListener::createIntelJITEventListener(
+ IntelJITEventsWrapper* TestImpl) {
+ return new IntelJITEventListener(TestImpl);
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void)
+{
+ return wrap(JITEventListener::createIntelJITEventListener());
+}
diff --git a/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h b/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h
new file mode 100644
index 0000000000000..68699c6a2200c
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h
@@ -0,0 +1,95 @@
+//===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper for the Intel JIT Events API. It allows for the
+// implementation of the jitprofiling library to be swapped with an alternative
+// implementation (for testing). To include this file, you must have the
+// jitprofiling.h header available; it is available in Intel(R) VTune(TM)
+// Amplifier XE 2011.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INTEL_JIT_EVENTS_WRAPPER_H
+#define INTEL_JIT_EVENTS_WRAPPER_H
+
+#include "jitprofiling.h"
+
+namespace llvm {
+
+class IntelJITEventsWrapper {
+ // Function pointer types for testing implementation of Intel jitprofiling
+ // library
+ typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*);
+  typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx);
+ typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void);
+ typedef void (*FinalizeThreadPtr)(void);
+ typedef void (*FinalizeProcessPtr)(void);
+ typedef unsigned int (*GetNewMethodIDPtr)(void);
+
+ NotifyEventPtr NotifyEventFunc;
+ RegisterCallbackExPtr RegisterCallbackExFunc;
+ IsProfilingActivePtr IsProfilingActiveFunc;
+ GetNewMethodIDPtr GetNewMethodIDFunc;
+
+public:
+ bool isAmplifierRunning() {
+ return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON;
+ }
+
+ IntelJITEventsWrapper()
+ : NotifyEventFunc(::iJIT_NotifyEvent),
+ RegisterCallbackExFunc(::iJIT_RegisterCallbackEx),
+ IsProfilingActiveFunc(::iJIT_IsProfilingActive),
+ GetNewMethodIDFunc(::iJIT_GetNewMethodID) {
+ }
+
+ IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl,
+ RegisterCallbackExPtr RegisterCallbackExImpl,
+ IsProfilingActivePtr IsProfilingActiveImpl,
+ FinalizeThreadPtr FinalizeThreadImpl,
+ FinalizeProcessPtr FinalizeProcessImpl,
+ GetNewMethodIDPtr GetNewMethodIDImpl)
+ : NotifyEventFunc(NotifyEventImpl),
+ RegisterCallbackExFunc(RegisterCallbackExImpl),
+ IsProfilingActiveFunc(IsProfilingActiveImpl),
+ GetNewMethodIDFunc(GetNewMethodIDImpl) {
+ }
+
+ // Sends an event announcing that a function has been emitted
+ // return values are event-specific. See Intel documentation for details.
+ int iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) {
+ if (!NotifyEventFunc)
+ return -1;
+ return NotifyEventFunc(EventType, EventSpecificData);
+ }
+
+ // Registers a callback function to receive notice of profiling state changes
+ void iJIT_RegisterCallbackEx(void *UserData,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx) {
+ if (RegisterCallbackExFunc)
+ RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx);
+ }
+
+ // Returns the current profiler mode
+ iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) {
+ if (!IsProfilingActiveFunc)
+ return iJIT_NOTHING_RUNNING;
+ return IsProfilingActiveFunc();
+ }
+
+ // Generates a locally unique method ID for use in code registration
+ unsigned int iJIT_GetNewMethodID(void) {
+ if (!GetNewMethodIDFunc)
+ return -1;
+ return GetNewMethodIDFunc();
+ }
+};
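+
+// Sketch of swapping in a test implementation via the second constructor
+// (hypothetical test code; the stub functions below are assumptions):
+//
+//   static int testNotify(iJIT_JVM_EVENT, void *) { return 0; }
+//   static iJIT_IsProfilingActiveFlags testActive(void) {
+//     return iJIT_SAMPLING_ON;
+//   }
+//   static unsigned int testNewID(void) {
+//     static unsigned int ID = 1000;
+//     return ++ID;
+//   }
+//
+//   IntelJITEventsWrapper *W = new IntelJITEventsWrapper(
+//       testNotify, /*RegisterCallbackEx=*/nullptr, testActive,
+//       /*FinalizeThread=*/nullptr, /*FinalizeProcess=*/nullptr, testNewID);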
+
+} //namespace llvm
+
+#endif //INTEL_JIT_EVENTS_WRAPPER_H
diff --git a/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h b/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h
new file mode 100644
index 0000000000000..16ce672150cc2
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h
@@ -0,0 +1,453 @@
+/*===-- ittnotify_config.h - JIT Profiling API internal config-----*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API internal config.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef _ITTNOTIFY_CONFIG_H_
+#define _ITTNOTIFY_CONFIG_H_
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+# define ITT_OS_WIN 1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+# define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+# define ITT_OS_MAC 3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+# if defined WIN32 || defined _WIN32
+# define ITT_OS ITT_OS_WIN
+# elif defined( __APPLE__ ) && defined( __MACH__ )
+# define ITT_OS ITT_OS_MAC
+# else
+# define ITT_OS ITT_OS_LINUX
+# endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+# define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+# define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+# if ITT_OS==ITT_OS_WIN
+# define ITT_PLATFORM ITT_PLATFORM_WIN
+# else
+# define ITT_PLATFORM ITT_PLATFORM_POSIX
+# endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#if defined(_UNICODE) && !defined(UNICODE)
+#define UNICODE
+#endif
+
+#include <stddef.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdint.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE || _UNICODE */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define CDECL __cdecl
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+#   define CDECL /* not applicable on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define STDCALL __stdcall
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define STDCALL /* not supported on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define STDCALL __attribute__ ((stdcall))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI CDECL
+#define LIBITTAPI CDECL
+
+/* TODO: Temporary for compatibility! */
+#define ITTAPI_CALL CDECL
+#define LIBITTAPI_CALL CDECL
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+/* use __forceinline (VC++ specific) */
+#define ITT_INLINE __forceinline
+#define ITT_INLINE_ATTRIBUTE /* nothing */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/*
+ * Generally, functions are not inlined unless optimization is specified.
+ * For functions declared inline, this attribute inlines the function even
+ * if no optimization level was specified.
+ */
+#ifdef __STRICT_ANSI__
+#define ITT_INLINE static
+#else /* __STRICT_ANSI__ */
+#define ITT_INLINE static inline
+#endif /* __STRICT_ANSI__ */
+#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/** @endcond */
+
+#ifndef ITT_ARCH_IA32
+# define ITT_ARCH_IA32 1
+#endif /* ITT_ARCH_IA32 */
+
+#ifndef ITT_ARCH_IA32E
+# define ITT_ARCH_IA32E 2
+#endif /* ITT_ARCH_IA32E */
+
+#ifndef ITT_ARCH_IA64
+# define ITT_ARCH_IA64 3
+#endif /* ITT_ARCH_IA64 */
+
+#ifndef ITT_ARCH
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define ITT_ARCH ITT_ARCH_IA32E
+# elif defined _M_IA64 || defined __ia64
+# define ITT_ARCH ITT_ARCH_IA64
+# else
+# define ITT_ARCH ITT_ARCH_IA32
+# endif
+#endif
+
+#ifdef __cplusplus
+# define ITT_EXTERN_C extern "C"
+#else
+# define ITT_EXTERN_C /* nothing */
+#endif /* __cplusplus */
+
+#define ITT_TO_STR_AUX(x) #x
+#define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
+
+#define __ITT_BUILD_ASSERT(expr, suffix) do { \
+ static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \
+ __itt_build_check_##suffix[0] = 0; \
+} while(0)
+#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
+#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
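+
+/* Illustrative use (not from the original source): a failing condition such
+ * as ITT_BUILD_ASSERT(sizeof(int) == 2) expands to a char array with size
+ * -1, which the compiler rejects, turning the check into a build-time
+ * assertion; a passing condition compiles to a harmless one-element array. */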
+
+#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
+
+/* Replace with snapshot date YYYYMMDD for promotion build. */
+#define API_VERSION_BUILD 20111111
+
+#ifndef API_VERSION_NUM
+#define API_VERSION_NUM 0.0.0
+#endif /* API_VERSION_NUM */
+
+#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \
+ " (" ITT_TO_STR(API_VERSION_BUILD) ")"
+
+/* OS communication functions */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+typedef HMODULE lib_t;
+typedef DWORD TIDT;
+typedef CRITICAL_SECTION mutex_t;
+#define MUTEX_INITIALIZER { 0 }
+#define strong_alias(name, aliasname) /* empty for Windows */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */
+#endif /* _GNU_SOURCE */
+#include <pthread.h>
+typedef void* lib_t;
+typedef pthread_t TIDT;
+typedef pthread_mutex_t mutex_t;
+#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
+#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
+#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
+#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
+#define __itt_load_lib(name) LoadLibraryA(name)
+#define __itt_unload_lib(handle) FreeLibrary(handle)
+#define __itt_system_error() (int)GetLastError()
+#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
+#define __itt_fstrlen(s) lstrlenA(s)
+#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
+#define __itt_fstrdup(s) _strdup(s)
+#define __itt_thread_id() GetCurrentThreadId()
+#define __itt_thread_yield() SwitchToThread()
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE long
+__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
+{
+ return InterlockedIncrement(ptr);
+}
+#endif /* ITT_SIMPLE_INIT */
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define __itt_get_proc(lib, name) dlsym(lib, name)
+#define __itt_mutex_init(mutex) {\
+ pthread_mutexattr_t mutex_attr; \
+ int error_code = pthread_mutexattr_init(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_init", \
+ error_code); \
+ error_code = pthread_mutexattr_settype(&mutex_attr, \
+ PTHREAD_MUTEX_RECURSIVE); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", \
+ error_code); \
+ error_code = pthread_mutex_init(mutex, &mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutex_init", \
+ error_code); \
+ error_code = pthread_mutexattr_destroy(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", \
+ error_code); \
+}
+#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
+#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
+#define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
+#define __itt_unload_lib(handle) dlclose(handle)
+#define __itt_system_error() errno
+#define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
+#define __itt_fstrlen(s) strlen(s)
+#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
+#define __itt_fstrdup(s) strdup(s)
+#define __itt_thread_id() pthread_self()
+#define __itt_thread_yield() sched_yield()
+#if ITT_ARCH==ITT_ARCH_IA64
+#ifdef __INTEL_COMPILER
+#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
+#else /* __INTEL_COMPILER */
+/* TODO: Add Support for not Intel compilers for IA64 */
+#endif /* __INTEL_COMPILER */
+#else /* ITT_ARCH!=ITT_ARCH_IA64 */
+ITT_INLINE long
+__TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
+{
+ long result;
+ __asm__ __volatile__("lock\nxadd %0,%1"
+ : "=r"(result),"=m"(*(long*)ptr)
+ : "0"(addend), "m"(*(long*)ptr)
+ : "memory");
+ return result;
+}
+#endif /* ITT_ARCH==ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE long
+__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
+{
+ return __TBB_machine_fetchadd4(ptr, 1) + 1L;
+}
+#endif /* ITT_SIMPLE_INIT */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+typedef enum {
+ __itt_collection_normal = 0,
+ __itt_collection_paused = 1
+} __itt_collection_state;
+
+typedef enum {
+ __itt_thread_normal = 0,
+ __itt_thread_ignored = 1
+} __itt_thread_state;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_thread_info
+{
+ const char* nameA; /*!< Copy of original name in ASCII. */
+#if defined(UNICODE) || defined(_UNICODE)
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
+#else /* UNICODE || _UNICODE */
+ void* nameW;
+#endif /* UNICODE || _UNICODE */
+ TIDT tid;
+ __itt_thread_state state; /*!< Thread state (paused or normal) */
+ int extra1; /*!< Reserved to the runtime */
+ void* extra2; /*!< Reserved to the runtime */
+ struct ___itt_thread_info* next;
+} __itt_thread_info;
+
+#include "ittnotify_types.h" /* For __itt_group_id definition */
+
+typedef struct ___itt_api_info_20101001
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ __itt_group_id group;
+} __itt_api_info_20101001;
+
+typedef struct ___itt_api_info
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ void* null_func;
+ __itt_group_id group;
+} __itt_api_info;
+
+struct ___itt_domain;
+struct ___itt_string_handle;
+
+typedef struct ___itt_global
+{
+ unsigned char magic[8];
+ unsigned long version_major;
+ unsigned long version_minor;
+ unsigned long version_build;
+ volatile long api_initialized;
+ volatile long mutex_initialized;
+ volatile long atomic_counter;
+ mutex_t mutex;
+ lib_t lib;
+ void* error_handler;
+ const char** dll_path_ptr;
+ __itt_api_info* api_list_ptr;
+ struct ___itt_global* next;
+ /* Joinable structures below */
+ __itt_thread_info* thread_list;
+ struct ___itt_domain* domain_list;
+ struct ___itt_string_handle* string_list;
+ __itt_collection_state state;
+} __itt_global;
+
+#pragma pack(pop)
+
+#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = NULL; \
+ h->nameW = n ? _wcsdup(n) : NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = n ? __itt_fstrdup(n) : NULL; \
+ h->nameW = NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = NULL; \
+ h->nameW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = name ? __itt_fstrdup(name) : NULL; \
+ h->nameW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = NULL; \
+ h->strW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = name ? __itt_fstrdup(name) : NULL; \
+ h->strW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#endif /* _ITTNOTIFY_CONFIG_H_ */
diff --git a/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h b/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h
new file mode 100644
index 0000000000000..15008fe93e607
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h
@@ -0,0 +1,69 @@
+/*===-- ittnotify_types.h - JIT Profiling API internal types--------*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef _ITTNOTIFY_TYPES_H_
+#define _ITTNOTIFY_TYPES_H_
+
+typedef enum ___itt_group_id
+{
+ __itt_group_none = 0,
+ __itt_group_legacy = 1<<0,
+ __itt_group_control = 1<<1,
+ __itt_group_thread = 1<<2,
+ __itt_group_mark = 1<<3,
+ __itt_group_sync = 1<<4,
+ __itt_group_fsync = 1<<5,
+ __itt_group_jit = 1<<6,
+ __itt_group_model = 1<<7,
+ __itt_group_splitter_min = 1<<7,
+ __itt_group_counter = 1<<8,
+ __itt_group_frame = 1<<9,
+ __itt_group_stitch = 1<<10,
+ __itt_group_heap = 1<<11,
+ __itt_group_splitter_max = 1<<12,
+ __itt_group_structure = 1<<12,
+ __itt_group_suppress = 1<<13,
+ __itt_group_all = -1
+} __itt_group_id;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_group_list
+{
+ __itt_group_id id;
+ const char* name;
+} __itt_group_list;
+
+#pragma pack(pop)
+
+#define ITT_GROUP_LIST(varname) \
+ static __itt_group_list varname[] = { \
+ { __itt_group_all, "all" }, \
+ { __itt_group_control, "control" }, \
+ { __itt_group_thread, "thread" }, \
+ { __itt_group_mark, "mark" }, \
+ { __itt_group_sync, "sync" }, \
+ { __itt_group_fsync, "fsync" }, \
+ { __itt_group_jit, "jit" }, \
+ { __itt_group_model, "model" }, \
+ { __itt_group_counter, "counter" }, \
+ { __itt_group_frame, "frame" }, \
+ { __itt_group_stitch, "stitch" }, \
+ { __itt_group_heap, "heap" }, \
+ { __itt_group_structure, "structure" }, \
+ { __itt_group_suppress, "suppress" }, \
+ { __itt_group_none, NULL } \
+ }
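+
+/* Illustrative use (hypothetical): the macro expands to a static table that
+ * can be scanned to map group names to ids, e.g.
+ *
+ *   ITT_GROUP_LIST(group_list);
+ *   __itt_group_id id = __itt_group_none;
+ *   for (int i = 0; group_list[i].name != NULL; i++)
+ *       if (strcmp(group_list[i].name, "jit") == 0)
+ *           id = group_list[i].id;
+ */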
+
+#endif /* _ITTNOTIFY_TYPES_H_ */
diff --git a/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c b/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c
new file mode 100644
index 0000000000000..074e0735628a5
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c
@@ -0,0 +1,480 @@
+/*===-- jitprofiling.c - JIT (Just-In-Time) Profiling API----------*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API implementation.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+#pragma optimize("", off)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#include <pthread.h>
+#include <stdint.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdlib.h>
+
+#include "jitprofiling.h"
+
+static const char rcsid[] = "\n@(#) $Revision: 243501 $\n";
+
+#define DLL_ENVIRONMENT_VAR "VS_PROFILER"
+
+#ifndef NEW_DLL_ENVIRONMENT_VAR
+#if ITT_ARCH==ITT_ARCH_IA32
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32"
+#else
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64"
+#endif
+#endif /* NEW_DLL_ENVIRONMENT_VAR */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define DEFAULT_DLLNAME "JitPI.dll"
+HINSTANCE m_libHandle = NULL;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define DEFAULT_DLLNAME "libJitPI.so"
+void* m_libHandle = NULL;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* default location of JIT profiling agent on Android */
+#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so"
+
+/* the function pointers */
+typedef unsigned int(*TPInitialize)(void);
+static TPInitialize FUNC_Initialize=NULL;
+
+typedef unsigned int(*TPNotify)(unsigned int, void*);
+static TPNotify FUNC_NotifyEvent=NULL;
+
+static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
+
+/* end collector dll part. */
+
+/* loadiJIT_Funcs(): this function is called once at startup and is
+ * responsible for loading the functions from BistroJavaCollector.dll.
+ * result:
+ *   on success: the functions are loaded, iJIT_DLL_is_missing=0, return value = 1
+ *   on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0
+ */
+static int loadiJIT_Funcs(void);
+
+/* global flag recording that the BistroJavaCollector could not be loaded */
+static int iJIT_DLL_is_missing = 0;
+
+/* Virtual stack - the struct is used as a virtual stack for each thread.
+ * Every thread initializes with a stack of size INIT_TOP_Stack.
+ * Every method entry decreases from the current stack point,
+ * and when a thread stack reaches its top of stack (return from the global
+ * function), the top of stack and the current stack increase. Notice that
+ * when returning from a function the stack pointer is the address of
+ * the function return.
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+static DWORD threadLocalStorageHandle = 0;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#define INIT_TOP_Stack 10000
+
+typedef struct
+{
+ unsigned int TopStack;
+ unsigned int CurrentStack;
+} ThreadStack, *pThreadStack;
+
+/* end of virtual stack. */
+
+/*
+ * The function for reporting virtual-machine related events to VTune.
+ * Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill
+ * in the stack_id field in the iJIT_Method_NIDS structure, as VTune fills it.
+ * The return value for iJVM_EVENT_TYPE_ENTER_NIDS and
+ * iJVM_EVENT_TYPE_LEAVE_NIDS events will be 0 in case of failure.
+ * For the iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event it will be -1 if
+ * EventSpecificData == 0, otherwise 0.
+ */
+
+ITT_EXTERN_C int JITAPI
+iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData)
+{
+ int ReturnValue;
+
+ /*
+ * This section is for debugging outside of VTune.
+   * It creates the environment variable that indicates call graph mode.
+   * If running outside of VTune, uncomment the code below.
+ *
+ *
+ * static int firstTime = 1;
+ * char DoCallGraph[12] = "DoCallGraph";
+ * if (firstTime)
+ * {
+ * firstTime = 0;
+ * SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph);
+ * }
+ *
+ * end of section.
+ */
+
+ /* initialization part - the functions have not been loaded yet. This part
+ * will load the functions, and check if we are in Call Graph mode.
+ * (for special treatment).
+ */
+ if (!FUNC_NotifyEvent)
+ {
+ if (iJIT_DLL_is_missing)
+ return 0;
+
+ /* load the Function from the DLL */
+ if (!loadiJIT_Funcs())
+ return 0;
+
+ /* Call Graph initialization. */
+ }
+
+ /* If the event is method entry/exit, check that in the current mode
+ * VTune is allowed to receive it
+ */
+ if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS ||
+ event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
+ (executionMode != iJIT_CALLGRAPH_ON))
+ {
+ return 0;
+ }
+ /* This section is performed when method enter event occurs.
+ * It updates the virtual stack, or creates it if this is the first
+ * method entry in the thread. The stack pointer is decreased.
+ */
+ if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* initialize the stack. */
+            threadStack = (pThreadStack) calloc (1, sizeof(ThreadStack));
+ threadStack->TopStack = INIT_TOP_Stack;
+ threadStack->CurrentStack = INIT_TOP_Stack;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue(threadLocalStorageHandle,(void*)threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle,(void*)threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ /* decrease the stack. */
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ (threadStack->CurrentStack)--;
+ }
+
+ /* This section is performed when method leave event occurs
+ * It updates the virtual stack.
+ * Increases the stack pointer.
+ * If the stack pointer reached the top (left the global function)
+ * increase the pointer and the top pointer.
+ */
+ if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* Error: first report in this thread is method exit */
+ exit (1);
+ }
+
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ ++(threadStack->CurrentStack) + 1;
+
+ if (((piJIT_Method_NIDS) EventSpecificData)->stack_id
+ > threadStack->TopStack)
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ (unsigned int)-1;
+ }
+
+ if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED)
+ {
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 )
+ return 0;
+ }
+
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+
+ return ReturnValue;
+}
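+
+/* Illustrative caller (hypothetical; CodePtr and CodeSize are assumptions):
+ *
+ *   iJIT_Method_Load ML;
+ *   memset(&ML, 0, sizeof(ML));
+ *   ML.method_id = iJIT_GetNewMethodID();
+ *   ML.method_name = (char*)"my_jitted_fn";
+ *   ML.method_load_address = CodePtr;
+ *   ML.method_size = CodeSize;
+ *   iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, &ML);
+ */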
+
+/* The new mode call back routine */
+ITT_EXTERN_C void JITAPI
+iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx
+ NewModeCallBackFuncEx)
+{
+ /* is it already missing... or the load of functions from the DLL failed */
+ if (iJIT_DLL_is_missing || !loadiJIT_Funcs())
+ {
+ /* then do not bother with notifications */
+ NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS);
+ /* Error: could not load JIT functions. */
+ return;
+ }
+ /* nothing to do with the callback */
+}
+
+/*
+ * This function allows the user to query in which mode, if at all,
+ * VTune is running.
+ */
+ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
+{
+ if (!iJIT_DLL_is_missing)
+ {
+ loadiJIT_Funcs();
+ }
+
+ return executionMode;
+}
+
+/* This function loads the collector dll (BistroJavaCollector)
+ * and the relevant functions.
+ * on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1
+ * on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0
+ */
+static int loadiJIT_Funcs()
+{
+ static int bDllWasLoaded = 0;
+ char *dllName = (char*)rcsid; /* !! Just to avoid unused code elimination */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ DWORD dNameLength = 0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if(bDllWasLoaded)
+ {
+ /* dll was already loaded, no need to do it for the second time */
+ return 1;
+ }
+
+ /* Assumes that the DLL will not be found */
+ iJIT_DLL_is_missing = 1;
+ FUNC_NotifyEvent = NULL;
+
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ /* Try to get the dll name from the environment */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR,
+ dllName, dNameLength);
+ if (envret)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = LoadLibraryExA(dllName,
+ NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+ }
+ free(dllName);
+ } else {
+ /* Try to use old VS_PROFILER variable */
+ dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR,
+ dllName, dNameLength);
+ if (envret)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = LoadLibraryA(dllName);
+ }
+ free(dllName);
+ }
+ }
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dllName = getenv(NEW_DLL_ENVIRONMENT_VAR);
+ if (!dllName)
+ dllName = getenv(DLL_ENVIRONMENT_VAR);
+#ifdef ANDROID
+ if (!dllName)
+ dllName = ANDROID_JIT_AGENT_PATH;
+#endif
+ if (dllName)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = dlopen(dllName, RTLD_LAZY);
+ }
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if (!m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ m_libHandle = LoadLibraryA(DEFAULT_DLLNAME);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ /* if the dll wasn't loaded - exit. */
+ if (!m_libHandle)
+ {
+ iJIT_DLL_is_missing = 1; /* don't try to initialize
+ * JIT agent the second time
+ */
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_NotifyEvent = (TPNotify)(intptr_t)dlsym(m_libHandle, "NotifyEvent");
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_NotifyEvent)
+ {
+ FUNC_Initialize = NULL;
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_Initialize = (TPInitialize)(intptr_t)dlsym(m_libHandle, "Initialize");
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_Initialize)
+ {
+ FUNC_NotifyEvent = NULL;
+ return 0;
+ }
+
+ executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize();
+
+ bDllWasLoaded = 1;
+ iJIT_DLL_is_missing = 0; /* DLL is ok. */
+
+ /*
+ * Call Graph mode: init the thread local storage
+ * (need to store the virtual stack there).
+ */
+ if ( executionMode == iJIT_CALLGRAPH_ON )
+ {
+ /* Allocate a thread local storage slot for the thread "stack" */
+ if (!threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ threadLocalStorageHandle = TlsAlloc();
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_create(&threadLocalStorageHandle, NULL);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ return 1;
+}
+
+/*
+ * This function should be called by the user whenever a thread ends,
+ * to free the thread "virtual stack" storage
+ */
+ITT_EXTERN_C void JITAPI FinalizeThread()
+{
+ if (threadLocalStorageHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (threadStack)
+ {
+ free (threadStack);
+ threadStack = NULL;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue (threadLocalStorageHandle, threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle, threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+ }
+}
+
+/*
+ * This function should be called by the user when the process ends,
+ * to free the local storage index.
+ */
+ITT_EXTERN_C void JITAPI FinalizeProcess()
+{
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ if (threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsFree (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_delete(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+}
+
+/*
+ * This function should be called by the user for any method once.
+ * The function will return a unique method ID, the user should maintain
+ * the ID for each method
+ */
+ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
+{
+ static unsigned int methodID = 0x100000;
+
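+    /* IDs at or below 999 are reserved (see the checks in iJIT_NotifyEvent)
+     * and 0 is an error value, so the counter starts at 0x100000 and only
+     * hits the check below if it ever wraps around. */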
+ if (methodID == 0)
+ return 0; /* ERROR : this is not a valid value */
+
+ return methodID++;
+}
diff --git a/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
new file mode 100644
index 0000000000000..ba627b430ff12
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
@@ -0,0 +1,258 @@
+/*===-- jitprofiling.h - JIT Profiling API-------------------------*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API declaration.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef __JITPROFILING_H__
+#define __JITPROFILING_H__
+
+/*
+ * Various constants used by functions
+ */
+
+/* event notification */
+typedef enum iJIT_jvm_event
+{
+
+ /* shutdown */
+
+ /*
+ * Program exiting. EventSpecificData is NA.
+ */
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
+
+ /* JIT profiling */
+
+ /*
+ * issued after method code is jitted into memory but before the code is executed.
+ * EventSpecificData is an iJIT_Method_Load
+ */
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+
+ /* issued before unload. Method code will no longer be executed, but code
+ * and info are still in memory. The VTune profiler may capture method
+ * code only at this point. EventSpecificData is iJIT_Method_Id
+ */
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+
+ /* Method Profiling */
+
+ /* method name, ID and stack are supplied;
+ * issued when a method is about to be entered. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+
+ /* method name, ID and stack are supplied;
+ * issued when a method is about to be left. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_LEAVE_NIDS
+} iJIT_JVM_EVENT;
+
+typedef enum _iJIT_ModeFlags
+{
+ /* No need to notify VTune, since VTune is not running */
+ iJIT_NO_NOTIFICATIONS = 0x0000,
+
+ /* when turned on, the JIT must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ * )
+ * for all the methods already jitted
+ */
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+
+ /* when turned on, the JIT must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
+ * ) for all the methods that are unloaded
+ */
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+
+ /* when turned on, the JIT must instrument all
+ * the currently jitted code with calls on
+ * method entries
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+
+ /* when turned on, the JIT must instrument all
+ * the currently jitted code with calls
+ * on method exit
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+
+} iJIT_ModeFlags;
+
+
+ /* Flags used by iJIT_IsProfilingActive() */
+typedef enum _iJIT_IsProfilingActiveFlags
+{
+ /* No profiler is running. Currently not used */
+ iJIT_NOTHING_RUNNING = 0x0000,
+
+ /* Sampling is running. This is the default value
+ * returned by iJIT_IsProfilingActive()
+ */
+ iJIT_SAMPLING_ON = 0x0001,
+
+ /* Call Graph is running */
+ iJIT_CALLGRAPH_ON = 0x0002
+
+} iJIT_IsProfilingActiveFlags;
+
+/* Enumerator for the environment of methods */
+typedef enum _iJDEnvironmentType
+{
+ iJDE_JittingAPI = 2
+} iJDEnvironmentType;
+
+/**********************************
+ * Data structures for the events *
+ **********************************/
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_START
+ */
+
+typedef struct _iJIT_Method_Id
+{
+ /* Id of the method (same as the one passed in
+ * the iJIT_Method_Load struct)
+ */
+ unsigned int method_id;
+
+} *piJIT_Method_Id, iJIT_Method_Id;
+
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_ENTER_NIDS,
+ * iJVM_EVENT_TYPE_LEAVE_NIDS,
+ * iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS
+ */
+
+typedef struct _iJIT_Method_NIDS
+{
+ /* unique method ID */
+ unsigned int method_id;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ unsigned int stack_id;
+
+ /* method name (just the method, without the class) */
+ char* method_name;
+} *piJIT_Method_NIDS, iJIT_Method_NIDS;
+
+/* structures for the events:
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
+ */
+
+typedef struct _LineNumberInfo
+{
+ /* x86 offset from the beginning of the method */
+ unsigned int Offset;
+
+ /* source line number from the beginning of the source file */
+ unsigned int LineNumber;
+
+} *pLineNumberInfo, LineNumberInfo;
+
+typedef struct _iJIT_Method_Load
+{
+ /* unique method ID - can be any unique value, (except 0 - 999) */
+ unsigned int method_id;
+
+ /* method name (can be given with or without the class and signature;
+ * in any case the class name will be added to it)
+ */
+ char* method_name;
+
+ /* virtual address of that method - This determines the method range for the
+ * iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
+ */
+ void* method_load_address;
+
+ /* Size in memory - Must be exact */
+ unsigned int method_size;
+
+ /* Line Table size in number of entries - Zero if none */
+ unsigned int line_number_size;
+
+ /* Pointer to the beginning of the line numbers info array */
+ pLineNumberInfo line_number_table;
+
+ /* unique class ID */
+ unsigned int class_id;
+
+ /* class file name */
+ char* class_file_name;
+
+ /* source file name */
+ char* source_file_name;
+
+ /* bits supplied by the user for saving in the JIT file */
+ void* user_data;
+
+ /* the size of the user data buffer */
+ unsigned int user_data_size;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ iJDEnvironmentType env;
+
+} *piJIT_Method_Load, iJIT_Method_Load;
+
+/* API Functions */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CDECL
+# if defined WIN32 || defined _WIN32
+# define CDECL __cdecl
+# else /* defined WIN32 || defined _WIN32 */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not applicable on the x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* defined WIN32 || defined _WIN32 */
+#endif /* CDECL */
+
+#define JITAPI CDECL
+
+/* called when the settings are changed with new settings */
+typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
+
+int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
+
+/* The new-mode callback routine */
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx);
+
+iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
+
+void JITAPI FinalizeThread(void);
+
+void JITAPI FinalizeProcess(void);
+
+unsigned int JITAPI iJIT_GetNewMethodID(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JITPROFILING_H__ */
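[Editor's note: taken together, the declarations above suggest the usage
pattern sketched below. This is an illustrative example, not part of the
patch; the function and method names are hypothetical, and it assumes the
caller owns the jitted code buffer and the name string.]

    /* Sketch: registering one jitted method with the profiler. */
    #include "jitprofiling.h"
    #include <string.h>

    static void registerJittedMethod(void *Code, unsigned Size) {
      /* Bail out early when no profiler is attached. */
      if (iJIT_IsProfilingActive() != iJIT_SAMPLING_ON)
        return;

      iJIT_Method_Load ML;
      memset(&ML, 0, sizeof(ML));
      ML.method_id = iJIT_GetNewMethodID();          /* unique, >= 0x100000 */
      ML.method_name = (char *)"my_jitted_function"; /* hypothetical name */
      ML.method_load_address = Code;
      ML.method_size = Size;                         /* must be exact */

      iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void *)&ML);
    }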
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
new file mode 100644
index 0000000000000..51f31d3d5d8f6
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -0,0 +1,2172 @@
+//===-- Execution.cpp - Implement code to simulate the program ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the actual instruction interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cmath>
+using namespace llvm;
+
+#define DEBUG_TYPE "interpreter"
+
+STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
+
+static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
+ cl::desc("make the interpreter print every volatile load and store"));
+
+//===----------------------------------------------------------------------===//
+// Various Helper Functions
+//===----------------------------------------------------------------------===//
+
+static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
+ SF.Values[V] = Val;
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ Dest.FloatVal = -Src.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = -Src.DoubleVal;
+ break;
+ default:
+ llvm_unreachable("Unhandled type for FNeg instruction");
+ }
+}
+
+void Interpreter::visitUnaryOperator(UnaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src = getOperandValue(I.getOperand(0), SF);
+ GenericValue R; // Result
+
+ // First process vector operation
+ if (Ty->isVectorTy()) {
+ R.AggregateVal.resize(Src.AggregateVal.size());
+
+ switch(I.getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to handle this unary operator");
+ break;
+ case Instruction::FNeg:
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
+ } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
+ } else {
+ llvm_unreachable("Unhandled type for FNeg instruction");
+ }
+ break;
+ }
+ } else {
+ switch (I.getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to handle this unary operator");
+ break;
+ case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
+ }
+ }
+ SetValue(&I, R, SF);
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
+ break
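[Editor's note: for reference, IMPLEMENT_BINARY_OPERATOR(+, Float) expands
to the case fragment below; each executeF*Inst body is this switch
instantiated for its operator.]

    case Type::FloatTyID:
      Dest.FloatVal = Src1.FloatVal + Src2.FloatVal;
      break;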
+
+static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(+, Float);
+ IMPLEMENT_BINARY_OPERATOR(+, Double);
+ default:
+ dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(-, Float);
+ IMPLEMENT_BINARY_OPERATOR(-, Double);
+ default:
+ dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(*, Float);
+ IMPLEMENT_BINARY_OPERATOR(*, Double);
+ default:
+ dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(/, Float);
+ IMPLEMENT_BINARY_OPERATOR(/, Double);
+ default:
+ dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
+ break;
+ default:
+ dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
+ case Type::IntegerTyID: \
+ Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
+ break;
+
+#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
+ case Type::VectorTyID: { \
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, \
+ Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
+ } break;
+
+// Handle pointers specially because they must be compared with only as much
+// width as the host has. We _do not_ want to be comparing 64-bit values when
+// running on a 32-bit target, otherwise the upper 32 bits might mess up
+// comparisons if they contain garbage.
+#define IMPLEMENT_POINTER_ICMP(OP) \
+ case Type::PointerTyID: \
+ Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
+ (void*)(intptr_t)Src2.PointerVal); \
+ break;
+
+static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(eq,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
+ IMPLEMENT_POINTER_ICMP(==);
+ default:
+ dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ne,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
+ IMPLEMENT_POINTER_ICMP(!=);
+ default:
+ dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ult,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
+ IMPLEMENT_POINTER_ICMP(<);
+ default:
+ dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(slt,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
+ IMPLEMENT_POINTER_ICMP(<);
+ default:
+ dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ugt,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
+ IMPLEMENT_POINTER_ICMP(>);
+ default:
+ dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sgt,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
+ IMPLEMENT_POINTER_ICMP(>);
+ default:
+ dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ule,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
+ IMPLEMENT_POINTER_ICMP(<=);
+ default:
+ dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sle,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
+ IMPLEMENT_POINTER_ICMP(<=);
+ default:
+ dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(uge,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
+ IMPLEMENT_POINTER_ICMP(>=);
+ default:
+ dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sge,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
+ IMPLEMENT_POINTER_ICMP(>=);
+ default:
+ dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+void Interpreter::visitICmpInst(ICmpInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getPredicate()) {
+ case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
+ default:
+ dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
+ llvm_unreachable(nullptr);
+ }
+
+ SetValue(&I, R, SF);
+}
+
+#define IMPLEMENT_FCMP(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
+ break
+
+#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, \
+ Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
+ break;
+
+#define IMPLEMENT_VECTOR_FCMP(OP) \
+ case Type::VectorTyID: \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
+ IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
+ } else { \
+ IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
+ }
+
+static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(==, Float);
+ IMPLEMENT_FCMP(==, Double);
+ IMPLEMENT_VECTOR_FCMP(==);
+ default:
+ dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,false); \
+ return Dest; \
+ } \
+ } else { \
+ if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,false); \
+ return Dest; \
+ } \
+ }
+
+#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
+ assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
+ Dest.AggregateVal.resize( X.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
+ if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
+ Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
+ Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
+ else { \
+ Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
+ } \
+ }
+
+#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
+ if (TY->isVectorTy()) { \
+ if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
+ MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
+ } else { \
+ MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
+ } \
+ } \
+
+
+
+static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
+ Type *Ty)
+{
+ GenericValue Dest;
+ // If the input is a scalar and Src1 or Src2 is NaN, return false.
+ IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
+ // For vector input, detect NaN lanes and fill the mask.
+ MASK_VECTOR_NANS(Ty, Src1, Src2, false)
+ GenericValue DestMask = Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(!=, Float);
+ IMPLEMENT_FCMP(!=, Double);
+ IMPLEMENT_VECTOR_FCMP(!=);
+ default:
+ dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ // In the vector case, mask out the NaN lanes.
+ if (Ty->isVectorTy())
+ for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+ if (DestMask.AggregateVal[_i].IntVal == false)
+ Dest.AggregateVal[_i].IntVal = APInt(1,false);
+
+ return Dest;
+}
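[Editor's note: a worked example of the NaN masking above, for a
<2 x float> compare with Src1 = <NaN, 1.0> and Src2 = <1.0, 2.0>.]

    // MASK_VECTOR_NANS(..., false) -> DestMask = <0, 1>  (NaN lane forced 0)
    // raw '!=' lane compare        -> Dest     = <1, 1>  (NaN != x is true)
    // final NaN mask-out           -> Dest     = <0, 1>
    // i.e. ONE ("ordered and not equal") is false whenever a lane has a NaN.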
+
+static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(<=, Float);
+ IMPLEMENT_FCMP(<=, Double);
+ IMPLEMENT_VECTOR_FCMP(<=);
+ default:
+ dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(>=, Float);
+ IMPLEMENT_FCMP(>=, Double);
+ IMPLEMENT_VECTOR_FCMP(>=);
+ default:
+ dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(<, Float);
+ IMPLEMENT_FCMP(<, Double);
+ IMPLEMENT_VECTOR_FCMP(<);
+ default:
+ dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(>, Float);
+ IMPLEMENT_FCMP(>, Double);
+ IMPLEMENT_VECTOR_FCMP(>);
+ default:
+ dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+#define IMPLEMENT_UNORDERED(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ } \
+ } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ }
+
+#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
+ if (TY->isVectorTy()) { \
+ GenericValue DestMask = Dest; \
+ Dest = FUNC(Src1, Src2, Ty); \
+ for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
+ if (DestMask.AggregateVal[_i].IntVal == true) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, true); \
+ return Dest; \
+ }
+
+static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
+ return executeFCMP_OEQ(Src1, Src2, Ty);
+
+}
+
+static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
+ return executeFCMP_ONE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
+ return executeFCMP_OLE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
+ return executeFCMP_OGE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
+ return executeFCMP_OLT(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
+ return executeFCMP_OGT(Src1, Src2, Ty);
+}
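[Editor's note: the unordered predicates above return true whenever either
operand is NaN, whereas their ordered counterparts return false; for
example:]

    // executeFCMP_OEQ(NaN, 1.0) -> false  (ordered: NaN fails the compare)
    // executeFCMP_UEQ(NaN, 1.0) -> true   (unordered: NaN forces true)
    // IMPLEMENT_UNORDERED provides the scalar NaN early-out; MASK_VECTOR_NANS
    // and IMPLEMENT_VECTOR_UNORDERED apply the same rule per vector lane.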
+
+static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].FloatVal ==
+ Src1.AggregateVal[_i].FloatVal) &&
+ (Src2.AggregateVal[_i].FloatVal ==
+ Src2.AggregateVal[_i].FloatVal)));
+ } else {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].DoubleVal ==
+ Src1.AggregateVal[_i].DoubleVal) &&
+ (Src2.AggregateVal[_i].DoubleVal ==
+ Src2.AggregateVal[_i].DoubleVal)));
+ }
+ } else if (Ty->isFloatTy())
+ Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
+ Src2.FloatVal == Src2.FloatVal));
+ else {
+ Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
+ Src2.DoubleVal == Src2.DoubleVal));
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].FloatVal !=
+ Src1.AggregateVal[_i].FloatVal) ||
+ (Src2.AggregateVal[_i].FloatVal !=
+ Src2.AggregateVal[_i].FloatVal)));
+ } else {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].DoubleVal !=
+ Src1.AggregateVal[_i].DoubleVal) ||
+ (Src2.AggregateVal[_i].DoubleVal !=
+ Src2.AggregateVal[_i].DoubleVal)));
+ }
+ } else if (Ty->isFloatTy())
+ Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
+ Src2.FloatVal != Src2.FloatVal));
+ else {
+ Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
+ Src2.DoubleVal != Src2.DoubleVal));
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
+ Type *Ty, const bool val) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,val);
+ } else {
+ Dest.IntVal = APInt(1, val);
+ }
+
+ return Dest;
+}
+
+void Interpreter::visitFCmpInst(FCmpInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getPredicate()) {
+ default:
+ dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
+ break;
+ case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
+ break;
+ case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
+ }
+
+ SetValue(&I, R, SF);
+}
+
+static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ GenericValue Result;
+ switch (predicate) {
+ case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
+ case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
+ case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
+ default:
+ dbgs() << "Unhandled Cmp predicate\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+void Interpreter::visitBinaryOperator(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ // First process vector operation
+ if (Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ R.AggregateVal.resize(Src1.AggregateVal.size());
+
+ // Macros to execute binary operation 'OP' over integer vectors
+#define INTEGER_VECTOR_OPERATION(OP) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].IntVal = \
+ Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
+
+ // Additional macro to execute the binary operations udiv/sdiv/urem/srem,
+ // which use method-call rather than operator notation.
+#define INTEGER_VECTOR_FUNCTION(OP) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].IntVal = \
+ Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
+
+ // Macros to execute binary operation 'OP' over floating point type TY
+ // (float or double) vectors
+#define FLOAT_VECTOR_FUNCTION(OP, TY) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].TY = \
+ Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
+
+ // Macro to choose the appropriate TY (float or double) and run the
+ // operation
+#define FLOAT_VECTOR_OP(OP) { \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
+ FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
+ else { \
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
+ FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
+ else { \
+ dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
+ llvm_unreachable(0); \
+ } \
+ } \
+}
+
+ switch(I.getOpcode()){
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
+ case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
+ case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break;
+ case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break;
+ case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break;
+ case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break;
+ case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break;
+ case Instruction::And: INTEGER_VECTOR_OPERATION(&) break;
+ case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break;
+ case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break;
+ case Instruction::FAdd: FLOAT_VECTOR_OP(+) break;
+ case Instruction::FSub: FLOAT_VECTOR_OP(-) break;
+ case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
+ case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
+ case Instruction::FRem:
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].FloatVal =
+ fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
+ else {
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].DoubleVal =
+ fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
+ else {
+ dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ }
+ break;
+ }
+ } else {
+ switch (I.getOpcode()) {
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
+ case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
+ case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
+ case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
+ case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
+ case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
+ case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
+ case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
+ case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
+ case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
+ case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
+ case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
+ case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
+ case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
+ }
+ }
+ SetValue(&I, R, SF);
+}
+
+static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
+ GenericValue Src3, Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
+ Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
+ Src3.AggregateVal[i] : Src2.AggregateVal[i];
+ } else {
+ Dest = (Src1.IntVal == 0) ? Src3 : Src2;
+ }
+ return Dest;
+}
+
+void Interpreter::visitSelectInst(SelectInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type * Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
+ SetValue(&I, R, SF);
+}
+
+//===----------------------------------------------------------------------===//
+// Terminator Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::exitCalled(GenericValue GV) {
+ // runAtExitHandlers() assumes there are no stack frames, but
+ // if exit() was called, then it had a stack frame. Blow away
+ // the stack before interpreting atexit handlers.
+ ECStack.clear();
+ runAtExitHandlers();
+ exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
+}
+
+/// Pop the last stack frame off of ECStack and then copy the result
+/// back into the result variable if we are not returning void. The
+/// result variable may be the ExitValue, or the Value of the calling
+/// CallInst if there was a previous stack frame. This method may
+/// invalidate any ECStack iterators you have. This method also takes
+/// care of switching to the normal destination BB, if we are returning
+/// from an invoke.
+///
+void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
+ GenericValue Result) {
+ // Pop the current stack frame.
+ ECStack.pop_back();
+
+ if (ECStack.empty()) { // Finished main. Put result into exit code...
+ if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
+ ExitValue = Result; // Capture the exit value of the program
+ } else {
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ }
+ } else {
+ // If we have a previous stack frame, and we have a previous call,
+ // fill in the return value...
+ ExecutionContext &CallingSF = ECStack.back();
+ if (Instruction *I = CallingSF.Caller.getInstruction()) {
+ // Save result...
+ if (!CallingSF.Caller.getType()->isVoidTy())
+ SetValue(I, Result, CallingSF);
+ if (InvokeInst *II = dyn_cast<InvokeInst> (I))
+ SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
+ CallingSF.Caller = CallSite(); // We returned from the call...
+ }
+ }
+}
+
+void Interpreter::visitReturnInst(ReturnInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *RetTy = Type::getVoidTy(I.getContext());
+ GenericValue Result;
+
+ // Save away the return value... (if we are not 'ret void')
+ if (I.getNumOperands()) {
+ RetTy = I.getReturnValue()->getType();
+ Result = getOperandValue(I.getReturnValue(), SF);
+ }
+
+ popStackAndReturnValueToCaller(RetTy, Result);
+}
+
+void Interpreter::visitUnreachableInst(UnreachableInst &I) {
+ report_fatal_error("Program executed an 'unreachable' instruction!");
+}
+
+void Interpreter::visitBranchInst(BranchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ BasicBlock *Dest;
+
+ Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
+ if (!I.isUnconditional()) {
+ Value *Cond = I.getCondition();
+ if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
+ Dest = I.getSuccessor(1);
+ }
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+void Interpreter::visitSwitchInst(SwitchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Value* Cond = I.getCondition();
+ Type *ElTy = Cond->getType();
+ GenericValue CondVal = getOperandValue(Cond, SF);
+
+ // Check to see if any of the cases match...
+ BasicBlock *Dest = nullptr;
+ for (auto Case : I.cases()) {
+ GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
+ if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
+ Dest = cast<BasicBlock>(Case.getCaseSuccessor());
+ break;
+ }
+ }
+ if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
+ SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
+}
+
+
+// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
+// This function handles the actual updating of block and instruction iterators
+// as well as execution of all of the PHI nodes in the destination block.
+//
+// This method does this because all of the PHI nodes must be executed
+// atomically, reading their inputs before any of the results are updated. Not
+// doing this can cause problems if the PHI nodes depend on other PHI nodes for
+// their inputs. If the input PHI node is updated before it is read, incorrect
+// results can happen. Thus we use a two-phase approach.
+//
+void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
+ BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
+ SF.CurBB = Dest; // Update CurBB to branch destination
+ SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
+
+ if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
+
+ // Loop over all of the PHI nodes in the current block, reading their inputs.
+ std::vector<GenericValue> ResultValues;
+
+ for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
+ // Search for the value corresponding to this previous bb...
+ int i = PN->getBasicBlockIndex(PrevBB);
+ assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
+ Value *IncomingValue = PN->getIncomingValue(i);
+
+ // Save the incoming value for this PHI node...
+ ResultValues.push_back(getOperandValue(IncomingValue, SF));
+ }
+
+ // Now loop over all of the PHI nodes setting their values...
+ SF.CurInst = SF.CurBB->begin();
+ for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
+ PHINode *PN = cast<PHINode>(SF.CurInst);
+ SetValue(PN, ResultValues[i], SF);
+ }
+}
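[Editor's note: the two-phase requirement is easiest to see with PHI nodes
that reference each other. An illustrative IR fragment, shown in comments:]

    // loop:
    //   %a = phi i32 [ 0, %entry ], [ %b, %loop ]
    //   %b = phi i32 [ 1, %entry ], [ %a, %loop ]
    // On re-entry from %loop, both incoming values must come from the
    // previous iteration. If SetValue wrote %a before %b's input was read,
    // %b would observe the new %a and the swap would collapse to one value.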
+
+//===----------------------------------------------------------------------===//
+// Memory Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::visitAllocaInst(AllocaInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ Type *Ty = I.getType()->getElementType(); // Type to be allocated
+
+ // Get the number of elements being allocated by the array...
+ unsigned NumElements =
+ getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
+
+ unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
+
+ // Avoid malloc-ing zero bytes, use max()...
+ unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
+
+ // Allocate enough memory to hold the type...
+ void *Memory = safe_malloc(MemToAlloc);
+
+ LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
+ << " bytes) x " << NumElements << " (Total: " << MemToAlloc
+ << ") at " << uintptr_t(Memory) << '\n');
+
+ GenericValue Result = PTOGV(Memory);
+ assert(Result.PointerVal && "Null pointer returned by malloc!");
+ SetValue(&I, Result, SF);
+
+ if (I.getOpcode() == Instruction::Alloca)
+ ECStack.back().Allocas.add(Memory);
+}
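[Editor's note: the max(1U, ...) above means that, for example, an
"alloca i32, i32 0" requests MemToAlloc = max(1, 0 * 4) = 1 byte, so the
interpreter never performs a zero-byte allocation.]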
+
+// getElementOffset - The workhorse for getelementptr.
+//
+GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E,
+ ExecutionContext &SF) {
+ assert(Ptr->getType()->isPointerTy() &&
+ "Cannot getElementOffset of a nonpointer type!");
+
+ uint64_t Total = 0;
+
+ for (; I != E; ++I) {
+ if (StructType *STy = I.getStructTypeOrNull()) {
+ const StructLayout *SLO = getDataLayout().getStructLayout(STy);
+
+ const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
+ unsigned Index = unsigned(CPU->getZExtValue());
+
+ Total += SLO->getElementOffset(Index);
+ } else {
+ // Get the index number for the array... which must be a 32- or 64-bit integer.
+ GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
+
+ int64_t Idx;
+ unsigned BitWidth =
+ cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
+ if (BitWidth == 32)
+ Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
+ else {
+ assert(BitWidth == 64 && "Invalid index type for getelementptr");
+ Idx = (int64_t)IdxGV.IntVal.getZExtValue();
+ }
+ Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
+ }
+ }
+
+ GenericValue Result;
+ Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
+ LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
+ return Result;
+}
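[Editor's note: a worked example, assuming a typical 64-bit DataLayout in
which i32 has alloc size 4 and the struct below has the double at offset 8
and an overall alloc size of 16.]

    // %S = type { i32, double }
    // getelementptr %S, %S* %p, i64 %i, i32 1
    // array step  : Total += getTypeAllocSize(%S) * %i        ; 16 * %i
    // struct step : Total += StructLayout::getElementOffset(1) ; + 8
    // result      : PointerVal = (char *)%p + 16 * %i + 8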
+
+void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeGEPOperation(I.getPointerOperand(),
+ gep_type_begin(I), gep_type_end(I), SF), SF);
+}
+
+void Interpreter::visitLoadInst(LoadInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
+ GenericValue Result;
+ LoadValueFromMemory(Result, Ptr, I.getType());
+ SetValue(&I, Result, SF);
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile load " << I;
+}
+
+void Interpreter::visitStoreInst(StoreInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Val = getOperandValue(I.getOperand(0), SF);
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
+ I.getOperand(0)->getType());
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile store: " << I;
+}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::visitCallSite(CallSite CS) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Check to see if this is an intrinsic function call...
+ Function *F = CS.getCalledFunction();
+ if (F && F->isDeclaration())
+ switch (F->getIntrinsicID()) {
+ case Intrinsic::not_intrinsic:
+ break;
+ case Intrinsic::vastart: { // va_start
+ GenericValue ArgIndex;
+ ArgIndex.UIntPairVal.first = ECStack.size() - 1;
+ ArgIndex.UIntPairVal.second = 0;
+ SetValue(CS.getInstruction(), ArgIndex, SF);
+ return;
+ }
+ case Intrinsic::vaend: // va_end is a noop for the interpreter
+ return;
+ case Intrinsic::vacopy: // va_copy: dest = src
+ SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
+ return;
+ default:
+ // If it is an unknown intrinsic function, use the intrinsic lowering
+ // class to transform it into hopefully tasty LLVM code.
+ //
+ BasicBlock::iterator me(CS.getInstruction());
+ BasicBlock *Parent = CS.getInstruction()->getParent();
+ bool atBegin(Parent->begin() == me);
+ if (!atBegin)
+ --me;
+ IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
+
+ // Restore the CurInst pointer to the first instruction newly inserted, if
+ // any.
+ if (atBegin) {
+ SF.CurInst = Parent->begin();
+ } else {
+ SF.CurInst = me;
+ ++SF.CurInst;
+ }
+ return;
+ }
+
+
+ SF.Caller = CS;
+ std::vector<GenericValue> ArgVals;
+ const unsigned NumArgs = SF.Caller.arg_size();
+ ArgVals.reserve(NumArgs);
+ uint16_t pNum = 1;
+ for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
+ e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
+ Value *V = *i;
+ ArgVals.push_back(getOperandValue(V, SF));
+ }
+
+ // To handle indirect calls, we must get the pointer value from the argument
+ // and treat it as a function pointer.
+ GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
+ callFunction((Function*)GVTOP(SRC), ArgVals);
+}
+
+// auxiliary function for shift operations
+static unsigned getShiftAmount(uint64_t orgShiftAmount,
+ llvm::APInt valueToShift) {
+ unsigned valueWidth = valueToShift.getBitWidth();
+ if (orgShiftAmount < (uint64_t)valueWidth)
+ return orgShiftAmount;
+ // According to the LLVM documentation, if orgShiftAmount >= valueWidth
+ // the result is undefined, but we instead shift by this rule:
+ return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
+}
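[Editor's note: a worked example for a 32-bit valueToShift: NextPowerOf2(31)
is 32, so the mask is 31 and an out-of-range shift amount of 35 is reduced
to 35 & 31 == 3, mirroring common hardware behavior instead of LLVM IR's
undefined result.]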
+
+
+void Interpreter::visitShl(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitLShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitAShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ size_t src1Size = Src1.AggregateVal.size();
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ Type *SrcTy = SrcVal->getType();
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned NumElts = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(NumElts);
+ for (unsigned i = 0; i < NumElts; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
+ } else {
+ IntegerType *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.trunc(DBitWidth);
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
+ } else {
+ auto *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.sext(DBitWidth);
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
+ } else {
+ auto *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.zext(DBitWidth);
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+ assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
+ DstTy->getScalarType()->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
+ } else {
+ assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+ Dest.FloatVal = (float)Src.DoubleVal;
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+ assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
+ DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
+ } else {
+ assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
+ "Invalid FPExt instruction");
+ Dest.DoubleVal = (double)Src.FloatVal;
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (SrcTy->getTypeID() == Type::VectorTyID) {
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
+ uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+
+ if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+ assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+ Src.AggregateVal[i].FloatVal, DBitWidth);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+ Src.AggregateVal[i].DoubleVal, DBitWidth);
+ }
+ } else {
+ // scalar
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else {
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ }
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (SrcTy->getTypeID() == Type::VectorTyID) {
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
+ uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+ assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+ Src.AggregateVal[i].FloatVal, DBitWidth);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+ Src.AggregateVal[i].DoubleVal, DBitWidth);
+ }
+ } else {
+ // scalar
+ unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else {
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ }
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (DstVecTy->getTypeID() == Type::FloatTyID) {
+ assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal =
+ APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
+ }
+ } else {
+ // scalar
+ assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
+ else {
+ Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
+ }
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (DstVecTy->getTypeID() == Type::FloatTyID) {
+ assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal =
+ APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
+ }
+ } else {
+ // scalar
+ assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
+ else {
+ Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
+ }
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
+
+ Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
+ return Dest;
+}
+
+GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+  assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
+
+ uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
+ if (PtrSize != Src.IntVal.getBitWidth())
+ Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
+
+ Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
+ return Dest;
+}
+
+GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+
+ // This instruction supports bitwise conversion of vectors to integers and
+ // to vectors of other types (as long as they have the same size)
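+  // For example, on a little-endian target,
+  //   bitcast <2 x i32> <i32 1, i32 2> to i64
+  // packs element 0 into the low 32 bits, producing 0x0000000200000001.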
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if ((SrcTy->getTypeID() == Type::VectorTyID) ||
+ (DstTy->getTypeID() == Type::VectorTyID)) {
+ // vector src bitcast to vector dst or vector src bitcast to scalar dst or
+ // scalar src bitcast to vector dst
+ bool isLittleEndian = getDataLayout().isLittleEndian();
+ GenericValue TempDst, TempSrc, SrcVec;
+ Type *SrcElemTy;
+ Type *DstElemTy;
+ unsigned SrcBitSize;
+ unsigned DstBitSize;
+ unsigned SrcNum;
+ unsigned DstNum;
+
+ if (SrcTy->getTypeID() == Type::VectorTyID) {
+ SrcElemTy = SrcTy->getScalarType();
+ SrcBitSize = SrcTy->getScalarSizeInBits();
+ SrcNum = Src.AggregateVal.size();
+ SrcVec = Src;
+ } else {
+ // if src is scalar value, make it vector <1 x type>
+ SrcElemTy = SrcTy;
+ SrcBitSize = SrcTy->getPrimitiveSizeInBits();
+ SrcNum = 1;
+ SrcVec.AggregateVal.push_back(Src);
+ }
+
+ if (DstTy->getTypeID() == Type::VectorTyID) {
+ DstElemTy = DstTy->getScalarType();
+ DstBitSize = DstTy->getScalarSizeInBits();
+ DstNum = (SrcNum * SrcBitSize) / DstBitSize;
+ } else {
+ DstElemTy = DstTy;
+ DstBitSize = DstTy->getPrimitiveSizeInBits();
+ DstNum = 1;
+ }
+
+ if (SrcNum * SrcBitSize != DstNum * DstBitSize)
+ llvm_unreachable("Invalid BitCast");
+
+ // If src is floating point, cast to integer first.
+ TempSrc.AggregateVal.resize(SrcNum);
+ if (SrcElemTy->isFloatTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
+
+ } else if (SrcElemTy->isDoubleTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
+ } else if (SrcElemTy->isIntegerTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
+ } else {
+      // Pointers are not allowed as the element type of a vector.
+ llvm_unreachable("Invalid Bitcast");
+ }
+
+ // now TempSrc is integer type vector
+ if (DstNum < SrcNum) {
+ // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
+ unsigned Ratio = SrcNum / DstNum;
+ unsigned SrcElt = 0;
+ for (unsigned i = 0; i < DstNum; i++) {
+ GenericValue Elt;
+ Elt.IntVal = 0;
+ Elt.IntVal = Elt.IntVal.zext(DstBitSize);
+ unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ APInt Tmp;
+ Tmp = Tmp.zext(SrcBitSize);
+ Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
+ Tmp = Tmp.zext(DstBitSize);
+ Tmp <<= ShiftAmt;
+ ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
+ Elt.IntVal |= Tmp;
+ }
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ } else {
+ // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
+ unsigned Ratio = DstNum / SrcNum;
+ for (unsigned i = 0; i < SrcNum; i++) {
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ GenericValue Elt;
+ Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
+ Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
+ Elt.IntVal.lshrInPlace(ShiftAmt);
+ // it could be DstBitSize == SrcBitSize, so check it
+ if (DstBitSize < SrcBitSize)
+ Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ }
+ }
+
+ // convert result from integer to specified type
+ if (DstTy->getTypeID() == Type::VectorTyID) {
+ if (DstElemTy->isDoubleTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ TempDst.AggregateVal[i].IntVal.bitsToDouble();
+ } else if (DstElemTy->isFloatTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].FloatVal =
+ TempDst.AggregateVal[i].IntVal.bitsToFloat();
+ } else {
+ Dest = TempDst;
+ }
+ } else {
+ if (DstElemTy->isDoubleTy())
+ Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
+ else if (DstElemTy->isFloatTy()) {
+ Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
+ } else {
+ Dest.IntVal = TempDst.AggregateVal[0].IntVal;
+ }
+ }
+ } else { // if ((SrcTy->getTypeID() == Type::VectorTyID) ||
+ // (DstTy->getTypeID() == Type::VectorTyID))
+
+ // scalar src bitcast to scalar dst
+ if (DstTy->isPointerTy()) {
+ assert(SrcTy->isPointerTy() && "Invalid BitCast");
+ Dest.PointerVal = Src.PointerVal;
+ } else if (DstTy->isIntegerTy()) {
+ if (SrcTy->isFloatTy())
+ Dest.IntVal = APInt::floatToBits(Src.FloatVal);
+ else if (SrcTy->isDoubleTy()) {
+ Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
+ } else if (SrcTy->isIntegerTy()) {
+ Dest.IntVal = Src.IntVal;
+ } else {
+ llvm_unreachable("Invalid BitCast");
+ }
+ } else if (DstTy->isFloatTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.FloatVal = Src.IntVal.bitsToFloat();
+ else {
+ Dest.FloatVal = Src.FloatVal;
+ }
+ } else if (DstTy->isDoubleTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.DoubleVal = Src.IntVal.bitsToDouble();
+ else {
+ Dest.DoubleVal = Src.DoubleVal;
+ }
+ } else {
+ llvm_unreachable("Invalid Bitcast");
+ }
+ }
+
+ return Dest;
+}
+
+void Interpreter::visitTruncInst(TruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSExtInst(SExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitZExtInst(ZExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPTruncInst(FPTruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPExtInst(FPExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitUIToFPInst(UIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSIToFPInst(SIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToUIInst(FPToUIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToSIInst(FPToSIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitBitCastInst(BitCastInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+#define IMPLEMENT_VAARG(TY) \
+ case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
+
+void Interpreter::visitVAArgInst(VAArgInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Get the incoming valist parameter. LLI treats the valist as a
+ // (ec-stack-depth var-arg-index) pair.
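+  // For example, the pair (1, 0) names the first variadic value recorded in
+  // the VarArgs vector of the stack frame at depth 1.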
+ GenericValue VAList = getOperandValue(I.getOperand(0), SF);
+ GenericValue Dest;
+ GenericValue Src = ECStack[VAList.UIntPairVal.first]
+ .VarArgs[VAList.UIntPairVal.second];
+ Type *Ty = I.getType();
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ Dest.IntVal = Src.IntVal;
+ break;
+ IMPLEMENT_VAARG(Pointer);
+ IMPLEMENT_VAARG(Float);
+ IMPLEMENT_VAARG(Double);
+ default:
+ dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+
+ // Set the Value of this Instruction.
+ SetValue(&I, Dest, SF);
+
+ // Move the pointer to the next vararg.
+ ++VAList.UIntPairVal.second;
+}
+
+void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+
+ Type *Ty = I.getType();
+ const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
+
+ if(Src1.AggregateVal.size() > indx) {
+ switch (Ty->getTypeID()) {
+ default:
+ dbgs() << "Unhandled destination type for extractelement instruction: "
+ << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ break;
+ case Type::IntegerTyID:
+ Dest.IntVal = Src1.AggregateVal[indx].IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
+ break;
+ }
+ } else {
+ dbgs() << "Invalid index in extractelement instruction\n";
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitInsertElementInst(InsertElementInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ VectorType *Ty = cast<VectorType>(I.getType());
+
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue Dest;
+
+ Type *TyContained = Ty->getElementType();
+
+ const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
+ Dest.AggregateVal = Src1.AggregateVal;
+
+ if(Src1.AggregateVal.size() <= indx)
+ llvm_unreachable("Invalid index in insertelement instruction");
+ switch (TyContained->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ case Type::IntegerTyID:
+ Dest.AggregateVal[indx].IntVal = Src2.IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
+ break;
+ }
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
+ ExecutionContext &SF = ECStack.back();
+
+ VectorType *Ty = cast<VectorType>(I.getType());
+
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue Dest;
+
+ // There is no need to check types of src1 and src2, because the compiled
+ // bytecode can't contain different types for src1 and src2 for a
+ // shufflevector instruction.
+
+ Type *TyContained = Ty->getElementType();
+ unsigned src1Size = (unsigned)Src1.AggregateVal.size();
+ unsigned src2Size = (unsigned)Src2.AggregateVal.size();
+ unsigned src3Size = (unsigned)Src3.AggregateVal.size();
+
+ Dest.AggregateVal.resize(src3Size);
+
+ switch (TyContained->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ break;
+ case Type::IntegerTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
+ if(j < src1Size)
+ Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
+ else
+        // The selector must be less than the combined length of the first
+        // and second operands, and the IR verifier should reject a case like
+        //   %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
+        //                        <2 x i32> <i32 0, i32 5>,
+        // where i32 5 is out of range, but keep this as an additional check:
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ case Type::FloatTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
+ if(j < src1Size)
+ Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
+ else
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ case Type::DoubleTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
+ if(j < src1Size)
+ Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].DoubleVal =
+ Src2.AggregateVal[j-src1Size].DoubleVal;
+ else
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ }
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Value *Agg = I.getAggregateOperand();
+ GenericValue Dest;
+ GenericValue Src = getOperandValue(Agg, SF);
+
+ ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+ unsigned Num = I.getNumIndices();
+ GenericValue *pSrc = &Src;
+
+ for (unsigned i = 0 ; i < Num; ++i) {
+ pSrc = &pSrc->AggregateVal[*IdxBegin];
+ ++IdxBegin;
+ }
+
+ Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+ switch (IndexedType->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for extractelement instruction");
+ break;
+ case Type::IntegerTyID:
+ Dest.IntVal = pSrc->IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.FloatVal = pSrc->FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = pSrc->DoubleVal;
+ break;
+ case Type::ArrayTyID:
+ case Type::StructTyID:
+ case Type::VectorTyID:
+ Dest.AggregateVal = pSrc->AggregateVal;
+ break;
+ case Type::PointerTyID:
+ Dest.PointerVal = pSrc->PointerVal;
+ break;
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitInsertValueInst(InsertValueInst &I) {
+
+ ExecutionContext &SF = ECStack.back();
+ Value *Agg = I.getAggregateOperand();
+
+ GenericValue Src1 = getOperandValue(Agg, SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest = Src1; // Dest is a slightly changed Src1
+
+ ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+ unsigned Num = I.getNumIndices();
+
+ GenericValue *pDest = &Dest;
+ for (unsigned i = 0 ; i < Num; ++i) {
+ pDest = &pDest->AggregateVal[*IdxBegin];
+ ++IdxBegin;
+ }
+ // pDest points to the target value in the Dest now
+
+ Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+
+ switch (IndexedType->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ break;
+ case Type::IntegerTyID:
+ pDest->IntVal = Src2.IntVal;
+ break;
+ case Type::FloatTyID:
+ pDest->FloatVal = Src2.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ pDest->DoubleVal = Src2.DoubleVal;
+ break;
+ case Type::ArrayTyID:
+ case Type::StructTyID:
+ case Type::VectorTyID:
+ pDest->AggregateVal = Src2.AggregateVal;
+ break;
+ case Type::PointerTyID:
+ pDest->PointerVal = Src2.PointerVal;
+ break;
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
+ ExecutionContext &SF) {
+ switch (CE->getOpcode()) {
+ case Instruction::Trunc:
+ return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::ZExt:
+ return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SExt:
+ return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPTrunc:
+ return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPExt:
+ return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::UIToFP:
+ return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SIToFP:
+ return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToUI:
+ return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToSI:
+ return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::PtrToInt:
+ return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::IntToPtr:
+ return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::BitCast:
+ return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::GetElementPtr:
+ return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
+ gep_type_end(CE), SF);
+ case Instruction::FCmp:
+ case Instruction::ICmp:
+ return executeCmpInst(CE->getPredicate(),
+ getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ CE->getOperand(0)->getType());
+ case Instruction::Select:
+ return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ getOperandValue(CE->getOperand(2), SF),
+ CE->getOperand(0)->getType());
+  default:
+ break;
+ }
+
+ // The cases below here require a GenericValue parameter for the result
+ // so we initialize one, compute it and then return it.
+ GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
+ GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
+ GenericValue Dest;
+ Type * Ty = CE->getOperand(0)->getType();
+ switch (CE->getOpcode()) {
+ case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
+ case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
+ case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
+ case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
+ case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
+ case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
+ case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
+ case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
+ case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
+ case Instruction::Shl:
+ Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::LShr:
+ Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::AShr:
+ Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
+ break;
+ default:
+ dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
+ llvm_unreachable("Unhandled ConstantExpr");
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
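+  // ConstantExpr is itself a Constant, so it must be tested first here;
+  // otherwise the generic Constant case below would claim it.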
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ return getConstantExprValue(CE, SF);
+ } else if (Constant *CPV = dyn_cast<Constant>(V)) {
+ return getConstantValue(CPV);
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ return PTOGV(getPointerToGlobal(GV));
+ } else {
+ return SF.Values[V];
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Dispatch and Execution Code
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// callFunction - Execute the specified function...
+//
+void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
+ assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
+ ECStack.back().Caller.arg_size() == ArgVals.size()) &&
+ "Incorrect number of arguments passed into function call!");
+ // Make a new stack frame... and fill it in.
+ ECStack.emplace_back();
+ ExecutionContext &StackFrame = ECStack.back();
+ StackFrame.CurFunction = F;
+
+ // Special handling for external functions.
+ if (F->isDeclaration()) {
+ GenericValue Result = callExternalFunction (F, ArgVals);
+ // Simulate a 'ret' instruction of the appropriate type.
+ popStackAndReturnValueToCaller (F->getReturnType (), Result);
+ return;
+ }
+
+ // Get pointers to first LLVM BB & Instruction in function.
+ StackFrame.CurBB = &F->front();
+ StackFrame.CurInst = StackFrame.CurBB->begin();
+
+ // Run through the function arguments and initialize their values...
+ assert((ArgVals.size() == F->arg_size() ||
+ (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
+ "Invalid number of values passed to function invocation!");
+
+ // Handle non-varargs arguments...
+ unsigned i = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI, ++i)
+ SetValue(&*AI, ArgVals[i], StackFrame);
+
+ // Handle varargs arguments...
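+  // (e.g. for a call to a declaration of type i32 (i8*, ...) with three
+  // actual arguments, the i8* formal is bound above and the remaining two
+  // values land in this frame's VarArgs vector).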
+ StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
+}
+
+
+void Interpreter::run() {
+ while (!ECStack.empty()) {
+ // Interpret a single instruction & increment the "PC".
+ ExecutionContext &SF = ECStack.back(); // Current stack frame
+ Instruction &I = *SF.CurInst++; // Increment before execute
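+    // Incrementing before dispatch lets handlers for control-flow
+    // instructions (branch, switch, call, ret) retarget CurBB/CurInst
+    // without this loop stepping past their chosen successor.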
+
+ // Track the number of dynamic instructions executed.
+ ++NumDynamicInsts;
+
+ LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
+ visit(I); // Dispatch to one of the visit* methods...
+ }
+}
diff --git a/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
new file mode 100644
index 0000000000000..71b7f893d712d
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -0,0 +1,509 @@
+//===-- ExternalFunctions.cpp - Implement External Functions --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to deal with invoking "external" functions, as
+// well as code that implements "exported" external functions.
+//
+// There are currently two mechanisms for handling external functions in the
+// Interpreter. The first is to implement lle_* wrapper functions that are
+// specific to well-known library functions which manually translate the
+// arguments from GenericValues and make the call. If such a wrapper does
+// not exist, and libffi is available, then the Interpreter will attempt to
+// invoke the function using libffi, after finding its address.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Config/config.h" // Detect libffi
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cmath>
+#include <csignal>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <map>
+#include <mutex>
+#include <string>
+#include <utility>
+#include <vector>
+
+#ifdef HAVE_FFI_CALL
+#ifdef HAVE_FFI_H
+#include <ffi.h>
+#define USE_LIBFFI
+#elif HAVE_FFI_FFI_H
+#include <ffi/ffi.h>
+#define USE_LIBFFI
+#endif
+#endif
+
+using namespace llvm;
+
+static ManagedStatic<sys::Mutex> FunctionsLock;
+
+typedef GenericValue (*ExFunc)(FunctionType *, ArrayRef<GenericValue>);
+static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
+static ManagedStatic<std::map<std::string, ExFunc> > FuncNames;
+
+#ifdef USE_LIBFFI
+typedef void (*RawFunc)();
+static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
+#endif
+
+static Interpreter *TheInterpreter;
+
+static char getTypeID(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return 'V';
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 1: return 'o';
+ case 8: return 'B';
+ case 16: return 'S';
+ case 32: return 'I';
+ case 64: return 'L';
+ default: return 'N';
+ }
+ case Type::FloatTyID: return 'F';
+ case Type::DoubleTyID: return 'D';
+ case Type::PointerTyID: return 'P';
+  case Type::FunctionTyID: return 'M';
+ case Type::StructTyID: return 'T';
+ case Type::ArrayTyID: return 'A';
+ default: return 'U';
+ }
+}
+
+// Try to find the address of an external function given a Function object.
+// Note that the interpreter does not know how to assemble a real call in the
+// general case (that is the JIT's job), so it assumes that all external
+// functions share the same (and fairly "general") signature.
+// The "lle_X_" functions are the typical example.
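+// For example, for a declaration of i32 @printf(i8*, ...), the name built
+// below is "lle_IP_printf" ('I' for the i32 return, 'P' for the i8* fixed
+// parameter), with "lle_X_printf" tried as a signature-agnostic fallback.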
+static ExFunc lookupFunction(const Function *F) {
+ // Function not found, look it up... start by figuring out what the
+ // composite function name should be.
+ std::string ExtName = "lle_";
+ FunctionType *FT = F->getFunctionType();
+ ExtName += getTypeID(FT->getReturnType());
+ for (Type *T : FT->params())
+ ExtName += getTypeID(T);
+ ExtName += ("_" + F->getName()).str();
+
+ sys::ScopedLock Writer(*FunctionsLock);
+ ExFunc FnPtr = (*FuncNames)[ExtName];
+ if (!FnPtr)
+ FnPtr = (*FuncNames)[("lle_X_" + F->getName()).str()];
+ if (!FnPtr) // Try calling a generic function... if it exists...
+ FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
+ ("lle_X_" + F->getName()).str());
+ if (FnPtr)
+ ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
+ return FnPtr;
+}
+
+#ifdef USE_LIBFFI
+static ffi_type *ffiTypeFor(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return &ffi_type_void;
+  case Type::IntegerTyID:
+    switch (cast<IntegerType>(Ty)->getBitWidth()) {
+      case 8: return &ffi_type_sint8;
+      case 16: return &ffi_type_sint16;
+      case 32: return &ffi_type_sint32;
+      case 64: return &ffi_type_sint64;
+    }
+    break; // Unhandled integer widths fall through to the error below.
+ case Type::FloatTyID: return &ffi_type_float;
+ case Type::DoubleTyID: return &ffi_type_double;
+ case Type::PointerTyID: return &ffi_type_pointer;
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ report_fatal_error("Type could not be mapped for use with libffi.");
+ return NULL;
+}
+
+static void *ffiValueFor(Type *Ty, const GenericValue &AV,
+ void *ArgDataPtr) {
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: {
+ int8_t *I8Ptr = (int8_t *) ArgDataPtr;
+ *I8Ptr = (int8_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 16: {
+ int16_t *I16Ptr = (int16_t *) ArgDataPtr;
+ *I16Ptr = (int16_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 32: {
+ int32_t *I32Ptr = (int32_t *) ArgDataPtr;
+ *I32Ptr = (int32_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 64: {
+ int64_t *I64Ptr = (int64_t *) ArgDataPtr;
+ *I64Ptr = (int64_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+    }
+    break; // Unhandled integer widths fall through to the error below.
+ case Type::FloatTyID: {
+ float *FloatPtr = (float *) ArgDataPtr;
+ *FloatPtr = AV.FloatVal;
+ return ArgDataPtr;
+ }
+ case Type::DoubleTyID: {
+ double *DoublePtr = (double *) ArgDataPtr;
+ *DoublePtr = AV.DoubleVal;
+ return ArgDataPtr;
+ }
+ case Type::PointerTyID: {
+ void **PtrPtr = (void **) ArgDataPtr;
+ *PtrPtr = GVTOP(AV);
+ return ArgDataPtr;
+ }
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ report_fatal_error("Type value could not be mapped for use with libffi.");
+ return NULL;
+}
+
+static bool ffiInvoke(RawFunc Fn, Function *F, ArrayRef<GenericValue> ArgVals,
+ const DataLayout &TD, GenericValue &Result) {
+ ffi_cif cif;
+ FunctionType *FTy = F->getFunctionType();
+ const unsigned NumArgs = F->arg_size();
+
+ // TODO: We don't have type information about the remaining arguments, because
+ // this information is never passed into ExecutionEngine::runFunction().
+ if (ArgVals.size() > NumArgs && F->isVarArg()) {
+ report_fatal_error("Calling external var arg function '" + F->getName()
+ + "' is not supported by the Interpreter.");
+ }
+
+ unsigned ArgBytes = 0;
+
+ std::vector<ffi_type*> args(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ Type *ArgTy = FTy->getParamType(ArgNo);
+ args[ArgNo] = ffiTypeFor(ArgTy);
+ ArgBytes += TD.getTypeStoreSize(ArgTy);
+ }
+
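+  // Marshal the arguments into one packed byte buffer; values[] records the
+  // address of each argument's slot so libffi can read each in turn.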
+ SmallVector<uint8_t, 128> ArgData;
+ ArgData.resize(ArgBytes);
+ uint8_t *ArgDataPtr = ArgData.data();
+ SmallVector<void*, 16> values(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ Type *ArgTy = FTy->getParamType(ArgNo);
+ values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
+ ArgDataPtr += TD.getTypeStoreSize(ArgTy);
+ }
+
+ Type *RetTy = FTy->getReturnType();
+ ffi_type *rtype = ffiTypeFor(RetTy);
+
+ if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, args.data()) ==
+ FFI_OK) {
+ SmallVector<uint8_t, 128> ret;
+ if (RetTy->getTypeID() != Type::VoidTyID)
+ ret.resize(TD.getTypeStoreSize(RetTy));
+ ffi_call(&cif, Fn, ret.data(), values.data());
+ switch (RetTy->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(RetTy)->getBitWidth()) {
+ case 8: Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
+ case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
+ case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
+ case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
+ }
+ break;
+ case Type::FloatTyID: Result.FloatVal = *(float *) ret.data(); break;
+ case Type::DoubleTyID: Result.DoubleVal = *(double*) ret.data(); break;
+ case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
+ default: break;
+ }
+ return true;
+ }
+
+ return false;
+}
+#endif // USE_LIBFFI
+
+GenericValue Interpreter::callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals) {
+ TheInterpreter = this;
+
+ std::unique_lock<sys::Mutex> Guard(*FunctionsLock);
+
+ // Do a lookup to see if the function is in our cache... this should just be a
+ // deferred annotation!
+ std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F);
+ if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F)
+ : FI->second) {
+ Guard.unlock();
+ return Fn(F->getFunctionType(), ArgVals);
+ }
+
+#ifdef USE_LIBFFI
+ std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F);
+ RawFunc RawFn;
+ if (RF == RawFunctions->end()) {
+ RawFn = (RawFunc)(intptr_t)
+ sys::DynamicLibrary::SearchForAddressOfSymbol(F->getName());
+ if (!RawFn)
+ RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
+ if (RawFn != 0)
+ RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
+ } else {
+ RawFn = RF->second;
+ }
+
+ Guard.unlock();
+
+ GenericValue Result;
+ if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
+ return Result;
+#endif // USE_LIBFFI
+
+ if (F->getName() == "__main")
+ errs() << "Tried to execute an unknown external function: "
+ << *F->getType() << " __main\n";
+ else
+ report_fatal_error("Tried to execute an unknown external function: " +
+ F->getName());
+#ifndef USE_LIBFFI
+ errs() << "Recompiling LLVM with --enable-libffi might help.\n";
+#endif
+ return GenericValue();
+}
+
+//===----------------------------------------------------------------------===//
+// Functions "exported" to the running application...
+//
+
+// void atexit(Function*)
+static GenericValue lle_X_atexit(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() == 1);
+ TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// void exit(int)
+static GenericValue lle_X_exit(FunctionType *FT, ArrayRef<GenericValue> Args) {
+ TheInterpreter->exitCalled(Args[0]);
+ return GenericValue();
+}
+
+// void abort(void)
+static GenericValue lle_X_abort(FunctionType *FT, ArrayRef<GenericValue> Args) {
+ //FIXME: should we report or raise here?
+ //report_fatal_error("Interpreted program raised SIGABRT");
+ raise (SIGABRT);
+ return GenericValue();
+}
+
+// int sprintf(char *, const char *, ...) - a very rough implementation to make
+// output useful.
+static GenericValue lle_X_sprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ char *OutputBuffer = (char *)GVTOP(Args[0]);
+ const char *FmtStr = (const char *)GVTOP(Args[1]);
+ unsigned ArgNo = 2;
+
+ // printf should return # chars printed. This is completely incorrect, but
+ // close enough for now.
+ GenericValue GV;
+ GV.IntVal = APInt(32, strlen(FmtStr));
+ while (true) {
+ switch (*FmtStr) {
+ case 0: return GV; // Null terminator...
+ default: // Normal nonspecial character
+ sprintf(OutputBuffer++, "%c", *FmtStr++);
+ break;
+ case '\\': { // Handle escape codes
+ sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1));
+ FmtStr += 2; OutputBuffer += 2;
+ break;
+ }
+ case '%': { // Handle format specifiers
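+      // e.g. for "%-8.3f" everything up to and including the conversion
+      // character 'f' is copied into FmtBuf, then dispatched on below.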
+ char FmtBuf[100] = "", Buffer[1000] = "";
+ char *FB = FmtBuf;
+ *FB++ = *FmtStr++;
+ char Last = *FB++ = *FmtStr++;
+ unsigned HowLong = 0;
+ while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
+ Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
+ Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
+ Last != 'p' && Last != 's' && Last != '%') {
+ if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's
+ Last = *FB++ = *FmtStr++;
+ }
+ *FB = 0;
+
+ switch (Last) {
+ case '%':
+ memcpy(Buffer, "%", 2); break;
+ case 'c':
+ sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'd': case 'i':
+ case 'u': case 'o':
+ case 'x': case 'X':
+ if (HowLong >= 1) {
+ if (HowLong == 1 &&
+ TheInterpreter->getDataLayout().getPointerSizeInBits() == 64 &&
+ sizeof(long) < sizeof(int64_t)) {
+ // Make sure we use %lld with a 64 bit argument because we might be
+ // compiling LLI on a 32 bit compiler.
+ unsigned Size = strlen(FmtBuf);
+ FmtBuf[Size] = FmtBuf[Size-1];
+ FmtBuf[Size+1] = 0;
+ FmtBuf[Size-1] = 'l';
+ }
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
+ } else
+ sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'e': case 'E': case 'g': case 'G': case 'f':
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
+ case 'p':
+ sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
+ case 's':
+ sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
+ default:
+ errs() << "<unknown printf code '" << *FmtStr << "'!>";
+ ArgNo++; break;
+ }
+ size_t Len = strlen(Buffer);
+ memcpy(OutputBuffer, Buffer, Len + 1);
+ OutputBuffer += Len;
+ }
+ break;
+ }
+ }
+ return GV;
+}
+
+// int printf(const char *, ...) - a very rough implementation to make output
+// useful.
+static GenericValue lle_X_printf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV((void*)&Buffer[0]));
+ NewArgs.insert(NewArgs.end(), Args.begin(), Args.end());
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+ outs() << Buffer;
+ return GV;
+}
+
+// int sscanf(const char *str, const char *format, ...);
+static GenericValue lle_X_sscanf(FunctionType *FT,
+ ArrayRef<GenericValue> args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int scanf(const char *format, ...);
+static GenericValue lle_X_scanf(FunctionType *FT, ArrayRef<GenericValue> args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
+// output useful.
+static GenericValue lle_X_fprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() >= 2);
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV(Buffer));
+ NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+
+ fputs(Buffer, (FILE *) GVTOP(Args[0]));
+ return GV;
+}
+
+static GenericValue lle_X_memset(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ int val = (int)Args[1].IntVal.getSExtValue();
+ size_t len = (size_t)Args[2].IntVal.getZExtValue();
+ memset((void *)GVTOP(Args[0]), val, len);
+ // llvm.memset.* returns void, lle_X_* returns GenericValue,
+ // so here we return GenericValue with IntVal set to zero
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+static GenericValue lle_X_memcpy(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ memcpy(GVTOP(Args[0]), GVTOP(Args[1]),
+ (size_t)(Args[2].IntVal.getLimitedValue()));
+
+ // llvm.memcpy* returns void, lle_X_* returns GenericValue,
+ // so here we return GenericValue with IntVal set to zero
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+void Interpreter::initializeExternalFunctions() {
+ sys::ScopedLock Writer(*FunctionsLock);
+ (*FuncNames)["lle_X_atexit"] = lle_X_atexit;
+ (*FuncNames)["lle_X_exit"] = lle_X_exit;
+ (*FuncNames)["lle_X_abort"] = lle_X_abort;
+
+ (*FuncNames)["lle_X_printf"] = lle_X_printf;
+ (*FuncNames)["lle_X_sprintf"] = lle_X_sprintf;
+ (*FuncNames)["lle_X_sscanf"] = lle_X_sscanf;
+ (*FuncNames)["lle_X_scanf"] = lle_X_scanf;
+ (*FuncNames)["lle_X_fprintf"] = lle_X_fprintf;
+ (*FuncNames)["lle_X_memset"] = lle_X_memset;
+ (*FuncNames)["lle_X_memcpy"] = lle_X_memcpy;
+}
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
new file mode 100644
index 0000000000000..5727f7adb49c3
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -0,0 +1,102 @@
+//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top-level functionality for the LLVM interpreter.
+// This interpreter is designed to be a very simple, portable, inefficient
+// interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include <cstring>
+using namespace llvm;
+
+namespace {
+
+static struct RegisterInterp {
+ RegisterInterp() { Interpreter::Register(); }
+} InterpRegistrator;
+
+}
+
+extern "C" void LLVMLinkInInterpreter() { }
+
+/// Create a new interpreter object.
+///
+ExecutionEngine *Interpreter::create(std::unique_ptr<Module> M,
+ std::string *ErrStr) {
+ // Tell this Module to materialize everything and release the GVMaterializer.
+ if (Error Err = M->materializeAll()) {
+ std::string Msg;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ Msg = EIB.message();
+ });
+ if (ErrStr)
+ *ErrStr = Msg;
+    // We got an error; just return nullptr.
+ return nullptr;
+ }
+
+ return new Interpreter(std::move(M));
+}
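+
+// Clients normally reach this through EngineBuilder rather than calling
+// create() directly. A minimal sketch (MainFn and Args stand in for values
+// the caller already has):
+//
+//   EngineBuilder EB(std::move(M));
+//   ExecutionEngine *EE =
+//       EB.setEngineKind(EngineKind::Interpreter).create();
+//   GenericValue Ret = EE->runFunction(MainFn, Args);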
+
+//===----------------------------------------------------------------------===//
+// Interpreter ctor - Initialize stuff
+//
+Interpreter::Interpreter(std::unique_ptr<Module> M)
+ : ExecutionEngine(std::move(M)) {
+
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ // Initialize the "backend"
+ initializeExecutionEngine();
+ initializeExternalFunctions();
+ emitGlobals();
+
+ IL = new IntrinsicLowering(getDataLayout());
+}
+
+Interpreter::~Interpreter() {
+ delete IL;
+}
+
+void Interpreter::runAtExitHandlers () {
+ while (!AtExitHandlers.empty()) {
+ callFunction(AtExitHandlers.back(), None);
+ AtExitHandlers.pop_back();
+ run();
+ }
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+GenericValue Interpreter::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
+ assert (F && "Function *F was null at entry to run()");
+
+ // Try extra hard not to pass extra args to a function that isn't
+ // expecting them. C programmers frequently bend the rules and
+ // declare main() with fewer parameters than it actually gets
+ // passed, and the interpreter barfs if you pass a function more
+ // parameters than it is declared to take. This does not attempt to
+ // take into account gratuitous differences in declared types,
+ // though.
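+  //
+  // For example, if main was declared as i32 @main() but is invoked with
+  // (argc, argv) values, the two extra arguments are silently dropped here.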
+ const size_t ArgCount = F->getFunctionType()->getNumParams();
+ ArrayRef<GenericValue> ActualArgs =
+ ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
+
+ // Set up the function call.
+ callFunction(F, ActualArgs);
+
+ // Start executing the function.
+ run();
+
+ return ExitValue;
+}
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
new file mode 100644
index 0000000000000..e72d778317d6b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -0,0 +1,235 @@
+//===-- Interpreter.h ------------------------------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines the interpreter structure
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+#define LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+namespace llvm {
+
+class IntrinsicLowering;
+template<typename T> class generic_gep_type_iterator;
+class ConstantExpr;
+typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
+
+
+// AllocaHolder - Object to track all of the blocks of memory allocated by
+// alloca. When the function returns, this object is popped off the execution
+// stack, which causes the dtor to be run, which frees all the alloca'd memory.
+//
+class AllocaHolder {
+ std::vector<void *> Allocations;
+
+public:
+ AllocaHolder() {}
+
+ // Make this type move-only.
+ AllocaHolder(AllocaHolder &&) = default;
+ AllocaHolder &operator=(AllocaHolder &&RHS) = default;
+
+ ~AllocaHolder() {
+ for (void *Allocation : Allocations)
+ free(Allocation);
+ }
+
+ void add(void *Mem) { Allocations.push_back(Mem); }
+};
+
+typedef std::vector<GenericValue> ValuePlaneTy;
+
+// ExecutionContext struct - This struct represents one stack frame currently
+// executing.
+//
+struct ExecutionContext {
+ Function *CurFunction;// The currently executing function
+ BasicBlock *CurBB; // The currently executing BB
+ BasicBlock::iterator CurInst; // The next instruction to execute
+ CallSite Caller; // Holds the call that called subframes.
+ // NULL if main func or debugger invoked fn
+ std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
+ std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
+ AllocaHolder Allocas; // Track memory allocated by alloca
+
+ ExecutionContext() : CurFunction(nullptr), CurBB(nullptr), CurInst(nullptr) {}
+};
+
+// Interpreter - This class represents the entirety of the interpreter.
+//
+class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
+ GenericValue ExitValue; // The return value of the called function
+ IntrinsicLowering *IL;
+
+ // The runtime stack of executing code. The top of the stack is the current
+ // function record.
+ std::vector<ExecutionContext> ECStack;
+
+ // AtExitHandlers - List of functions to call when the program exits,
+ // registered with the atexit() library function.
+ std::vector<Function*> AtExitHandlers;
+
+public:
+ explicit Interpreter(std::unique_ptr<Module> M);
+ ~Interpreter() override;
+
+ /// runAtExitHandlers - Run any functions registered by the program's calls to
+ /// atexit(3), which we intercept and store in AtExitHandlers.
+ ///
+ void runAtExitHandlers();
+
+ static void Register() {
+ InterpCtor = create;
+ }
+
+ /// Create an interpreter ExecutionEngine.
+ ///
+ static ExecutionEngine *create(std::unique_ptr<Module> M,
+ std::string *ErrorStr = nullptr);
+
+ /// run - Start execution with the specified function and arguments.
+ ///
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override {
+ // FIXME: not implemented.
+ return nullptr;
+ }
+
+ // Methods used to execute code:
+ // Place a call on the stack
+ void callFunction(Function *F, ArrayRef<GenericValue> ArgVals);
+ void run(); // Execute instructions until nothing left to do
+
+ // Opcode Implementations
+ void visitReturnInst(ReturnInst &I);
+ void visitBranchInst(BranchInst &I);
+ void visitSwitchInst(SwitchInst &I);
+ void visitIndirectBrInst(IndirectBrInst &I);
+
+ void visitUnaryOperator(UnaryOperator &I);
+ void visitBinaryOperator(BinaryOperator &I);
+ void visitICmpInst(ICmpInst &I);
+ void visitFCmpInst(FCmpInst &I);
+ void visitAllocaInst(AllocaInst &I);
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitGetElementPtrInst(GetElementPtrInst &I);
+ void visitPHINode(PHINode &PN) {
+ llvm_unreachable("PHI nodes already handled!");
+ }
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitSelectInst(SelectInst &I);
+
+
+ void visitCallSite(CallSite CS);
+ void visitCallInst(CallInst &I) { visitCallSite (CallSite (&I)); }
+ void visitInvokeInst(InvokeInst &I) { visitCallSite (CallSite (&I)); }
+ void visitUnreachableInst(UnreachableInst &I);
+
+ void visitShl(BinaryOperator &I);
+ void visitLShr(BinaryOperator &I);
+ void visitAShr(BinaryOperator &I);
+
+ void visitVAArgInst(VAArgInst &I);
+ void visitExtractElementInst(ExtractElementInst &I);
+ void visitInsertElementInst(InsertElementInst &I);
+ void visitShuffleVectorInst(ShuffleVectorInst &I);
+
+ void visitExtractValueInst(ExtractValueInst &I);
+ void visitInsertValueInst(InsertValueInst &I);
+
+ void visitInstruction(Instruction &I) {
+ errs() << I << "\n";
+ llvm_unreachable("Instruction not interpretable yet!");
+ }
+
+ GenericValue callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals);
+ void exitCalled(GenericValue GV);
+
+ void addAtExitHandler(Function *F) {
+ AtExitHandlers.push_back(F);
+ }
+
+ GenericValue *getFirstVarArg () {
+ return &(ECStack.back ().VarArgs[0]);
+ }
+
+private: // Helper functions
+ GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E, ExecutionContext &SF);
+
+ // SwitchToNewBasicBlock - Start execution in a new basic block and run any
+ // PHI nodes in the top of the block. This is used for intraprocedural
+ // control flow.
+ //
+ void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
+
+ void *getPointerToFunction(Function *F) override { return (void*)F; }
+
+ void initializeExecutionEngine() { }
+ void initializeExternalFunctions();
+ GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
+ GenericValue getOperandValue(Value *V, ExecutionContext &SF);
+ GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
+ Type *Ty, ExecutionContext &SF);
+ void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h b/llvm/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h
new file mode 100644
index 0000000000000..b47a798c7603b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h
@@ -0,0 +1,81 @@
+//===--- BasicGOTAndStubsBuilder.h - Generic GOT/Stub creation --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A base for simple GOT and stub creation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_BASICGOTANDSTUBSBUILDER_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_BASICGOTANDSTUBSBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+namespace llvm {
+namespace jitlink {
+
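+// BuilderImpl is a CRTP subclass expected to provide the hooks used below:
+// isGOTEdge/fixGOTEdge/createGOTEntry for GOT entries, and
+// isExternalBranchEdge/fixExternalBranchEdge/createStub for stubs.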
+template <typename BuilderImpl> class BasicGOTAndStubsBuilder {
+public:
+ BasicGOTAndStubsBuilder(LinkGraph &G) : G(G) {}
+
+ void run() {
+ // We're going to be adding new blocks, but we don't want to iterate over
+ // the newly added ones, so just copy the existing blocks out.
+ std::vector<Block *> Blocks(G.blocks().begin(), G.blocks().end());
+
+ for (auto *B : Blocks)
+ for (auto &E : B->edges())
+ if (impl().isGOTEdge(E))
+ impl().fixGOTEdge(E, getGOTEntrySymbol(E.getTarget()));
+ else if (impl().isExternalBranchEdge(E))
+ impl().fixExternalBranchEdge(E, getStubSymbol(E.getTarget()));
+ }
+
+protected:
+ Symbol &getGOTEntrySymbol(Symbol &Target) {
+ assert(Target.hasName() && "GOT edge cannot point to anonymous target");
+
+ auto GOTEntryI = GOTEntries.find(Target.getName());
+
+ // Build the entry if it doesn't exist.
+ if (GOTEntryI == GOTEntries.end()) {
+ auto &GOTEntry = impl().createGOTEntry(Target);
+ GOTEntryI =
+ GOTEntries.insert(std::make_pair(Target.getName(), &GOTEntry)).first;
+ }
+
+ assert(GOTEntryI != GOTEntries.end() && "Could not get GOT entry symbol");
+ return *GOTEntryI->second;
+ }
+
+ Symbol &getStubSymbol(Symbol &Target) {
+    assert(Target.hasName() &&
+           "External branch edge cannot point to an anonymous target");
+ auto StubI = Stubs.find(Target.getName());
+
+ if (StubI == Stubs.end()) {
+ auto &StubSymbol = impl().createStub(Target);
+ StubI = Stubs.insert(std::make_pair(Target.getName(), &StubSymbol)).first;
+ }
+
+ assert(StubI != Stubs.end() && "Count not get stub symbol");
+ return *StubI->second;
+ }
+
+ LinkGraph &G;
+
+private:
+ BuilderImpl &impl() { return static_cast<BuilderImpl &>(*this); }
+
+ DenseMap<StringRef, Symbol *> GOTEntries;
+ DenseMap<StringRef, Symbol *> Stubs;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_BASICGOTANDSTUBSBUILDER_H
diff --git a/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp b/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
new file mode 100644
index 0000000000000..f80b0e7f89095
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
@@ -0,0 +1,538 @@
+//===---------- EHFrameSupport.cpp - JITLink eh-frame utils ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "EHFrameSupportImpl.h"
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+EHFrameBinaryParser::EHFrameBinaryParser(JITTargetAddress EHFrameAddress,
+ StringRef EHFrameContent,
+ unsigned PointerSize,
+ support::endianness Endianness)
+ : EHFrameAddress(EHFrameAddress), EHFrameContent(EHFrameContent),
+ PointerSize(PointerSize), EHFrameReader(EHFrameContent, Endianness) {}
+
+Error EHFrameBinaryParser::addToGraph() {
+ while (!EHFrameReader.empty()) {
+ size_t RecordOffset = EHFrameReader.getOffset();
+
+ LLVM_DEBUG({
+ dbgs() << "Processing eh-frame record at "
+ << format("0x%016" PRIx64, EHFrameAddress + RecordOffset)
+ << " (offset " << RecordOffset << ")\n";
+ });
+
+ size_t RecordLength = 0;
+ uint32_t RecordLengthField;
+ if (auto Err = EHFrameReader.readInteger(RecordLengthField))
+ return Err;
+
+ // Process CIE/FDE length/extended-length fields to build the blocks.
+ //
+ // The values of these fields describe the length of the *rest* of the
+ // record (not including the length field itself), so we have to bump
+ // RecordLength to include the data up to the end of the field: 4 bytes
+ // for Length, or 12 bytes (4 bytes + 8 bytes) for ExtendedLength.
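+ // For example (illustrative): a Length field of 0x14 describes a record
+ // occupying 0x14 + 4 = 0x18 bytes in total, while an ExtendedLength field
+ // of 0x14 describes a record occupying 0x14 + 12 = 0x20 bytes in total.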
+ if (RecordLengthField == 0) // Length 0 means end of __eh_frame section.
+ break;
+
+ // If the regular length field's value is 0xffffffff, use extended length.
+ if (RecordLengthField == 0xffffffff) {
+ uint64_t ExtendedLengthField;
+ if (auto Err = EHFrameReader.readInteger(ExtendedLengthField))
+ return Err;
+ if (ExtendedLengthField > EHFrameReader.bytesRemaining())
+ return make_error<JITLinkError>("CIE record extends past the end of "
+ "the __eh_frame section");
+ if (ExtendedLengthField + 12 > std::numeric_limits<size_t>::max())
+ return make_error<JITLinkError>("CIE record too large to process");
+ RecordLength = ExtendedLengthField + 12;
+ } else {
+ if (RecordLengthField > EHFrameReader.bytesRemaining())
+ return make_error<JITLinkError>("CIE record extends past the end of "
+ "the __eh_frame section");
+ RecordLength = RecordLengthField + 4;
+ }
+
+ LLVM_DEBUG(dbgs() << " length: " << RecordLength << "\n");
+
+ // Read the CIE Pointer.
+ size_t CIEPointerAddress = EHFrameAddress + EHFrameReader.getOffset();
+ uint32_t CIEPointer;
+ if (auto Err = EHFrameReader.readInteger(CIEPointer))
+ return Err;
+
+ // Based on the CIE pointer value, parse this as a CIE or FDE record.
+ if (CIEPointer == 0) {
+ if (auto Err = processCIE(RecordOffset, RecordLength))
+ return Err;
+ } else {
+ if (auto Err = processFDE(RecordOffset, RecordLength, CIEPointerAddress,
+ CIEPointer))
+ return Err;
+ }
+
+ EHFrameReader.setOffset(RecordOffset + RecordLength);
+ }
+
+ return Error::success();
+}
+
+void EHFrameBinaryParser::anchor() {}
+
+Expected<EHFrameBinaryParser::AugmentationInfo>
+EHFrameBinaryParser::parseAugmentationString() {
+ AugmentationInfo AugInfo;
+ uint8_t NextChar;
+ uint8_t *NextField = &AugInfo.Fields[0];
+
+ if (auto Err = EHFrameReader.readInteger(NextChar))
+ return std::move(Err);
+
+ while (NextChar != 0) {
+ switch (NextChar) {
+ case 'z':
+ AugInfo.AugmentationDataPresent = true;
+ break;
+ case 'e':
+ if (auto Err = EHFrameReader.readInteger(NextChar))
+ return std::move(Err);
+ if (NextChar != 'h')
+ return make_error<JITLinkError>("Unrecognized substring e" +
+ Twine(NextChar) +
+ " in augmentation string");
+ AugInfo.EHDataFieldPresent = true;
+ break;
+ case 'L':
+ case 'P':
+ case 'R':
+ *NextField++ = NextChar;
+ break;
+ default:
+ return make_error<JITLinkError>("Unrecognized character " +
+ Twine(NextChar) +
+ " in augmentation string");
+ }
+
+ if (auto Err = EHFrameReader.readInteger(NextChar))
+ return std::move(Err);
+ }
+
+ return std::move(AugInfo);
+}
+
+Expected<JITTargetAddress> EHFrameBinaryParser::readAbsolutePointer() {
+ static_assert(sizeof(JITTargetAddress) == sizeof(uint64_t),
+ "Result must be able to hold a uint64_t");
+ JITTargetAddress Addr;
+ if (PointerSize == 8) {
+ if (auto Err = EHFrameReader.readInteger(Addr))
+ return std::move(Err);
+ } else if (PointerSize == 4) {
+ uint32_t Addr32;
+ if (auto Err = EHFrameReader.readInteger(Addr32))
+ return std::move(Err);
+ Addr = Addr32;
+ } else
+ llvm_unreachable("Pointer size is not 32-bit or 64-bit");
+ return Addr;
+}
+
+Error EHFrameBinaryParser::processCIE(size_t RecordOffset,
+ size_t RecordLength) {
+ // Use the dwarf namespace for convenient access to pointer encoding
+ // constants.
+ using namespace dwarf;
+
+ LLVM_DEBUG(dbgs() << " Record is CIE\n");
+
+ auto &CIESymbol =
+ createCIERecord(EHFrameAddress + RecordOffset,
+ EHFrameContent.substr(RecordOffset, RecordLength));
+
+ CIEInformation CIEInfo(CIESymbol);
+
+ uint8_t Version = 0;
+ if (auto Err = EHFrameReader.readInteger(Version))
+ return Err;
+
+ if (Version != 0x01)
+ return make_error<JITLinkError>("Bad CIE version " + Twine(Version) +
+ " (should be 0x01) in eh-frame");
+
+ auto AugInfo = parseAugmentationString();
+ if (!AugInfo)
+ return AugInfo.takeError();
+
+ // Skip the EH Data field if present.
+ if (AugInfo->EHDataFieldPresent)
+ if (auto Err = EHFrameReader.skip(PointerSize))
+ return Err;
+
+ // Read and sanity check the code alignment factor.
+ {
+ uint64_t CodeAlignmentFactor = 0;
+ if (auto Err = EHFrameReader.readULEB128(CodeAlignmentFactor))
+ return Err;
+ if (CodeAlignmentFactor != 1)
+ return make_error<JITLinkError>("Unsupported CIE code alignment factor " +
+ Twine(CodeAlignmentFactor) +
+ " (expected 1)");
+ }
+
+ // Read and sanity check the data alignment factor.
+ {
+ int64_t DataAlignmentFactor = 0;
+ if (auto Err = EHFrameReader.readSLEB128(DataAlignmentFactor))
+ return Err;
+ if (DataAlignmentFactor != -8)
+ return make_error<JITLinkError>("Unsupported CIE data alignment factor " +
+ Twine(DataAlignmentFactor) +
+ " (expected -8)");
+ }
+
+ // Skip the return address register field.
+ if (auto Err = EHFrameReader.skip(1))
+ return Err;
+
+ uint64_t AugmentationDataLength = 0;
+ if (auto Err = EHFrameReader.readULEB128(AugmentationDataLength))
+ return Err;
+
+ uint32_t AugmentationDataStartOffset = EHFrameReader.getOffset();
+
+ uint8_t *NextField = &AugInfo->Fields[0];
+ while (uint8_t Field = *NextField++) {
+ switch (Field) {
+ case 'L': {
+ CIEInfo.FDEsHaveLSDAField = true;
+ uint8_t LSDAPointerEncoding;
+ if (auto Err = EHFrameReader.readInteger(LSDAPointerEncoding))
+ return Err;
+ if (LSDAPointerEncoding != (DW_EH_PE_pcrel | DW_EH_PE_absptr))
+ return make_error<JITLinkError>(
+ "Unsupported LSDA pointer encoding " +
+ formatv("{0:x2}", LSDAPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ break;
+ }
+ case 'P': {
+ uint8_t PersonalityPointerEncoding = 0;
+ if (auto Err = EHFrameReader.readInteger(PersonalityPointerEncoding))
+ return Err;
+ if (PersonalityPointerEncoding !=
+ (DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4))
+ return make_error<JITLinkError>(
+ "Unspported personality pointer "
+ "encoding " +
+ formatv("{0:x2}", PersonalityPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ uint32_t PersonalityPointerAddress;
+ if (auto Err = EHFrameReader.readInteger(PersonalityPointerAddress))
+ return Err;
+ break;
+ }
+ case 'R': {
+ uint8_t FDEPointerEncoding;
+ if (auto Err = EHFrameReader.readInteger(FDEPointerEncoding))
+ return Err;
+ if (FDEPointerEncoding != (DW_EH_PE_pcrel | DW_EH_PE_absptr))
+ return make_error<JITLinkError>(
+ "Unsupported FDE address pointer "
+ "encoding " +
+ formatv("{0:x2}", FDEPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid augmentation string field");
+ }
+ }
+
+ if (EHFrameReader.getOffset() - AugmentationDataStartOffset >
+ AugmentationDataLength)
+ return make_error<JITLinkError>("Read past the end of the augmentation "
+ "data while parsing fields");
+
+ assert(!CIEInfos.count(CIESymbol.getAddress()) &&
+ "Multiple CIEs recorded at the same address?");
+ CIEInfos[CIESymbol.getAddress()] = std::move(CIEInfo);
+
+ return Error::success();
+}
+
+Error EHFrameBinaryParser::processFDE(size_t RecordOffset, size_t RecordLength,
+ JITTargetAddress CIEPointerAddress,
+ uint32_t CIEPointer) {
+ LLVM_DEBUG(dbgs() << " Record is FDE\n");
+
+ LLVM_DEBUG({
+ dbgs() << " CIE pointer: "
+ << format("0x%016" PRIx64, CIEPointerAddress - CIEPointer) << "\n";
+ });
+
+ auto CIEInfoItr = CIEInfos.find(CIEPointerAddress - CIEPointer);
+ if (CIEInfoItr == CIEInfos.end())
+ return make_error<JITLinkError>(
+ "FDE at " + formatv("{0:x16}", EHFrameAddress + RecordOffset) +
+ " points to non-existant CIE at " +
+ formatv("{0:x16}", CIEPointerAddress - CIEPointer));
+ auto &CIEInfo = CIEInfoItr->second;
+
+ // Read and sanity check the PC-start pointer and size.
+ JITTargetAddress PCBeginAddress = EHFrameAddress + EHFrameReader.getOffset();
+
+ auto PCBeginDelta = readAbsolutePointer();
+ if (!PCBeginDelta)
+ return PCBeginDelta.takeError();
+
+ JITTargetAddress PCBegin = PCBeginAddress + *PCBeginDelta;
+ LLVM_DEBUG({
+ dbgs() << " PC begin: " << format("0x%016" PRIx64, PCBegin) << "\n";
+ });
+
+ auto *TargetSymbol = getSymbolAtAddress(PCBegin);
+
+ if (!TargetSymbol)
+ return make_error<JITLinkError>("FDE PC-begin " +
+ formatv("{0:x16}", PCBegin) +
+ " does not point at symbol");
+
+ if (TargetSymbol->getAddress() != PCBegin)
+ return make_error<JITLinkError>(
+ "FDE PC-begin " + formatv("{0:x16}", PCBegin) +
+ " does not point to start of symbol at " +
+ formatv("{0:x16}", TargetSymbol->getAddress()));
+
+ LLVM_DEBUG(dbgs() << " FDE target: " << *TargetSymbol << "\n");
+
+ // Skip over the PC range size field.
+ if (auto Err = EHFrameReader.skip(PointerSize))
+ return Err;
+
+ Symbol *LSDASymbol = nullptr;
+ JITTargetAddress LSDAAddress = 0;
+ if (CIEInfo.FDEsHaveLSDAField) {
+ uint64_t AugmentationDataSize;
+ if (auto Err = EHFrameReader.readULEB128(AugmentationDataSize))
+ return Err;
+ if (AugmentationDataSize != PointerSize)
+ return make_error<JITLinkError>(
+ "Unexpected FDE augmentation data size (expected " +
+ Twine(PointerSize) + ", got " + Twine(AugmentationDataSize) +
+ ") for FDE at " + formatv("{0:x16}", EHFrameAddress + RecordOffset));
+ LSDAAddress = EHFrameAddress + EHFrameReader.getOffset();
+ auto LSDADelta = readAbsolutePointer();
+ if (!LSDADelta)
+ return LSDADelta.takeError();
+
+ JITTargetAddress LSDA = LSDAAddress + *LSDADelta;
+
+ LSDASymbol = getSymbolAtAddress(LSDA);
+
+ if (!LSDASymbol)
+ return make_error<JITLinkError>("FDE LSDA " + formatv("{0:x16}", LSDA) +
+ " does not point at symbol");
+
+ if (LSDASymbol->getAddress() != LSDA)
+ return make_error<JITLinkError>(
+ "FDE LSDA " + formatv("{0:x16}", LSDA) +
+ " does not point to start of symbol at " +
+ formatv("{0:x16}", LSDASymbol->getAddress()));
+
+ LLVM_DEBUG(dbgs() << " FDE LSDA: " << *LSDASymbol << "\n");
+ }
+
+ JITTargetAddress RecordAddress = EHFrameAddress + RecordOffset;
+ auto FDESymbol = createFDERecord(
+ RecordAddress, EHFrameContent.substr(RecordOffset, RecordLength),
+ *CIEInfo.CIESymbol, CIEPointerAddress - RecordAddress, *TargetSymbol,
+ PCBeginAddress - RecordAddress, LSDASymbol, LSDAAddress - RecordAddress);
+
+ return FDESymbol.takeError();
+}
+
+// Determine whether we can register EH tables.
+#if (defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__ia64__) && \
+ !(defined(_AIX) && defined(__ibmxl__)) && !defined(__SEH__) && \
+ !defined(__USING_SJLJ_EXCEPTIONS__))
+#define HAVE_EHTABLE_SUPPORT 1
+#else
+#define HAVE_EHTABLE_SUPPORT 0
+#endif
+
+#if HAVE_EHTABLE_SUPPORT
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
+Error registerFrameWrapper(const void *P) {
+ __register_frame(P);
+ return Error::success();
+}
+
+Error deregisterFrameWrapper(const void *P) {
+ __deregister_frame(P);
+ return Error::success();
+}
+
+#else
+
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MinGW runtime.
+static Error registerFrameWrapper(const void *P) {
+ static void (*RegisterFrame)(const void *) = nullptr;
+
+ if (!RegisterFrame)
+ *(void **)&RegisterFrame =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+
+ if (RegisterFrame) {
+ RegisterFrame(P);
+ return Error::success();
+ }
+
+ return make_error<JITLinkError>("could not register eh-frame: "
+ "__register_frame function not found");
+}
+
+static Error deregisterFrameWrapper(const void *P) {
+ static void (*DeregisterFrame)(const void *) = nullptr;
+
+ if (!DeregisterFrame)
+ *(void **)&DeregisterFrame =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+ "__deregister_frame");
+
+ if (DeregisterFrame) {
+ DeregisterFrame(P);
+ return Error::success();
+ }
+
+ return make_error<JITLinkError>("could not deregister eh-frame: "
+ "__deregister_frame function not found");
+}
+#endif
+
+#ifdef __APPLE__
+
+template <typename HandleFDEFn>
+Error walkAppleEHFrameSection(const char *const SectionStart,
+ size_t SectionSize,
+ HandleFDEFn HandleFDE) {
+ const char *CurCFIRecord = SectionStart;
+ const char *End = SectionStart + SectionSize;
+ uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+
+ while (CurCFIRecord != End && Size != 0) {
+ const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
+ if (Size == 0xffffffff)
+ Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
+ else
+ Size += 4;
+ uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);
+ if (Offset != 0)
+ if (auto Err = HandleFDE(CurCFIRecord))
+ return Err;
+
+ LLVM_DEBUG({
+ dbgs() << "Registering eh-frame section:\n";
+ dbgs() << "Processing " << (Offset ? "FDE" : "CIE") << " @"
+ << (void *)CurCFIRecord << ": [";
+ for (unsigned I = 0; I < Size; ++I)
+ dbgs() << format(" 0x%02" PRIx8, *(CurCFIRecord + I));
+ dbgs() << " ]\n";
+ });
+ CurCFIRecord += Size;
+
+ Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+ }
+
+ return Error::success();
+}
+
+#endif // __APPLE__
+
+Error registerEHFrameSection(const void *EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+#ifdef __APPLE__
+ // On Darwin __register_frame has to be called for each FDE entry.
+ return walkAppleEHFrameSection(static_cast<const char *>(EHFrameSectionAddr),
+ EHFrameSectionSize,
+ registerFrameWrapper);
+#else
+ // On Linux, __register_frame takes a single argument:
+ // a pointer to the start of the .eh_frame section.
+
+ // How does it find the end? Because crtendS.o is linked in, and it
+ // provides an .eh_frame section that ends with four zero bytes.
+ return registerFrameWrapper(EHFrameSectionAddr);
+#endif
+}
+
+Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+#ifdef __APPLE__
+ return walkAppleEHFrameSection(static_cast<const char *>(EHFrameSectionAddr),
+ EHFrameSectionSize,
+ deregisterFrameWrapper);
+#else
+ return deregisterFrameWrapper(EHFrameSectionAddr);
+#endif
+}
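+
+// Example (illustrative): a client that captured the eh-frame section range
+// at link time, say in hypothetical EHFrameAddr/EHFrameSize variables, could
+// enable exception handling for the linked code with:
+//
+//   if (auto Err = registerEHFrameSection(EHFrameAddr, EHFrameSize))
+//     return Err;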
+
+EHFrameRegistrar::~EHFrameRegistrar() {}
+
+InProcessEHFrameRegistrar &InProcessEHFrameRegistrar::getInstance() {
+ static InProcessEHFrameRegistrar Instance;
+ return Instance;
+}
+
+InProcessEHFrameRegistrar::InProcessEHFrameRegistrar() {}
+
+LinkGraphPassFunction
+createEHFrameRecorderPass(const Triple &TT,
+ StoreFrameRangeFunction StoreRangeAddress) {
+ const char *EHFrameSectionName = nullptr;
+ if (TT.getObjectFormat() == Triple::MachO)
+ EHFrameSectionName = "__eh_frame";
+ else
+ EHFrameSectionName = ".eh_frame";
+
+ auto RecordEHFrame =
+ [EHFrameSectionName,
+ StoreFrameRange = std::move(StoreRangeAddress)](LinkGraph &G) -> Error {
+ // Search for a non-empty eh-frame and record the address of the first
+ // symbol in it.
+ JITTargetAddress Addr = 0;
+ size_t Size = 0;
+ if (auto *S = G.findSectionByName(EHFrameSectionName)) {
+ auto R = SectionRange(*S);
+ Addr = R.getStart();
+ Size = R.getSize();
+ }
+ if (Addr == 0 && Size != 0)
+ return make_error<JITLinkError>("__eh_frame section can not have zero "
+ "address with non-zero size");
+ StoreFrameRange(Addr, Size);
+ return Error::success();
+ };
+
+ return RecordEHFrame;
+}
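+
+// Example (illustrative): a client would typically install the recorder as a
+// post-fixup pass and stash the reported range, e.g. (PassConfig, EHFrameAddr
+// and EHFrameSize are hypothetical client-side names):
+//
+//   PassConfig.PostFixupPasses.push_back(createEHFrameRecorderPass(
+//       TT, [&](JITTargetAddress Addr, size_t Size) {
+//         EHFrameAddr = Addr;
+//         EHFrameSize = Size;
+//       }));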
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h b/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
new file mode 100644
index 0000000000000..6f9f68ad8382c
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
@@ -0,0 +1,78 @@
+//===------- EHFrameSupportImpl.h - JITLink eh-frame utils ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// EHFrame registration support for JITLink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/BinaryStreamReader.h"
+
+namespace llvm {
+namespace jitlink {
+
+/// A generic binary parser for eh-frame sections.
+///
+/// Adds blocks and symbols representing CIE and FDE entries to a JITLink graph.
+///
+/// This parser assumes that the user has already verified that the EH-frame's
+/// address range does not overlap any other section/symbol, so that generated
+/// CIE/FDE records do not overlap other sections/symbols.
+class EHFrameBinaryParser {
+public:
+ EHFrameBinaryParser(JITTargetAddress EHFrameAddress, StringRef EHFrameContent,
+ unsigned PointerSize, support::endianness Endianness);
+ virtual ~EHFrameBinaryParser() {}
+
+ Error addToGraph();
+
+private:
+ virtual void anchor();
+ virtual Symbol *getSymbolAtAddress(JITTargetAddress Addr) = 0;
+ virtual Symbol &createCIERecord(JITTargetAddress RecordAddr,
+ StringRef RecordContent) = 0;
+ virtual Expected<Symbol &>
+ createFDERecord(JITTargetAddress RecordAddr, StringRef RecordContent,
+ Symbol &CIE, size_t CIEOffset, Symbol &Func,
+ size_t FuncOffset, Symbol *LSDA, size_t LSDAOffset) = 0;
+
+ struct AugmentationInfo {
+ bool AugmentationDataPresent = false;
+ bool EHDataFieldPresent = false;
+ uint8_t Fields[4] = {0x0, 0x0, 0x0, 0x0};
+ };
+
+ Expected<AugmentationInfo> parseAugmentationString();
+ Expected<JITTargetAddress> readAbsolutePointer();
+ Error processCIE(size_t RecordOffset, size_t RecordLength);
+ Error processFDE(size_t RecordOffset, size_t RecordLength,
+ JITTargetAddress CIEPointerOffset, uint32_t CIEPointer);
+
+ struct CIEInformation {
+ CIEInformation() = default;
+ CIEInformation(Symbol &CIESymbol) : CIESymbol(&CIESymbol) {}
+ Symbol *CIESymbol = nullptr;
+ bool FDEsHaveLSDAField = false;
+ };
+
+ JITTargetAddress EHFrameAddress;
+ StringRef EHFrameContent;
+ unsigned PointerSize;
+ BinaryStreamReader EHFrameReader;
+ DenseMap<JITTargetAddress, CIEInformation> CIEInfos;
+};
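+
+// Example (illustrative): a format-specific parser subclasses this and
+// implements the pure virtual hooks to tie records back into the graph,
+// e.g. a hypothetical:
+//
+//   class MyEHFrameParser : public EHFrameBinaryParser {
+//     Symbol *getSymbolAtAddress(JITTargetAddress Addr) override;
+//     Symbol &createCIERecord(JITTargetAddress RecordAddr,
+//                             StringRef RecordContent) override;
+//     Expected<Symbol &> createFDERecord(...) override; // as declared above
+//   };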
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
new file mode 100644
index 0000000000000..1e19038951ac2
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -0,0 +1,228 @@
+//===------------- JITLink.cpp - Core Run-time JIT linker APIs ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace {
+
+enum JITLinkErrorCode { GenericJITLinkError = 1 };
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class JITLinkerErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "runtimedyld"; }
+
+ std::string message(int Condition) const override {
+ switch (static_cast<JITLinkErrorCode>(Condition)) {
+ case GenericJITLinkError:
+ return "Generic JITLink error";
+ }
+ llvm_unreachable("Unrecognized JITLinkErrorCode");
+ }
+};
+
+static ManagedStatic<JITLinkerErrorCategory> JITLinkerErrorCategory;
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+char JITLinkError::ID = 0;
+
+void JITLinkError::log(raw_ostream &OS) const { OS << ErrMsg << "\n"; }
+
+std::error_code JITLinkError::convertToErrorCode() const {
+ return std::error_code(GenericJITLinkError, *JITLinkerErrorCategory);
+}
+
+const char *getGenericEdgeKindName(Edge::Kind K) {
+ switch (K) {
+ case Edge::Invalid:
+ return "INVALID RELOCATION";
+ case Edge::KeepAlive:
+ return "Keep-Alive";
+ default:
+ llvm_unreachable("Unrecognized relocation kind");
+ }
+}
+
+const char *getLinkageName(Linkage L) {
+ switch (L) {
+ case Linkage::Strong:
+ return "strong";
+ case Linkage::Weak:
+ return "weak";
+ }
+ llvm_unreachable("Unrecognized llvm.jitlink.Linkage enum");
+}
+
+const char *getScopeName(Scope S) {
+ switch (S) {
+ case Scope::Default:
+ return "default";
+ case Scope::Hidden:
+ return "hidden";
+ case Scope::Local:
+ return "local";
+ }
+ llvm_unreachable("Unrecognized llvm.jitlink.Scope enum");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Block &B) {
+ return OS << formatv("{0:x16}", B.getAddress()) << " -- "
+ << formatv("{0:x16}", B.getAddress() + B.getSize()) << ": "
+ << (B.isZeroFill() ? "zero-fill" : "content")
+ << ", align = " << B.getAlignment()
+ << ", align-ofs = " << B.getAlignmentOffset()
+ << ", section = " << B.getSection().getName();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Symbol &Sym) {
+ OS << "<";
+ if (Sym.getName().empty())
+ OS << "*anon*";
+ else
+ OS << Sym.getName();
+ OS << ": flags = ";
+ switch (Sym.getLinkage()) {
+ case Linkage::Strong:
+ OS << 'S';
+ break;
+ case Linkage::Weak:
+ OS << 'W';
+ break;
+ }
+ switch (Sym.getScope()) {
+ case Scope::Default:
+ OS << 'D';
+ break;
+ case Scope::Hidden:
+ OS << 'H';
+ break;
+ case Scope::Local:
+ OS << 'L';
+ break;
+ }
+ OS << (Sym.isLive() ? '+' : '-')
+ << ", size = " << formatv("{0:x8}", Sym.getSize())
+ << ", addr = " << formatv("{0:x16}", Sym.getAddress()) << " ("
+ << formatv("{0:x16}", Sym.getAddressable().getAddress()) << " + "
+ << formatv("{0:x8}", Sym.getOffset());
+ if (Sym.isDefined())
+ OS << " " << Sym.getBlock().getSection().getName();
+ OS << ")>";
+ return OS;
+}
+
+void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
+ StringRef EdgeKindName) {
+ OS << "edge@" << formatv("{0:x16}", B.getAddress() + E.getOffset()) << ": "
+ << formatv("{0:x16}", B.getAddress()) << " + " << E.getOffset() << " -- "
+ << EdgeKindName << " -> " << E.getTarget() << " + " << E.getAddend();
+}
+
+Section::~Section() {
+ for (auto *Sym : Symbols)
+ Sym->~Symbol();
+}
+
+LinkGraph::~LinkGraph() {
+ // Destroy blocks.
+ for (auto *B : Blocks)
+ B->~Block();
+}
+
+void LinkGraph::dump(raw_ostream &OS,
+ std::function<StringRef(Edge::Kind)> EdgeKindToName) {
+ if (!EdgeKindToName)
+ EdgeKindToName = [](Edge::Kind K) { return StringRef(); };
+
+ OS << "Symbols:\n";
+ for (auto *Sym : defined_symbols()) {
+ OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
+ << "\n";
+ if (Sym->isDefined()) {
+ for (auto &E : Sym->getBlock().edges()) {
+ OS << " ";
+ StringRef EdgeName = (E.getKind() < Edge::FirstRelocation
+ ? getGenericEdgeKindName(E.getKind())
+ : EdgeKindToName(E.getKind()));
+
+ if (!EdgeName.empty())
+ printEdge(OS, Sym->getBlock(), E, EdgeName);
+ else {
+ auto EdgeNumberString = std::to_string(E.getKind());
+ printEdge(OS, Sym->getBlock(), E, EdgeNumberString);
+ }
+ OS << "\n";
+ }
+ }
+ }
+
+ OS << "Absolute symbols:\n";
+ for (auto *Sym : absolute_symbols())
+ OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
+ << "\n";
+
+ OS << "External symbols:\n";
+ for (auto *Sym : external_symbols())
+ OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
+ << "\n";
+}
+
+void JITLinkAsyncLookupContinuation::anchor() {}
+
+JITLinkContext::~JITLinkContext() {}
+
+bool JITLinkContext::shouldAddDefaultTargetPasses(const Triple &TT) const {
+ return true;
+}
+
+LinkGraphPassFunction JITLinkContext::getMarkLivePass(const Triple &TT) const {
+ return LinkGraphPassFunction();
+}
+
+Error JITLinkContext::modifyPassConfig(const Triple &TT,
+ PassConfiguration &Config) {
+ return Error::success();
+}
+
+Error markAllSymbolsLive(LinkGraph &G) {
+ for (auto *Sym : G.defined_symbols())
+ Sym->setLive(true);
+ return Error::success();
+}
+
+void jitLink(std::unique_ptr<JITLinkContext> Ctx) {
+ auto Magic = identify_magic(Ctx->getObjectBuffer().getBuffer());
+ switch (Magic) {
+ case file_magic::macho_object:
+ return jitLink_MachO(std::move(Ctx));
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>("Unsupported file format"));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
new file mode 100644
index 0000000000000..d4270b5aa7967
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
@@ -0,0 +1,358 @@
+//===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utility class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JITLinkGeneric.h"
+
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkerBase::~JITLinkerBase() {}
+
+void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
+
+ // Build the link graph.
+ if (auto GraphOrErr = buildGraph(Ctx->getObjectBuffer()))
+ G = std::move(*GraphOrErr);
+ else
+ return Ctx->notifyFailed(GraphOrErr.takeError());
+ assert(G && "Graph should have been created by buildGraph above");
+
+ // Prune and optimize the graph.
+ if (auto Err = runPasses(Passes.PrePrunePasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" pre-pruning:\n";
+ dumpGraph(dbgs());
+ });
+
+ prune(*G);
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" post-pruning:\n";
+ dumpGraph(dbgs());
+ });
+
+ // Run post-pruning passes.
+ if (auto Err = runPasses(Passes.PostPrunePasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Sort blocks into segments.
+ auto Layout = layOutBlocks();
+
+ // Allocate memory for segments.
+ if (auto Err = allocateSegments(Layout))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Notify client that the defined symbols have been assigned addresses.
+ Ctx->notifyResolved(*G);
+
+ auto ExternalSymbols = getExternalSymbolNames();
+
+ // We're about to hand off ownership of ourself to the continuation. Grab a
+ // pointer to the context so that we can call it to initiate the lookup.
+ //
+ // FIXME: Once callee expressions are defined to be sequenced before argument
+ // expressions (c++17) we can simplify all this to:
+ //
+ // Ctx->lookup(std::move(UnresolvedExternals),
+ // [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
+ // Self->linkPhase2(std::move(Self), std::move(Result));
+ // });
+ auto *TmpCtx = Ctx.get();
+ TmpCtx->lookup(std::move(ExternalSymbols),
+ createLookupContinuation(
+ [S = std::move(Self), L = std::move(Layout)](
+ Expected<AsyncLookupResult> LookupResult) mutable {
+ auto &TmpSelf = *S;
+ TmpSelf.linkPhase2(std::move(S), std::move(LookupResult),
+ std::move(L));
+ }));
+}
+
+void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
+ Expected<AsyncLookupResult> LR,
+ SegmentLayoutMap Layout) {
+ // If the lookup failed, bail out.
+ if (!LR)
+ return deallocateAndBailOut(LR.takeError());
+
+ // Assign addresses to external addressables.
+ applyLookupResult(*LR);
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n";
+ dumpGraph(dbgs());
+ });
+
+ // Copy block content to working memory and fix up.
+ if (auto Err = copyAndFixUpBlocks(Layout, *Alloc))
+ return deallocateAndBailOut(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n";
+ dumpGraph(dbgs());
+ });
+
+ if (auto Err = runPasses(Passes.PostFixupPasses))
+ return deallocateAndBailOut(std::move(Err));
+
+ // FIXME: Use move capture once we have c++14.
+ auto *UnownedSelf = Self.release();
+ auto Phase3Continuation = [UnownedSelf](Error Err) {
+ std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
+ UnownedSelf->linkPhase3(std::move(Self), std::move(Err));
+ };
+
+ Alloc->finalizeAsync(std::move(Phase3Continuation));
+}
+
+void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err) {
+ if (Err)
+ return deallocateAndBailOut(std::move(Err));
+ Ctx->notifyFinalized(std::move(Alloc));
+}
+
+Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) {
+ for (auto &P : Passes)
+ if (auto Err = P(*G))
+ return Err;
+ return Error::success();
+}
+
+JITLinkerBase::SegmentLayoutMap JITLinkerBase::layOutBlocks() {
+
+ SegmentLayoutMap Layout;
+
+ // Partition blocks based on permissions and content vs. zero-fill.
+ for (auto *B : G->blocks()) {
+ auto &SegLists = Layout[B->getSection().getProtectionFlags()];
+ if (!B->isZeroFill())
+ SegLists.ContentBlocks.push_back(B);
+ else
+ SegLists.ZeroFillBlocks.push_back(B);
+ }
+
+ // Sort blocks within each list.
+ for (auto &KV : Layout) {
+
+ auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
+ if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
+ return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
+ return LHS->getOrdinal() < RHS->getOrdinal();
+ };
+
+ auto &SegLists = KV.second;
+ llvm::sort(SegLists.ContentBlocks, CompareBlocks);
+ llvm::sort(SegLists.ZeroFillBlocks, CompareBlocks);
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Segment ordering:\n";
+ for (auto &KV : Layout) {
+ dbgs() << " Segment "
+ << static_cast<sys::Memory::ProtectionFlags>(KV.first) << ":\n";
+ auto &SL = KV.second;
+ for (auto &SIEntry :
+ {std::make_pair(&SL.ContentBlocks, "content block"),
+ std::make_pair(&SL.ZeroFillBlocks, "zero-fill block")}) {
+ dbgs() << " " << SIEntry.second << ":\n";
+ for (auto *B : *SIEntry.first)
+ dbgs() << " " << *B << "\n";
+ }
+ }
+ });
+
+ return Layout;
+}
+
+Error JITLinkerBase::allocateSegments(const SegmentLayoutMap &Layout) {
+
+ // Compute segment sizes and allocate memory.
+ LLVM_DEBUG(dbgs() << "JIT linker requesting: { ");
+ JITLinkMemoryManager::SegmentsRequestMap Segments;
+ for (auto &KV : Layout) {
+ auto &Prot = KV.first;
+ auto &SegLists = KV.second;
+
+ uint64_t SegAlign = 1;
+
+ // Calculate segment content size.
+ size_t SegContentSize = 0;
+ for (auto *B : SegLists.ContentBlocks) {
+ SegAlign = std::max(SegAlign, B->getAlignment());
+ SegContentSize = alignToBlock(SegContentSize, *B);
+ SegContentSize += B->getSize();
+ }
+
+ uint64_t SegZeroFillStart = SegContentSize;
+ uint64_t SegZeroFillEnd = SegZeroFillStart;
+
+ for (auto *B : SegLists.ZeroFillBlocks) {
+ SegAlign = std::max(SegAlign, B->getAlignment());
+ SegZeroFillEnd = alignToBlock(SegZeroFillEnd, *B);
+ SegZeroFillEnd += B->getSize();
+ }
+
+ Segments[Prot] = {SegAlign, SegContentSize,
+ SegZeroFillEnd - SegZeroFillStart};
+
+ LLVM_DEBUG({
+ dbgs() << (&KV == &*Layout.begin() ? "" : "; ")
+ << static_cast<sys::Memory::ProtectionFlags>(Prot)
+ << ": alignment = " << SegAlign
+ << ", content size = " << SegContentSize
+ << ", zero-fill size = " << (SegZeroFillEnd - SegZeroFillStart);
+ });
+ }
+ LLVM_DEBUG(dbgs() << " }\n");
+
+ if (auto AllocOrErr = Ctx->getMemoryManager().allocate(Segments))
+ Alloc = std::move(*AllocOrErr);
+ else
+ return AllocOrErr.takeError();
+
+ LLVM_DEBUG({
+ dbgs() << "JIT linker got working memory:\n";
+ for (auto &KV : Layout) {
+ auto Prot = static_cast<sys::Memory::ProtectionFlags>(KV.first);
+ dbgs() << " " << Prot << ": "
+ << (const void *)Alloc->getWorkingMemory(Prot).data() << "\n";
+ }
+ });
+
+ // Update block target addresses.
+ for (auto &KV : Layout) {
+ auto &Prot = KV.first;
+ auto &SL = KV.second;
+
+ JITTargetAddress NextBlockAddr =
+ Alloc->getTargetMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));
+
+ for (auto *SIList : {&SL.ContentBlocks, &SL.ZeroFillBlocks})
+ for (auto *B : *SIList) {
+ NextBlockAddr = alignToBlock(NextBlockAddr, *B);
+ B->setAddress(NextBlockAddr);
+ NextBlockAddr += B->getSize();
+ }
+ }
+
+ return Error::success();
+}
+
+DenseSet<StringRef> JITLinkerBase::getExternalSymbolNames() const {
+ // Identify unresolved external symbols.
+ DenseSet<StringRef> UnresolvedExternals;
+ for (auto *Sym : G->external_symbols()) {
+ assert(Sym->getAddress() == 0 &&
+ "External has already been assigned an address");
+ assert(Sym->getName() != StringRef() && Sym->getName() != "" &&
+ "Externals must be named");
+ UnresolvedExternals.insert(Sym->getName());
+ }
+ return UnresolvedExternals;
+}
+
+void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
+ for (auto *Sym : G->external_symbols()) {
+ assert(Sym->getAddress() == 0 && "Symbol already resolved");
+ assert(!Sym->isDefined() && "Symbol being resolved is already defined");
+ assert(Result.count(Sym->getName()) && "Missing resolution for symbol");
+ Sym->getAddressable().setAddress(Result[Sym->getName()].getAddress());
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Externals after applying lookup result:\n";
+ for (auto *Sym : G->external_symbols())
+ dbgs() << " " << Sym->getName() << ": "
+ << formatv("{0:x16}", Sym->getAddress()) << "\n";
+ });
+ assert(llvm::all_of(G->external_symbols(),
+ [](Symbol *Sym) { return Sym->getAddress() != 0; }) &&
+ "All symbols should have been resolved by this point");
+}
+
+void JITLinkerBase::deallocateAndBailOut(Error Err) {
+ assert(Err && "Should not be bailing out on success value");
+ assert(Alloc && "can not call deallocateAndBailOut before allocation");
+ Ctx->notifyFailed(joinErrors(std::move(Err), Alloc->deallocate()));
+}
+
+void JITLinkerBase::dumpGraph(raw_ostream &OS) {
+ assert(G && "Graph is not set yet");
+ G->dump(OS, [this](Edge::Kind K) { return getEdgeKindName(K); });
+}
+
+void prune(LinkGraph &G) {
+ std::vector<Symbol *> Worklist;
+ DenseSet<Block *> VisitedBlocks;
+
+ // Build the initial worklist from all symbols initially live.
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->isLive())
+ Worklist.push_back(Sym);
+
+ // Propagate live flags to all symbols reachable from the initial live set.
+ while (!Worklist.empty()) {
+ auto *Sym = Worklist.back();
+ Worklist.pop_back();
+
+ auto &B = Sym->getBlock();
+
+ // Skip blocks that we've already visited.
+ if (VisitedBlocks.count(&B))
+ continue;
+
+ VisitedBlocks.insert(&B);
+
+ for (auto &E : Sym->getBlock().edges()) {
+ if (E.getTarget().isDefined() && !E.getTarget().isLive()) {
+ E.getTarget().setLive(true);
+ Worklist.push_back(&E.getTarget());
+ }
+ }
+ }
+
+ // Collect all the symbols to remove, then remove them.
+ {
+ LLVM_DEBUG(dbgs() << "Dead-stripping symbols:\n");
+ std::vector<Symbol *> SymbolsToRemove;
+ for (auto *Sym : G.defined_symbols())
+ if (!Sym->isLive())
+ SymbolsToRemove.push_back(Sym);
+ for (auto *Sym : SymbolsToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *Sym << "...\n");
+ G.removeDefinedSymbol(*Sym);
+ }
+ }
+
+ // Delete any unused blocks.
+ {
+ LLVM_DEBUG(dbgs() << "Dead-stripping blocks:\n");
+ std::vector<Block *> BlocksToRemove;
+ for (auto *B : G.blocks())
+ if (!VisitedBlocks.count(B))
+ BlocksToRemove.push_back(B);
+ for (auto *B : BlocksToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *B << "...\n");
+ G.removeBlock(*B);
+ }
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
new file mode 100644
index 0000000000000..07dee6cee2002
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
@@ -0,0 +1,247 @@
+//===------ JITLinkGeneric.h - Generic JIT linker utilities -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utilities, e.g. graph pruning and eh-frame parsing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+
+class MemoryBufferRef;
+
+namespace jitlink {
+
+/// Base class for a JIT linker.
+///
+/// A JITLinkerBase instance links one object file into an ongoing JIT
+/// session. Symbol resolution and finalization operations are pluggable,
+/// and called using continuation passing (passing a continuation for the
+/// remaining linker work) to allow them to be performed asynchronously.
+class JITLinkerBase {
+public:
+ JITLinkerBase(std::unique_ptr<JITLinkContext> Ctx, PassConfiguration Passes)
+ : Ctx(std::move(Ctx)), Passes(std::move(Passes)) {
+ assert(this->Ctx && "Ctx can not be null");
+ }
+
+ virtual ~JITLinkerBase();
+
+protected:
+ struct SegmentLayout {
+ using BlocksList = std::vector<Block *>;
+
+ BlocksList ContentBlocks;
+ BlocksList ZeroFillBlocks;
+ };
+
+ using SegmentLayoutMap = DenseMap<unsigned, SegmentLayout>;
+
+ // Phase 1:
+ // 1.1: Build link graph
+ // 1.2: Run pre-prune passes
+ // 1.3: Prune graph
+ // 1.4: Run post-prune passes
+ // 1.5: Sort blocks into segments
+ // 1.6: Allocate segment memory
+ // 1.7: Identify externals and make an async call to resolve them
+ void linkPhase1(std::unique_ptr<JITLinkerBase> Self);
+
+ // Phase 2:
+ // 2.1: Apply resolution results
+ // 2.2: Fix up block contents
+ // 2.3: Call OnResolved callback
+ // 2.4: Make an async call to transfer and finalize memory.
+ void linkPhase2(std::unique_ptr<JITLinkerBase> Self,
+ Expected<AsyncLookupResult> LookupResult,
+ SegmentLayoutMap Layout);
+
+ // Phase 3:
+ // 3.1: Call OnFinalized callback, handing off allocation.
+ void linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err);
+
+ // Build a graph from the given object buffer.
+ // To be implemented by the client.
+ virtual Expected<std::unique_ptr<LinkGraph>>
+ buildGraph(MemoryBufferRef ObjBuffer) = 0;
+
+ // For debug dumping of the link graph.
+ virtual StringRef getEdgeKindName(Edge::Kind K) const = 0;
+
+ // Align a JITTargetAddress to conform with block alignment requirements.
+ static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
+ uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
+ return Addr + Delta;
+ }
+
+ // Align a pointer to conform with block alignment requirements.
+ static char *alignToBlock(char *P, Block &B) {
+ uint64_t PAddr = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(P));
+ uint64_t Delta = (B.getAlignmentOffset() - PAddr) % B.getAlignment();
+ return P + Delta;
+ }
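+
+ // Worked example (illustrative): for a block with alignment 8 and
+ // alignment-offset 2, aligning Addr = 0x1003 yields
+ // Delta = (2 - 0x1003) % 8 = 7 in unsigned arithmetic, so the block is
+ // placed at 0x100A, which is congruent to 2 modulo 8 as required.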
+
+private:
+ // Run all passes in the given pass list, bailing out immediately if any pass
+ // returns an error.
+ Error runPasses(LinkGraphPassList &Passes);
+
+ // Copy block contents and apply relocations.
+ // Implemented in JITLinker.
+ virtual Error
+ copyAndFixUpBlocks(const SegmentLayoutMap &Layout,
+ JITLinkMemoryManager::Allocation &Alloc) const = 0;
+
+ SegmentLayoutMap layOutBlocks();
+ Error allocateSegments(const SegmentLayoutMap &Layout);
+ DenseSet<StringRef> getExternalSymbolNames() const;
+ void applyLookupResult(AsyncLookupResult LR);
+ void deallocateAndBailOut(Error Err);
+
+ void dumpGraph(raw_ostream &OS);
+
+ std::unique_ptr<JITLinkContext> Ctx;
+ PassConfiguration Passes;
+ std::unique_ptr<LinkGraph> G;
+ std::unique_ptr<JITLinkMemoryManager::Allocation> Alloc;
+};
+
+template <typename LinkerImpl> class JITLinker : public JITLinkerBase {
+public:
+ using JITLinkerBase::JITLinkerBase;
+
+ /// Link constructs a LinkerImpl instance and calls linkPhase1.
+ /// Link should be called with the constructor arguments for LinkerImpl, which
+ /// will be forwarded to the constructor.
+ template <typename... ArgTs> static void link(ArgTs &&... Args) {
+ auto L = std::make_unique<LinkerImpl>(std::forward<ArgTs>(Args)...);
+
+ // Ownership of the linker is passed into the linker's doLink function to
+ // allow it to be passed on to async continuations.
+ //
+ // FIXME: Remove LTmp once we have c++17.
+ // C++17 sequencing rules guarantee that function name expressions are
+ // sequenced before arguments, so L->linkPhase1(std::move(L), ...) will be
+ // well formed.
+ auto &LTmp = *L;
+ LTmp.linkPhase1(std::move(L));
+ }
+
+private:
+ const LinkerImpl &impl() const {
+ return static_cast<const LinkerImpl &>(*this);
+ }
+
+ Error
+ copyAndFixUpBlocks(const SegmentLayoutMap &Layout,
+ JITLinkMemoryManager::Allocation &Alloc) const override {
+ LLVM_DEBUG(dbgs() << "Copying and fixing up blocks:\n");
+ for (auto &KV : Layout) {
+ auto &Prot = KV.first;
+ auto &SegLayout = KV.second;
+
+ auto SegMem = Alloc.getWorkingMemory(
+ static_cast<sys::Memory::ProtectionFlags>(Prot));
+ char *LastBlockEnd = SegMem.data();
+ char *BlockDataPtr = LastBlockEnd;
+
+ LLVM_DEBUG({
+ dbgs() << " Processing segment "
+ << static_cast<sys::Memory::ProtectionFlags>(Prot) << " [ "
+ << (const void *)SegMem.data() << " .. "
+ << (const void *)((char *)SegMem.data() + SegMem.size())
+ << " ]\n Processing content sections:\n";
+ });
+
+ for (auto *B : SegLayout.ContentBlocks) {
+ LLVM_DEBUG(dbgs() << " " << *B << ":\n");
+
+ // Pad to alignment/alignment-offset.
+ BlockDataPtr = alignToBlock(BlockDataPtr, *B);
+
+ LLVM_DEBUG({
+ dbgs() << " Bumped block pointer to "
+ << (const void *)BlockDataPtr << " to meet block alignment "
+ << B->getAlignment() << " and alignment offset "
+ << B->getAlignmentOffset() << "\n";
+ });
+
+ // Zero pad up to alignment.
+ LLVM_DEBUG({
+ if (LastBlockEnd != BlockDataPtr)
+ dbgs() << " Zero padding from " << (const void *)LastBlockEnd
+ << " to " << (const void *)BlockDataPtr << "\n";
+ });
+
+ while (LastBlockEnd != BlockDataPtr)
+ *LastBlockEnd++ = 0;
+
+ // Copy initial block content.
+ LLVM_DEBUG({
+ dbgs() << " Copying block " << *B << " content, "
+ << B->getContent().size() << " bytes, from "
+ << (const void *)B->getContent().data() << " to "
+ << (const void *)BlockDataPtr << "\n";
+ });
+ memcpy(BlockDataPtr, B->getContent().data(), B->getContent().size());
+
+ // Copy Block data and apply fixups.
+ LLVM_DEBUG(dbgs() << " Applying fixups.\n");
+ for (auto &E : B->edges()) {
+
+ // Skip non-relocation edges.
+ if (!E.isRelocation())
+ continue;
+
+ // Dispatch to LinkerImpl for fixup.
+ if (auto Err = impl().applyFixup(*B, E, BlockDataPtr))
+ return Err;
+ }
+
+ // Point the block's content to the fixed up buffer.
+ B->setContent(StringRef(BlockDataPtr, B->getContent().size()));
+
+ // Update block end pointer.
+ LastBlockEnd = BlockDataPtr + B->getContent().size();
+ BlockDataPtr = LastBlockEnd;
+ }
+
+ // Zero pad the rest of the segment.
+ LLVM_DEBUG({
+ dbgs() << " Zero padding end of segment from "
+ << (const void *)LastBlockEnd << " to "
+ << (const void *)((char *)SegMem.data() + SegMem.size()) << "\n";
+ });
+ while (LastBlockEnd != SegMem.data() + SegMem.size())
+ *LastBlockEnd++ = 0;
+ }
+
+ return Error::success();
+ }
+};
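+
+// Example (illustrative): a hypothetical target-specific linker subclasses
+// JITLinker and is started via the static link method:
+//
+//   class MyMachOLinker : public JITLinker<MyMachOLinker> {
+//     // implements buildGraph, getEdgeKindName, and the applyFixup hook
+//     // used by copyAndFixUpBlocks above...
+//   };
+//   MyMachOLinker::link(std::move(Ctx), std::move(Passes));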
+
+/// Removes dead symbols/blocks/addressables.
+///
+/// Finds the set of symbols and addressables reachable from any symbol
+/// initially marked live. All symbols/addressables not marked live at the end
+/// of this process are removed.
+void prune(LinkGraph &G);
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "jitlink"
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
new file mode 100644
index 0000000000000..9e0d207e8bdb1
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
@@ -0,0 +1,132 @@
+//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkMemoryManager::~JITLinkMemoryManager() = default;
+JITLinkMemoryManager::Allocation::~Allocation() = default;
+
+Expected<std::unique_ptr<JITLinkMemoryManager::Allocation>>
+InProcessMemoryManager::allocate(const SegmentsRequestMap &Request) {
+
+ using AllocationMap = DenseMap<unsigned, sys::MemoryBlock>;
+
+ // Local class for allocation.
+ class IPMMAlloc : public Allocation {
+ public:
+ IPMMAlloc(AllocationMap SegBlocks) : SegBlocks(std::move(SegBlocks)) {}
+ MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
+ assert(SegBlocks.count(Seg) && "No allocation for segment");
+ return {static_cast<char *>(SegBlocks[Seg].base()),
+ SegBlocks[Seg].allocatedSize()};
+ }
+ JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
+ assert(SegBlocks.count(Seg) && "No allocation for segment");
+ return reinterpret_cast<JITTargetAddress>(SegBlocks[Seg].base());
+ }
+ void finalizeAsync(FinalizeContinuation OnFinalize) override {
+ OnFinalize(applyProtections());
+ }
+ Error deallocate() override {
+ if (SegBlocks.empty())
+ return Error::success();
+ void *SlabStart = SegBlocks.begin()->second.base();
+ char *SlabEnd = (char *)SlabStart;
+ for (auto &KV : SegBlocks) {
+ SlabStart = std::min(SlabStart, KV.second.base());
+ SlabEnd = std::max(SlabEnd, (char *)(KV.second.base()) +
+ KV.second.allocatedSize());
+ }
+ size_t SlabSize = SlabEnd - (char *)SlabStart;
+ assert((SlabSize % sys::Process::getPageSizeEstimate()) == 0 &&
+ "Slab size is not a multiple of page size");
+ sys::MemoryBlock Slab(SlabStart, SlabSize);
+ if (auto EC = sys::Memory::releaseMappedMemory(Slab))
+ return errorCodeToError(EC);
+ return Error::success();
+ }
+
+ private:
+ Error applyProtections() {
+ for (auto &KV : SegBlocks) {
+ auto &Prot = KV.first;
+ auto &Block = KV.second;
+ if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
+ return errorCodeToError(EC);
+ if (Prot & sys::Memory::MF_EXEC)
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
+ }
+ return Error::success();
+ }
+
+ AllocationMap SegBlocks;
+ };
+
+ if (!isPowerOf2_64((uint64_t)sys::Process::getPageSizeEstimate()))
+ return make_error<StringError>("Page size is not a power of 2",
+ inconvertibleErrorCode());
+
+ AllocationMap Blocks;
+ const sys::Memory::ProtectionFlags ReadWrite =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+
+ // Compute the total number of pages to allocate.
+ size_t TotalSize = 0;
+ for (auto &KV : Request) {
+ const auto &Seg = KV.second;
+
+ if (Seg.getAlignment() > sys::Process::getPageSizeEstimate())
+ return make_error<StringError>("Cannot request higher than page "
+ "alignment",
+ inconvertibleErrorCode());
+
+ TotalSize = alignTo(TotalSize, sys::Process::getPageSizeEstimate());
+ TotalSize += Seg.getContentSize();
+ TotalSize += Seg.getZeroFillSize();
+ }
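+
+ // For example (illustrative): with a 4096-byte page size, a single segment
+ // with 5000 content bytes and 3000 zero-fill bytes requests 8000 bytes
+ // here; allocateMappedMemory below rounds the allocation up to whole pages
+ // (8192 bytes).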
+
+ // Allocate one slab to cover all the segments.
+ std::error_code EC;
+ auto SlabRemaining =
+ sys::Memory::allocateMappedMemory(TotalSize, nullptr, ReadWrite, EC);
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Allocate segment memory from the slab.
+ for (auto &KV : Request) {
+
+ const auto &Seg = KV.second;
+
+ uint64_t SegmentSize = alignTo(Seg.getContentSize() + Seg.getZeroFillSize(),
+ sys::Process::getPageSizeEstimate());
+
+ sys::MemoryBlock SegMem(SlabRemaining.base(), SegmentSize);
+ SlabRemaining = sys::MemoryBlock((char *)SlabRemaining.base() + SegmentSize,
+ SlabRemaining.allocatedSize() - SegmentSize);
+
+ // Zero out the zero-fill memory.
+ memset(static_cast<char *>(SegMem.base()) + Seg.getContentSize(), 0,
+ Seg.getZeroFillSize());
+
+ // Record the block for this segment.
+ Blocks[KV.first] = std::move(SegMem);
+ }
+ return std::unique_ptr<InProcessMemoryManager::Allocation>(
+ new IPMMAlloc(std::move(Blocks)));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO.cpp
new file mode 100644
index 0000000000000..58bc0f56e1555
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO.cpp
@@ -0,0 +1,81 @@
+//===-------------- MachO.cpp - JIT linker function for MachO -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+void jitLink_MachO(std::unique_ptr<JITLinkContext> Ctx) {
+
+ // We don't want to do full MachO validation here. Just parse enough of the
+ // header to find out which MachO linker to use.
+
+ StringRef Data = Ctx->getObjectBuffer().getBuffer();
+ if (Data.size() < 4) {
+ Ctx->notifyFailed(make_error<JITLinkError>("Truncated MachO buffer"));
+ return;
+ }
+
+ uint32_t Magic;
+ memcpy(&Magic, Data.data(), sizeof(uint32_t));
+ LLVM_DEBUG({
+ dbgs() << "jitLink_MachO: magic = " << format("0x%08" PRIx32, Magic)
+ << ", identifier = \""
+ << Ctx->getObjectBuffer().getBufferIdentifier() << "\"\n";
+ });
+
+ if (Magic == MachO::MH_MAGIC || Magic == MachO::MH_CIGAM) {
+ Ctx->notifyFailed(
+ make_error<JITLinkError>("MachO 32-bit platforms not supported"));
+ return;
+ } else if (Magic == MachO::MH_MAGIC_64 || Magic == MachO::MH_CIGAM_64) {
+ MachO::mach_header_64 Header;
+
+ memcpy(&Header, Data.data(), sizeof(MachO::mach_header_64));
+ if (Magic == MachO::MH_CIGAM_64)
+ swapStruct(Header);
+
+ LLVM_DEBUG({
+ dbgs() << "jitLink_MachO: cputype = "
+ << format("0x%08" PRIx32, Header.cputype)
+ << ", cpusubtype = " << format("0x%08" PRIx32, Header.cpusubtype)
+ << "\n";
+ });
+
+ switch (Header.cputype) {
+ case MachO::CPU_TYPE_ARM64:
+ return jitLink_MachO_arm64(std::move(Ctx));
+ case MachO::CPU_TYPE_X86_64:
+ return jitLink_MachO_x86_64(std::move(Ctx));
+ }
+ Ctx->notifyFailed(make_error<JITLinkError>("MachO-64 CPU type not valid"));
+ return;
+ }
+
+ Ctx->notifyFailed(make_error<JITLinkError>("MachO magic not valid"));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
new file mode 100644
index 0000000000000..7366f53ebf36b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
@@ -0,0 +1,535 @@
+//=--------- MachOLinkGraphBuilder.cpp - MachO LinkGraph builder ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+static const char *CommonSectionName = "__common";
+
+namespace llvm {
+namespace jitlink {
+
+MachOLinkGraphBuilder::~MachOLinkGraphBuilder() {}
+
+Expected<std::unique_ptr<LinkGraph>> MachOLinkGraphBuilder::buildGraph() {
+
+ // Sanity check: we only operate on relocatable objects.
+ if (!Obj.isRelocatableObject())
+ return make_error<JITLinkError>("Object is not a relocatable MachO");
+
+ if (auto Err = createNormalizedSections())
+ return std::move(Err);
+
+ if (auto Err = createNormalizedSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifyRegularSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifySectionsWithCustomParsers())
+ return std::move(Err);
+
+ if (auto Err = addRelocations())
+ return std::move(Err);
+
+ return std::move(G);
+}
+
+MachOLinkGraphBuilder::MachOLinkGraphBuilder(const object::MachOObjectFile &Obj)
+ : Obj(Obj),
+ G(std::make_unique<LinkGraph>(Obj.getFileName(), getPointerSize(Obj),
+ getEndianness(Obj))) {}
+
+void MachOLinkGraphBuilder::addCustomSectionParser(
+ StringRef SectionName, SectionParserFunction Parser) {
+ assert(!CustomSectionParserFunctions.count(SectionName) &&
+ "Custom parser for this section already exists");
+ CustomSectionParserFunctions[SectionName] = std::move(Parser);
+}
+
+Linkage MachOLinkGraphBuilder::getLinkage(uint16_t Desc) {
+ if ((Desc & MachO::N_WEAK_DEF) || (Desc & MachO::N_WEAK_REF))
+ return Linkage::Weak;
+ return Linkage::Strong;
+}
+
+Scope MachOLinkGraphBuilder::getScope(StringRef Name, uint8_t Type) {
+ if (Name.startswith("l"))
+ return Scope::Local;
+ if (Type & MachO::N_PEXT)
+ return Scope::Hidden;
+ if (Type & MachO::N_EXT)
+ return Scope::Default;
+ return Scope::Local;
+}
+
+bool MachOLinkGraphBuilder::isAltEntry(const NormalizedSymbol &NSym) {
+ return NSym.Desc & MachO::N_ALT_ENTRY;
+}
+
+unsigned
+MachOLinkGraphBuilder::getPointerSize(const object::MachOObjectFile &Obj) {
+ return Obj.is64Bit() ? 8 : 4;
+}
+
+support::endianness
+MachOLinkGraphBuilder::getEndianness(const object::MachOObjectFile &Obj) {
+ return Obj.isLittleEndian() ? support::little : support::big;
+}
+
+Section &MachOLinkGraphBuilder::getCommonSection() {
+ if (!CommonSection) {
+ auto Prot = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE);
+ CommonSection = &G->createSection(CommonSectionName, Prot);
+ }
+ return *CommonSection;
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSections() {
+ // Build normalized sections. Verifies that section data is in-range (for
+ // sections with content) and that address ranges are non-overlapping.
+
+ LLVM_DEBUG(dbgs() << "Creating normalized sections...\n");
+
+ for (auto &SecRef : Obj.sections()) {
+ NormalizedSection NSec;
+ uint32_t DataOffset = 0;
+
+ auto SecIndex = Obj.getSectionIndex(SecRef.getRawDataRefImpl());
+
+ auto Name = SecRef.getName();
+ if (!Name)
+ return Name.takeError();
+
+ if (Obj.is64Bit()) {
+ const MachO::section_64 &Sec64 =
+ Obj.getSection64(SecRef.getRawDataRefImpl());
+
+ NSec.Address = Sec64.addr;
+ NSec.Size = Sec64.size;
+ NSec.Alignment = 1ULL << Sec64.align;
+ NSec.Flags = Sec64.flags;
+ DataOffset = Sec64.offset;
+ } else {
+ const MachO::section &Sec32 = Obj.getSection(SecRef.getRawDataRefImpl());
+ NSec.Address = Sec32.addr;
+ NSec.Size = Sec32.size;
+ NSec.Alignment = 1ULL << Sec32.align;
+ NSec.Flags = Sec32.flags;
+ DataOffset = Sec32.offset;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " " << *Name << ": " << formatv("{0:x16}", NSec.Address)
+ << " -- " << formatv("{0:x16}", NSec.Address + NSec.Size)
+ << ", align: " << NSec.Alignment << ", index: " << SecIndex
+ << "\n";
+ });
+
+ // Get the section data if any.
+ {
+ unsigned SectionType = NSec.Flags & MachO::SECTION_TYPE;
+ if (SectionType != MachO::S_ZEROFILL &&
+ SectionType != MachO::S_GB_ZEROFILL) {
+
+ if (DataOffset + NSec.Size > Obj.getData().size())
+ return make_error<JITLinkError>(
+ "Section data extends past end of file");
+
+ NSec.Data = Obj.getData().data() + DataOffset;
+ }
+ }
+
+ // Get prot flags.
+ // FIXME: Make sure this test is correct (it's probably missing cases
+ // as-is).
+ sys::Memory::ProtectionFlags Prot;
+ if (NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS)
+ Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ else
+ Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+
+ NSec.GraphSection = &G->createSection(*Name, Prot);
+ IndexToSection.insert(std::make_pair(SecIndex, std::move(NSec)));
+ }
+
+ std::vector<NormalizedSection *> Sections;
+ Sections.reserve(IndexToSection.size());
+ for (auto &KV : IndexToSection)
+ Sections.push_back(&KV.second);
+
+ // If we didn't end up creating any sections then bail out. The code below
+ // assumes that we have at least one section.
+ if (Sections.empty())
+ return Error::success();
+
+ llvm::sort(Sections,
+ [](const NormalizedSection *LHS, const NormalizedSection *RHS) {
+ assert(LHS && RHS && "Null section?");
+ return LHS->Address < RHS->Address;
+ });
+
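+  // With the sections sorted by address, any overlap must occur between
+  // adjacent entries, so a single linear scan over neighboring pairs is
+  // sufficient to verify that all ranges are disjoint.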
+ for (unsigned I = 0, E = Sections.size() - 1; I != E; ++I) {
+ auto &Cur = *Sections[I];
+ auto &Next = *Sections[I + 1];
+ if (Next.Address < Cur.Address + Cur.Size)
+ return make_error<JITLinkError>(
+ "Address range for section " + Cur.GraphSection->getName() +
+ formatv(" [ {0:x16} -- {1:x16} ] ", Cur.Address,
+ Cur.Address + Cur.Size) +
+ "overlaps " +
+ formatv(" [ {0:x16} -- {1:x16} ] ", Next.Address,
+ Next.Address + Next.Size));
+ }
+
+ return Error::success();
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSymbols() {
+ LLVM_DEBUG(dbgs() << "Creating normalized symbols...\n");
+
+ for (auto &SymRef : Obj.symbols()) {
+
+ unsigned SymbolIndex = Obj.getSymbolIndex(SymRef.getRawDataRefImpl());
+ uint64_t Value;
+ uint32_t NStrX;
+ uint8_t Type;
+ uint8_t Sect;
+ uint16_t Desc;
+
+ if (Obj.is64Bit()) {
+ const MachO::nlist_64 &NL64 =
+ Obj.getSymbol64TableEntry(SymRef.getRawDataRefImpl());
+ Value = NL64.n_value;
+ NStrX = NL64.n_strx;
+ Type = NL64.n_type;
+ Sect = NL64.n_sect;
+ Desc = NL64.n_desc;
+ } else {
+ const MachO::nlist &NL32 =
+ Obj.getSymbolTableEntry(SymRef.getRawDataRefImpl());
+ Value = NL32.n_value;
+ NStrX = NL32.n_strx;
+ Type = NL32.n_type;
+ Sect = NL32.n_sect;
+ Desc = NL32.n_desc;
+ }
+
+ // Skip stabs.
+ // FIXME: Are there other symbols we should be skipping?
+ if (Type & MachO::N_STAB)
+ continue;
+
+ Optional<StringRef> Name;
+ if (NStrX) {
+ if (auto NameOrErr = SymRef.getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ if (!Name)
+ dbgs() << "<anonymous symbol>";
+ else
+ dbgs() << *Name;
+ dbgs() << ": value = " << formatv("{0:x16}", Value)
+ << ", type = " << formatv("{0:x2}", Type)
+ << ", desc = " << formatv("{0:x4}", Desc) << ", sect = ";
+ if (Sect)
+ dbgs() << static_cast<unsigned>(Sect - 1);
+ else
+ dbgs() << "none";
+ dbgs() << "\n";
+ });
+
+ // If this symbol has a section, sanity check that the addresses line up.
+ NormalizedSection *NSec = nullptr;
+ if (Sect != 0) {
+ if (auto NSecOrErr = findSectionByIndex(Sect - 1))
+ NSec = &*NSecOrErr;
+ else
+ return NSecOrErr.takeError();
+
+ if (Value < NSec->Address || Value > NSec->Address + NSec->Size)
+ return make_error<JITLinkError>("Symbol address does not fall within "
+ "section");
+ }
+
+    // Note: linkage is encoded in n_desc (N_WEAK_DEF/N_WEAK_REF), not n_type,
+    // and Name may be empty for anonymous symbols, so guard the dereference.
+    IndexToSymbol[SymbolIndex] = &createNormalizedSymbol(
+        Name, Value, Type, Sect, Desc, getLinkage(Desc),
+        getScope(Name ? *Name : StringRef(), Type));
+ }
+
+ return Error::success();
+}
+
+void MachOLinkGraphBuilder::addSectionStartSymAndBlock(
+ Section &GraphSec, uint64_t Address, const char *Data, uint64_t Size,
+ uint32_t Alignment, bool IsLive) {
+ Block &B =
+ Data ? G->createContentBlock(GraphSec, StringRef(Data, Size), Address,
+ Alignment, 0)
+ : G->createZeroFillBlock(GraphSec, Size, Address, Alignment, 0);
+ auto &Sym = G->addAnonymousSymbol(B, 0, Size, false, IsLive);
+ assert(!AddrToCanonicalSymbol.count(Sym.getAddress()) &&
+ "Anonymous block start symbol clashes with existing symbol address");
+ AddrToCanonicalSymbol[Sym.getAddress()] = &Sym;
+}
+
+Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
+
+ LLVM_DEBUG(dbgs() << "Creating graph symbols...\n");
+
+  // We only have 256 section indexes, so use a vector rather than a map.
+ std::vector<std::vector<NormalizedSymbol *>> SecIndexToSymbols;
+ SecIndexToSymbols.resize(256);
+
+ // Create commons, externs, and absolutes, and partition all other symbols by
+ // section.
+ for (auto &KV : IndexToSymbol) {
+ auto &NSym = *KV.second;
+
+ switch (NSym.Type & MachO::N_TYPE) {
+ case MachO::N_UNDF:
+ if (NSym.Value) {
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous common symbol at index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addCommonSymbol(
+ *NSym.Name, NSym.S, getCommonSection(), NSym.Value, 0,
+ 1ull << MachO::GET_COMM_ALIGN(NSym.Desc),
+ NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ } else {
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous external symbol at "
+ "index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addExternalSymbol(*NSym.Name, 0);
+ }
+ break;
+ case MachO::N_ABS:
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous absolute symbol at index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addAbsoluteSymbol(
+ *NSym.Name, NSym.Value, 0, Linkage::Strong, Scope::Default,
+ NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ break;
+ case MachO::N_SECT:
+ SecIndexToSymbols[NSym.Sect - 1].push_back(&NSym);
+ break;
+ case MachO::N_PBUD:
+ return make_error<JITLinkError>(
+ "Unupported N_PBUD symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ case MachO::N_INDR:
+ return make_error<JITLinkError>(
+ "Unupported N_INDR symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ default:
+ return make_error<JITLinkError>(
+ "Unrecognized symbol type " + Twine(NSym.Type & MachO::N_TYPE) +
+ " for symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ }
+ }
+
+ // Loop over sections performing regular graphification for those that
+ // don't have custom parsers.
+ for (auto &KV : IndexToSection) {
+ auto SecIndex = KV.first;
+ auto &NSec = KV.second;
+
+ // Skip sections with custom parsers.
+ if (CustomSectionParserFunctions.count(NSec.GraphSection->getName())) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping section " << NSec.GraphSection->getName()
+ << " as it has a custom parser.\n";
+ });
+ continue;
+ } else
+ LLVM_DEBUG({
+ dbgs() << " Processing section " << NSec.GraphSection->getName()
+ << "...\n";
+ });
+
+ bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
+ bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
+
+ auto &SecNSymStack = SecIndexToSymbols[SecIndex];
+
+ // If this section is non-empty but there are no symbols covering it then
+ // create one block and anonymous symbol to cover the entire section.
+ if (SecNSymStack.empty()) {
+ if (NSec.Size > 0) {
+ LLVM_DEBUG({
+ dbgs() << " Section non-empty, but contains no symbols. "
+ "Creating anonymous block to cover "
+ << formatv("{0:x16}", NSec.Address) << " -- "
+ << formatv("{0:x16}", NSec.Address + NSec.Size) << "\n";
+ });
+ addSectionStartSymAndBlock(*NSec.GraphSection, NSec.Address, NSec.Data,
+ NSec.Size, NSec.Alignment,
+ SectionIsNoDeadStrip);
+ } else
+ LLVM_DEBUG({
+ dbgs() << " Section empty and contains no symbols. Skipping.\n";
+ });
+ continue;
+ }
+
+    // Sort the symbol stack by address, alt-entry status, scope, and name.
+ // We sort in reverse order so that symbols will be visited in the right
+ // order when we pop off the stack below.
+ llvm::sort(SecNSymStack, [](const NormalizedSymbol *LHS,
+ const NormalizedSymbol *RHS) {
+ if (LHS->Value != RHS->Value)
+ return LHS->Value > RHS->Value;
+ if (isAltEntry(*LHS) != isAltEntry(*RHS))
+ return isAltEntry(*RHS);
+ if (LHS->S != RHS->S)
+ return static_cast<uint8_t>(LHS->S) < static_cast<uint8_t>(RHS->S);
+ return LHS->Name < RHS->Name;
+ });
+
+    // The first symbol in a section cannot be an alt-entry symbol.
+ if (!SecNSymStack.empty() && isAltEntry(*SecNSymStack.back()))
+ return make_error<JITLinkError>(
+ "First symbol in " + NSec.GraphSection->getName() + " is alt-entry");
+
+ // If the section is non-empty but there is no symbol covering the start
+ // address then add an anonymous one.
+ if (SecNSymStack.back()->Value != NSec.Address) {
+ auto AnonBlockSize = SecNSymStack.back()->Value - NSec.Address;
+ LLVM_DEBUG({
+ dbgs() << " Section start not covered by symbol. "
+ << "Creating anonymous block to cover [ "
+ << formatv("{0:x16}", NSec.Address) << " -- "
+ << formatv("{0:x16}", NSec.Address + AnonBlockSize) << " ]\n";
+ });
+ addSectionStartSymAndBlock(*NSec.GraphSection, NSec.Address, NSec.Data,
+ AnonBlockSize, NSec.Alignment,
+ SectionIsNoDeadStrip);
+ }
+
+ // Visit section symbols in order by popping off the reverse-sorted stack,
+ // building blocks for each alt-entry chain and creating symbols as we go.
+ while (!SecNSymStack.empty()) {
+ SmallVector<NormalizedSymbol *, 8> BlockSyms;
+
+ BlockSyms.push_back(SecNSymStack.back());
+ SecNSymStack.pop_back();
+ while (!SecNSymStack.empty() &&
+ (isAltEntry(*SecNSymStack.back()) ||
+ SecNSymStack.back()->Value == BlockSyms.back()->Value)) {
+ BlockSyms.push_back(SecNSymStack.back());
+ SecNSymStack.pop_back();
+ }
+
+      // BlockSyms now contains the block symbols in reverse canonical order.
+ JITTargetAddress BlockStart = BlockSyms.front()->Value;
+ JITTargetAddress BlockEnd = SecNSymStack.empty()
+ ? NSec.Address + NSec.Size
+ : SecNSymStack.back()->Value;
+ JITTargetAddress BlockOffset = BlockStart - NSec.Address;
+ JITTargetAddress BlockSize = BlockEnd - BlockStart;
+
+ LLVM_DEBUG({
+ dbgs() << " Creating block for " << formatv("{0:x16}", BlockStart)
+ << " -- " << formatv("{0:x16}", BlockEnd) << ": "
+ << NSec.GraphSection->getName() << " + "
+ << formatv("{0:x16}", BlockOffset) << " with "
+ << BlockSyms.size() << " symbol(s)...\n";
+ });
+
+ Block &B =
+ NSec.Data
+ ? G->createContentBlock(
+ *NSec.GraphSection,
+ StringRef(NSec.Data + BlockOffset, BlockSize), BlockStart,
+ NSec.Alignment, BlockStart % NSec.Alignment)
+ : G->createZeroFillBlock(*NSec.GraphSection, BlockSize,
+ BlockStart, NSec.Alignment,
+ BlockStart % NSec.Alignment);
+
+ Optional<JITTargetAddress> LastCanonicalAddr;
+ JITTargetAddress SymEnd = BlockEnd;
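+      // BlockSyms is visited in descending address order below. SymEnd
+      // tracks the start address of the previous (higher) canonical symbol,
+      // so each symbol's extent runs to the next canonical symbol above it
+      // (or to the block end for the highest symbol).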
+ while (!BlockSyms.empty()) {
+ auto &NSym = *BlockSyms.back();
+ BlockSyms.pop_back();
+
+ bool SymLive =
+ (NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
+
+ LLVM_DEBUG({
+ dbgs() << " " << formatv("{0:x16}", NSym.Value) << " -- "
+ << formatv("{0:x16}", SymEnd) << ": ";
+ if (!NSym.Name)
+ dbgs() << "<anonymous symbol>";
+ else
+ dbgs() << NSym.Name;
+ if (SymLive)
+ dbgs() << " [no-dead-strip]";
+ if (LastCanonicalAddr == NSym.Value)
+ dbgs() << " [non-canonical]";
+ dbgs() << "\n";
+ });
+
+ auto &Sym =
+ NSym.Name
+ ? G->addDefinedSymbol(B, NSym.Value - BlockStart, *NSym.Name,
+ SymEnd - NSym.Value, NSym.L, NSym.S,
+ SectionIsText, SymLive)
+ : G->addAnonymousSymbol(B, NSym.Value - BlockStart,
+ SymEnd - NSym.Value, SectionIsText,
+ SymLive);
+ NSym.GraphSymbol = &Sym;
+ if (LastCanonicalAddr != Sym.getAddress()) {
+ if (LastCanonicalAddr)
+ SymEnd = *LastCanonicalAddr;
+ LastCanonicalAddr = Sym.getAddress();
+ setCanonicalSymbol(Sym);
+ }
+ }
+ }
+ }
+
+ return Error::success();
+}
+
+Error MachOLinkGraphBuilder::graphifySectionsWithCustomParsers() {
+ // Graphify special sections.
+ for (auto &KV : IndexToSection) {
+ auto &NSec = KV.second;
+
+ auto HI = CustomSectionParserFunctions.find(NSec.GraphSection->getName());
+ if (HI != CustomSectionParserFunctions.end()) {
+ auto &Parse = HI->second;
+ if (auto Err = Parse(NSec))
+ return Err;
+ }
+ }
+
+ return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
new file mode 100644
index 0000000000000..e1123cd110487
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
@@ -0,0 +1,269 @@
+//===----- MachOLinkGraphBuilder.h - MachO LinkGraph builder ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#include "EHFrameSupportImpl.h"
+#include "JITLinkGeneric.h"
+#include "llvm/Object/MachO.h"
+
+#include <list>
+
+namespace llvm {
+namespace jitlink {
+
+class MachOLinkGraphBuilder {
+public:
+ virtual ~MachOLinkGraphBuilder();
+ Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+protected:
+ class MachOEHFrameBinaryParser : public EHFrameBinaryParser {
+ public:
+ MachOEHFrameBinaryParser(MachOLinkGraphBuilder &Builder,
+ JITTargetAddress EHFrameAddress,
+ StringRef EHFrameContent, Section &EHFrameSection,
+ uint64_t CIEAlignment, uint64_t FDEAlignment,
+ Edge::Kind FDEToCIERelocKind,
+ Edge::Kind FDEToTargetRelocKind)
+ : EHFrameBinaryParser(EHFrameAddress, EHFrameContent,
+ Builder.getGraph().getPointerSize(),
+ Builder.getGraph().getEndianness()),
+ Builder(Builder), EHFrameSection(EHFrameSection),
+ CIEAlignment(CIEAlignment), FDEAlignment(FDEAlignment),
+ FDEToCIERelocKind(FDEToCIERelocKind),
+ FDEToTargetRelocKind(FDEToTargetRelocKind) {}
+
+ Symbol *getSymbolAtAddress(JITTargetAddress Address) override {
+ if (auto *Sym = Builder.getSymbolByAddress(Address))
+ if (Sym->getAddress() == Address)
+ return Sym;
+ return nullptr;
+ }
+
+ Symbol &createCIERecord(JITTargetAddress RecordAddr,
+ StringRef RecordContent) override {
+ auto &G = Builder.getGraph();
+ auto &B = G.createContentBlock(EHFrameSection, RecordContent, RecordAddr,
+ CIEAlignment, 0);
+ auto &CIESymbol =
+ G.addAnonymousSymbol(B, 0, RecordContent.size(), false, false);
+ Builder.setCanonicalSymbol(CIESymbol);
+ return CIESymbol;
+ }
+
+ Expected<Symbol &> createFDERecord(JITTargetAddress RecordAddr,
+ StringRef RecordContent, Symbol &CIE,
+ size_t CIEOffset, Symbol &Func,
+ size_t FuncOffset, Symbol *LSDA,
+ size_t LSDAOffset) override {
+ auto &G = Builder.getGraph();
+ auto &B = G.createContentBlock(EHFrameSection, RecordContent, RecordAddr,
+ FDEAlignment, 0);
+
+ // Add edges to CIE, Func, and (conditionally) LSDA.
+ B.addEdge(FDEToCIERelocKind, CIEOffset, CIE, 0);
+ B.addEdge(FDEToTargetRelocKind, FuncOffset, Func, 0);
+
+ if (LSDA)
+ B.addEdge(FDEToTargetRelocKind, LSDAOffset, *LSDA, 0);
+
+ auto &FDESymbol =
+ G.addAnonymousSymbol(B, 0, RecordContent.size(), false, false);
+
+ // Add a keep-alive relocation from the function to the FDE to ensure it
+ // is not dead stripped.
+ Func.getBlock().addEdge(Edge::KeepAlive, 0, FDESymbol, 0);
+
+ return FDESymbol;
+ }
+
+ private:
+ MachOLinkGraphBuilder &Builder;
+ Section &EHFrameSection;
+ uint64_t CIEAlignment;
+ uint64_t FDEAlignment;
+ Edge::Kind FDEToCIERelocKind;
+ Edge::Kind FDEToTargetRelocKind;
+ };
+
+ struct NormalizedSymbol {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSymbol(Optional<StringRef> Name, uint64_t Value, uint8_t Type,
+ uint8_t Sect, uint16_t Desc, Linkage L, Scope S)
+ : Name(Name), Value(Value), Type(Type), Sect(Sect), Desc(Desc), L(L),
+ S(S) {
+ assert((!Name || !Name->empty()) && "Name must be none or non-empty");
+ }
+
+ public:
+ NormalizedSymbol(const NormalizedSymbol &) = delete;
+ NormalizedSymbol &operator=(const NormalizedSymbol &) = delete;
+ NormalizedSymbol(NormalizedSymbol &&) = delete;
+ NormalizedSymbol &operator=(NormalizedSymbol &&) = delete;
+
+ Optional<StringRef> Name;
+ uint64_t Value = 0;
+ uint8_t Type = 0;
+ uint8_t Sect = 0;
+ uint16_t Desc = 0;
+ Linkage L = Linkage::Strong;
+ Scope S = Scope::Default;
+ Symbol *GraphSymbol = nullptr;
+ };
+
+ class NormalizedSection {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSection() = default;
+
+ public:
+ Section *GraphSection = nullptr;
+ uint64_t Address = 0;
+ uint64_t Size = 0;
+ uint64_t Alignment = 0;
+ uint32_t Flags = 0;
+ const char *Data = nullptr;
+ };
+
+ using SectionParserFunction = std::function<Error(NormalizedSection &S)>;
+
+ MachOLinkGraphBuilder(const object::MachOObjectFile &Obj);
+
+ LinkGraph &getGraph() const { return *G; }
+
+ const object::MachOObjectFile &getObject() const { return Obj; }
+
+ void addCustomSectionParser(StringRef SectionName,
+ SectionParserFunction Parse);
+
+ virtual Error addRelocations() = 0;
+
+ /// Create a symbol.
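+  /// NormalizedSymbols are bump-allocated from the builder's allocator, so
+  /// they remain valid for the builder's lifetime and are never individually
+  /// freed.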
+ template <typename... ArgTs>
+ NormalizedSymbol &createNormalizedSymbol(ArgTs &&... Args) {
+ NormalizedSymbol *Sym = reinterpret_cast<NormalizedSymbol *>(
+ Allocator.Allocate<NormalizedSymbol>());
+ new (Sym) NormalizedSymbol(std::forward<ArgTs>(Args)...);
+ return *Sym;
+ }
+
+ /// Index is zero-based (MachO section indexes are usually one-based) and
+ /// assumed to be in-range. Client is responsible for checking.
+ NormalizedSection &getSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ assert(I != IndexToSection.end() && "No section recorded at index");
+ return I->second;
+ }
+
+ /// Try to get the section at the given index. Will return an error if the
+ /// given index is out of range, or if no section has been added for the given
+ /// index.
+ Expected<NormalizedSection &> findSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ if (I == IndexToSection.end())
+ return make_error<JITLinkError>("No section recorded for index " +
+ formatv("{0:u}", Index));
+ return I->second;
+ }
+
+ /// Try to get the symbol at the given index. Will return an error if the
+ /// given index is out of range, or if no symbol has been added for the given
+ /// index.
+ Expected<NormalizedSymbol &> findSymbolByIndex(uint64_t Index) {
+ if (Index >= IndexToSymbol.size())
+ return make_error<JITLinkError>("Symbol index out of range");
+ auto *Sym = IndexToSymbol[Index];
+ if (!Sym)
+ return make_error<JITLinkError>("No symbol at index " +
+ formatv("{0:u}", Index));
+ return *Sym;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or null if no such symbol exists.
+ Symbol *getSymbolByAddress(JITTargetAddress Address) {
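+    // upper_bound returns the first canonical symbol strictly above Address;
+    // its predecessor, if any, is the closest symbol at or below Address.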
+ auto I = AddrToCanonicalSymbol.upper_bound(Address);
+ if (I == AddrToCanonicalSymbol.begin())
+ return nullptr;
+ return std::prev(I)->second;
+ }
+
+  /// Returns the symbol whose range covers the given address (i.e. the
+  /// closest symbol at or below it whose size reaches past the address), or
+  /// an error if no such symbol exists.
+ Expected<Symbol &> findSymbolByAddress(JITTargetAddress Address) {
+ auto *Sym = getSymbolByAddress(Address);
+ if (Sym)
+ if (Address < Sym->getAddress() + Sym->getSize())
+ return *Sym;
+ return make_error<JITLinkError>("No symbol covering address " +
+ formatv("{0:x16}", Address));
+ }
+
+ static Linkage getLinkage(uint16_t Desc);
+ static Scope getScope(StringRef Name, uint8_t Type);
+ static bool isAltEntry(const NormalizedSymbol &NSym);
+
+private:
+ static unsigned getPointerSize(const object::MachOObjectFile &Obj);
+ static support::endianness getEndianness(const object::MachOObjectFile &Obj);
+
+ void setCanonicalSymbol(Symbol &Sym) {
+ auto *&CanonicalSymEntry = AddrToCanonicalSymbol[Sym.getAddress()];
+ // There should be no symbol at this address, or, if there is,
+ // it should be a zero-sized symbol from an empty section (which
+ // we can safely override).
+ assert((!CanonicalSymEntry || CanonicalSymEntry->getSize() == 0) &&
+ "Duplicate canonical symbol at address");
+ CanonicalSymEntry = &Sym;
+ }
+
+ Section &getCommonSection();
+ void addSectionStartSymAndBlock(Section &GraphSec, uint64_t Address,
+ const char *Data, uint64_t Size,
+ uint32_t Alignment, bool IsLive);
+
+ Error createNormalizedSections();
+ Error createNormalizedSymbols();
+
+ /// Create graph blocks and symbols for externals, absolutes, commons and
+ /// all defined symbols in sections without custom parsers.
+ Error graphifyRegularSymbols();
+
+ /// Create graph blocks and symbols for all sections.
+ Error graphifySectionsWithCustomParsers();
+
+ // Put the BumpPtrAllocator first so that we don't free any of the underlying
+ // memory until the Symbol/Addressable destructors have been run.
+ BumpPtrAllocator Allocator;
+
+ const object::MachOObjectFile &Obj;
+ std::unique_ptr<LinkGraph> G;
+
+ DenseMap<unsigned, NormalizedSection> IndexToSection;
+ Section *CommonSection = nullptr;
+
+ DenseMap<uint32_t, NormalizedSymbol *> IndexToSymbol;
+ std::map<JITTargetAddress, Symbol *> AddrToCanonicalSymbol;
+ StringMap<SectionParserFunction> CustomSectionParserFunctions;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
new file mode 100644
index 0000000000000..945343bff89d2
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -0,0 +1,736 @@
+//===---- MachO_arm64.cpp - JIT linker implementation for MachO/arm64 -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/arm64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+
+#include "BasicGOTAndStubsBuilder.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::MachO_arm64_Edges;
+
+namespace {
+
+class MachOLinkGraphBuilder_arm64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_arm64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj),
+ NumSymbols(Obj.getSymtabLoadCommand().nsyms) {
+ addCustomSectionParser(
+ "__eh_frame", [this](NormalizedSection &EHFrameSection) {
+ if (!EHFrameSection.Data)
+ return make_error<JITLinkError>(
+ "__eh_frame section is marked zero-fill");
+ return MachOEHFrameBinaryParser(
+ *this, EHFrameSection.Address,
+ StringRef(EHFrameSection.Data, EHFrameSection.Size),
+ *EHFrameSection.GraphSection, 8, 4, NegDelta32, Delta64)
+ .addToGraph();
+ });
+ }
+
+private:
+ static Expected<MachOARM64RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::ARM64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? Pointer64 : Pointer64Anon;
+ else if (RI.r_length == 2)
+ return Pointer32;
+ }
+ break;
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'.
+ // They may be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return Delta32;
+ else if (RI.r_length == 3)
+ return Delta64;
+ }
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Branch26;
+ break;
+ case MachO::ARM64_RELOC_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Page21;
+ break;
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PageOffset12;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPage21;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPageOffset12;
+ break;
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PointerToGOT;
+ break;
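+    // ADDEND carries the addend for the following BRANCH26/PAGE21/PAGEOFF12
+    // relocation in its r_symbolnum field; it does not reference a symbol.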
+ case MachO::ARM64_RELOC_ADDEND:
+ if (!RI.r_pcrel && !RI.r_extern && RI.r_length == 2)
+ return PairedAddend;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported arm64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ MachO::relocation_info
+ getRelocationInfo(const object::relocation_iterator RelItr) {
+ MachO::any_relocation_info ARI =
+ getObject().getRelocation(RelItr->getRawDataRefImpl());
+ MachO::relocation_info RI;
+ memcpy(&RI, &ARI, sizeof(MachO::relocation_info));
+ return RI;
+ }
+
+ using PairRelocInfo =
+ std::tuple<MachOARM64RelocationKind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ JITTargetAddress FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of arm64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ if (auto ToSymbolOrErr = findSymbolByAddress(FixupValue))
+ ToSymbol = &*ToSymbolOrErr;
+ else
+ return ToSymbolOrErr.takeError();
+ FixupValue -= ToSymbol->getAddress();
+ }
+
+ MachOARM64RelocationKind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = &*FromSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry groups)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ for (auto &S : Obj.sections()) {
+
+ JITTargetAddress SectionAddress = S.getAddress();
+
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Sanity check the relocation kind.
+ auto Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ // Find the address of the value to fix up.
+ JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+ LLVM_DEBUG({
+ dbgs() << "Processing " << getMachOARM64RelocationKindName(*Kind)
+ << " relocation at " << format("0x%016" PRIx64, FixupAddress)
+ << "\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation content extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ if (*Kind == PairedAddend) {
+ // If this is an Addend relocation then process it and move to the
+ // paired reloc.
+
+ Addend = RI.r_symbolnum;
+
+          // Advance to the paired relocation before testing for the end of
+          // the list: the loop condition only guarantees that the current
+          // iterator is valid.
+          ++RelItr;
+          if (RelItr == RelEnd)
+            return make_error<JITLinkError>("Unpaired Addend reloc at " +
+                                            formatv("{0:x16}", FixupAddress));
+          RI = getRelocationInfo(RelItr);
+
+ Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ if (*Kind != Branch26 && *Kind != Page21 && *Kind != PageOffset12)
+ return make_error<JITLinkError>(
+ "Invalid relocation pair: Addend + " +
+ getMachOARM64RelocationKindName(*Kind));
+ else
+ LLVM_DEBUG({
+ dbgs() << " pair is " << getMachOARM64RelocationKindName(*Kind)
+ << "`\n";
+ });
+
+ // Find the address of the value to fix up.
+ JITTargetAddress PairedFixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ if (PairedFixupAddress != FixupAddress)
+ return make_error<JITLinkError>("Paired relocation points at "
+ "different target");
+ }
+
+ switch (*Kind) {
+ case Branch26: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0x7fffffff) != 0x14000000)
+ return make_error<JITLinkError>("BRANCH26 target is not a B or BL "
+ "instruction with a zero addend");
+ break;
+ }
+ case Pointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ break;
+ case Pointer64Anon: {
+ JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case Page21:
+ case GOTPage21: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xffffffe0) != 0x90000000)
+ return make_error<JITLinkError>("PAGE21/GOTPAGE21 target is not an "
+ "ADRP instruction with a zero "
+ "addend");
+ break;
+ }
+ case PageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ break;
+ }
+ case GOTPageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xfffffc00) != 0xf9400000)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target is not an LDR "
+ "immediate instruction with a zero "
+ "addend");
+ break;
+ }
+ case PointerToGOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ break;
+ case Delta32:
+ case Delta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
+ FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getMachOARM64RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+
+ unsigned NumSymbols = 0;
+};
+
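+// BasicGOTAndStubsBuilder is a CRTP base: it is expected to walk the graph's
+// edges and call back into the derived class (isGOTEdge, createGOTEntry,
+// fixGOTEdge, isExternalBranchEdge, createStub, fixExternalBranchEdge) to
+// materialize GOT entries and stubs for this target.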
+class MachO_arm64_GOTAndStubsBuilder
+ : public BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder> {
+public:
+ MachO_arm64_GOTAndStubsBuilder(LinkGraph &G)
+ : BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder>(G) {}
+
+ bool isGOTEdge(Edge &E) const {
+ return E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12 ||
+ E.getKind() == PointerToGOT;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ if (E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12) {
+ // Update the target, but leave the edge addend as-is.
+ E.setTarget(GOTEntry);
+ } else if (E.getKind() == PointerToGOT) {
+ E.setTarget(GOTEntry);
+ E.setKind(Delta32);
+ } else
+ llvm_unreachable("Not a GOT edge?");
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch26 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createStub(Symbol &Target) {
+ auto &StubContentBlock =
+ G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
+ StubContentBlock.addEdge(LDRLiteral19, 0, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 8, true, false);
+ }
+
+ void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
+    assert(E.getKind() == Branch26 && "Not a Branch26 edge?");
+    assert(E.getAddend() == 0 && "Branch26 edge has non-zero addend?");
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection) {
+ auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ StubsSection = &G.createSection("$__STUBS", StubsProt);
+ }
+ return *StubsSection;
+ }
+
+ StringRef getGOTEntryBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent));
+ }
+
+ StringRef getStubBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(StubContent),
+ sizeof(StubContent));
+ }
+
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[8];
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
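+// The GOT entry starts out as eight zero bytes; the Pointer64 edge added in
+// createGOTEntry fills in the target's address when fixups are applied.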
+const uint8_t MachO_arm64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t MachO_arm64_GOTAndStubsBuilder::StubContent[8] = {
+ 0x10, 0x00, 0x00, 0x58, // LDR x16, <literal>
+ 0x00, 0x02, 0x1f, 0xd6 // BR x16
+};
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_arm64 : public JITLinker<MachOJITLinker_arm64> {
+ friend class JITLinker<MachOJITLinker_arm64>;
+
+public:
+ MachOJITLinker_arm64(std::unique_ptr<JITLinkContext> Ctx,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(PassConfig)) {}
+
+private:
+ StringRef getEdgeKindName(Edge::Kind R) const override {
+ return getMachOARM64RelocationKindName(R);
+ }
+
+ Expected<std::unique_ptr<LinkGraph>>
+ buildGraph(MemoryBufferRef ObjBuffer) override {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_arm64(**MachOObj).buildGraph();
+ }
+
+ static Error targetOutOfRangeError(const Block &B, const Edge &E) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Relocation target out of range: ";
+ printEdge(ErrStream, B, E, getMachOARM64RelocationKindName(E.getKind()));
+ ErrStream << "\n";
+ }
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ static unsigned getPageOffset12Shift(uint32_t Instr) {
+ constexpr uint32_t LDRLiteralMask = 0x3ffffc00;
+
+ // Check for a GPR LDR immediate with a zero embedded literal.
+ // If found, the top two bits contain the shift.
+ if ((Instr & LDRLiteralMask) == 0x39400000)
+ return Instr >> 30;
+
+ // Check for a Neon LDR immediate of size 64-bit or less with a zero
+ // embedded literal. If found, the top two bits contain the shift.
+ if ((Instr & LDRLiteralMask) == 0x3d400000)
+ return Instr >> 30;
+
+ // Check for a Neon LDR immediate of size 128-bit with a zero embedded
+ // literal.
+ constexpr uint32_t SizeBitsMask = 0xc0000000;
+ if ((Instr & (LDRLiteralMask | SizeBitsMask)) == 0x3dc00000)
+ return 4;
+
+ return 0;
+ }
+
+ Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
+ using namespace support;
+
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+
+ switch (E.getKind()) {
+ case Branch26: {
+ assert((FixupAddress & 0x3) == 0 && "Branch-inst is not 32-bit aligned");
+
+ int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+
+ if (static_cast<uint64_t>(Value) & 0x3)
+ return make_error<JITLinkError>("Branch26 target is not 32-bit "
+ "aligned");
+
+ if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
+ return targetOutOfRangeError(B, E);
+
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ assert((RawInstr & 0x7fffffff) == 0x14000000 &&
+ "RawInstr isn't a B or BR immediate instruction");
+ uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
+ uint32_t FixedInstr = RawInstr | Imm;
+ *(little32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Pointer32: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(ulittle32_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer64: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case Page21:
+ case GOTPage21: {
+ assert(E.getAddend() == 0 && "PAGE21/GOTPAGE21 with non-zero addend");
+ uint64_t TargetPage =
+ E.getTarget().getAddress() & ~static_cast<uint64_t>(4096 - 1);
+ uint64_t PCPage = B.getAddress() & ~static_cast<uint64_t>(4096 - 1);
+
+ int64_t PageDelta = TargetPage - PCPage;
+ if (PageDelta < -(1 << 30) || PageDelta > ((1 << 30) - 1))
+ return targetOutOfRangeError(B, E);
+
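+      // ADRP encodes bits [32:12] of the page delta as immhi:immlo, with
+      // immlo in instruction bits [30:29] and immhi in bits [23:5].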
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xffffffe0) == 0x90000000 &&
+ "RawInstr isn't an ADRP instruction");
+ uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
+ uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
+ uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case PageOffset12: {
+ assert(E.getAddend() == 0 && "PAGEOFF12 with non-zero addend");
+ uint64_t TargetOffset = E.getTarget().getAddress() & 0xfff;
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ unsigned ImmShift = getPageOffset12Shift(RawInstr);
+
+ if (TargetOffset & ((1 << ImmShift) - 1))
+ return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
+
+ uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case GOTPageOffset12: {
+ assert(E.getAddend() == 0 && "GOTPAGEOF12 with non-zero addend");
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
+ "RawInstr isn't a 64-bit LDR immediate");
+
+ uint32_t TargetOffset = E.getTarget().getAddress() & 0xfff;
+ assert((TargetOffset & 0x7) == 0 && "GOT entry is not 8-byte aligned");
+ uint32_t EncodedImm = (TargetOffset >> 3) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case LDRLiteral19: {
+ assert((FixupAddress & 0x3) == 0 && "LDR is not 32-bit aligned");
+ assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
+ int64_t Delta = E.getTarget().getAddress() - FixupAddress;
+ if (Delta & 0x3)
+ return make_error<JITLinkError>("LDR literal target is not 32-bit "
+ "aligned");
+ if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
+ return targetOutOfRangeError(B, E);
+
+ uint32_t EncodedImm = (static_cast<uint32_t>(Delta) >> 2) << 5;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Delta32:
+ case Delta64:
+ case NegDelta32:
+ case NegDelta64: {
+ int64_t Value;
+ if (E.getKind() == Delta32 || E.getKind() == Delta64)
+ Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ else
+ Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+
+ if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ } else
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ default:
+ llvm_unreachable("Unrecognized edge kind");
+ }
+
+ return Error::success();
+ }
+
+ uint64_t NullValue = 0;
+};
+
+void jitLink_MachO_arm64(std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ Triple TT("arm64-apple-ios");
+
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
+ MachO_arm64_GOTAndStubsBuilder(G).run();
+ return Error::success();
+ });
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(TT, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_arm64::link(std::move(Ctx), std::move(Config));
+}
+
+StringRef getMachOARM64RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case Branch26:
+ return "Branch26";
+ case Pointer64:
+ return "Pointer64";
+ case Pointer64Anon:
+ return "Pointer64Anon";
+ case Page21:
+ return "Page21";
+ case PageOffset12:
+ return "PageOffset12";
+ case GOTPage21:
+ return "GOTPage21";
+ case GOTPageOffset12:
+ return "GOTPageOffset12";
+ case PointerToGOT:
+ return "PointerToGOT";
+ case PairedAddend:
+ return "PairedAddend";
+ case LDRLiteral19:
+ return "LDRLiteral19";
+ case Delta32:
+ return "Delta32";
+ case Delta64:
+ return "Delta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case NegDelta64:
+ return "NegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
new file mode 100644
index 0000000000000..d83787ffd5986
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
@@ -0,0 +1,635 @@
+//===--- MachO_x86_64.cpp - JIT linker implementation for MachO/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+
+#include "BasicGOTAndStubsBuilder.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::MachO_x86_64_Edges;
+
+namespace {
+
+class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_x86_64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj) {
+ addCustomSectionParser(
+ "__eh_frame", [this](NormalizedSection &EHFrameSection) {
+ if (!EHFrameSection.Data)
+ return make_error<JITLinkError>(
+ "__eh_frame section is marked zero-fill");
+ return MachOEHFrameBinaryParser(
+ *this, EHFrameSection.Address,
+ StringRef(EHFrameSection.Data, EHFrameSection.Size),
+ *EHFrameSection.GraphSection, 8, 4, NegDelta32, Delta64)
+ .addToGraph();
+ });
+ }
+
+private:
+ static Expected<MachOX86RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::X86_64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? Pointer64 : Pointer64Anon;
+ else if (RI.r_extern && RI.r_length == 2)
+ return Pointer32;
+ }
+ break;
+ case MachO::X86_64_RELOC_SIGNED:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32 : PCRel32Anon;
+ break;
+ case MachO::X86_64_RELOC_BRANCH:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Branch32;
+ break;
+ case MachO::X86_64_RELOC_GOT_LOAD:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PCRel32GOTLoad;
+ break;
+ case MachO::X86_64_RELOC_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PCRel32GOT;
+ break;
+ case MachO::X86_64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'. They may
+ // be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return Delta32;
+ else if (RI.r_length == 3)
+ return Delta64;
+ }
+ break;
+ case MachO::X86_64_RELOC_SIGNED_1:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32Minus1 : PCRel32Minus1Anon;
+ break;
+ case MachO::X86_64_RELOC_SIGNED_2:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32Minus2 : PCRel32Minus2Anon;
+ break;
+ case MachO::X86_64_RELOC_SIGNED_4:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32Minus4 : PCRel32Minus4Anon;
+ break;
+ case MachO::X86_64_RELOC_TLV:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PCRel32TLV;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported x86-64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ MachO::relocation_info
+ getRelocationInfo(const object::relocation_iterator RelItr) {
+ MachO::any_relocation_info ARI =
+ getObject().getRelocation(RelItr->getRawDataRefImpl());
+ MachO::relocation_info RI;
+ memcpy(&RI, &ARI, sizeof(MachO::relocation_info));
+ return RI;
+ }
+
+ using PairRelocInfo = std::tuple<MachOX86RelocationKind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ JITTargetAddress FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("x86_64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("x86_64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of x86_64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ if (auto ToSymbolOrErr = findSymbolByAddress(FixupValue))
+ ToSymbol = &*ToSymbolOrErr;
+ else
+ return ToSymbolOrErr.takeError();
+ FixupValue -= ToSymbol->getAddress();
+ }
+
+ MachOX86RelocationKind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = FromSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry chains)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ for (auto &S : Obj.sections()) {
+
+ JITTargetAddress SectionAddress = S.getAddress();
+
+ if (S.isVirtual()) {
+ if (S.relocation_begin() != S.relocation_end())
+ return make_error<JITLinkError>("Virtual section contains "
+ "relocations");
+ continue;
+ }
+
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Sanity check the relocation kind.
+ auto Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ // Find the address of the value to fix up.
+ JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+ LLVM_DEBUG({
+ dbgs() << "Processing relocation at "
+ << format("0x%016" PRIx64, FixupAddress) << "\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ switch (*Kind) {
+ case Branch32:
+ case PCRel32:
+ case PCRel32GOTLoad:
+ case PCRel32GOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ break;
+ case Pointer64Anon: {
+ JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case PCRel32Minus1:
+ case PCRel32Minus2:
+ case PCRel32Minus4:
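+            // The SIGNED_{1,2,4} variants bake an extra -1/-2/-4 displacement
+            // into the instruction; compensate by adding 1/2/4 back into the
+            // addend. This relies on the PCRel32Minus{1,2,4} kinds being
+            // declared consecutively.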
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent +
+ (1 << (*Kind - PCRel32Minus1));
+ break;
+ case PCRel32Anon: {
+ JITTargetAddress TargetAddress =
+ FixupAddress + 4 + *(const ulittle32_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case PCRel32Minus1Anon:
+ case PCRel32Minus2Anon:
+ case PCRel32Minus4Anon: {
+ JITTargetAddress Delta =
+ static_cast<JITTargetAddress>(1ULL << (*Kind - PCRel32Minus1Anon));
+ JITTargetAddress TargetAddress =
+ FixupAddress + 4 + Delta + *(const ulittle32_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case Delta32:
+ case Delta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
+ FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getMachOX86RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+};
+
+class MachO_x86_64_GOTAndStubsBuilder
+ : public BasicGOTAndStubsBuilder<MachO_x86_64_GOTAndStubsBuilder> {
+public:
+ MachO_x86_64_GOTAndStubsBuilder(LinkGraph &G)
+ : BasicGOTAndStubsBuilder<MachO_x86_64_GOTAndStubsBuilder>(G) {}
+
+ bool isGOTEdge(Edge &E) const {
+ return E.getKind() == PCRel32GOT || E.getKind() == PCRel32GOTLoad;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ assert((E.getKind() == PCRel32GOT || E.getKind() == PCRel32GOTLoad) &&
+ "Not a GOT edge?");
+ E.setKind(PCRel32);
+ E.setTarget(GOTEntry);
+ // Leave the edge addend as-is.
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch32 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createStub(Symbol &Target) {
+ auto &StubContentBlock =
+ G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
+ StubContentBlock.addEdge(PCRel32, 2, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 6, true, false);
+ }
+
+ void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
+ assert(E.getKind() == Branch32 && "Not a Branch32 edge?");
+ assert(E.getAddend() == 0 && "Branch32 edge has non-zero addend?");
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection) {
+ auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ StubsSection = &G.createSection("$__STUBS", StubsProt);
+ }
+ return *StubsSection;
+ }
+
+ StringRef getGOTEntryBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent));
+ }
+
+ StringRef getStubBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(StubContent),
+ sizeof(StubContent));
+ }
+
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[6];
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
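+// Encoding notes: NullGOTEntryContent below is a zero-initialized 8-byte
+// pointer slot; the Pointer64 edge added in createGOTEntry fills it with the
+// target's address at fixup time. StubContent is the machine code for
+// "jmpq *disp32(%rip)" (FF 25 <disp32>); the PCRel32 edge at offset 2 fills
+// in the 4-byte displacement to the stub's GOT entry.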
+const uint8_t MachO_x86_64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t MachO_x86_64_GOTAndStubsBuilder::StubContent[6] = {
+ 0xFF, 0x25, 0x00, 0x00, 0x00, 0x00};
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_x86_64 : public JITLinker<MachOJITLinker_x86_64> {
+ friend class JITLinker<MachOJITLinker_x86_64>;
+
+public:
+ MachOJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(PassConfig)) {}
+
+private:
+ StringRef getEdgeKindName(Edge::Kind R) const override {
+ return getMachOX86RelocationKindName(R);
+ }
+
+ Expected<std::unique_ptr<LinkGraph>>
+ buildGraph(MemoryBufferRef ObjBuffer) override {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_x86_64(**MachOObj).buildGraph();
+ }
+
+ static Error targetOutOfRangeError(const Block &B, const Edge &E) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Relocation target out of range: ";
+ printEdge(ErrStream, B, E, getMachOX86RelocationKindName(E.getKind()));
+ ErrStream << "\n";
+ }
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
+
+ using namespace support;
+
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+
+ switch (E.getKind()) {
+ case Branch32:
+ case PCRel32:
+ case PCRel32Anon: {
+ int64_t Value =
+ E.getTarget().getAddress() - (FixupAddress + 4) + E.getAddend();
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer64:
+ case Pointer64Anon: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case PCRel32Minus1:
+ case PCRel32Minus2:
+ case PCRel32Minus4: {
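+ // The *Minus<N> kinds are used when N immediate bytes follow the 4-byte
+ // displacement field, so the next-instruction address (the base for
+ // RIP-relative addressing) is FixupAddress + 4 + N.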
+ int Delta = 4 + (1 << (E.getKind() - PCRel32Minus1));
+ int64_t Value =
+ E.getTarget().getAddress() - (FixupAddress + Delta) + E.getAddend();
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case PCRel32Minus1Anon:
+ case PCRel32Minus2Anon:
+ case PCRel32Minus4Anon: {
+ int Delta = 4 + (1 << (E.getKind() - PCRel32Minus1Anon));
+ int64_t Value =
+ E.getTarget().getAddress() - (FixupAddress + Delta) + E.getAddend();
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case Delta32:
+ case Delta64:
+ case NegDelta32:
+ case NegDelta64: {
+ int64_t Value;
+ if (E.getKind() == Delta32 || E.getKind() == Delta64)
+ Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ else
+ Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+
+ if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ } else
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer32: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(ulittle32_t *)FixupPtr = Value;
+ break;
+ }
+ default:
+ llvm_unreachable("Unrecognized edge kind");
+ }
+
+ return Error::success();
+ }
+
+ uint64_t NullValue = 0;
+};
+
+void jitLink_MachO_x86_64(std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ Triple TT("x86_64-apple-macosx");
+
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
+ MachO_x86_64_GOTAndStubsBuilder(G).run();
+ return Error::success();
+ });
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(TT, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_x86_64::link(std::move(Ctx), std::move(Config));
+}
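+
+// Illustrative call site (a sketch; the JITLinkContext implementation is
+// assumed to be supplied by the client, e.g. by an object linking layer):
+//
+//   std::unique_ptr<JITLinkContext> Ctx = /* client-provided context */;
+//   jitLink_MachO_x86_64(std::move(Ctx)); // builds the graph, runs passes,
+//                                         // links, and reports results
+//                                         // through the Ctx callbacks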
+
+StringRef getMachOX86RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case Branch32:
+ return "Branch32";
+ case Pointer32:
+ return "Pointer32";
+ case Pointer64:
+ return "Pointer64";
+ case Pointer64Anon:
+ return "Pointer64Anon";
+ case PCRel32:
+ return "PCRel32";
+ case PCRel32Minus1:
+ return "PCRel32Minus1";
+ case PCRel32Minus2:
+ return "PCRel32Minus2";
+ case PCRel32Minus4:
+ return "PCRel32Minus4";
+ case PCRel32Anon:
+ return "PCRel32Anon";
+ case PCRel32Minus1Anon:
+ return "PCRel32Minus1Anon";
+ case PCRel32Minus2Anon:
+ return "PCRel32Minus2Anon";
+ case PCRel32Minus4Anon:
+ return "PCRel32Minus4Anon";
+ case PCRel32GOTLoad:
+ return "PCRel32GOTLoad";
+ case PCRel32GOT:
+ return "PCRel32GOT";
+ case PCRel32TLV:
+ return "PCRel32TLV";
+ case Delta32:
+ return "Delta32";
+ case Delta64:
+ return "Delta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case NegDelta64:
+ return "NegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
new file mode 100644
index 0000000000000..94741f5f01d5c
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -0,0 +1,679 @@
+//===-- MCJIT.cpp - MC-based Just-in-Time Compiler ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCJIT.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <mutex>
+
+using namespace llvm;
+
+namespace {
+
+static struct RegisterJIT {
+ RegisterJIT() { MCJIT::Register(); }
+} JITRegistrator;
+
+} // anonymous namespace
+
+extern "C" void LLVMLinkInMCJIT() {
+}
+
+ExecutionEngine *
+MCJIT::createJIT(std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) {
+ // Try to register the program as a source of symbols to resolve against.
+ //
+ // FIXME: Don't do this here.
+ sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
+
+ if (!MemMgr || !Resolver) {
+ auto RTDyldMM = std::make_shared<SectionMemoryManager>();
+ if (!MemMgr)
+ MemMgr = RTDyldMM;
+ if (!Resolver)
+ Resolver = RTDyldMM;
+ }
+
+ return new MCJIT(std::move(M), std::move(TM), std::move(MemMgr),
+ std::move(Resolver));
+}
+
+MCJIT::MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> TM,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver)
+ : ExecutionEngine(TM->createDataLayout(), std::move(M)), TM(std::move(TM)),
+ Ctx(nullptr), MemMgr(std::move(MemMgr)),
+ Resolver(*this, std::move(Resolver)), Dyld(*this->MemMgr, this->Resolver),
+ ObjCache(nullptr) {
+ // FIXME: We are managing our modules, so we do not want the base class
+ // ExecutionEngine to manage them as well. To avoid double destruction
+ // of the first (and only) module added in ExecutionEngine constructor
+ // we remove it from EE and will destruct it ourselves.
+ //
+ // It may make sense to move our module manager (based on SmallPtrSet) back
+ // into EE if the JIT and Interpreter can live with it.
+ // If so, additional functions: addModule, removeModule, FindFunctionNamed,
+ // runStaticConstructorsDestructors could be moved back to EE as well.
+ //
+ std::unique_ptr<Module> First = std::move(Modules[0]);
+ Modules.clear();
+
+ if (First->getDataLayout().isDefault())
+ First->setDataLayout(getDataLayout());
+
+ OwnedModules.addModule(std::move(First));
+ RegisterJITEventListener(JITEventListener::createGDBRegistrationListener());
+}
+
+MCJIT::~MCJIT() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ Dyld.deregisterEHFrames();
+
+ for (auto &Obj : LoadedObjects)
+ if (Obj)
+ notifyFreeingObject(*Obj);
+
+ Archives.clear();
+}
+
+void MCJIT::addModule(std::unique_ptr<Module> M) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ if (M->getDataLayout().isDefault())
+ M->setDataLayout(getDataLayout());
+
+ OwnedModules.addModule(std::move(M));
+}
+
+bool MCJIT::removeModule(Module *M) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return OwnedModules.removeModule(M);
+}
+
+void MCJIT::addObjectFile(std::unique_ptr<object::ObjectFile> Obj) {
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L = Dyld.loadObject(*Obj);
+ if (Dyld.hasError())
+ report_fatal_error(Dyld.getErrorString());
+
+ notifyObjectLoaded(*Obj, *L);
+
+ LoadedObjects.push_back(std::move(Obj));
+}
+
+void MCJIT::addObjectFile(object::OwningBinary<object::ObjectFile> Obj) {
+ std::unique_ptr<object::ObjectFile> ObjFile;
+ std::unique_ptr<MemoryBuffer> MemBuf;
+ std::tie(ObjFile, MemBuf) = Obj.takeBinary();
+ addObjectFile(std::move(ObjFile));
+ Buffers.push_back(std::move(MemBuf));
+}
+
+void MCJIT::addArchive(object::OwningBinary<object::Archive> A) {
+ Archives.push_back(std::move(A));
+}
+
+void MCJIT::setObjectCache(ObjectCache* NewCache) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ ObjCache = NewCache;
+}
+
+std::unique_ptr<MemoryBuffer> MCJIT::emitObject(Module *M) {
+ assert(M && "Can not emit a null module");
+
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Materialize all globals in the module if they have not been
+ // materialized already.
+ cantFail(M->materializeAll());
+
+ // This must be a module which has already been added but not loaded to this
+ // MCJIT instance, since these conditions are tested by our caller,
+ // generateCodeForModule.
+
+ legacy::PassManager PM;
+
+ // The RuntimeDyld will take ownership of this shortly
+ SmallVector<char, 4096> ObjBufferSV;
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM->addPassesToEmitMC(PM, Ctx, ObjStream, !getVerifyModules()))
+ report_fatal_error("Target does not support MC emission!");
+
+ // Run the passes over the module; the generated object bytes land in
+ // ObjBufferSV via ObjStream (raw_svector_ostream writes straight through,
+ // so no explicit flush is needed).
+ PM.run(*M);
+
+ std::unique_ptr<MemoryBuffer> CompiledObjBuffer(
+ new SmallVectorMemoryBuffer(std::move(ObjBufferSV)));
+
+ // If we have an object cache, tell it about the new object.
+ // Note that we're using the compiled image, not the loaded image (as below).
+ if (ObjCache) {
+ // MemoryBuffer is a thin wrapper around the actual memory, so it's OK
+ // to create a temporary object here and delete it after the call.
+ MemoryBufferRef MB = CompiledObjBuffer->getMemBufferRef();
+ ObjCache->notifyObjectCompiled(M, MB);
+ }
+
+ return CompiledObjBuffer;
+}
+
+void MCJIT::generateCodeForModule(Module *M) {
+ // Get a thread lock to make sure we aren't trying to load multiple times
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // This must be a module which has already been added to this MCJIT instance.
+ assert(OwnedModules.ownsModule(M) &&
+ "MCJIT::generateCodeForModule: Unknown module.");
+
+ // Re-compilation is not supported
+ if (OwnedModules.hasModuleBeenLoaded(M))
+ return;
+
+ std::unique_ptr<MemoryBuffer> ObjectToLoad;
+ // Try to load the pre-compiled object from cache if possible
+ if (ObjCache)
+ ObjectToLoad = ObjCache->getObject(M);
+
+ assert(M->getDataLayout() == getDataLayout() && "DataLayout Mismatch");
+
+ // If the cache did not contain a suitable object, compile the object
+ if (!ObjectToLoad) {
+ ObjectToLoad = emitObject(M);
+ assert(ObjectToLoad && "Compilation did not produce an object.");
+ }
+
+ // Load the object into the dynamic linker.
+ // MCJIT now owns the ObjectImage pointer (via its LoadedObjects list).
+ Expected<std::unique_ptr<object::ObjectFile>> LoadedObject =
+ object::ObjectFile::createObjectFile(ObjectToLoad->getMemBufferRef());
+ if (!LoadedObject) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(LoadedObject.takeError(), OS);
+ OS.flush();
+ report_fatal_error(Buf);
+ }
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L =
+ Dyld.loadObject(*LoadedObject.get());
+
+ if (Dyld.hasError())
+ report_fatal_error(Dyld.getErrorString());
+
+ notifyObjectLoaded(*LoadedObject.get(), *L);
+
+ Buffers.push_back(std::move(ObjectToLoad));
+ LoadedObjects.push_back(std::move(*LoadedObject));
+
+ OwnedModules.markModuleAsLoaded(M);
+}
+
+void MCJIT::finalizeLoadedModules() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Resolve any outstanding relocations.
+ Dyld.resolveRelocations();
+
+ OwnedModules.markAllLoadedModulesAsFinalized();
+
+ // Register EH frame data for any module we own which has been loaded
+ Dyld.registerEHFrames();
+
+ // Set page permissions.
+ MemMgr->finalizeMemory();
+}
+
+// FIXME: Rename this.
+void MCJIT::finalizeObject() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // generateCodeForModule is going to move modules out of the 'added' list,
+ // so copy the list before iterating over it:
+ SmallVector<Module*, 16> ModsToAdd;
+ for (auto M : OwnedModules.added())
+ ModsToAdd.push_back(M);
+
+ for (auto M : ModsToAdd)
+ generateCodeForModule(M);
+
+ finalizeLoadedModules();
+}
+
+void MCJIT::finalizeModule(Module *M) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // This must be a module which has already been added to this MCJIT instance.
+ assert(OwnedModules.ownsModule(M) && "MCJIT::finalizeModule: Unknown module.");
+
+ // If the module hasn't been compiled, just do that.
+ if (!OwnedModules.hasModuleBeenLoaded(M))
+ generateCodeForModule(M);
+
+ finalizeLoadedModules();
+}
+
+JITSymbol MCJIT::findExistingSymbol(const std::string &Name) {
+ if (void *Addr = getPointerToGlobalIfAvailable(Name))
+ return JITSymbol(static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(Addr)),
+ JITSymbolFlags::Exported);
+
+ return Dyld.getSymbol(Name);
+}
+
+Module *MCJIT::findModuleForSymbol(const std::string &Name,
+ bool CheckFunctionsOnly) {
+ StringRef DemangledName = Name;
+ if (DemangledName[0] == getDataLayout().getGlobalPrefix())
+ DemangledName = DemangledName.substr(1);
+
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // If it hasn't already been generated, see if it's in one of our modules.
+ for (ModulePtrSet::iterator I = OwnedModules.begin_added(),
+ E = OwnedModules.end_added();
+ I != E; ++I) {
+ Module *M = *I;
+ Function *F = M->getFunction(DemangledName);
+ if (F && !F->isDeclaration())
+ return M;
+ if (!CheckFunctionsOnly) {
+ GlobalVariable *G = M->getGlobalVariable(DemangledName);
+ if (G && !G->isDeclaration())
+ return M;
+ // FIXME: Do we need to worry about global aliases?
+ }
+ }
+ // We didn't find the symbol in any of our modules.
+ return nullptr;
+}
+
+uint64_t MCJIT::getSymbolAddress(const std::string &Name,
+ bool CheckFunctionsOnly) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, getDataLayout());
+ }
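+ // For example, on MachO targets the global prefix is '_', so a lookup of
+ // "main" queries the JIT symbol tables for "_main".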
+ if (auto Sym = findSymbol(MangledName, CheckFunctionsOnly)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ return *AddrOrErr;
+ else
+ report_fatal_error(AddrOrErr.takeError());
+ } else if (auto Err = Sym.takeError())
+ report_fatal_error(std::move(Err));
+ return 0;
+}
+
+JITSymbol MCJIT::findSymbol(const std::string &Name,
+ bool CheckFunctionsOnly) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // First, check to see if we already have this symbol.
+ if (auto Sym = findExistingSymbol(Name))
+ return Sym;
+
+ for (object::OwningBinary<object::Archive> &OB : Archives) {
+ object::Archive *A = OB.getBinary();
+ // Look for our symbols in each Archive
+ auto OptionalChildOrErr = A->findSym(Name);
+ if (!OptionalChildOrErr)
+ report_fatal_error(OptionalChildOrErr.takeError());
+ auto &OptionalChild = *OptionalChildOrErr;
+ if (OptionalChild) {
+ // FIXME: Support nested archives?
+ Expected<std::unique_ptr<object::Binary>> ChildBinOrErr =
+ OptionalChild->getAsBinary();
+ if (!ChildBinOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(ChildBinOrErr.takeError());
+ continue;
+ }
+ std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
+ if (ChildBin->isObject()) {
+ std::unique_ptr<object::ObjectFile> OF(
+ static_cast<object::ObjectFile *>(ChildBin.release()));
+ // This causes the object file to be loaded.
+ addObjectFile(std::move(OF));
+ // The address should be here now.
+ if (auto Sym = findExistingSymbol(Name))
+ return Sym;
+ }
+ }
+ }
+
+ // If it hasn't already been generated, see if it's in one of our modules.
+ Module *M = findModuleForSymbol(Name, CheckFunctionsOnly);
+ if (M) {
+ generateCodeForModule(M);
+
+ // Check the RuntimeDyld table again, it should be there now.
+ return findExistingSymbol(Name);
+ }
+
+ // If a LazyFunctionCreator is installed, use it to get/create the function.
+ // FIXME: Should we instead have a LazySymbolCreator callback?
+ if (LazyFunctionCreator) {
+ auto Addr = static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(LazyFunctionCreator(Name)));
+ return JITSymbol(Addr, JITSymbolFlags::Exported);
+ }
+
+ return nullptr;
+}
+
+uint64_t MCJIT::getGlobalValueAddress(const std::string &Name) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Result = getSymbolAddress(Name, false);
+ if (Result != 0)
+ finalizeLoadedModules();
+ return Result;
+}
+
+uint64_t MCJIT::getFunctionAddress(const std::string &Name) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Result = getSymbolAddress(Name, true);
+ if (Result != 0)
+ finalizeLoadedModules();
+ return Result;
+}
+
+// Deprecated. Use getFunctionAddress instead.
+void *MCJIT::getPointerToFunction(Function *F) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ Mangler Mang;
+ SmallString<128> Name;
+ TM->getNameWithPrefix(Name, F, Mang);
+
+ if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
+ bool AbortOnFailure = !F->hasExternalWeakLinkage();
+ void *Addr = getPointerToNamedFunction(Name, AbortOnFailure);
+ updateGlobalMapping(F, Addr);
+ return Addr;
+ }
+
+ Module *M = F->getParent();
+ bool HasBeenAddedButNotLoaded = OwnedModules.hasModuleBeenAddedButNotLoaded(M);
+
+ // Make sure the relevant module has been compiled and loaded.
+ if (HasBeenAddedButNotLoaded)
+ generateCodeForModule(M);
+ else if (!OwnedModules.hasModuleBeenLoaded(M)) {
+ // If this function doesn't belong to one of our modules, we're done.
+ // FIXME: Asking for the pointer to a function that hasn't been registered,
+ // and isn't a declaration (which is handled above) should probably
+ // be an assertion.
+ return nullptr;
+ }
+
+ // FIXME: Should the Dyld be retaining module information? Probably not.
+ //
+ // This is the accessor for the target address, so make sure to check the
+ // load address of the symbol, not the local address.
+ return (void*)Dyld.getSymbol(Name).getAddress();
+}
+
+void MCJIT::runStaticConstructorsDestructorsInModulePtrSet(
+ bool isDtors, ModulePtrSet::iterator I, ModulePtrSet::iterator E) {
+ for (; I != E; ++I) {
+ ExecutionEngine::runStaticConstructorsDestructors(**I, isDtors);
+ }
+}
+
+void MCJIT::runStaticConstructorsDestructors(bool isDtors) {
+ // Execute global ctors/dtors for each module in the program.
+ runStaticConstructorsDestructorsInModulePtrSet(
+ isDtors, OwnedModules.begin_added(), OwnedModules.end_added());
+ runStaticConstructorsDestructorsInModulePtrSet(
+ isDtors, OwnedModules.begin_loaded(), OwnedModules.end_loaded());
+ runStaticConstructorsDestructorsInModulePtrSet(
+ isDtors, OwnedModules.begin_finalized(), OwnedModules.end_finalized());
+}
+
+Function *MCJIT::FindFunctionNamedInModulePtrSet(StringRef FnName,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E) {
+ for (; I != E; ++I) {
+ Function *F = (*I)->getFunction(FnName);
+ if (F && !F->isDeclaration())
+ return F;
+ }
+ return nullptr;
+}
+
+GlobalVariable *MCJIT::FindGlobalVariableNamedInModulePtrSet(StringRef Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E) {
+ for (; I != E; ++I) {
+ GlobalVariable *GV = (*I)->getGlobalVariable(Name, AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
+
+Function *MCJIT::FindFunctionNamed(StringRef FnName) {
+ Function *F = FindFunctionNamedInModulePtrSet(
+ FnName, OwnedModules.begin_added(), OwnedModules.end_added());
+ if (!F)
+ F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_loaded(),
+ OwnedModules.end_loaded());
+ if (!F)
+ F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_finalized(),
+ OwnedModules.end_finalized());
+ return F;
+}
+
+GlobalVariable *MCJIT::FindGlobalVariableNamed(StringRef Name, bool AllowInternal) {
+ GlobalVariable *GV = FindGlobalVariableNamedInModulePtrSet(
+ Name, AllowInternal, OwnedModules.begin_added(), OwnedModules.end_added());
+ if (!GV)
+ GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_loaded(),
+ OwnedModules.end_loaded());
+ if (!GV)
+ GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_finalized(),
+ OwnedModules.end_finalized());
+ return GV;
+}
+
+GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
+ assert(F && "Function *F was null at entry to run()");
+
+ void *FPtr = getPointerToFunction(F);
+ finalizeModule(F->getParent());
+ assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+ FunctionType *FTy = F->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+
+ assert((FTy->getNumParams() == ArgValues.size() ||
+ (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+ "Wrong number of arguments passed into function!");
+ assert(FTy->getNumParams() == ArgValues.size() &&
+ "This doesn't support passing arguments through varargs (yet)!");
+
+ // Handle some common cases first. These cases correspond to common `main'
+ // prototypes.
+ if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
+ switch (ArgValues.size()) {
+ case 3:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy() &&
+ FTy->getParamType(2)->isPointerTy()) {
+ int (*PF)(int, char **, const char **) =
+ (int(*)(int, char **, const char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1]),
+ (const char **)GVTOP(ArgValues[2])));
+ return rv;
+ }
+ break;
+ case 2:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy()) {
+ int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1])));
+ return rv;
+ }
+ break;
+ case 1:
+ if (FTy->getNumParams() == 1 &&
+ FTy->getParamType(0)->isIntegerTy(32)) {
+ GenericValue rv;
+ int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+ return rv;
+ }
+ break;
+ }
+ }
+
+ // Next, handle the cases where no arguments are passed.
+ if (ArgValues.empty()) {
+ GenericValue rv;
+ switch (RetTy->getTypeID()) {
+ default: llvm_unreachable("Unknown return type for function call!");
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+ if (BitWidth == 1)
+ rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 8)
+ rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 16)
+ rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 32)
+ rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 64)
+ rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
+ else
+ llvm_unreachable("Integer types > 64 bits not supported");
+ return rv;
+ }
+ case Type::VoidTyID:
+ rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+ return rv;
+ case Type::FloatTyID:
+ rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::DoubleTyID:
+ rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ llvm_unreachable("long double not supported yet");
+ case Type::PointerTyID:
+ return PTOGV(((void*(*)())(intptr_t)FPtr)());
+ }
+ }
+
+ report_fatal_error("MCJIT::runFunction does not support full-featured "
+ "argument passing. Please use "
+ "ExecutionEngine::getFunctionAddress and cast the result "
+ "to the desired function pointer type.");
+}
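+
+// For anything beyond the cases above, the supported pattern is the one the
+// error message suggests (illustrative sketch with a hypothetical "foo"):
+//
+//   uint64_t Addr = EE->getFunctionAddress("foo");
+//   auto *Foo = (int (*)(int))(uintptr_t)Addr;
+//   int Result = Foo(42);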
+
+void *MCJIT::getPointerToNamedFunction(StringRef Name, bool AbortOnFailure) {
+ if (!isSymbolSearchingDisabled()) {
+ if (auto Sym = Resolver.findSymbol(Name)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ return reinterpret_cast<void*>(
+ static_cast<uintptr_t>(*AddrOrErr));
+ } else if (auto Err = Sym.takeError())
+ report_fatal_error(std::move(Err));
+ }
+
+ // If a LazyFunctionCreator is installed, use it to get/create the function.
+ if (LazyFunctionCreator)
+ if (void *RP = LazyFunctionCreator(Name))
+ return RP;
+
+ if (AbortOnFailure) {
+ report_fatal_error("Program used external function '"+Name+
+ "' which could not be resolved!");
+ }
+ return nullptr;
+}
+
+void MCJIT::RegisterJITEventListener(JITEventListener *L) {
+ if (!L)
+ return;
+ std::lock_guard<sys::Mutex> locked(lock);
+ EventListeners.push_back(L);
+}
+
+void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
+ if (!L)
+ return;
+ std::lock_guard<sys::Mutex> locked(lock);
+ auto I = find(reverse(EventListeners), L);
+ if (I != EventListeners.rend()) {
+ std::swap(*I, EventListeners.back());
+ EventListeners.pop_back();
+ }
+}
+
+void MCJIT::notifyObjectLoaded(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+ uint64_t Key =
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Obj.getData().data()));
+ std::lock_guard<sys::Mutex> locked(lock);
+ MemMgr->notifyObjectLoaded(this, Obj);
+ for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
+ EventListeners[I]->notifyObjectLoaded(Key, Obj, L);
+ }
+}
+
+void MCJIT::notifyFreeingObject(const object::ObjectFile &Obj) {
+ uint64_t Key =
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Obj.getData().data()));
+ std::lock_guard<sys::Mutex> locked(lock);
+ for (JITEventListener *L : EventListeners)
+ L->notifyFreeingObject(Key);
+}
+
+JITSymbol
+LinkingSymbolResolver::findSymbol(const std::string &Name) {
+ auto Result = ParentEngine.findSymbol(Name, false);
+ if (Result)
+ return Result;
+ if (ParentEngine.isSymbolSearchingDisabled())
+ return nullptr;
+ return ClientResolver->findSymbol(Name);
+}
+
+void LinkingSymbolResolver::anchor() {}
diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
new file mode 100644
index 0000000000000..77097fc0d17e4
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -0,0 +1,343 @@
+//===-- MCJIT.h - Class definition for the MCJIT ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
+#define LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+
+namespace llvm {
+class MCJIT;
+
+// This is a helper class that the MCJIT execution engine uses for linking
+// functions across modules that it owns. It aggregates the memory manager
+// that is passed in to the MCJIT constructor and defers most functionality
+// to that object.
+class LinkingSymbolResolver : public LegacyJITSymbolResolver {
+public:
+ LinkingSymbolResolver(MCJIT &Parent,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver)
+ : ParentEngine(Parent), ClientResolver(std::move(Resolver)) {}
+
+ JITSymbol findSymbol(const std::string &Name) override;
+
+ // MCJIT doesn't support logical dylibs.
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) override {
+ return nullptr;
+ }
+
+private:
+ MCJIT &ParentEngine;
+ std::shared_ptr<LegacyJITSymbolResolver> ClientResolver;
+ void anchor() override;
+};
+
+// About Module states: added->loaded->finalized.
+//
+// The purpose of the "added" state is having modules in standby. (added=known
+// but not compiled). The idea is that you can add a module to provide function
+// definitions but if nothing in that module is referenced by a module in which
+// a function is executed (note the wording here because it's not exactly the
+// ideal case) then the module never gets compiled. This is sort of lazy
+// compilation.
+//
+// The purpose of the "loaded" state (loaded=compiled and required sections
+// copied into local memory but not yet ready for execution) is to have an
+// intermediate state wherein clients can remap the addresses of sections, using
+// MCJIT::mapSectionAddress, (in preparation for later copying to a new location
+// or an external process) before relocations and page permissions are applied.
+//
+// It might not be obvious at first glance, but the "remote-mcjit" case in the
+// lli tool does this. In that case, the intermediate action is taken by the
+// RemoteMemoryManager in response to the notifyObjectLoaded function being
+// called.
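+//
+// A hypothetical client walks the states like this:
+//
+//   EE->addModule(std::move(M));      // "added": owned but not yet compiled
+//   EE->getFunctionAddress("main");   // compiles and loads the defining
+//                                     // module, then finalizes it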
+
+class MCJIT : public ExecutionEngine {
+ MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> tm,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver);
+
+ typedef llvm::SmallPtrSet<Module *, 4> ModulePtrSet;
+
+ class OwningModuleContainer {
+ public:
+ OwningModuleContainer() {
+ }
+ ~OwningModuleContainer() {
+ freeModulePtrSet(AddedModules);
+ freeModulePtrSet(LoadedModules);
+ freeModulePtrSet(FinalizedModules);
+ }
+
+ ModulePtrSet::iterator begin_added() { return AddedModules.begin(); }
+ ModulePtrSet::iterator end_added() { return AddedModules.end(); }
+ iterator_range<ModulePtrSet::iterator> added() {
+ return make_range(begin_added(), end_added());
+ }
+
+ ModulePtrSet::iterator begin_loaded() { return LoadedModules.begin(); }
+ ModulePtrSet::iterator end_loaded() { return LoadedModules.end(); }
+
+ ModulePtrSet::iterator begin_finalized() { return FinalizedModules.begin(); }
+ ModulePtrSet::iterator end_finalized() { return FinalizedModules.end(); }
+
+ void addModule(std::unique_ptr<Module> M) {
+ AddedModules.insert(M.release());
+ }
+
+ bool removeModule(Module *M) {
+ return AddedModules.erase(M) || LoadedModules.erase(M) ||
+ FinalizedModules.erase(M);
+ }
+
+ bool hasModuleBeenAddedButNotLoaded(Module *M) {
+ return AddedModules.count(M) != 0;
+ }
+
+ bool hasModuleBeenLoaded(Module *M) {
+ // If the module is in either the "loaded" or "finalized" sections it
+ // has been loaded.
+ return (LoadedModules.count(M) != 0 ) || (FinalizedModules.count(M) != 0);
+ }
+
+ bool hasModuleBeenFinalized(Module *M) {
+ return FinalizedModules.count(M) != 0;
+ }
+
+ bool ownsModule(Module* M) {
+ return (AddedModules.count(M) != 0) || (LoadedModules.count(M) != 0) ||
+ (FinalizedModules.count(M) != 0);
+ }
+
+ void markModuleAsLoaded(Module *M) {
+ // This checks against logic errors in the MCJIT implementation.
+ // This function should never be called with either a Module that MCJIT
+ // does not own or a Module that has already been loaded and/or finalized.
+ assert(AddedModules.count(M) &&
+ "markModuleAsLoaded: Module not found in AddedModules");
+
+ // Remove the module from the "Added" set.
+ AddedModules.erase(M);
+
+ // Add the Module to the "Loaded" set.
+ LoadedModules.insert(M);
+ }
+
+ void markModuleAsFinalized(Module *M) {
+ // This checks against logic errors in the MCJIT implementation.
+ // This function should never be called with either a Module that MCJIT
+ // does not own, a Module that has not been loaded or a Module that has
+ // already been finalized.
+ assert(LoadedModules.count(M) &&
+ "markModuleAsFinalized: Module not found in LoadedModules");
+
+ // Remove the module from the "Loaded" section of the list.
+ LoadedModules.erase(M);
+
+ // Add the Module to the "Finalized" section of the list by inserting it
+ // before the 'end' iterator.
+ FinalizedModules.insert(M);
+ }
+
+ void markAllLoadedModulesAsFinalized() {
+ for (ModulePtrSet::iterator I = LoadedModules.begin(),
+ E = LoadedModules.end();
+ I != E; ++I) {
+ Module *M = *I;
+ FinalizedModules.insert(M);
+ }
+ LoadedModules.clear();
+ }
+
+ private:
+ ModulePtrSet AddedModules;
+ ModulePtrSet LoadedModules;
+ ModulePtrSet FinalizedModules;
+
+ void freeModulePtrSet(ModulePtrSet& MPS) {
+ // Go through the module set and delete everything.
+ for (ModulePtrSet::iterator I = MPS.begin(), E = MPS.end(); I != E; ++I) {
+ Module *M = *I;
+ delete M;
+ }
+ MPS.clear();
+ }
+ };
+
+ std::unique_ptr<TargetMachine> TM;
+ MCContext *Ctx;
+ std::shared_ptr<MCJITMemoryManager> MemMgr;
+ LinkingSymbolResolver Resolver;
+ RuntimeDyld Dyld;
+ std::vector<JITEventListener*> EventListeners;
+
+ OwningModuleContainer OwnedModules;
+
+ SmallVector<object::OwningBinary<object::Archive>, 2> Archives;
+ SmallVector<std::unique_ptr<MemoryBuffer>, 2> Buffers;
+
+ SmallVector<std::unique_ptr<object::ObjectFile>, 2> LoadedObjects;
+
+ // An optional ObjectCache to be notified of compiled objects and used to
+ // perform lookup of pre-compiled code to avoid re-compilation.
+ ObjectCache *ObjCache;
+
+ Function *FindFunctionNamedInModulePtrSet(StringRef FnName,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+ GlobalVariable *FindGlobalVariableNamedInModulePtrSet(StringRef Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+ void runStaticConstructorsDestructorsInModulePtrSet(bool isDtors,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+public:
+ ~MCJIT() override;
+
+ /// @name ExecutionEngine interface implementation
+ /// @{
+ void addModule(std::unique_ptr<Module> M) override;
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override;
+ void addObjectFile(object::OwningBinary<object::ObjectFile> O) override;
+ void addArchive(object::OwningBinary<object::Archive> O) override;
+ bool removeModule(Module *M) override;
+
+ /// FindFunctionNamed - Search all of the active modules to find the function
+ /// that defines FnName. This is a very slow operation and shouldn't be used
+ /// for general code.
+ Function *FindFunctionNamed(StringRef FnName) override;
+
+ /// FindGlobalVariableNamed - Search all of the active modules to find the
+ /// global variable that defines Name. This is a very slow operation and
+ /// shouldn't be used for general code.
+ GlobalVariable *FindGlobalVariableNamed(StringRef Name,
+ bool AllowInternal = false) override;
+
+ /// Sets the object manager that MCJIT should use to avoid compilation.
+ void setObjectCache(ObjectCache *manager) override;
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ Dyld.setProcessAllSections(ProcessAllSections);
+ }
+
+ void generateCodeForModule(Module *M) override;
+
+ /// finalizeObject - ensure the module is fully processed and is usable.
+ ///
+ /// It is the user-level function for completing the process of making the
+ /// object usable for execution. It should be called after sections within an
+ /// object have been relocated using mapSectionAddress. When this method is
+ /// called the MCJIT execution engine will reapply relocations for a loaded
+ /// object.
+ /// It is OK to finalize a set of modules, add more modules, and finalize
+ /// again.
+ // FIXME: Do we really need both of these?
+ void finalizeObject() override;
+ virtual void finalizeModule(Module *);
+ void finalizeLoadedModules();
+
+ /// runStaticConstructorsDestructors - This method is used to execute all of
+ /// the static constructors or destructors for a program.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ void runStaticConstructorsDestructors(bool isDtors) override;
+
+ void *getPointerToFunction(Function *F) override;
+
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override;
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
+ Dyld.mapSectionAddress(LocalAddress, TargetAddress);
+ }
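+
+ // Illustrative remote-target sketch (hypothetical addresses): after an
+ // object is loaded, a client may remap each section before finalizing:
+ //   EE->mapSectionAddress(LocalSectionPtr, RemoteAddr);
+ //   EE->finalizeObject(); // relocations now resolve against RemoteAddr
+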
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
+
+ // If successful, these functions will implicitly finalize all loaded objects.
+ // To get a function address within MCJIT without causing a finalize, use
+ // getSymbolAddress.
+ uint64_t getGlobalValueAddress(const std::string &Name) override;
+ uint64_t getFunctionAddress(const std::string &Name) override;
+
+ TargetMachine *getTargetMachine() override { return TM.get(); }
+
+ /// @}
+ /// @name (Private) Registration Interfaces
+ /// @{
+
+ static void Register() {
+ MCJITCtor = createJIT;
+ }
+
+ static ExecutionEngine *
+ createJIT(std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM);
+
+ // @}
+
+ // Takes a mangled name and returns the corresponding JITSymbol (if a
+ // definition of that mangled name has been added to the JIT).
+ JITSymbol findSymbol(const std::string &Name, bool CheckFunctionsOnly);
+
+ // DEPRECATED - Please use findSymbol instead.
+ //
+ // This is not directly exposed via the ExecutionEngine API, but it is
+ // used by the LinkingMemoryManager.
+ //
+ // getSymbolAddress takes an unmangled name and returns the corresponding
+ // JITSymbol if a definition of the name has been added to the JIT.
+ uint64_t getSymbolAddress(const std::string &Name,
+ bool CheckFunctionsOnly);
+
+protected:
+ /// emitObject -- Generate a JITed object in memory from the specified module.
+ /// The module must already be owned by this MCJIT instance;
+ /// generateCodeForModule drives one module at a time through this function.
+ std::unique_ptr<MemoryBuffer> emitObject(Module *M);
+
+ void notifyObjectLoaded(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L);
+ void notifyFreeingObject(const object::ObjectFile &Obj);
+
+ JITSymbol findExistingSymbol(const std::string &Name);
+ Module *findModuleForSymbol(const std::string &Name, bool CheckFunctionsOnly);
+};
+
+} // end llvm namespace
+
+#endif // LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
diff --git a/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp b/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
new file mode 100644
index 0000000000000..bb5d96051da94
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -0,0 +1,188 @@
+//===-- OProfileJITEventListener.cpp - Tell OProfile about JITted code ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object that uses OProfileWrapper to tell
+// oprofile about JITted functions, including source line information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/OProfileWrapper.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/raw_ostream.h"
+#include <dirent.h>
+#include <fcntl.h>
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "oprofile-jit-event-listener"
+
+namespace {
+
+class OProfileJITEventListener : public JITEventListener {
+ std::unique_ptr<OProfileWrapper> Wrapper;
+
+ void initialize();
+ std::map<ObjectKey, OwningBinary<ObjectFile>> DebugObjects;
+
+public:
+ OProfileJITEventListener(std::unique_ptr<OProfileWrapper> LibraryWrapper)
+ : Wrapper(std::move(LibraryWrapper)) {
+ initialize();
+ }
+
+ ~OProfileJITEventListener();
+
+ void notifyObjectLoaded(ObjectKey Key, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+
+ void notifyFreeingObject(ObjectKey Key) override;
+};
+
+void OProfileJITEventListener::initialize() {
+ if (!Wrapper->op_open_agent()) {
+ const std::string err_str = sys::StrError();
+ LLVM_DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str
+ << "\n");
+ } else {
+ LLVM_DEBUG(dbgs() << "Connected to OProfile agent.\n");
+ }
+}
+
+OProfileJITEventListener::~OProfileJITEventListener() {
+ if (Wrapper->isAgentAvailable()) {
+ if (Wrapper->op_close_agent() == -1) {
+ const std::string err_str = sys::StrError();
+ LLVM_DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
+ << err_str << "\n");
+ } else {
+ LLVM_DEBUG(dbgs() << "Disconnected from OProfile agent.\n");
+ }
+ }
+}
+
+void OProfileJITEventListener::notifyObjectLoaded(
+ ObjectKey Key, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+ if (!Wrapper->isAgentAvailable()) {
+ return;
+ }
+
+ OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+ const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
+ std::unique_ptr<DIContext> Context = DWARFContext::create(DebugObj);
+
+ // Use symbol info to iterate functions in the object.
+ for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
+ SymbolRef Sym = P.first;
+ if (!Sym.getType() || *Sym.getType() != SymbolRef::ST_Function)
+ continue;
+
+ Expected<StringRef> NameOrErr = Sym.getName();
+ if (!NameOrErr)
+ continue;
+ StringRef Name = *NameOrErr;
+ Expected<uint64_t> AddrOrErr = Sym.getAddress();
+ if (!AddrOrErr)
+ continue;
+ uint64_t Addr = *AddrOrErr;
+ uint64_t Size = P.second;
+
+ if (Wrapper->op_write_native_code(Name.data(), Addr, (void *)Addr, Size) ==
+ -1) {
+ LLVM_DEBUG(dbgs() << "Failed to tell OProfile about native function "
+ << Name << " at [" << (void *)Addr << "-"
+ << ((char *)Addr + Size) << "]\n");
+ continue;
+ }
+
+ DILineInfoTable Lines = Context->getLineInfoForAddressRange(Addr, Size);
+ size_t i = 0;
+ size_t num_entries = Lines.size();
+ struct debug_line_info *debug_line;
+ debug_line = (struct debug_line_info *)calloc(
+ num_entries, sizeof(struct debug_line_info));
+
+ for (auto &It : Lines) {
+ debug_line[i].vma = (unsigned long)It.first;
+ debug_line[i].lineno = It.second.Line;
+ // Attribute each entry to its own file, not the first entry's file.
+ debug_line[i].filename =
+ const_cast<char *>(It.second.FileName.c_str());
+ ++i;
+ }
+
+ int DebugLineResult = Wrapper->op_write_debug_line_info(
+ (void *)Addr, num_entries, debug_line);
+ // The agent writes the entries out during the call, so the table can be
+ // freed on both the success and the failure path.
+ free(debug_line);
+ if (DebugLineResult == -1) {
+ LLVM_DEBUG(dbgs() << "Failed to tell OProfile about debug object at ["
+ << (void *)Addr << "-" << ((char *)Addr + Size)
+ << "]\n");
+ continue;
+ }
+ }
+
+ DebugObjects[Key] = std::move(DebugObjOwner);
+}
+
+void OProfileJITEventListener::notifyFreeingObject(ObjectKey Key) {
+ if (Wrapper->isAgentAvailable()) {
+
+ // If there was no agent registered when the original object was loaded then
+ // we won't have created a debug object for it, so bail out.
+ if (DebugObjects.find(Key) == DebugObjects.end())
+ return;
+
+ const ObjectFile &DebugObj = *DebugObjects[Key].getBinary();
+
+ // Use symbol info to iterate functions in the object.
+ for (symbol_iterator I = DebugObj.symbol_begin(),
+ E = DebugObj.symbol_end();
+ I != E; ++I) {
+ if (I->getType() && *I->getType() == SymbolRef::ST_Function) {
+ Expected<uint64_t> AddrOrErr = I->getAddress();
+ if (!AddrOrErr)
+ continue;
+ uint64_t Addr = *AddrOrErr;
+
+ if (Wrapper->op_unload_native_code(Addr) == -1) {
+ LLVM_DEBUG(
+ dbgs()
+ << "Failed to tell OProfile about unload of native function at "
+ << (void *)Addr << "\n");
+ continue;
+ }
+ }
+ }
+ }
+
+ DebugObjects.erase(Key);
+}
+
+} // anonymous namespace.
+
+namespace llvm {
+JITEventListener *JITEventListener::createOProfileJITEventListener() {
+ return new OProfileJITEventListener(std::make_unique<OProfileWrapper>());
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreateOProfileJITEventListener(void)
+{
+ return wrap(JITEventListener::createOProfileJITEventListener());
+}
diff --git a/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp b/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
new file mode 100644
index 0000000000000..b78d2531382d0
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
@@ -0,0 +1,267 @@
+//===-- OProfileWrapper.cpp - OProfile JIT API Wrapper implementation -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interface in OProfileWrapper.h. It is responsible
+// for loading the opagent dynamic library when the first call to an op_
+// function occurs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/OProfileWrapper.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+#include <dirent.h>
+#include <fcntl.h>
+#include <mutex>
+#include <stddef.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#define DEBUG_TYPE "oprofile-wrapper"
+
+namespace {
+
+// Global mutex to ensure a single thread initializes oprofile agent.
+llvm::sys::Mutex OProfileInitializationMutex;
+
+} // anonymous namespace
+
+namespace llvm {
+
+OProfileWrapper::OProfileWrapper()
+: Agent(0),
+ OpenAgentFunc(0),
+ CloseAgentFunc(0),
+ WriteNativeCodeFunc(0),
+ WriteDebugLineInfoFunc(0),
+ UnloadNativeCodeFunc(0),
+ MajorVersionFunc(0),
+ MinorVersionFunc(0),
+ IsOProfileRunningFunc(0),
+ Initialized(false) {
+}
+
+bool OProfileWrapper::initialize() {
+ using namespace llvm;
+ using namespace llvm::sys;
+
+ std::lock_guard<sys::Mutex> Guard(OProfileInitializationMutex);
+
+ if (Initialized)
+ return OpenAgentFunc != 0;
+
+ Initialized = true;
+
+ // If the oprofile daemon is not running, don't load the opagent library
+ if (!isOProfileRunning()) {
+ LLVM_DEBUG(dbgs() << "OProfile daemon is not detected.\n");
+ return false;
+ }
+
+ std::string error;
+ // Note that LoadLibraryPermanently returns true on failure.
+ if (DynamicLibrary::LoadLibraryPermanently("libopagent.so", &error)) {
+ LLVM_DEBUG(
+ dbgs()
+ << "OProfile connector library libopagent.so could not be loaded: "
+ << error << "\n");
+ }
+
+ // Get the addresses of the opagent functions
+ OpenAgentFunc = (op_open_agent_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_open_agent");
+ CloseAgentFunc = (op_close_agent_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_close_agent");
+ WriteNativeCodeFunc = (op_write_native_code_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_write_native_code");
+ WriteDebugLineInfoFunc = (op_write_debug_line_info_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_write_debug_line_info");
+ UnloadNativeCodeFunc = (op_unload_native_code_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_unload_native_code");
+ MajorVersionFunc = (op_major_version_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_major_version");
+ MinorVersionFunc = (op_minor_version_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_minor_version");
+
+ // With missing functions, we can do nothing
+ if (!OpenAgentFunc
+ || !CloseAgentFunc
+ || !WriteNativeCodeFunc
+ || !WriteDebugLineInfoFunc
+ || !UnloadNativeCodeFunc) {
+ OpenAgentFunc = 0;
+ CloseAgentFunc = 0;
+ WriteNativeCodeFunc = 0;
+ WriteDebugLineInfoFunc = 0;
+ UnloadNativeCodeFunc = 0;
+ return false;
+ }
+
+ return true;
+}
+
+bool OProfileWrapper::isOProfileRunning() {
+ if (IsOProfileRunningFunc != 0)
+ return IsOProfileRunningFunc();
+ return checkForOProfileProcEntry();
+}
+
+bool OProfileWrapper::checkForOProfileProcEntry() {
+ DIR* ProcDir;
+
+ ProcDir = opendir("/proc");
+ if (!ProcDir)
+ return false;
+
+ // Walk the /proc tree looking for the oprofile daemon
+ struct dirent* Entry;
+ while (0 != (Entry = readdir(ProcDir))) {
+ if (Entry->d_type == DT_DIR) {
+ // Build a path from the current entry name
+ SmallString<256> CmdLineFName;
+ raw_svector_ostream(CmdLineFName) << "/proc/" << Entry->d_name
+ << "/cmdline";
+
+ // Open the cmdline file
+ // S_IRUSR is a mode bit, not an access flag; O_RDONLY is what open expects.
+ int CmdLineFD = open(CmdLineFName.c_str(), O_RDONLY);
+ if (CmdLineFD != -1) {
+ char ExeName[PATH_MAX+1];
+ char* BaseName = 0;
+
+ // Read the cmdline file
+ ssize_t NumRead = read(CmdLineFD, ExeName, PATH_MAX+1);
+ close(CmdLineFD);
+ ssize_t Idx = 0;
+
+ if (NumRead > 0 && ExeName[0] != '/') {
+ BaseName = ExeName;
+ }
+
+ // Find the terminator for the first string
+ while (Idx < NumRead-1 && ExeName[Idx] != 0) {
+ Idx++;
+ }
+
+ // Go back to the last non-null character
+ Idx--;
+
+ // Find the last path separator in the first string
+ while (Idx > 0) {
+ if (ExeName[Idx] == '/') {
+ BaseName = ExeName + Idx + 1;
+ break;
+ }
+ Idx--;
+ }
+
+ // Test this to see if it is the oprofile daemon
+ if (BaseName != 0 && (!strcmp("oprofiled", BaseName) ||
+ !strcmp("operf", BaseName))) {
+ // If it is, we're done
+ closedir(ProcDir);
+ return true;
+ }
+ }
+ }
+ }
+
+ // We've looked through all the files and didn't find the daemon
+ closedir(ProcDir);
+ return false;
+}
+
+bool OProfileWrapper::op_open_agent() {
+ if (!Initialized)
+ initialize();
+
+ if (OpenAgentFunc != 0) {
+ Agent = OpenAgentFunc();
+ return Agent != 0;
+ }
+
+ return false;
+}
+
+int OProfileWrapper::op_close_agent() {
+ if (!Initialized)
+ initialize();
+
+ int ret = -1;
+ if (Agent && CloseAgentFunc) {
+ ret = CloseAgentFunc(Agent);
+ if (ret == 0) {
+ Agent = 0;
+ }
+ }
+ return ret;
+}
+
+bool OProfileWrapper::isAgentAvailable() {
+ return Agent != 0;
+}
+
+int OProfileWrapper::op_write_native_code(const char* Name,
+ uint64_t Addr,
+ void const* Code,
+ const unsigned int Size) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && WriteNativeCodeFunc)
+ return WriteNativeCodeFunc(Agent, Name, Addr, Code, Size);
+
+ return -1;
+}
+
+int OProfileWrapper::op_write_debug_line_info(
+ void const* Code,
+ size_t NumEntries,
+ struct debug_line_info const* Info) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && WriteDebugLineInfoFunc)
+ return WriteDebugLineInfoFunc(Agent, Code, NumEntries, Info);
+
+ return -1;
+}
+
+int OProfileWrapper::op_major_version() {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && MajorVersionFunc)
+ return MajorVersionFunc();
+
+ return -1;
+}
+
+int OProfileWrapper::op_minor_version() {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && MinorVersionFunc)
+ return MinorVersionFunc();
+
+ return -1;
+}
+
+int OProfileWrapper::op_unload_native_code(uint64_t Addr) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && UnloadNativeCodeFunc)
+ return UnloadNativeCodeFunc(Agent, Addr);
+
+ return -1;
+}
+
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp b/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
new file mode 100644
index 0000000000000..75ddbc30445d2
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
@@ -0,0 +1,319 @@
+//===----- CompileOnDemandLayer.cpp - Lazily emit IR on first call --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+static ThreadSafeModule extractSubModule(ThreadSafeModule &TSM,
+ StringRef Suffix,
+ GVPredicate ShouldExtract) {
+
+ auto DeleteExtractedDefs = [](GlobalValue &GV) {
+ // Bump the linkage: this global will be provided by the external module.
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+
+ // Delete the definition in the source module.
+ if (isa<Function>(GV)) {
+ auto &F = cast<Function>(GV);
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ } else if (isa<GlobalVariable>(GV)) {
+ cast<GlobalVariable>(GV).setInitializer(nullptr);
+ } else if (isa<GlobalAlias>(GV)) {
+ // We need to turn deleted aliases into function or variable decls based
+ // on the type of their aliasee.
+ auto &A = cast<GlobalAlias>(GV);
+ Constant *Aliasee = A.getAliasee();
+ assert(A.hasName() && "Anonymous alias?");
+ assert(Aliasee->hasName() && "Anonymous aliasee");
+ std::string AliasName = A.getName();
+
+ if (isa<Function>(Aliasee)) {
+ auto *F = cloneFunctionDecl(*A.getParent(), *cast<Function>(Aliasee));
+ A.replaceAllUsesWith(F);
+ A.eraseFromParent();
+ F->setName(AliasName);
+ } else if (isa<GlobalVariable>(Aliasee)) {
+ auto *G = cloneGlobalVariableDecl(*A.getParent(),
+ *cast<GlobalVariable>(Aliasee));
+ A.replaceAllUsesWith(G);
+ A.eraseFromParent();
+ G->setName(AliasName);
+ } else
+ llvm_unreachable("Alias to unsupported type");
+ } else
+ llvm_unreachable("Unsupported global type");
+ };
+
+ auto NewTSM = cloneToNewContext(TSM, ShouldExtract, DeleteExtractedDefs);
+ NewTSM.withModuleDo([&](Module &M) {
+ M.setModuleIdentifier((M.getModuleIdentifier() + Suffix).str());
+ });
+
+ return NewTSM;
+}
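+// Example (sketch, hypothetical names): extracting just "foo" from a module.
+// The returned module receives foo's definition, while the source TSM keeps
+// only an external declaration:
+//
+//   auto OnlyFoo = [](const GlobalValue &GV) { return GV.getName() == "foo"; };
+//   ThreadSafeModule FooTSM = extractSubModule(TSM, ".foo", OnlyFoo);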
+
+namespace llvm {
+namespace orc {
+
+class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
+public:
+ PartitioningIRMaterializationUnit(ExecutionSession &ES, ThreadSafeModule TSM,
+ VModuleKey K, CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(ES, std::move(TSM), std::move(K)),
+ Parent(Parent) {}
+
+ PartitioningIRMaterializationUnit(
+ ThreadSafeModule TSM, VModuleKey K, SymbolFlagsMap SymbolFlags,
+ SymbolNameToDefinitionMap SymbolToDefinition,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(std::move(TSM), std::move(K),
+ std::move(SymbolFlags),
+ std::move(SymbolToDefinition)),
+ Parent(Parent) {}
+
+private:
+ void materialize(MaterializationResponsibility R) override {
+ Parent.emitPartition(std::move(R), std::move(TSM),
+ std::move(SymbolToDefinition));
+ }
+
+ void discard(const JITDylib &V, const SymbolStringPtr &Name) override {
+ // All original symbols were materialized by the CODLayer and should be
+ // final. The function bodies provided by M should never be overridden.
+ llvm_unreachable("Discard should never be called on an "
+ "ExtractingIRMaterializationUnit");
+ }
+
+ mutable std::mutex SourceModuleMutex;
+ CompileOnDemandLayer &Parent;
+};
+
+Optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileRequested(GlobalValueSet Requested) {
+ return std::move(Requested);
+}
+
+Optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileWholeModule(GlobalValueSet Requested) {
+ return None;
+}
+
+CompileOnDemandLayer::CompileOnDemandLayer(
+ ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
+ IndirectStubsManagerBuilder BuildIndirectStubsManager)
+ : IRLayer(ES), BaseLayer(BaseLayer), LCTMgr(LCTMgr),
+ BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}
+
+void CompileOnDemandLayer::setPartitionFunction(PartitionFunction Partition) {
+ this->Partition = std::move(Partition);
+}
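+// Example (sketch, hypothetical CODLayer instance): choose between per-call
+// lazy compilation and whole-module compilation:
+//
+//   CODLayer.setPartitionFunction(CompileOnDemandLayer::compileRequested);
+//   // or: CompileOnDemandLayer::compileWholeModule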
+
+void CompileOnDemandLayer::setImplMap(ImplSymbolMap *Imp) {
+ this->AliaseeImpls = Imp;
+}
+void CompileOnDemandLayer::emit(MaterializationResponsibility R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Null module");
+
+ auto &ES = getExecutionSession();
+
+ // Sort the callables and non-callables, build re-exports and lodge the
+ // actual module with the implementation dylib.
+ auto &PDR = getPerDylibResources(R.getTargetJITDylib());
+
+ SymbolAliasMap NonCallables;
+ SymbolAliasMap Callables;
+ TSM.withModuleDo([&](Module &M) {
+ // First, do some cleanup on the module:
+ cleanUpModule(M);
+
+ MangleAndInterner Mangle(ES, M.getDataLayout());
+ for (auto &GV : M.global_values()) {
+ if (GV.isDeclaration() || GV.hasLocalLinkage() ||
+ GV.hasAppendingLinkage())
+ continue;
+
+ auto Name = Mangle(GV.getName());
+ auto Flags = JITSymbolFlags::fromGlobalValue(GV);
+ if (Flags.isCallable())
+ Callables[Name] = SymbolAliasMapEntry(Name, Flags);
+ else
+ NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
+ }
+ });
+
+ // Create a partitioning materialization unit and lodge it with the
+ // implementation dylib.
+ if (auto Err = PDR.getImplDylib().define(
+ std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, std::move(TSM), R.getVModuleKey(), *this))) {
+ ES.reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ R.replace(reexports(PDR.getImplDylib(), std::move(NonCallables), true));
+ R.replace(lazyReexports(LCTMgr, PDR.getISManager(), PDR.getImplDylib(),
+ std::move(Callables), AliaseeImpls));
+}
+
+CompileOnDemandLayer::PerDylibResources &
+CompileOnDemandLayer::getPerDylibResources(JITDylib &TargetD) {
+ auto I = DylibResources.find(&TargetD);
+ if (I == DylibResources.end()) {
+ auto &ImplD = getExecutionSession().createJITDylib(
+ TargetD.getName() + ".impl", false);
+ TargetD.withSearchOrderDo([&](const JITDylibSearchList &TargetSearchOrder) {
+ auto NewSearchOrder = TargetSearchOrder;
+ assert(!NewSearchOrder.empty() &&
+ NewSearchOrder.front().first == &TargetD &&
+ NewSearchOrder.front().second == true &&
+ "TargetD must be at the front of its own search order and match "
+ "non-exported symbol");
+ NewSearchOrder.insert(std::next(NewSearchOrder.begin()), {&ImplD, true});
+ ImplD.setSearchOrder(std::move(NewSearchOrder), false);
+ });
+ PerDylibResources PDR(ImplD, BuildIndirectStubsManager());
+ I = DylibResources.insert(std::make_pair(&TargetD, std::move(PDR))).first;
+ }
+
+ return I->second;
+}
+
+void CompileOnDemandLayer::cleanUpModule(Module &M) {
+ for (auto &F : M.functions()) {
+ if (F.isDeclaration())
+ continue;
+
+ if (F.hasAvailableExternallyLinkage()) {
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ continue;
+ }
+ }
+}
+
+void CompileOnDemandLayer::expandPartition(GlobalValueSet &Partition) {
+ // Expands the partition to ensure the following rules hold:
+ // (1) If any alias is in the partition, its aliasee is also in the partition.
+ // (2) If any aliasee is in the partition, its aliases are also in the
+ //     partition.
+ // (3) If any global variable is in the partition then all global variables
+ // are in the partition.
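+ // For example, under rule (1) the partition {A}, where A aliases F,
+ // expands to {A, F}; under rule (3) a partition containing any global
+ // variable pulls in every global variable in the module.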
+ assert(!Partition.empty() && "Unexpected empty partition");
+
+ const Module &M = *(*Partition.begin())->getParent();
+ bool ContainsGlobalVariables = false;
+ std::vector<const GlobalValue *> GVsToAdd;
+
+ for (auto *GV : Partition)
+ if (isa<GlobalAlias>(GV))
+ GVsToAdd.push_back(
+ cast<GlobalValue>(cast<GlobalAlias>(GV)->getAliasee()));
+ else if (isa<GlobalVariable>(GV))
+ ContainsGlobalVariables = true;
+
+ for (auto &A : M.aliases())
+ if (Partition.count(cast<GlobalValue>(A.getAliasee())))
+ GVsToAdd.push_back(&A);
+
+ if (ContainsGlobalVariables)
+ for (auto &G : M.globals())
+ GVsToAdd.push_back(&G);
+
+ for (auto *GV : GVsToAdd)
+ Partition.insert(GV);
+}
+
+void CompileOnDemandLayer::emitPartition(
+ MaterializationResponsibility R, ThreadSafeModule TSM,
+ IRMaterializationUnit::SymbolNameToDefinitionMap Defs) {
+
+ // FIXME: Need a 'notify lazy-extracting/emitting' callback to tie the
+ // extracted module key, extracted module, and source module key
+ // together. This could be used, for example, to provide a specific
+ // memory manager instance to the linking layer.
+
+ auto &ES = getExecutionSession();
+ GlobalValueSet RequestedGVs;
+ for (auto &Name : R.getRequestedSymbols()) {
+ assert(Defs.count(Name) && "No definition for symbol");
+ RequestedGVs.insert(Defs[Name]);
+ }
+
+ // Perform partitioning with the context lock held, since the partition
+ // function is allowed to access the globals to compute the partition.
+ auto GVsToExtract =
+ TSM.withModuleDo([&](Module &M) { return Partition(RequestedGVs); });
+
+ // Take a 'None' partition to mean the whole module (as opposed to an empty
+ // partition, which means "materialize nothing"). Emit the whole module
+ // unmodified to the base layer.
+ if (GVsToExtract == None) {
+ Defs.clear();
+ BaseLayer.emit(std::move(R), std::move(TSM));
+ return;
+ }
+
+ // If the partition is empty, return the whole module to the symbol table.
+ if (GVsToExtract->empty()) {
+ R.replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ std::move(TSM), R.getVModuleKey(), R.getSymbols(), std::move(Defs),
+ *this));
+ return;
+ }
+
+ // Ok -- we actually need to partition the symbols. Promote the symbol
+ // linkages/names, expand the partition to include any required symbols
+ // (i.e. symbols that can't be separated from our partition), and
+ // then extract the partition.
+ //
+ // FIXME: We apply this promotion once per partitioning. It's safe, but
+ // overkill.
+
+ auto ExtractedTSM =
+ TSM.withModuleDo([&](Module &M) -> Expected<ThreadSafeModule> {
+ auto PromotedGlobals = PromoteSymbols(M);
+ if (!PromotedGlobals.empty()) {
+ MangleAndInterner Mangle(ES, M.getDataLayout());
+ SymbolFlagsMap SymbolFlags;
+ for (auto &GV : PromotedGlobals)
+ SymbolFlags[Mangle(GV->getName())] =
+ JITSymbolFlags::fromGlobalValue(*GV);
+ if (auto Err = R.defineMaterializing(SymbolFlags))
+ return std::move(Err);
+ }
+
+ expandPartition(*GVsToExtract);
+
+ // Extract the requested partition (plus any necessary aliases) and
+ // put the rest back into the impl dylib.
+ auto ShouldExtract = [&](const GlobalValue &GV) -> bool {
+ return GVsToExtract->count(&GV);
+ };
+
+ return extractSubModule(TSM, ".submodule", ShouldExtract);
+ });
+
+ if (!ExtractedTSM) {
+ ES.reportError(ExtractedTSM.takeError());
+ R.failMaterialization();
+ return;
+ }
+
+ R.replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, std::move(TSM), R.getVModuleKey(), *this));
+ BaseLayer.emit(std::move(R), std::move(*ExtractedTSM));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp b/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp
new file mode 100644
index 0000000000000..f8251627a4efd
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp
@@ -0,0 +1,86 @@
+//===------ CompileUtils.cpp - Utilities for compiling IR in the JIT ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <algorithm>
+
+namespace llvm {
+namespace orc {
+
+/// Compile a Module to an ObjectFile.
+SimpleCompiler::CompileResult SimpleCompiler::operator()(Module &M) {
+ CompileResult CachedObject = tryToLoadFromObjectCache(M);
+ if (CachedObject)
+ return CachedObject;
+
+ SmallVector<char, 0> ObjBufferSV;
+
+ {
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ llvm_unreachable("Target does not support MC emission.");
+ PM.run(M);
+ }
+
+ auto ObjBuffer = std::make_unique<SmallVectorMemoryBuffer>(
+ std::move(ObjBufferSV),
+ "<in memory object compiled from " + M.getModuleIdentifier() + ">");
+
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+
+ if (Obj) {
+ notifyObjectCompiled(M, *ObjBuffer);
+ return std::move(ObjBuffer);
+ }
+
+ // TODO: Actually report errors helpfully.
+ consumeError(Obj.takeError());
+ return nullptr;
+}
+
+SimpleCompiler::CompileResult
+SimpleCompiler::tryToLoadFromObjectCache(const Module &M) {
+ if (!ObjCache)
+ return CompileResult();
+
+ return ObjCache->getObject(&M);
+}
+
+void SimpleCompiler::notifyObjectCompiled(const Module &M,
+ const MemoryBuffer &ObjBuffer) {
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&M, ObjBuffer.getMemBufferRef());
+}
+
+ConcurrentIRCompiler::ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
+ ObjectCache *ObjCache)
+ : JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
+
+std::unique_ptr<MemoryBuffer> ConcurrentIRCompiler::operator()(Module &M) {
+ auto TM = cantFail(JTMB.createTargetMachine());
+ SimpleCompiler C(*TM, ObjCache);
+ return C(M);
+}
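+// Example (sketch; assumes a Module M is in scope):
+//
+//   auto JTMB = cantFail(JITTargetMachineBuilder::detectHost());
+//   ConcurrentIRCompiler Compile(std::move(JTMB));
+//   std::unique_ptr<MemoryBuffer> Obj = Compile(M);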
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/Core.cpp b/llvm/lib/ExecutionEngine/Orc/Core.cpp
new file mode 100644
index 0000000000000..5c7d888c2d6e1
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/Core.cpp
@@ -0,0 +1,2152 @@
+//===--- Core.cpp - Core ORC APIs (MaterializationUnit, JITDylib, etc.) ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+
+#if LLVM_ENABLE_THREADS
+#include <future>
+#endif
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+
+namespace {
+
+#ifndef NDEBUG
+
+cl::opt<bool> PrintHidden("debug-orc-print-hidden", cl::init(true),
+ cl::desc("debug print hidden symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintCallable("debug-orc-print-callable", cl::init(true),
+ cl::desc("debug print callable symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintData("debug-orc-print-data", cl::init(true),
+ cl::desc("debug print data symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+#endif // NDEBUG
+
+// SetPrinter predicate that prints every element.
+template <typename T> struct PrintAll {
+ bool operator()(const T &E) { return true; }
+};
+
+bool anyPrintSymbolOptionSet() {
+#ifndef NDEBUG
+ return PrintHidden || PrintCallable || PrintData;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+bool flagsMatchCLOpts(const JITSymbolFlags &Flags) {
+#ifndef NDEBUG
+ // Bail out early if this is a hidden symbol and we're not printing hiddens.
+ if (!PrintHidden && !Flags.isExported())
+ return false;
+
+ // Return true if this is callable and we're printing callables.
+ if (PrintCallable && Flags.isCallable())
+ return true;
+
+ // Return true if this is data and we're printing data.
+ if (PrintData && !Flags.isCallable())
+ return true;
+
+ // Otherwise return false.
+ return false;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+// Prints a set of items, filtered by a user-supplied predicate.
+template <typename Set, typename Pred = PrintAll<typename Set::value_type>>
+class SetPrinter {
+public:
+ SetPrinter(const Set &S, Pred ShouldPrint = Pred())
+ : S(S), ShouldPrint(std::move(ShouldPrint)) {}
+
+ void printTo(llvm::raw_ostream &OS) const {
+ bool PrintComma = false;
+ OS << "{";
+ for (auto &E : S) {
+ if (ShouldPrint(E)) {
+ if (PrintComma)
+ OS << ',';
+ OS << ' ' << E;
+ PrintComma = true;
+ }
+ }
+ OS << " }";
+ }
+
+private:
+ const Set &S;
+ mutable Pred ShouldPrint;
+};
+
+template <typename Set, typename Pred>
+SetPrinter<Set, Pred> printSet(const Set &S, Pred P = Pred()) {
+ return SetPrinter<Set, Pred>(S, std::move(P));
+}
+
+// Render a SetPrinter by delegating to its printTo method.
+template <typename Set, typename Pred>
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const SetPrinter<Set, Pred> &Printer) {
+ Printer.printTo(OS);
+ return OS;
+}
+
+struct PrintSymbolFlagsMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolFlagsMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second);
+ }
+};
+
+struct PrintSymbolMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second.getFlags());
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+char FailedToMaterialize::ID = 0;
+char SymbolsNotFound::ID = 0;
+char SymbolsCouldNotBeRemoved::ID = 0;
+
+RegisterDependenciesFunction NoDependenciesToRegister =
+ RegisterDependenciesFunction();
+
+void MaterializationUnit::anchor() {}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPtr &Sym) {
+ return OS << *Sym;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols) {
+ return OS << printSet(Symbols, PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITSymbolFlags &Flags) {
+ if (Flags.hasError())
+ OS << "[*ERROR*]";
+ if (Flags.isCallable())
+ OS << "[Callable]";
+ else
+ OS << "[Data]";
+ if (Flags.isWeak())
+ OS << "[Weak]";
+ else if (Flags.isCommon())
+ OS << "[Common]";
+
+ if (!Flags.isExported())
+ OS << "[Hidden]";
+
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITEvaluatedSymbol &Sym) {
+ return OS << format("0x%016" PRIx64, Sym.getAddress()) << " "
+ << Sym.getFlags();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\": " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &SymbolFlags) {
+ return OS << printSet(SymbolFlags, PrintSymbolFlagsMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols) {
+ return OS << printSet(Symbols, PrintSymbolMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolDependenceMap::value_type &KV) {
+ return OS << "(" << KV.first << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps) {
+ return OS << printSet(Deps, PrintAll<SymbolDependenceMap::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const MaterializationUnit &MU) {
+ OS << "MU@" << &MU << " (\"" << MU.getName() << "\"";
+ if (anyPrintSymbolOptionSet())
+ OS << ", " << MU.getSymbols();
+ return OS << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITDylibSearchList &JDs) {
+ OS << "[";
+ if (!JDs.empty()) {
+ assert(JDs.front().first && "JITDylibList entries must not be null");
+ OS << " (\"" << JDs.front().first->getName() << "\", "
+ << (JDs.front().second ? "true" : "false") << ")";
+ for (auto &KV : make_range(std::next(JDs.begin()), JDs.end())) {
+ assert(KV.first && "JITDylibList entries must not be null");
+ OS << ", (\"" << KV.first->getName() << "\", "
+ << (KV.second ? "true" : "false") << ")";
+ }
+ }
+ OS << " ]";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolAliasMap &Aliases) {
+ OS << "{";
+ for (auto &KV : Aliases)
+ OS << " " << *KV.first << ": " << KV.second.Aliasee << " "
+ << KV.second.AliasFlags;
+ OS << " }";
+ return OS;
+}
+
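+// Symbols advance through these states in order; a query is completed once
+// every symbol it covers has reached the query's required state.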
+raw_ostream &operator<<(raw_ostream &OS, const SymbolState &S) {
+ switch (S) {
+ case SymbolState::Invalid:
+ return OS << "Invalid";
+ case SymbolState::NeverSearched:
+ return OS << "Never-Searched";
+ case SymbolState::Materializing:
+ return OS << "Materializing";
+ case SymbolState::Resolved:
+ return OS << "Resolved";
+ case SymbolState::Emitted:
+ return OS << "Emitted";
+ case SymbolState::Ready:
+ return OS << "Ready";
+ }
+ llvm_unreachable("Invalid state");
+}
+
+FailedToMaterialize::FailedToMaterialize(
+ std::shared_ptr<SymbolDependenceMap> Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols->empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code FailedToMaterialize::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void FailedToMaterialize::log(raw_ostream &OS) const {
+ OS << "Failed to materialize symbols: " << *Symbols;
+}
+
+SymbolsNotFound::SymbolsNotFound(SymbolNameSet Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code SymbolsNotFound::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsNotFound::log(raw_ostream &OS) const {
+ OS << "Symbols not found: " << Symbols;
+}
+
+SymbolsCouldNotBeRemoved::SymbolsCouldNotBeRemoved(SymbolNameSet Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to remove an empty set");
+}
+
+std::error_code SymbolsCouldNotBeRemoved::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsCouldNotBeRemoved::log(raw_ostream &OS) const {
+ OS << "Symbols could not be removed: " << Symbols;
+}
+
+AsynchronousSymbolQuery::AsynchronousSymbolQuery(
+ const SymbolNameSet &Symbols, SymbolState RequiredState,
+ SymbolsResolvedCallback NotifyComplete)
+ : NotifyComplete(std::move(NotifyComplete)), RequiredState(RequiredState) {
+ assert(RequiredState >= SymbolState::Resolved &&
+ "Cannot query for a symbols that have not reached the resolve state "
+ "yet");
+
+ OutstandingSymbolsCount = Symbols.size();
+
+ for (auto &S : Symbols)
+ ResolvedSymbols[S] = nullptr;
+}
+
+void AsynchronousSymbolQuery::notifySymbolMetRequiredState(
+ const SymbolStringPtr &Name, JITEvaluatedSymbol Sym) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Resolving symbol outside the requested set");
+ assert(I->second.getAddress() == 0 && "Redundantly resolving symbol Name");
+ I->second = std::move(Sym);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::handleComplete() {
+ assert(OutstandingSymbolsCount == 0 &&
+ "Symbols remain, handleComplete called prematurely");
+
+ auto TmpNotifyComplete = std::move(NotifyComplete);
+ NotifyComplete = SymbolsResolvedCallback();
+ TmpNotifyComplete(std::move(ResolvedSymbols));
+}
+
+bool AsynchronousSymbolQuery::canStillFail() { return !!NotifyComplete; }
+
+void AsynchronousSymbolQuery::handleFailed(Error Err) {
+ assert(QueryRegistrations.empty() && ResolvedSymbols.empty() &&
+ OutstandingSymbolsCount == 0 &&
+ "Query should already have been abandoned");
+ NotifyComplete(std::move(Err));
+ NotifyComplete = SymbolsResolvedCallback();
+}
+
+void AsynchronousSymbolQuery::addQueryDependence(JITDylib &JD,
+ SymbolStringPtr Name) {
+ bool Added = QueryRegistrations[&JD].insert(std::move(Name)).second;
+ (void)Added;
+ assert(Added && "Duplicate dependence notification?");
+}
+
+void AsynchronousSymbolQuery::removeQueryDependence(
+ JITDylib &JD, const SymbolStringPtr &Name) {
+ auto QRI = QueryRegistrations.find(&JD);
+ assert(QRI != QueryRegistrations.end() &&
+ "No dependencies registered for JD");
+ assert(QRI->second.count(Name) && "No dependency on Name in JD");
+ QRI->second.erase(Name);
+ if (QRI->second.empty())
+ QueryRegistrations.erase(QRI);
+}
+
+void AsynchronousSymbolQuery::detach() {
+ ResolvedSymbols.clear();
+ OutstandingSymbolsCount = 0;
+ for (auto &KV : QueryRegistrations)
+ KV.first->detachQueryHelper(*this, KV.second);
+ QueryRegistrations.clear();
+}
+
+MaterializationResponsibility::MaterializationResponsibility(
+ JITDylib &JD, SymbolFlagsMap SymbolFlags, VModuleKey K)
+ : JD(JD), SymbolFlags(std::move(SymbolFlags)), K(std::move(K)) {
+ assert(!this->SymbolFlags.empty() && "Materializing nothing?");
+}
+
+MaterializationResponsibility::~MaterializationResponsibility() {
+ assert(SymbolFlags.empty() &&
+ "All symbols should have been explicitly materialized or failed");
+}
+
+SymbolNameSet MaterializationResponsibility::getRequestedSymbols() const {
+ return JD.getRequestedSymbols(SymbolFlags);
+}
+
+Error MaterializationResponsibility::notifyResolved(const SymbolMap &Symbols) {
+ LLVM_DEBUG({
+ dbgs() << "In " << JD.getName() << " resolving " << Symbols << "\n";
+ });
+#ifndef NDEBUG
+ for (auto &KV : Symbols) {
+ auto WeakFlags = JITSymbolFlags::Weak | JITSymbolFlags::Common;
+ auto I = SymbolFlags.find(KV.first);
+ assert(I != SymbolFlags.end() &&
+ "Resolving symbol outside this responsibility set");
+ assert((KV.second.getFlags() & ~WeakFlags) == (I->second & ~WeakFlags) &&
+ "Resolving symbol with incorrect flags");
+ }
+#endif
+
+ return JD.resolve(Symbols);
+}
+
+Error MaterializationResponsibility::notifyEmitted() {
+
+ LLVM_DEBUG({
+ dbgs() << "In " << JD.getName() << " emitting " << SymbolFlags << "\n";
+ });
+
+ if (auto Err = JD.emit(SymbolFlags))
+ return Err;
+
+ SymbolFlags.clear();
+ return Error::success();
+}
+
+Error MaterializationResponsibility::defineMaterializing(
+ const SymbolFlagsMap &NewSymbolFlags) {
+ // Add the given symbols to this responsibility object.
+ // It's ok if we hit a duplicate here: In that case the new version will be
+ // discarded, and the JITDylib::defineMaterializing method will return a
+ // duplicate symbol error.
+ for (auto &KV : NewSymbolFlags)
+ SymbolFlags.insert(KV);
+
+ return JD.defineMaterializing(NewSymbolFlags);
+}
+
+void MaterializationResponsibility::failMaterialization() {
+
+ LLVM_DEBUG({
+ dbgs() << "In " << JD.getName() << " failing materialization for "
+ << SymbolFlags << "\n";
+ });
+
+ JITDylib::FailedSymbolsWorklist Worklist;
+
+ for (auto &KV : SymbolFlags)
+ Worklist.push_back(std::make_pair(&JD, KV.first));
+ SymbolFlags.clear();
+
+ JD.notifyFailed(std::move(Worklist));
+}
+
+void MaterializationResponsibility::replace(
+ std::unique_ptr<MaterializationUnit> MU) {
+ for (auto &KV : MU->getSymbols())
+ SymbolFlags.erase(KV.first);
+
+ LLVM_DEBUG(JD.getExecutionSession().runSessionLocked([&]() {
+ dbgs() << "In " << JD.getName() << " replacing symbols with " << *MU
+ << "\n";
+ }););
+
+ JD.replace(std::move(MU));
+}
+
+MaterializationResponsibility
+MaterializationResponsibility::delegate(const SymbolNameSet &Symbols,
+ VModuleKey NewKey) {
+
+ if (NewKey == VModuleKey())
+ NewKey = K;
+
+ SymbolFlagsMap DelegatedFlags;
+
+ for (auto &Name : Symbols) {
+ auto I = SymbolFlags.find(Name);
+ assert(I != SymbolFlags.end() &&
+ "Symbol is not tracked by this MaterializationResponsibility "
+ "instance");
+
+ DelegatedFlags[Name] = std::move(I->second);
+ SymbolFlags.erase(I);
+ }
+
+ return MaterializationResponsibility(JD, std::move(DelegatedFlags),
+ std::move(NewKey));
+}
+
+void MaterializationResponsibility::addDependencies(
+ const SymbolStringPtr &Name, const SymbolDependenceMap &Dependencies) {
+ assert(SymbolFlags.count(Name) &&
+ "Symbol not covered by this MaterializationResponsibility instance");
+ JD.addDependencies(Name, Dependencies);
+}
+
+void MaterializationResponsibility::addDependenciesForAll(
+ const SymbolDependenceMap &Dependencies) {
+ for (auto &KV : SymbolFlags)
+ JD.addDependencies(KV.first, Dependencies);
+}
+
+AbsoluteSymbolsMaterializationUnit::AbsoluteSymbolsMaterializationUnit(
+ SymbolMap Symbols, VModuleKey K)
+ : MaterializationUnit(extractFlags(Symbols), std::move(K)),
+ Symbols(std::move(Symbols)) {}
+
+StringRef AbsoluteSymbolsMaterializationUnit::getName() const {
+ return "<Absolute Symbols>";
+}
+
+void AbsoluteSymbolsMaterializationUnit::materialize(
+ MaterializationResponsibility R) {
+ // No dependencies, so these calls can't fail.
+ cantFail(R.notifyResolved(Symbols));
+ cantFail(R.notifyEmitted());
+}
+
+void AbsoluteSymbolsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Symbols.count(Name) && "Symbol is not part of this MU");
+ Symbols.erase(Name);
+}
+
+SymbolFlagsMap
+AbsoluteSymbolsMaterializationUnit::extractFlags(const SymbolMap &Symbols) {
+ SymbolFlagsMap Flags;
+ for (const auto &KV : Symbols)
+ Flags[KV.first] = KV.second.getFlags();
+ return Flags;
+}
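+// Example (sketch; JD, Mangle and Addr are hypothetical): binding a symbol
+// to a known address with the absoluteSymbols() helper:
+//
+//   cantFail(JD.define(absoluteSymbols(
+//       {{Mangle("printf"),
+//         JITEvaluatedSymbol(Addr, JITSymbolFlags::Exported)}})));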
+
+ReExportsMaterializationUnit::ReExportsMaterializationUnit(
+ JITDylib *SourceJD, bool MatchNonExported, SymbolAliasMap Aliases,
+ VModuleKey K)
+ : MaterializationUnit(extractFlags(Aliases), std::move(K)),
+ SourceJD(SourceJD), MatchNonExported(MatchNonExported),
+ Aliases(std::move(Aliases)) {}
+
+StringRef ReExportsMaterializationUnit::getName() const {
+ return "<Reexports>";
+}
+
+void ReExportsMaterializationUnit::materialize(
+ MaterializationResponsibility R) {
+
+ auto &ES = R.getTargetJITDylib().getExecutionSession();
+ JITDylib &TgtJD = R.getTargetJITDylib();
+ JITDylib &SrcJD = SourceJD ? *SourceJD : TgtJD;
+
+ // Find the set of requested aliases and aliasees. Return any unrequested
+ // aliases back to the JITDylib so as to not prematurely materialize any
+ // aliasees.
+ auto RequestedSymbols = R.getRequestedSymbols();
+ SymbolAliasMap RequestedAliases;
+
+ for (auto &Name : RequestedSymbols) {
+ auto I = Aliases.find(Name);
+ assert(I != Aliases.end() && "Symbol not found in aliases map?");
+ RequestedAliases[Name] = std::move(I->second);
+ Aliases.erase(I);
+ }
+
+ LLVM_DEBUG({
+ ES.runSessionLocked([&]() {
+ dbgs() << "materializing reexports: target = " << TgtJD.getName()
+ << ", source = " << SrcJD.getName() << " " << RequestedAliases
+ << "\n";
+ });
+ });
+
+ if (!Aliases.empty()) {
+ if (SourceJD)
+ R.replace(reexports(*SourceJD, std::move(Aliases), MatchNonExported));
+ else
+ R.replace(symbolAliases(std::move(Aliases)));
+ }
+
+ // The OnResolveInfo struct will hold the aliases and responsibility for
+ // each query in the list.
+ struct OnResolveInfo {
+ OnResolveInfo(MaterializationResponsibility R, SymbolAliasMap Aliases)
+ : R(std::move(R)), Aliases(std::move(Aliases)) {}
+
+ MaterializationResponsibility R;
+ SymbolAliasMap Aliases;
+ };
+
+ // Build a list of queries to issue. In each round we build the largest set of
+ // aliases that we can resolve without encountering a chain definition of the
+ // form Foo -> Bar, Bar -> Baz. Such a form would deadlock as the query would
+ // be waiting on a symbol that it itself had to resolve. Usually this will just
+ // involve one round and a single query.
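+ //
+ // For example, given aliases {Foo -> Bar, Bar -> Baz} within one JITDylib,
+ // round one resolves Bar (querying Baz) and round two resolves Foo
+ // (querying Bar).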
+
+ std::vector<std::pair<SymbolNameSet, std::shared_ptr<OnResolveInfo>>>
+ QueryInfos;
+ while (!RequestedAliases.empty()) {
+ SymbolNameSet ResponsibilitySymbols;
+ SymbolNameSet QuerySymbols;
+ SymbolAliasMap QueryAliases;
+
+ // Collect as many aliases as we can without including a chain.
+ for (auto &KV : RequestedAliases) {
+ // Chain detected. Skip this symbol for this round.
+ if (&SrcJD == &TgtJD && (QueryAliases.count(KV.second.Aliasee) ||
+ RequestedAliases.count(KV.second.Aliasee)))
+ continue;
+
+ ResponsibilitySymbols.insert(KV.first);
+ QuerySymbols.insert(KV.second.Aliasee);
+ QueryAliases[KV.first] = std::move(KV.second);
+ }
+
+ // Remove the aliases collected this round from the RequestedAliases map.
+ for (auto &KV : QueryAliases)
+ RequestedAliases.erase(KV.first);
+
+ assert(!QuerySymbols.empty() && "Alias cycle detected!");
+
+ auto QueryInfo = std::make_shared<OnResolveInfo>(
+ R.delegate(ResponsibilitySymbols), std::move(QueryAliases));
+ QueryInfos.push_back(
+ make_pair(std::move(QuerySymbols), std::move(QueryInfo)));
+ }
+
+ // Issue the queries.
+ while (!QueryInfos.empty()) {
+ auto QuerySymbols = std::move(QueryInfos.back().first);
+ auto QueryInfo = std::move(QueryInfos.back().second);
+
+ QueryInfos.pop_back();
+
+ auto RegisterDependencies = [QueryInfo,
+ &SrcJD](const SymbolDependenceMap &Deps) {
+ // If there were no materializing symbols, just bail out.
+ if (Deps.empty())
+ return;
+
+ // Otherwise the only deps should be on SrcJD.
+ assert(Deps.size() == 1 && Deps.count(&SrcJD) &&
+ "Unexpected dependencies for reexports");
+
+ auto &SrcJDDeps = Deps.find(&SrcJD)->second;
+ SymbolDependenceMap PerAliasDepsMap;
+ auto &PerAliasDeps = PerAliasDepsMap[&SrcJD];
+
+ for (auto &KV : QueryInfo->Aliases)
+ if (SrcJDDeps.count(KV.second.Aliasee)) {
+ PerAliasDeps = {KV.second.Aliasee};
+ QueryInfo->R.addDependencies(KV.first, PerAliasDepsMap);
+ }
+ };
+
+ auto OnComplete = [QueryInfo](Expected<SymbolMap> Result) {
+ auto &ES = QueryInfo->R.getTargetJITDylib().getExecutionSession();
+ if (Result) {
+ SymbolMap ResolutionMap;
+ for (auto &KV : QueryInfo->Aliases) {
+ assert(Result->count(KV.second.Aliasee) &&
+ "Result map missing entry?");
+ ResolutionMap[KV.first] = JITEvaluatedSymbol(
+ (*Result)[KV.second.Aliasee].getAddress(), KV.second.AliasFlags);
+ }
+ if (auto Err = QueryInfo->R.notifyResolved(ResolutionMap)) {
+ ES.reportError(std::move(Err));
+ QueryInfo->R.failMaterialization();
+ return;
+ }
+ if (auto Err = QueryInfo->R.notifyEmitted()) {
+ ES.reportError(std::move(Err));
+ QueryInfo->R.failMaterialization();
+ return;
+ }
+ } else {
+ ES.reportError(Result.takeError());
+ QueryInfo->R.failMaterialization();
+ }
+ };
+
+ ES.lookup(JITDylibSearchList({{&SrcJD, MatchNonExported}}), QuerySymbols,
+ SymbolState::Resolved, std::move(OnComplete),
+ std::move(RegisterDependencies));
+ }
+}
+
+void ReExportsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Aliases.count(Name) &&
+ "Symbol not covered by this MaterializationUnit");
+ Aliases.erase(Name);
+}
+
+SymbolFlagsMap
+ReExportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &KV : Aliases)
+ SymbolFlags[KV.first] = KV.second.AliasFlags;
+
+ return SymbolFlags;
+}
+
+Expected<SymbolAliasMap>
+buildSimpleReexportsAliasMap(JITDylib &SourceJD, const SymbolNameSet &Symbols) {
+ auto Flags = SourceJD.lookupFlags(Symbols);
+
+ if (!Flags)
+ return Flags.takeError();
+
+ if (Flags->size() != Symbols.size()) {
+ SymbolNameSet Unresolved = Symbols;
+ for (auto &KV : *Flags)
+ Unresolved.erase(KV.first);
+ return make_error<SymbolsNotFound>(std::move(Unresolved));
+ }
+
+ SymbolAliasMap Result;
+ for (auto &Name : Symbols) {
+ assert(Flags->count(Name) && "Missing entry in flags map");
+ Result[Name] = SymbolAliasMapEntry(Name, (*Flags)[Name]);
+ }
+
+ return Result;
+}
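+// Example (sketch; SourceJD, JD and the interned name Bar are hypothetical):
+//
+//   auto Aliases = cantFail(buildSimpleReexportsAliasMap(SourceJD, {Bar}));
+//   cantFail(JD.define(reexports(SourceJD, std::move(Aliases))));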
+
+ReexportsGenerator::ReexportsGenerator(JITDylib &SourceJD,
+ bool MatchNonExported,
+ SymbolPredicate Allow)
+ : SourceJD(SourceJD), MatchNonExported(MatchNonExported),
+ Allow(std::move(Allow)) {}
+
+Expected<SymbolNameSet>
+ReexportsGenerator::tryToGenerate(JITDylib &JD, const SymbolNameSet &Names) {
+ orc::SymbolNameSet Added;
+ orc::SymbolAliasMap AliasMap;
+
+ auto Flags = SourceJD.lookupFlags(Names);
+
+ if (!Flags)
+ return Flags.takeError();
+
+ for (auto &KV : *Flags) {
+ if (Allow && !Allow(KV.first))
+ continue;
+ AliasMap[KV.first] = SymbolAliasMapEntry(KV.first, KV.second);
+ Added.insert(KV.first);
+ }
+
+ if (!Added.empty())
+ cantFail(JD.define(reexports(SourceJD, AliasMap, MatchNonExported)));
+
+ return Added;
+}
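+// Example (sketch, hypothetical JITDylibs JD and LibJD): re-export whatever
+// LibJD defines, on demand, from JD:
+//
+//   JD.addGenerator(std::make_unique<ReexportsGenerator>(
+//       LibJD, /*MatchNonExported=*/false));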
+
+JITDylib::DefinitionGenerator::~DefinitionGenerator() {}
+
+void JITDylib::removeGenerator(DefinitionGenerator &G) {
+ ES.runSessionLocked([&]() {
+ auto I = std::find_if(DefGenerators.begin(), DefGenerators.end(),
+ [&](const std::unique_ptr<DefinitionGenerator> &H) {
+ return H.get() == &G;
+ });
+ assert(I != DefGenerators.end() && "Generator not found");
+ DefGenerators.erase(I);
+ });
+}
+
+Error JITDylib::defineMaterializing(const SymbolFlagsMap &SymbolFlags) {
+ return ES.runSessionLocked([&]() -> Error {
+ std::vector<SymbolTable::iterator> AddedSyms;
+
+ for (auto &KV : SymbolFlags) {
+ SymbolTable::iterator EntryItr;
+ bool Added;
+
+ std::tie(EntryItr, Added) =
+ Symbols.insert(std::make_pair(KV.first, SymbolTableEntry(KV.second)));
+
+ if (Added) {
+ AddedSyms.push_back(EntryItr);
+ EntryItr->second.setState(SymbolState::Materializing);
+ } else {
+ // Remove any symbols already added.
+ for (auto &SI : AddedSyms)
+ Symbols.erase(SI);
+
+ // FIXME: Return all duplicates.
+ return make_error<DuplicateDefinition>(*KV.first);
+ }
+ }
+
+ return Error::success();
+ });
+}
+
+void JITDylib::replace(std::unique_ptr<MaterializationUnit> MU) {
+ assert(MU != nullptr && "Can not replace with a null MaterializationUnit");
+
+ auto MustRunMU =
+ ES.runSessionLocked([&, this]() -> std::unique_ptr<MaterializationUnit> {
+
+#ifndef NDEBUG
+ for (auto &KV : MU->getSymbols()) {
+ auto SymI = Symbols.find(KV.first);
+ assert(SymI != Symbols.end() && "Replacing unknown symbol");
+ assert(SymI->second.isInMaterializationPhase() &&
+ "Can not call replace on a symbol that is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Symbol should not have materializer attached already");
+ assert(UnmaterializedInfos.count(KV.first) == 0 &&
+ "Symbol being replaced should have no UnmaterializedInfo");
+ }
+#endif // NDEBUG
+
+ // If any symbol has pending queries against it then we need to
+ // materialize MU immediately.
+ for (auto &KV : MU->getSymbols()) {
+ auto MII = MaterializingInfos.find(KV.first);
+ if (MII != MaterializingInfos.end()) {
+ if (MII->second.hasQueriesPending())
+ return std::move(MU);
+ }
+ }
+
+ // Otherwise, make MU responsible for all the symbols.
+ auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU));
+ for (auto &KV : UMI->MU->getSymbols()) {
+ auto SymI = Symbols.find(KV.first);
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Can not replace a symbol that is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Can not replace a symbol that has a materializer attached");
+ assert(UnmaterializedInfos.count(KV.first) == 0 &&
+ "Unexpected materializer entry in map");
+ SymI->second.setAddress(SymI->second.getAddress());
+ SymI->second.setMaterializerAttached(true);
+ UnmaterializedInfos[KV.first] = UMI;
+ }
+
+ return nullptr;
+ });
+
+ if (MustRunMU)
+ ES.dispatchMaterialization(*this, std::move(MustRunMU));
+}
+
+SymbolNameSet
+JITDylib::getRequestedSymbols(const SymbolFlagsMap &SymbolFlags) const {
+ return ES.runSessionLocked([&]() {
+ SymbolNameSet RequestedSymbols;
+
+ for (auto &KV : SymbolFlags) {
+ assert(Symbols.count(KV.first) && "JITDylib does not cover this symbol?");
+ assert(Symbols.find(KV.first)->second.isInMaterializationPhase() &&
+ "getRequestedSymbols can only be called for symbols that have "
+ "started materializing");
+ auto I = MaterializingInfos.find(KV.first);
+ if (I == MaterializingInfos.end())
+ continue;
+
+ if (I->second.hasQueriesPending())
+ RequestedSymbols.insert(KV.first);
+ }
+
+ return RequestedSymbols;
+ });
+}
+
+void JITDylib::addDependencies(const SymbolStringPtr &Name,
+ const SymbolDependenceMap &Dependencies) {
+ assert(Symbols.count(Name) && "Name not in symbol table");
+ assert(Symbols[Name].isInMaterializationPhase() &&
+ "Can not add dependencies for a symbol that is not materializing");
+
+ // If Name is already in an error state then just bail out.
+ if (Symbols[Name].getFlags().hasError())
+ return;
+
+ auto &MI = MaterializingInfos[Name];
+ assert(Symbols[Name].getState() != SymbolState::Emitted &&
+ "Can not add dependencies to an emitted symbol");
+
+ bool DependsOnSymbolInErrorState = false;
+
+ // Register dependencies, record whether any dependency is in the error
+ // state.
+ for (auto &KV : Dependencies) {
+ assert(KV.first && "Null JITDylib in dependency?");
+ auto &OtherJITDylib = *KV.first;
+ auto &DepsOnOtherJITDylib = MI.UnemittedDependencies[&OtherJITDylib];
+
+ for (auto &OtherSymbol : KV.second) {
+
+ // Check the sym entry for the dependency.
+ auto OtherSymI = OtherJITDylib.Symbols.find(OtherSymbol);
+
+#ifndef NDEBUG
+ // Assert that this symbol exists and has not reached the ready state
+ // already.
+ assert(OtherSymI != OtherJITDylib.Symbols.end() &&
+ (OtherSymI->second.getState() != SymbolState::Ready &&
+ "Dependency on emitted/ready symbol"));
+#endif
+
+ auto &OtherSymEntry = OtherSymI->second;
+
+ // If the dependency is in an error state then note this and continue,
+ // we will move this symbol to the error state below.
+ if (OtherSymEntry.getFlags().hasError()) {
+ DependsOnSymbolInErrorState = true;
+ continue;
+ }
+
+ // If the dependency was not in the error state then add it to
+ // our list of dependencies.
+ assert(OtherJITDylib.MaterializingInfos.count(OtherSymbol) &&
+ "No MaterializingInfo for dependency");
+ auto &OtherMI = OtherJITDylib.MaterializingInfos[OtherSymbol];
+
+ if (OtherSymEntry.getState() == SymbolState::Emitted)
+ transferEmittedNodeDependencies(MI, Name, OtherMI);
+ else if (&OtherJITDylib != this || OtherSymbol != Name) {
+ OtherMI.Dependants[this].insert(Name);
+ DepsOnOtherJITDylib.insert(OtherSymbol);
+ }
+ }
+
+ if (DepsOnOtherJITDylib.empty())
+ MI.UnemittedDependencies.erase(&OtherJITDylib);
+ }
+
+ // If this symbol depended on any symbols in the error state then move
+ // this symbol to the error state too.
+ if (DependsOnSymbolInErrorState)
+ Symbols[Name].setFlags(Symbols[Name].getFlags() | JITSymbolFlags::HasError);
+}
+
+Error JITDylib::resolve(const SymbolMap &Resolved) {
+ SymbolNameSet SymbolsInErrorState;
+ AsynchronousSymbolQuerySet CompletedQueries;
+
+ ES.runSessionLocked([&, this]() {
+ struct WorklistEntry {
+ SymbolTable::iterator SymI;
+ JITEvaluatedSymbol ResolvedSym;
+ };
+
+ std::vector<WorklistEntry> Worklist;
+ Worklist.reserve(Resolved.size());
+
+ // Build worklist and check for any symbols in the error state.
+ for (const auto &KV : Resolved) {
+
+ assert(!KV.second.getFlags().hasError() &&
+ "Resolution result can not have error flag set");
+
+ auto SymI = Symbols.find(KV.first);
+
+ assert(SymI != Symbols.end() && "Symbol not found");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Resolving symbol with materializer attached?");
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Symbol should be materializing");
+ assert(SymI->second.getAddress() == 0 &&
+ "Symbol has already been resolved");
+
+ if (SymI->second.getFlags().hasError())
+ SymbolsInErrorState.insert(KV.first);
+ else {
+ auto Flags = KV.second.getFlags();
+ Flags &= ~(JITSymbolFlags::Weak | JITSymbolFlags::Common);
+ assert(Flags == (SymI->second.getFlags() &
+ ~(JITSymbolFlags::Weak | JITSymbolFlags::Common)) &&
+ "Resolved flags should match the declared flags");
+
+ Worklist.push_back(
+ {SymI, JITEvaluatedSymbol(KV.second.getAddress(), Flags)});
+ }
+ }
+
+ // If any symbols were in the error state then bail out.
+ if (!SymbolsInErrorState.empty())
+ return;
+
+ while (!Worklist.empty()) {
+ auto SymI = Worklist.back().SymI;
+ auto ResolvedSym = Worklist.back().ResolvedSym;
+ Worklist.pop_back();
+
+ auto &Name = SymI->first;
+
+ // Resolved symbols can not be weak: discard the weak flag.
+ JITSymbolFlags ResolvedFlags = ResolvedSym.getFlags();
+ SymI->second.setAddress(ResolvedSym.getAddress());
+ SymI->second.setFlags(ResolvedFlags);
+ SymI->second.setState(SymbolState::Resolved);
+
+ auto &MI = MaterializingInfos[Name];
+ for (auto &Q : MI.takeQueriesMeeting(SymbolState::Resolved)) {
+ Q->notifySymbolMetRequiredState(Name, ResolvedSym);
+ Q->removeQueryDependence(*this, Name);
+ if (Q->isComplete())
+ CompletedQueries.insert(std::move(Q));
+ }
+ }
+ });
+
+ assert((SymbolsInErrorState.empty() || CompletedQueries.empty()) &&
+ "Can't fail symbols and completed queries at the same time");
+
+ // If we failed any symbols then return an error.
+ if (!SymbolsInErrorState.empty()) {
+ auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
+ return make_error<FailedToMaterialize>(std::move(FailedSymbolsDepMap));
+ }
+
+ // Otherwise notify all the completed queries.
+ for (auto &Q : CompletedQueries) {
+ assert(Q->isComplete() && "Q not completed");
+ Q->handleComplete();
+ }
+
+ return Error::success();
+}
+
+Error JITDylib::emit(const SymbolFlagsMap &Emitted) {
+ AsynchronousSymbolQuerySet CompletedQueries;
+ SymbolNameSet SymbolsInErrorState;
+
+ ES.runSessionLocked([&, this]() {
+ std::vector<SymbolTable::iterator> Worklist;
+
+ // Scan to build worklist, record any symbols in the error state.
+ for (const auto &KV : Emitted) {
+ auto &Name = KV.first;
+
+ auto SymI = Symbols.find(Name);
+ assert(SymI != Symbols.end() && "No symbol table entry for Name");
+
+ if (SymI->second.getFlags().hasError())
+ SymbolsInErrorState.insert(Name);
+ else
+ Worklist.push_back(SymI);
+ }
+
+ // If any symbols were in the error state then bail out.
+ if (!SymbolsInErrorState.empty())
+ return;
+
+ // Otherwise update dependencies and move to the emitted state.
+ while (!Worklist.empty()) {
+ auto SymI = Worklist.back();
+ Worklist.pop_back();
+
+ auto &Name = SymI->first;
+ auto &SymEntry = SymI->second;
+
+ // Move symbol to the emitted state.
+ assert(SymEntry.getState() == SymbolState::Resolved &&
+ "Emitting from state other than Resolved");
+ SymEntry.setState(SymbolState::Emitted);
+
+ auto MII = MaterializingInfos.find(Name);
+ assert(MII != MaterializingInfos.end() &&
+ "Missing MaterializingInfo entry");
+ auto &MI = MII->second;
+
+ // For each dependant, transfer this node's emitted dependencies to
+ // it. If the dependant node is ready (i.e. has no unemitted
+ // dependencies) then notify any pending queries.
+ for (auto &KV : MI.Dependants) {
+ auto &DependantJD = *KV.first;
+ for (auto &DependantName : KV.second) {
+ auto DependantMII =
+ DependantJD.MaterializingInfos.find(DependantName);
+ assert(DependantMII != DependantJD.MaterializingInfos.end() &&
+ "Dependant should have MaterializingInfo");
+
+ auto &DependantMI = DependantMII->second;
+
+ // Remove the dependant's dependency on this node.
+ assert(DependantMI.UnemittedDependencies.count(this) &&
+ "Dependant does not have an unemitted dependencies record for "
+ "this JITDylib");
+ assert(DependantMI.UnemittedDependencies[this].count(Name) &&
+ "Dependant does not count this symbol as a dependency?");
+
+ DependantMI.UnemittedDependencies[this].erase(Name);
+ if (DependantMI.UnemittedDependencies[this].empty())
+ DependantMI.UnemittedDependencies.erase(this);
+
+ // Transfer unemitted dependencies from this node to the dependant.
+ DependantJD.transferEmittedNodeDependencies(DependantMI,
+ DependantName, MI);
+
+ auto DependantSymI = DependantJD.Symbols.find(DependantName);
+ assert(DependantSymI != DependantJD.Symbols.end() &&
+ "Dependant has no entry in the Symbols table");
+ auto &DependantSymEntry = DependantSymI->second;
+
+ // If the dependant is emitted and this node was the last of its
+ // unemitted dependencies then the dependant node is now ready, so
+ // notify any pending queries on the dependant node.
+ if (DependantSymEntry.getState() == SymbolState::Emitted &&
+ DependantMI.UnemittedDependencies.empty()) {
+ assert(DependantMI.Dependants.empty() &&
+ "Dependants should be empty by now");
+
+ // Since this dependant is now ready, we erase its MaterializingInfo
+ // and update its materializing state.
+ DependantSymEntry.setState(SymbolState::Ready);
+
+ for (auto &Q : DependantMI.takeQueriesMeeting(SymbolState::Ready)) {
+ Q->notifySymbolMetRequiredState(
+ DependantName, DependantSymI->second.getSymbol());
+ if (Q->isComplete())
+ CompletedQueries.insert(Q);
+ Q->removeQueryDependence(DependantJD, DependantName);
+ }
+
+ DependantJD.MaterializingInfos.erase(DependantMII);
+ }
+ }
+ }
+
+ MI.Dependants.clear();
+ if (MI.UnemittedDependencies.empty()) {
+ SymI->second.setState(SymbolState::Ready);
+ for (auto &Q : MI.takeQueriesMeeting(SymbolState::Ready)) {
+ Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+ if (Q->isComplete())
+ CompletedQueries.insert(Q);
+ Q->removeQueryDependence(*this, Name);
+ }
+ MaterializingInfos.erase(MII);
+ }
+ }
+ });
+
+ assert((SymbolsInErrorState.empty() || CompletedQueries.empty()) &&
+ "Can't fail symbols and completed queries at the same time");
+
+ // If we failed any symbols then return an error.
+ if (!SymbolsInErrorState.empty()) {
+ auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
+ return make_error<FailedToMaterialize>(std::move(FailedSymbolsDepMap));
+ }
+
+ // Otherwise notify all the completed queries.
+ for (auto &Q : CompletedQueries) {
+ assert(Q->isComplete() && "Q is not complete");
+ Q->handleComplete();
+ }
+
+ return Error::success();
+}
+
+void JITDylib::notifyFailed(FailedSymbolsWorklist Worklist) {
+ AsynchronousSymbolQuerySet FailedQueries;
+ auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+
+ // Failing no symbols is a no-op.
+ if (Worklist.empty())
+ return;
+
+ auto &ES = Worklist.front().first->getExecutionSession();
+
+ ES.runSessionLocked([&]() {
+ while (!Worklist.empty()) {
+ assert(Worklist.back().first && "Failed JITDylib can not be null");
+ auto &JD = *Worklist.back().first;
+ auto Name = std::move(Worklist.back().second);
+ Worklist.pop_back();
+
+ (*FailedSymbolsMap)[&JD].insert(Name);
+
+ assert(JD.Symbols.count(Name) && "No symbol table entry for Name");
+ auto &Sym = JD.Symbols[Name];
+
+ // Move the symbol into the error state.
+ // Note that this may be redundant: The symbol might already have been
+ // moved to this state in response to the failure of a dependence.
+ Sym.setFlags(Sym.getFlags() | JITSymbolFlags::HasError);
+
+ // FIXME: Come up with a sane mapping of state to
+ // presence-of-MaterializingInfo so that we can assert presence / absence
+ // here, rather than testing it.
+ auto MII = JD.MaterializingInfos.find(Name);
+
+ if (MII == JD.MaterializingInfos.end())
+ continue;
+
+ auto &MI = MII->second;
+
+ // Move all dependants to the error state and disconnect from them.
+ for (auto &KV : MI.Dependants) {
+ auto &DependantJD = *KV.first;
+ for (auto &DependantName : KV.second) {
+ assert(DependantJD.Symbols.count(DependantName) &&
+ "No symbol table entry for DependantName");
+ auto &DependantSym = DependantJD.Symbols[DependantName];
+ DependantSym.setFlags(DependantSym.getFlags() |
+ JITSymbolFlags::HasError);
+
+ assert(DependantJD.MaterializingInfos.count(DependantName) &&
+ "No MaterializingInfo for dependant");
+ auto &DependantMI = DependantJD.MaterializingInfos[DependantName];
+
+ auto UnemittedDepI = DependantMI.UnemittedDependencies.find(&JD);
+ assert(UnemittedDepI != DependantMI.UnemittedDependencies.end() &&
+ "No UnemittedDependencies entry for this JITDylib");
+ assert(UnemittedDepI->second.count(Name) &&
+ "No UnemittedDependencies entry for this symbol");
+ UnemittedDepI->second.erase(Name);
+ if (UnemittedDepI->second.empty())
+ DependantMI.UnemittedDependencies.erase(UnemittedDepI);
+
+ // If this symbol is already in the emitted state then we need to
+ // take responsibility for failing its queries, so add it to the
+ // worklist.
+ if (DependantSym.getState() == SymbolState::Emitted) {
+ assert(DependantMI.Dependants.empty() &&
+ "Emitted symbol should not have dependants");
+ Worklist.push_back(std::make_pair(&DependantJD, DependantName));
+ }
+ }
+ }
+ MI.Dependants.clear();
+
+ // Disconnect from all unemitted dependencies.
+ for (auto &KV : MI.UnemittedDependencies) {
+ auto &UnemittedDepJD = *KV.first;
+ for (auto &UnemittedDepName : KV.second) {
+ auto UnemittedDepMII =
+ UnemittedDepJD.MaterializingInfos.find(UnemittedDepName);
+ assert(UnemittedDepMII != UnemittedDepJD.MaterializingInfos.end() &&
+ "Missing MII for unemitted dependency");
+ assert(UnemittedDepMII->second.Dependants.count(&JD) &&
+ "JD not listed as a dependant of unemitted dependency");
+ assert(UnemittedDepMII->second.Dependants[&JD].count(Name) &&
+ "Name is not listed as a dependant of unemitted dependency");
+ UnemittedDepMII->second.Dependants[&JD].erase(Name);
+ if (UnemittedDepMII->second.Dependants[&JD].empty())
+ UnemittedDepMII->second.Dependants.erase(&JD);
+ }
+ }
+ MI.UnemittedDependencies.clear();
+
+ // Collect queries to be failed for this MII.
+ for (auto &Q : MII->second.pendingQueries()) {
+ // Add the query to the list to be failed and detach it.
+ FailedQueries.insert(Q);
+ Q->detach();
+ }
+
+ assert(MI.Dependants.empty() &&
+ "Can not delete MaterializingInfo with dependants still attached");
+ assert(MI.UnemittedDependencies.empty() &&
+ "Can not delete MaterializingInfo with unemitted dependencies "
+ "still attached");
+ assert(!MI.hasQueriesPending() &&
+ "Can not delete MaterializingInfo with queries pending");
+ JD.MaterializingInfos.erase(MII);
+ }
+ });
+
+ for (auto &Q : FailedQueries)
+ Q->handleFailed(make_error<FailedToMaterialize>(FailedSymbolsMap));
+}
+
+void JITDylib::setSearchOrder(JITDylibSearchList NewSearchOrder,
+ bool SearchThisJITDylibFirst,
+ bool MatchNonExportedInThisDylib) {
+ if (SearchThisJITDylibFirst) {
+ if (NewSearchOrder.empty() || NewSearchOrder.front().first != this)
+ NewSearchOrder.insert(NewSearchOrder.begin(),
+ {this, MatchNonExportedInThisDylib});
+ }
+
+ ES.runSessionLocked([&]() { SearchOrder = std::move(NewSearchOrder); });
+}
+
+void JITDylib::addToSearchOrder(JITDylib &JD, bool MatchNonExported) {
+ ES.runSessionLocked([&]() {
+ SearchOrder.push_back({&JD, MatchNonExported});
+ });
+}
+
+void JITDylib::replaceInSearchOrder(JITDylib &OldJD, JITDylib &NewJD,
+ bool MatchNonExported) {
+ ES.runSessionLocked([&]() {
+ auto I = std::find_if(SearchOrder.begin(), SearchOrder.end(),
+ [&](const JITDylibSearchList::value_type &KV) {
+ return KV.first == &OldJD;
+ });
+
+ if (I != SearchOrder.end())
+ *I = {&NewJD, MatchNonExported};
+ });
+}
+
+void JITDylib::removeFromSearchOrder(JITDylib &JD) {
+ ES.runSessionLocked([&]() {
+ auto I = std::find_if(SearchOrder.begin(), SearchOrder.end(),
+ [&](const JITDylibSearchList::value_type &KV) {
+ return KV.first == &JD;
+ });
+ if (I != SearchOrder.end())
+ SearchOrder.erase(I);
+ });
+}
+
+Error JITDylib::remove(const SymbolNameSet &Names) {
+ return ES.runSessionLocked([&]() -> Error {
+ using SymbolMaterializerItrPair =
+ std::pair<SymbolTable::iterator, UnmaterializedInfosMap::iterator>;
+ std::vector<SymbolMaterializerItrPair> SymbolsToRemove;
+ SymbolNameSet Missing;
+ SymbolNameSet Materializing;
+
+ for (auto &Name : Names) {
+ auto I = Symbols.find(Name);
+
+ // Note symbol missing.
+ if (I == Symbols.end()) {
+ Missing.insert(Name);
+ continue;
+ }
+
+ // Note symbol materializing.
+ if (I->second.isInMaterializationPhase()) {
+ Materializing.insert(Name);
+ continue;
+ }
+
+ auto UMII = I->second.hasMaterializerAttached()
+ ? UnmaterializedInfos.find(Name)
+ : UnmaterializedInfos.end();
+ SymbolsToRemove.push_back(std::make_pair(I, UMII));
+ }
+
+ // If any of the symbols are not defined, return an error.
+ if (!Missing.empty())
+ return make_error<SymbolsNotFound>(std::move(Missing));
+
+ // If any of the symbols are currently materializing, return an error.
+ if (!Materializing.empty())
+ return make_error<SymbolsCouldNotBeRemoved>(std::move(Materializing));
+
+ // Remove the symbols.
+ for (auto &SymbolMaterializerItrPair : SymbolsToRemove) {
+ auto UMII = SymbolMaterializerItrPair.second;
+
+ // If there is a materializer attached, call discard.
+ if (UMII != UnmaterializedInfos.end()) {
+ UMII->second->MU->doDiscard(*this, UMII->first);
+ UnmaterializedInfos.erase(UMII);
+ }
+
+ auto SymI = SymbolMaterializerItrPair.first;
+ Symbols.erase(SymI);
+ }
+
+ return Error::success();
+ });
+}
+
+Expected<SymbolFlagsMap> JITDylib::lookupFlags(const SymbolNameSet &Names) {
+ return ES.runSessionLocked([&, this]() -> Expected<SymbolFlagsMap> {
+ SymbolFlagsMap Result;
+ auto Unresolved = lookupFlagsImpl(Result, Names);
+ if (!Unresolved)
+ return Unresolved.takeError();
+
+    // Run any definition generators.
+ for (auto &DG : DefGenerators) {
+
+ // Bail out early if we've resolved everything.
+ if (Unresolved->empty())
+ break;
+
+ // Run this generator.
+ auto NewDefs = DG->tryToGenerate(*this, *Unresolved);
+ if (!NewDefs)
+ return NewDefs.takeError();
+
+ if (!NewDefs->empty()) {
+ auto Unresolved2 = lookupFlagsImpl(Result, *NewDefs);
+ if (!Unresolved2)
+ return Unresolved2.takeError();
+ (void)Unresolved2;
+ assert(Unresolved2->empty() &&
+ "All fallback defs should have been found by lookupFlagsImpl");
+ }
+
+ for (auto &Name : *NewDefs)
+ Unresolved->erase(Name);
+ }
+ return Result;
+ });
+}
+
+Expected<SymbolNameSet> JITDylib::lookupFlagsImpl(SymbolFlagsMap &Flags,
+ const SymbolNameSet &Names) {
+ SymbolNameSet Unresolved;
+
+ for (auto &Name : Names) {
+ auto I = Symbols.find(Name);
+ if (I != Symbols.end()) {
+ assert(!Flags.count(Name) && "Symbol already present in Flags map");
+ Flags[Name] = I->second.getFlags();
+ } else
+ Unresolved.insert(Name);
+ }
+
+ return Unresolved;
+}
+
+Error JITDylib::lodgeQuery(std::shared_ptr<AsynchronousSymbolQuery> &Q,
+ SymbolNameSet &Unresolved, bool MatchNonExported,
+ MaterializationUnitList &MUs) {
+ assert(Q && "Query can not be null");
+
+ if (auto Err = lodgeQueryImpl(Q, Unresolved, MatchNonExported, MUs))
+ return Err;
+
+ // Run any definition generators.
+ for (auto &DG : DefGenerators) {
+
+ // Bail out early if we have resolved everything.
+ if (Unresolved.empty())
+ break;
+
+ // Run the generator.
+ auto NewDefs = DG->tryToGenerate(*this, Unresolved);
+
+ // If the generator returns an error then bail out.
+ if (!NewDefs)
+ return NewDefs.takeError();
+
+ // If the generator was able to generate new definitions for any of the
+ // unresolved symbols then lodge the query against them.
+ if (!NewDefs->empty()) {
+ for (auto &D : *NewDefs)
+ Unresolved.erase(D);
+
+      // Lodge query. This can not fail: any new definitions were added by
+      // the generator while the session lock was held, and since they can
+      // not have started materializing yet they can not have failed.
+ cantFail(lodgeQueryImpl(Q, *NewDefs, MatchNonExported, MUs));
+
+      assert(NewDefs->empty() &&
+             "All fallback defs should have been found by lodgeQueryImpl");
+ }
+ }
+
+ return Error::success();
+}
+
+Error JITDylib::lodgeQueryImpl(
+ std::shared_ptr<AsynchronousSymbolQuery> &Q, SymbolNameSet &Unresolved,
+ bool MatchNonExported,
+ std::vector<std::unique_ptr<MaterializationUnit>> &MUs) {
+
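+  // Names can not be erased from Unresolved while we iterate over it, so
+  // collect matched names in ToRemove and erase them after the loop.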
+ std::vector<SymbolStringPtr> ToRemove;
+ for (auto Name : Unresolved) {
+
+ // Search for the name in Symbols. Skip it if not found.
+ auto SymI = Symbols.find(Name);
+ if (SymI == Symbols.end())
+ continue;
+
+    // If this is a non-exported symbol and we're skipping those, skip it.
+ if (!SymI->second.getFlags().isExported() && !MatchNonExported)
+ continue;
+
+ // If we matched against Name in JD, mark it to be removed from the
+ // Unresolved set.
+ ToRemove.push_back(Name);
+
+ // If we matched against this symbol but it is in the error state then
+ // bail out and treat it as a failure to materialize.
+ if (SymI->second.getFlags().hasError()) {
+ auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsMap)[this] = {Name};
+ return make_error<FailedToMaterialize>(std::move(FailedSymbolsMap));
+ }
+
+    // If this symbol already meets the required state then notify the query
+    // and continue.
+ if (SymI->second.getState() >= Q->getRequiredState()) {
+ Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+ continue;
+ }
+
+ // Otherwise this symbol does not yet meet the required state. Check whether
+ // it has a materializer attached, and if so prepare to run it.
+ if (SymI->second.hasMaterializerAttached()) {
+ assert(SymI->second.getAddress() == 0 &&
+ "Symbol not resolved but already has address?");
+ auto UMII = UnmaterializedInfos.find(Name);
+ assert(UMII != UnmaterializedInfos.end() &&
+ "Lazy symbol should have UnmaterializedInfo");
+ auto MU = std::move(UMII->second->MU);
+ assert(MU != nullptr && "Materializer should not be null");
+
+ // Move all symbols associated with this MaterializationUnit into
+ // materializing state.
+ for (auto &KV : MU->getSymbols()) {
+ auto SymK = Symbols.find(KV.first);
+ SymK->second.setMaterializerAttached(false);
+ SymK->second.setState(SymbolState::Materializing);
+ UnmaterializedInfos.erase(KV.first);
+ }
+
+ // Add MU to the list of MaterializationUnits to be materialized.
+ MUs.push_back(std::move(MU));
+ }
+
+ // Add the query to the PendingQueries list.
+ assert(SymI->second.isInMaterializationPhase() &&
+ "By this line the symbol should be materializing");
+ auto &MI = MaterializingInfos[Name];
+ MI.addQuery(Q);
+ Q->addQueryDependence(*this, Name);
+ }
+
+ // Remove any symbols that we found.
+ for (auto &Name : ToRemove)
+ Unresolved.erase(Name);
+
+ return Error::success();
+}
+
+Expected<SymbolNameSet>
+JITDylib::legacyLookup(std::shared_ptr<AsynchronousSymbolQuery> Q,
+ SymbolNameSet Names) {
+ assert(Q && "Query can not be null");
+
+ ES.runOutstandingMUs();
+
+ bool QueryComplete = false;
+ std::vector<std::unique_ptr<MaterializationUnit>> MUs;
+
+ SymbolNameSet Unresolved = std::move(Names);
+ auto Err = ES.runSessionLocked([&, this]() -> Error {
+ QueryComplete = lookupImpl(Q, MUs, Unresolved);
+
+ // Run any definition generators.
+ for (auto &DG : DefGenerators) {
+
+ // Bail out early if we have resolved everything.
+ if (Unresolved.empty())
+ break;
+
+ assert(!QueryComplete && "query complete but unresolved symbols remain?");
+ auto NewDefs = DG->tryToGenerate(*this, Unresolved);
+ if (!NewDefs)
+ return NewDefs.takeError();
+ if (!NewDefs->empty()) {
+ for (auto &D : *NewDefs)
+ Unresolved.erase(D);
+ QueryComplete = lookupImpl(Q, MUs, *NewDefs);
+ assert(NewDefs->empty() &&
+ "All fallback defs should have been found by lookupImpl");
+ }
+ }
+ return Error::success();
+ });
+
+ if (Err)
+ return std::move(Err);
+
+  assert((MUs.empty() || !QueryComplete) &&
+         "If the query completed there should be no MUs to dispatch");
+
+ if (QueryComplete)
+ Q->handleComplete();
+
+ // FIXME: Swap back to the old code below once RuntimeDyld works with
+ // callbacks from asynchronous queries.
+ // Add MUs to the OutstandingMUs list.
+ {
+ std::lock_guard<std::recursive_mutex> Lock(ES.OutstandingMUsMutex);
+ for (auto &MU : MUs)
+ ES.OutstandingMUs.push_back(make_pair(this, std::move(MU)));
+ }
+ ES.runOutstandingMUs();
+
+ // Dispatch any required MaterializationUnits for materialization.
+ // for (auto &MU : MUs)
+ // ES.dispatchMaterialization(*this, std::move(MU));
+
+ return Unresolved;
+}
+
+bool JITDylib::lookupImpl(
+ std::shared_ptr<AsynchronousSymbolQuery> &Q,
+ std::vector<std::unique_ptr<MaterializationUnit>> &MUs,
+ SymbolNameSet &Unresolved) {
+ bool QueryComplete = false;
+
+ std::vector<SymbolStringPtr> ToRemove;
+ for (auto Name : Unresolved) {
+
+ // Search for the name in Symbols. Skip it if not found.
+ auto SymI = Symbols.find(Name);
+ if (SymI == Symbols.end())
+ continue;
+
+ // If we found Name, mark it to be removed from the Unresolved set.
+ ToRemove.push_back(Name);
+
+ if (SymI->second.getState() >= Q->getRequiredState()) {
+ Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+ if (Q->isComplete())
+ QueryComplete = true;
+ continue;
+ }
+
+    // If the symbol is lazy, get the MaterializationUnit for it.
+ if (SymI->second.hasMaterializerAttached()) {
+ assert(SymI->second.getAddress() == 0 &&
+ "Lazy symbol should not have a resolved address");
+ auto UMII = UnmaterializedInfos.find(Name);
+ assert(UMII != UnmaterializedInfos.end() &&
+ "Lazy symbol should have UnmaterializedInfo");
+ auto MU = std::move(UMII->second->MU);
+ assert(MU != nullptr && "Materializer should not be null");
+
+ // Kick all symbols associated with this MaterializationUnit into
+ // materializing state.
+ for (auto &KV : MU->getSymbols()) {
+ auto SymK = Symbols.find(KV.first);
+ assert(SymK != Symbols.end() && "Missing symbol table entry");
+ SymK->second.setState(SymbolState::Materializing);
+ SymK->second.setMaterializerAttached(false);
+ UnmaterializedInfos.erase(KV.first);
+ }
+
+ // Add MU to the list of MaterializationUnits to be materialized.
+ MUs.push_back(std::move(MU));
+ }
+
+ // Add the query to the PendingQueries list.
+ assert(SymI->second.isInMaterializationPhase() &&
+ "By this line the symbol should be materializing");
+ auto &MI = MaterializingInfos[Name];
+ MI.addQuery(Q);
+ Q->addQueryDependence(*this, Name);
+ }
+
+ // Remove any marked symbols from the Unresolved set.
+ for (auto &Name : ToRemove)
+ Unresolved.erase(Name);
+
+ return QueryComplete;
+}
+
+void JITDylib::dump(raw_ostream &OS) {
+ ES.runSessionLocked([&, this]() {
+ OS << "JITDylib \"" << JITDylibName << "\" (ES: "
+ << format("0x%016" PRIx64, reinterpret_cast<uintptr_t>(&ES)) << "):\n"
+ << "Search order: [";
+ for (auto &KV : SearchOrder)
+ OS << " (\"" << KV.first->getName() << "\", "
+ << (KV.second ? "all" : "exported only") << ")";
+ OS << " ]\n"
+ << "Symbol table:\n";
+
+ for (auto &KV : Symbols) {
+ OS << " \"" << *KV.first << "\": ";
+ if (auto Addr = KV.second.getAddress())
+ OS << format("0x%016" PRIx64, Addr) << ", " << KV.second.getFlags()
+ << " ";
+ else
+ OS << "<not resolved> ";
+
+ OS << KV.second.getState();
+
+ if (KV.second.hasMaterializerAttached()) {
+ OS << " (Materializer ";
+ auto I = UnmaterializedInfos.find(KV.first);
+ assert(I != UnmaterializedInfos.end() &&
+ "Lazy symbol should have UnmaterializedInfo");
+ OS << I->second->MU.get() << ")\n";
+ } else
+ OS << "\n";
+ }
+
+ if (!MaterializingInfos.empty())
+ OS << " MaterializingInfos entries:\n";
+ for (auto &KV : MaterializingInfos) {
+ OS << " \"" << *KV.first << "\":\n"
+ << " " << KV.second.pendingQueries().size()
+ << " pending queries: { ";
+ for (const auto &Q : KV.second.pendingQueries())
+ OS << Q.get() << " (" << Q->getRequiredState() << ") ";
+ OS << "}\n Dependants:\n";
+ for (auto &KV2 : KV.second.Dependants)
+ OS << " " << KV2.first->getName() << ": " << KV2.second << "\n";
+ OS << " Unemitted Dependencies:\n";
+ for (auto &KV2 : KV.second.UnemittedDependencies)
+ OS << " " << KV2.first->getName() << ": " << KV2.second << "\n";
+ }
+ });
+}
+
+void JITDylib::MaterializingInfo::addQuery(
+ std::shared_ptr<AsynchronousSymbolQuery> Q) {
+
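+  // Keep PendingQueries sorted from highest required state to lowest, so
+  // that takeQueriesMeeting can pop all newly-satisfied queries off the
+  // back of the list.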
+ auto I = std::lower_bound(
+ PendingQueries.rbegin(), PendingQueries.rend(), Q->getRequiredState(),
+ [](const std::shared_ptr<AsynchronousSymbolQuery> &V, SymbolState S) {
+ return V->getRequiredState() <= S;
+ });
+ PendingQueries.insert(I.base(), std::move(Q));
+}
+
+void JITDylib::MaterializingInfo::removeQuery(
+ const AsynchronousSymbolQuery &Q) {
+ // FIXME: Implement 'find_as' for shared_ptr<T>/T*.
+ auto I =
+ std::find_if(PendingQueries.begin(), PendingQueries.end(),
+ [&Q](const std::shared_ptr<AsynchronousSymbolQuery> &V) {
+ return V.get() == &Q;
+ });
+ assert(I != PendingQueries.end() &&
+ "Query is not attached to this MaterializingInfo");
+ PendingQueries.erase(I);
+}
+
+JITDylib::AsynchronousSymbolQueryList
+JITDylib::MaterializingInfo::takeQueriesMeeting(SymbolState RequiredState) {
+ AsynchronousSymbolQueryList Result;
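+  // PendingQueries is sorted by required state (descending), so every query
+  // satisfied by RequiredState is grouped at the back of the list.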
+ while (!PendingQueries.empty()) {
+ if (PendingQueries.back()->getRequiredState() > RequiredState)
+ break;
+
+ Result.push_back(std::move(PendingQueries.back()));
+ PendingQueries.pop_back();
+ }
+
+ return Result;
+}
+
+JITDylib::JITDylib(ExecutionSession &ES, std::string Name)
+ : ES(ES), JITDylibName(std::move(Name)) {
+ SearchOrder.push_back({this, true});
+}
+
+Error JITDylib::defineImpl(MaterializationUnit &MU) {
+ SymbolNameSet Duplicates;
+ std::vector<SymbolStringPtr> ExistingDefsOverridden;
+ std::vector<SymbolStringPtr> MUDefsOverridden;
+
+ for (const auto &KV : MU.getSymbols()) {
+ auto I = Symbols.find(KV.first);
+
+ if (I != Symbols.end()) {
+ if (KV.second.isStrong()) {
+ if (I->second.getFlags().isStrong() ||
+ I->second.getState() > SymbolState::NeverSearched)
+ Duplicates.insert(KV.first);
+ else {
+ assert(I->second.getState() == SymbolState::NeverSearched &&
+ "Overridden existing def should be in the never-searched "
+ "state");
+ ExistingDefsOverridden.push_back(KV.first);
+ }
+ } else
+ MUDefsOverridden.push_back(KV.first);
+ }
+ }
+
+ // If there were any duplicate definitions then bail out.
+ if (!Duplicates.empty())
+ return make_error<DuplicateDefinition>(**Duplicates.begin());
+
+ // Discard any overridden defs in this MU.
+ for (auto &S : MUDefsOverridden)
+ MU.doDiscard(*this, S);
+
+ // Discard existing overridden defs.
+ for (auto &S : ExistingDefsOverridden) {
+
+ auto UMII = UnmaterializedInfos.find(S);
+ assert(UMII != UnmaterializedInfos.end() &&
+ "Overridden existing def should have an UnmaterializedInfo");
+ UMII->second->MU->doDiscard(*this, S);
+ }
+
+ // Finally, add the defs from this MU.
+ for (auto &KV : MU.getSymbols()) {
+ auto &SymEntry = Symbols[KV.first];
+ SymEntry.setFlags(KV.second);
+ SymEntry.setState(SymbolState::NeverSearched);
+ SymEntry.setMaterializerAttached(true);
+ }
+
+ return Error::success();
+}
+
+void JITDylib::detachQueryHelper(AsynchronousSymbolQuery &Q,
+ const SymbolNameSet &QuerySymbols) {
+ for (auto &QuerySymbol : QuerySymbols) {
+ assert(MaterializingInfos.count(QuerySymbol) &&
+ "QuerySymbol does not have MaterializingInfo");
+ auto &MI = MaterializingInfos[QuerySymbol];
+ MI.removeQuery(Q);
+ }
+}
+
+void JITDylib::transferEmittedNodeDependencies(
+ MaterializingInfo &DependantMI, const SymbolStringPtr &DependantName,
+ MaterializingInfo &EmittedMI) {
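+  // The dependant inherits each of EmittedMI's remaining unemitted
+  // dependencies, and each such dependency gains DependantName as a
+  // dependant in return.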
+ for (auto &KV : EmittedMI.UnemittedDependencies) {
+ auto &DependencyJD = *KV.first;
+ SymbolNameSet *UnemittedDependenciesOnDependencyJD = nullptr;
+
+ for (auto &DependencyName : KV.second) {
+ auto &DependencyMI = DependencyJD.MaterializingInfos[DependencyName];
+
+ // Do not add self dependencies.
+ if (&DependencyMI == &DependantMI)
+ continue;
+
+ // If we haven't looked up the dependencies for DependencyJD yet, do it
+ // now and cache the result.
+ if (!UnemittedDependenciesOnDependencyJD)
+ UnemittedDependenciesOnDependencyJD =
+ &DependantMI.UnemittedDependencies[&DependencyJD];
+
+ DependencyMI.Dependants[this].insert(DependantName);
+ UnemittedDependenciesOnDependencyJD->insert(DependencyName);
+ }
+ }
+}
+
+ExecutionSession::ExecutionSession(std::shared_ptr<SymbolStringPool> SSP)
+ : SSP(SSP ? std::move(SSP) : std::make_shared<SymbolStringPool>()) {
+ // Construct the main dylib.
+ JDs.push_back(std::unique_ptr<JITDylib>(new JITDylib(*this, "<main>")));
+}
+
+JITDylib &ExecutionSession::getMainJITDylib() {
+ return runSessionLocked([this]() -> JITDylib & { return *JDs.front(); });
+}
+
+JITDylib *ExecutionSession::getJITDylibByName(StringRef Name) {
+ return runSessionLocked([&, this]() -> JITDylib * {
+ for (auto &JD : JDs)
+ if (JD->getName() == Name)
+ return JD.get();
+ return nullptr;
+ });
+}
+
+JITDylib &ExecutionSession::createJITDylib(std::string Name,
+ bool AddToMainDylibSearchOrder) {
+ assert(!getJITDylibByName(Name) && "JITDylib with that name already exists");
+ return runSessionLocked([&, this]() -> JITDylib & {
+ JDs.push_back(
+ std::unique_ptr<JITDylib>(new JITDylib(*this, std::move(Name))));
+ if (AddToMainDylibSearchOrder)
+ JDs.front()->addToSearchOrder(*JDs.back());
+ return *JDs.back();
+ });
+}
+
+void ExecutionSession::legacyFailQuery(AsynchronousSymbolQuery &Q, Error Err) {
+ assert(!!Err && "Error should be in failure state");
+
+ bool SendErrorToQuery;
+ runSessionLocked([&]() {
+ Q.detach();
+ SendErrorToQuery = Q.canStillFail();
+ });
+
+ if (SendErrorToQuery)
+ Q.handleFailed(std::move(Err));
+ else
+ reportError(std::move(Err));
+}
+
+Expected<SymbolMap> ExecutionSession::legacyLookup(
+ LegacyAsyncLookupFunction AsyncLookup, SymbolNameSet Names,
+ SymbolState RequiredState,
+ RegisterDependenciesFunction RegisterDependencies) {
+#if LLVM_ENABLE_THREADS
+ // In the threaded case we use promises to return the results.
+ std::promise<SymbolMap> PromisedResult;
+ Error ResolutionError = Error::success();
+ auto NotifyComplete = [&](Expected<SymbolMap> R) {
+ if (R)
+ PromisedResult.set_value(std::move(*R));
+ else {
+ ErrorAsOutParameter _(&ResolutionError);
+ ResolutionError = R.takeError();
+ PromisedResult.set_value(SymbolMap());
+ }
+ };
+#else
+ SymbolMap Result;
+ Error ResolutionError = Error::success();
+
+ auto NotifyComplete = [&](Expected<SymbolMap> R) {
+ ErrorAsOutParameter _(&ResolutionError);
+ if (R)
+ Result = std::move(*R);
+ else
+ ResolutionError = R.takeError();
+ };
+#endif
+
+ auto Query = std::make_shared<AsynchronousSymbolQuery>(
+ Names, RequiredState, std::move(NotifyComplete));
+ // FIXME: This should be run session locked along with the registration code
+ // and error reporting below.
+ SymbolNameSet UnresolvedSymbols = AsyncLookup(Query, std::move(Names));
+
+ // If the query was lodged successfully then register the dependencies,
+ // otherwise fail it with an error.
+ if (UnresolvedSymbols.empty())
+ RegisterDependencies(Query->QueryRegistrations);
+ else {
+ bool DeliverError = runSessionLocked([&]() {
+ Query->detach();
+ return Query->canStillFail();
+ });
+ auto Err = make_error<SymbolsNotFound>(std::move(UnresolvedSymbols));
+ if (DeliverError)
+ Query->handleFailed(std::move(Err));
+ else
+ reportError(std::move(Err));
+ }
+
+#if LLVM_ENABLE_THREADS
+ auto ResultFuture = PromisedResult.get_future();
+ auto Result = ResultFuture.get();
+ if (ResolutionError)
+ return std::move(ResolutionError);
+ return std::move(Result);
+
+#else
+ if (ResolutionError)
+ return std::move(ResolutionError);
+
+ return Result;
+#endif
+}
+
+void ExecutionSession::lookup(
+ const JITDylibSearchList &SearchOrder, SymbolNameSet Symbols,
+ SymbolState RequiredState, SymbolsResolvedCallback NotifyComplete,
+ RegisterDependenciesFunction RegisterDependencies) {
+
+ LLVM_DEBUG({
+ runSessionLocked([&]() {
+ dbgs() << "Looking up " << Symbols << " in " << SearchOrder
+ << " (required state: " << RequiredState << ")\n";
+ });
+ });
+
+ // lookup can be re-entered recursively if running on a single thread. Run any
+ // outstanding MUs in case this query depends on them, otherwise this lookup
+ // will starve waiting for a result from an MU that is stuck in the queue.
+ runOutstandingMUs();
+
+ auto Unresolved = std::move(Symbols);
+ std::map<JITDylib *, MaterializationUnitList> CollectedMUsMap;
+ auto Q = std::make_shared<AsynchronousSymbolQuery>(Unresolved, RequiredState,
+ std::move(NotifyComplete));
+ bool QueryComplete = false;
+
+ auto LodgingErr = runSessionLocked([&]() -> Error {
+ auto LodgeQuery = [&]() -> Error {
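+      // Lodge the query against each JITDylib in the search order in turn;
+      // each dylib claims the names it can supply and removes them from
+      // Unresolved.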
+ for (auto &KV : SearchOrder) {
+ assert(KV.first && "JITDylibList entries must not be null");
+ assert(!CollectedMUsMap.count(KV.first) &&
+ "JITDylibList should not contain duplicate entries");
+
+ auto &JD = *KV.first;
+ auto MatchNonExported = KV.second;
+ if (auto Err = JD.lodgeQuery(Q, Unresolved, MatchNonExported,
+ CollectedMUsMap[&JD]))
+ return Err;
+ }
+
+ if (!Unresolved.empty())
+ return make_error<SymbolsNotFound>(std::move(Unresolved));
+
+ return Error::success();
+ };
+
+ if (auto Err = LodgeQuery()) {
+ // Query failed.
+
+ // Disconnect the query from its dependencies.
+ Q->detach();
+
+ // Replace the MUs.
+ for (auto &KV : CollectedMUsMap)
+ for (auto &MU : KV.second)
+ KV.first->replace(std::move(MU));
+
+ return Err;
+ }
+
+ // Query lodged successfully.
+
+    // Record whether this query is complete. We will use this to call
+    // handleComplete outside the session lock.
+ QueryComplete = Q->isComplete();
+
+ // Call the register dependencies function.
+ if (RegisterDependencies && !Q->QueryRegistrations.empty())
+ RegisterDependencies(Q->QueryRegistrations);
+
+ return Error::success();
+ });
+
+ if (LodgingErr) {
+ Q->handleFailed(std::move(LodgingErr));
+ return;
+ }
+
+ if (QueryComplete)
+ Q->handleComplete();
+
+ // Move the MUs to the OutstandingMUs list, then materialize.
+ {
+ std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+
+ for (auto &KV : CollectedMUsMap)
+ for (auto &MU : KV.second)
+ OutstandingMUs.push_back(std::make_pair(KV.first, std::move(MU)));
+ }
+
+ runOutstandingMUs();
+}
+
+Expected<SymbolMap>
+ExecutionSession::lookup(const JITDylibSearchList &SearchOrder,
+ const SymbolNameSet &Symbols,
+ SymbolState RequiredState,
+ RegisterDependenciesFunction RegisterDependencies) {
+#if LLVM_ENABLE_THREADS
+ // In the threaded case we use promises to return the results.
+ std::promise<SymbolMap> PromisedResult;
+ Error ResolutionError = Error::success();
+
+ auto NotifyComplete = [&](Expected<SymbolMap> R) {
+ if (R)
+ PromisedResult.set_value(std::move(*R));
+ else {
+ ErrorAsOutParameter _(&ResolutionError);
+ ResolutionError = R.takeError();
+ PromisedResult.set_value(SymbolMap());
+ }
+ };
+
+#else
+ SymbolMap Result;
+ Error ResolutionError = Error::success();
+
+ auto NotifyComplete = [&](Expected<SymbolMap> R) {
+ ErrorAsOutParameter _(&ResolutionError);
+ if (R)
+ Result = std::move(*R);
+ else
+ ResolutionError = R.takeError();
+ };
+#endif
+
+ // Perform the asynchronous lookup.
+ lookup(SearchOrder, Symbols, RequiredState, NotifyComplete,
+ RegisterDependencies);
+
+#if LLVM_ENABLE_THREADS
+ auto ResultFuture = PromisedResult.get_future();
+ auto Result = ResultFuture.get();
+
+ if (ResolutionError)
+ return std::move(ResolutionError);
+
+ return std::move(Result);
+
+#else
+ if (ResolutionError)
+ return std::move(ResolutionError);
+
+ return Result;
+#endif
+}
+
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(const JITDylibSearchList &SearchOrder,
+ SymbolStringPtr Name) {
+ SymbolNameSet Names({Name});
+
+ if (auto ResultMap = lookup(SearchOrder, std::move(Names), SymbolState::Ready,
+ NoDependenciesToRegister)) {
+ assert(ResultMap->size() == 1 && "Unexpected number of results");
+ assert(ResultMap->count(Name) && "Missing result for symbol");
+ return std::move(ResultMap->begin()->second);
+ } else
+ return ResultMap.takeError();
+}
+
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder,
+ SymbolStringPtr Name) {
+ SymbolNameSet Names({Name});
+
+ JITDylibSearchList FullSearchOrder;
+ FullSearchOrder.reserve(SearchOrder.size());
+ for (auto *JD : SearchOrder)
+ FullSearchOrder.push_back({JD, false});
+
+ return lookup(FullSearchOrder, Name);
+}
+
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, StringRef Name) {
+ return lookup(SearchOrder, intern(Name));
+}
+
+void ExecutionSession::dump(raw_ostream &OS) {
+ runSessionLocked([this, &OS]() {
+ for (auto &JD : JDs)
+ JD->dump(OS);
+ });
+}
+
+void ExecutionSession::runOutstandingMUs() {
+ while (1) {
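+    // Dequeue one MU at a time while holding the lock, but dispatch it with
+    // the lock released so that materialization can safely re-enter this
+    // function and queue further work.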
+ std::pair<JITDylib *, std::unique_ptr<MaterializationUnit>> JITDylibAndMU;
+
+ {
+ std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+ if (!OutstandingMUs.empty()) {
+ JITDylibAndMU = std::move(OutstandingMUs.back());
+ OutstandingMUs.pop_back();
+ }
+ }
+
+ if (JITDylibAndMU.first) {
+ assert(JITDylibAndMU.second && "JITDylib, but no MU?");
+ dispatchMaterialization(*JITDylibAndMU.first,
+ std::move(JITDylibAndMU.second));
+ } else
+ break;
+ }
+}
+
+MangleAndInterner::MangleAndInterner(ExecutionSession &ES, const DataLayout &DL)
+ : ES(ES), DL(DL) {}
+
+SymbolStringPtr MangleAndInterner::operator()(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return ES.intern(MangledName);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
new file mode 100644
index 0000000000000..4a886ac0597c1
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -0,0 +1,308 @@
+//===---- ExecutionUtils.cpp - Utilities for executing functions in Orc ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+namespace orc {
+
+CtorDtorIterator::CtorDtorIterator(const GlobalVariable *GV, bool End)
+ : InitList(
+ GV ? dyn_cast_or_null<ConstantArray>(GV->getInitializer()) : nullptr),
+ I((InitList && End) ? InitList->getNumOperands() : 0) {
+}
+
+bool CtorDtorIterator::operator==(const CtorDtorIterator &Other) const {
+ assert(InitList == Other.InitList && "Incomparable iterators.");
+ return I == Other.I;
+}
+
+bool CtorDtorIterator::operator!=(const CtorDtorIterator &Other) const {
+ return !(*this == Other);
+}
+
+CtorDtorIterator& CtorDtorIterator::operator++() {
+ ++I;
+ return *this;
+}
+
+CtorDtorIterator CtorDtorIterator::operator++(int) {
+ CtorDtorIterator Temp = *this;
+ ++I;
+ return Temp;
+}
+
+CtorDtorIterator::Element CtorDtorIterator::operator*() const {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
+ assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
+
+ Constant *FuncC = CS->getOperand(1);
+ Function *Func = nullptr;
+
+ // Extract function pointer, pulling off any casts.
+ while (FuncC) {
+ if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
+ Func = F;
+ break;
+ } else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
+ if (CE->isCast())
+ FuncC = dyn_cast_or_null<ConstantExpr>(CE->getOperand(0));
+ else
+ break;
+ } else {
+ // This isn't anything we recognize. Bail out with Func left set to null.
+ break;
+ }
+ }
+
+ auto *Priority = cast<ConstantInt>(CS->getOperand(0));
+ Value *Data = CS->getNumOperands() == 3 ? CS->getOperand(2) : nullptr;
+ if (Data && !isa<GlobalValue>(Data))
+ Data = nullptr;
+ return Element(Priority->getZExtValue(), Func, Data);
+}
+
+iterator_range<CtorDtorIterator> getConstructors(const Module &M) {
+ const GlobalVariable *CtorsList = M.getNamedGlobal("llvm.global_ctors");
+ return make_range(CtorDtorIterator(CtorsList, false),
+ CtorDtorIterator(CtorsList, true));
+}
+
+iterator_range<CtorDtorIterator> getDestructors(const Module &M) {
+ const GlobalVariable *DtorsList = M.getNamedGlobal("llvm.global_dtors");
+ return make_range(CtorDtorIterator(DtorsList, false),
+ CtorDtorIterator(DtorsList, true));
+}
+
+void CtorDtorRunner::add(iterator_range<CtorDtorIterator> CtorDtors) {
+ if (CtorDtors.empty())
+ return;
+
+ MangleAndInterner Mangle(
+ JD.getExecutionSession(),
+ (*CtorDtors.begin()).Func->getParent()->getDataLayout());
+
+ for (const auto &CtorDtor : CtorDtors) {
+ assert(CtorDtor.Func && CtorDtor.Func->hasName() &&
+ "Ctor/Dtor function must be named to be runnable under the JIT");
+
+ // FIXME: Maybe use a symbol promoter here instead.
+ if (CtorDtor.Func->hasLocalLinkage()) {
+ CtorDtor.Func->setLinkage(GlobalValue::ExternalLinkage);
+ CtorDtor.Func->setVisibility(GlobalValue::HiddenVisibility);
+ }
+
+ if (CtorDtor.Data && cast<GlobalValue>(CtorDtor.Data)->isDeclaration()) {
+      // Skip entries whose associated data is only a declaration.
+ continue;
+ }
+
+ CtorDtorsByPriority[CtorDtor.Priority].push_back(
+ Mangle(CtorDtor.Func->getName()));
+ }
+}
+
+Error CtorDtorRunner::run() {
+ using CtorDtorTy = void (*)();
+
+ SymbolNameSet Names;
+
+ for (auto &KV : CtorDtorsByPriority) {
+ for (auto &Name : KV.second) {
+ auto Added = Names.insert(Name).second;
+ (void)Added;
+ assert(Added && "Ctor/Dtor names clashed");
+ }
+ }
+
+ auto &ES = JD.getExecutionSession();
+ if (auto CtorDtorMap =
+ ES.lookup(JITDylibSearchList({{&JD, true}}), std::move(Names))) {
+ for (auto &KV : CtorDtorsByPriority) {
+ for (auto &Name : KV.second) {
+ assert(CtorDtorMap->count(Name) && "No entry for Name");
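+        // Cast the symbol's address to a nullary function pointer and run it.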
+ auto CtorDtor = reinterpret_cast<CtorDtorTy>(
+ static_cast<uintptr_t>((*CtorDtorMap)[Name].getAddress()));
+ CtorDtor();
+ }
+ }
+ CtorDtorsByPriority.clear();
+ return Error::success();
+ } else
+ return CtorDtorMap.takeError();
+}
+
+void LocalCXXRuntimeOverridesBase::runDestructors() {
+ auto& CXXDestructorDataPairs = DSOHandleOverride;
+ for (auto &P : CXXDestructorDataPairs)
+ P.first(P.second);
+ CXXDestructorDataPairs.clear();
+}
+
+int LocalCXXRuntimeOverridesBase::CXAAtExitOverride(DestructorPtr Destructor,
+ void *Arg,
+ void *DSOHandle) {
+ auto& CXXDestructorDataPairs =
+ *reinterpret_cast<CXXDestructorDataPairList*>(DSOHandle);
+ CXXDestructorDataPairs.push_back(std::make_pair(Destructor, Arg));
+ return 0;
+}
+
+Error LocalCXXRuntimeOverrides::enable(JITDylib &JD,
+ MangleAndInterner &Mangle) {
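+  // Interpose __dso_handle and __cxa_atexit so that destructors registered
+  // by JIT'd code are recorded in this object's list (and run later by
+  // runDestructors()) rather than being added to the process's atexit list.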
+ SymbolMap RuntimeInterposes;
+ RuntimeInterposes[Mangle("__dso_handle")] =
+ JITEvaluatedSymbol(toTargetAddress(&DSOHandleOverride),
+ JITSymbolFlags::Exported);
+ RuntimeInterposes[Mangle("__cxa_atexit")] =
+ JITEvaluatedSymbol(toTargetAddress(&CXAAtExitOverride),
+ JITSymbolFlags::Exported);
+
+ return JD.define(absoluteSymbols(std::move(RuntimeInterposes)));
+}
+
+DynamicLibrarySearchGenerator::DynamicLibrarySearchGenerator(
+ sys::DynamicLibrary Dylib, char GlobalPrefix, SymbolPredicate Allow)
+ : Dylib(std::move(Dylib)), Allow(std::move(Allow)),
+ GlobalPrefix(GlobalPrefix) {}
+
+Expected<std::unique_ptr<DynamicLibrarySearchGenerator>>
+DynamicLibrarySearchGenerator::Load(const char *FileName, char GlobalPrefix,
+ SymbolPredicate Allow) {
+ std::string ErrMsg;
+ auto Lib = sys::DynamicLibrary::getPermanentLibrary(FileName, &ErrMsg);
+ if (!Lib.isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+ return std::make_unique<DynamicLibrarySearchGenerator>(
+ std::move(Lib), GlobalPrefix, std::move(Allow));
+}
+
+Expected<SymbolNameSet>
+DynamicLibrarySearchGenerator::tryToGenerate(JITDylib &JD,
+ const SymbolNameSet &Names) {
+ orc::SymbolNameSet Added;
+ orc::SymbolMap NewSymbols;
+
+ bool HasGlobalPrefix = (GlobalPrefix != '\0');
+
+ for (auto &Name : Names) {
+ if ((*Name).empty())
+ continue;
+
+ if (Allow && !Allow(Name))
+ continue;
+
+ if (HasGlobalPrefix && (*Name).front() != GlobalPrefix)
+ continue;
+
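+    // Strip the global prefix, if present, before querying the library
+    // (HasGlobalPrefix converts to 1 when a prefix byte must be skipped).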
+ std::string Tmp((*Name).data() + HasGlobalPrefix,
+ (*Name).size() - HasGlobalPrefix);
+ if (void *Addr = Dylib.getAddressOfSymbol(Tmp.c_str())) {
+ Added.insert(Name);
+ NewSymbols[Name] = JITEvaluatedSymbol(
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(Addr)),
+ JITSymbolFlags::Exported);
+ }
+ }
+
+ // Add any new symbols to JD. Since the generator is only called for symbols
+ // that are not already defined, this will never trigger a duplicate
+ // definition error, so we can wrap this call in a 'cantFail'.
+ if (!NewSymbols.empty())
+ cantFail(JD.define(absoluteSymbols(std::move(NewSymbols))));
+
+ return Added;
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Load(ObjectLayer &L, const char *FileName) {
+ auto ArchiveBuffer = errorOrToExpected(MemoryBuffer::getFile(FileName));
+
+ if (!ArchiveBuffer)
+ return ArchiveBuffer.takeError();
+
+ return Create(L, std::move(*ArchiveBuffer));
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Create(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer) {
+ Error Err = Error::success();
+
+ std::unique_ptr<StaticLibraryDefinitionGenerator> ADG(
+ new StaticLibraryDefinitionGenerator(L, std::move(ArchiveBuffer), Err));
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(ADG);
+}
+
+Expected<SymbolNameSet>
+StaticLibraryDefinitionGenerator::tryToGenerate(JITDylib &JD,
+ const SymbolNameSet &Names) {
+
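+  // Collect the archive members that define the requested symbols.
+  // ChildBufferInfos is a set so that a member defining several of the
+  // requested names is only added to the layer once.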
+ DenseSet<std::pair<StringRef, StringRef>> ChildBufferInfos;
+ SymbolNameSet NewDefs;
+
+ for (const auto &Name : Names) {
+ auto Child = Archive.findSym(*Name);
+ if (!Child)
+ return Child.takeError();
+ if (*Child == None)
+ continue;
+ auto ChildBuffer = (*Child)->getMemoryBufferRef();
+ if (!ChildBuffer)
+ return ChildBuffer.takeError();
+ ChildBufferInfos.insert(
+ {ChildBuffer->getBuffer(), ChildBuffer->getBufferIdentifier()});
+ NewDefs.insert(Name);
+ }
+
+ for (auto ChildBufferInfo : ChildBufferInfos) {
+ MemoryBufferRef ChildBufferRef(ChildBufferInfo.first,
+ ChildBufferInfo.second);
+
+ if (auto Err =
+ L.add(JD, MemoryBuffer::getMemBuffer(ChildBufferRef), VModuleKey()))
+ return std::move(Err);
+
+ --UnrealizedObjects;
+ }
+
+ return NewDefs;
+}
+
+StaticLibraryDefinitionGenerator::StaticLibraryDefinitionGenerator(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer, Error &Err)
+ : L(L), ArchiveBuffer(std::move(ArchiveBuffer)),
+ Archive(*this->ArchiveBuffer, Err) {
+
+ if (Err)
+ return;
+
+ Error Err2 = Error::success();
+ for (auto _ : Archive.children(Err2)) {
+ (void)_;
+ ++UnrealizedObjects;
+ }
+
+  // Hand any error back to the caller via the Err out-parameter; it is the
+  // caller's responsibility to check it.
+ Err = std::move(Err2);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp b/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
new file mode 100644
index 0000000000000..d311f34179c7c
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
@@ -0,0 +1,43 @@
+//===--------------- IRCompileLayer.cpp - IR Compiling Layer --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+
+namespace llvm {
+namespace orc {
+
+IRCompileLayer::IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
+ CompileFunction Compile)
+ : IRLayer(ES), BaseLayer(BaseLayer), Compile(std::move(Compile)) {}
+
+void IRCompileLayer::setNotifyCompiled(NotifyCompiledFunction NotifyCompiled) {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ this->NotifyCompiled = std::move(NotifyCompiled);
+}
+
+void IRCompileLayer::emit(MaterializationResponsibility R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto Obj = TSM.withModuleDo(Compile)) {
+ {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
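+      // Hand the module to the NotifyCompiled callback if one is set;
+      // otherwise discard it now that compilation has produced the object.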
+ if (NotifyCompiled)
+ NotifyCompiled(R.getVModuleKey(), std::move(TSM));
+ else
+ TSM = ThreadSafeModule();
+ }
+ BaseLayer.emit(std::move(R), std::move(*Obj));
+ } else {
+ R.failMaterialization();
+ getExecutionSession().reportError(Obj.takeError());
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp b/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
new file mode 100644
index 0000000000000..845ecc71eb870
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
@@ -0,0 +1,33 @@
+//===-------------- IRTransformLayer.cpp - IR Transform Layer -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+IRTransformLayer::IRTransformLayer(ExecutionSession &ES,
+ IRLayer &BaseLayer,
+ TransformFunction Transform)
+ : IRLayer(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+void IRTransformLayer::emit(MaterializationResponsibility R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto TransformedTSM = Transform(std::move(TSM), R))
+ BaseLayer.emit(std::move(R), std::move(*TransformedTSM));
+ else {
+ R.failMaterialization();
+ getExecutionSession().reportError(TransformedTSM.takeError());
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
new file mode 100644
index 0000000000000..0295db7633dd0
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -0,0 +1,374 @@
+//===---- IndirectionUtils.cpp - Utilities for call indirection in Orc ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <sstream>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+class CompileCallbackMaterializationUnit : public orc::MaterializationUnit {
+public:
+ using CompileFunction = JITCompileCallbackManager::CompileFunction;
+
+ CompileCallbackMaterializationUnit(SymbolStringPtr Name,
+ CompileFunction Compile, VModuleKey K)
+ : MaterializationUnit(SymbolFlagsMap({{Name, JITSymbolFlags::Exported}}),
+ std::move(K)),
+ Name(std::move(Name)), Compile(std::move(Compile)) {}
+
+ StringRef getName() const override { return "<Compile Callbacks>"; }
+
+private:
+ void materialize(MaterializationResponsibility R) override {
+ SymbolMap Result;
+ Result[Name] = JITEvaluatedSymbol(Compile(), JITSymbolFlags::Exported);
+ // No dependencies, so these calls cannot fail.
+ cantFail(R.notifyResolved(Result));
+ cantFail(R.notifyEmitted());
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+    llvm_unreachable("Discard should never occur on a compile callback MU");
+ }
+
+ SymbolStringPtr Name;
+ CompileFunction Compile;
+};
+
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+void IndirectStubsManager::anchor() {}
+void TrampolinePool::anchor() {}
+
+Expected<JITTargetAddress>
+JITCompileCallbackManager::getCompileCallback(CompileFunction Compile) {
+ if (auto TrampolineAddr = TP->getTrampoline()) {
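+    // Bind the new trampoline address to a uniquely-named compile callback
+    // symbol; materializing that symbol invokes Compile and publishes its
+    // result as the symbol's address.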
+ auto CallbackName =
+ ES.intern(std::string("cc") + std::to_string(++NextCallbackId));
+
+ std::lock_guard<std::mutex> Lock(CCMgrMutex);
+ AddrToSymbol[*TrampolineAddr] = CallbackName;
+ cantFail(CallbacksJD.define(
+ std::make_unique<CompileCallbackMaterializationUnit>(
+ std::move(CallbackName), std::move(Compile),
+ ES.allocateVModule())));
+ return *TrampolineAddr;
+ } else
+ return TrampolineAddr.takeError();
+}
+
+JITTargetAddress JITCompileCallbackManager::executeCompileCallback(
+ JITTargetAddress TrampolineAddr) {
+ SymbolStringPtr Name;
+
+ {
+ std::unique_lock<std::mutex> Lock(CCMgrMutex);
+ auto I = AddrToSymbol.find(TrampolineAddr);
+
+ // If this address is not associated with a compile callback then report an
+ // error to the execution session and return ErrorHandlerAddress to the
+ // callee.
+ if (I == AddrToSymbol.end()) {
+ Lock.unlock();
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "No compile callback for trampoline at "
+ << format("0x%016" PRIx64, TrampolineAddr);
+ }
+ ES.reportError(
+ make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode()));
+ return ErrorHandlerAddress;
+ } else
+ Name = I->second;
+ }
+
+ if (auto Sym = ES.lookup(JITDylibSearchList({{&CallbacksJD, true}}), Name))
+ return Sym->getAddress();
+ else {
+    // If anything goes wrong materializing Sym then report it to the session
+    // and return the ErrorHandlerAddress.
+ ES.reportError(Sym.takeError());
+ return ErrorHandlerAddress;
+ }
+}
+
+Expected<std::unique_ptr<JITCompileCallbackManager>>
+createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddress) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+ case Triple::aarch64:
+ case Triple::aarch64_32: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcAArch64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcI386> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Be> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ case Triple::mipsel: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Le> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips64:
+ case Triple::mips64el: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86_64: {
+    if (T.getOS() == Triple::OSType::Win32) {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_Win32> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ } else {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_SysV> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ }
+
+ }
+}
+
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T) {
+ switch (T.getArch()) {
+ default:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcGenericABI>>();
+ };
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcAArch64>>();
+ };
+
+ case Triple::x86:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcI386>>();
+ };
+
+ case Triple::mips:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Be>>();
+ };
+
+ case Triple::mipsel:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Le>>();
+ };
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips64>>();
+ };
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32) {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_Win32>>();
+ };
+ } else {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_SysV>>();
+ };
+ }
+
+ }
+}
+
+Constant* createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr) {
+ Constant *AddrIntVal =
+ ConstantInt::get(Type::getInt64Ty(FT.getContext()), Addr);
+ Constant *AddrPtrVal =
+ ConstantExpr::getCast(Instruction::IntToPtr, AddrIntVal,
+ PointerType::get(&FT, 0));
+ return AddrPtrVal;
+}
+
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer) {
+ auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
+ Initializer, Name, nullptr,
+ GlobalValue::NotThreadLocal, 0, true);
+ IP->setVisibility(GlobalValue::HiddenVisibility);
+ return IP;
+}
+
+void makeStub(Function &F, Value &ImplPointer) {
+ assert(F.isDeclaration() && "Can't turn a definition into a stub.");
+ assert(F.getParent() && "Function isn't in a module.");
+ Module &M = *F.getParent();
+ BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
+ IRBuilder<> Builder(EntryBlock);
+ LoadInst *ImplAddr = Builder.CreateLoad(F.getType(), &ImplPointer);
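+  // Forward the stub's arguments to the implementation and tail-call it.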
+ std::vector<Value*> CallArgs;
+ for (auto &A : F.args())
+ CallArgs.push_back(&A);
+ CallInst *Call = Builder.CreateCall(F.getFunctionType(), ImplAddr, CallArgs);
+ Call->setTailCall();
+ Call->setAttributes(F.getAttributes());
+ if (F.getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+}
+
+std::vector<GlobalValue *> SymbolLinkagePromoter::operator()(Module &M) {
+ std::vector<GlobalValue *> PromotedGlobals;
+
+ for (auto &GV : M.global_values()) {
+ bool Promoted = true;
+
+ // Rename if necessary.
+ if (!GV.hasName())
+ GV.setName("__orc_anon." + Twine(NextId++));
+ else if (GV.getName().startswith("\01L"))
+ GV.setName("__" + GV.getName().substr(1) + "." + Twine(NextId++));
+ else if (GV.hasLocalLinkage())
+ GV.setName("__orc_lcl." + GV.getName() + "." + Twine(NextId++));
+ else
+ Promoted = false;
+
+ if (GV.hasLocalLinkage()) {
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+ GV.setVisibility(GlobalValue::HiddenVisibility);
+ Promoted = true;
+ }
+ GV.setUnnamedAddr(GlobalValue::UnnamedAddr::None);
+
+ if (Promoted)
+ PromotedGlobals.push_back(&GV);
+ }
+
+ return PromotedGlobals;
+}
+
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap) {
+ Function *NewF =
+ Function::Create(cast<FunctionType>(F.getValueType()),
+ F.getLinkage(), F.getName(), &Dst);
+ NewF->copyAttributesFrom(&F);
+
+ if (VMap) {
+ (*VMap)[&F] = NewF;
+ auto NewArgI = NewF->arg_begin();
+ for (auto ArgI = F.arg_begin(), ArgE = F.arg_end(); ArgI != ArgE;
+ ++ArgI, ++NewArgI)
+ (*VMap)[&*ArgI] = &*NewArgI;
+ }
+
+ return NewF;
+}
+
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ Function *NewF) {
+ assert(!OrigF.isDeclaration() && "Nothing to move");
+ if (!NewF)
+ NewF = cast<Function>(VMap[&OrigF]);
+ else
+ assert(VMap[&OrigF] == NewF && "Incorrect function mapping in VMap.");
+ assert(NewF && "Function mapping missing from VMap.");
+ assert(NewF->getParent() != OrigF.getParent() &&
+ "moveFunctionBody should only be used to move bodies between "
+ "modules.");
+
+ SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
+ CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns,
+ "", nullptr, nullptr, Materializer);
+ OrigF.deleteBody();
+}
+
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap) {
+ GlobalVariable *NewGV = new GlobalVariable(
+ Dst, GV.getValueType(), GV.isConstant(),
+ GV.getLinkage(), nullptr, GV.getName(), nullptr,
+ GV.getThreadLocalMode(), GV.getType()->getAddressSpace());
+ NewGV->copyAttributesFrom(&GV);
+ if (VMap)
+ (*VMap)[&GV] = NewGV;
+ return NewGV;
+}
+
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ GlobalVariable *NewGV) {
+ assert(OrigGV.hasInitializer() && "Nothing to move");
+ if (!NewGV)
+ NewGV = cast<GlobalVariable>(VMap[&OrigGV]);
+ else
+ assert(VMap[&OrigGV] == NewGV &&
+ "Incorrect global variable mapping in VMap.");
+ assert(NewGV->getParent() != OrigGV.getParent() &&
+ "moveGlobalVariableInitializer should only be used to move "
+ "initializers between modules");
+
+ NewGV->setInitializer(MapValue(OrigGV.getInitializer(), VMap, RF_None,
+ nullptr, Materializer));
+}
+
+GlobalAlias* cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap) {
+ assert(OrigA.getAliasee() && "Original alias doesn't have an aliasee?");
+ auto *NewA = GlobalAlias::create(OrigA.getValueType(),
+ OrigA.getType()->getPointerAddressSpace(),
+ OrigA.getLinkage(), OrigA.getName(), &Dst);
+ NewA->copyAttributesFrom(&OrigA);
+ VMap[&OrigA] = NewA;
+ return NewA;
+}
+
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+ ValueToValueMapTy &VMap) {
+ auto *MFs = Src.getModuleFlagsMetadata();
+ if (!MFs)
+ return;
+ for (auto *MF : MFs->operands())
+ Dst.addModuleFlag(MapMetadata(MF, VMap));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp b/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
new file mode 100644
index 0000000000000..1d3e6db913e21
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
@@ -0,0 +1,69 @@
+//===----- JITTargetMachineBuilder.cpp - Build TargetMachines for JIT -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+
+#include "llvm/Support/Host.h"
+#include "llvm/Support/TargetRegistry.h"
+
+namespace llvm {
+namespace orc {
+
+JITTargetMachineBuilder::JITTargetMachineBuilder(Triple TT)
+ : TT(std::move(TT)) {
+ Options.EmulatedTLS = true;
+ Options.ExplicitEmulatedTLS = true;
+}
+
+Expected<JITTargetMachineBuilder> JITTargetMachineBuilder::detectHost() {
+ // FIXME: getProcessTriple is bogus. It returns the host LLVM was compiled on,
+ // rather than a valid triple for the current process.
+ JITTargetMachineBuilder TMBuilder((Triple(sys::getProcessTriple())));
+
+ // Retrieve host CPU name and sub-target features and add them to builder.
+ // Relocation model, code model and codegen opt level are kept to default
+ // values.
+ llvm::SubtargetFeatures SubtargetFeatures;
+ llvm::StringMap<bool> FeatureMap;
+ llvm::sys::getHostCPUFeatures(FeatureMap);
+ for (auto &Feature : FeatureMap)
+ SubtargetFeatures.AddFeature(Feature.first(), Feature.second);
+
+ TMBuilder.setCPU(llvm::sys::getHostCPUName());
+ TMBuilder.addFeatures(SubtargetFeatures.getFeatures());
+
+ return TMBuilder;
+}
+
+Expected<std::unique_ptr<TargetMachine>>
+JITTargetMachineBuilder::createTargetMachine() {
+
+ std::string ErrMsg;
+ auto *TheTarget = TargetRegistry::lookupTarget(TT.getTriple(), ErrMsg);
+ if (!TheTarget)
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+
+ auto *TM =
+ TheTarget->createTargetMachine(TT.getTriple(), CPU, Features.getString(),
+ Options, RM, CM, OptLevel, /*JIT*/ true);
+ if (!TM)
+ return make_error<StringError>("Could not allocate target machine",
+ inconvertibleErrorCode());
+
+ return std::unique_ptr<TargetMachine>(TM);
+}
+
+JITTargetMachineBuilder &JITTargetMachineBuilder::addFeatures(
+ const std::vector<std::string> &FeatureVec) {
+ for (const auto &F : FeatureVec)
+ Features.AddFeature(F);
+ return *this;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
new file mode 100644
index 0000000000000..a80f78afe80f9
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -0,0 +1,240 @@
+//===--------- LLJIT.cpp - An ORC-based JIT for compiling LLVM IR ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/Mangler.h"
+
+namespace llvm {
+namespace orc {
+
+Error LLJITBuilderState::prepareForConstruction() {
+
+ if (!JTMB) {
+ if (auto JTMBOrErr = JITTargetMachineBuilder::detectHost())
+ JTMB = std::move(*JTMBOrErr);
+ else
+ return JTMBOrErr.takeError();
+ }
+
+ return Error::success();
+}
+
+LLJIT::~LLJIT() {
+ if (CompileThreads)
+ CompileThreads->wait();
+}
+
+Error LLJIT::defineAbsolute(StringRef Name, JITEvaluatedSymbol Sym) {
+ auto InternedName = ES->intern(Name);
+ SymbolMap Symbols({{InternedName, Sym}});
+ return Main.define(absoluteSymbols(std::move(Symbols)));
+}
+
+Error LLJIT::addIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+ assert(TSM && "Can not add null module");
+
+ if (auto Err =
+ TSM.withModuleDo([&](Module &M) { return applyDataLayout(M); }))
+ return Err;
+
+ return CompileLayer->add(JD, std::move(TSM), ES->allocateVModule());
+}
+
+Error LLJIT::addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj) {
+ assert(Obj && "Can not add null object");
+
+ return ObjLinkingLayer->add(JD, std::move(Obj), ES->allocateVModule());
+}
+
+Expected<JITEvaluatedSymbol> LLJIT::lookupLinkerMangled(JITDylib &JD,
+ StringRef Name) {
+ return ES->lookup(JITDylibSearchList({{&JD, true}}), ES->intern(Name));
+}
+
+std::unique_ptr<ObjectLayer>
+LLJIT::createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES) {
+
+ // If the config state provided an ObjectLinkingLayer factory then use it.
+ if (S.CreateObjectLinkingLayer)
+ return S.CreateObjectLinkingLayer(ES, S.JTMB->getTargetTriple());
+
+ // Otherwise default to creating an RTDyldObjectLinkingLayer that constructs
+ // a new SectionMemoryManager for each object.
+ auto GetMemMgr = []() { return std::make_unique<SectionMemoryManager>(); };
+ auto ObjLinkingLayer =
+ std::make_unique<RTDyldObjectLinkingLayer>(ES, std::move(GetMemMgr));
+
+ if (S.JTMB->getTargetTriple().isOSBinFormatCOFF())
+ ObjLinkingLayer->setOverrideObjectFlagsWithResponsibilityFlags(true);
+
+ // FIXME: Explicit conversion to std::unique_ptr<ObjectLayer> added to silence
+ // errors from some GCC / libstdc++ bots. Remove this conversion (i.e.
+ // just return ObjLinkingLayer) once those bots are upgraded.
+ return std::unique_ptr<ObjectLayer>(std::move(ObjLinkingLayer));
+}
+
+Expected<IRCompileLayer::CompileFunction>
+LLJIT::createCompileFunction(LLJITBuilderState &S,
+ JITTargetMachineBuilder JTMB) {
+
+  // If there is a custom compile function creator set then use it.
+ if (S.CreateCompileFunction)
+ return S.CreateCompileFunction(std::move(JTMB));
+
+ // Otherwise default to creating a SimpleCompiler, or ConcurrentIRCompiler,
+ // depending on the number of threads requested.
+ if (S.NumCompileThreads > 0)
+ return ConcurrentIRCompiler(std::move(JTMB));
+
+ auto TM = JTMB.createTargetMachine();
+ if (!TM)
+ return TM.takeError();
+
+ return TMOwningSimpleCompiler(std::move(*TM));
+}
+
+LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
+ : ES(S.ES ? std::move(S.ES) : std::make_unique<ExecutionSession>()),
+ Main(this->ES->getMainJITDylib()), DL(""), CtorRunner(Main),
+ DtorRunner(Main) {
+
+ ErrorAsOutParameter _(&Err);
+
+ ObjLinkingLayer = createObjectLinkingLayer(S, *ES);
+
+ if (auto DLOrErr = S.JTMB->getDefaultDataLayoutForTarget())
+ DL = std::move(*DLOrErr);
+ else {
+ Err = DLOrErr.takeError();
+ return;
+ }
+
+ {
+ auto CompileFunction = createCompileFunction(S, std::move(*S.JTMB));
+ if (!CompileFunction) {
+ Err = CompileFunction.takeError();
+ return;
+ }
+ CompileLayer = std::make_unique<IRCompileLayer>(
+ *ES, *ObjLinkingLayer, std::move(*CompileFunction));
+ }
+
+ if (S.NumCompileThreads > 0) {
+ CompileLayer->setCloneToNewContextOnEmit(true);
+ CompileThreads = std::make_unique<ThreadPool>(S.NumCompileThreads);
+ ES->setDispatchMaterialization(
+ [this](JITDylib &JD, std::unique_ptr<MaterializationUnit> MU) {
+ // FIXME: Switch to move capture once we have c++14.
+ auto SharedMU = std::shared_ptr<MaterializationUnit>(std::move(MU));
+ auto Work = [SharedMU, &JD]() { SharedMU->doMaterialize(JD); };
+ CompileThreads->async(std::move(Work));
+ });
+ }
+}
+
+std::string LLJIT::mangle(StringRef UnmangledName) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, UnmangledName, DL);
+ }
+ return MangledName;
+}
+
+Error LLJIT::applyDataLayout(Module &M) {
+ if (M.getDataLayout().isDefault())
+ M.setDataLayout(DL);
+
+ if (M.getDataLayout() != DL)
+ return make_error<StringError>(
+ "Added modules have incompatible data layouts",
+ inconvertibleErrorCode());
+
+ return Error::success();
+}
+
+void LLJIT::recordCtorDtors(Module &M) {
+ CtorRunner.add(getConstructors(M));
+ DtorRunner.add(getDestructors(M));
+}
+
+Error LLLazyJITBuilderState::prepareForConstruction() {
+ if (auto Err = LLJITBuilderState::prepareForConstruction())
+ return Err;
+ TT = JTMB->getTargetTriple();
+ return Error::success();
+}
+
+Error LLLazyJIT::addLazyIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+ assert(TSM && "Can not add null module");
+
+ if (auto Err = TSM.withModuleDo([&](Module &M) -> Error {
+ if (auto Err = applyDataLayout(M))
+ return Err;
+
+ recordCtorDtors(M);
+ return Error::success();
+ }))
+ return Err;
+
+ return CODLayer->add(JD, std::move(TSM), ES->allocateVModule());
+}
+
+LLLazyJIT::LLLazyJIT(LLLazyJITBuilderState &S, Error &Err) : LLJIT(S, Err) {
+
+ // If LLJIT construction failed then bail out.
+ if (Err)
+ return;
+
+ ErrorAsOutParameter _(&Err);
+
+  // Take/create the lazy-compile call-through manager.
+ if (S.LCTMgr)
+ LCTMgr = std::move(S.LCTMgr);
+ else {
+ if (auto LCTMgrOrErr = createLocalLazyCallThroughManager(
+ S.TT, *ES, S.LazyCompileFailureAddr))
+ LCTMgr = std::move(*LCTMgrOrErr);
+ else {
+ Err = LCTMgrOrErr.takeError();
+ return;
+ }
+ }
+
+ // Take/Create the indirect stubs manager builder.
+ auto ISMBuilder = std::move(S.ISMBuilder);
+
+ // If none was provided, try to build one.
+ if (!ISMBuilder)
+ ISMBuilder = createLocalIndirectStubsManagerBuilder(S.TT);
+
+ // No luck. Bail out.
+ if (!ISMBuilder) {
+ Err = make_error<StringError>("Could not construct "
+ "IndirectStubsManagerBuilder for target " +
+ S.TT.str(),
+ inconvertibleErrorCode());
+ return;
+ }
+
+ // Create the transform layer.
+ TransformLayer = std::make_unique<IRTransformLayer>(*ES, *CompileLayer);
+
+ // Create the COD layer.
+ CODLayer = std::make_unique<CompileOnDemandLayer>(
+ *ES, *TransformLayer, *LCTMgr, std::move(ISMBuilder));
+
+ if (S.NumCompileThreads > 0)
+ CODLayer->setCloneToNewContextOnEmit(true);
+}
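+
+// Usage sketch (illustrative only, not part of this patch): the lazy
+// variant. Functions in TSM are compiled on first call rather than at add
+// time; lookup only materializes a call-through stub.
+//
+//   auto J = cantFail(LLLazyJITBuilder().create());
+//   cantFail(J->addLazyIRModule(std::move(TSM)));
+//   auto FooSym = cantFail(J->lookup("foo"));
+//   // Calling through FooSym's address triggers compilation of "foo".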
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/Layer.cpp b/llvm/lib/ExecutionEngine/Orc/Layer.cpp
new file mode 100644
index 0000000000000..580e2682ec8c6
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/Layer.cpp
@@ -0,0 +1,185 @@
+//===-------------------- Layer.cpp - Layer interfaces --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+IRLayer::IRLayer(ExecutionSession &ES) : ES(ES) {}
+IRLayer::~IRLayer() {}
+
+Error IRLayer::add(JITDylib &JD, ThreadSafeModule TSM, VModuleKey K) {
+ return JD.define(std::make_unique<BasicIRLayerMaterializationUnit>(
+ *this, std::move(K), std::move(TSM)));
+}
+
+IRMaterializationUnit::IRMaterializationUnit(ExecutionSession &ES,
+ ThreadSafeModule TSM, VModuleKey K)
+ : MaterializationUnit(SymbolFlagsMap(), std::move(K)), TSM(std::move(TSM)) {
+
+ assert(this->TSM && "Module must not be null");
+
+ MangleAndInterner Mangle(ES, this->TSM.getModuleUnlocked()->getDataLayout());
+ this->TSM.withModuleDo([&](Module &M) {
+ for (auto &G : M.global_values()) {
+ if (G.hasName() && !G.isDeclaration() && !G.hasLocalLinkage() &&
+ !G.hasAvailableExternallyLinkage() && !G.hasAppendingLinkage()) {
+ auto MangledName = Mangle(G.getName());
+ SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(G);
+ SymbolToDefinition[MangledName] = &G;
+ }
+ }
+ });
+}
+
+IRMaterializationUnit::IRMaterializationUnit(
+ ThreadSafeModule TSM, VModuleKey K, SymbolFlagsMap SymbolFlags,
+ SymbolNameToDefinitionMap SymbolToDefinition)
+ : MaterializationUnit(std::move(SymbolFlags), std::move(K)),
+ TSM(std::move(TSM)), SymbolToDefinition(std::move(SymbolToDefinition)) {}
+
+StringRef IRMaterializationUnit::getName() const {
+ if (TSM)
+ return TSM.withModuleDo(
+ [](const Module &M) -> StringRef { return M.getModuleIdentifier(); });
+ return "<null module>";
+}
+
+void IRMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ LLVM_DEBUG(JD.getExecutionSession().runSessionLocked([&]() {
+ dbgs() << "In " << JD.getName() << " discarding " << *Name << " from MU@"
+ << this << " (" << getName() << ")\n";
+ }););
+
+ auto I = SymbolToDefinition.find(Name);
+ assert(I != SymbolToDefinition.end() &&
+ "Symbol not provided by this MU, or previously discarded");
+ assert(!I->second->isDeclaration() &&
+ "Discard should only apply to definitions");
+ I->second->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ SymbolToDefinition.erase(I);
+}
+
+BasicIRLayerMaterializationUnit::BasicIRLayerMaterializationUnit(
+ IRLayer &L, VModuleKey K, ThreadSafeModule TSM)
+ : IRMaterializationUnit(L.getExecutionSession(), std::move(TSM),
+ std::move(K)),
+ L(L), K(std::move(K)) {}
+
+void BasicIRLayerMaterializationUnit::materialize(
+ MaterializationResponsibility R) {
+
+ // Throw away the SymbolToDefinition map: it's not usable after we hand
+ // off the module.
+ SymbolToDefinition.clear();
+
+ // If cloneToNewContextOnEmit is set, clone the module now.
+ if (L.getCloneToNewContextOnEmit())
+ TSM = cloneToNewContext(TSM);
+
+#ifndef NDEBUG
+ auto &ES = R.getTargetJITDylib().getExecutionSession();
+ auto &N = R.getTargetJITDylib().getName();
+#endif // NDEBUG
+
+ LLVM_DEBUG(ES.runSessionLocked(
+ [&]() { dbgs() << "Emitting, for " << N << ", " << *this << "\n"; }););
+ L.emit(std::move(R), std::move(TSM));
+ LLVM_DEBUG(ES.runSessionLocked([&]() {
+ dbgs() << "Finished emitting, for " << N << ", " << *this << "\n";
+ }););
+}
+
+ObjectLayer::ObjectLayer(ExecutionSession &ES) : ES(ES) {}
+
+ObjectLayer::~ObjectLayer() {}
+
+Error ObjectLayer::add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O,
+ VModuleKey K) {
+ auto ObjMU = BasicObjectLayerMaterializationUnit::Create(*this, std::move(K),
+ std::move(O));
+ if (!ObjMU)
+ return ObjMU.takeError();
+ return JD.define(std::move(*ObjMU));
+}
+
+Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
+BasicObjectLayerMaterializationUnit::Create(ObjectLayer &L, VModuleKey K,
+ std::unique_ptr<MemoryBuffer> O) {
+ auto SymbolFlags =
+ getObjectSymbolFlags(L.getExecutionSession(), O->getMemBufferRef());
+
+ if (!SymbolFlags)
+ return SymbolFlags.takeError();
+
+ return std::unique_ptr<BasicObjectLayerMaterializationUnit>(
+ new BasicObjectLayerMaterializationUnit(L, K, std::move(O),
+ std::move(*SymbolFlags)));
+}
+
+BasicObjectLayerMaterializationUnit::BasicObjectLayerMaterializationUnit(
+ ObjectLayer &L, VModuleKey K, std::unique_ptr<MemoryBuffer> O,
+ SymbolFlagsMap SymbolFlags)
+ : MaterializationUnit(std::move(SymbolFlags), std::move(K)), L(L),
+ O(std::move(O)) {}
+
+StringRef BasicObjectLayerMaterializationUnit::getName() const {
+ if (O)
+ return O->getBufferIdentifier();
+ return "<null object>";
+}
+
+void BasicObjectLayerMaterializationUnit::materialize(
+ MaterializationResponsibility R) {
+ L.emit(std::move(R), std::move(O));
+}
+
+void BasicObjectLayerMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ // FIXME: Support object file level discard. This could be done by building a
+ // filter to pass to the object layer along with the object itself.
+}
+
+Expected<SymbolFlagsMap> getObjectSymbolFlags(ExecutionSession &ES,
+ MemoryBufferRef ObjBuffer) {
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer);
+
+ if (!Obj)
+ return Obj.takeError();
+
+ SymbolFlagsMap SymbolFlags;
+ for (auto &Sym : (*Obj)->symbols()) {
+ // Skip symbols not defined in this object file.
+ if (Sym.getFlags() & object::BasicSymbolRef::SF_Undefined)
+ continue;
+
+ // Skip symbols that are not global.
+ if (!(Sym.getFlags() & object::BasicSymbolRef::SF_Global))
+ continue;
+
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+ auto InternedName = ES.intern(*Name);
+ auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+ if (!SymFlags)
+ return SymFlags.takeError();
+ SymbolFlags[InternedName] = std::move(*SymFlags);
+ }
+
+ return SymbolFlags;
+}
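+
+// Usage sketch (illustrative only, not part of this patch): scanning an
+// object file on disk for the global symbols it defines. ES is an assumed
+// ExecutionSession and "foo.o" a placeholder path.
+//
+//   auto Buf = cantFail(errorOrToExpected(MemoryBuffer::getFile("foo.o")));
+//   auto Flags = cantFail(getObjectSymbolFlags(ES, Buf->getMemBufferRef()));
+//   for (auto &KV : Flags)
+//     dbgs() << *KV.first << "\n";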
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp b/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp
new file mode 100644
index 0000000000000..93aabd817d601
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp
@@ -0,0 +1,210 @@
+//===---------- LazyReexports.cpp - Utilities for lazy reexports ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+void LazyCallThroughManager::NotifyResolvedFunction::anchor() {}
+
+LazyCallThroughManager::LazyCallThroughManager(
+ ExecutionSession &ES, JITTargetAddress ErrorHandlerAddr,
+ std::unique_ptr<TrampolinePool> TP)
+ : ES(ES), ErrorHandlerAddr(ErrorHandlerAddr), TP(std::move(TP)) {}
+
+Expected<JITTargetAddress> LazyCallThroughManager::getCallThroughTrampoline(
+ JITDylib &SourceJD, SymbolStringPtr SymbolName,
+ std::shared_ptr<NotifyResolvedFunction> NotifyResolved) {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto Trampoline = TP->getTrampoline();
+
+ if (!Trampoline)
+ return Trampoline.takeError();
+
+ Reexports[*Trampoline] = std::make_pair(&SourceJD, std::move(SymbolName));
+ Notifiers[*Trampoline] = std::move(NotifyResolved);
+ return *Trampoline;
+}
+
+JITTargetAddress
+LazyCallThroughManager::callThroughToSymbol(JITTargetAddress TrampolineAddr) {
+ JITDylib *SourceJD = nullptr;
+ SymbolStringPtr SymbolName;
+
+ {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto I = Reexports.find(TrampolineAddr);
+ if (I == Reexports.end())
+ return ErrorHandlerAddr;
+ SourceJD = I->second.first;
+ SymbolName = I->second.second;
+ }
+ auto LookupResult =
+ ES.lookup(JITDylibSearchList({{SourceJD, true}}), SymbolName);
+
+ if (!LookupResult) {
+ ES.reportError(LookupResult.takeError());
+ return ErrorHandlerAddr;
+ }
+
+ auto ResolvedAddr = LookupResult->getAddress();
+
+ std::shared_ptr<NotifyResolvedFunction> NotifyResolved = nullptr;
+ {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto I = Notifiers.find(TrampolineAddr);
+ if (I != Notifiers.end()) {
+ NotifyResolved = I->second;
+ Notifiers.erase(I);
+ }
+ }
+
+ if (NotifyResolved) {
+ if (auto Err = (*NotifyResolved)(*SourceJD, SymbolName, ResolvedAddr)) {
+ ES.reportError(std::move(Err));
+ return ErrorHandlerAddr;
+ }
+ }
+
+ return ResolvedAddr;
+}
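+
+// Note (summary added for clarity): callThroughToSymbol is where
+// resolver/trampoline re-entry lands. The trampoline's own address
+// identifies the symbol being called, the symbol is looked up (materializing
+// it if necessary), the notifier (typically an indirect-stub pointer update)
+// fires, and the caller resumes at the resolved address.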
+
+Expected<std::unique_ptr<LazyCallThroughManager>>
+createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddr) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return LocalLazyCallThroughManager::Create<OrcAArch64>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::x86:
+ return LocalLazyCallThroughManager::Create<OrcI386>(ES, ErrorHandlerAddr);
+
+ case Triple::mips:
+ return LocalLazyCallThroughManager::Create<OrcMips32Be>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::mipsel:
+ return LocalLazyCallThroughManager::Create<OrcMips32Le>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return LocalLazyCallThroughManager::Create<OrcMips64>(ES, ErrorHandlerAddr);
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32)
+ return LocalLazyCallThroughManager::Create<OrcX86_64_Win32>(
+ ES, ErrorHandlerAddr);
+ else
+ return LocalLazyCallThroughManager::Create<OrcX86_64_SysV>(
+ ES, ErrorHandlerAddr);
+ }
+}
+
+LazyReexportsMaterializationUnit::LazyReexportsMaterializationUnit(
+ LazyCallThroughManager &LCTManager, IndirectStubsManager &ISManager,
+ JITDylib &SourceJD, SymbolAliasMap CallableAliases, ImplSymbolMap *SrcJDLoc,
+ VModuleKey K)
+ : MaterializationUnit(extractFlags(CallableAliases), std::move(K)),
+ LCTManager(LCTManager), ISManager(ISManager), SourceJD(SourceJD),
+ CallableAliases(std::move(CallableAliases)),
+ NotifyResolved(LazyCallThroughManager::createNotifyResolvedFunction(
+ [&ISManager](JITDylib &JD, const SymbolStringPtr &SymbolName,
+ JITTargetAddress ResolvedAddr) {
+ return ISManager.updatePointer(*SymbolName, ResolvedAddr);
+ })),
+ AliaseeTable(SrcJDLoc) {}
+
+StringRef LazyReexportsMaterializationUnit::getName() const {
+ return "<Lazy Reexports>";
+}
+
+void LazyReexportsMaterializationUnit::materialize(
+ MaterializationResponsibility R) {
+ auto RequestedSymbols = R.getRequestedSymbols();
+
+ SymbolAliasMap RequestedAliases;
+ for (auto &RequestedSymbol : RequestedSymbols) {
+ auto I = CallableAliases.find(RequestedSymbol);
+ assert(I != CallableAliases.end() && "Symbol not found in alias map?");
+ RequestedAliases[I->first] = std::move(I->second);
+ CallableAliases.erase(I);
+ }
+
+ if (!CallableAliases.empty())
+ R.replace(lazyReexports(LCTManager, ISManager, SourceJD,
+ std::move(CallableAliases), AliaseeTable));
+
+ IndirectStubsManager::StubInitsMap StubInits;
+ for (auto &Alias : RequestedAliases) {
+
+ auto CallThroughTrampoline = LCTManager.getCallThroughTrampoline(
+ SourceJD, Alias.second.Aliasee, NotifyResolved);
+
+ if (!CallThroughTrampoline) {
+ SourceJD.getExecutionSession().reportError(
+ CallThroughTrampoline.takeError());
+ R.failMaterialization();
+ return;
+ }
+
+ StubInits[*Alias.first] =
+ std::make_pair(*CallThroughTrampoline, Alias.second.AliasFlags);
+ }
+
+ if (AliaseeTable != nullptr && !RequestedAliases.empty())
+ AliaseeTable->trackImpls(RequestedAliases, &SourceJD);
+
+ if (auto Err = ISManager.createStubs(StubInits)) {
+ SourceJD.getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ SymbolMap Stubs;
+ for (auto &Alias : RequestedAliases)
+ Stubs[Alias.first] = ISManager.findStub(*Alias.first, false);
+
+ // No registered dependencies, so these calls cannot fail.
+ cantFail(R.notifyResolved(Stubs));
+ cantFail(R.notifyEmitted());
+}
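+
+// Usage sketch (illustrative only, not part of this patch): clients normally
+// reach this MaterializationUnit via the lazyReexports() helper, e.g. to
+// expose "foo" as a lazily compiled alias of "foo_body". LCTM, ISM, Mangle
+// and the two JITDylibs are assumed to exist.
+//
+//   SymbolAliasMap Aliases;
+//   Aliases[Mangle("foo")] = SymbolAliasMapEntry(
+//       Mangle("foo_body"),
+//       JITSymbolFlags::Exported | JITSymbolFlags::Callable);
+//   cantFail(TargetJD.define(
+//       lazyReexports(LCTM, ISM, SourceJD, std::move(Aliases))));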
+
+void LazyReexportsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(CallableAliases.count(Name) &&
+ "Symbol not covered by this MaterializationUnit");
+ CallableAliases.erase(Name);
+}
+
+SymbolFlagsMap
+LazyReexportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &KV : Aliases) {
+ assert(KV.second.AliasFlags.isCallable() &&
+ "Lazy re-exports must be callable symbols");
+ SymbolFlags[KV.first] = KV.second.AliasFlags;
+ }
+ return SymbolFlags;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/Legacy.cpp b/llvm/lib/ExecutionEngine/Orc/Legacy.cpp
new file mode 100644
index 0000000000000..9f9a6730b2c30
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/Legacy.cpp
@@ -0,0 +1,67 @@
+//===------- Legacy.cpp - Adapters for ExecutionEngine API interop --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Legacy.h"
+
+namespace llvm {
+namespace orc {
+
+void SymbolResolver::anchor() {}
+
+JITSymbolResolverAdapter::JITSymbolResolverAdapter(
+ ExecutionSession &ES, SymbolResolver &R, MaterializationResponsibility *MR)
+ : ES(ES), R(R), MR(MR) {}
+
+void JITSymbolResolverAdapter::lookup(const LookupSet &Symbols,
+ OnResolvedFunction OnResolved) {
+ SymbolNameSet InternedSymbols;
+ for (auto &S : Symbols)
+ InternedSymbols.insert(ES.intern(S));
+
+ auto OnResolvedWithUnwrap = [OnResolved = std::move(OnResolved)](
+ Expected<SymbolMap> InternedResult) mutable {
+ if (!InternedResult) {
+ OnResolved(InternedResult.takeError());
+ return;
+ }
+
+ LookupResult Result;
+ for (auto &KV : *InternedResult)
+ Result[*KV.first] = std::move(KV.second);
+ OnResolved(Result);
+ };
+
+ auto Q = std::make_shared<AsynchronousSymbolQuery>(
+ InternedSymbols, SymbolState::Resolved, std::move(OnResolvedWithUnwrap));
+
+ auto Unresolved = R.lookup(Q, InternedSymbols);
+ if (Unresolved.empty()) {
+ if (MR)
+ MR->addDependenciesForAll(Q->QueryRegistrations);
+ } else
+ ES.legacyFailQuery(*Q, make_error<SymbolsNotFound>(std::move(Unresolved)));
+}
+
+Expected<JITSymbolResolverAdapter::LookupSet>
+JITSymbolResolverAdapter::getResponsibilitySet(const LookupSet &Symbols) {
+ SymbolNameSet InternedSymbols;
+ for (auto &S : Symbols)
+ InternedSymbols.insert(ES.intern(S));
+
+ auto InternedResult = R.getResponsibilitySet(InternedSymbols);
+ LookupSet Result;
+ for (auto &S : InternedResult) {
+ ResolvedStrings.insert(S);
+ Result.insert(*S);
+ }
+
+ return Result;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp b/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp
new file mode 100644
index 0000000000000..5b4345b870bb8
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/NullResolver.cpp
@@ -0,0 +1,37 @@
+//===---------- NullResolver.cpp - Reject symbol lookup requests ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/NullResolver.h"
+
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+namespace orc {
+
+SymbolNameSet NullResolver::getResponsibilitySet(const SymbolNameSet &Symbols) {
+ return Symbols;
+}
+
+SymbolNameSet
+NullResolver::lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) {
+ assert(Symbols.empty() && "Null resolver: Symbols must be empty");
+ return Symbols;
+}
+
+JITSymbol NullLegacyResolver::findSymbol(const std::string &Name) {
+ llvm_unreachable("Unexpected cross-object symbol reference");
+}
+
+JITSymbol
+NullLegacyResolver::findSymbolInLogicalDylib(const std::string &Name) {
+ llvm_unreachable("Unexpected cross-object symbol reference");
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp b/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
new file mode 100644
index 0000000000000..874decb2ade0b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
@@ -0,0 +1,487 @@
+//===------- ObjectLinkingLayer.cpp - JITLink backed ORC ObjectLayer ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+
+#include <vector>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+class ObjectLinkingLayerJITLinkContext final : public JITLinkContext {
+public:
+ ObjectLinkingLayerJITLinkContext(ObjectLinkingLayer &Layer,
+ MaterializationResponsibility MR,
+ std::unique_ptr<MemoryBuffer> ObjBuffer)
+ : Layer(Layer), MR(std::move(MR)), ObjBuffer(std::move(ObjBuffer)) {}
+
+ ~ObjectLinkingLayerJITLinkContext() {
+    // If there is an object buffer return function, use it to return
+    // ownership of the buffer.
+ if (Layer.ReturnObjectBuffer)
+ Layer.ReturnObjectBuffer(std::move(ObjBuffer));
+ }
+
+ JITLinkMemoryManager &getMemoryManager() override { return Layer.MemMgr; }
+
+ MemoryBufferRef getObjectBuffer() const override {
+ return ObjBuffer->getMemBufferRef();
+ }
+
+ void notifyFailed(Error Err) override {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR.failMaterialization();
+ }
+
+ void lookup(const DenseSet<StringRef> &Symbols,
+ std::unique_ptr<JITLinkAsyncLookupContinuation> LC) override {
+
+ JITDylibSearchList SearchOrder;
+ MR.getTargetJITDylib().withSearchOrderDo(
+ [&](const JITDylibSearchList &JDs) { SearchOrder = JDs; });
+
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolNameSet InternedSymbols;
+ for (auto &S : Symbols)
+ InternedSymbols.insert(ES.intern(S));
+
+ // OnResolve -- De-intern the symbols and pass the result to the linker.
+ auto OnResolve = [this, LookupContinuation = std::move(LC)](
+ Expected<SymbolMap> Result) mutable {
+ if (!Result)
+ LookupContinuation->run(Result.takeError());
+ else {
+ AsyncLookupResult LR;
+ for (auto &KV : *Result)
+ LR[*KV.first] = KV.second;
+ LookupContinuation->run(std::move(LR));
+ }
+ };
+
+ ES.lookup(SearchOrder, std::move(InternedSymbols), SymbolState::Resolved,
+ std::move(OnResolve), [this](const SymbolDependenceMap &Deps) {
+ registerDependencies(Deps);
+ });
+ }
+
+ void notifyResolved(LinkGraph &G) override {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ bool AutoClaim = Layer.AutoClaimObjectSymbols;
+
+ SymbolMap InternedResult;
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && Sym->getScope() != Scope::Local) {
+ auto InternedName = ES.intern(Sym->getName());
+ JITSymbolFlags Flags;
+
+ if (Sym->isCallable())
+ Flags |= JITSymbolFlags::Callable;
+ if (Sym->getScope() == Scope::Default)
+ Flags |= JITSymbolFlags::Exported;
+
+ InternedResult[InternedName] =
+ JITEvaluatedSymbol(Sym->getAddress(), Flags);
+ if (AutoClaim && !MR.getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ for (auto *Sym : G.absolute_symbols())
+ if (Sym->hasName()) {
+ auto InternedName = ES.intern(Sym->getName());
+ JITSymbolFlags Flags;
+ Flags |= JITSymbolFlags::Absolute;
+ if (Sym->isCallable())
+ Flags |= JITSymbolFlags::Callable;
+ if (Sym->getLinkage() == Linkage::Weak)
+ Flags |= JITSymbolFlags::Weak;
+ InternedResult[InternedName] =
+ JITEvaluatedSymbol(Sym->getAddress(), Flags);
+ if (AutoClaim && !MR.getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ if (!ExtraSymbolsToClaim.empty())
+ if (auto Err = MR.defineMaterializing(ExtraSymbolsToClaim))
+ return notifyFailed(std::move(Err));
+ if (auto Err = MR.notifyResolved(InternedResult)) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR.failMaterialization();
+ return;
+ }
+ Layer.notifyLoaded(MR);
+ }
+
+ void notifyFinalized(
+ std::unique_ptr<JITLinkMemoryManager::Allocation> A) override {
+ if (auto Err = Layer.notifyEmitted(MR, std::move(A))) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR.failMaterialization();
+ return;
+ }
+ if (auto Err = MR.notifyEmitted()) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR.failMaterialization();
+ }
+ }
+
+ LinkGraphPassFunction getMarkLivePass(const Triple &TT) const override {
+ return [this](LinkGraph &G) { return markResponsibilitySymbolsLive(G); };
+ }
+
+ Error modifyPassConfig(const Triple &TT, PassConfiguration &Config) override {
+ // Add passes to mark duplicate defs as should-discard, and to walk the
+ // link graph to build the symbol dependence graph.
+ Config.PrePrunePasses.push_back(
+ [this](LinkGraph &G) { return externalizeWeakAndCommonSymbols(G); });
+ Config.PostPrunePasses.push_back(
+ [this](LinkGraph &G) { return computeNamedSymbolDependencies(G); });
+
+ Layer.modifyPassConfig(MR, TT, Config);
+
+ return Error::success();
+ }
+
+private:
+ using AnonToNamedDependenciesMap = DenseMap<const Symbol *, SymbolNameSet>;
+
+ Error externalizeWeakAndCommonSymbols(LinkGraph &G) {
+ auto &ES = Layer.getExecutionSession();
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && Sym->getLinkage() == Linkage::Weak) {
+ if (!MR.getSymbols().count(ES.intern(Sym->getName())))
+ G.makeExternal(*Sym);
+ }
+
+ for (auto *Sym : G.absolute_symbols())
+ if (Sym->hasName() && Sym->getLinkage() == Linkage::Weak) {
+ if (!MR.getSymbols().count(ES.intern(Sym->getName())))
+ G.makeExternal(*Sym);
+ }
+
+ return Error::success();
+ }
+
+ Error markResponsibilitySymbolsLive(LinkGraph &G) const {
+ auto &ES = Layer.getExecutionSession();
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && MR.getSymbols().count(ES.intern(Sym->getName())))
+ Sym->setLive(true);
+ return Error::success();
+ }
+
+ Error computeNamedSymbolDependencies(LinkGraph &G) {
+ auto &ES = MR.getTargetJITDylib().getExecutionSession();
+ auto AnonDeps = computeAnonDeps(G);
+
+ for (auto *Sym : G.defined_symbols()) {
+
+      // Skip anonymous and local-scope symbols: we do not need dependencies
+      // for these.
+ if (Sym->getScope() == Scope::Local)
+ continue;
+
+ auto SymName = ES.intern(Sym->getName());
+ SymbolNameSet &SymDeps = NamedSymbolDeps[SymName];
+
+ for (auto &E : Sym->getBlock().edges()) {
+ auto &TargetSym = E.getTarget();
+
+ if (TargetSym.getScope() != Scope::Local)
+ SymDeps.insert(ES.intern(TargetSym.getName()));
+ else {
+ assert(TargetSym.isDefined() &&
+ "Anonymous/local symbols must be defined");
+ auto I = AnonDeps.find(&TargetSym);
+ if (I != AnonDeps.end())
+ for (auto &S : I->second)
+ SymDeps.insert(S);
+ }
+ }
+ }
+
+ return Error::success();
+ }
+
+ AnonToNamedDependenciesMap computeAnonDeps(LinkGraph &G) {
+
+ auto &ES = MR.getTargetJITDylib().getExecutionSession();
+ AnonToNamedDependenciesMap DepMap;
+
+ // For all anonymous symbols:
+ // (1) Add their named dependencies.
+    // (2) Add them to the worklist for further iteration if they depend on
+    //     any other anonymous symbols.
+ struct WorklistEntry {
+ WorklistEntry(Symbol *Sym, DenseSet<Symbol *> SymAnonDeps)
+ : Sym(Sym), SymAnonDeps(std::move(SymAnonDeps)) {}
+
+ Symbol *Sym = nullptr;
+ DenseSet<Symbol *> SymAnonDeps;
+ };
+ std::vector<WorklistEntry> Worklist;
+ for (auto *Sym : G.defined_symbols())
+ if (!Sym->hasName()) {
+ auto &SymNamedDeps = DepMap[Sym];
+ DenseSet<Symbol *> SymAnonDeps;
+
+ for (auto &E : Sym->getBlock().edges()) {
+ auto &TargetSym = E.getTarget();
+ if (TargetSym.hasName())
+ SymNamedDeps.insert(ES.intern(TargetSym.getName()));
+ else {
+ assert(TargetSym.isDefined() &&
+ "Anonymous symbols must be defined");
+ SymAnonDeps.insert(&TargetSym);
+ }
+ }
+
+ if (!SymAnonDeps.empty())
+ Worklist.push_back(WorklistEntry(Sym, std::move(SymAnonDeps)));
+ }
+
+ // Loop over all anonymous symbols with anonymous dependencies, propagating
+ // their respective *named* dependencies. Iterate until we hit a stable
+ // state.
+ bool Changed;
+ do {
+ Changed = false;
+ for (auto &WLEntry : Worklist) {
+ auto *Sym = WLEntry.Sym;
+ auto &SymNamedDeps = DepMap[Sym];
+ auto &SymAnonDeps = WLEntry.SymAnonDeps;
+
+ for (auto *TargetSym : SymAnonDeps) {
+ auto I = DepMap.find(TargetSym);
+ if (I != DepMap.end())
+ for (const auto &S : I->second)
+ Changed |= SymNamedDeps.insert(S).second;
+ }
+ }
+ } while (Changed);
+
+ return DepMap;
+ }
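+
+  // Worked example (added for clarity): if anonymous symbol A's block points
+  // at anonymous symbol B, and B's block points at named symbol C, the first
+  // pass records DepMap[B] = {C} and queues A with SymAnonDeps = {B}. The
+  // fixed-point loop then copies C into DepMap[A], so both A and B end up
+  // carrying the named dependence {C}.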
+
+ void registerDependencies(const SymbolDependenceMap &QueryDeps) {
+ for (auto &NamedDepsEntry : NamedSymbolDeps) {
+ auto &Name = NamedDepsEntry.first;
+ auto &NameDeps = NamedDepsEntry.second;
+ SymbolDependenceMap SymbolDeps;
+
+ for (const auto &QueryDepsEntry : QueryDeps) {
+ JITDylib &SourceJD = *QueryDepsEntry.first;
+ const SymbolNameSet &Symbols = QueryDepsEntry.second;
+ auto &DepsForJD = SymbolDeps[&SourceJD];
+
+ for (const auto &S : Symbols)
+ if (NameDeps.count(S))
+ DepsForJD.insert(S);
+
+ if (DepsForJD.empty())
+ SymbolDeps.erase(&SourceJD);
+ }
+
+ MR.addDependencies(Name, SymbolDeps);
+ }
+ }
+
+ ObjectLinkingLayer &Layer;
+ MaterializationResponsibility MR;
+ std::unique_ptr<MemoryBuffer> ObjBuffer;
+ DenseMap<SymbolStringPtr, SymbolNameSet> NamedSymbolDeps;
+};
+
+ObjectLinkingLayer::Plugin::~Plugin() {}
+
+ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES,
+ JITLinkMemoryManager &MemMgr)
+ : ObjectLayer(ES), MemMgr(MemMgr) {}
+
+ObjectLinkingLayer::~ObjectLinkingLayer() {
+ if (auto Err = removeAllModules())
+ getExecutionSession().reportError(std::move(Err));
+}
+
+void ObjectLinkingLayer::emit(MaterializationResponsibility R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+ jitLink(std::make_unique<ObjectLinkingLayerJITLinkContext>(
+ *this, std::move(R), std::move(O)));
+}
+
+void ObjectLinkingLayer::modifyPassConfig(MaterializationResponsibility &MR,
+ const Triple &TT,
+ PassConfiguration &PassConfig) {
+ for (auto &P : Plugins)
+ P->modifyPassConfig(MR, TT, PassConfig);
+}
+
+void ObjectLinkingLayer::notifyLoaded(MaterializationResponsibility &MR) {
+ for (auto &P : Plugins)
+ P->notifyLoaded(MR);
+}
+
+Error ObjectLinkingLayer::notifyEmitted(MaterializationResponsibility &MR,
+ AllocPtr Alloc) {
+ Error Err = Error::success();
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyEmitted(MR));
+
+ if (Err)
+ return Err;
+
+ {
+ std::lock_guard<std::mutex> Lock(LayerMutex);
+ UntrackedAllocs.push_back(std::move(Alloc));
+ }
+
+ return Error::success();
+}
+
+Error ObjectLinkingLayer::removeModule(VModuleKey K) {
+ Error Err = Error::success();
+
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyRemovingModule(K));
+
+ AllocPtr Alloc;
+
+ {
+ std::lock_guard<std::mutex> Lock(LayerMutex);
+ auto AllocItr = TrackedAllocs.find(K);
+ Alloc = std::move(AllocItr->second);
+ TrackedAllocs.erase(AllocItr);
+ }
+
+ assert(Alloc && "No allocation for key K");
+
+ return joinErrors(std::move(Err), Alloc->deallocate());
+}
+
+Error ObjectLinkingLayer::removeAllModules() {
+
+ Error Err = Error::success();
+
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyRemovingAllModules());
+
+ std::vector<AllocPtr> Allocs;
+ {
+ std::lock_guard<std::mutex> Lock(LayerMutex);
+ Allocs = std::move(UntrackedAllocs);
+
+ for (auto &KV : TrackedAllocs)
+ Allocs.push_back(std::move(KV.second));
+
+ TrackedAllocs.clear();
+ }
+
+ while (!Allocs.empty()) {
+ Err = joinErrors(std::move(Err), Allocs.back()->deallocate());
+ Allocs.pop_back();
+ }
+
+ return Err;
+}
+
+EHFrameRegistrationPlugin::EHFrameRegistrationPlugin(
+ EHFrameRegistrar &Registrar)
+ : Registrar(Registrar) {}
+
+void EHFrameRegistrationPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, const Triple &TT,
+ PassConfiguration &PassConfig) {
+ assert(!InProcessLinks.count(&MR) && "Link for MR already being tracked?");
+
+ PassConfig.PostFixupPasses.push_back(
+ createEHFrameRecorderPass(TT, [this, &MR](JITTargetAddress Addr,
+ size_t Size) {
+ if (Addr)
+ InProcessLinks[&MR] = { Addr, Size };
+ }));
+}
+
+Error EHFrameRegistrationPlugin::notifyEmitted(
+ MaterializationResponsibility &MR) {
+
+ auto EHFrameRangeItr = InProcessLinks.find(&MR);
+ if (EHFrameRangeItr == InProcessLinks.end())
+ return Error::success();
+
+ auto EHFrameRange = EHFrameRangeItr->second;
+  assert(EHFrameRange.Addr && "eh-frame addr to register cannot be null");
+
+ InProcessLinks.erase(EHFrameRangeItr);
+ if (auto Key = MR.getVModuleKey())
+ TrackedEHFrameRanges[Key] = EHFrameRange;
+ else
+ UntrackedEHFrameRanges.push_back(EHFrameRange);
+
+ return Registrar.registerEHFrames(EHFrameRange.Addr, EHFrameRange.Size);
+}
+
+Error EHFrameRegistrationPlugin::notifyRemovingModule(VModuleKey K) {
+ auto EHFrameRangeItr = TrackedEHFrameRanges.find(K);
+ if (EHFrameRangeItr == TrackedEHFrameRanges.end())
+ return Error::success();
+
+ auto EHFrameRange = EHFrameRangeItr->second;
+ assert(EHFrameRange.Addr && "Tracked eh-frame range must not be null");
+
+ TrackedEHFrameRanges.erase(EHFrameRangeItr);
+
+ return Registrar.deregisterEHFrames(EHFrameRange.Addr, EHFrameRange.Size);
+}
+
+Error EHFrameRegistrationPlugin::notifyRemovingAllModules() {
+
+ std::vector<EHFrameRange> EHFrameRanges =
+ std::move(UntrackedEHFrameRanges);
+ EHFrameRanges.reserve(EHFrameRanges.size() + TrackedEHFrameRanges.size());
+
+ for (auto &KV : TrackedEHFrameRanges)
+ EHFrameRanges.push_back(KV.second);
+
+ TrackedEHFrameRanges.clear();
+
+ Error Err = Error::success();
+
+ while (!EHFrameRanges.empty()) {
+ auto EHFrameRange = EHFrameRanges.back();
+ assert(EHFrameRange.Addr && "Untracked eh-frame range must not be null");
+ EHFrameRanges.pop_back();
+ Err = joinErrors(std::move(Err),
+ Registrar.deregisterEHFrames(EHFrameRange.Addr,
+ EHFrameRange.Size));
+ }
+
+ return Err;
+}
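+
+// Usage sketch (illustrative only, not part of this patch): plugins are
+// attached to the layer before any objects are added. R is an assumed
+// EHFrameRegistrar implementation with suitable lifetime.
+//
+//   ObjectLinkingLayer ObjLayer(ES, MemMgr);
+//   ObjLayer.addPlugin(std::make_unique<EHFrameRegistrationPlugin>(R));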
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp b/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
new file mode 100644
index 0000000000000..815517321b76b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
@@ -0,0 +1,33 @@
+//===---------- ObjectTransformLayer.cpp - Object Transform Layer ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+ObjectTransformLayer::ObjectTransformLayer(ExecutionSession &ES,
+ ObjectLayer &BaseLayer,
+ TransformFunction Transform)
+ : ObjectLayer(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+void ObjectTransformLayer::emit(MaterializationResponsibility R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Module must not be null");
+
+ if (auto TransformedObj = Transform(std::move(O)))
+ BaseLayer.emit(std::move(R), std::move(*TransformedObj));
+ else {
+ R.failMaterialization();
+ getExecutionSession().reportError(TransformedObj.takeError());
+ }
+}
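+
+// Usage sketch (illustrative only, not part of this patch): a transform is
+// any callable mapping one object buffer to an Expected replacement. The
+// pass-through transform below is a common starting point for object
+// dumping or caching.
+//
+//   ObjectTransformLayer DumpLayer(
+//       ES, BaseLayer,
+//       [](std::unique_ptr<MemoryBuffer> Obj)
+//           -> Expected<std::unique_ptr<MemoryBuffer>> {
+//         // Inspect or persist Obj here before handing it on unchanged.
+//         return std::move(Obj);
+//       });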
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp
new file mode 100644
index 0000000000000..8ed23de419d1e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -0,0 +1,983 @@
+//===------------- OrcABISupport.cpp - ABI specific support code ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+namespace orc {
+
+void OrcAArch64::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
+ 0x910003fd, // 0x004: mov x29, sp
+ 0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
+ 0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
+ 0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
+ 0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
+ 0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
+ 0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
+ 0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
+ 0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
+ 0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
+ 0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
+ 0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
+ 0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
+ 0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
+ 0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
+ 0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
+ 0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
+ 0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
+ 0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
+ 0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
+ 0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
+ 0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
+ 0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
+ 0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
+ 0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
+ 0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
+ 0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
+ 0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
+ 0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
+ 0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
+ 0x580004e0, // 0x07c: ldr x0, Lcallbackmgr
+ 0xaa1e03e1, // 0x080: mov x1, x30
+ 0xd1003021, // 0x084: sub x1, x1, #12
+ 0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
+ 0xd63f0040, // 0x08c: blr x2
+ 0xaa0003f1, // 0x090: mov x17, x0
+ 0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
+ 0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
+ 0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
+ 0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
+ 0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
+ 0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
+ 0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
+ 0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
+ 0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
+ 0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
+ 0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
+ 0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
+ 0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
+ 0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
+ 0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
+ 0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
+ 0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
+ 0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
+ 0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
+ 0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
+ 0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
+ 0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
+ 0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
+ 0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
+ 0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
+ 0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
+ 0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
+ 0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
+ 0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
+ 0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
+ 0xd65f0220, // 0x10c: ret x17
+ 0x01234567, // 0x110: Lreentry_fn_ptr:
+ 0xdeadbeef, // 0x114: .quad 0
+ 0x98765432, // 0x118: Lcallbackmgr:
+ 0xcafef00d // 0x11c: .quad 0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x110;
+ const unsigned CallbackMgrAddrOffset = 0x118;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcAArch64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+ memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
+
+  // OffsetToPtr is actually the offset from the PC of the 2nd instruction, so
+  // subtract one 32-bit instruction (4 bytes).
+ OffsetToPtr -= 4;
+
+ uint32_t *Trampolines = reinterpret_cast<uint32_t *>(TrampolineMem);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+ Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
+    Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // ldr x16, Lptr
+ Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
+ }
+}
+
+Error OrcAArch64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+  //   ldr x16, ptr1 ; PC-rel load of ptr1
+  //   br x16        ; Jump to resolver
+  // stub2:
+  //   ldr x16, ptr2 ; PC-rel load of ptr2
+  //   br x16        ; Jump to resolver
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ static const unsigned PageSize = sys::Process::getPageSizeEstimate();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base());
+ uint64_t PtrOffsetField = static_cast<uint64_t>(NumPages * PageSize)
+ << 3;
+
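+  // Note on the encoding (added for clarity): each 8-byte stub is two
+  // little-endian A64 instructions, "ldr x16, <literal>" (0x58000010) then
+  // "br x16" (0xd61f0200). The ldr literal's imm19 field sits at bits 5-23
+  // and is scaled by 4, so shifting the byte offset left by 3 above drops it
+  // into place.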
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xd61f020058000010 | PtrOffsetField;
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+  // Initialize all pointers to point at InitialPtrVal.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+void OrcX86_64_Base::writeTrampolines(uint8_t *TrampolineMem,
+ void *ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
+
+ memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
+
+ uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineMem);
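+  // Note on the encoding (added for clarity): in little-endian byte order
+  // the constant below is "ff 15 00 00 00 00 c4 f1", i.e. a 6-byte
+  // "callq *<rel32>(%rip)" followed by two invalid-opcode padding bytes.
+  // Shifting (OffsetToPtr - 6) left by 16 fills the rel32 field; the -6
+  // accounts for %rip pointing at the next instruction.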
+ uint64_t CallIndirPCRel = 0xf1c40000000015ff;
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
+ Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
+}
+
+Error OrcX86_64_Base::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1(%rip)
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2(%rip)
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ static const unsigned PageSize = sys::Process::getPageSizeEstimate();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base());
+ uint64_t PtrOffsetField = static_cast<uint64_t>(NumPages * PageSize - 6)
+ << 16;
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+  // Initialize all pointers to point at InitialPtrVal.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+void OrcX86_64_SysV::writeResolverCode(uint8_t *ResolverMem,
+ JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+      0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq $0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+ 0x48, 0xbf, // 0x26: movabsq <CBMgr>, %rdi
+
+ // 0x28: Callback manager addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8b, 0x75, 0x08, // 0x30: movq 8(%rbp), %rsi
+ 0x48, 0x83, 0xee, 0x06, // 0x34: subq $6, %rsi
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xff, 0xd0, // 0x42: callq *%rax
+ 0x48, 0x89, 0x45, 0x08, // 0x44: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
+      0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq $0x208, %rsp
+ 0x41, 0x5f, // 0x54: popq %r15
+ 0x41, 0x5e, // 0x56: popq %r14
+ 0x41, 0x5d, // 0x58: popq %r13
+ 0x41, 0x5c, // 0x5a: popq %r12
+ 0x41, 0x5b, // 0x5c: popq %r11
+ 0x41, 0x5a, // 0x5e: popq %r10
+ 0x41, 0x59, // 0x60: popq %r9
+ 0x41, 0x58, // 0x62: popq %r8
+ 0x5f, // 0x64: popq %rdi
+ 0x5e, // 0x65: popq %rsi
+ 0x5a, // 0x66: popq %rdx
+ 0x59, // 0x67: popq %rcx
+ 0x5b, // 0x68: popq %rbx
+ 0x58, // 0x69: popq %rax
+ 0x5d, // 0x6a: popq %rbp
+ 0xc3, // 0x6b: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned CallbackMgrAddrOffset = 0x28;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcX86_64_Win32::writeResolverCode(uint8_t *ResolverMem,
+ JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+  // The resolver code is similar to OrcX86_64_SysV, with differences specific
+  // to the Windows x64 calling convention: the two arguments are passed in
+  // %rcx and %rdx (rather than %rdi and %rsi), and shadow space must be
+  // allocated on the stack around the call.
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+      0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq $0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+
+ 0x48, 0xb9, // 0x26: movabsq <CBMgr>, %rcx
+ // 0x28: Callback manager addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+      0x48, 0x8B, 0x55, 0x08,               // 0x30: movq 8(%rbp), %rdx
+      0x48, 0x83, 0xea, 0x06,               // 0x34: subq $6, %rdx
+
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+      // 0x42: subq $0x20, %rsp (allocate shadow space)
+ 0x48, 0x83, 0xEC, 0x20,
+ 0xff, 0xd0, // 0x46: callq *%rax
+
+      // 0x48: addq $0x20, %rsp (free shadow space)
+ 0x48, 0x83, 0xC4, 0x20,
+
+ 0x48, 0x89, 0x45, 0x08, // 0x4C: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x50: fxrstor64 (%rsp)
+      0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x55: addq $0x208, %rsp
+ 0x41, 0x5f, // 0x5C: popq %r15
+ 0x41, 0x5e, // 0x5E: popq %r14
+ 0x41, 0x5d, // 0x60: popq %r13
+ 0x41, 0x5c, // 0x62: popq %r12
+ 0x41, 0x5b, // 0x64: popq %r11
+ 0x41, 0x5a, // 0x66: popq %r10
+ 0x41, 0x59, // 0x68: popq %r9
+ 0x41, 0x58, // 0x6a: popq %r8
+ 0x5f, // 0x6c: popq %rdi
+ 0x5e, // 0x6d: popq %rsi
+ 0x5a, // 0x6e: popq %rdx
+ 0x59, // 0x6f: popq %rcx
+ 0x5b, // 0x70: popq %rbx
+ 0x58, // 0x71: popq %rax
+ 0x5d, // 0x72: popq %rbp
+ 0xc3, // 0x73: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned CallbackMgrAddrOffset = 0x28;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcI386::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushl %ebp
+ 0x89, 0xe5, // 0x01: movl %esp, %ebp
+ 0x54, // 0x03: pushl %esp
+ 0x83, 0xe4, 0xf0, // 0x04: andl $-0x10, %esp
+ 0x50, // 0x07: pushl %eax
+ 0x53, // 0x08: pushl %ebx
+ 0x51, // 0x09: pushl %ecx
+ 0x52, // 0x0a: pushl %edx
+ 0x56, // 0x0b: pushl %esi
+ 0x57, // 0x0c: pushl %edi
+ 0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl $0x218, %esp
+ 0x0f, 0xae, 0x44, 0x24, 0x10, // 0x13: fxsave 0x10(%esp)
+ 0x8b, 0x75, 0x04, // 0x18: movl 0x4(%ebp), %esi
+ 0x83, 0xee, 0x05, // 0x1b: subl $0x5, %esi
+ 0x89, 0x74, 0x24, 0x04, // 0x1e: movl %esi, 0x4(%esp)
+ 0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
+ 0x00, // 0x22: movl <cbmgr>, (%esp)
+ 0xb8, 0x00, 0x00, 0x00, 0x00, // 0x29: movl <reentry>, %eax
+ 0xff, 0xd0, // 0x2e: calll *%eax
+ 0x89, 0x45, 0x04, // 0x30: movl %eax, 0x4(%ebp)
+ 0x0f, 0xae, 0x4c, 0x24, 0x10, // 0x33: fxrstor 0x10(%esp)
+ 0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl $0x218, %esp
+ 0x5f, // 0x3e: popl %edi
+ 0x5e, // 0x3f: popl %esi
+ 0x5a, // 0x40: popl %edx
+ 0x59, // 0x41: popl %ecx
+ 0x5b, // 0x42: popl %ebx
+ 0x58, // 0x43: popl %eax
+ 0x8b, 0x65, 0xfc, // 0x44: movl -0x4(%ebp), %esp
+ 0x5d, // 0x48: popl %ebp
+ 0xc3 // 0x49: retl
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x2a;
+ const unsigned CallbackMgrAddrOffset = 0x25;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryFn, sizeof(ReentryFn));
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallbackMgr,
+ sizeof(CallbackMgr));
+}
+
+void OrcI386::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines) {
+
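+  // Note on the encoding (added for clarity): in little-endian byte order
+  // CallRelImm is "e8 00 00 00 00 c4 c4 f1", i.e. a 5-byte "calll <rel32>"
+  // followed by three invalid-opcode padding bytes; shifting the resolver's
+  // PC-relative offset left by 8 fills the rel32 field.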
+ uint64_t CallRelImm = 0xF1C4C400000000e8;
+ uint64_t Resolver = reinterpret_cast<uint64_t>(ResolverAddr);
+ uint64_t ResolverRel =
+ Resolver - reinterpret_cast<uint64_t>(TrampolineMem) - 5;
+
+ uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineMem);
+ for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
+ Trampolines[I] = CallRelImm | (ResolverRel << 8);
+}
+
+Error OrcI386::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs, void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ static const unsigned PageSize = sys::Process::getPageSizeEstimate();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base());
+ uint64_t PtrAddr = reinterpret_cast<uint64_t>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
+ Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+  // Initialize all pointers to point at InitialPtrVal.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+void OrcMips32_Base::writeResolverCode(uint8_t *ResolverMem,
+ JITReentryFn ReentryFn,
+ void *CallbackMgr, bool isBigEndian) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0x27bdff98, // 0x00: addiu $sp,$sp,-104
+ 0xafa20000, // 0x04: sw $v0,0($sp)
+ 0xafa30004, // 0x08: sw $v1,4($sp)
+ 0xafa40008, // 0x0c: sw $a0,8($sp)
+ 0xafa5000c, // 0x10: sw $a1,12($sp)
+ 0xafa60010, // 0x14: sw $a2,16($sp)
+ 0xafa70014, // 0x18: sw $a3,20($sp)
+ 0xafb00018, // 0x1c: sw $s0,24($sp)
+ 0xafb1001c, // 0x20: sw $s1,28($sp)
+ 0xafb20020, // 0x24: sw $s2,32($sp)
+ 0xafb30024, // 0x28: sw $s3,36($sp)
+ 0xafb40028, // 0x2c: sw $s4,40($sp)
+ 0xafb5002c, // 0x30: sw $s5,44($sp)
+ 0xafb60030, // 0x34: sw $s6,48($sp)
+ 0xafb70034, // 0x38: sw $s7,52($sp)
+ 0xafa80038, // 0x3c: sw $t0,56($sp)
+ 0xafa9003c, // 0x40: sw $t1,60($sp)
+ 0xafaa0040, // 0x44: sw $t2,64($sp)
+ 0xafab0044, // 0x48: sw $t3,68($sp)
+ 0xafac0048, // 0x4c: sw $t4,72($sp)
+ 0xafad004c, // 0x50: sw $t5,76($sp)
+ 0xafae0050, // 0x54: sw $t6,80($sp)
+ 0xafaf0054, // 0x58: sw $t7,84($sp)
+ 0xafb80058, // 0x5c: sw $t8,88($sp)
+ 0xafb9005c, // 0x60: sw $t9,92($sp)
+ 0xafbe0060, // 0x64: sw $fp,96($sp)
+ 0xafbf0064, // 0x68: sw $ra,100($sp)
+
+ // Callback manager addr.
+ 0x00000000, // 0x6c: lui $a0,callbackmgr
+ 0x00000000, // 0x70: addiu $a0,$a0,callbackmgr
+
+ 0x03e02825, // 0x74: move $a1, $ra
+ 0x24a5ffec, // 0x78: addiu $a1,$a1,-20
+
+ // JIT re-entry fn addr:
+ 0x00000000, // 0x7c: lui $t9,reentry
+ 0x00000000, // 0x80: addiu $t9,$t9,reentry
+
+ 0x0320f809, // 0x84: jalr $t9
+ 0x00000000, // 0x88: nop
+ 0x8fbf0064, // 0x8c: lw $ra,100($sp)
+ 0x8fbe0060, // 0x90: lw $fp,96($sp)
+ 0x8fb9005c, // 0x94: lw $t9,92($sp)
+ 0x8fb80058, // 0x98: lw $t8,88($sp)
+ 0x8faf0054, // 0x9c: lw $t7,84($sp)
+ 0x8fae0050, // 0xa0: lw $t6,80($sp)
+ 0x8fad004c, // 0xa4: lw $t5,76($sp)
+ 0x8fac0048, // 0xa8: lw $t4,72($sp)
+ 0x8fab0044, // 0xac: lw $t3,68($sp)
+ 0x8faa0040, // 0xb0: lw $t2,64($sp)
+ 0x8fa9003c, // 0xb4: lw $t1,60($sp)
+ 0x8fa80038, // 0xb8: lw $t0,56($sp)
+ 0x8fb70034, // 0xbc: lw $s7,52($sp)
+ 0x8fb60030, // 0xc0: lw $s6,48($sp)
+ 0x8fb5002c, // 0xc4: lw $s5,44($sp)
+ 0x8fb40028, // 0xc8: lw $s4,40($sp)
+ 0x8fb30024, // 0xcc: lw $s3,36($sp)
+ 0x8fb20020, // 0xd0: lw $s2,32($sp)
+ 0x8fb1001c, // 0xd4: lw $s1,28($sp)
+ 0x8fb00018, // 0xd8: lw $s0,24($sp)
+ 0x8fa70014, // 0xdc: lw $a3,20($sp)
+ 0x8fa60010, // 0xe0: lw $a2,16($sp)
+ 0x8fa5000c, // 0xe4: lw $a1,12($sp)
+ 0x8fa40008, // 0xe8: lw $a0,8($sp)
+ 0x27bd0068, // 0xec: addiu $sp,$sp,104
+ 0x0300f825, // 0xf0: move $ra, $t8
+ 0x03200008, // 0xf4: jr $t9
+ 0x00000000, // 0xf8: move $t9, $v0/v1
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x7c; // JIT re-entry fn addr lui
+ const unsigned CallbackMgrAddrOffset = 0x6c; // Callback manager addr lui
+  const unsigned Offset = 0xf8;
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+
+  // Depending on endianness, the return value will be in $v0 or $v1.
+  uint32_t MoveVxT9 = isBigEndian ? 0x0060c825 : 0x0040c825;
+  memcpy(ResolverMem + Offset, &MoveVxT9, sizeof(MoveVxT9));
+
+ uint64_t CallMgrAddr = reinterpret_cast<uint64_t>(CallbackMgr);
+ uint32_t CallMgrLUi = 0x3c040000 | (((CallMgrAddr + 0x8000) >> 16) & 0xFFFF);
+ uint32_t CallMgrADDiu = 0x24840000 | ((CallMgrAddr) & 0xFFFF);
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallMgrLUi, sizeof(CallMgrLUi));
+ memcpy(ResolverMem + CallbackMgrAddrOffset + 4, &CallMgrADDiu,
+ sizeof(CallMgrADDiu));
+
+ uint64_t ReentryAddr = reinterpret_cast<uint64_t>(ReentryFn);
+ uint32_t ReentryLUi = 0x3c190000 | (((ReentryAddr + 0x8000) >> 16) & 0xFFFF);
+ uint32_t ReentryADDiu = 0x27390000 | ((ReentryAddr) & 0xFFFF);
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryLUi, sizeof(ReentryLUi));
+ memcpy(ResolverMem + ReentryFnAddrOffset + 4, &ReentryADDiu,
+ sizeof(ReentryADDiu));
+}
+
+void OrcMips32_Base::writeTrampolines(uint8_t *TrampolineMem,
+ void *ResolverAddr,
+ unsigned NumTrampolines) {
+
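+  // Each trampoline is five words (20 bytes): it stashes $ra in $t8, builds
+  // the resolver address from a %hi/%lo pair, then calls it via jalr (the
+  // nop fills the delay slot). After the jalr, $ra points 20 bytes past the
+  // start of the trampoline, which is how the resolver identifies it.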
+ uint32_t *Trampolines = reinterpret_cast<uint32_t *>(TrampolineMem);
+ uint64_t ResolveAddr = reinterpret_cast<uint64_t>(ResolverAddr);
+ uint32_t RHiAddr = ((ResolveAddr + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ Trampolines[5 * I + 0] = 0x03e0c025; // move $t8,$ra
+    Trampolines[5 * I + 1] = 0x3c190000 | (RHiAddr & 0xFFFF); // lui $t9,%hi(resolveAddr)
+    Trampolines[5 * I + 2] = 0x27390000 | (ResolveAddr & 0xFFFF); // addiu $t9,$t9,%lo(resolveAddr)
+ Trampolines[5 * I + 3] = 0x0320f809; // jalr $t9
+ Trampolines[5 * I + 4] = 0x00000000; // nop
+ }
+}
+
+Error OrcMips32_Base::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+  //   lui $t9, %hi(ptr1)
+  //   lw $t9, %lo(ptr1)($t9)
+  //   jr $t9
+  // stub2:
+  //   lui $t9, %hi(ptr2)
+  //   lw $t9, %lo(ptr2)($t9)
+  //   jr $t9
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .word 0x0
+ // ptr2:
+ // .word 0x0
+ //
+ // ...
+
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ static const unsigned PageSize = sys::Process::getPageSizeEstimate();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
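+  // A worked example, assuming a 4096-byte page estimate and the 16-byte
+  // (four-instruction) stub emitted below: MinStubs = 200 needs
+  // ceil(200 * 16 / 4096) = 1 page, which then holds 4096 / 16 = 256 stubs.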
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlock.base());
+ uint64_t PtrAddr = reinterpret_cast<uint64_t>(Stub) + NumPages * PageSize;
+
+ for (unsigned I = 0; I < NumStubs; ++I) {
+ uint32_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+ Stub[4 * I + 0] = 0x3c190000 | (HiAddr & 0xFFFF); // lui $t9,ptr1
+ Stub[4 * I + 1] = 0x8f390000 | (PtrAddr & 0xFFFF); // lw $t9,%lo(ptr1)($t9)
+ Stub[4 * I + 2] = 0x03200008; // jr $t9
+ Stub[4 * I + 3] = 0x00000000; // nop
+ PtrAddr += 4;
+ }
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+  // Initialize all pointers to point at InitialPtrVal.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+
+void OrcMips64::writeResolverCode(uint8_t *ResolverMem, JITReentryFn ReentryFn,
+ void *CallbackMgr) {
+
+ const uint32_t ResolverCode[] = {
+ //resolver_entry:
+ 0x67bdff30, // 0x00: daddiu $sp,$sp,-208
+ 0xffa20000, // 0x04: sd v0,0(sp)
+ 0xffa30008, // 0x08: sd v1,8(sp)
+ 0xffa40010, // 0x0c: sd a0,16(sp)
+ 0xffa50018, // 0x10: sd a1,24(sp)
+ 0xffa60020, // 0x14: sd a2,32(sp)
+ 0xffa70028, // 0x18: sd a3,40(sp)
+ 0xffa80030, // 0x1c: sd a4,48(sp)
+ 0xffa90038, // 0x20: sd a5,56(sp)
+ 0xffaa0040, // 0x24: sd a6,64(sp)
+ 0xffab0048, // 0x28: sd a7,72(sp)
+ 0xffac0050, // 0x2c: sd t0,80(sp)
+ 0xffad0058, // 0x30: sd t1,88(sp)
+ 0xffae0060, // 0x34: sd t2,96(sp)
+ 0xffaf0068, // 0x38: sd t3,104(sp)
+ 0xffb00070, // 0x3c: sd s0,112(sp)
+ 0xffb10078, // 0x40: sd s1,120(sp)
+ 0xffb20080, // 0x44: sd s2,128(sp)
+ 0xffb30088, // 0x48: sd s3,136(sp)
+ 0xffb40090, // 0x4c: sd s4,144(sp)
+ 0xffb50098, // 0x50: sd s5,152(sp)
+ 0xffb600a0, // 0x54: sd s6,160(sp)
+ 0xffb700a8, // 0x58: sd s7,168(sp)
+ 0xffb800b0, // 0x5c: sd t8,176(sp)
+ 0xffb900b8, // 0x60: sd t9,184(sp)
+ 0xffbe00c0, // 0x64: sd fp,192(sp)
+ 0xffbf00c8, // 0x68: sd ra,200(sp)
+
+ // Callback manager addr.
+      0x00000000, // 0x6c: lui $a0,%highest(callbackmgr)
+      0x00000000, // 0x70: daddiu $a0,$a0,%higher(callbackmgr)
+      0x00000000, // 0x74: dsll $a0,$a0,16
+      0x00000000, // 0x78: daddiu $a0,$a0,%hi(callbackmgr)
+      0x00000000, // 0x7c: dsll $a0,$a0,16
+      0x00000000, // 0x80: daddiu $a0,$a0,%lo(callbackmgr)
+
+ 0x03e02825, // 0x84: move $a1, $ra
+ 0x64a5ffdc, // 0x88: daddiu $a1,$a1,-36
+
+ // JIT re-entry fn addr:
+      0x00000000, // 0x8c: lui $t9,%highest(reentry)
+      0x00000000, // 0x90: daddiu $t9,$t9,%higher(reentry)
+      0x00000000, // 0x94: dsll $t9,$t9,16
+      0x00000000, // 0x98: daddiu $t9,$t9,%hi(reentry)
+      0x00000000, // 0x9c: dsll $t9,$t9,16
+      0x00000000, // 0xa0: daddiu $t9,$t9,%lo(reentry)
+ 0x0320f809, // 0xa4: jalr $t9
+ 0x00000000, // 0xa8: nop
+ 0xdfbf00c8, // 0xac: ld ra, 200(sp)
+ 0xdfbe00c0, // 0xb0: ld fp, 192(sp)
+ 0xdfb900b8, // 0xb4: ld t9, 184(sp)
+ 0xdfb800b0, // 0xb8: ld t8, 176(sp)
+ 0xdfb700a8, // 0xbc: ld s7, 168(sp)
+ 0xdfb600a0, // 0xc0: ld s6, 160(sp)
+ 0xdfb50098, // 0xc4: ld s5, 152(sp)
+ 0xdfb40090, // 0xc8: ld s4, 144(sp)
+ 0xdfb30088, // 0xcc: ld s3, 136(sp)
+ 0xdfb20080, // 0xd0: ld s2, 128(sp)
+ 0xdfb10078, // 0xd4: ld s1, 120(sp)
+ 0xdfb00070, // 0xd8: ld s0, 112(sp)
+ 0xdfaf0068, // 0xdc: ld t3, 104(sp)
+ 0xdfae0060, // 0xe0: ld t2, 96(sp)
+ 0xdfad0058, // 0xe4: ld t1, 88(sp)
+ 0xdfac0050, // 0xe8: ld t0, 80(sp)
+ 0xdfab0048, // 0xec: ld a7, 72(sp)
+ 0xdfaa0040, // 0xf0: ld a6, 64(sp)
+ 0xdfa90038, // 0xf4: ld a5, 56(sp)
+ 0xdfa80030, // 0xf8: ld a4, 48(sp)
+ 0xdfa70028, // 0xfc: ld a3, 40(sp)
+ 0xdfa60020, // 0x100: ld a2, 32(sp)
+ 0xdfa50018, // 0x104: ld a1, 24(sp)
+ 0xdfa40010, // 0x108: ld a0, 16(sp)
+ 0xdfa30008, // 0x10c: ld v1, 8(sp)
+ 0x67bd00d0, // 0x110: daddiu $sp,$sp,208
+ 0x0300f825, // 0x114: move $ra, $t8
+ 0x03200008, // 0x118: jr $t9
+ 0x0040c825, // 0x11c: move $t9, $v0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x8c; // JIT re-entry fn addr lui
+ const unsigned CallbackMgrAddrOffset = 0x6c; // Callback manager addr lui
+
+ memcpy(ResolverMem, ResolverCode, sizeof(ResolverCode));
+
+ uint64_t CallMgrAddr = reinterpret_cast<uint64_t>(CallbackMgr);
+
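+  // Materialize the 64-bit callback manager address as four 16-bit chunks
+  // (lui/daddiu/dsll/daddiu/dsll/daddiu). As with %hi above, each rounding
+  // constant (0x800080008000, 0x80008000, 0x8000) pre-compensates for the
+  // sign-extension of the daddiu immediates applied after that chunk.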
+ uint32_t CallMgrLUi =
+ 0x3c040000 | (((CallMgrAddr + 0x800080008000) >> 48) & 0xFFFF);
+ uint32_t CallMgrDADDiu =
+ 0x64840000 | (((CallMgrAddr + 0x80008000) >> 32) & 0xFFFF);
+ uint32_t CallMgrDSLL = 0x00042438;
+ uint32_t CallMgrDADDiu2 =
+ 0x64840000 | ((((CallMgrAddr + 0x8000) >> 16) & 0xFFFF));
+ uint32_t CallMgrDSLL2 = 0x00042438;
+ uint32_t CallMgrDADDiu3 = 0x64840000 | ((CallMgrAddr)&0xFFFF);
+
+ memcpy(ResolverMem + CallbackMgrAddrOffset, &CallMgrLUi, sizeof(CallMgrLUi));
+ memcpy(ResolverMem + (CallbackMgrAddrOffset + 4), &CallMgrDADDiu,
+ sizeof(CallMgrDADDiu));
+ memcpy(ResolverMem + (CallbackMgrAddrOffset + 8), &CallMgrDSLL,
+ sizeof(CallMgrDSLL));
+ memcpy(ResolverMem + (CallbackMgrAddrOffset + 12), &CallMgrDADDiu2,
+ sizeof(CallMgrDADDiu2));
+ memcpy(ResolverMem + (CallbackMgrAddrOffset + 16), &CallMgrDSLL2,
+ sizeof(CallMgrDSLL2));
+ memcpy(ResolverMem + (CallbackMgrAddrOffset + 20), &CallMgrDADDiu3,
+ sizeof(CallMgrDADDiu3));
+
+ uint64_t ReentryAddr = reinterpret_cast<uint64_t>(ReentryFn);
+
+ uint32_t ReentryLUi =
+ 0x3c190000 | (((ReentryAddr + 0x800080008000) >> 48) & 0xFFFF);
+
+ uint32_t ReentryDADDiu =
+ 0x67390000 | (((ReentryAddr + 0x80008000) >> 32) & 0xFFFF);
+
+ uint32_t ReentryDSLL = 0x0019cc38;
+
+ uint32_t ReentryDADDiu2 =
+ 0x67390000 | (((ReentryAddr + 0x8000) >> 16) & 0xFFFF);
+
+ uint32_t ReentryDSLL2 = 0x0019cc38;
+
+ uint32_t ReentryDADDiu3 = 0x67390000 | ((ReentryAddr)&0xFFFF);
+
+ memcpy(ResolverMem + ReentryFnAddrOffset, &ReentryLUi, sizeof(ReentryLUi));
+ memcpy(ResolverMem + (ReentryFnAddrOffset + 4), &ReentryDADDiu,
+ sizeof(ReentryDADDiu));
+ memcpy(ResolverMem + (ReentryFnAddrOffset + 8), &ReentryDSLL,
+ sizeof(ReentryDSLL));
+ memcpy(ResolverMem + (ReentryFnAddrOffset + 12), &ReentryDADDiu2,
+ sizeof(ReentryDADDiu2));
+ memcpy(ResolverMem + (ReentryFnAddrOffset + 16), &ReentryDSLL2,
+ sizeof(ReentryDSLL2));
+ memcpy(ResolverMem + (ReentryFnAddrOffset + 20), &ReentryDADDiu3,
+ sizeof(ReentryDADDiu3));
+}
+
+void OrcMips64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines) {
+
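+  // MIPS64 trampolines are ten words (40 bytes): $ra is stashed in $t8 and
+  // the resolver address is rebuilt with the same six-instruction sequence
+  // used in the resolver above. Here $ra ends up 36 bytes past the start of
+  // the trampoline, matching the resolver's daddiu $a1,$a1,-36.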
+ uint32_t *Trampolines = reinterpret_cast<uint32_t *>(TrampolineMem);
+ uint64_t ResolveAddr = reinterpret_cast<uint64_t>(ResolverAddr);
+
+  uint64_t HighestAddr = ((ResolveAddr + 0x800080008000) >> 48);
+  uint64_t HigherAddr = ((ResolveAddr + 0x80008000) >> 32);
+  uint64_t HiAddr = ((ResolveAddr + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ Trampolines[10 * I + 0] = 0x03e0c025; // move $t8,$ra
+    Trampolines[10 * I + 1] = 0x3c190000 | (HighestAddr & 0xFFFF); // lui $t9,%highest(resolveAddr)
+    Trampolines[10 * I + 2] = 0x67390000 | (HigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(resolveAddr)
+    Trampolines[10 * I + 3] = 0x0019cc38; // dsll $t9,$t9,16
+    Trampolines[10 * I + 4] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(resolveAddr)
+    Trampolines[10 * I + 5] = 0x0019cc38; // dsll $t9,$t9,16
+    Trampolines[10 * I + 6] = 0x67390000 | (ResolveAddr & 0xFFFF); // daddiu $t9,$t9,%lo(resolveAddr)
+ Trampolines[10 * I + 7] = 0x0320f809; // jalr $t9
+ Trampolines[10 * I + 8] = 0x00000000; // nop
+ Trampolines[10 * I + 9] = 0x00000000; // nop
+ }
+}
+
+Error OrcMips64::emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+  //   lui $t9,%highest(ptr1)
+  //   daddiu $t9,$t9,%higher(ptr1)
+  //   dsll $t9,$t9,16
+  //   daddiu $t9,$t9,%hi(ptr1)
+  //   dsll $t9,$t9,16
+  //   ld $t9,%lo(ptr1)($t9)
+  //   jr $t9
+  // stub2:
+  //   lui $t9,%highest(ptr2)
+  //   daddiu $t9,$t9,%higher(ptr2)
+  //   dsll $t9,$t9,16
+  //   daddiu $t9,$t9,%hi(ptr2)
+  //   dsll $t9,$t9,16
+  //   ld $t9,%lo(ptr2)($t9)
+  //   jr $t9
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .dword 0x0
+ // ptr2:
+ // .dword 0x0
+ //
+ // ...
+ const unsigned StubSize = IndirectStubsInfo::StubSize;
+
+ // Emit at least MinStubs, rounded up to fill the pages allocated.
+ static const unsigned PageSize = sys::Process::getPageSizeEstimate();
+ unsigned NumPages = ((MinStubs * StubSize) + (PageSize - 1)) / PageSize;
+ unsigned NumStubs = (NumPages * PageSize) / StubSize;
+
+ // Allocate memory for stubs and pointers in one call.
+ std::error_code EC;
+ auto StubsMem = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ 2 * NumPages * PageSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Create separate MemoryBlocks representing the stubs and pointers.
+ sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize);
+ sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) +
+ NumPages * PageSize,
+ NumPages * PageSize);
+
+  // Populate the stubs page and mark it executable.
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlock.base());
+ uint64_t PtrAddr = reinterpret_cast<uint64_t>(PtrsBlock.base());
+
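+  // Each pointer slot is a .dword, so PtrAddr advances by 8 per stub (the
+  // MIPS32 variant above steps by 4 over its .word slots).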
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 8) {
+    uint64_t HighestAddr = ((PtrAddr + 0x800080008000) >> 48);
+    uint64_t HigherAddr = ((PtrAddr + 0x80008000) >> 32);
+    uint64_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+    Stub[8 * I + 0] = 0x3c190000 | (HighestAddr & 0xFFFF); // lui $t9,%highest(ptr)
+    Stub[8 * I + 1] = 0x67390000 | (HigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(ptr)
+ Stub[8 * I + 2] = 0x0019cc38; // dsll $t9,$t9,16
+ Stub[8 * I + 3] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(ptr)
+ Stub[8 * I + 4] = 0x0019cc38; // dsll $t9,$t9,16
+    Stub[8 * I + 5] = 0xdf390000 | (PtrAddr & 0xFFFF); // ld $t9,%lo(ptr)($t9)
+ Stub[8 * I + 6] = 0x03200008; // jr $t9
+ Stub[8 * I + 7] = 0x00000000; // nop
+ }
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+  // Initialize all pointers to point at InitialPtrVal.
+ void **Ptr = reinterpret_cast<void **>(PtrsBlock.base());
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Ptr[I] = InitialPtrVal;
+
+ StubsInfo = IndirectStubsInfo(NumStubs, std::move(StubsMem));
+
+ return Error::success();
+}
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp b/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp
new file mode 100644
index 0000000000000..28c8479abba44
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/OrcCBindings.cpp
@@ -0,0 +1,158 @@
+//===----------- OrcCBindings.cpp - C bindings for the Orc APIs -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OrcCBindingsStack.h"
+#include "llvm-c/OrcBindings.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+
+using namespace llvm;
+
+LLVMOrcJITStackRef LLVMOrcCreateInstance(LLVMTargetMachineRef TM) {
+ TargetMachine *TM2(unwrap(TM));
+
+ Triple T(TM2->getTargetTriple());
+
+ auto IndirectStubsMgrBuilder =
+ orc::createLocalIndirectStubsManagerBuilder(T);
+
+ OrcCBindingsStack *JITStack =
+ new OrcCBindingsStack(*TM2, std::move(IndirectStubsMgrBuilder));
+
+ return wrap(JITStack);
+}
+
+const char *LLVMOrcGetErrorMsg(LLVMOrcJITStackRef JITStack) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return J.getErrorMessage().c_str();
+}
+
+void LLVMOrcGetMangledSymbol(LLVMOrcJITStackRef JITStack, char **MangledName,
+ const char *SymbolName) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::string Mangled = J.mangle(SymbolName);
+ *MangledName = new char[Mangled.size() + 1];
+ strcpy(*MangledName, Mangled.c_str());
+}
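+// A typical caller-side pattern (hypothetical variable names): the returned
+// buffer is owned by the caller and must be freed with
+// LLVMOrcDisposeMangledSymbol:
+//
+//   char *Mangled;
+//   LLVMOrcGetMangledSymbol(JIT, &Mangled, "main");
+//   /* ...look up Mangled... */
+//   LLVMOrcDisposeMangledSymbol(Mangled);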
+
+void LLVMOrcDisposeMangledSymbol(char *MangledName) { delete[] MangledName; }
+
+LLVMErrorRef LLVMOrcCreateLazyCompileCallback(
+ LLVMOrcJITStackRef JITStack, LLVMOrcTargetAddress *RetAddr,
+ LLVMOrcLazyCompileCallbackFn Callback, void *CallbackCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ if (auto Addr = J.createLazyCompileCallback(Callback, CallbackCtx)) {
+ *RetAddr = *Addr;
+ return LLVMErrorSuccess;
+ } else
+ return wrap(Addr.takeError());
+}
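+// Callers check the returned LLVMErrorRef in the usual llvm-c style. A
+// sketch, assuming the llvm-c/Error.h helpers and hypothetical caller names:
+//
+//   LLVMOrcTargetAddress CallbackAddr;
+//   LLVMErrorRef E =
+//       LLVMOrcCreateLazyCompileCallback(JIT, &CallbackAddr, OnCompile, Ctx);
+//   if (E) {
+//     char *Msg = LLVMGetErrorMessage(E);
+//     /* report Msg, then free it with LLVMDisposeErrorMessage(Msg) */
+//   }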
+
+LLVMErrorRef LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress InitAddr) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return wrap(J.createIndirectStub(StubName, InitAddr));
+}
+
+LLVMErrorRef LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress NewAddr) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return wrap(J.setIndirectStubPointer(StubName, NewAddr));
+}
+
+LLVMErrorRef LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::unique_ptr<Module> M(unwrap(Mod));
+ if (auto Handle =
+ J.addIRModuleEager(std::move(M), SymbolResolver, SymbolResolverCtx)) {
+ *RetHandle = *Handle;
+ return LLVMErrorSuccess;
+ } else
+ return wrap(Handle.takeError());
+}
+
+LLVMErrorRef LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::unique_ptr<Module> M(unwrap(Mod));
+ if (auto Handle =
+ J.addIRModuleLazy(std::move(M), SymbolResolver, SymbolResolverCtx)) {
+ *RetHandle = *Handle;
+ return LLVMErrorSuccess;
+ } else
+ return wrap(Handle.takeError());
+}
+
+LLVMErrorRef LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMMemoryBufferRef Obj,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ std::unique_ptr<MemoryBuffer> O(unwrap(Obj));
+ if (auto Handle =
+ J.addObject(std::move(O), SymbolResolver, SymbolResolverCtx)) {
+ *RetHandle = *Handle;
+ return LLVMErrorSuccess;
+ } else
+ return wrap(Handle.takeError());
+}
+
+LLVMErrorRef LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle H) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ return wrap(J.removeModule(H));
+}
+
+LLVMErrorRef LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ const char *SymbolName) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ if (auto Addr = J.findSymbolAddress(SymbolName, true)) {
+ *RetAddr = *Addr;
+ return LLVMErrorSuccess;
+ } else
+ return wrap(Addr.takeError());
+}
+
+LLVMErrorRef LLVMOrcGetSymbolAddressIn(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ LLVMOrcModuleHandle H,
+ const char *SymbolName) {
+ OrcCBindingsStack &J = *unwrap(JITStack);
+ if (auto Addr = J.findSymbolAddressIn(H, SymbolName, true)) {
+ *RetAddr = *Addr;
+ return LLVMErrorSuccess;
+ } else
+ return wrap(Addr.takeError());
+}
+
+LLVMErrorRef LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack) {
+ auto *J = unwrap(JITStack);
+ auto Err = J->shutdown();
+ delete J;
+ return wrap(std::move(Err));
+}
+
+void LLVMOrcRegisterJITEventListener(LLVMOrcJITStackRef JITStack,
+                                     LLVMJITEventListenerRef L) {
+  unwrap(JITStack)->RegisterJITEventListener(unwrap(L));
+}
+
+void LLVMOrcUnregisterJITEventListener(LLVMOrcJITStackRef JITStack,
+                                       LLVMJITEventListenerRef L) {
+  unwrap(JITStack)->UnregisterJITEventListener(unwrap(L));
+}
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h b/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
new file mode 100644
index 0000000000000..e0af3df9d010f
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/OrcCBindingsStack.h
@@ -0,0 +1,534 @@
+//===- OrcCBindingsStack.h - Orc JIT stack for C bindings -----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_ORC_ORCCBINDINGSSTACK_H
+#define LLVM_LIB_EXECUTIONENGINE_ORC_ORCCBINDINGSSTACK_H
+
+#include "llvm-c/OrcBindings.h"
+#include "llvm-c/TargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class OrcCBindingsStack;
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(OrcCBindingsStack, LLVMOrcJITStackRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
+
+namespace detail {
+
+// FIXME: Kill this off once the Layer concept becomes an interface.
+class GenericLayer {
+public:
+ virtual ~GenericLayer() = default;
+
+ virtual JITSymbol findSymbolIn(orc::VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) = 0;
+ virtual Error removeModule(orc::VModuleKey K) = 0;
+};
+
+template <typename LayerT> class GenericLayerImpl : public GenericLayer {
+public:
+  GenericLayerImpl(LayerT &Layer) : Layer(Layer) {}
+
+  JITSymbol findSymbolIn(orc::VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) override {
+    return Layer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+  }
+
+  Error removeModule(orc::VModuleKey K) override {
+    return Layer.removeModule(K);
+  }
+
+private:
+  LayerT &Layer;
+};
+
+template <>
+class GenericLayerImpl<orc::LegacyRTDyldObjectLinkingLayer>
+    : public GenericLayer {
+private:
+  using LayerT = orc::LegacyRTDyldObjectLinkingLayer;
+
+public:
+  GenericLayerImpl(LayerT &Layer) : Layer(Layer) {}
+
+  JITSymbol findSymbolIn(orc::VModuleKey K, const std::string &Name,
+                         bool ExportedSymbolsOnly) override {
+    return Layer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+  }
+
+  Error removeModule(orc::VModuleKey K) override {
+    return Layer.removeObject(K);
+  }
+
+private:
+  LayerT &Layer;
+};
+
+template <typename LayerT>
+std::unique_ptr<GenericLayerImpl<LayerT>> createGenericLayer(LayerT &Layer) {
+  return std::make_unique<GenericLayerImpl<LayerT>>(Layer);
+}
+
+} // end namespace detail
+
+class OrcCBindingsStack {
+public:
+
+ using CompileCallbackMgr = orc::JITCompileCallbackManager;
+ using ObjLayerT = orc::LegacyRTDyldObjectLinkingLayer;
+ using CompileLayerT = orc::LegacyIRCompileLayer<ObjLayerT, orc::SimpleCompiler>;
+ using CODLayerT =
+ orc::LegacyCompileOnDemandLayer<CompileLayerT, CompileCallbackMgr>;
+
+ using CallbackManagerBuilder =
+ std::function<std::unique_ptr<CompileCallbackMgr>()>;
+
+ using IndirectStubsManagerBuilder = CODLayerT::IndirectStubsManagerBuilderT;
+
+private:
+
+ using OwningObject = object::OwningBinary<object::ObjectFile>;
+
+ class CBindingsResolver : public orc::SymbolResolver {
+ public:
+ CBindingsResolver(OrcCBindingsStack &Stack,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx)
+ : Stack(Stack), ExternalResolver(std::move(ExternalResolver)),
+ ExternalResolverCtx(std::move(ExternalResolverCtx)) {}
+
+ orc::SymbolNameSet
+ getResponsibilitySet(const orc::SymbolNameSet &Symbols) override {
+ orc::SymbolNameSet Result;
+
+ for (auto &S : Symbols) {
+ if (auto Sym = findSymbol(*S)) {
+ if (!Sym.getFlags().isStrong())
+ Result.insert(S);
+ } else if (auto Err = Sym.takeError()) {
+ Stack.reportError(std::move(Err));
+ return orc::SymbolNameSet();
+ }
+ }
+
+ return Result;
+ }
+
+ orc::SymbolNameSet
+ lookup(std::shared_ptr<orc::AsynchronousSymbolQuery> Query,
+ orc::SymbolNameSet Symbols) override {
+ orc::SymbolNameSet UnresolvedSymbols;
+
+ for (auto &S : Symbols) {
+ if (auto Sym = findSymbol(*S)) {
+ if (auto Addr = Sym.getAddress()) {
+ Query->notifySymbolMetRequiredState(
+ S, JITEvaluatedSymbol(*Addr, Sym.getFlags()));
+ } else {
+ Stack.ES.legacyFailQuery(*Query, Addr.takeError());
+ return orc::SymbolNameSet();
+ }
+ } else if (auto Err = Sym.takeError()) {
+ Stack.ES.legacyFailQuery(*Query, std::move(Err));
+ return orc::SymbolNameSet();
+ } else
+ UnresolvedSymbols.insert(S);
+ }
+
+ if (Query->isComplete())
+ Query->handleComplete();
+
+ return UnresolvedSymbols;
+ }
+
+ private:
+ JITSymbol findSymbol(const std::string &Name) {
+ // Search order:
+ // 1. JIT'd symbols.
+ // 2. Runtime overrides.
+ // 3. External resolver (if present).
+
+ if (Stack.CODLayer) {
+ if (auto Sym = Stack.CODLayer->findSymbol(Name, true))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+          return std::move(Err);
+ } else {
+ if (auto Sym = Stack.CompileLayer.findSymbol(Name, true))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+          return std::move(Err);
+ }
+
+ if (auto Sym = Stack.CXXRuntimeOverrides.searchOverrides(Name))
+ return Sym;
+
+ if (ExternalResolver)
+ return JITSymbol(ExternalResolver(Name.c_str(), ExternalResolverCtx),
+ JITSymbolFlags::Exported);
+
+ return JITSymbol(nullptr);
+ }
+
+ OrcCBindingsStack &Stack;
+ LLVMOrcSymbolResolverFn ExternalResolver;
+ void *ExternalResolverCtx = nullptr;
+ };
+
+public:
+ OrcCBindingsStack(TargetMachine &TM,
+ IndirectStubsManagerBuilder IndirectStubsMgrBuilder)
+ : CCMgr(createCompileCallbackManager(TM, ES)), DL(TM.createDataLayout()),
+ IndirectStubsMgr(IndirectStubsMgrBuilder()),
+ ObjectLayer(
+ AcknowledgeORCv1Deprecation, ES,
+ [this](orc::VModuleKey K) {
+ auto ResolverI = Resolvers.find(K);
+ assert(ResolverI != Resolvers.end() &&
+ "No resolver for module K");
+ auto Resolver = std::move(ResolverI->second);
+ Resolvers.erase(ResolverI);
+ return ObjLayerT::Resources{
+ std::make_shared<SectionMemoryManager>(), Resolver};
+ },
+ nullptr,
+ [this](orc::VModuleKey K, const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &LoadedObjInfo) {
+ this->notifyFinalized(K, Obj, LoadedObjInfo);
+ },
+ [this](orc::VModuleKey K, const object::ObjectFile &Obj) {
+ this->notifyFreed(K, Obj);
+ }),
+ CompileLayer(AcknowledgeORCv1Deprecation, ObjectLayer,
+ orc::SimpleCompiler(TM)),
+ CODLayer(createCODLayer(ES, CompileLayer, CCMgr.get(),
+ std::move(IndirectStubsMgrBuilder), Resolvers)),
+ CXXRuntimeOverrides(
+ AcknowledgeORCv1Deprecation,
+ [this](const std::string &S) { return mangle(S); }) {}
+
+ Error shutdown() {
+ // Run any destructors registered with __cxa_atexit.
+ CXXRuntimeOverrides.runDestructors();
+ // Run any IR destructors.
+ for (auto &DtorRunner : IRStaticDestructorRunners)
+ if (auto Err = DtorRunner.runViaLayer(*this))
+ return Err;
+ return Error::success();
+ }
+
+ std::string mangle(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return MangledName;
+ }
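+  // For example: on targets whose DataLayout specifies a '_' global prefix
+  // (32-bit Darwin, for instance), mangle("main") above yields "_main";
+  // most ELF targets return the name unchanged.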
+
+ template <typename PtrTy>
+ static PtrTy fromTargetAddress(JITTargetAddress Addr) {
+ return reinterpret_cast<PtrTy>(static_cast<uintptr_t>(Addr));
+ }
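+  // A sketch of intended use (hypothetical names): convert a looked-up
+  // address into a directly callable pointer:
+  //
+  //   auto *Main = fromTargetAddress<int (*)(int, char **)>(MainAddr);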
+
+ Expected<JITTargetAddress>
+ createLazyCompileCallback(LLVMOrcLazyCompileCallbackFn Callback,
+ void *CallbackCtx) {
+ auto WrappedCallback = [=]() -> JITTargetAddress {
+ return Callback(wrap(this), CallbackCtx);
+ };
+
+ return CCMgr->getCompileCallback(std::move(WrappedCallback));
+ }
+
+ Error createIndirectStub(StringRef StubName, JITTargetAddress Addr) {
+ return IndirectStubsMgr->createStub(StubName, Addr,
+ JITSymbolFlags::Exported);
+ }
+
+ Error setIndirectStubPointer(StringRef Name, JITTargetAddress Addr) {
+ return IndirectStubsMgr->updatePointer(Name, Addr);
+ }
+
+ template <typename LayerT>
+ Expected<orc::VModuleKey>
+ addIRModule(LayerT &Layer, std::unique_ptr<Module> M,
+ std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+
+ // Attach a data-layout if one isn't already present.
+ if (M->getDataLayout().isDefault())
+ M->setDataLayout(DL);
+
+ // Record the static constructors and destructors. We have to do this before
+ // we hand over ownership of the module to the JIT.
+ std::vector<std::string> CtorNames, DtorNames;
+ for (auto Ctor : orc::getConstructors(*M))
+ CtorNames.push_back(mangle(Ctor.Func->getName()));
+ for (auto Dtor : orc::getDestructors(*M))
+ DtorNames.push_back(mangle(Dtor.Func->getName()));
+
+ // Add the module to the JIT.
+ auto K = ES.allocateVModule();
+ Resolvers[K] = std::make_shared<CBindingsResolver>(*this, ExternalResolver,
+ ExternalResolverCtx);
+ if (auto Err = Layer.addModule(K, std::move(M)))
+ return std::move(Err);
+
+ KeyLayers[K] = detail::createGenericLayer(Layer);
+
+ // Run the static constructors, and save the static destructor runner for
+ // execution when the JIT is torn down.
+ orc::LegacyCtorDtorRunner<OrcCBindingsStack> CtorRunner(
+ AcknowledgeORCv1Deprecation, std::move(CtorNames), K);
+ if (auto Err = CtorRunner.runViaLayer(*this))
+ return std::move(Err);
+
+ IRStaticDestructorRunners.emplace_back(AcknowledgeORCv1Deprecation,
+ std::move(DtorNames), K);
+
+ return K;
+ }
+
+ Expected<orc::VModuleKey>
+ addIRModuleEager(std::unique_ptr<Module> M,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ return addIRModule(CompileLayer, std::move(M),
+ std::make_unique<SectionMemoryManager>(),
+ std::move(ExternalResolver), ExternalResolverCtx);
+ }
+
+ Expected<orc::VModuleKey>
+ addIRModuleLazy(std::unique_ptr<Module> M,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ if (!CODLayer)
+      return make_error<StringError>("Cannot add lazy module: no compile "
+                                     "callback manager available",
+                                     inconvertibleErrorCode());
+
+ return addIRModule(*CODLayer, std::move(M),
+ std::make_unique<SectionMemoryManager>(),
+ std::move(ExternalResolver), ExternalResolverCtx);
+ }
+
+ Error removeModule(orc::VModuleKey K) {
+ // FIXME: Should error release the module key?
+ if (auto Err = KeyLayers[K]->removeModule(K))
+ return Err;
+ ES.releaseVModule(K);
+ KeyLayers.erase(K);
+ return Error::success();
+ }
+
+ Expected<orc::VModuleKey> addObject(std::unique_ptr<MemoryBuffer> ObjBuffer,
+ LLVMOrcSymbolResolverFn ExternalResolver,
+ void *ExternalResolverCtx) {
+ if (auto Obj = object::ObjectFile::createObjectFile(
+ ObjBuffer->getMemBufferRef())) {
+
+ auto K = ES.allocateVModule();
+ Resolvers[K] = std::make_shared<CBindingsResolver>(
+ *this, ExternalResolver, ExternalResolverCtx);
+
+ if (auto Err = ObjectLayer.addObject(K, std::move(ObjBuffer)))
+ return std::move(Err);
+
+ KeyLayers[K] = detail::createGenericLayer(ObjectLayer);
+
+ return K;
+ } else
+ return Obj.takeError();
+ }
+
+ JITSymbol findSymbol(const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = IndirectStubsMgr->findStub(Name, ExportedSymbolsOnly))
+ return Sym;
+ if (CODLayer)
+ return CODLayer->findSymbol(mangle(Name), ExportedSymbolsOnly);
+ return CompileLayer.findSymbol(mangle(Name), ExportedSymbolsOnly);
+ }
+
+ JITSymbol findSymbolIn(orc::VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ assert(KeyLayers.count(K) && "looking up symbol in unknown module");
+ return KeyLayers[K]->findSymbolIn(K, mangle(Name), ExportedSymbolsOnly);
+ }
+
+ Expected<JITTargetAddress> findSymbolAddress(const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = findSymbol(Name, ExportedSymbolsOnly)) {
+ // Successful lookup, non-null symbol:
+ if (auto AddrOrErr = Sym.getAddress())
+ return *AddrOrErr;
+ else
+ return AddrOrErr.takeError();
+ } else if (auto Err = Sym.takeError()) {
+ // Lookup failure - report error.
+ return std::move(Err);
+ }
+
+    // Symbol not found. Return 0.
+ return 0;
+ }
+
+ Expected<JITTargetAddress> findSymbolAddressIn(orc::VModuleKey K,
+ const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = findSymbolIn(K, Name, ExportedSymbolsOnly)) {
+ // Successful lookup, non-null symbol:
+ if (auto AddrOrErr = Sym.getAddress())
+ return *AddrOrErr;
+ else
+ return AddrOrErr.takeError();
+ } else if (auto Err = Sym.takeError()) {
+ // Lookup failure - report error.
+ return std::move(Err);
+ }
+
+ // Symbol not found. Return 0.
+ return 0;
+ }
+
+ const std::string &getErrorMessage() const { return ErrMsg; }
+
+ void RegisterJITEventListener(JITEventListener *L) {
+ if (!L)
+ return;
+ EventListeners.push_back(L);
+ }
+
+ void UnregisterJITEventListener(JITEventListener *L) {
+ if (!L)
+ return;
+
+ auto I = find(reverse(EventListeners), L);
+ if (I != EventListeners.rend()) {
+ std::swap(*I, EventListeners.back());
+ EventListeners.pop_back();
+ }
+ }
+
+private:
+ using ResolverMap =
+ std::map<orc::VModuleKey, std::shared_ptr<orc::SymbolResolver>>;
+
+ static std::unique_ptr<CompileCallbackMgr>
+ createCompileCallbackManager(TargetMachine &TM, orc::ExecutionSession &ES) {
+ auto CCMgr = createLocalCompileCallbackManager(TM.getTargetTriple(), ES, 0);
+ if (!CCMgr) {
+      // FIXME: It would be good if we could report this somewhere, but we
+      // don't have an instance yet.
+ logAllUnhandledErrors(CCMgr.takeError(), errs(), "ORC error: ");
+ return nullptr;
+ }
+ return std::move(*CCMgr);
+ }
+
+ static std::unique_ptr<CODLayerT>
+ createCODLayer(orc::ExecutionSession &ES, CompileLayerT &CompileLayer,
+ CompileCallbackMgr *CCMgr,
+ IndirectStubsManagerBuilder IndirectStubsMgrBuilder,
+ ResolverMap &Resolvers) {
+ // If there is no compile callback manager available we can not create a
+ // compile on demand layer.
+ if (!CCMgr)
+ return nullptr;
+
+ return std::make_unique<CODLayerT>(
+ AcknowledgeORCv1Deprecation, ES, CompileLayer,
+ [&Resolvers](orc::VModuleKey K) {
+ auto ResolverI = Resolvers.find(K);
+ assert(ResolverI != Resolvers.end() && "No resolver for module K");
+ return ResolverI->second;
+ },
+ [&Resolvers](orc::VModuleKey K,
+ std::shared_ptr<orc::SymbolResolver> Resolver) {
+ assert(!Resolvers.count(K) && "Resolver already present");
+ Resolvers[K] = std::move(Resolver);
+ },
+ [](Function &F) { return std::set<Function *>({&F}); }, *CCMgr,
+ std::move(IndirectStubsMgrBuilder), false);
+ }
+
+ void reportError(Error Err) {
+ // FIXME: Report errors on the execution session.
+ logAllUnhandledErrors(std::move(Err), errs(), "ORC error: ");
+  }
+
+ void notifyFinalized(orc::VModuleKey K,
+ const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &LoadedObjInfo) {
+ uint64_t Key = static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(Obj.getData().data()));
+ for (auto &Listener : EventListeners)
+ Listener->notifyObjectLoaded(Key, Obj, LoadedObjInfo);
+ }
+
+ void notifyFreed(orc::VModuleKey K, const object::ObjectFile &Obj) {
+ uint64_t Key = static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(Obj.getData().data()));
+ for (auto &Listener : EventListeners)
+ Listener->notifyFreeingObject(Key);
+ }
+
+ orc::ExecutionSession ES;
+ std::unique_ptr<CompileCallbackMgr> CCMgr;
+
+ std::vector<JITEventListener *> EventListeners;
+
+ DataLayout DL;
+ SectionMemoryManager CCMgrMemMgr;
+
+ std::unique_ptr<orc::IndirectStubsManager> IndirectStubsMgr;
+
+ ObjLayerT ObjectLayer;
+ CompileLayerT CompileLayer;
+ std::unique_ptr<CODLayerT> CODLayer;
+
+ std::map<orc::VModuleKey, std::unique_ptr<detail::GenericLayer>> KeyLayers;
+
+ orc::LegacyLocalCXXRuntimeOverrides CXXRuntimeOverrides;
+ std::vector<orc::LegacyCtorDtorRunner<OrcCBindingsStack>> IRStaticDestructorRunners;
+ std::string ErrMsg;
+
+ ResolverMap Resolvers;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_ORC_ORCCBINDINGSSTACK_H
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcError.cpp b/llvm/lib/ExecutionEngine/Orc/OrcError.cpp
new file mode 100644
index 0000000000000..e6e9a095319ca
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/OrcError.cpp
@@ -0,0 +1,115 @@
+//===---------------- OrcError.cpp - Error codes for ORC ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Error codes for ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class OrcErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "orc"; }
+
+ std::string message(int condition) const override {
+ switch (static_cast<OrcErrorCode>(condition)) {
+ case OrcErrorCode::UnknownORCError:
+ return "Unknown ORC error";
+ case OrcErrorCode::DuplicateDefinition:
+ return "Duplicate symbol definition";
+ case OrcErrorCode::JITSymbolNotFound:
+ return "JIT symbol not found";
+ case OrcErrorCode::RemoteAllocatorDoesNotExist:
+ return "Remote allocator does not exist";
+ case OrcErrorCode::RemoteAllocatorIdAlreadyInUse:
+ return "Remote allocator Id already in use";
+ case OrcErrorCode::RemoteMProtectAddrUnrecognized:
+ return "Remote mprotect call references unallocated memory";
+ case OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist:
+ return "Remote indirect stubs owner does not exist";
+ case OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse:
+ return "Remote indirect stubs owner Id already in use";
+ case OrcErrorCode::RPCConnectionClosed:
+ return "RPC connection closed";
+ case OrcErrorCode::RPCCouldNotNegotiateFunction:
+ return "Could not negotiate RPC function";
+ case OrcErrorCode::RPCResponseAbandoned:
+ return "RPC response abandoned";
+ case OrcErrorCode::UnexpectedRPCCall:
+ return "Unexpected RPC call";
+ case OrcErrorCode::UnexpectedRPCResponse:
+ return "Unexpected RPC response";
+ case OrcErrorCode::UnknownErrorCodeFromRemote:
+ return "Unknown error returned from remote RPC function "
+ "(Use StringError to get error message)";
+ case OrcErrorCode::UnknownResourceHandle:
+ return "Unknown resource handle";
+ }
+ llvm_unreachable("Unhandled error code");
+ }
+};
+
+static ManagedStatic<OrcErrorCategory> OrcErrCat;
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+char DuplicateDefinition::ID = 0;
+char JITSymbolNotFound::ID = 0;
+
+std::error_code orcError(OrcErrorCode ErrCode) {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(ErrCode), *OrcErrCat);
+}
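+// orcError is the std::error_code half of the bridge; Error-based code can
+// wrap it with the standard helper, e.g. (a sketch):
+//
+//   return errorCodeToError(orcError(OrcErrorCode::RPCConnectionClosed));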
+
+
+DuplicateDefinition::DuplicateDefinition(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code DuplicateDefinition::convertToErrorCode() const {
+ return orcError(OrcErrorCode::DuplicateDefinition);
+}
+
+void DuplicateDefinition::log(raw_ostream &OS) const {
+ OS << "Duplicate definition of symbol '" << SymbolName << "'";
+}
+
+const std::string &DuplicateDefinition::getSymbolName() const {
+ return SymbolName;
+}
+
+JITSymbolNotFound::JITSymbolNotFound(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code JITSymbolNotFound::convertToErrorCode() const {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(OrcErrorCode::JITSymbolNotFound),
+ *OrcErrCat);
+}
+
+void JITSymbolNotFound::log(raw_ostream &OS) const {
+ OS << "Could not find symbol '" << SymbolName << "'";
+}
+
+const std::string &JITSymbolNotFound::getSymbolName() const {
+ return SymbolName;
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp b/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
new file mode 100644
index 0000000000000..772a9c2c4ab2f
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
@@ -0,0 +1,138 @@
+//===-------- OrcMCJITReplacement.cpp - Orc-based MCJIT replacement -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OrcMCJITReplacement.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+
+namespace {
+
+static struct RegisterJIT {
+ RegisterJIT() { llvm::orc::OrcMCJITReplacement::Register(); }
+} JITRegistrator;
+
+}
+
+extern "C" void LLVMLinkInOrcMCJITReplacement() {}
+
+namespace llvm {
+namespace orc {
+
+GenericValue
+OrcMCJITReplacement::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
+ assert(F && "Function *F was null at entry to run()");
+
+ void *FPtr = getPointerToFunction(F);
+ assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+ FunctionType *FTy = F->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+
+ assert((FTy->getNumParams() == ArgValues.size() ||
+ (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+ "Wrong number of arguments passed into function!");
+ assert(FTy->getNumParams() == ArgValues.size() &&
+ "This doesn't support passing arguments through varargs (yet)!");
+
+ // Handle some common cases first. These cases correspond to common `main'
+ // prototypes.
+ if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
+ switch (ArgValues.size()) {
+ case 3:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy() &&
+ FTy->getParamType(2)->isPointerTy()) {
+ int (*PF)(int, char **, const char **) =
+ (int (*)(int, char **, const char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1]),
+ (const char **)GVTOP(ArgValues[2])));
+ return rv;
+ }
+ break;
+ case 2:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy()) {
+ int (*PF)(int, char **) = (int (*)(int, char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1])));
+ return rv;
+ }
+ break;
+ case 1:
+ if (FTy->getNumParams() == 1 && FTy->getParamType(0)->isIntegerTy(32)) {
+ GenericValue rv;
+ int (*PF)(int) = (int (*)(int))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+ return rv;
+ }
+ break;
+ }
+ }
+
+  // Next, handle the cases where no arguments are passed.
+ if (ArgValues.empty()) {
+ GenericValue rv;
+ switch (RetTy->getTypeID()) {
+ default:
+ llvm_unreachable("Unknown return type for function call!");
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+ if (BitWidth == 1)
+ rv.IntVal = APInt(BitWidth, ((bool (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 8)
+ rv.IntVal = APInt(BitWidth, ((char (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 16)
+ rv.IntVal = APInt(BitWidth, ((short (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 32)
+ rv.IntVal = APInt(BitWidth, ((int (*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 64)
+ rv.IntVal = APInt(BitWidth, ((int64_t (*)())(intptr_t)FPtr)());
+ else
+ llvm_unreachable("Integer types > 64 bits not supported");
+ return rv;
+ }
+ case Type::VoidTyID:
+ rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)());
+ return rv;
+ case Type::FloatTyID:
+ rv.FloatVal = ((float (*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::DoubleTyID:
+ rv.DoubleVal = ((double (*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ llvm_unreachable("long double not supported yet");
+ case Type::PointerTyID:
+ return PTOGV(((void *(*)())(intptr_t)FPtr)());
+ }
+ }
+
+ llvm_unreachable("Full-featured argument passing not supported yet!");
+}
+
+void OrcMCJITReplacement::runStaticConstructorsDestructors(bool isDtors) {
+ auto &CtorDtorsMap = isDtors ? UnexecutedDestructors : UnexecutedConstructors;
+
+ for (auto &KV : CtorDtorsMap)
+ cantFail(LegacyCtorDtorRunner<LazyEmitLayerT>(
+ AcknowledgeORCv1Deprecation, std::move(KV.second), KV.first)
+ .runViaLayer(LazyEmitLayer));
+
+ CtorDtorsMap.clear();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h b/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
new file mode 100644
index 0000000000000..169dc8f1d02b8
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
@@ -0,0 +1,501 @@
+//===- OrcMCJITReplacement.h - Orc based MCJIT replacement ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Orc based MCJIT replacement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
+#define LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/LazyEmittingLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class ObjectCache;
+
+namespace orc {
+
+class OrcMCJITReplacement : public ExecutionEngine {
+
+ // OrcMCJITReplacement needs to do a little extra book-keeping to ensure that
+ // Orc's automatic finalization doesn't kick in earlier than MCJIT clients are
+ // expecting - see finalizeMemory.
+ class MCJITReplacementMemMgr : public MCJITMemoryManager {
+ public:
+ MCJITReplacementMemMgr(OrcMCJITReplacement &M,
+ std::shared_ptr<MCJITMemoryManager> ClientMM)
+ : M(M), ClientMM(std::move(ClientMM)) {}
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override {
+ uint8_t *Addr =
+ ClientMM->allocateCodeSection(Size, Alignment, SectionID,
+ SectionName);
+ M.SectionsAllocatedSinceLastLoad.insert(Addr);
+ return Addr;
+ }
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool IsReadOnly) override {
+ uint8_t *Addr = ClientMM->allocateDataSection(Size, Alignment, SectionID,
+ SectionName, IsReadOnly);
+ M.SectionsAllocatedSinceLastLoad.insert(Addr);
+ return Addr;
+ }
+
+ void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+ uintptr_t RODataSize, uint32_t RODataAlign,
+ uintptr_t RWDataSize,
+ uint32_t RWDataAlign) override {
+ return ClientMM->reserveAllocationSpace(CodeSize, CodeAlign,
+ RODataSize, RODataAlign,
+ RWDataSize, RWDataAlign);
+ }
+
+ bool needsToReserveAllocationSpace() override {
+ return ClientMM->needsToReserveAllocationSpace();
+ }
+
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
+ return ClientMM->registerEHFrames(Addr, LoadAddr, Size);
+ }
+
+ void deregisterEHFrames() override {
+ return ClientMM->deregisterEHFrames();
+ }
+
+ void notifyObjectLoaded(RuntimeDyld &RTDyld,
+ const object::ObjectFile &O) override {
+ return ClientMM->notifyObjectLoaded(RTDyld, O);
+ }
+
+ void notifyObjectLoaded(ExecutionEngine *EE,
+ const object::ObjectFile &O) override {
+ return ClientMM->notifyObjectLoaded(EE, O);
+ }
+
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override {
+ // Each set of objects loaded will be finalized exactly once, but since
+ // symbol lookup during relocation may recursively trigger the
+ // loading/relocation of other modules, and since we're forwarding all
+ // finalizeMemory calls to a single underlying memory manager, we need to
+ // defer forwarding the call on until all necessary objects have been
+ // loaded. Otherwise, during the relocation of a leaf object, we will end
+ // up finalizing memory, causing a crash further up the stack when we
+ // attempt to apply relocations to finalized memory.
+ // To avoid finalizing too early, look at how many objects have been
+ // loaded but not yet finalized. This is a bit of a hack that relies on
+ // the fact that we're lazily emitting object files: The only way you can
+ // get more than one set of objects loaded but not yet finalized is if
+ // they were loaded during relocation of another set.
+ if (M.UnfinalizedSections.size() == 1)
+ return ClientMM->finalizeMemory(ErrMsg);
+ return false;
+ }
+
+ private:
+ OrcMCJITReplacement &M;
+ std::shared_ptr<MCJITMemoryManager> ClientMM;
+ };
+
+ class LinkingORCResolver : public orc::SymbolResolver {
+ public:
+ LinkingORCResolver(OrcMCJITReplacement &M) : M(M) {}
+
+ SymbolNameSet getResponsibilitySet(const SymbolNameSet &Symbols) override {
+ SymbolNameSet Result;
+
+ for (auto &S : Symbols) {
+ if (auto Sym = M.findMangledSymbol(*S)) {
+ if (!Sym.getFlags().isStrong())
+ Result.insert(S);
+ } else if (auto Err = Sym.takeError()) {
+ M.reportError(std::move(Err));
+ return SymbolNameSet();
+ } else {
+ if (auto Sym2 = M.ClientResolver->findSymbolInLogicalDylib(*S)) {
+ if (!Sym2.getFlags().isStrong())
+ Result.insert(S);
+ } else if (auto Err = Sym2.takeError()) {
+ M.reportError(std::move(Err));
+ return SymbolNameSet();
+ } else
+ Result.insert(S);
+ }
+ }
+
+ return Result;
+ }
+
+ SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) override {
+ SymbolNameSet UnresolvedSymbols;
+ bool NewSymbolsResolved = false;
+
+ for (auto &S : Symbols) {
+ if (auto Sym = M.findMangledSymbol(*S)) {
+ if (auto Addr = Sym.getAddress()) {
+ Query->notifySymbolMetRequiredState(
+ S, JITEvaluatedSymbol(*Addr, Sym.getFlags()));
+ NewSymbolsResolved = true;
+ } else {
+ M.ES.legacyFailQuery(*Query, Addr.takeError());
+ return SymbolNameSet();
+ }
+ } else if (auto Err = Sym.takeError()) {
+ M.ES.legacyFailQuery(*Query, std::move(Err));
+ return SymbolNameSet();
+ } else {
+ if (auto Sym2 = M.ClientResolver->findSymbol(*S)) {
+ if (auto Addr = Sym2.getAddress()) {
+ Query->notifySymbolMetRequiredState(
+ S, JITEvaluatedSymbol(*Addr, Sym2.getFlags()));
+ NewSymbolsResolved = true;
+ } else {
+ M.ES.legacyFailQuery(*Query, Addr.takeError());
+ return SymbolNameSet();
+ }
+ } else if (auto Err = Sym2.takeError()) {
+ M.ES.legacyFailQuery(*Query, std::move(Err));
+ return SymbolNameSet();
+ } else
+ UnresolvedSymbols.insert(S);
+ }
+ }
+
+ if (NewSymbolsResolved && Query->isComplete())
+ Query->handleComplete();
+
+ return UnresolvedSymbols;
+ }
+
+ private:
+ OrcMCJITReplacement &M;
+ };
+
+private:
+ static ExecutionEngine *
+ createOrcMCJITReplacement(std::string *ErrorMsg,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) {
+ return new OrcMCJITReplacement(std::move(MemMgr), std::move(Resolver),
+ std::move(TM));
+ }
+
+ void reportError(Error Err) {
+ logAllUnhandledErrors(std::move(Err), errs(), "MCJIT error: ");
+ }
+
+public:
+ OrcMCJITReplacement(std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> ClientResolver,
+ std::unique_ptr<TargetMachine> TM)
+ : ExecutionEngine(TM->createDataLayout()), TM(std::move(TM)),
+ MemMgr(
+ std::make_shared<MCJITReplacementMemMgr>(*this, std::move(MemMgr))),
+ Resolver(std::make_shared<LinkingORCResolver>(*this)),
+ ClientResolver(std::move(ClientResolver)), NotifyObjectLoaded(*this),
+ NotifyFinalized(*this),
+ ObjectLayer(
+ AcknowledgeORCv1Deprecation, ES,
+ [this](VModuleKey K) {
+ return ObjectLayerT::Resources{this->MemMgr, this->Resolver};
+ },
+ NotifyObjectLoaded, NotifyFinalized),
+ CompileLayer(AcknowledgeORCv1Deprecation, ObjectLayer,
+ SimpleCompiler(*this->TM),
+ [this](VModuleKey K, std::unique_ptr<Module> M) {
+ Modules.push_back(std::move(M));
+ }),
+ LazyEmitLayer(AcknowledgeORCv1Deprecation, CompileLayer) {}
+
+ static void Register() {
+ OrcMCJITReplacementCtor = createOrcMCJITReplacement;
+ }
+
+ void addModule(std::unique_ptr<Module> M) override {
+ // If this module doesn't have a DataLayout attached then attach the
+ // default.
+ if (M->getDataLayout().isDefault()) {
+ M->setDataLayout(getDataLayout());
+ } else {
+ assert(M->getDataLayout() == getDataLayout() && "DataLayout Mismatch");
+ }
+
+ // Rename, bump linkage and record static constructors and destructors.
+ // We have to do this before we hand over ownership of the module to the
+ // JIT.
+ std::vector<std::string> CtorNames, DtorNames;
+ {
+ unsigned CtorId = 0, DtorId = 0;
+ for (auto Ctor : orc::getConstructors(*M)) {
+ std::string NewCtorName = ("__ORCstatic_ctor." + Twine(CtorId++)).str();
+ Ctor.Func->setName(NewCtorName);
+ Ctor.Func->setLinkage(GlobalValue::ExternalLinkage);
+ Ctor.Func->setVisibility(GlobalValue::HiddenVisibility);
+ CtorNames.push_back(mangle(NewCtorName));
+ }
+ for (auto Dtor : orc::getDestructors(*M)) {
+ std::string NewDtorName = ("__ORCstatic_dtor." + Twine(DtorId++)).str();
+ Dtor.Func->setName(NewDtorName);
+ Dtor.Func->setLinkage(GlobalValue::ExternalLinkage);
+ Dtor.Func->setVisibility(GlobalValue::HiddenVisibility);
+ DtorNames.push_back(mangle(NewDtorName));
+ }
+ }
+
+ auto K = ES.allocateVModule();
+
+ UnexecutedConstructors[K] = std::move(CtorNames);
+ UnexecutedDestructors[K] = std::move(DtorNames);
+
+ cantFail(LazyEmitLayer.addModule(K, std::move(M)));
+ }
+
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override {
+ cantFail(ObjectLayer.addObject(
+ ES.allocateVModule(), MemoryBuffer::getMemBufferCopy(O->getData())));
+ }
+
+ void addObjectFile(object::OwningBinary<object::ObjectFile> O) override {
+ std::unique_ptr<object::ObjectFile> Obj;
+ std::unique_ptr<MemoryBuffer> ObjBuffer;
+ std::tie(Obj, ObjBuffer) = O.takeBinary();
+ cantFail(ObjectLayer.addObject(ES.allocateVModule(), std::move(ObjBuffer)));
+ }
+
+ void addArchive(object::OwningBinary<object::Archive> A) override {
+ Archives.push_back(std::move(A));
+ }
+
+ bool removeModule(Module *M) override {
+ auto I = Modules.begin();
+ for (auto E = Modules.end(); I != E; ++I)
+ if (I->get() == M)
+ break;
+ if (I == Modules.end())
+ return false;
+ Modules.erase(I);
+ return true;
+ }
+
+ uint64_t getSymbolAddress(StringRef Name) {
+ return cantFail(findSymbol(Name).getAddress());
+ }
+
+ JITSymbol findSymbol(StringRef Name) {
+ return findMangledSymbol(mangle(Name));
+ }
+
+ void finalizeObject() override {
+ // This is deprecated - Aim to remove in ExecutionEngine.
+ // REMOVE IF POSSIBLE - Doesn't make sense for New JIT.
+ }
+
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
+ for (auto &P : UnfinalizedSections)
+ if (P.second.count(LocalAddress))
+ ObjectLayer.mapSectionAddress(P.first, LocalAddress, TargetAddress);
+ }
+
+ uint64_t getGlobalValueAddress(const std::string &Name) override {
+ return getSymbolAddress(Name);
+ }
+
+ uint64_t getFunctionAddress(const std::string &Name) override {
+ return getSymbolAddress(Name);
+ }
+
+ void *getPointerToFunction(Function *F) override {
+ uint64_t FAddr = getSymbolAddress(F->getName());
+ return reinterpret_cast<void *>(static_cast<uintptr_t>(FAddr));
+ }
+
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override {
+ uint64_t Addr = getSymbolAddress(Name);
+ if (!Addr && AbortOnFailure)
+ llvm_unreachable("Missing symbol!");
+ return reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
+ }
+
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ void setObjectCache(ObjectCache *NewCache) override {
+ CompileLayer.getCompiler().setObjectCache(NewCache);
+ }
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ ObjectLayer.setProcessAllSections(ProcessAllSections);
+ }
+
+ void runStaticConstructorsDestructors(bool isDtors) override;
+
+private:
+ JITSymbol findMangledSymbol(StringRef Name) {
+ if (auto Sym = LazyEmitLayer.findSymbol(Name, false))
+ return Sym;
+ if (auto Sym = ClientResolver->findSymbol(Name))
+ return Sym;
+ if (auto Sym = scanArchives(Name))
+ return Sym;
+
+ return nullptr;
+ }
+
+ JITSymbol scanArchives(StringRef Name) {
+ for (object::OwningBinary<object::Archive> &OB : Archives) {
+ object::Archive *A = OB.getBinary();
+ // Look for our symbols in each Archive
+ auto OptionalChildOrErr = A->findSym(Name);
+ if (!OptionalChildOrErr)
+ report_fatal_error(OptionalChildOrErr.takeError());
+ auto &OptionalChild = *OptionalChildOrErr;
+ if (OptionalChild) {
+ // FIXME: Support nested archives?
+ Expected<std::unique_ptr<object::Binary>> ChildBinOrErr =
+ OptionalChild->getAsBinary();
+ if (!ChildBinOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(ChildBinOrErr.takeError());
+ continue;
+ }
+ std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
+ if (ChildBin->isObject()) {
+ cantFail(ObjectLayer.addObject(
+ ES.allocateVModule(),
+ MemoryBuffer::getMemBufferCopy(ChildBin->getData())));
+ if (auto Sym = ObjectLayer.findSymbol(Name, true))
+ return Sym;
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ class NotifyObjectLoadedT {
+ public:
+ using LoadedObjInfoListT =
+ std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>;
+
+ NotifyObjectLoadedT(OrcMCJITReplacement &M) : M(M) {}
+
+ void operator()(VModuleKey K, const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &Info) const {
+ M.UnfinalizedSections[K] = std::move(M.SectionsAllocatedSinceLastLoad);
+ M.SectionsAllocatedSinceLastLoad = SectionAddrSet();
+ M.MemMgr->notifyObjectLoaded(&M, Obj);
+ }
+ private:
+ OrcMCJITReplacement &M;
+ };
+
+ class NotifyFinalizedT {
+ public:
+ NotifyFinalizedT(OrcMCJITReplacement &M) : M(M) {}
+
+ void operator()(VModuleKey K, const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &Info) {
+ M.UnfinalizedSections.erase(K);
+ }
+
+ private:
+ OrcMCJITReplacement &M;
+ };
+
+ std::string mangle(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mang.getNameWithPrefix(MangledNameStream, Name, getDataLayout());
+ }
+ return MangledName;
+ }
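+
+  // Illustrative note: on targets whose DataLayout specifies a global prefix
+  // (e.g. Darwin), mangle("main") yields "_main"; on most ELF targets the
+  // name is returned unchanged.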
+
+ using ObjectLayerT = LegacyRTDyldObjectLinkingLayer;
+ using CompileLayerT = LegacyIRCompileLayer<ObjectLayerT, orc::SimpleCompiler>;
+ using LazyEmitLayerT = LazyEmittingLayer<CompileLayerT>;
+
+ ExecutionSession ES;
+
+ std::unique_ptr<TargetMachine> TM;
+ std::shared_ptr<MCJITReplacementMemMgr> MemMgr;
+ std::shared_ptr<LinkingORCResolver> Resolver;
+ std::shared_ptr<LegacyJITSymbolResolver> ClientResolver;
+ Mangler Mang;
+
+ // IMPORTANT: ShouldDelete *must* come before LocalModules: The shared_ptr
+ // delete blocks in LocalModules refer to the ShouldDelete map, so
+ // LocalModules needs to be destructed before ShouldDelete.
+ std::map<Module*, bool> ShouldDelete;
+
+ NotifyObjectLoadedT NotifyObjectLoaded;
+ NotifyFinalizedT NotifyFinalized;
+
+ ObjectLayerT ObjectLayer;
+ CompileLayerT CompileLayer;
+ LazyEmitLayerT LazyEmitLayer;
+
+ std::map<VModuleKey, std::vector<std::string>> UnexecutedConstructors;
+ std::map<VModuleKey, std::vector<std::string>> UnexecutedDestructors;
+
+  // We need to track the set of sections allocated for each object that has
+  // been emitted but not yet finalized, so that we can forward the
+  // mapSectionAddress calls appropriately.
+ using SectionAddrSet = std::set<const void *>;
+ SectionAddrSet SectionsAllocatedSinceLastLoad;
+ std::map<VModuleKey, SectionAddrSet> UnfinalizedSections;
+
+ std::vector<object::OwningBinary<object::Archive>> Archives;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_ORC_MCJITREPLACEMENT_H
diff --git a/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp b/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp
new file mode 100644
index 0000000000000..367b3639f8411
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/RPCUtils.cpp
@@ -0,0 +1,54 @@
+//===--------------- RPCUtils.cpp - RPCUtils implementation ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// RPCUtils implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/RPCUtils.h"
+
+char llvm::orc::rpc::RPCFatalError::ID = 0;
+char llvm::orc::rpc::ConnectionClosed::ID = 0;
+char llvm::orc::rpc::ResponseAbandoned::ID = 0;
+char llvm::orc::rpc::CouldNotNegotiate::ID = 0;
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+std::error_code ConnectionClosed::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCConnectionClosed);
+}
+
+void ConnectionClosed::log(raw_ostream &OS) const {
+ OS << "RPC connection already closed";
+}
+
+std::error_code ResponseAbandoned::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCResponseAbandoned);
+}
+
+void ResponseAbandoned::log(raw_ostream &OS) const {
+ OS << "RPC response abandoned";
+}
+
+CouldNotNegotiate::CouldNotNegotiate(std::string Signature)
+ : Signature(std::move(Signature)) {}
+
+std::error_code CouldNotNegotiate::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCCouldNotNegotiateFunction);
+}
+
+void CouldNotNegotiate::log(raw_ostream &OS) const {
+ OS << "Could not negotiate RPC function " << Signature;
+}
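+
+// For example (an illustrative call, not from this file):
+//   make_error<CouldNotNegotiate>("int32_t add(int32_t, int32_t)")
+// produces an error that logs as:
+//   "Could not negotiate RPC function int32_t add(int32_t, int32_t)"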
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp b/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
new file mode 100644
index 0000000000000..939cd539d1fb0
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
@@ -0,0 +1,224 @@
+//===-- RTDyldObjectLinkingLayer.cpp - RuntimeDyld backed ORC ObjectLayer -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+
+namespace {
+
+using namespace llvm;
+using namespace llvm::orc;
+
+class JITDylibSearchOrderResolver : public JITSymbolResolver {
+public:
+ JITDylibSearchOrderResolver(MaterializationResponsibility &MR) : MR(MR) {}
+
+  void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) override {
+ auto &ES = MR.getTargetJITDylib().getExecutionSession();
+ SymbolNameSet InternedSymbols;
+
+ // Intern the requested symbols: lookup takes interned strings.
+ for (auto &S : Symbols)
+ InternedSymbols.insert(ES.intern(S));
+
+ // Build an OnResolve callback to unwrap the interned strings and pass them
+ // to the OnResolved callback.
+ auto OnResolvedWithUnwrap =
+ [OnResolved = std::move(OnResolved)](
+ Expected<SymbolMap> InternedResult) mutable {
+ if (!InternedResult) {
+ OnResolved(InternedResult.takeError());
+ return;
+ }
+
+ LookupResult Result;
+ for (auto &KV : *InternedResult)
+ Result[*KV.first] = std::move(KV.second);
+ OnResolved(Result);
+ };
+
+ // Register dependencies for all symbols contained in this set.
+ auto RegisterDependencies = [&](const SymbolDependenceMap &Deps) {
+ MR.addDependenciesForAll(Deps);
+ };
+
+ JITDylibSearchList SearchOrder;
+ MR.getTargetJITDylib().withSearchOrderDo(
+ [&](const JITDylibSearchList &JDs) { SearchOrder = JDs; });
+ ES.lookup(SearchOrder, InternedSymbols, SymbolState::Resolved,
+ std::move(OnResolvedWithUnwrap), RegisterDependencies);
+ }
+
+  Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) override {
+ LookupSet Result;
+
+ for (auto &KV : MR.getSymbols()) {
+ if (Symbols.count(*KV.first))
+ Result.insert(*KV.first);
+ }
+
+ return Result;
+ }
+
+private:
+ MaterializationResponsibility &MR;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+RTDyldObjectLinkingLayer::RTDyldObjectLinkingLayer(
+ ExecutionSession &ES, GetMemoryManagerFunction GetMemoryManager)
+ : ObjectLayer(ES), GetMemoryManager(GetMemoryManager) {}
+
+void RTDyldObjectLinkingLayer::emit(MaterializationResponsibility R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+
+ // This method launches an asynchronous link step that will fulfill our
+ // materialization responsibility. We need to switch R to be heap
+ // allocated before that happens so it can live as long as the asynchronous
+ // link needs it to (i.e. it must be able to outlive this method).
+ auto SharedR = std::make_shared<MaterializationResponsibility>(std::move(R));
+
+ auto &ES = getExecutionSession();
+
+  // Create a MemoryBufferRef-backed MemoryBuffer (i.e. a shallow copy) of
+  // the underlying buffer to pass into RuntimeDyld. This allows us to hold
+  // ownership of the real underlying buffer and return it to the user once
+  // the object has been emitted.
+ auto ObjBuffer = MemoryBuffer::getMemBuffer(O->getMemBufferRef(), false);
+
+ auto Obj = object::ObjectFile::createObjectFile(*ObjBuffer);
+
+ if (!Obj) {
+ getExecutionSession().reportError(Obj.takeError());
+ SharedR->failMaterialization();
+ return;
+ }
+
+ // Collect the internal symbols from the object file: We will need to
+ // filter these later.
+ auto InternalSymbols = std::make_shared<std::set<StringRef>>();
+ {
+ for (auto &Sym : (*Obj)->symbols()) {
+ if (!(Sym.getFlags() & object::BasicSymbolRef::SF_Global)) {
+ if (auto SymName = Sym.getName())
+ InternalSymbols->insert(*SymName);
+        else {
+          // R was moved into SharedR above, so fail through SharedR.
+          ES.reportError(SymName.takeError());
+          SharedR->failMaterialization();
+          return;
+        }
+ }
+ }
+ }
+
+  auto K = SharedR->getVModuleKey();
+ RuntimeDyld::MemoryManager *MemMgr = nullptr;
+
+  // Create and record a memory manager for this object.
+ {
+ auto Tmp = GetMemoryManager();
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ MemMgrs.push_back(std::move(Tmp));
+ MemMgr = MemMgrs.back().get();
+ }
+
+ JITDylibSearchOrderResolver Resolver(*SharedR);
+
+ jitLinkForORC(
+      **Obj, std::move(ObjBuffer), *MemMgr, Resolver, ProcessAllSections,
+ [this, K, SharedR, &Obj, InternalSymbols](
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> ResolvedSymbols) {
+ return onObjLoad(K, *SharedR, **Obj, std::move(LoadedObjInfo),
+ ResolvedSymbols, *InternalSymbols);
+ },
+ [this, K, SharedR, O = std::move(O)](Error Err) mutable {
+ onObjEmit(K, std::move(O), *SharedR, std::move(Err));
+ });
+}
+
+Error RTDyldObjectLinkingLayer::onObjLoad(
+ VModuleKey K, MaterializationResponsibility &R, object::ObjectFile &Obj,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> Resolved,
+ std::set<StringRef> &InternalSymbols) {
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ SymbolMap Symbols;
+ for (auto &KV : Resolved) {
+ // Scan the symbols and add them to the Symbols map for resolution.
+
+ // We never claim internal symbols.
+ if (InternalSymbols.count(KV.first))
+ continue;
+
+ auto InternedName = getExecutionSession().intern(KV.first);
+ auto Flags = KV.second.getFlags();
+
+ // Override object flags and claim responsibility for symbols if
+ // requested.
+ if (OverrideObjectFlags || AutoClaimObjectSymbols) {
+ auto I = R.getSymbols().find(InternedName);
+
+ if (OverrideObjectFlags && I != R.getSymbols().end())
+ Flags = I->second;
+ else if (AutoClaimObjectSymbols && I == R.getSymbols().end())
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+
+ Symbols[InternedName] = JITEvaluatedSymbol(KV.second.getAddress(), Flags);
+ }
+
+ if (!ExtraSymbolsToClaim.empty())
+ if (auto Err = R.defineMaterializing(ExtraSymbolsToClaim))
+ return Err;
+
+ if (auto Err = R.notifyResolved(Symbols)) {
+ R.failMaterialization();
+ return Err;
+ }
+
+ if (NotifyLoaded)
+ NotifyLoaded(K, Obj, *LoadedObjInfo);
+
+ return Error::success();
+}
+
+void RTDyldObjectLinkingLayer::onObjEmit(
+ VModuleKey K, std::unique_ptr<MemoryBuffer> ObjBuffer,
+ MaterializationResponsibility &R, Error Err) {
+ if (Err) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ if (auto Err = R.notifyEmitted()) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ if (NotifyEmitted)
+ NotifyEmitted(K, std::move(ObjBuffer));
+}
+
+LegacyRTDyldObjectLinkingLayer::LegacyRTDyldObjectLinkingLayer(
+ ExecutionSession &ES, ResourcesGetter GetResources,
+ NotifyLoadedFtor NotifyLoaded, NotifyFinalizedFtor NotifyFinalized,
+ NotifyFreedFtor NotifyFreed)
+ : ES(ES), GetResources(std::move(GetResources)),
+ NotifyLoaded(std::move(NotifyLoaded)),
+ NotifyFinalized(std::move(NotifyFinalized)),
+ NotifyFreed(std::move(NotifyFreed)), ProcessAllSections(false) {}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp b/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
new file mode 100644
index 0000000000000..f22acf50419d4
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
@@ -0,0 +1,307 @@
+//===-- SpeculateAnalyses.cpp - Speculate analyses implementation -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+namespace {
+using namespace llvm;
+SmallVector<const BasicBlock *, 8> findBBwithCalls(const Function &F,
+ bool IndirectCall = false) {
+ SmallVector<const BasicBlock *, 8> BBs;
+
+ auto findCallInst = [&IndirectCall](const Instruction &I) {
+ if (auto Call = dyn_cast<CallBase>(&I))
+ return Call->isIndirectCall() ? IndirectCall : true;
+ else
+ return false;
+ };
+ for (auto &BB : F)
+ if (findCallInst(*BB.getTerminator()) ||
+ llvm::any_of(BB.instructionsWithoutDebug(), findCallInst))
+ BBs.emplace_back(&BB);
+
+ return BBs;
+}
+} // namespace
+
+// Implementations of Queries shouldn't need to lock resources such as the
+// LLVMContext: each argument (function) has a non-shared LLVMContext. If a
+// Query carries state, a suitable locking scheme must be provided.
+namespace llvm {
+namespace orc {
+
+// Collect direct calls only
+void SpeculateQuery::findCalles(const BasicBlock *BB,
+ DenseSet<StringRef> &CallesNames) {
+ assert(BB != nullptr && "Traversing Null BB to find calls?");
+
+ auto getCalledFunction = [&CallesNames](const CallBase *Call) {
+ auto CalledValue = Call->getCalledOperand()->stripPointerCasts();
+ if (auto DirectCall = dyn_cast<Function>(CalledValue))
+ CallesNames.insert(DirectCall->getName());
+ };
+ for (auto &I : BB->instructionsWithoutDebug())
+ if (auto CI = dyn_cast<CallInst>(&I))
+ getCalledFunction(CI);
+
+ if (auto II = dyn_cast<InvokeInst>(BB->getTerminator()))
+ getCalledFunction(II);
+}
+
+bool SpeculateQuery::isStraightLine(const Function &F) {
+ return llvm::all_of(F.getBasicBlockList(), [](const BasicBlock &BB) {
+ return BB.getSingleSuccessor() != nullptr;
+ });
+}
+
+// BlockFreqQuery Implementations
+
+size_t BlockFreqQuery::numBBToGet(size_t numBB) {
+ // small CFG
+ if (numBB < 4)
+ return numBB;
+ // mid-size CFG
+ else if (numBB < 20)
+ return (numBB / 2);
+ else
+ return (numBB / 2) + (numBB / 4);
+}
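+
+// Worked example of the thresholds above (illustrative only):
+//   numBBToGet(3)  == 3                 // small CFG: keep every block
+//   numBBToGet(10) == 5                 // mid-size CFG: keep half
+//   numBBToGet(40) == 20 + 10 == 30     // large CFG: keep three quarters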
+
+BlockFreqQuery::ResultTy BlockFreqQuery::operator()(Function &F) {
+ DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
+ DenseSet<StringRef> Calles;
+ SmallVector<std::pair<const BasicBlock *, uint64_t>, 8> BBFreqs;
+
+ PassBuilder PB;
+ FunctionAnalysisManager FAM;
+ PB.registerFunctionAnalyses(FAM);
+
+ auto IBBs = findBBwithCalls(F);
+
+ if (IBBs.empty())
+ return None;
+
+ auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
+
+ for (const auto I : IBBs)
+ BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
+
+ assert(IBBs.size() == BBFreqs.size() && "BB Count Mismatch");
+
+  llvm::sort(BBFreqs.begin(), BBFreqs.end(),
+             [](decltype(BBFreqs)::const_reference BBF,
+                decltype(BBFreqs)::const_reference BBS) {
+               return BBF.second > BBS.second;
+             });
+
+  // Take the hottest blocks, ignoring the number of direct calls in each BB.
+ auto Topk = numBBToGet(BBFreqs.size());
+
+ for (size_t i = 0; i < Topk; i++)
+ findCalles(BBFreqs[i].first, Calles);
+
+ assert(!Calles.empty() && "Running Analysis on Function with no calls?");
+
+ CallerAndCalles.insert({F.getName(), std::move(Calles)});
+
+ return CallerAndCalles;
+}
+
+// SequenceBBQuery Implementation
+std::size_t SequenceBBQuery::getHottestBlocks(std::size_t TotalBlocks) {
+ if (TotalBlocks == 1)
+ return TotalBlocks;
+ return TotalBlocks / 2;
+}
+
+// FIXME: Find a better implementation.
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::rearrangeBB(const Function &F, const BlockListTy &BBList) {
+ BlockListTy RearrangedBBSet;
+
+ for (auto &Block : F.getBasicBlockList())
+ if (llvm::is_contained(BBList, &Block))
+ RearrangedBBSet.push_back(&Block);
+
+ assert(RearrangedBBSet.size() == BBList.size() &&
+ "BasicBlock missing while rearranging?");
+ return RearrangedBBSet;
+}
+
+void SequenceBBQuery::traverseToEntryBlock(const BasicBlock *AtBB,
+ const BlockListTy &CallerBlocks,
+ const BackEdgesInfoTy &BackEdgesInfo,
+ const BranchProbabilityInfo *BPI,
+ VisitedBlocksInfoTy &VisitedBlocks) {
+ auto Itr = VisitedBlocks.find(AtBB);
+ if (Itr != VisitedBlocks.end()) { // already visited.
+ if (!Itr->second.Upward)
+ return;
+ Itr->second.Upward = false;
+ } else {
+    // Create hint for newly discovered blocks.
+ WalkDirection BlockHint;
+ BlockHint.Upward = false;
+ // FIXME: Expensive Check
+ if (llvm::is_contained(CallerBlocks, AtBB))
+ BlockHint.CallerBlock = true;
+ VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
+ }
+
+ const_pred_iterator PIt = pred_begin(AtBB), EIt = pred_end(AtBB);
+  // TODO: Move this check to the top. Once we have the machinery to launch
+  // speculative compiles for functions in the entry BB, this will trigger
+  // those compiles before the program starts running.
+ if (PIt == EIt) // No Preds.
+ return;
+
+ DenseSet<const BasicBlock *> PredSkipNodes;
+
+ // Since we are checking for predecessor's backedges, this Block
+ // occurs in second position.
+ for (auto &I : BackEdgesInfo)
+ if (I.second == AtBB)
+ PredSkipNodes.insert(I.first);
+
+  // Skip predecessors which are sources of back-edges.
+ for (; PIt != EIt; ++PIt)
+ // checking EdgeHotness is cheaper
+ if (BPI->isEdgeHot(*PIt, AtBB) && !PredSkipNodes.count(*PIt))
+ traverseToEntryBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+}
+
+void SequenceBBQuery::traverseToExitBlock(const BasicBlock *AtBB,
+ const BlockListTy &CallerBlocks,
+ const BackEdgesInfoTy &BackEdgesInfo,
+ const BranchProbabilityInfo *BPI,
+ VisitedBlocksInfoTy &VisitedBlocks) {
+ auto Itr = VisitedBlocks.find(AtBB);
+ if (Itr != VisitedBlocks.end()) { // already visited.
+ if (!Itr->second.Downward)
+ return;
+ Itr->second.Downward = false;
+ } else {
+    // Create hint for newly discovered blocks.
+ WalkDirection BlockHint;
+ BlockHint.Downward = false;
+ // FIXME: Expensive Check
+ if (llvm::is_contained(CallerBlocks, AtBB))
+ BlockHint.CallerBlock = true;
+ VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
+ }
+
+ succ_const_iterator PIt = succ_begin(AtBB), EIt = succ_end(AtBB);
+ if (PIt == EIt) // No succs.
+ return;
+
+ // If there are hot edges, then compute SuccSkipNodes.
+ DenseSet<const BasicBlock *> SuccSkipNodes;
+
+ // Since we are checking for successor's backedges, this Block
+ // occurs in first position.
+ for (auto &I : BackEdgesInfo)
+ if (I.first == AtBB)
+ SuccSkipNodes.insert(I.second);
+
+ for (; PIt != EIt; ++PIt)
+ if (BPI->isEdgeHot(AtBB, *PIt) && !SuccSkipNodes.count(*PIt))
+ traverseToExitBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+}
+
+// Get the block frequencies of the caller blocks, take the most frequently
+// executed ones, then walk from those blocks towards the entry and exit,
+// discovering the basic blocks with calls.
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::queryCFG(Function &F, const BlockListTy &CallerBlocks) {
+
+ BlockFreqInfoTy BBFreqs;
+ VisitedBlocksInfoTy VisitedBlocks;
+ BackEdgesInfoTy BackEdgesInfo;
+
+ PassBuilder PB;
+ FunctionAnalysisManager FAM;
+ PB.registerFunctionAnalyses(FAM);
+
+ auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
+
+ llvm::FindFunctionBackedges(F, BackEdgesInfo);
+
+ for (const auto I : CallerBlocks)
+ BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
+
+ llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference Bbf,
+ decltype(BBFreqs)::const_reference Bbs) {
+ return Bbf.second > Bbs.second;
+ });
+
+ ArrayRef<std::pair<const BasicBlock *, uint64_t>> HotBlocksRef(BBFreqs);
+ HotBlocksRef =
+ HotBlocksRef.drop_back(BBFreqs.size() - getHottestBlocks(BBFreqs.size()));
+
+ BranchProbabilityInfo *BPI =
+ FAM.getCachedResult<BranchProbabilityAnalysis>(F);
+
+  // Visit the N hottest blocks, traversing upwards to the entry block and
+  // downwards to the exit blocks.
+
+ for (auto I : HotBlocksRef) {
+ traverseToEntryBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+ traverseToExitBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+ }
+
+ BlockListTy MinCallerBlocks;
+ for (auto &I : VisitedBlocks)
+ if (I.second.CallerBlock)
+      MinCallerBlocks.push_back(I.first);
+
+ return rearrangeBB(F, MinCallerBlocks);
+}
+
+SpeculateQuery::ResultTy SequenceBBQuery::operator()(Function &F) {
+  // FIXME: Reduce the number of lists used here.
+ DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
+ DenseSet<StringRef> Calles;
+ BlockListTy SequencedBlocks;
+ BlockListTy CallerBlocks;
+
+ CallerBlocks = findBBwithCalls(F);
+ if (CallerBlocks.empty())
+ return None;
+
+ if (isStraightLine(F))
+ SequencedBlocks = rearrangeBB(F, CallerBlocks);
+ else
+ SequencedBlocks = queryCFG(F, CallerBlocks);
+
+ for (auto BB : SequencedBlocks)
+ findCalles(BB, Calles);
+
+ CallerAndCalles.insert({F.getName(), std::move(Calles)});
+ return CallerAndCalles;
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/Speculation.cpp b/llvm/lib/ExecutionEngine/Orc/Speculation.cpp
new file mode 100644
index 0000000000000..f29201c147a12
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/Speculation.cpp
@@ -0,0 +1,146 @@
+//===---------- Speculation.cpp - Utilities for Speculation ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Speculation.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/Debug.h"
+
+#include <vector>
+
+namespace llvm {
+
+namespace orc {
+
+// ImplSymbolMap methods
+void ImplSymbolMap::trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD) {
+ assert(SrcJD && "Tracking on Null Source .impl dylib");
+ std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
+ for (auto &I : ImplMaps) {
+ auto It = Maps.insert({I.first, {I.second.Aliasee, SrcJD}});
+    // FIXME: Check the rationale when independent dylibs have the same
+    // symbol name.
+ assert(It.second && "ImplSymbols are already tracked for this Symbol?");
+ (void)(It);
+ }
+}
+
+// Trigger Speculative Compiles.
+void Speculator::speculateForEntryPoint(Speculator *Ptr, uint64_t StubId) {
+ assert(Ptr && " Null Address Received in orc_speculate_for ");
+ Ptr->speculateFor(StubId);
+}
+
+Error Speculator::addSpeculationRuntime(JITDylib &JD,
+ MangleAndInterner &Mangle) {
+ JITEvaluatedSymbol ThisPtr(pointerToJITTargetAddress(this),
+ JITSymbolFlags::Exported);
+ JITEvaluatedSymbol SpeculateForEntryPtr(
+ pointerToJITTargetAddress(&speculateForEntryPoint),
+ JITSymbolFlags::Exported);
+ return JD.define(absoluteSymbols({
+ {Mangle("__orc_speculator"), ThisPtr}, // Data Symbol
+ {Mangle("__orc_speculate_for"), SpeculateForEntryPtr} // Callable Symbol
+ }));
+}
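+
+// A hypothetical client-side sketch (the session, dylib, and DataLayout
+// names are assumptions, not part of this file):
+//
+//   MangleAndInterner Mangle(ES, DL);
+//   if (auto Err = S.addSpeculationRuntime(MainJD, Mangle))
+//     ES.reportError(std::move(Err));
+//
+// after which instrumented code can resolve __orc_speculator and
+// __orc_speculate_for.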
+
+// If two modules share the same LLVMContext, different threads must not
+// access those modules concurrently without locking the associated
+// LLVMContext. This implementation follows that contract.
+void IRSpeculationLayer::emit(MaterializationResponsibility R,
+ ThreadSafeModule TSM) {
+
+ assert(TSM && "Speculation Layer received Null Module ?");
+ assert(TSM.getContext().getContext() != nullptr &&
+ "Module with null LLVMContext?");
+
+  // Instrument the runtime calls; lock the Module while mutating it.
+ TSM.withModuleDo([this, &R](Module &M) {
+ auto &MContext = M.getContext();
+ auto SpeculatorVTy = StructType::create(MContext, "Class.Speculator");
+ auto RuntimeCallTy = FunctionType::get(
+ Type::getVoidTy(MContext),
+ {SpeculatorVTy->getPointerTo(), Type::getInt64Ty(MContext)}, false);
+ auto RuntimeCall =
+ Function::Create(RuntimeCallTy, Function::LinkageTypes::ExternalLinkage,
+ "__orc_speculate_for", &M);
+ auto SpeclAddr = new GlobalVariable(
+ M, SpeculatorVTy, false, GlobalValue::LinkageTypes::ExternalLinkage,
+ nullptr, "__orc_speculator");
+
+ IRBuilder<> Mutator(MContext);
+
+    // The QueryAnalysis is allowed to transform the IR source; for example,
+    // SimplifyCFG helps the static branch prediction heuristics!
+ for (auto &Fn : M.getFunctionList()) {
+ if (!Fn.isDeclaration()) {
+
+ auto IRNames = QueryAnalysis(Fn);
+        // Instrument and register only if the Query produced a result.
+ if (IRNames.hasValue()) {
+
+ // Emit globals for each function.
+ auto LoadValueTy = Type::getInt8Ty(MContext);
+ auto SpeculatorGuard = new GlobalVariable(
+ M, LoadValueTy, false, GlobalValue::LinkageTypes::InternalLinkage,
+ ConstantInt::get(LoadValueTy, 0),
+ "__orc_speculate.guard.for." + Fn.getName());
+ SpeculatorGuard->setAlignment(Align::None());
+ SpeculatorGuard->setUnnamedAddr(GlobalValue::UnnamedAddr::Local);
+
+ BasicBlock &ProgramEntry = Fn.getEntryBlock();
+          // Create BasicBlocks before the program's entry basic block.
+ BasicBlock *SpeculateBlock = BasicBlock::Create(
+ MContext, "__orc_speculate.block", &Fn, &ProgramEntry);
+ BasicBlock *SpeculateDecisionBlock = BasicBlock::Create(
+ MContext, "__orc_speculate.decision.block", &Fn, SpeculateBlock);
+
+ assert(SpeculateDecisionBlock == &Fn.getEntryBlock() &&
+ "SpeculateDecisionBlock not updated?");
+ Mutator.SetInsertPoint(SpeculateDecisionBlock);
+
+ auto LoadGuard =
+ Mutator.CreateLoad(LoadValueTy, SpeculatorGuard, "guard.value");
+          // If the just-loaded guard value equals 0, branch to the
+          // speculate block.
+ auto CanSpeculate =
+ Mutator.CreateICmpEQ(LoadGuard, ConstantInt::get(LoadValueTy, 0),
+ "compare.to.speculate");
+ Mutator.CreateCondBr(CanSpeculate, SpeculateBlock, &ProgramEntry);
+
+ Mutator.SetInsertPoint(SpeculateBlock);
+ auto ImplAddrToUint =
+ Mutator.CreatePtrToInt(&Fn, Type::getInt64Ty(MContext));
+ Mutator.CreateCall(RuntimeCallTy, RuntimeCall,
+ {SpeclAddr, ImplAddrToUint});
+ Mutator.CreateStore(ConstantInt::get(LoadValueTy, 1),
+ SpeculatorGuard);
+ Mutator.CreateBr(&ProgramEntry);
+
+ assert(Mutator.GetInsertBlock()->getParent() == &Fn &&
+ "IR builder association mismatch?");
+ S.registerSymbols(internToJITSymbols(IRNames.getValue()),
+ &R.getTargetJITDylib());
+ }
+ }
+ }
+ });
+
+ assert(!TSM.withModuleDo([](const Module &M) { return verifyModule(M); }) &&
+ "Speculation Instrumentation breaks IR?");
+
+ NextLayer.emit(std::move(R), std::move(TSM));
+}
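+
+// For orientation, the instrumentation above rewrites each selected function
+// roughly as follows (IR sketch for a hypothetical function @foo; block and
+// value names match the strings used above):
+//
+//   __orc_speculate.decision.block:
+//     %guard.value = load i8, i8* @"__orc_speculate.guard.for.foo"
+//     %compare.to.speculate = icmp eq i8 %guard.value, 0
+//     br i1 %compare.to.speculate, label %__orc_speculate.block, label %entry
+//
+//   __orc_speculate.block:
+//     %0 = ptrtoint void ()* @foo to i64
+//     call void @__orc_speculate_for(%Class.Speculator* @__orc_speculator,
+//                                    i64 %0)
+//     store i8 1, i8* @"__orc_speculate.guard.for.foo"
+//     br label %entry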
+
+} // namespace orc
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp b/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
new file mode 100644
index 0000000000000..1f4e6f1321150
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
@@ -0,0 +1,64 @@
+//===-- ThreadSafeModule.cpp - Thread safe Module, Context, and Utilities -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+
+namespace llvm {
+namespace orc {
+
+ThreadSafeModule cloneToNewContext(ThreadSafeModule &TSM,
+ GVPredicate ShouldCloneDef,
+ GVModifier UpdateClonedDefSource) {
+ assert(TSM && "Can not clone null module");
+
+ if (!ShouldCloneDef)
+ ShouldCloneDef = [](const GlobalValue &) { return true; };
+
+ return TSM.withModuleDo([&](Module &M) {
+ SmallVector<char, 1> ClonedModuleBuffer;
+
+ {
+ std::set<GlobalValue *> ClonedDefsInSrc;
+ ValueToValueMapTy VMap;
+ auto Tmp = CloneModule(M, VMap, [&](const GlobalValue *GV) {
+ if (ShouldCloneDef(*GV)) {
+ ClonedDefsInSrc.insert(const_cast<GlobalValue *>(GV));
+ return true;
+ }
+ return false;
+ });
+
+ if (UpdateClonedDefSource)
+ for (auto *GV : ClonedDefsInSrc)
+ UpdateClonedDefSource(*GV);
+
+ BitcodeWriter BCWriter(ClonedModuleBuffer);
+
+ BCWriter.writeModule(*Tmp);
+ BCWriter.writeSymtab();
+ BCWriter.writeStrtab();
+ }
+
+ MemoryBufferRef ClonedModuleBufferRef(
+ StringRef(ClonedModuleBuffer.data(), ClonedModuleBuffer.size()),
+ "cloned module buffer");
+ ThreadSafeContext NewTSCtx(std::make_unique<LLVMContext>());
+
+ auto ClonedModule = cantFail(
+ parseBitcodeFile(ClonedModuleBufferRef, *NewTSCtx.getContext()));
+ ClonedModule->setModuleIdentifier(M.getName());
+ return ThreadSafeModule(std::move(ClonedModule), std::move(NewTSCtx));
+ });
+}
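+
+// A minimal usage sketch (hypothetical caller, not part of this file). The
+// explicit null callables exercise the defaulting logic handled above:
+//
+//   ThreadSafeModule Clone =
+//       cloneToNewContext(TSM, GVPredicate(), GVModifier());
+//
+// This clones every definition into a fresh LLVMContext, leaving TSM
+// untouched.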
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
new file mode 100644
index 0000000000000..184388dc4d7a7
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
@@ -0,0 +1,503 @@
+//===-- PerfJITEventListener.cpp - Tell Linux's perf about JITted code ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object that tells perf about JITted
+// functions, including source line information.
+//
+// Documentation for perf jit integration is available at:
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/perf/Documentation/jitdump-specification.txt
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/perf/Documentation/jit-interface.txt
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
+#include <mutex>
+
+#include <sys/mman.h> // mmap()
+#include <sys/types.h> // getpid()
+#include <time.h>      // clock_gettime(), time(), localtime_r()
+#include <unistd.h> // for getpid(), read(), close()
+
+using namespace llvm;
+using namespace llvm::object;
+typedef DILineInfoSpecifier::FileLineInfoKind FileLineInfoKind;
+
+namespace {
+
+// language identifier (XXX: should we generate something better from debug
+// info?)
+#define JIT_LANG "llvm-IR"
+#define LLVM_PERF_JIT_MAGIC \
+ ((uint32_t)'J' << 24 | (uint32_t)'i' << 16 | (uint32_t)'T' << 8 | \
+ (uint32_t)'D')
+#define LLVM_PERF_JIT_VERSION 1
+
+// bit 0: set if the jitdump file is using an architecture-specific timestamp
+// clock source
+#define JITDUMP_FLAGS_ARCH_TIMESTAMP (1ULL << 0)
+
+struct LLVMPerfJitHeader;
+
+class PerfJITEventListener : public JITEventListener {
+public:
+ PerfJITEventListener();
+ ~PerfJITEventListener() {
+ if (MarkerAddr)
+ CloseMarker();
+ }
+
+ void notifyObjectLoaded(ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+ void notifyFreeingObject(ObjectKey K) override;
+
+private:
+ bool InitDebuggingDir();
+ bool OpenMarker();
+ void CloseMarker();
+ static bool FillMachine(LLVMPerfJitHeader &hdr);
+
+ void NotifyCode(Expected<llvm::StringRef> &Symbol, uint64_t CodeAddr,
+ uint64_t CodeSize);
+ void NotifyDebug(uint64_t CodeAddr, DILineInfoTable Lines);
+
+ // cache lookups
+ pid_t Pid;
+
+ // base directory for output data
+ std::string JitPath;
+
+ // output data stream, closed via Dumpstream
+ int DumpFd = -1;
+
+ // output data stream
+ std::unique_ptr<raw_fd_ostream> Dumpstream;
+
+ // prevent concurrent dumps from messing up the output file
+ sys::Mutex Mutex;
+
+ // perf mmap marker
+ void *MarkerAddr = NULL;
+
+ // perf support ready
+ bool SuccessfullyInitialized = false;
+
+ // identifier for functions, primarily to identify when moving them around
+ uint64_t CodeGeneration = 1;
+};
+
+// The following are POD struct definitions from the perf jit specification
+
+enum LLVMPerfJitRecordType {
+ JIT_CODE_LOAD = 0,
+ JIT_CODE_MOVE = 1, // not emitted, code isn't moved
+ JIT_CODE_DEBUG_INFO = 2,
+ JIT_CODE_CLOSE = 3, // not emitted, unnecessary
+ JIT_CODE_UNWINDING_INFO = 4, // not emitted
+
+ JIT_CODE_MAX
+};
+
+struct LLVMPerfJitHeader {
+ uint32_t Magic; // characters "JiTD"
+ uint32_t Version; // header version
+ uint32_t TotalSize; // total size of header
+ uint32_t ElfMach; // elf mach target
+ uint32_t Pad1; // reserved
+ uint32_t Pid;
+ uint64_t Timestamp; // timestamp
+ uint64_t Flags; // flags
+};
+
+// record prefix (mandatory in each record)
+struct LLVMPerfJitRecordPrefix {
+ uint32_t Id; // record type identifier
+ uint32_t TotalSize;
+ uint64_t Timestamp;
+};
+
+struct LLVMPerfJitRecordCodeLoad {
+ LLVMPerfJitRecordPrefix Prefix;
+
+ uint32_t Pid;
+ uint32_t Tid;
+ uint64_t Vma;
+ uint64_t CodeAddr;
+ uint64_t CodeSize;
+ uint64_t CodeIndex;
+};
+
+struct LLVMPerfJitDebugEntry {
+ uint64_t Addr;
+ int Lineno; // source line number starting at 1
+ int Discrim; // column discriminator, 0 is default
+ // followed by null terminated filename, \xff\0 if same as previous entry
+};
+
+struct LLVMPerfJitRecordDebugInfo {
+ LLVMPerfJitRecordPrefix Prefix;
+
+ uint64_t CodeAddr;
+ uint64_t NrEntry;
+ // followed by NrEntry LLVMPerfJitDebugEntry records
+};
+
+static inline uint64_t timespec_to_ns(const struct timespec *ts) {
+ const uint64_t NanoSecPerSec = 1000000000;
+ return ((uint64_t)ts->tv_sec * NanoSecPerSec) + ts->tv_nsec;
+}
+
+static inline uint64_t perf_get_timestamp(void) {
+ struct timespec ts;
+ int ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts);
+ if (ret)
+ return 0;
+
+ return timespec_to_ns(&ts);
+}
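+
+// Example (illustrative): for ts = {2, 500000}, timespec_to_ns() yields
+// 2 * 1000000000 + 500000 = 2000500000 ns. A failed clock_gettime() maps to
+// timestamp 0, which the constructor below treats as "clock source
+// unsupported".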
+
+PerfJITEventListener::PerfJITEventListener() : Pid(::getpid()) {
+ // check if clock-source is supported
+ if (!perf_get_timestamp()) {
+ errs() << "kernel does not support CLOCK_MONOTONIC\n";
+ return;
+ }
+
+ if (!InitDebuggingDir()) {
+ errs() << "could not initialize debugging directory\n";
+ return;
+ }
+
+ std::string Filename;
+ raw_string_ostream FilenameBuf(Filename);
+ FilenameBuf << JitPath << "/jit-" << Pid << ".dump";
+
+ // Need to open ourselves, because we need to hand the FD to OpenMarker() and
+ // raw_fd_ostream doesn't expose the FD.
+  using sys::fs::openFileForReadWrite;
+ if (auto EC =
+ openFileForReadWrite(FilenameBuf.str(), DumpFd,
+ sys::fs::CD_CreateNew, sys::fs::OF_None)) {
+ errs() << "could not open JIT dump file " << FilenameBuf.str() << ": "
+ << EC.message() << "\n";
+ return;
+ }
+
+ Dumpstream = std::make_unique<raw_fd_ostream>(DumpFd, true);
+
+ LLVMPerfJitHeader Header = {0};
+ if (!FillMachine(Header))
+ return;
+
+ // signal this process emits JIT information
+ if (!OpenMarker())
+ return;
+
+ // emit dumpstream header
+ Header.Magic = LLVM_PERF_JIT_MAGIC;
+ Header.Version = LLVM_PERF_JIT_VERSION;
+ Header.TotalSize = sizeof(Header);
+ Header.Pid = Pid;
+ Header.Timestamp = perf_get_timestamp();
+ Dumpstream->write(reinterpret_cast<const char *>(&Header), sizeof(Header));
+
+ // Everything initialized, can do profiling now.
+ if (!Dumpstream->has_error())
+ SuccessfullyInitialized = true;
+}
+
+void PerfJITEventListener::notifyObjectLoaded(
+ ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+
+ if (!SuccessfullyInitialized)
+ return;
+
+ OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+ const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
+
+ // Get the address of the object image for use as a unique identifier
+ std::unique_ptr<DIContext> Context = DWARFContext::create(DebugObj);
+
+ // Use symbol info to iterate over functions in the object.
+ for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
+ SymbolRef Sym = P.first;
+ std::string SourceFileName;
+
+ Expected<SymbolRef::Type> SymTypeOrErr = Sym.getType();
+ if (!SymTypeOrErr) {
+      // There's not much we can do with errors here
+ consumeError(SymTypeOrErr.takeError());
+ continue;
+ }
+ SymbolRef::Type SymType = *SymTypeOrErr;
+ if (SymType != SymbolRef::ST_Function)
+ continue;
+
+ Expected<StringRef> Name = Sym.getName();
+ if (!Name) {
+ consumeError(Name.takeError());
+ continue;
+ }
+
+ Expected<uint64_t> AddrOrErr = Sym.getAddress();
+ if (!AddrOrErr) {
+ consumeError(AddrOrErr.takeError());
+ continue;
+ }
+ uint64_t Size = P.second;
+ object::SectionedAddress Address;
+ Address.Address = *AddrOrErr;
+
+ uint64_t SectionIndex = object::SectionedAddress::UndefSection;
+ if (auto SectOrErr = Sym.getSection())
+ if (*SectOrErr != Obj.section_end())
+ SectionIndex = SectOrErr.get()->getIndex();
+
+    // According to the spec, debugging info has to be emitted before the
+    // corresponding code load record.
+ DILineInfoTable Lines = Context->getLineInfoForAddressRange(
+ {*AddrOrErr, SectionIndex}, Size, FileLineInfoKind::AbsoluteFilePath);
+
+ NotifyDebug(*AddrOrErr, Lines);
+ NotifyCode(Name, *AddrOrErr, Size);
+ }
+
+ Dumpstream->flush();
+}
+
+void PerfJITEventListener::notifyFreeingObject(ObjectKey K) {
+  // perf currently doesn't have an interface for unloading, but munmap()ing
+  // the code section effectively communicates the unload, so that's ok.
+}
+
+bool PerfJITEventListener::InitDebuggingDir() {
+ time_t Time;
+ struct tm LocalTime;
+ char TimeBuffer[sizeof("YYYYMMDD")];
+ SmallString<64> Path;
+
+ // search for location to dump data to
+ if (const char *BaseDir = getenv("JITDUMPDIR"))
+ Path.append(BaseDir);
+ else if (!sys::path::home_directory(Path))
+ Path = ".";
+
+ // create debug directory
+ Path += "/.debug/jit/";
+ if (auto EC = sys::fs::create_directories(Path)) {
+ errs() << "could not create jit cache directory " << Path << ": "
+ << EC.message() << "\n";
+ return false;
+ }
+
+ // create unique directory for dump data related to this process
+ time(&Time);
+ localtime_r(&Time, &LocalTime);
+ strftime(TimeBuffer, sizeof(TimeBuffer), "%Y%m%d", &LocalTime);
+ Path += JIT_LANG "-jit-";
+ Path += TimeBuffer;
+
+ SmallString<128> UniqueDebugDir;
+
+ using sys::fs::createUniqueDirectory;
+ if (auto EC = createUniqueDirectory(Path, UniqueDebugDir)) {
+ errs() << "could not create unique jit cache directory " << UniqueDebugDir
+ << ": " << EC.message() << "\n";
+ return false;
+ }
+
+ JitPath = UniqueDebugDir.str();
+
+ return true;
+}
+
+bool PerfJITEventListener::OpenMarker() {
+ // We mmap the jitdump to create an MMAP RECORD in perf.data file. The mmap
+ // is captured either live (perf record running when we mmap) or in deferred
+ // mode, via /proc/PID/maps. The MMAP record is used as a marker of a jitdump
+ // file for more meta data info about the jitted code. Perf report/annotate
+ // detect this special filename and process the jitdump file.
+ //
+ // Mapping must be PROT_EXEC to ensure it is captured by perf record
+ // even when not using -d option.
+ MarkerAddr = ::mmap(NULL, sys::Process::getPageSizeEstimate(),
+ PROT_READ | PROT_EXEC, MAP_PRIVATE, DumpFd, 0);
+
+ if (MarkerAddr == MAP_FAILED) {
+ errs() << "could not mmap JIT marker\n";
+ return false;
+ }
+ return true;
+}
+
+void PerfJITEventListener::CloseMarker() {
+ if (!MarkerAddr)
+ return;
+
+ munmap(MarkerAddr, sys::Process::getPageSizeEstimate());
+ MarkerAddr = nullptr;
+}
+
+bool PerfJITEventListener::FillMachine(LLVMPerfJitHeader &hdr) {
+ char id[16];
+ struct {
+ uint16_t e_type;
+ uint16_t e_machine;
+ } info;
+
+ size_t RequiredMemory = sizeof(id) + sizeof(info);
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
+ MemoryBuffer::getFileSlice("/proc/self/exe",
+ RequiredMemory,
+ 0);
+
+  // This does not guarantee that enough data was actually read from the
+  // underlying file; the trailing part of the buffer would be zeroed
+  // instead. Given the ELF signature check below, that seems ok: it is
+  // unlikely that the file ends right after the signature, and the worst
+  // consequence would be that perf fails to recognize it.
+ if (auto EC = MB.getError()) {
+ errs() << "could not open /proc/self/exe: " << EC.message() << "\n";
+ return false;
+ }
+
+ memcpy(&id, (*MB)->getBufferStart(), sizeof(id));
+ memcpy(&info, (*MB)->getBufferStart() + sizeof(id), sizeof(info));
+
+ // check ELF signature
+ if (id[0] != 0x7f || id[1] != 'E' || id[2] != 'L' || id[3] != 'F') {
+ errs() << "invalid elf signature\n";
+ return false;
+ }
+
+ hdr.ElfMach = info.e_machine;
+
+ return true;
+}
+
+void PerfJITEventListener::NotifyCode(Expected<llvm::StringRef> &Symbol,
+ uint64_t CodeAddr, uint64_t CodeSize) {
+ assert(SuccessfullyInitialized);
+
+ // 0 length functions can't have samples.
+ if (CodeSize == 0)
+ return;
+
+ LLVMPerfJitRecordCodeLoad rec;
+ rec.Prefix.Id = JIT_CODE_LOAD;
+  rec.Prefix.TotalSize = sizeof(rec) +        // code load record itself
+ Symbol->size() + 1 + // symbol name
+ CodeSize; // and code
+ rec.Prefix.Timestamp = perf_get_timestamp();
+
+ rec.CodeSize = CodeSize;
+ rec.Vma = 0;
+ rec.CodeAddr = CodeAddr;
+ rec.Pid = Pid;
+ rec.Tid = get_threadid();
+
+ // avoid interspersing output
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ rec.CodeIndex = CodeGeneration++; // under lock!
+
+ Dumpstream->write(reinterpret_cast<const char *>(&rec), sizeof(rec));
+ Dumpstream->write(Symbol->data(), Symbol->size() + 1);
+ Dumpstream->write(reinterpret_cast<const char *>(CodeAddr), CodeSize);
+}
+
+void PerfJITEventListener::NotifyDebug(uint64_t CodeAddr,
+ DILineInfoTable Lines) {
+ assert(SuccessfullyInitialized);
+
+ // Didn't get useful debug info.
+ if (Lines.empty())
+ return;
+
+ LLVMPerfJitRecordDebugInfo rec;
+ rec.Prefix.Id = JIT_CODE_DEBUG_INFO;
+ rec.Prefix.TotalSize = sizeof(rec); // will be increased further
+ rec.Prefix.Timestamp = perf_get_timestamp();
+ rec.CodeAddr = CodeAddr;
+ rec.NrEntry = Lines.size();
+
+  // compute total size of record (variable due to filenames)
+ DILineInfoTable::iterator Begin = Lines.begin();
+ DILineInfoTable::iterator End = Lines.end();
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ DILineInfo &line = It->second;
+ rec.Prefix.TotalSize += sizeof(LLVMPerfJitDebugEntry);
+ rec.Prefix.TotalSize += line.FileName.size() + 1;
+ }
+
+ // The debug_entry describes the source line information. It is defined as
+ // follows in order:
+ // * uint64_t code_addr: address of function for which the debug information
+ // is generated
+ // * uint32_t line : source file line number (starting at 1)
+ // * uint32_t discrim : column discriminator, 0 is default
+ // * char name[n] : source file name in ASCII, including null termination
+
+ // avoid interspersing output
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ Dumpstream->write(reinterpret_cast<const char *>(&rec), sizeof(rec));
+
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ LLVMPerfJitDebugEntry LineInfo;
+ DILineInfo &Line = It->second;
+
+ LineInfo.Addr = It->first;
+    // The function re-created by perf is preceded by an ELF header. We
+    // need to adjust for that, otherwise the results are wrong.
+ LineInfo.Addr += 0x40;
+ LineInfo.Lineno = Line.Line;
+ LineInfo.Discrim = Line.Discriminator;
+
+ Dumpstream->write(reinterpret_cast<const char *>(&LineInfo),
+ sizeof(LineInfo));
+ Dumpstream->write(Line.FileName.c_str(), Line.FileName.size() + 1);
+ }
+}
+
+// There should be only a single event listener per process, otherwise perf gets
+// confused.
+llvm::ManagedStatic<PerfJITEventListener> PerfListener;
+
+} // end anonymous namespace
+
+namespace llvm {
+JITEventListener *JITEventListener::createPerfJITEventListener() {
+ return &*PerfListener;
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void) {
+ return wrap(JITEventListener::createPerfJITEventListener());
+}
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
new file mode 100644
index 0000000000000..4e2d0f422f39b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
@@ -0,0 +1,131 @@
+//===----------- JITSymbol.cpp - JITSymbol class implementation -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// JITSymbol class implementation plus helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Object/ObjectFile.h"
+
+using namespace llvm;
+
+JITSymbolFlags llvm::JITSymbolFlags::fromGlobalValue(const GlobalValue &GV) {
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+ if (GV.hasWeakLinkage() || GV.hasLinkOnceLinkage())
+ Flags |= JITSymbolFlags::Weak;
+ if (GV.hasCommonLinkage())
+ Flags |= JITSymbolFlags::Common;
+ if (!GV.hasLocalLinkage() && !GV.hasHiddenVisibility())
+ Flags |= JITSymbolFlags::Exported;
+
+ if (isa<Function>(GV))
+ Flags |= JITSymbolFlags::Callable;
+ else if (isa<GlobalAlias>(GV) &&
+ isa<Function>(cast<GlobalAlias>(GV).getAliasee()))
+ Flags |= JITSymbolFlags::Callable;
+
+ return Flags;
+}
+
+Expected<JITSymbolFlags>
+llvm::JITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+ if (Symbol.getFlags() & object::BasicSymbolRef::SF_Weak)
+ Flags |= JITSymbolFlags::Weak;
+ if (Symbol.getFlags() & object::BasicSymbolRef::SF_Common)
+ Flags |= JITSymbolFlags::Common;
+ if (Symbol.getFlags() & object::BasicSymbolRef::SF_Exported)
+ Flags |= JITSymbolFlags::Exported;
+
+ auto SymbolType = Symbol.getType();
+ if (!SymbolType)
+ return SymbolType.takeError();
+
+ if (*SymbolType & object::SymbolRef::ST_Function)
+ Flags |= JITSymbolFlags::Callable;
+
+ return Flags;
+}
+
+ARMJITSymbolFlags
+llvm::ARMJITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+ ARMJITSymbolFlags Flags;
+ if (Symbol.getFlags() & object::BasicSymbolRef::SF_Thumb)
+ Flags |= ARMJITSymbolFlags::Thumb;
+ return Flags;
+}
+
+/// Performs lookup by, for each symbol, first calling
+/// findSymbolInLogicalDylib and if that fails calling
+/// findSymbol.
+void LegacyJITSymbolResolver::lookup(const LookupSet &Symbols,
+ OnResolvedFunction OnResolved) {
+ JITSymbolResolver::LookupResult Result;
+ for (auto &Symbol : Symbols) {
+ std::string SymName = Symbol.str();
+ if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+ else {
+ OnResolved(AddrOrErr.takeError());
+ return;
+ }
+ } else if (auto Err = Sym.takeError()) {
+ OnResolved(std::move(Err));
+ return;
+ } else {
+      // findSymbolInLogicalDylib failed. Let's try findSymbol.
+ if (auto Sym = findSymbol(SymName)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+ else {
+ OnResolved(AddrOrErr.takeError());
+ return;
+ }
+ } else if (auto Err = Sym.takeError()) {
+ OnResolved(std::move(Err));
+ return;
+ } else {
+ OnResolved(make_error<StringError>("Symbol not found: " + Symbol,
+ inconvertibleErrorCode()));
+ return;
+ }
+ }
+ }
+
+ OnResolved(std::move(Result));
+}
+
+/// Performs flags lookup by calling findSymbolInLogicalDylib and
+/// returning the flags value for that symbol.
+Expected<JITSymbolResolver::LookupSet>
+LegacyJITSymbolResolver::getResponsibilitySet(const LookupSet &Symbols) {
+ JITSymbolResolver::LookupSet Result;
+
+ for (auto &Symbol : Symbols) {
+ std::string SymName = Symbol.str();
+ if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+ // If there's an existing def but it is not strong, then the caller is
+ // responsible for it.
+ if (!Sym.getFlags().isStrong())
+ Result.insert(Symbol);
+ } else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ else {
+ // If there is no existing definition then the caller is responsible for
+ // it.
+ Result.insert(Symbol);
+ }
+ }
+
+ return std::move(Result);
+}
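+
+// Note: a symbol with a strong pre-existing definition is deliberately *not*
+// added to the responsibility set above: the resolver will supply it, so the
+// caller must not re-define it.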
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
new file mode 100644
index 0000000000000..46604ff4000c1
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
@@ -0,0 +1,303 @@
+//===-- RTDyldMemoryManager.cpp - Memory manager for MC-JIT -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdlib>
+
+#ifdef __linux__
+ // These includes used by RTDyldMemoryManager::getPointerToNamedFunction()
+ // for Glibc trickery. See comments in this function for more information.
+ #ifdef HAVE_SYS_STAT_H
+ #include <sys/stat.h>
+ #endif
+ #include <fcntl.h>
+ #include <unistd.h>
+#endif
+
+namespace llvm {
+
+RTDyldMemoryManager::~RTDyldMemoryManager() {}
+
+// Determine whether we can register EH tables.
+#if (defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__ia64__) && \
+ !(defined(_AIX) && defined(__ibmxl__)) && !defined(__SEH__) && \
+ !defined(__USING_SJLJ_EXCEPTIONS__))
+#define HAVE_EHTABLE_SUPPORT 1
+#else
+#define HAVE_EHTABLE_SUPPORT 0
+#endif
+
+#if HAVE_EHTABLE_SUPPORT
+extern "C" void __register_frame(void *);
+extern "C" void __deregister_frame(void *);
+#else
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MingW runtime.
+static void __register_frame(void *p) {
+ static bool Searched = false;
+ static void((*rf)(void *)) = 0;
+
+ if (!Searched) {
+ Searched = true;
+ *(void **)&rf =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+ }
+ if (rf)
+ rf(p);
+}
+
+static void __deregister_frame(void *p) {
+ static bool Searched = false;
+ static void((*df)(void *)) = 0;
+
+ if (!Searched) {
+ Searched = true;
+ *(void **)&df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+ "__deregister_frame");
+ }
+ if (df)
+ df(p);
+}
+#endif
+
+#ifdef __APPLE__
+
+static const char *processFDE(const char *Entry, bool isDeregister) {
+ const char *P = Entry;
+ uint32_t Length = *((const uint32_t *)P);
+ P += 4;
+ uint32_t Offset = *((const uint32_t *)P);
+ if (Offset != 0) {
+ if (isDeregister)
+ __deregister_frame(const_cast<char *>(Entry));
+ else
+ __register_frame(const_cast<char *>(Entry));
+ }
+ return P + Length;
+}
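+
+// Each processFDE() call above consumes one CFI record: a 4-byte length
+// followed by the record body. A zero second word marks a CIE, which is
+// skipped; a non-zero value marks an FDE, which is handed to
+// __register_frame / __deregister_frame.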
+
+// This implementation handles frame registration for local targets.
+// Memory managers for remote targets should re-implement this function
+// and use the LoadAddr parameter.
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+  // On OS X, __register_frame takes a single FDE as an argument.
+ // See http://lists.llvm.org/pipermail/llvm-dev/2013-April/061737.html
+ // and projects/libunwind/src/UnwindLevel1-gcc-ext.c.
+ const char *P = (const char *)Addr;
+ const char *End = P + Size;
+ do {
+ P = processFDE(P, false);
+ } while(P != End);
+}
+
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+ const char *P = (const char *)Addr;
+ const char *End = P + Size;
+ do {
+ P = processFDE(P, true);
+ } while(P != End);
+}
+
+#else
+
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+ // On Linux __register_frame takes a single argument:
+ // a pointer to the start of the .eh_frame section.
+
+ // How can it find the end? Because crtendS.o is linked
+ // in and it has an .eh_frame section with four zero chars.
+ __register_frame(Addr);
+}
+
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+ __deregister_frame(Addr);
+}
+
+#endif
+
+void RTDyldMemoryManager::registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) {
+ registerEHFramesInProcess(Addr, Size);
+ EHFrames.push_back({Addr, Size});
+}
+
+void RTDyldMemoryManager::deregisterEHFrames() {
+ for (auto &Frame : EHFrames)
+ deregisterEHFramesInProcess(Frame.Addr, Frame.Size);
+ EHFrames.clear();
+}
+
+static int jit_noop() {
+ return 0;
+}
+
+// ARM math functions are statically linked on Android from libgcc.a, but not
+// available at runtime for dynamic linking. On Linux these are usually placed
+// in libgcc_s.so so can be found by normal dynamic lookup.
+#if defined(__BIONIC__) && defined(__arm__)
+// List of functions which are statically linked on Android and can be generated
+// by LLVM. This is done as a nested macro which is used once to declare the
+// imported functions with ARM_MATH_DECL and once to compare them to the
+// user-requested symbol in getSymbolAddress with ARM_MATH_CHECK. The test
+// assumes that all functions start with __aeabi_ and getSymbolAddress must be
+// modified if that changes.
+#define ARM_MATH_IMPORTS(PP) \
+ PP(__aeabi_d2f) \
+ PP(__aeabi_d2iz) \
+ PP(__aeabi_d2lz) \
+ PP(__aeabi_d2uiz) \
+ PP(__aeabi_d2ulz) \
+ PP(__aeabi_dadd) \
+ PP(__aeabi_dcmpeq) \
+ PP(__aeabi_dcmpge) \
+ PP(__aeabi_dcmpgt) \
+ PP(__aeabi_dcmple) \
+ PP(__aeabi_dcmplt) \
+ PP(__aeabi_dcmpun) \
+ PP(__aeabi_ddiv) \
+ PP(__aeabi_dmul) \
+ PP(__aeabi_dsub) \
+ PP(__aeabi_f2d) \
+ PP(__aeabi_f2iz) \
+ PP(__aeabi_f2lz) \
+ PP(__aeabi_f2uiz) \
+ PP(__aeabi_f2ulz) \
+ PP(__aeabi_fadd) \
+ PP(__aeabi_fcmpeq) \
+ PP(__aeabi_fcmpge) \
+ PP(__aeabi_fcmpgt) \
+ PP(__aeabi_fcmple) \
+ PP(__aeabi_fcmplt) \
+ PP(__aeabi_fcmpun) \
+ PP(__aeabi_fdiv) \
+ PP(__aeabi_fmul) \
+ PP(__aeabi_fsub) \
+ PP(__aeabi_i2d) \
+ PP(__aeabi_i2f) \
+ PP(__aeabi_idiv) \
+ PP(__aeabi_idivmod) \
+ PP(__aeabi_l2d) \
+ PP(__aeabi_l2f) \
+ PP(__aeabi_lasr) \
+ PP(__aeabi_ldivmod) \
+ PP(__aeabi_llsl) \
+ PP(__aeabi_llsr) \
+ PP(__aeabi_lmul) \
+ PP(__aeabi_ui2d) \
+ PP(__aeabi_ui2f) \
+ PP(__aeabi_uidiv) \
+ PP(__aeabi_uidivmod) \
+ PP(__aeabi_ul2d) \
+ PP(__aeabi_ul2f) \
+ PP(__aeabi_uldivmod)
+
+// Declare statically linked math functions on ARM. The function declarations
+// here do not have the correct prototypes for each function in
+// ARM_MATH_IMPORTS, but it doesn't matter because only the symbol addresses are
+// needed. In particular the __aeabi_*divmod functions do not have calling
+// conventions which match any C prototype.
+#define ARM_MATH_DECL(name) extern "C" void name();
+ARM_MATH_IMPORTS(ARM_MATH_DECL)
+#undef ARM_MATH_DECL
+#endif
+
+#if defined(__linux__) && defined(__GLIBC__) && \
+ (defined(__i386__) || defined(__x86_64__))
+extern "C" LLVM_ATTRIBUTE_WEAK void __morestack();
+#endif
+
+uint64_t
+RTDyldMemoryManager::getSymbolAddressInProcess(const std::string &Name) {
+ // This implementation assumes that the host program is the target.
+ // Clients generating code for a remote target should implement their own
+ // memory manager.
+#if defined(__linux__) && defined(__GLIBC__)
+ //===--------------------------------------------------------------------===//
+ // Function stubs that are invoked instead of certain library calls
+ //
+ // Force the following functions to be linked in to anything that uses the
+ // JIT. This is a hack designed to work around the all-too-clever Glibc
+ // strategy of making these functions work differently when inlined vs. when
+ // not inlined, and hiding their real definitions in a separate archive file
+ // that the dynamic linker can't see. For more info, search for
+ // 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+ if (Name == "stat") return (uint64_t)&stat;
+ if (Name == "fstat") return (uint64_t)&fstat;
+ if (Name == "lstat") return (uint64_t)&lstat;
+ if (Name == "stat64") return (uint64_t)&stat64;
+ if (Name == "fstat64") return (uint64_t)&fstat64;
+ if (Name == "lstat64") return (uint64_t)&lstat64;
+ if (Name == "atexit") return (uint64_t)&atexit;
+ if (Name == "mknod") return (uint64_t)&mknod;
+
+#if defined(__i386__) || defined(__x86_64__)
+ // __morestack lives in libgcc, a static library.
+ if (&__morestack && Name == "__morestack")
+ return (uint64_t)&__morestack;
+#endif
+#endif // __linux__ && __GLIBC__
+
+ // See ARM_MATH_IMPORTS definition for explanation
+#if defined(__BIONIC__) && defined(__arm__)
+ if (Name.compare(0, 8, "__aeabi_") == 0) {
+ // Check if the user has requested any of the functions listed in
+ // ARM_MATH_IMPORTS, and if so redirect to the statically linked symbol.
+#define ARM_MATH_CHECK(fn) if (Name == #fn) return (uint64_t)&fn;
+ ARM_MATH_IMPORTS(ARM_MATH_CHECK)
+#undef ARM_MATH_CHECK
+ }
+#endif
+
+ // We should not invoke the host's ctors/dtors from the generated main()!
+ // On MinGW and Cygwin, the symbol __main would otherwise resolve to the
+ // host's (e.g. tools/lli's) __main, invoking the wrong, duplicated ctors
+ // (and registering the wrong dtors with atexit(3)). We expect
+ // ExecutionEngine::runStaticConstructorsDestructors() to be called before
+ // ExecutionEngine::runFunctionAsMain().
+ if (Name == "__main") return (uint64_t)&jit_noop;
+
+ const char *NameStr = Name.c_str();
+
+ // DynamicLibrary::SearchForAddressOfSymbol expects an unmangled 'C' symbol
+ // name, so if we're on Darwin, strip the leading '_' off.
+#ifdef __APPLE__
+ if (NameStr[0] == '_')
+ ++NameStr;
+#endif
+
+ return (uint64_t)sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+}
+
+void *RTDyldMemoryManager::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+ uint64_t Addr = getSymbolAddress(Name);
+
+ if (!Addr && AbortOnFailure)
+ report_fatal_error("Program used external function '" + Name +
+ "' which could not be resolved!");
+
+ return (void*)Addr;
+}
+
+void RTDyldMemoryManager::anchor() {}
+void MCJITMemoryManager::anchor() {}
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
new file mode 100644
index 0000000000000..2df71a5e5e741
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -0,0 +1,1434 @@
+//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "RuntimeDyldCOFF.h"
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldImpl.h"
+#include "RuntimeDyldMachO.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include <future>
+#include <mutex>
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+enum RuntimeDyldErrorCode {
+ GenericRTDyldError = 1
+};
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class RuntimeDyldErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "runtimedyld"; }
+
+ std::string message(int Condition) const override {
+ switch (static_cast<RuntimeDyldErrorCode>(Condition)) {
+ case GenericRTDyldError: return "Generic RuntimeDyld error";
+ }
+ llvm_unreachable("Unrecognized RuntimeDyldErrorCode");
+ }
+};
+
+static ManagedStatic<RuntimeDyldErrorCategory> RTDyldErrorCategory;
+
+} // end anonymous namespace
+
+char RuntimeDyldError::ID = 0;
+
+void RuntimeDyldError::log(raw_ostream &OS) const {
+ OS << ErrMsg << "\n";
+}
+
+std::error_code RuntimeDyldError::convertToErrorCode() const {
+ return std::error_code(GenericRTDyldError, *RTDyldErrorCategory);
+}
+
+// Empty out-of-line virtual destructor as the key function.
+RuntimeDyldImpl::~RuntimeDyldImpl() {}
+
+// Pin LoadedObjectInfo's vtables to this file.
+void RuntimeDyld::LoadedObjectInfo::anchor() {}
+
+namespace llvm {
+
+void RuntimeDyldImpl::registerEHFrames() {}
+
+void RuntimeDyldImpl::deregisterEHFrames() {
+ MemMgr.deregisterEHFrames();
+}
+
+#ifndef NDEBUG
+static void dumpSectionMemory(const SectionEntry &S, StringRef State) {
+ dbgs() << "----- Contents of section " << S.getName() << " " << State
+ << " -----";
+
+ if (S.getAddress() == nullptr) {
+ dbgs() << "\n <section not emitted>\n";
+ return;
+ }
+
+ const unsigned ColsPerRow = 16;
+
+ uint8_t *DataAddr = S.getAddress();
+ uint64_t LoadAddr = S.getLoadAddress();
+
+ unsigned StartPadding = LoadAddr & (ColsPerRow - 1);
+ unsigned BytesRemaining = S.getSize();
+
+ if (StartPadding) {
+ dbgs() << "\n" << format("0x%016" PRIx64,
+ LoadAddr & ~(uint64_t)(ColsPerRow - 1)) << ":";
+ while (StartPadding--)
+ dbgs() << " ";
+ }
+
+ while (BytesRemaining > 0) {
+ if ((LoadAddr & (ColsPerRow - 1)) == 0)
+ dbgs() << "\n" << format("0x%016" PRIx64, LoadAddr) << ":";
+
+ dbgs() << " " << format("%02x", *DataAddr);
+
+ ++DataAddr;
+ ++LoadAddr;
+ --BytesRemaining;
+ }
+
+ dbgs() << "\n";
+}
+#endif
+
+// Resolve the relocations for all symbols we currently know about.
+void RuntimeDyldImpl::resolveRelocations() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Print out the sections prior to relocation.
+ LLVM_DEBUG(for (int i = 0, e = Sections.size(); i != e; ++i)
+ dumpSectionMemory(Sections[i], "before relocations"););
+
+ // First, resolve relocations associated with external symbols.
+ if (auto Err = resolveExternalSymbols()) {
+ HasError = true;
+ ErrorStr = toString(std::move(Err));
+ }
+
+ resolveLocalRelocations();
+
+ // Print out sections after relocation.
+ LLVM_DEBUG(for (int i = 0, e = Sections.size(); i != e; ++i)
+ dumpSectionMemory(Sections[i], "after relocations"););
+}
+
+void RuntimeDyldImpl::resolveLocalRelocations() {
+ // Iterate over all outstanding relocations
+ for (auto it = Relocations.begin(), e = Relocations.end(); it != e; ++it) {
+ // The Section here (Sections[Idx]) refers to the section in which the
+ // symbol for the relocation is located. The SectionID in the relocation
+ // entry provides the section to which the relocation will be applied.
+ int Idx = it->first;
+ uint64_t Addr = Sections[Idx].getLoadAddress();
+ LLVM_DEBUG(dbgs() << "Resolving relocations Section #" << Idx << "\t"
+ << format("%p", (uintptr_t)Addr) << "\n");
+ resolveRelocationList(it->second, Addr);
+ }
+ Relocations.clear();
+}
+
+void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ for (unsigned i = 0, e = Sections.size(); i != e; ++i) {
+ if (Sections[i].getAddress() == LocalAddress) {
+ reassignSectionAddress(i, TargetAddress);
+ return;
+ }
+ }
+ llvm_unreachable("Attempting to remap address of unknown section!");
+}
+
+static Error getOffset(const SymbolRef &Sym, SectionRef Sec,
+ uint64_t &Result) {
+ Expected<uint64_t> AddressOrErr = Sym.getAddress();
+ if (!AddressOrErr)
+ return AddressOrErr.takeError();
+ Result = *AddressOrErr - Sec.getAddress();
+ return Error::success();
+}
+
+Expected<RuntimeDyldImpl::ObjSectionToIDMap>
+RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Save information about our target
+ Arch = (Triple::ArchType)Obj.getArch();
+ IsTargetLittleEndian = Obj.isLittleEndian();
+ setMipsABI(Obj);
+
+ // Compute the memory size required to load all sections that will be
+ // loaded, and pass this information to the memory manager.
+ if (MemMgr.needsToReserveAllocationSpace()) {
+ uint64_t CodeSize = 0, RODataSize = 0, RWDataSize = 0;
+ uint32_t CodeAlign = 1, RODataAlign = 1, RWDataAlign = 1;
+ if (auto Err = computeTotalAllocSize(Obj,
+ CodeSize, CodeAlign,
+ RODataSize, RODataAlign,
+ RWDataSize, RWDataAlign))
+ return std::move(Err);
+ MemMgr.reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign,
+ RWDataSize, RWDataAlign);
+ }
+
+ // Used sections from the object file
+ ObjSectionToIDMap LocalSections;
+
+ // Common symbols requiring allocation, with their sizes and alignments
+ CommonSymbolList CommonSymbolsToAllocate;
+
+ uint64_t CommonSize = 0;
+ uint32_t CommonAlign = 0;
+
+ // First, collect all weak and common symbols. We need to know if stronger
+ // definitions occur elsewhere.
+ JITSymbolResolver::LookupSet ResponsibilitySet;
+ {
+ JITSymbolResolver::LookupSet Symbols;
+ for (auto &Sym : Obj.symbols()) {
+ uint32_t Flags = Sym.getFlags();
+ if ((Flags & SymbolRef::SF_Common) || (Flags & SymbolRef::SF_Weak)) {
+ // Get symbol name.
+ if (auto NameOrErr = Sym.getName())
+ Symbols.insert(*NameOrErr);
+ else
+ return NameOrErr.takeError();
+ }
+ }
+
+ if (auto ResultOrErr = Resolver.getResponsibilitySet(Symbols))
+ ResponsibilitySet = std::move(*ResultOrErr);
+ else
+ return ResultOrErr.takeError();
+ }
+
+ // Parse symbols
+ LLVM_DEBUG(dbgs() << "Parse symbols:\n");
+ for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+ ++I) {
+ uint32_t Flags = I->getFlags();
+
+ // Skip undefined symbols.
+ if (Flags & SymbolRef::SF_Undefined)
+ continue;
+
+ // Get the symbol type.
+ object::SymbolRef::Type SymType;
+ if (auto SymTypeOrErr = I->getType())
+ SymType = *SymTypeOrErr;
+ else
+ return SymTypeOrErr.takeError();
+
+ // Get symbol name.
+ StringRef Name;
+ if (auto NameOrErr = I->getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+
+ // Compute JIT symbol flags.
+ auto JITSymFlags = getJITSymbolFlags(*I);
+ if (!JITSymFlags)
+ return JITSymFlags.takeError();
+
+ // If this is a weak definition, check to see if there's a strong one.
+ // If there is, skip this symbol (we won't be providing it: the strong
+ // definition will). If there's no strong definition, make this definition
+ // strong.
+ if (JITSymFlags->isWeak() || JITSymFlags->isCommon()) {
+ // First check whether there's already a definition in this instance.
+ if (GlobalSymbolTable.count(Name))
+ continue;
+
+ // If we're not responsible for this symbol, skip it.
+ if (!ResponsibilitySet.count(Name))
+ continue;
+
+ // Otherwise update the flags on the symbol to make this definition
+ // strong.
+ if (JITSymFlags->isWeak())
+ *JITSymFlags &= ~JITSymbolFlags::Weak;
+ if (JITSymFlags->isCommon()) {
+ *JITSymFlags &= ~JITSymbolFlags::Common;
+ uint32_t Align = I->getAlignment();
+ uint64_t Size = I->getCommonSize();
+ if (!CommonAlign)
+ CommonAlign = Align;
+ CommonSize = alignTo(CommonSize, Align) + Size;
+ CommonSymbolsToAllocate.push_back(*I);
+ }
+ }
+
+ if (Flags & SymbolRef::SF_Absolute &&
+ SymType != object::SymbolRef::ST_File) {
+ uint64_t Addr = 0;
+ if (auto AddrOrErr = I->getAddress())
+ Addr = *AddrOrErr;
+ else
+ return AddrOrErr.takeError();
+
+ unsigned SectionID = AbsoluteSymbolSection;
+
+ LLVM_DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", (uintptr_t)Addr)
+ << " flags: " << Flags << "\n");
+ GlobalSymbolTable[Name] = SymbolTableEntry(SectionID, Addr, *JITSymFlags);
+ } else if (SymType == object::SymbolRef::ST_Function ||
+ SymType == object::SymbolRef::ST_Data ||
+ SymType == object::SymbolRef::ST_Unknown ||
+ SymType == object::SymbolRef::ST_Other) {
+
+ section_iterator SI = Obj.section_end();
+ if (auto SIOrErr = I->getSection())
+ SI = *SIOrErr;
+ else
+ return SIOrErr.takeError();
+
+ if (SI == Obj.section_end())
+ continue;
+
+ // Get symbol offset.
+ uint64_t SectOffset;
+ if (auto Err = getOffset(*I, *SI, SectOffset))
+ return std::move(Err);
+
+ bool IsCode = SI->isText();
+ unsigned SectionID;
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+ SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+
+ LLVM_DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", (uintptr_t)SectOffset)
+ << " flags: " << Flags << "\n");
+ GlobalSymbolTable[Name] =
+ SymbolTableEntry(SectionID, SectOffset, *JITSymFlags);
+ }
+ }
+
+ // Allocate common symbols
+ if (auto Err = emitCommonSymbols(Obj, CommonSymbolsToAllocate, CommonSize,
+ CommonAlign))
+ return std::move(Err);
+
+ // Parse and process relocations
+ LLVM_DEBUG(dbgs() << "Parse relocations:\n");
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+ StubMap Stubs;
+
+ Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+ if (!RelSecOrErr)
+ return RelSecOrErr.takeError();
+
+ section_iterator RelocatedSection = *RelSecOrErr;
+ if (RelocatedSection == SE)
+ continue;
+
+ relocation_iterator I = SI->relocation_begin();
+ relocation_iterator E = SI->relocation_end();
+
+ if (I == E && !ProcessAllSections)
+ continue;
+
+ bool IsCode = RelocatedSection->isText();
+ unsigned SectionID = 0;
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, *RelocatedSection, IsCode,
+ LocalSections))
+ SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+
+ LLVM_DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+
+ for (; I != E;)
+ if (auto IOrErr = processRelocationRef(SectionID, I, Obj, LocalSections, Stubs))
+ I = *IOrErr;
+ else
+ return IOrErr.takeError();
+
+ // If there is a NotifyStubEmitted callback set, call it to register any
+ // stubs created for this section.
+ if (NotifyStubEmitted) {
+ StringRef FileName = Obj.getFileName();
+ StringRef SectionName = Sections[SectionID].getName();
+ for (auto &KV : Stubs) {
+
+ auto &VR = KV.first;
+ uint64_t StubAddr = KV.second;
+
+ // If this is a named stub, just call NotifyStubEmitted.
+ if (VR.SymbolName) {
+ NotifyStubEmitted(FileName, SectionName, VR.SymbolName, SectionID,
+ StubAddr);
+ continue;
+ }
+
+ // Otherwise we will have to try a reverse lookup on the global
+ // symbol table.
+ for (auto &GSTMapEntry : GlobalSymbolTable) {
+ StringRef SymbolName = GSTMapEntry.first();
+ auto &GSTEntry = GSTMapEntry.second;
+ if (GSTEntry.getSectionID() == VR.SectionID &&
+ GSTEntry.getOffset() == VR.Offset) {
+ NotifyStubEmitted(FileName, SectionName, SymbolName, SectionID,
+ StubAddr);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Process remaining sections
+ if (ProcessAllSections) {
+ LLVM_DEBUG(dbgs() << "Process remaining sections:\n");
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+
+ // Ignore already loaded sections.
+ if (LocalSections.find(*SI) != LocalSections.end())
+ continue;
+
+ bool IsCode = SI->isText();
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+ LLVM_DEBUG(dbgs() << "\tSectionID: " << (*SectionIDOrErr) << "\n");
+ else
+ return SectionIDOrErr.takeError();
+ }
+ }
+
+ // Give the subclasses a chance to tie-up any loose ends.
+ if (auto Err = finalizeLoad(Obj, LocalSections))
+ return std::move(Err);
+
+ return LocalSections;
+}
+
+// A helper method for computeTotalAllocSize.
+// Computes the memory size required to allocate sections with the given sizes,
+// assuming that all sections are allocated with the given alignment
+static uint64_t
+computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
+ uint64_t Alignment) {
+ uint64_t TotalSize = 0;
+ for (size_t Idx = 0, Cnt = SectionSizes.size(); Idx < Cnt; Idx++) {
+ uint64_t AlignedSize =
+ (SectionSizes[Idx] + Alignment - 1) / Alignment * Alignment;
+ TotalSize += AlignedSize;
+ }
+ return TotalSize;
+}
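+
+// For example, with SectionSizes = {10, 100} and Alignment = 16, the sizes
+// round up to 16 and 112 and the function returns 128. Rounding every section
+// up to the common alignment keeps the estimate independent of the order in
+// which the sections are later placed.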
+
+static bool isRequiredForExecution(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return ELFSectionRef(Section).getFlags() & ELF::SHF_ALLOC;
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj)) {
+ const coff_section *CoffSection = COFFObj->getCOFFSection(Section);
+ // Avoid loading zero-sized COFF sections.
+ // In PE files, VirtualSize gives the section size, and SizeOfRawData
+ // may be zero for sections with content. In Obj files, SizeOfRawData
+ // gives the section size, and VirtualSize is always zero. Hence
+ // the need to check for both cases below.
+ bool HasContent =
+ (CoffSection->VirtualSize > 0) || (CoffSection->SizeOfRawData > 0);
+ bool IsDiscardable =
+ CoffSection->Characteristics &
+ (COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
+ return HasContent && !IsDiscardable;
+ }
+
+ assert(isa<MachOObjectFile>(Obj));
+ return true;
+}
+
+static bool isReadOnlyData(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return !(ELFSectionRef(Section).getFlags() &
+ (ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
+ return ((COFFObj->getCOFFSection(Section)->Characteristics &
+ (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ
+ | COFF::IMAGE_SCN_MEM_WRITE))
+ ==
+ (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ));
+
+ assert(isa<MachOObjectFile>(Obj));
+ return false;
+}
+
+static bool isZeroInit(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return ELFSectionRef(Section).getType() == ELF::SHT_NOBITS;
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
+ return COFFObj->getCOFFSection(Section)->Characteristics &
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+
+ auto *MachO = cast<MachOObjectFile>(Obj);
+ unsigned SectionType = MachO->getSectionType(Section);
+ return SectionType == MachO::S_ZEROFILL ||
+ SectionType == MachO::S_GB_ZEROFILL;
+}
+
+// Compute an upper bound of the memory size that is required to load all
+// sections
+Error RuntimeDyldImpl::computeTotalAllocSize(const ObjectFile &Obj,
+ uint64_t &CodeSize,
+ uint32_t &CodeAlign,
+ uint64_t &RODataSize,
+ uint32_t &RODataAlign,
+ uint64_t &RWDataSize,
+ uint32_t &RWDataAlign) {
+ // Compute the size of all sections required for execution
+ std::vector<uint64_t> CodeSectionSizes;
+ std::vector<uint64_t> ROSectionSizes;
+ std::vector<uint64_t> RWSectionSizes;
+
+ // Collect sizes of all sections to be loaded;
+ // also determine the max alignment of all sections
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+ const SectionRef &Section = *SI;
+
+ bool IsRequired = isRequiredForExecution(Section) || ProcessAllSections;
+
+ // Consider only the sections that are required to be loaded for execution
+ if (IsRequired) {
+ uint64_t DataSize = Section.getSize();
+ uint64_t Alignment64 = Section.getAlignment();
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+ bool IsCode = Section.isText();
+ bool IsReadOnly = isReadOnlyData(Section);
+
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef Name = *NameOrErr;
+
+ uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+ uint64_t PaddingSize = 0;
+ if (Name == ".eh_frame")
+ PaddingSize += 4;
+ if (StubBufSize != 0)
+ PaddingSize += getStubAlignment() - 1;
+
+ uint64_t SectionSize = DataSize + PaddingSize + StubBufSize;
+
+ // The .eh_frame section (at least on Linux) needs an extra four bytes of
+ // zero padding at the end. For MachO objects, this section has a slightly
+ // different name, so this won't have any effect for MachO objects.
+ if (Name == ".eh_frame")
+ SectionSize += 4;
+
+ if (!SectionSize)
+ SectionSize = 1;
+
+ if (IsCode) {
+ CodeAlign = std::max(CodeAlign, Alignment);
+ CodeSectionSizes.push_back(SectionSize);
+ } else if (IsReadOnly) {
+ RODataAlign = std::max(RODataAlign, Alignment);
+ ROSectionSizes.push_back(SectionSize);
+ } else {
+ RWDataAlign = std::max(RWDataAlign, Alignment);
+ RWSectionSizes.push_back(SectionSize);
+ }
+ }
+ }
+
+ // Compute the Global Offset Table size. If it is not zero we also update
+ // the alignment, which is equal to the size of a single GOT entry.
+ if (unsigned GotSize = computeGOTSize(Obj)) {
+ RWSectionSizes.push_back(GotSize);
+ RWDataAlign = std::max<uint32_t>(RWDataAlign, getGOTEntrySize());
+ }
+
+ // Compute the size of all common symbols
+ uint64_t CommonSize = 0;
+ uint32_t CommonAlign = 1;
+ for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+ ++I) {
+ uint32_t Flags = I->getFlags();
+ if (Flags & SymbolRef::SF_Common) {
+ // Add the common symbols to a list. We'll allocate them all below.
+ uint64_t Size = I->getCommonSize();
+ uint32_t Align = I->getAlignment();
+ // If this is the first common symbol, use its alignment as the alignment
+ // for the common symbols section.
+ if (CommonSize == 0)
+ CommonAlign = Align;
+ CommonSize = alignTo(CommonSize, Align) + Size;
+ }
+ }
+ if (CommonSize != 0) {
+ RWSectionSizes.push_back(CommonSize);
+ RWDataAlign = std::max(RWDataAlign, CommonAlign);
+ }
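+
+ // For example, two common symbols A (size 6, align 4) and B (size 10,
+ // align 8) accumulate as CommonSize = alignTo(0, 4) + 6 = 6, then
+ // CommonSize = alignTo(6, 8) + 10 = 18, with CommonAlign taken from the
+ // first symbol (4).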
+
+ // Compute the required allocation space for each different type of sections
+ // (code, read-only data, read-write data) assuming that all sections are
+ // allocated with the max alignment. Note that we cannot compute with the
+ // individual alignments of the sections, because then the required size
+ // depends on the order, in which the sections are allocated.
+ CodeSize = computeAllocationSizeForSections(CodeSectionSizes, CodeAlign);
+ RODataSize = computeAllocationSizeForSections(ROSectionSizes, RODataAlign);
+ RWDataSize = computeAllocationSizeForSections(RWSectionSizes, RWDataAlign);
+
+ return Error::success();
+}
+
+// compute GOT size
+unsigned RuntimeDyldImpl::computeGOTSize(const ObjectFile &Obj) {
+ size_t GotEntrySize = getGOTEntrySize();
+ if (!GotEntrySize)
+ return 0;
+
+ size_t GotSize = 0;
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+
+ for (const RelocationRef &Reloc : SI->relocations())
+ if (relocationNeedsGot(Reloc))
+ GotSize += GotEntrySize;
+ }
+
+ return GotSize;
+}
+
+// compute stub buffer size for the given section
+unsigned RuntimeDyldImpl::computeSectionStubBufSize(const ObjectFile &Obj,
+ const SectionRef &Section) {
+ unsigned StubSize = getMaxStubSize();
+ if (StubSize == 0) {
+ return 0;
+ }
+ // FIXME: this is an inefficient way to handle this. We should compute the
+ // necessary section allocation size in loadObject by walking all the
+ // sections once.
+ unsigned StubBufSize = 0;
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+
+ Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+ if (!RelSecOrErr)
+ report_fatal_error(toString(RelSecOrErr.takeError()));
+
+ section_iterator RelSecI = *RelSecOrErr;
+ if (!(RelSecI == Section))
+ continue;
+
+ for (const RelocationRef &Reloc : SI->relocations())
+ if (relocationNeedsStub(Reloc))
+ StubBufSize += StubSize;
+ }
+
+ // Get section data size and alignment
+ uint64_t DataSize = Section.getSize();
+ uint64_t Alignment64 = Section.getAlignment();
+
+ // Pad the stub buffer so that it ends on a stub-aligned boundary.
+ // EndAlignment is the largest power of two dividing both DataSize and
+ // Alignment, i.e. the lowest set bit of (DataSize | Alignment).
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+ unsigned StubAlignment = getStubAlignment();
+ unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
+ if (StubAlignment > EndAlignment)
+ StubBufSize += StubAlignment - EndAlignment;
+ return StubBufSize;
+}
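+
+// To illustrate the EndAlignment computation above: (X & -X) isolates the
+// lowest set bit of X, so with DataSize = 0x34 and Alignment = 0x10 we get
+// X = 0x34 and EndAlignment = 4. The section data may then end on a 4-byte
+// (but not 8-byte) boundary, so if the stub alignment is 8 the stub buffer
+// is padded by 8 - 4 = 4 bytes.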
+
+uint64_t RuntimeDyldImpl::readBytesUnaligned(uint8_t *Src,
+ unsigned Size) const {
+ uint64_t Result = 0;
+ if (IsTargetLittleEndian) {
+ Src += Size - 1;
+ while (Size--)
+ Result = (Result << 8) | *Src--;
+ } else
+ while (Size--)
+ Result = (Result << 8) | *Src++;
+
+ return Result;
+}
+
+void RuntimeDyldImpl::writeBytesUnaligned(uint64_t Value, uint8_t *Dst,
+ unsigned Size) const {
+ if (IsTargetLittleEndian) {
+ while (Size--) {
+ *Dst++ = Value & 0xFF;
+ Value >>= 8;
+ }
+ } else {
+ Dst += Size - 1;
+ while (Size--) {
+ *Dst-- = Value & 0xFF;
+ Value >>= 8;
+ }
+ }
+}
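+
+// A minimal usage sketch for the two helpers above (illustrative; assumes a
+// little-endian target and an in-scope RuntimeDyldImpl instance RTDI):
+//
+//   uint8_t Buf[4];
+//   RTDI.writeBytesUnaligned(0x11223344, Buf, 4); // Buf = {0x44, 0x33, 0x22, 0x11}
+//   uint64_t V = RTDI.readBytesUnaligned(Buf, 4); // V == 0x11223344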
+
+Expected<JITSymbolFlags>
+RuntimeDyldImpl::getJITSymbolFlags(const SymbolRef &SR) {
+ return JITSymbolFlags::fromObjectSymbol(SR);
+}
+
+Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
+ CommonSymbolList &SymbolsToAllocate,
+ uint64_t CommonSize,
+ uint32_t CommonAlign) {
+ if (SymbolsToAllocate.empty())
+ return Error::success();
+
+ // Allocate memory for the section
+ unsigned SectionID = Sections.size();
+ uint8_t *Addr = MemMgr.allocateDataSection(CommonSize, CommonAlign, SectionID,
+ "<common symbols>", false);
+ if (!Addr)
+ report_fatal_error("Unable to allocate memory for common symbols!");
+ uint64_t Offset = 0;
+ Sections.push_back(
+ SectionEntry("<common symbols>", Addr, CommonSize, CommonSize, 0));
+ memset(Addr, 0, CommonSize);
+
+ LLVM_DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
+ << " new addr: " << format("%p", Addr)
+ << " DataSize: " << CommonSize << "\n");
+
+ // Assign the address of each symbol
+ for (auto &Sym : SymbolsToAllocate) {
+ uint32_t Alignment = Sym.getAlignment();
+ uint64_t Size = Sym.getCommonSize();
+ StringRef Name;
+ if (auto NameOrErr = Sym.getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+ if (Alignment) {
+ // This symbol has an alignment requirement.
+ uint64_t AlignOffset =
+ offsetToAlignment((uint64_t)Addr, Align(Alignment));
+ Addr += AlignOffset;
+ Offset += AlignOffset;
+ }
+ auto JITSymFlags = getJITSymbolFlags(Sym);
+
+ if (!JITSymFlags)
+ return JITSymFlags.takeError();
+
+ LLVM_DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+ << format("%p", Addr) << "\n");
+ GlobalSymbolTable[Name] =
+ SymbolTableEntry(SectionID, Offset, std::move(*JITSymFlags));
+ Offset += Size;
+ Addr += Size;
+ }
+
+ return Error::success();
+}
+
+Expected<unsigned>
+RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode) {
+ StringRef data;
+ uint64_t Alignment64 = Section.getAlignment();
+
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+ unsigned PaddingSize = 0;
+ unsigned StubBufSize = 0;
+ bool IsRequired = isRequiredForExecution(Section);
+ bool IsVirtual = Section.isVirtual();
+ bool IsZeroInit = isZeroInit(Section);
+ bool IsReadOnly = isReadOnlyData(Section);
+ uint64_t DataSize = Section.getSize();
+
+ // An alignment of 0 (at least with ELF) is identical to an alignment of 1,
+ // while being more "polite". Other formats do not support 0-aligned sections
+ // anyway, so we should guarantee that the alignment is always at least 1.
+ Alignment = std::max(1u, Alignment);
+
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef Name = *NameOrErr;
+
+ StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+ // The .eh_frame section (at least on Linux) needs an extra four bytes of
+ // zero padding at the end. For MachO objects, this section has a slightly
+ // different name, so this won't have any effect for MachO objects.
+ if (Name == ".eh_frame")
+ PaddingSize = 4;
+
+ uintptr_t Allocate;
+ unsigned SectionID = Sections.size();
+ uint8_t *Addr;
+ const char *pData = nullptr;
+
+ // If this section contains any bits (i.e. isn't a virtual or bss section),
+ // grab a reference to them.
+ if (!IsVirtual && !IsZeroInit) {
+ // In either case, set the location of the unrelocated section in memory,
+ // since we still process relocations for it even if we're not applying them.
+ if (Expected<StringRef> E = Section.getContents())
+ data = *E;
+ else
+ return E.takeError();
+ pData = data.data();
+ }
+
+ // If there are any stubs then the section alignment needs to be at least as
+ // high as the stub alignment, or padding calculations may be incorrect when
+ // the section is remapped.
+ if (StubBufSize != 0) {
+ Alignment = std::max(Alignment, getStubAlignment());
+ PaddingSize += getStubAlignment() - 1;
+ }
+
+ // Some sections, such as debug info, don't need to be loaded for execution.
+ // Process those only if explicitly requested.
+ if (IsRequired || ProcessAllSections) {
+ Allocate = DataSize + PaddingSize + StubBufSize;
+ if (!Allocate)
+ Allocate = 1;
+ Addr = IsCode ? MemMgr.allocateCodeSection(Allocate, Alignment, SectionID,
+ Name)
+ : MemMgr.allocateDataSection(Allocate, Alignment, SectionID,
+ Name, IsReadOnly);
+ if (!Addr)
+ report_fatal_error("Unable to allocate section memory!");
+
+ // Zero-initialize or copy the data from the image
+ if (IsZeroInit || IsVirtual)
+ memset(Addr, 0, DataSize);
+ else
+ memcpy(Addr, pData, DataSize);
+
+ // Fill in any extra bytes we allocated for padding
+ if (PaddingSize != 0) {
+ memset(Addr + DataSize, 0, PaddingSize);
+ // Update the DataSize variable to include padding.
+ DataSize += PaddingSize;
+
+ // Align DataSize to stub alignment if we have any stubs (PaddingSize will
+ // have been increased above to account for this).
+ if (StubBufSize > 0)
+ DataSize &= -(uint64_t)getStubAlignment();
+ }
+
+ LLVM_DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: "
+ << Name << " obj addr: " << format("%p", pData)
+ << " new addr: " << format("%p", Addr) << " DataSize: "
+ << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
+ } else {
+ // Even if we didn't load the section, we need to record an entry for it
+ // to handle later processing (and by 'handle' I mean don't do anything
+ // with these sections).
+ Allocate = 0;
+ Addr = nullptr;
+ LLVM_DEBUG(
+ dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+ << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
+ }
+
+ Sections.push_back(
+ SectionEntry(Name, Addr, DataSize, Allocate, (uintptr_t)pData));
+
+ // Debug info sections are linked as if their load address was zero
+ if (!IsRequired)
+ Sections.back().setLoadAddress(0);
+
+ return SectionID;
+}
+
+Expected<unsigned>
+RuntimeDyldImpl::findOrEmitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode,
+ ObjSectionToIDMap &LocalSections) {
+
+ unsigned SectionID = 0;
+ ObjSectionToIDMap::iterator i = LocalSections.find(Section);
+ if (i != LocalSections.end())
+ SectionID = i->second;
+ else {
+ if (auto SectionIDOrErr = emitSection(Obj, Section, IsCode))
+ SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ LocalSections[Section] = SectionID;
+ }
+ return SectionID;
+}
+
+void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
+ unsigned SectionID) {
+ Relocations[SectionID].push_back(RE);
+}
+
+void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
+ StringRef SymbolName) {
+ // Relocation by symbol. If the symbol is found in the global symbol table,
+ // create an appropriate section relocation. Otherwise, add it to
+ // ExternalSymbolRelocations.
+ RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
+ if (Loc == GlobalSymbolTable.end()) {
+ ExternalSymbolRelocations[SymbolName].push_back(RE);
+ } else {
+ // Copy the RE since we want to modify its addend.
+ RelocationEntry RECopy = RE;
+ const auto &SymInfo = Loc->second;
+ RECopy.Addend += SymInfo.getOffset();
+ Relocations[SymInfo.getSectionID()].push_back(RECopy);
+ }
+}
+
+uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
+ unsigned AbiVariant) {
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+ Arch == Triple::aarch64_32) {
+ // This stub has to be able to access the full address space,
+ // since symbol lookup won't necessarily find a handy, in-range,
+ // PLT stub for functions which could be anywhere.
+ // Stub can use ip0 (== x16) to calculate address
+ writeBytesUnaligned(0xd2e00010, Addr, 4); // movz ip0, #:abs_g3:<addr>
+ writeBytesUnaligned(0xf2c00010, Addr+4, 4); // movk ip0, #:abs_g2_nc:<addr>
+ writeBytesUnaligned(0xf2a00010, Addr+8, 4); // movk ip0, #:abs_g1_nc:<addr>
+ writeBytesUnaligned(0xf2800010, Addr+12, 4); // movk ip0, #:abs_g0_nc:<addr>
+ writeBytesUnaligned(0xd61f0200, Addr+16, 4); // br ip0
+
+ return Addr;
+ } else if (Arch == Triple::arm || Arch == Triple::armeb) {
+ // TODO: There is only ARM far stub now. We should add the Thumb stub,
+ // and stubs for branches Thumb - ARM and ARM - Thumb.
+ writeBytesUnaligned(0xe51ff004, Addr, 4); // ldr pc, [pc, #-4]
+ return Addr + 4;
+ } else if (IsMipsO32ABI || IsMipsN32ABI) {
+ // 0: 3c190000 lui t9,%hi(addr).
+ // 4: 27390000 addiu t9,t9,%lo(addr).
+ // 8: 03200008 jr t9.
+ // c: 00000000 nop.
+ const unsigned LuiT9Instr = 0x3c190000, AdduiT9Instr = 0x27390000;
+ const unsigned NopInstr = 0x0;
+ unsigned JrT9Instr = 0x03200008;
+ if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_32R6 ||
+ (AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
+ JrT9Instr = 0x03200009;
+
+ writeBytesUnaligned(LuiT9Instr, Addr, 4);
+ writeBytesUnaligned(AdduiT9Instr, Addr + 4, 4);
+ writeBytesUnaligned(JrT9Instr, Addr + 8, 4);
+ writeBytesUnaligned(NopInstr, Addr + 12, 4);
+ return Addr;
+ } else if (IsMipsN64ABI) {
+ // 0: 3c190000 lui t9,%highest(addr).
+ // 4: 67390000 daddiu t9,t9,%higher(addr).
+ // 8: 0019CC38 dsll t9,t9,16.
+ // c: 67390000 daddiu t9,t9,%hi(addr).
+ // 10: 0019CC38 dsll t9,t9,16.
+ // 14: 67390000 daddiu t9,t9,%lo(addr).
+ // 18: 03200008 jr t9.
+ // 1c: 00000000 nop.
+ const unsigned LuiT9Instr = 0x3c190000, DaddiuT9Instr = 0x67390000,
+ DsllT9Instr = 0x19CC38;
+ const unsigned NopInstr = 0x0;
+ unsigned JrT9Instr = 0x03200008;
+ if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
+ JrT9Instr = 0x03200009;
+
+ writeBytesUnaligned(LuiT9Instr, Addr, 4);
+ writeBytesUnaligned(DaddiuT9Instr, Addr + 4, 4);
+ writeBytesUnaligned(DsllT9Instr, Addr + 8, 4);
+ writeBytesUnaligned(DaddiuT9Instr, Addr + 12, 4);
+ writeBytesUnaligned(DsllT9Instr, Addr + 16, 4);
+ writeBytesUnaligned(DaddiuT9Instr, Addr + 20, 4);
+ writeBytesUnaligned(JrT9Instr, Addr + 24, 4);
+ writeBytesUnaligned(NopInstr, Addr + 28, 4);
+ return Addr;
+ } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+ // Depending on which version of the ELF ABI is in use, we need to
+ // generate one of two variants of the stub. They both start with
+ // the same sequence to load the target address into r12.
+ writeInt32BE(Addr, 0x3D800000); // lis r12, highest(addr)
+ writeInt32BE(Addr+4, 0x618C0000); // ori r12, higher(addr)
+ writeInt32BE(Addr+8, 0x798C07C6); // sldi r12, r12, 32
+ writeInt32BE(Addr+12, 0x658C0000); // oris r12, r12, h(addr)
+ writeInt32BE(Addr+16, 0x618C0000); // ori r12, r12, l(addr)
+ if (AbiVariant == 2) {
+ // PowerPC64 stub ELFv2 ABI: The address points to the function itself.
+ // The address is already in r12 as required by the ABI. Branch to it.
+ writeInt32BE(Addr+20, 0xF8410018); // std r2, 24(r1)
+ writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
+ writeInt32BE(Addr+28, 0x4E800420); // bctr
+ } else {
+ // PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
+ // Load the function address into r11 and move it to the count register.
+ // Also load the function's TOC into r2 and its environment pointer into
+ // r11.
+ writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
+ writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
+ writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 8(r12)
+ writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
+ writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r12)
+ writeInt32BE(Addr+40, 0x4E800420); // bctr
+ }
+ return Addr;
+ } else if (Arch == Triple::systemz) {
+ writeInt16BE(Addr, 0xC418); // lgrl %r1,.+8
+ writeInt16BE(Addr+2, 0x0000);
+ writeInt16BE(Addr+4, 0x0004);
+ writeInt16BE(Addr+6, 0x07F1); // brc 15,%r1
+ // 8-byte address stored at Addr + 8
+ return Addr;
+ } else if (Arch == Triple::x86_64) {
+ *Addr = 0xFF; // jmp
+ *(Addr+1) = 0x25; // rip
+ // 32-bit PC-relative address of the GOT entry will be stored at Addr+2
+ } else if (Arch == Triple::x86) {
+ *Addr = 0xE9; // 32-bit pc-relative jump.
+ }
+ return Addr;
+}
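+
+// Note that createStubFunction only emits the instruction template; the
+// actual branch target is filled in later during relocation resolution. A
+// sketch for the x86-64 case (illustrative; the real patching happens in the
+// target-specific resolveRelocation):
+//
+//   uint8_t *Stub = createStubFunction(StubAddr, AbiVariant); // FF 25 ...
+//   // The resolver then writes the 32-bit PC-relative offset of the GOT
+//   // entry holding the target address at Stub + 2.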
+
+// Assign an address to a symbol name and resolve all the relocations
+// associated with it.
+void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
+ uint64_t Addr) {
+ // The address to use for relocation resolution is not
+ // the address of the local section buffer. We must be targeting a remote
+ // execution environment of some sort. Relocations can't
+ // be applied until all the sections have been moved. The client must
+ // trigger this with a call to MCJIT::finalize() or
+ // RuntimeDyld::resolveRelocations().
+ //
+ // Addr is a uint64_t because we can't assume the pointer width
+ // of the target is the same as that of the host. Just use a generic
+ // "big enough" type.
+ LLVM_DEBUG(
+ dbgs() << "Reassigning address for section " << SectionID << " ("
+ << Sections[SectionID].getName() << "): "
+ << format("0x%016" PRIx64, Sections[SectionID].getLoadAddress())
+ << " -> " << format("0x%016" PRIx64, Addr) << "\n");
+ Sections[SectionID].setLoadAddress(Addr);
+}
+
+void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
+ uint64_t Value) {
+ for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
+ const RelocationEntry &RE = Relocs[i];
+ // Ignore relocations for sections that were not loaded
+ if (Sections[RE.SectionID].getAddress() == nullptr)
+ continue;
+ resolveRelocation(RE, Value);
+ }
+}
+
+void RuntimeDyldImpl::applyExternalSymbolRelocations(
+ const StringMap<JITEvaluatedSymbol> ExternalSymbolMap) {
+ while (!ExternalSymbolRelocations.empty()) {
+
+ StringMap<RelocationList>::iterator i = ExternalSymbolRelocations.begin();
+
+ StringRef Name = i->first();
+ if (Name.size() == 0) {
+ // This is an absolute symbol, use an address of zero.
+ LLVM_DEBUG(dbgs() << "Resolving absolute relocations."
+ << "\n");
+ RelocationList &Relocs = i->second;
+ resolveRelocationList(Relocs, 0);
+ } else {
+ uint64_t Addr = 0;
+ JITSymbolFlags Flags;
+ RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(Name);
+ if (Loc == GlobalSymbolTable.end()) {
+ auto RRI = ExternalSymbolMap.find(Name);
+ assert(RRI != ExternalSymbolMap.end() && "No result for symbol");
+ Addr = RRI->second.getAddress();
+ Flags = RRI->second.getFlags();
+ // The call to getSymbolAddress may have caused additional modules to
+ // be loaded, which may have added new entries to the
+ // ExternalSymbolRelocations map. Consequently, we need to update our
+ // iterator. This is also why retrieval of the relocation list
+ // associated with this symbol is deferred until below this point.
+ // New entries may have been added to the relocation list.
+ i = ExternalSymbolRelocations.find(Name);
+ } else {
+ // We found the symbol in our global table. It was probably in a
+ // Module that we loaded previously.
+ const auto &SymInfo = Loc->second;
+ Addr = getSectionLoadAddress(SymInfo.getSectionID()) +
+ SymInfo.getOffset();
+ Flags = SymInfo.getFlags();
+ }
+
+ // FIXME: Implement error handling that doesn't kill the host program!
+ if (!Addr)
+ report_fatal_error("Program used external function '" + Name +
+ "' which could not be resolved!");
+
+ // If Resolver returned UINT64_MAX, the client wants to handle this symbol
+ // manually and we shouldn't resolve its relocations.
+ if (Addr != UINT64_MAX) {
+
+ // Tweak the address based on the symbol flags if necessary.
+ // For example, this is used by RuntimeDyldMachOARM to toggle the low bit
+ // if the target symbol is Thumb.
+ Addr = modifyAddressBasedOnFlags(Addr, Flags);
+
+ LLVM_DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+ << format("0x%lx", Addr) << "\n");
+ // This list may have been updated when we called getSymbolAddress, so
+ // don't change this code to get the list earlier.
+ RelocationList &Relocs = i->second;
+ resolveRelocationList(Relocs, Addr);
+ }
+ }
+
+ ExternalSymbolRelocations.erase(i);
+ }
+}
+
+Error RuntimeDyldImpl::resolveExternalSymbols() {
+ StringMap<JITEvaluatedSymbol> ExternalSymbolMap;
+
+ // Resolution can trigger emission of more symbols, so iterate until
+ // we've resolved *everything*.
+ {
+ JITSymbolResolver::LookupSet ResolvedSymbols;
+
+ while (true) {
+ JITSymbolResolver::LookupSet NewSymbols;
+
+ for (auto &RelocKV : ExternalSymbolRelocations) {
+ StringRef Name = RelocKV.first();
+ if (!Name.empty() && !GlobalSymbolTable.count(Name) &&
+ !ResolvedSymbols.count(Name))
+ NewSymbols.insert(Name);
+ }
+
+ if (NewSymbols.empty())
+ break;
+
+#ifdef _MSC_VER
+ using ExpectedLookupResult =
+ MSVCPExpected<JITSymbolResolver::LookupResult>;
+#else
+ using ExpectedLookupResult = Expected<JITSymbolResolver::LookupResult>;
+#endif
+
+ auto NewSymbolsP = std::make_shared<std::promise<ExpectedLookupResult>>();
+ auto NewSymbolsF = NewSymbolsP->get_future();
+ Resolver.lookup(NewSymbols,
+ [=](Expected<JITSymbolResolver::LookupResult> Result) {
+ NewSymbolsP->set_value(std::move(Result));
+ });
+
+ auto NewResolverResults = NewSymbolsF.get();
+
+ if (!NewResolverResults)
+ return NewResolverResults.takeError();
+
+ assert(NewResolverResults->size() == NewSymbols.size() &&
+ "Should have errored on unresolved symbols");
+
+ for (auto &RRKV : *NewResolverResults) {
+ assert(!ResolvedSymbols.count(RRKV.first) && "Redundant resolution?");
+ ExternalSymbolMap.insert(RRKV);
+ ResolvedSymbols.insert(RRKV.first);
+ }
+ }
+ }
+
+ applyExternalSymbolRelocations(ExternalSymbolMap);
+
+ return Error::success();
+}
+
+void RuntimeDyldImpl::finalizeAsync(
+ std::unique_ptr<RuntimeDyldImpl> This,
+ unique_function<void(Error)> OnEmitted,
+ std::unique_ptr<MemoryBuffer> UnderlyingBuffer) {
+
+ auto SharedThis = std::shared_ptr<RuntimeDyldImpl>(std::move(This));
+ auto PostResolveContinuation =
+ [SharedThis, OnEmitted = std::move(OnEmitted),
+ UnderlyingBuffer = std::move(UnderlyingBuffer)](
+ Expected<JITSymbolResolver::LookupResult> Result) mutable {
+ if (!Result) {
+ OnEmitted(Result.takeError());
+ return;
+ }
+
+ // Copy the result into a StringMap, where the keys are held by value.
+ StringMap<JITEvaluatedSymbol> Resolved;
+ for (auto &KV : *Result)
+ Resolved[KV.first] = KV.second;
+
+ SharedThis->applyExternalSymbolRelocations(Resolved);
+ SharedThis->resolveLocalRelocations();
+ SharedThis->registerEHFrames();
+ std::string ErrMsg;
+ if (SharedThis->MemMgr.finalizeMemory(&ErrMsg))
+ OnEmitted(make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode()));
+ else
+ OnEmitted(Error::success());
+ };
+
+ JITSymbolResolver::LookupSet Symbols;
+
+ for (auto &RelocKV : SharedThis->ExternalSymbolRelocations) {
+ StringRef Name = RelocKV.first();
+ assert(!Name.empty() && "Symbol has no name?");
+ assert(!SharedThis->GlobalSymbolTable.count(Name) &&
+ "Name already processed. RuntimeDyld instances can not be re-used "
+ "when finalizing with finalizeAsync.");
+ Symbols.insert(Name);
+ }
+
+ if (!Symbols.empty()) {
+ SharedThis->Resolver.lookup(Symbols, std::move(PostResolveContinuation));
+ } else
+ PostResolveContinuation(std::map<StringRef, JITEvaluatedSymbol>());
+}
+
+//===----------------------------------------------------------------------===//
+// RuntimeDyld class implementation
+
+uint64_t RuntimeDyld::LoadedObjectInfo::getSectionLoadAddress(
+ const object::SectionRef &Sec) const {
+
+ auto I = ObjSecToIDMap.find(Sec);
+ if (I != ObjSecToIDMap.end())
+ return RTDyld.Sections[I->second].getLoadAddress();
+
+ return 0;
+}
+
+void RuntimeDyld::MemoryManager::anchor() {}
+void JITSymbolResolver::anchor() {}
+void LegacyJITSymbolResolver::anchor() {}
+
+RuntimeDyld::RuntimeDyld(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : MemMgr(MemMgr), Resolver(Resolver) {
+ // FIXME: There's a potential issue lurking here if a single instance of
+ // RuntimeDyld is used to load multiple objects. The current implementation
+ // associates a single memory manager with a RuntimeDyld instance. Even
+ // though the public class spawns a new 'impl' instance for each load,
+ // they share a single memory manager. This can become a problem when page
+ // permissions are applied.
+ Dyld = nullptr;
+ ProcessAllSections = false;
+}
+
+RuntimeDyld::~RuntimeDyld() {}
+
+static std::unique_ptr<RuntimeDyldCOFF>
+createRuntimeDyldCOFF(
+ Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver, bool ProcessAllSections,
+ RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+ std::unique_ptr<RuntimeDyldCOFF> Dyld =
+ RuntimeDyldCOFF::create(Arch, MM, Resolver);
+ Dyld->setProcessAllSections(ProcessAllSections);
+ Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+ return Dyld;
+}
+
+static std::unique_ptr<RuntimeDyldELF>
+createRuntimeDyldELF(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver, bool ProcessAllSections,
+ RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+ std::unique_ptr<RuntimeDyldELF> Dyld =
+ RuntimeDyldELF::create(Arch, MM, Resolver);
+ Dyld->setProcessAllSections(ProcessAllSections);
+ Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+ return Dyld;
+}
+
+static std::unique_ptr<RuntimeDyldMachO>
+createRuntimeDyldMachO(
+ Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver,
+ bool ProcessAllSections,
+ RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+ std::unique_ptr<RuntimeDyldMachO> Dyld =
+ RuntimeDyldMachO::create(Arch, MM, Resolver);
+ Dyld->setProcessAllSections(ProcessAllSections);
+ Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+ return Dyld;
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyld::loadObject(const ObjectFile &Obj) {
+ if (!Dyld) {
+ if (Obj.isELF())
+ Dyld =
+ createRuntimeDyldELF(static_cast<Triple::ArchType>(Obj.getArch()),
+ MemMgr, Resolver, ProcessAllSections,
+ std::move(NotifyStubEmitted));
+ else if (Obj.isMachO())
+ Dyld = createRuntimeDyldMachO(
+ static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
+ ProcessAllSections, std::move(NotifyStubEmitted));
+ else if (Obj.isCOFF())
+ Dyld = createRuntimeDyldCOFF(
+ static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
+ ProcessAllSections, std::move(NotifyStubEmitted));
+ else
+ report_fatal_error("Incompatible object format!");
+ }
+
+ if (!Dyld->isCompatibleFile(Obj))
+ report_fatal_error("Incompatible object format!");
+
+ auto LoadedObjInfo = Dyld->loadObject(Obj);
+ MemMgr.notifyObjectLoaded(*this, Obj);
+ return LoadedObjInfo;
+}
+
+void *RuntimeDyld::getSymbolLocalAddress(StringRef Name) const {
+ if (!Dyld)
+ return nullptr;
+ return Dyld->getSymbolLocalAddress(Name);
+}
+
+unsigned RuntimeDyld::getSymbolSectionID(StringRef Name) const {
+ assert(Dyld && "No RuntimeDyld instance attached");
+ return Dyld->getSymbolSectionID(Name);
+}
+
+JITEvaluatedSymbol RuntimeDyld::getSymbol(StringRef Name) const {
+ if (!Dyld)
+ return nullptr;
+ return Dyld->getSymbol(Name);
+}
+
+std::map<StringRef, JITEvaluatedSymbol> RuntimeDyld::getSymbolTable() const {
+ if (!Dyld)
+ return std::map<StringRef, JITEvaluatedSymbol>();
+ return Dyld->getSymbolTable();
+}
+
+void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
+
+void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
+ Dyld->reassignSectionAddress(SectionID, Addr);
+}
+
+void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) {
+ Dyld->mapSectionAddress(LocalAddress, TargetAddress);
+}
+
+bool RuntimeDyld::hasError() { return Dyld->hasError(); }
+
+StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
+
+void RuntimeDyld::finalizeWithMemoryManagerLocking() {
+ bool MemoryFinalizationLocked = MemMgr.FinalizationLocked;
+ MemMgr.FinalizationLocked = true;
+ resolveRelocations();
+ registerEHFrames();
+ if (!MemoryFinalizationLocked) {
+ MemMgr.finalizeMemory();
+ MemMgr.FinalizationLocked = false;
+ }
+}
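+
+// A minimal usage sketch for the public RuntimeDyld API (illustrative;
+// MyMemMgr and MyResolver stand in for client-provided implementations):
+//
+//   RuntimeDyld RTDyld(MyMemMgr, MyResolver);
+//   std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info =
+//       RTDyld.loadObject(Obj);
+//   if (RTDyld.hasError())
+//     report_fatal_error(RTDyld.getErrorString());
+//   // Resolve relocations, register EH frames and finalize memory.
+//   RTDyld.finalizeWithMemoryManagerLocking();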
+
+StringRef RuntimeDyld::getSectionContent(unsigned SectionID) const {
+ assert(Dyld && "No Dyld instance attached");
+ return Dyld->getSectionContent(SectionID);
+}
+
+uint64_t RuntimeDyld::getSectionLoadAddress(unsigned SectionID) const {
+ assert(Dyld && "No Dyld instance attached");
+ return Dyld->getSectionLoadAddress(SectionID);
+}
+
+void RuntimeDyld::registerEHFrames() {
+ if (Dyld)
+ Dyld->registerEHFrames();
+}
+
+void RuntimeDyld::deregisterEHFrames() {
+ if (Dyld)
+ Dyld->deregisterEHFrames();
+}
+// FIXME: Kill this with fire once we have a new JIT linker: this is only here
+// so that we can re-use RuntimeDyld's implementation without twisting the
+// interface any further for ORC's purposes.
+void jitLinkForORC(object::ObjectFile &Obj,
+ std::unique_ptr<MemoryBuffer> UnderlyingBuffer,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver, bool ProcessAllSections,
+ unique_function<Error(
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObj,
+ std::map<StringRef, JITEvaluatedSymbol>)>
+ OnLoaded,
+ unique_function<void(Error)> OnEmitted) {
+
+ RuntimeDyld RTDyld(MemMgr, Resolver);
+ RTDyld.setProcessAllSections(ProcessAllSections);
+
+ auto Info = RTDyld.loadObject(Obj);
+
+ if (RTDyld.hasError()) {
+ OnEmitted(make_error<StringError>(RTDyld.getErrorString(),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ if (auto Err = OnLoaded(std::move(Info), RTDyld.getSymbolTable()))
+ OnEmitted(std::move(Err));
+
+ RuntimeDyldImpl::finalizeAsync(std::move(RTDyld.Dyld), std::move(OnEmitted),
+ std::move(UnderlyingBuffer));
+}
+
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
new file mode 100644
index 0000000000000..27a7690db34fc
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
@@ -0,0 +1,82 @@
+//===-- RuntimeDyldCOFF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of COFF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldCOFF.h"
+#include "Targets/RuntimeDyldCOFFI386.h"
+#include "Targets/RuntimeDyldCOFFThumb.h"
+#include "Targets/RuntimeDyldCOFFX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+class LoadedCOFFObjectInfo final
+ : public LoadedObjectInfoHelper<LoadedCOFFObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
+public:
+ LoadedCOFFObjectInfo(
+ RuntimeDyldImpl &RTDyld,
+ RuntimeDyld::LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+ OwningBinary<ObjectFile>
+ getObjectForDebug(const ObjectFile &Obj) const override {
+ return OwningBinary<ObjectFile>();
+ }
+};
+} // end anonymous namespace
+
+namespace llvm {
+
+std::unique_ptr<RuntimeDyldCOFF>
+llvm::RuntimeDyldCOFF::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default: llvm_unreachable("Unsupported target for RuntimeDyldCOFF.");
+ case Triple::x86:
+ return std::make_unique<RuntimeDyldCOFFI386>(MemMgr, Resolver);
+ case Triple::thumb:
+ return std::make_unique<RuntimeDyldCOFFThumb>(MemMgr, Resolver);
+ case Triple::x86_64:
+ return std::make_unique<RuntimeDyldCOFFX86_64>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldCOFF::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O)) {
+ return std::make_unique<LoadedCOFFObjectInfo>(*this, *ObjSectionToIDOrErr);
+ } else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
+uint64_t RuntimeDyldCOFF::getSymbolOffset(const SymbolRef &Sym) {
+ // The value in a relocatable COFF object is the offset.
+ return Sym.getValue();
+}
+
+bool RuntimeDyldCOFF::isCompatibleFile(const object::ObjectFile &Obj) const {
+ return Obj.isCOFF();
+}
+
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
new file mode 100644
index 0000000000000..4efd18a2e6c57
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
@@ -0,0 +1,48 @@
+//===-- RuntimeDyldCOFF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIME_DYLD_COFF_H
+#define LLVM_RUNTIME_DYLD_COFF_H
+
+#include "RuntimeDyldImpl.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+// Common base class for COFF dynamic linker support.
+// Concrete subclasses for each target can be found in ./Targets.
+class RuntimeDyldCOFF : public RuntimeDyldImpl {
+
+public:
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &Obj) override;
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+
+ static std::unique_ptr<RuntimeDyldCOFF>
+ create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+protected:
+ RuntimeDyldCOFF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldImpl(MemMgr, Resolver) {}
+ uint64_t getSymbolOffset(const SymbolRef &Sym);
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
new file mode 100644
index 0000000000000..b9c5a12e08d8b
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -0,0 +1,875 @@
+//===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/Path.h"
+#include <cctype>
+#include <memory>
+#include <utility>
+
+#define DEBUG_TYPE "rtdyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+// Helper class that implements the language evaluated by RuntimeDyldChecker.
+class RuntimeDyldCheckerExprEval {
+public:
+ RuntimeDyldCheckerExprEval(const RuntimeDyldCheckerImpl &Checker,
+ raw_ostream &ErrStream)
+ : Checker(Checker) {}
+
+ bool evaluate(StringRef Expr) const {
+ // Expect equality expression of the form 'LHS = RHS'.
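+ // For example, if the symbol foo has been assigned target address 0x1000,
+ // then "foo + 8 = 0x1008" parses "foo + 8" as the LHS, "0x1008" as the
+ // RHS, and evaluates to true.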
+ Expr = Expr.trim();
+ size_t EQIdx = Expr.find('=');
+
+ ParseContext OutsideLoad(false);
+
+ // Evaluate LHS.
+ StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
+ StringRef RemainingExpr;
+ EvalResult LHSResult;
+ std::tie(LHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(LHSExpr, OutsideLoad), OutsideLoad);
+ if (LHSResult.hasError())
+ return handleError(Expr, LHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
+
+ // Evaluate RHS.
+ StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RHSExpr, OutsideLoad), OutsideLoad);
+ if (RHSResult.hasError())
+ return handleError(Expr, RHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
+
+ if (LHSResult.getValue() != RHSResult.getValue()) {
+ Checker.ErrStream << "Expression '" << Expr << "' is false: "
+ << format("0x%" PRIx64, LHSResult.getValue())
+ << " != " << format("0x%" PRIx64, RHSResult.getValue())
+ << "\n";
+ return false;
+ }
+ return true;
+ }
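+
+ // For illustration, a few well-formed equality expressions of the kind
+ // evaluate() accepts (all symbol and file names here are hypothetical):
+ //
+ //   next_pc(foo) = foo + 4
+ //   decode_operand(foo, 2) = 0x1234
+ //   stub_addr(test.o, foo) = 0x10000
+ //   *{8}(section_addr(test.o, .got) + 8) = foo
+ //   foo[15:0] = 0xbeef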
+
+private:
+ // RuntimeDyldCheckerExprEval requires some context when parsing exprs. In
+ // particular, it needs to know whether a symbol is being evaluated in the
+ // context of a load, in which case we want the linker's local address for
+ // the symbol, or outside of a load, in which case we want the symbol's
+ // address in the remote target.
+
+ struct ParseContext {
+ bool IsInsideLoad;
+ ParseContext(bool IsInsideLoad) : IsInsideLoad(IsInsideLoad) {}
+ };
+
+ const RuntimeDyldCheckerImpl &Checker;
+
+ enum class BinOpToken : unsigned {
+ Invalid,
+ Add,
+ Sub,
+ BitwiseAnd,
+ BitwiseOr,
+ ShiftLeft,
+ ShiftRight
+ };
+
+ class EvalResult {
+ public:
+ EvalResult() : Value(0), ErrorMsg("") {}
+ EvalResult(uint64_t Value) : Value(Value), ErrorMsg("") {}
+ EvalResult(std::string ErrorMsg)
+ : Value(0), ErrorMsg(std::move(ErrorMsg)) {}
+ uint64_t getValue() const { return Value; }
+ bool hasError() const { return ErrorMsg != ""; }
+ const std::string &getErrorMsg() const { return ErrorMsg; }
+
+ private:
+ uint64_t Value;
+ std::string ErrorMsg;
+ };
+
+ StringRef getTokenForError(StringRef Expr) const {
+ if (Expr.empty())
+ return "";
+
+ StringRef Token, Remaining;
+ if (isalpha(Expr[0]))
+ std::tie(Token, Remaining) = parseSymbol(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(Token, Remaining) = parseNumberString(Expr);
+ else {
+ unsigned TokLen = 1;
+ if (Expr.startswith("<<") || Expr.startswith(">>"))
+ TokLen = 2;
+ Token = Expr.substr(0, TokLen);
+ }
+ return Token;
+ }
+
+ EvalResult unexpectedToken(StringRef TokenStart, StringRef SubExpr,
+ StringRef ErrText) const {
+ std::string ErrorMsg("Encountered unexpected token '");
+ ErrorMsg += getTokenForError(TokenStart);
+ if (SubExpr != "") {
+ ErrorMsg += "' while parsing subexpression '";
+ ErrorMsg += SubExpr;
+ }
+ ErrorMsg += "'";
+ if (ErrText != "") {
+ ErrorMsg += " ";
+ ErrorMsg += ErrText;
+ }
+ return EvalResult(std::move(ErrorMsg));
+ }
+
+ bool handleError(StringRef Expr, const EvalResult &R) const {
+ assert(R.hasError() && "Not an error result.");
+ Checker.ErrStream << "Error evaluating expression '" << Expr
+ << "': " << R.getErrorMsg() << "\n";
+ return false;
+ }
+
+ std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
+ if (Expr.empty())
+ return std::make_pair(BinOpToken::Invalid, "");
+
+ // Handle the two 2-character tokens.
+ if (Expr.startswith("<<"))
+ return std::make_pair(BinOpToken::ShiftLeft, Expr.substr(2).ltrim());
+ if (Expr.startswith(">>"))
+ return std::make_pair(BinOpToken::ShiftRight, Expr.substr(2).ltrim());
+
+ // Handle one-character tokens.
+ BinOpToken Op;
+ switch (Expr[0]) {
+ default:
+ return std::make_pair(BinOpToken::Invalid, Expr);
+ case '+':
+ Op = BinOpToken::Add;
+ break;
+ case '-':
+ Op = BinOpToken::Sub;
+ break;
+ case '&':
+ Op = BinOpToken::BitwiseAnd;
+ break;
+ case '|':
+ Op = BinOpToken::BitwiseOr;
+ break;
+ }
+
+ return std::make_pair(Op, Expr.substr(1).ltrim());
+ }
+
+ EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
+ const EvalResult &RHSResult) const {
+ switch (Op) {
+ default:
+ llvm_unreachable("Tried to evaluate unrecognized operation.");
+ case BinOpToken::Add:
+ return EvalResult(LHSResult.getValue() + RHSResult.getValue());
+ case BinOpToken::Sub:
+ return EvalResult(LHSResult.getValue() - RHSResult.getValue());
+ case BinOpToken::BitwiseAnd:
+ return EvalResult(LHSResult.getValue() & RHSResult.getValue());
+ case BinOpToken::BitwiseOr:
+ return EvalResult(LHSResult.getValue() | RHSResult.getValue());
+ case BinOpToken::ShiftLeft:
+ return EvalResult(LHSResult.getValue() << RHSResult.getValue());
+ case BinOpToken::ShiftRight:
+ return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
+ }
+ }
+
+ // Parse a symbol and return a (string, string) pair representing the symbol
+ // name and expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
+ size_t FirstNonSymbol = Expr.find_first_not_of("0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ ":_.$");
+ return std::make_pair(Expr.substr(0, FirstNonSymbol),
+ Expr.substr(FirstNonSymbol).ltrim());
+ }
+
+ // Evaluate a call to decode_operand. Decode the instruction at the given
+ // symbol and get the value of the requested operand.
+ // Returns an error if the instruction cannot be decoded, or the requested
+ // operand is not an immediate.
+ // On success, returns a pair containing the value of the operand, plus
+ // the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(
+ EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OpIdxExpr;
+ std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (OpIdxExpr.hasError())
+ return std::make_pair(OpIdxExpr, "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size))
+ return std::make_pair(
+ EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
+ "");
+
+ unsigned OpIdx = OpIdxExpr.getValue();
+ if (OpIdx >= Inst.getNumOperands()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
+ << "' for instruction '" << Symbol
+ << "'. Instruction has only "
+ << format("%i", Inst.getNumOperands())
+ << " operands.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream, Checker.InstPrinter);
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ const MCOperand &Op = Inst.getOperand(OpIdx);
+ if (!Op.isImm()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Operand '" << format("%i", OpIdx) << "' of instruction '"
+ << Symbol << "' is not an immediate.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream, Checker.InstPrinter);
+
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
+ }
+
+ // Evaluate a call to next_pc.
+ // Decode the instruction at the given symbol and return the program
+ // counter of the following instruction.
+ // Returns an error if the instruction cannot be decoded.
+ // On success, returns a pair containing the next PC, plus the expression
+ // remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr,
+ ParseContext PCtx) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(
+ EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t InstSize;
+ if (!decodeInst(Symbol, Inst, InstSize))
+ return std::make_pair(
+ EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
+ "");
+
+ uint64_t SymbolAddr = PCtx.IsInsideLoad
+ ? Checker.getSymbolLocalAddr(Symbol)
+ : Checker.getSymbolRemoteAddr(Symbol);
+ uint64_t NextPC = SymbolAddr + InstSize;
+
+ return std::make_pair(EvalResult(NextPC), RemainingExpr);
+ }
+
+ // Evaluate a call to stub_addr/got_addr.
+ // Look up and return the address of the stub or GOT entry for the given
+ // (<stub container name>, <symbol name>) pair.
+ // On success, returns a pair containing the stub address, plus the expression
+ // remaining to be evaluated.
+ std::pair<EvalResult, StringRef>
+ evalStubOrGOTAddr(StringRef Expr, ParseContext PCtx, bool IsStubAddr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Handle file-name specially, as it may contain characters that aren't
+ // legal for symbols.
+ StringRef StubContainerName;
+ size_t CommaIdx = RemainingExpr.find(',');
+ StubContainerName = RemainingExpr.substr(0, CommaIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(CommaIdx).ltrim();
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ uint64_t StubAddr;
+ std::string ErrorMsg = "";
+ std::tie(StubAddr, ErrorMsg) = Checker.getStubOrGOTAddrFor(
+ StubContainerName, Symbol, PCtx.IsInsideLoad, IsStubAddr);
+
+ if (ErrorMsg != "")
+ return std::make_pair(EvalResult(ErrorMsg), "");
+
+ return std::make_pair(EvalResult(StubAddr), RemainingExpr);
+ }
+
+ std::pair<EvalResult, StringRef> evalSectionAddr(StringRef Expr,
+ ParseContext PCtx) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Handle file-name specially, as it may contain characters that aren't
+ // legal for symbols.
+ StringRef FileName;
+ size_t CommaIdx = RemainingExpr.find(',');
+ FileName = RemainingExpr.substr(0, CommaIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(CommaIdx).ltrim();
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ StringRef SectionName;
+ std::tie(SectionName, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ uint64_t SecAddr;
+ std::string ErrorMsg = "";
+ std::tie(SecAddr, ErrorMsg) = Checker.getSectionAddr(
+ FileName, SectionName, PCtx.IsInsideLoad);
+
+ if (ErrorMsg != "")
+ return std::make_pair(EvalResult(ErrorMsg), "");
+
+ return std::make_pair(EvalResult(SecAddr), RemainingExpr);
+ }
+
+ // Evaluate an identifier expr, which may be a symbol, or a call to
+ // one of the builtin functions: decode_operand, next_pc, stub_addr,
+ // got_addr, or section_addr.
+ // Return the result, plus the expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ StringRef Symbol;
+ StringRef RemainingExpr;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);
+
+ // Check for builtin function calls.
+ if (Symbol == "decode_operand")
+ return evalDecodeOperand(RemainingExpr);
+ else if (Symbol == "next_pc")
+ return evalNextPC(RemainingExpr, PCtx);
+ else if (Symbol == "stub_addr")
+ return evalStubOrGOTAddr(RemainingExpr, PCtx, true);
+ else if (Symbol == "got_addr")
+ return evalStubOrGOTAddr(RemainingExpr, PCtx, false);
+ else if (Symbol == "section_addr")
+ return evalSectionAddr(RemainingExpr, PCtx);
+
+ if (!Checker.isSymbolValid(Symbol)) {
+ std::string ErrMsg("No known address for symbol '");
+ ErrMsg += Symbol;
+ ErrMsg += "'";
+ if (Symbol.startswith("L"))
+ ErrMsg += " (this appears to be an assembler local label - "
+ " perhaps drop the 'L'?)";
+
+ return std::make_pair(EvalResult(ErrMsg), "");
+ }
+
+ // The value for the symbol depends on the context we're evaluating in:
+ // Inside a load this is the address in the linker's memory; outside a
+ // load it's the address in the target process's memory.
+ uint64_t Value = PCtx.IsInsideLoad ? Checker.getSymbolLocalAddr(Symbol)
+ : Checker.getSymbolRemoteAddr(Symbol);
+
+ // Looks like a plain symbol reference.
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Parse a number (hexadecimal or decimal) and return a (string, string)
+ // pair representing the number and the expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
+ size_t FirstNonDigit = StringRef::npos;
+ if (Expr.startswith("0x")) {
+ FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ } else {
+ FirstNonDigit = Expr.find_first_not_of("0123456789");
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ }
+ return std::make_pair(Expr.substr(0, FirstNonDigit),
+ Expr.substr(FirstNonDigit));
+ }
+
+ // Evaluate a constant numeric expression (hexadecimal or decimal) and
+ // return a pair containing the result, and the expression remaining to be
+ // evaluated.
+ std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
+ StringRef ValueStr;
+ StringRef RemainingExpr;
+ std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
+
+ if (ValueStr.empty() || !isdigit(ValueStr[0]))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected number"), "");
+ uint64_t Value;
+ ValueStr.getAsInteger(0, Value);
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Evaluate an expression of the form "(<expr>)" and return a pair
+ // containing the result of evaluating <expr>, plus the expression
+ // remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ assert(Expr.startswith("(") && "Not a parenthesized expression");
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim(), PCtx), PCtx);
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, "");
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate an expression in one of the following forms:
+ // *{<number>}<expr>
+ // Return a pair containing the result, plus the expression remaining to be
+ // parsed.
+ std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
+ assert(Expr.startswith("*") && "Not a load expression");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Parse read size.
+ if (!RemainingExpr.startswith("{"))
+ return std::make_pair(EvalResult("Expected '{' following '*'."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ EvalResult ReadSizeExpr;
+ std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (ReadSizeExpr.hasError())
+ return std::make_pair(ReadSizeExpr, RemainingExpr);
+ uint64_t ReadSize = ReadSizeExpr.getValue();
+ if (ReadSize < 1 || ReadSize > 8)
+ return std::make_pair(EvalResult("Invalid size for dereference."), "");
+ if (!RemainingExpr.startswith("}"))
+ return std::make_pair(EvalResult("Missing '}' for dereference."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ // Evaluate the expression representing the load address.
+ ParseContext LoadCtx(true);
+ EvalResult LoadAddrExprResult;
+ std::tie(LoadAddrExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RemainingExpr, LoadCtx), LoadCtx);
+
+ if (LoadAddrExprResult.hasError())
+ return std::make_pair(LoadAddrExprResult, "");
+
+ uint64_t LoadAddr = LoadAddrExprResult.getValue();
+
+ // If there is no error but the content pointer is null then this is a
+ // zero-fill symbol/section.
+ if (LoadAddr == 0)
+ return std::make_pair(0, RemainingExpr);
+
+ return std::make_pair(
+ EvalResult(Checker.readMemoryAtAddr(LoadAddr, ReadSize)),
+ RemainingExpr);
+ }
+
+ // Evaluate a "simple" expression. This is any expression that _isn't_ an
+ // un-parenthesized binary expression.
+ //
+ // "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
+ //
+ // Returns a pair containing the result of the evaluation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+
+ if (Expr.empty())
+ return std::make_pair(EvalResult("Unexpected end of expression"), "");
+
+ if (Expr[0] == '(')
+ std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr, PCtx);
+ else if (Expr[0] == '*')
+ std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
+ else if (isalpha(Expr[0]) || Expr[0] == '_')
+ std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr, PCtx);
+ else if (isdigit(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
+ else
+ return std::make_pair(
+ unexpectedToken(Expr, Expr,
+ "expected '(', '*', identifier, or number"), "");
+
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, RemainingExpr);
+
+ // Evaluate bit-slice if present.
+ if (RemainingExpr.startswith("["))
+ std::tie(SubExprResult, RemainingExpr) =
+ evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));
+
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate a bit-slice of an expression.
+ // A bit-slice has the form "<expr>[high:low]". The result of evaluating a
+ // slice is the bits between high and low (inclusive) in the original
+ // expression, right shifted so that the "low" bit is in position 0 in the
+ // result.
+ // Returns a pair containing the result of the slice operation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef>
+ evalSliceExpr(const std::pair<EvalResult, StringRef> &Ctx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) = Ctx;
+
+ assert(RemainingExpr.startswith("[") && "Not a slice expr.");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult HighBitExpr;
+ std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (HighBitExpr.hasError())
+ return std::make_pair(HighBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith(":"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ':'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult LowBitExpr;
+ std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (LowBitExpr.hasError())
+ return std::make_pair(LowBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith("]"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ']'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ unsigned HighBit = HighBitExpr.getValue();
+ unsigned LowBit = LowBitExpr.getValue();
+ uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
+ uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
+ return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
+ }
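+
+ // Worked example: for a subexpression value of 0xDEADBEEF and the slice
+ // "[15:8]", HighBit = 15, LowBit = 8, Mask = (1 << 8) - 1 = 0xFF, and the
+ // result is (0xDEADBEEF >> 8) & 0xFF = 0xBE. Note that the mask
+ // computation assumes a slice narrower than 64 bits: "[63:0]" would shift
+ // by 64, which is undefined behaviour.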
+
+ // Evaluate a "complex" expression.
+ // Takes an already evaluated subexpression and checks for the presence of a
+ // binary operator, computing the result of the binary operation if one is
+ // found. Used to make arithmetic expressions left-associative.
+ // Returns a pair containing the ultimate result of evaluating the
+ // expression, plus the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef>
+ evalComplexExpr(const std::pair<EvalResult, StringRef> &LHSAndRemaining,
+ ParseContext PCtx) const {
+ EvalResult LHSResult;
+ StringRef RemainingExpr;
+ std::tie(LHSResult, RemainingExpr) = LHSAndRemaining;
+
+ // If there was an error, or there's nothing left to evaluate, return the
+ // result.
+ if (LHSResult.hasError() || RemainingExpr == "")
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // Otherwise check if this is a binary expression.
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+
+ // If this isn't a recognized expression just return.
+ if (BinOp == BinOpToken::Invalid)
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr, PCtx);
+
+ // If there was an error evaluating the RHS, return it.
+ if (RHSResult.hasError())
+ return std::make_pair(RHSResult, RemainingExpr);
+
+ // This is a binary expression - evaluate and try to continue as a
+ // complex expr.
+ EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));
+
+ return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr), PCtx);
+ }
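+
+ // Note that evaluation is strictly left-associative with no operator
+ // precedence: "2 + 3 << 1" folds as (2 + 3) << 1 = 0xa, not 2 + (3 << 1).
+ // Parentheses must be used to get any other grouping.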
+
+ bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size) const {
+ MCDisassembler *Dis = Checker.Disassembler;
+ StringRef SymbolMem = Checker.getSymbolContent(Symbol);
+ ArrayRef<uint8_t> SymbolBytes(SymbolMem.bytes_begin(), SymbolMem.size());
+
+ MCDisassembler::DecodeStatus S =
+ Dis->getInstruction(Inst, Size, SymbolBytes, 0, nulls(), nulls());
+
+ return (S == MCDisassembler::Success);
+ }
+};
+} // end namespace llvm
+
+RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(
+ IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo, support::endianness Endianness,
+ MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
+ raw_ostream &ErrStream)
+ : IsSymbolValid(std::move(IsSymbolValid)),
+ GetSymbolInfo(std::move(GetSymbolInfo)),
+ GetSectionInfo(std::move(GetSectionInfo)),
+ GetStubInfo(std::move(GetStubInfo)), GetGOTInfo(std::move(GetGOTInfo)),
+ Endianness(Endianness), Disassembler(Disassembler),
+ InstPrinter(InstPrinter), ErrStream(ErrStream) {}
+
+bool RuntimeDyldCheckerImpl::check(StringRef CheckExpr) const {
+ CheckExpr = CheckExpr.trim();
+ LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+ << "'...\n");
+ RuntimeDyldCheckerExprEval P(*this, ErrStream);
+ bool Result = P.evaluate(CheckExpr);
+ (void)Result;
+ LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+ << (Result ? "passed" : "FAILED") << ".\n");
+ return Result;
+}
+
+bool RuntimeDyldCheckerImpl::checkAllRulesInBuffer(StringRef RulePrefix,
+ MemoryBuffer *MemBuf) const {
+ bool DidAllTestsPass = true;
+ unsigned NumRules = 0;
+
+ const char *LineStart = MemBuf->getBufferStart();
+
+ // Eat whitespace.
+ while (LineStart != MemBuf->getBufferEnd() && std::isspace(*LineStart))
+ ++LineStart;
+
+ while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
+ const char *LineEnd = LineStart;
+ while (LineEnd != MemBuf->getBufferEnd() && *LineEnd != '\r' &&
+ *LineEnd != '\n')
+ ++LineEnd;
+
+ StringRef Line(LineStart, LineEnd - LineStart);
+ if (Line.startswith(RulePrefix)) {
+ DidAllTestsPass &= check(Line.substr(RulePrefix.size()));
+ ++NumRules;
+ }
+
+ // Eat whitespace.
+ LineStart = LineEnd;
+ while (LineStart != MemBuf->getBufferEnd() && std::isspace(*LineStart))
+ ++LineStart;
+ }
+ return DidAllTestsPass && (NumRules != 0);
+}
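+
+// For illustration, a buffer scanned with a RulePrefix of "# rtdyld-check:"
+// might contain lines such as (symbols hypothetical):
+//
+//   # rtdyld-check: *{4}foo = 0x2a
+//   # rtdyld-check: next_pc(main) = main + 4
+//
+// Only lines beginning with the prefix are evaluated as rules, and the
+// function succeeds only if at least one rule was found and all passed.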
+
+bool RuntimeDyldCheckerImpl::isSymbolValid(StringRef Symbol) const {
+ return IsSymbolValid(Symbol);
+}
+
+uint64_t RuntimeDyldCheckerImpl::getSymbolLocalAddr(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return 0;
+ }
+
+ if (SymInfo->isZeroFill())
+ return 0;
+
+ return static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(SymInfo->getContent().data()));
+}
+
+uint64_t RuntimeDyldCheckerImpl::getSymbolRemoteAddr(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return 0;
+ }
+
+ return SymInfo->getTargetAddress();
+}
+
+uint64_t RuntimeDyldCheckerImpl::readMemoryAtAddr(uint64_t SrcAddr,
+ unsigned Size) const {
+ uintptr_t PtrSizedAddr = static_cast<uintptr_t>(SrcAddr);
+ assert(PtrSizedAddr == SrcAddr && "Linker memory pointer out-of-range.");
+ void *Ptr = reinterpret_cast<void*>(PtrSizedAddr);
+
+ switch (Size) {
+ case 1:
+ return support::endian::read<uint8_t>(Ptr, Endianness);
+ case 2:
+ return support::endian::read<uint16_t>(Ptr, Endianness);
+ case 4:
+ return support::endian::read<uint32_t>(Ptr, Endianness);
+ case 8:
+ return support::endian::read<uint64_t>(Ptr, Endianness);
+ }
+ llvm_unreachable("Unsupported read size");
+}
+
+StringRef RuntimeDyldCheckerImpl::getSymbolContent(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return StringRef();
+ }
+ return SymInfo->getContent();
+}
+
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getSectionAddr(
+ StringRef FileName, StringRef SectionName, bool IsInsideLoad) const {
+
+ auto SecInfo = GetSectionInfo(FileName, SectionName);
+ if (!SecInfo) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ logAllUnhandledErrors(SecInfo.takeError(), ErrMsgStream,
+ "RTDyldChecker: ");
+ }
+ return std::make_pair(0, std::move(ErrMsg));
+ }
+
+ // If this address is being looked up in "load" mode, return the content
+ // pointer, otherwise return the target address.
+
+ uint64_t Addr = 0;
+
+ if (IsInsideLoad) {
+ if (SecInfo->isZeroFill())
+ Addr = 0;
+ else
+ Addr = pointerToJITTargetAddress(SecInfo->getContent().data());
+ } else
+ Addr = SecInfo->getTargetAddress();
+
+ return std::make_pair(Addr, "");
+}
+
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getStubOrGOTAddrFor(
+ StringRef StubContainerName, StringRef SymbolName, bool IsInsideLoad,
+ bool IsStubAddr) const {
+
+ auto StubInfo = IsStubAddr ? GetStubInfo(StubContainerName, SymbolName)
+ : GetGOTInfo(StubContainerName, SymbolName);
+
+ if (!StubInfo) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ logAllUnhandledErrors(StubInfo.takeError(), ErrMsgStream,
+ "RTDyldChecker: ");
+ }
+ return std::make_pair((uint64_t)0, std::move(ErrMsg));
+ }
+
+ uint64_t Addr = 0;
+
+ if (IsInsideLoad) {
+ if (StubInfo->isZeroFill())
+ return std::make_pair((uint64_t)0, "Detected zero-filled stub/GOT entry");
+ Addr = pointerToJITTargetAddress(StubInfo->getContent().data());
+ } else
+ Addr = StubInfo->getTargetAddress();
+
+ return std::make_pair(Addr, "");
+}
+
+RuntimeDyldChecker::RuntimeDyldChecker(
+ IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo, support::endianness Endianness,
+ MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
+ raw_ostream &ErrStream)
+ : Impl(::std::make_unique<RuntimeDyldCheckerImpl>(
+ std::move(IsSymbolValid), std::move(GetSymbolInfo),
+ std::move(GetSectionInfo), std::move(GetStubInfo),
+ std::move(GetGOTInfo), Endianness, Disassembler, InstPrinter,
+ ErrStream)) {}
+
+RuntimeDyldChecker::~RuntimeDyldChecker() {}
+
+bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
+ return Impl->check(CheckExpr);
+}
+
+bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
+ MemoryBuffer *MemBuf) const {
+ return Impl->checkAllRulesInBuffer(RulePrefix, MemBuf);
+}
+
+std::pair<uint64_t, std::string>
+RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
+ bool LocalAddress) {
+ return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
+}
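+
+// Typical usage sketch (driver and prefix shown for illustration only): a
+// tool such as llvm-rtdyld constructs a RuntimeDyldChecker over the loaded
+// objects and then runs
+//
+//   bool Passed = Checker.checkAllRulesInBuffer("# rtdyld-check:", Buf);
+//
+// with any failures reported on the ErrStream supplied at construction.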
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
new file mode 100644
index 0000000000000..ac9d4d4602174
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -0,0 +1,74 @@
+//===-- RuntimeDyldCheckerImpl.h -- RuntimeDyld test framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+
+#include "RuntimeDyldImpl.h"
+
+namespace llvm {
+
+class RuntimeDyldCheckerImpl {
+ friend class RuntimeDyldChecker;
+ friend class RuntimeDyldCheckerExprEval;
+
+ using IsSymbolValidFunction =
+ RuntimeDyldChecker::IsSymbolValidFunction;
+ using GetSymbolInfoFunction = RuntimeDyldChecker::GetSymbolInfoFunction;
+ using GetSectionInfoFunction = RuntimeDyldChecker::GetSectionInfoFunction;
+ using GetStubInfoFunction = RuntimeDyldChecker::GetStubInfoFunction;
+ using GetGOTInfoFunction = RuntimeDyldChecker::GetGOTInfoFunction;
+
+public:
+ RuntimeDyldCheckerImpl(
+ IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo, support::endianness Endianness,
+ MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
+ llvm::raw_ostream &ErrStream);
+
+ bool check(StringRef CheckExpr) const;
+ bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+private:
+
+ Expected<JITSymbolResolver::LookupResult>
+ lookup(const JITSymbolResolver::LookupSet &Symbols) const;
+
+ bool isSymbolValid(StringRef Symbol) const;
+ uint64_t getSymbolLocalAddr(StringRef Symbol) const;
+ uint64_t getSymbolRemoteAddr(StringRef Symbol) const;
+ uint64_t readMemoryAtAddr(uint64_t Addr, unsigned Size) const;
+
+ StringRef getSymbolContent(StringRef Symbol) const;
+
+ std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+ StringRef SectionName,
+ bool IsInsideLoad) const;
+
+ std::pair<uint64_t, std::string>
+ getStubOrGOTAddrFor(StringRef StubContainerName, StringRef Symbol,
+ bool IsInsideLoad, bool IsStubAddr) const;
+
+ Optional<uint64_t> getSectionLoadAddress(void *LocalAddr) const;
+
+ IsSymbolValidFunction IsSymbolValid;
+ GetSymbolInfoFunction GetSymbolInfo;
+ GetSectionInfoFunction GetSectionInfo;
+ GetStubInfoFunction GetStubInfo;
+ GetGOTInfoFunction GetGOTInfo;
+ support::endianness Endianness;
+ MCDisassembler *Disassembler;
+ MCInstPrinter *InstPrinter;
+ llvm::raw_ostream &ErrStream;
+};
+} // end namespace llvm
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
new file mode 100644
index 0000000000000..440ab4174a565
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -0,0 +1,1960 @@
+//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of ELF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "Targets/RuntimeDyldELFMips.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+
+#define DEBUG_TYPE "dyld"
+
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+static void or32AArch64Imm(void *L, uint64_t Imm) {
+ or32le(L, (Imm & 0xFFF) << 10);
+}
+
+template <class T> static void write(bool isBE, void *P, T V) {
+ isBE ? write<T, support::big>(P, V) : write<T, support::little>(P, V);
+}
+
+static void write32AArch64Addr(void *L, uint64_t Imm) {
+ uint32_t ImmLo = (Imm & 0x3) << 29;
+ uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+ uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+ write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+}
+
+ // Return bits [Start, End] of Val, shifted right by Start bits.
+// For instance, getBits(0xF0, 4, 8) returns 0xF.
+static uint64_t getBits(uint64_t Val, int Start, int End) {
+ uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
+ return (Val >> Start) & Mask;
+}
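+
+// Quick sanity check of the helpers above, worked by hand:
+// getBits(0xF0, 4, 8) == 0xF, and on a zeroed instruction word,
+// or32AArch64Imm(P, 0xABC) leaves 0xABC << 10 == 0x2AF000 in the
+// little-endian word at P, i.e. 0xABC in the imm12 field at bits 21:10.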
+
+namespace {
+
+template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+ typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
+ typedef Elf_Sym_Impl<ELFT> Elf_Sym;
+ typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
+ typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
+
+ typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
+
+ typedef typename ELFT::uint addr_type;
+
+ DyldELFObject(ELFObjectFile<ELFT> &&Obj);
+
+public:
+ static Expected<std::unique_ptr<DyldELFObject>>
+ create(MemoryBufferRef Wrapper);
+
+ void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
+
+ void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
+
+ // Methods for type inquiry through isa, cast and dyn_cast
+ static bool classof(const Binary *v) {
+ return (isa<ELFObjectFile<ELFT>>(v) &&
+ classof(cast<ELFObjectFile<ELFT>>(v)));
+ }
+ static bool classof(const ELFObjectFile<ELFT> *v) {
+ return v->isDyldType();
+ }
+};
+
+
+
+// The MemoryBuffer passed into this constructor is just a wrapper around the
+// actual memory. Ultimately, the Binary parent class will take ownership of
+// this MemoryBuffer object but not the underlying memory.
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
+ : ELFObjectFile<ELFT>(std::move(Obj)) {
+ this->isDyldELFObject = true;
+}
+
+template <class ELFT>
+Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
+ auto Obj = ELFObjectFile<ELFT>::create(Wrapper);
+ if (auto E = Obj.takeError())
+ return std::move(E);
+ std::unique_ptr<DyldELFObject<ELFT>> Ret(
+ new DyldELFObject<ELFT>(std::move(*Obj)));
+ return std::move(Ret);
+}
+
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
+ uint64_t Addr) {
+ DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+ Elf_Shdr *shdr =
+ const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+ // This assumes the address passed in matches the target address bitness.
+ // The template-based type cast handles everything else.
+ shdr->sh_addr = static_cast<addr_type>(Addr);
+}
+
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
+ uint64_t Addr) {
+
+ Elf_Sym *sym = const_cast<Elf_Sym *>(
+ ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
+
+ // This assumes the address passed in matches the target address bitness.
+ // The template-based type cast handles everything else.
+ sym->st_value = static_cast<addr_type>(Addr);
+}
+
+class LoadedELFObjectInfo final
+ : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
+public:
+ LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+ OwningBinary<ObjectFile>
+ getObjectForDebug(const ObjectFile &Obj) const override;
+};
+
+template <typename ELFT>
+static Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
+ const LoadedELFObjectInfo &L) {
+ typedef typename ELFT::Shdr Elf_Shdr;
+ typedef typename ELFT::uint addr_type;
+
+ Expected<std::unique_ptr<DyldELFObject<ELFT>>> ObjOrErr =
+ DyldELFObject<ELFT>::create(Buffer);
+ if (Error E = ObjOrErr.takeError())
+ return std::move(E);
+
+ std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
+
+ // Iterate over all sections in the object.
+ auto SI = SourceObject.section_begin();
+ for (const auto &Sec : Obj->sections()) {
+ Expected<StringRef> NameOrErr = Sec.getName();
+ if (!NameOrErr) {
+ consumeError(NameOrErr.takeError());
+ continue;
+ }
+
+ if (*NameOrErr != "") {
+ DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+ Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
+ reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+ if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
+ // This assumes that the address passed in matches the target address
+ // bitness. The template-based type cast handles everything else.
+ shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
+ }
+ }
+ ++SI;
+ }
+
+ return std::move(Obj);
+}
+
+static OwningBinary<ObjectFile>
+createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
+ assert(Obj.isELF() && "Not an ELF object file.");
+
+ std::unique_ptr<MemoryBuffer> Buffer =
+ MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
+
+ Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
+ handleAllErrors(DebugObj.takeError());
+ if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
+ else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
+ else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
+ else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
+ else
+ llvm_unreachable("Unexpected ELF format");
+
+ handleAllErrors(DebugObj.takeError());
+ return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
+}
+
+OwningBinary<ObjectFile>
+LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
+ return createELFDebugObject(Obj, *this);
+}
+
+} // anonymous namespace
+
+namespace llvm {
+
+RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
+RuntimeDyldELF::~RuntimeDyldELF() {}
+
+void RuntimeDyldELF::registerEHFrames() {
+ for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
+ SID EHFrameSID = UnregisteredEHFrameSections[i];
+ uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+ uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+ size_t EHFrameSize = Sections[EHFrameSID].getSize();
+ MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+ }
+ UnregisteredEHFrameSections.clear();
+}
+
+std::unique_ptr<RuntimeDyldELF>
+llvm::RuntimeDyldELF::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default:
+ return std::make_unique<RuntimeDyldELF>(MemMgr, Resolver);
+ case Triple::mips:
+ case Triple::mipsel:
+ case Triple::mips64:
+ case Triple::mips64el:
+ return std::make_unique<RuntimeDyldELFMips>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+ return std::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
+ else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
+void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset) {
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_X86_64_NONE:
+ break;
+ case ELF::R_X86_64_64: {
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+ Value + Addend;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S: {
+ Value += Addend;
+ assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
+ (Type == ELF::R_X86_64_32S &&
+ ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
+ uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ TruncatedAddr;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_X86_64_PC8: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t RealOffset = Value + Addend - FinalAddress;
+ assert(isInt<8>(RealOffset));
+ int8_t TruncOffset = (RealOffset & 0xFF);
+ Section.getAddress()[Offset] = TruncOffset;
+ break;
+ }
+ case ELF::R_X86_64_PC32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t RealOffset = Value + Addend - FinalAddress;
+ assert(isInt<32>(RealOffset));
+ int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ TruncOffset;
+ break;
+ }
+ case ELF::R_X86_64_PC64: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t RealOffset = Value + Addend - FinalAddress;
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+ RealOffset;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
+ << format("%p\n", FinalAddress));
+ break;
+ }
+ case ELF::R_X86_64_GOTOFF64: {
+ // Compute Value - GOTBase.
+ uint64_t GOTBase = 0;
+ for (const auto &Section : Sections) {
+ if (Section.getName() == ".got") {
+ GOTBase = Section.getLoadAddressWithOffset(0);
+ break;
+ }
+ }
+ assert(GOTBase != 0 && "missing GOT");
+ int64_t GOTOffset = Value - GOTBase + Addend;
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
+ break;
+ }
+ }
+}
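+
+// For example, with R_X86_64_PC32 a call site is normally emitted with
+// Addend = -4 (the fixup lies four bytes before the end of the
+// instruction), so the stored field is S - 4 - P: the displacement from
+// the byte after the rel32 field to the target.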
+
+void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ switch (Type) {
+ case ELF::R_386_32: {
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ Value + Addend;
+ break;
+ }
+ // Handle R_386_PLT32 like R_386_PC32 since it should be able to
+ // reach any 32 bit address.
+ case ELF::R_386_PLT32:
+ case ELF::R_386_PC32: {
+ uint32_t FinalAddress =
+ Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+ uint32_t RealOffset = Value + Addend - FinalAddress;
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ RealOffset;
+ break;
+ }
+ default:
+ // There are other relocation types, but it appears these are the
+ // only ones currently used by the LLVM ELF object writer.
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ // Data should use the target endianness. Code is always little-endian.
+ bool isBE = Arch == Triple::aarch64_be;
+
+ LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_AARCH64_ABS16: {
+ uint64_t Result = Value + Addend;
+ assert(static_cast<int64_t>(Result) >= INT16_MIN && Result < UINT16_MAX);
+ write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
+ break;
+ }
+ case ELF::R_AARCH64_ABS32: {
+ uint64_t Result = Value + Addend;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN && Result < UINT32_MAX);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+ break;
+ }
+ case ELF::R_AARCH64_ABS64:
+ write(isBE, TargetPtr, Value + Addend);
+ break;
+ case ELF::R_AARCH64_PREL32: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ static_cast<int64_t>(Result) <= UINT32_MAX);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+ break;
+ }
+ case ELF::R_AARCH64_PREL64:
+ write(isBE, TargetPtr, Value + Addend - FinalAddress);
+ break;
+ case ELF::R_AARCH64_CALL26: // fallthrough
+ case ELF::R_AARCH64_JUMP26: {
+ // Operation: S+A-P. Set the Call or B immediate field to bits [27:2] of
+ // the result.
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ // "Check that -2^27 <= result < 2^27".
+ assert(isInt<28>(BranchImm));
+ or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
+ break;
+ }
+ case ELF::R_AARCH64_MOVW_UABS_G3:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G2_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G1_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G0_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
+ break;
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+ // Operation: Page(S+A) - Page(P)
+ uint64_t Result =
+ ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+ // Check that -2^32 <= X < 2^32
+ assert(isInt<33>(Result) && "overflow check failed for relocation");
+
+ // Immediate goes in bits 30:29 + 23:5 of the ADRP instruction, taken
+ // from bits 32:12 of X.
+ write32AArch64Addr(TargetPtr, Result >> 12);
+ break;
+ }
+ case ELF::R_AARCH64_ADD_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of the ADD instruction, taken
+ // from bits 11:0 of X.
+ or32AArch64Imm(TargetPtr, Value + Addend);
+ break;
+ case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:0 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
+ break;
+ case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:1 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
+ break;
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:2 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
+ break;
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:3 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
+ break;
+ case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:4 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
+ break;
+ }
+}
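+
+// For illustration, materializing the hypothetical address
+// 0x1122334455667788 with a MOVZ/MOVK sequence uses the four MOVW_UABS
+// relocations above: G0_NC yields imm16 0x7788, G1_NC 0x5566, G2_NC 0x3344
+// and G3 0x1122, each OR'd into bits 20:5 of its instruction.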
+
+void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ // TODO: Add Thumb relocations.
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%x", Value)
+ << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+
+ case ELF::R_ARM_NONE:
+ break;
+ // Write a 31-bit signed offset.
+ case ELF::R_ARM_PREL31:
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
+ ((Value - FinalAddress) & ~0x80000000);
+ break;
+ case ELF::R_ARM_TARGET1:
+ case ELF::R_ARM_ABS32:
+ support::ulittle32_t::ref{TargetPtr} = Value;
+ break;
+ // Write a 16-bit immediate (the low or high half of the 32-bit value)
+ // into the MOVW/MOVT instruction: bits 11:0 take the low 12 bits of the
+ // immediate and bits 19:16 take its top 4 bits.
+ case ELF::R_ARM_MOVW_ABS_NC:
+ case ELF::R_ARM_MOVT_ABS:
+ if (Type == ELF::R_ARM_MOVW_ABS_NC)
+ Value = Value & 0xFFFF;
+ else if (Type == ELF::R_ARM_MOVT_ABS)
+ Value = (Value >> 16) & 0xFFFF;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
+ (((Value >> 12) & 0xF) << 16);
+ break;
+ // Write a 24-bit relative value to the branch instruction.
+ case ELF::R_ARM_PC24: // Fall through.
+ case ELF::R_ARM_CALL: // Fall through.
+ case ELF::R_ARM_JUMP24:
+ int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
+ RelValue = (RelValue & 0x03FFFFFC) >> 2;
+ assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
+ break;
+ }
+}
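+
+// Worked example for the branch case above: with Value = 0x1000 and
+// FinalAddress = 0x2000, RelValue = 0x1000 - 0x2000 - 8 = -0x1008 (ARM
+// branches are relative to PC + 8), and bits [25:2] of that result,
+// (RelValue & 0x03FFFFFC) >> 2, land in the low 24 bits of the
+// instruction.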
+
+void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
+ if (Arch == Triple::UnknownArch ||
+ !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
+ IsMipsO32ABI = false;
+ IsMipsN32ABI = false;
+ IsMipsN64ABI = false;
+ return;
+ }
+ if (auto *E = dyn_cast<ELFObjectFileBase>(&Obj)) {
+ unsigned AbiVariant = E->getPlatformFlags();
+ IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
+ IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
+ }
+ IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips");
+}
+
+// Return the .TOC. section and offset.
+Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Set a default SectionID in case we do not find a TOC section below.
+ // This may happen for references to the TOC base (sym@toc, .opd
+ // relocation) without a .toc directive. In this case just use the
+ // first section (which is usually the .opd) since the code won't
+ // reference the .toc base directly.
+ Rel.SymbolName = nullptr;
+ Rel.SectionID = 0;
+
+ // The TOC consists of sections .got, .toc, .tocbss, .plt in that
+ // order. The TOC starts where the first of these sections starts.
+ for (auto &Section : Obj.sections()) {
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef SectionName = *NameOrErr;
+
+ if (SectionName == ".got"
+ || SectionName == ".toc"
+ || SectionName == ".tocbss"
+ || SectionName == ".plt") {
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, Section, false, LocalSections))
+ Rel.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ break;
+ }
+ }
+
+ // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
+ // thus permitting a full 64 KB segment.
+ Rel.Addend = 0x8000;
+
+ return Error::success();
+}
+
+ // Returns the section and offset associated with the OPD entry referenced
+ // by Symbol.
+Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Get the ELF symbol value (st_value) to compare with the relocation
+ // offset in .opd entries.
+ for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
+ si != se; ++si) {
+
+ Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
+ if (!RelSecOrErr)
+ report_fatal_error(toString(RelSecOrErr.takeError()));
+
+ section_iterator RelSecI = *RelSecOrErr;
+ if (RelSecI == Obj.section_end())
+ continue;
+
+ Expected<StringRef> NameOrErr = RelSecI->getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef RelSectionName = *NameOrErr;
+
+ if (RelSectionName != ".opd")
+ continue;
+
+ for (elf_relocation_iterator i = si->relocation_begin(),
+ e = si->relocation_end();
+ i != e;) {
+ // The R_PPC64_ADDR64 relocation indicates the first field
+ // of a .opd entry
+ uint64_t TypeFunc = i->getType();
+ if (TypeFunc != ELF::R_PPC64_ADDR64) {
+ ++i;
+ continue;
+ }
+
+ uint64_t TargetSymbolOffset = i->getOffset();
+ symbol_iterator TargetSymbol = i->getSymbol();
+ int64_t Addend;
+ if (auto AddendOrErr = i->getAddend())
+ Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+
+ ++i;
+ if (i == e)
+ break;
+
+ // Just check whether the following relocation is an R_PPC64_TOC.
+ uint64_t TypeTOC = i->getType();
+ if (TypeTOC != ELF::R_PPC64_TOC)
+ continue;
+
+ // Finally, compare the symbol value with the target symbol offset to
+ // check whether this .opd entry refers to the symbol this relocation
+ // points to.
+ if (Rel.Addend != (int64_t)TargetSymbolOffset)
+ continue;
+
+ section_iterator TSI = Obj.section_end();
+ if (auto TSIOrErr = TargetSymbol->getSection())
+ TSI = *TSIOrErr;
+ else
+ return TSIOrErr.takeError();
+ assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
+
+ bool IsCode = TSI->isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
+ LocalSections))
+ Rel.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Rel.Addend = (intptr_t)Addend;
+ return Error::success();
+ }
+ }
+ llvm_unreachable("Attempting to get address of ODP entry!");
+}
+
+ // Relocation masks following the #lo(value), #hi(value), #ha(value),
+ // #higher(value), #highera(value), #highest(value), and #highesta(value)
+ // macros defined in section 4.5.1 (Relocation Types) of the 64-bit
+ // PowerPC ELF ABI document.
+
+static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
+
+static inline uint16_t applyPPChi(uint64_t value) {
+ return (value >> 16) & 0xffff;
+}
+
+static inline uint16_t applyPPCha(uint64_t value) {
+ return ((value + 0x8000) >> 16) & 0xffff;
+}
+
+static inline uint16_t applyPPChigher(uint64_t value) {
+ return (value >> 32) & 0xffff;
+}
+
+static inline uint16_t applyPPChighera(uint64_t value) {
+ return ((value + 0x8000) >> 32) & 0xffff;
+}
+
+static inline uint16_t applyPPChighest(uint64_t value) {
+ return (value >> 48) & 0xffff;
+}
+
+static inline uint16_t applyPPChighesta(uint64_t value) {
+ return ((value + 0x8000) >> 48) & 0xffff;
+}
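+
+// Worked example: for value = 0x12348001, applyPPClo(value) == 0x8001 and
+// applyPPCha(value) == ((0x12348001 + 0x8000) >> 16) & 0xffff == 0x1235.
+// A lis/addi pair then reconstructs the value, since
+// (0x1235 << 16) + (int16_t)0x8001 == 0x12350000 - 0x7fff == 0x12348001;
+// the +0x8000 bias in #ha compensates for sign extension of the low half.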
+
+void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_PPC_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC_ADDR16_HI:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC_ADDR16_HA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_PPC64_ADDR16:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+ break;
+ case ELF::R_PPC64_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_LO_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+ break;
+ case ELF::R_PPC64_ADDR16_HI:
+ case ELF::R_PPC64_ADDR16_HIGH:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HA:
+ case ELF::R_PPC64_ADDR16_HIGHA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHER:
+ writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHERA:
+ writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHEST:
+ writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHESTA:
+ writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR14: {
+ assert(((Value + Addend) & 3) == 0);
+ // Preserve the AA/LK bits in the branch instruction
+ uint8_t aalk = *(LocalAddress + 3);
+ writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
+ } break;
+ case ELF::R_PPC64_REL16_LO: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPClo(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HI: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPChi(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HA: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPCha(Delta));
+ } break;
+ case ELF::R_PPC64_ADDR32: {
+ int64_t Result = static_cast<int64_t>(Value + Addend);
+ if (SignExtend64<32>(Result) != Result)
+ llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
+ writeInt32BE(LocalAddress, Result);
+ } break;
+ case ELF::R_PPC64_REL24: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
+ if (SignExtend64<26>(delta) != delta)
+ llvm_unreachable("Relocation R_PPC64_REL24 overflow");
+    // We preserve bits other than the LI field, i.e. the PO and AA/LK fields.
+ uint32_t Inst = readBytesUnaligned(LocalAddress, 4);
+ writeInt32BE(LocalAddress, (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
+ } break;
+ case ELF::R_PPC64_REL32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
+ if (SignExtend64<32>(delta) != delta)
+ llvm_unreachable("Relocation R_PPC64_REL32 overflow");
+ writeInt32BE(LocalAddress, delta);
+ } break;
+ case ELF::R_PPC64_REL64: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt64BE(LocalAddress, Delta);
+ } break;
+ case ELF::R_PPC64_ADDR64:
+ writeInt64BE(LocalAddress, Value + Addend);
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_390_PC16DBL:
+ case ELF::R_390_PLT16DBL: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
+ writeInt16BE(LocalAddress, Delta / 2);
+ break;
+ }
+ case ELF::R_390_PC32DBL:
+ case ELF::R_390_PLT32DBL: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
+ writeInt32BE(LocalAddress, Delta / 2);
+ break;
+ }
+ case ELF::R_390_PC16: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
+ writeInt16BE(LocalAddress, Delta);
+ break;
+ }
+ case ELF::R_390_PC32: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
+ writeInt32BE(LocalAddress, Delta);
+ break;
+ }
+ case ELF::R_390_PC64: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ writeInt64BE(LocalAddress, Delta);
+ break;
+ }
+ case ELF::R_390_8:
+ *LocalAddress = (uint8_t)(Value + Addend);
+ break;
+ case ELF::R_390_16:
+ writeInt16BE(LocalAddress, Value + Addend);
+ break;
+ case ELF::R_390_32:
+ writeInt32BE(LocalAddress, Value + Addend);
+ break;
+ case ELF::R_390_64:
+ writeInt64BE(LocalAddress, Value + Addend);
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ bool isBE = Arch == Triple::bpfeb;
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_BPF_NONE:
+ break;
+ case ELF::R_BPF_64_64: {
+ write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_BPF_64_32: {
+ Value += Addend;
+ assert(Value <= UINT32_MAX);
+ write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ }
+}
+
+// The target location for the relocation is described by RE.SectionID and
+// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+// SectionEntry has three members describing its location.
+// SectionEntry::Address is the address at which the section has been loaded
+// into memory in the current (host) process. SectionEntry::LoadAddress is the
+// address that the section will have in the target process.
+// SectionEntry::ObjAddress is the address of the bits for this section in the
+// original emitted object image (also in the current address space).
+//
+// Relocations will be applied as if the section were loaded at
+// SectionEntry::LoadAddress, but they will be applied at an address based
+// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
+// Target memory contents if they are required for value calculations.
+//
+// The Value parameter here is the load address of the symbol for the
+// relocation to be applied. For relocations which refer to symbols in the
+// current object Value will be the LoadAddress of the section in which
+// the symbol resides (RE.Addend provides additional information about the
+// symbol location). For external symbols, Value will be the address of the
+// symbol in the target address space.
+void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+}
+
+void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID) {
+ switch (Arch) {
+ case Triple::x86_64:
+ resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
+ break;
+ case Triple::x86:
+ resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::arm: // Fall through.
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::ppc:
+ resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::ppc64: // Fall through.
+ case Triple::ppc64le:
+ resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::systemz:
+ resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::bpfel:
+ case Triple::bpfeb:
+ resolveBPFRelocation(Section, Offset, Value, Type, Addend);
+ break;
+ default:
+ llvm_unreachable("Unsupported CPU type!");
+ }
+}
+
+void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
+                                                uint64_t Offset) const {
+ return (void *)(Sections[SectionID].getObjAddress() + Offset);
+}
+
+void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID,
+                                             uint64_t Offset, unsigned RelType,
+                                             RelocationValueRef Value) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+}
+
+uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
+ bool IsLocal) const {
+ switch (RelType) {
+ case ELF::R_MICROMIPS_GOT16:
+ if (IsLocal)
+ return ELF::R_MICROMIPS_LO16;
+ break;
+ case ELF::R_MICROMIPS_HI16:
+ return ELF::R_MICROMIPS_LO16;
+ case ELF::R_MIPS_GOT16:
+ if (IsLocal)
+ return ELF::R_MIPS_LO16;
+ break;
+ case ELF::R_MIPS_HI16:
+ return ELF::R_MIPS_LO16;
+ case ELF::R_MIPS_PCHI16:
+ return ELF::R_MIPS_PCLO16;
+ default:
+ break;
+ }
+ return ELF::R_MIPS_NONE;
+}
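+
+// Illustrative background: a Mips HI16/LO16 pair reconstructs a 32-bit value
+// as (HI << 16) + signext16(LO). Because the LO half is sign-extended, the
+// HI half must carry-compensate, so a *HI16 relocation cannot be applied
+// until its matching *LO16 supplies the low addend; processRelocationRef
+// below therefore parks *HI16 entries in PendingRelocs until the relocation
+// type returned by getMatchingLoRelocation shows up.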
+
+// Sometimes we don't need to create a thunk for a branch, typically when the
+// branch target is located in the same object file. In that case the target
+// is either a weak symbol or a symbol in a different executable section.
+// This function checks whether the branch target is located in the same
+// object file and whether the distance between source and target fits the
+// R_AARCH64_CALL26 relocation. If both conditions are met, it emits a direct
+// jump to the target and returns true. Otherwise it returns false and a
+// thunk is created.
+bool RuntimeDyldELF::resolveAArch64ShortBranch(
+ unsigned SectionID, relocation_iterator RelI,
+ const RelocationValueRef &Value) {
+ uint64_t Address;
+ if (Value.SymbolName) {
+ auto Loc = GlobalSymbolTable.find(Value.SymbolName);
+
+ // Don't create direct branch for external symbols.
+ if (Loc == GlobalSymbolTable.end())
+ return false;
+
+ const auto &SymInfo = Loc->second;
+ Address =
+ uint64_t(Sections[SymInfo.getSectionID()].getLoadAddressWithOffset(
+ SymInfo.getOffset()));
+ } else {
+ Address = uint64_t(Sections[Value.SectionID].getLoadAddress());
+ }
+ uint64_t Offset = RelI->getOffset();
+ uint64_t SourceAddress = Sections[SectionID].getLoadAddressWithOffset(Offset);
+
+  // R_AARCH64_CALL26 requires the immediate to be in the range
+  // -2^27 <= imm < 2^27. If the distance between source and target is out
+  // of range, we should create a thunk.
+ if (!isInt<28>(Address + Value.Addend - SourceAddress))
+ return false;
+
+ resolveRelocation(Sections[SectionID], Offset, Address, RelI->getType(),
+ Value.Addend);
+
+ return true;
+}
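+
+// Illustrative background for the isInt<28> check above: R_AARCH64_CALL26
+// stores a signed 26-bit *word* offset, and scaling by the 4-byte instruction
+// size gives a byte displacement range of -2^27 <= delta < 2^27, i.e.
+// +/-128 MiB around the branch site.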
+
+void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
+ const RelocationValueRef &Value,
+ relocation_iterator RelI,
+ StubMap &Stubs) {
+
+ LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ uint64_t Offset = RelI->getOffset();
+ unsigned RelType = RelI->getType();
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(Section, Offset,
+ (uint64_t)Section.getAddressWithOffset(i->second),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()));
+
+ RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
+ RelocationEntry REmovk_g2(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
+ RelocationEntry REmovk_g1(SectionID,
+ StubTargetAddr - Section.getAddress() + 8,
+ ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
+ RelocationEntry REmovk_g0(SectionID,
+ StubTargetAddr - Section.getAddress() + 12,
+ ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REmovz_g3, Value.SymbolName);
+ addRelocationForSymbol(REmovk_g2, Value.SymbolName);
+ addRelocationForSymbol(REmovk_g1, Value.SymbolName);
+ addRelocationForSymbol(REmovk_g0, Value.SymbolName);
+ } else {
+ addRelocationForSection(REmovz_g3, Value.SectionID);
+ addRelocationForSection(REmovk_g2, Value.SectionID);
+ addRelocationForSection(REmovk_g1, Value.SectionID);
+ addRelocationForSection(REmovk_g0, Value.SectionID);
+ }
+ resolveRelocation(Section, Offset,
+ reinterpret_cast<uint64_t>(Section.getAddressWithOffset(
+ Section.getStubOffset())),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+}
+
+Expected<relocation_iterator>
+RuntimeDyldELF::processRelocationRef(
+ unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
+ ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
+ const auto &Obj = cast<ELFObjectFileBase>(O);
+ uint64_t RelType = RelI->getType();
+ int64_t Addend = 0;
+ if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
+ Addend = *AddendOrErr;
+ else
+ consumeError(AddendOrErr.takeError());
+ elf_symbol_iterator Symbol = RelI->getSymbol();
+
+ // Obtain the symbol name which is referenced in the relocation
+ StringRef TargetName;
+ if (Symbol != Obj.symbol_end()) {
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+ }
+ LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
+ RelocationValueRef Value;
+ // First search for the symbol in the local symbol table
+ SymbolRef::Type SymType = SymbolRef::ST_Unknown;
+
+ // Search for the symbol in the global symbol table
+ RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
+ if (Symbol != Obj.symbol_end()) {
+ gsi = GlobalSymbolTable.find(TargetName.data());
+ Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ OS.flush();
+ report_fatal_error(Buf);
+ }
+ SymType = *SymTypeOrErr;
+ }
+ if (gsi != GlobalSymbolTable.end()) {
+ const auto &SymInfo = gsi->second;
+ Value.SectionID = SymInfo.getSectionID();
+ Value.Offset = SymInfo.getOffset();
+ Value.Addend = SymInfo.getOffset() + Addend;
+ } else {
+ switch (SymType) {
+ case SymbolRef::ST_Debug: {
+      // TODO: Currently ELF's SymbolRef::ST_Debug corresponds to
+      // STT_SECTION; this is not obvious and could be changed by other
+      // developers. The best fix may be to add a new symbol type,
+      // ST_Section, to SymbolRef and use it here.
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SectionOrErr.takeError(), OS);
+ OS.flush();
+ report_fatal_error(Buf);
+ }
+ section_iterator si = *SectionOrErr;
+ if (si == Obj.section_end())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ bool isCode = si->isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
+ ObjSectionToID))
+ Value.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Data:
+ case SymbolRef::ST_Function:
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+
+ // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
+ // will manifest here as a NULL symbol name.
+ // We can set this as a valid (but empty) symbol name, and rely
+ // on addRelocationForSymbol to handle this.
+ if (!Value.SymbolName)
+ Value.SymbolName = "";
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
+ }
+ }
+
+ uint64_t Offset = RelI->getOffset();
+
+ LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
+ << "\n");
+  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be) {
+ if (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26) {
+ resolveAArch64Branch(SectionID, Value, RelI, Stubs);
+ } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
+      // Create a new GOT entry or find an existing one. If a GOT entry has
+      // to be created, we also emit an ABS64 relocation for it.
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_ADR_PREL_PG_HI21);
+
+ } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_LDST64_ABS_LO12_NC);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (Arch == Triple::arm) {
+ if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24) {
+      // This is an ARM branch relocation; we need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(
+ Section, Offset,
+ reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()));
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_ARM_ABS32, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
+ Section.getAddressWithOffset(
+ Section.getStubOffset())),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
+ if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
+ RelType == ELF::R_ARM_ABS32) {
+ Value.Addend += *Placeholder;
+      } else if (RelType == ELF::R_ARM_MOVW_ABS_NC ||
+                 RelType == ELF::R_ARM_MOVT_ABS) {
+        // See ELF for ARM documentation
+        Value.Addend += (int16_t)((*Placeholder & 0xFFF) |
+                                  (((*Placeholder >> 16) & 0xF) << 12));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsO32ABI) {
+ uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
+ computePlaceholderAddress(SectionID, Offset));
+ uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
+ if (RelType == ELF::R_MIPS_26) {
+      // This is a Mips branch relocation; we need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Extract the addend from the instruction.
+      // We shift up by two since the value will be shifted down again
+      // when applying the relocation.
+ uint32_t Addend = (Opcode & 0x03ffffff) << 2;
+
+ Value.Addend += Addend;
+
+      // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
+ int64_t Addend = (Opcode & 0x0000ffff) << 16;
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ PendingRelocs.push_back(std::make_pair(Value, RE));
+ } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
+ int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
+ for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
+ const RelocationValueRef &MatchingValue = I->first;
+ RelocationEntry &Reloc = I->second;
+ if (MatchingValue == Value &&
+ RelType == getMatchingLoRelocation(Reloc.RelType) &&
+ SectionID == Reloc.SectionID) {
+ Reloc.Addend += Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(Reloc, Value.SymbolName);
+ else
+ addRelocationForSection(Reloc, Value.SectionID);
+ I = PendingRelocs.erase(I);
+ } else
+ ++I;
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else {
+ if (RelType == ELF::R_MIPS_32)
+ Value.Addend += Opcode;
+ else if (RelType == ELF::R_MIPS_PC16)
+ Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC19_S2)
+ Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC21_S2)
+ Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC26_S2)
+ Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsN32ABI || IsMipsN64ABI) {
+ uint32_t r_type = RelType & 0xff;
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
+ || r_type == ELF::R_MIPS_GOT_DISP) {
+ StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
+ if (i != GOTSymbolOffsets.end())
+ RE.SymOffset = i->second;
+ else {
+ RE.SymOffset = allocateGOTEntries(1);
+ GOTSymbolOffsets[TargetName] = RE.SymOffset;
+ }
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_MIPS_26) {
+      // This is a Mips branch relocation; we need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+      // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ if (IsMipsN32ABI) {
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ } else {
+ // Creating Highest, Higher, Hi and Lo relocations for the filled stub
+ // instructions.
+ RelocationEntry REHighest(SectionID,
+ StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HIGHEST, Value.Addend);
+ RelocationEntry REHigher(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_HIGHER, Value.Addend);
+ RelocationEntry REHi(SectionID,
+ StubTargetAddr - Section.getAddress() + 12,
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 20,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHighest, Value.SymbolName);
+ addRelocationForSymbol(REHigher, Value.SymbolName);
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHighest, Value.SectionID);
+ addRelocationForSection(REHigher, Value.SectionID);
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+
+ } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+ if (RelType == ELF::R_PPC64_REL24) {
+ // Determine ABI variant in use for this object.
+ unsigned AbiVariant = Obj.getPlatformFlags();
+ AbiVariant &= ELF::EF_PPC64_ABI;
+      // A PPC branch relocation will need a stub function if the target is
+      // an external symbol (either Value.SymbolName is set, or SymType is
+      // SymbolRef::ST_Unknown) or if the target address is not within range
+      // of the branch immediate.
+ SectionEntry &Section = Sections[SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(Offset);
+ bool RangeOverflow = false;
+ bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
+ if (!IsExtern) {
+ if (AbiVariant != 2) {
+ // In the ELFv1 ABI, a function call may point to the .opd entry,
+ // so the final symbol value is calculated based on the relocation
+ // values in the .opd section.
+ if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else {
+ // In the ELFv2 ABI, a function symbol may provide a local entry
+ // point, which must be used for direct calls.
+          if (Value.SectionID == SectionID) {
+ uint8_t SymOther = Symbol->getOther();
+ Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
+ }
+ }
+ uint8_t *RelocTarget =
+ Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
+ int64_t delta = static_cast<int64_t>(Target - RelocTarget);
+        // If it is within the 26-bit branch range, just set the branch target
+ if (SignExtend64<26>(delta) != delta) {
+ RangeOverflow = true;
+ } else if ((AbiVariant != 2) ||
+ (AbiVariant == 2 && Value.SectionID == SectionID)) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
+ RangeOverflow) {
+ // It is an external symbol (either Value.SymbolName is set, or
+ // SymType is SymbolRef::ST_Unknown) or out of range.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ // Symbol function stub already created, just relocate to it
+ resolveRelocation(Section, Offset,
+ reinterpret_cast<uint64_t>(
+ Section.getAddressWithOffset(i->second)),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()),
+ AbiVariant);
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_PPC64_ADDR64, Value.Addend);
+
+          // Generate the 64-bit address loads as exemplified in section
+          // 4.5.1 of the PPC64 ELF ABI. Note that the relocations need to
+          // apply to the low part of the instructions, so we have to update
+          // the offset according to the target endianness.
+ uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
+ if (!IsTargetLittleEndian)
+ StubRelocOffset += 2;
+
+ RelocationEntry REhst(SectionID, StubRelocOffset + 0,
+ ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
+ RelocationEntry REhr(SectionID, StubRelocOffset + 4,
+ ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
+ RelocationEntry REh(SectionID, StubRelocOffset + 12,
+ ELF::R_PPC64_ADDR16_HI, Value.Addend);
+ RelocationEntry REl(SectionID, StubRelocOffset + 16,
+ ELF::R_PPC64_ADDR16_LO, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REhst, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
+ } else {
+ addRelocationForSection(REhst, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
+ }
+
+ resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
+ Section.getAddressWithOffset(
+ Section.getStubOffset())),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
+ // Restore the TOC for external calls
+ if (AbiVariant == 2)
+ writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
+ else
+ writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
+ }
+ }
+ } else if (RelType == ELF::R_PPC64_TOC16 ||
+ RelType == ELF::R_PPC64_TOC16_DS ||
+ RelType == ELF::R_PPC64_TOC16_LO ||
+ RelType == ELF::R_PPC64_TOC16_LO_DS ||
+ RelType == ELF::R_PPC64_TOC16_HI ||
+ RelType == ELF::R_PPC64_TOC16_HA) {
+ // These relocations are supposed to subtract the TOC address from
+ // the final value. This does not fit cleanly into the RuntimeDyld
+ // scheme, since there may be *two* sections involved in determining
+ // the relocation value (the section of the symbol referred to by the
+ // relocation, and the TOC section associated with the current module).
+ //
+ // Fortunately, these relocations are currently only ever generated
+ // referring to symbols that themselves reside in the TOC, which means
+ // that the two sections are actually the same. Thus they cancel out
+ // and we can immediately resolve the relocation right now.
+ switch (RelType) {
+ case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
+ case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
+ case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
+ case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
+ case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
+ case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
+ default: llvm_unreachable("Wrong relocation type.");
+ }
+
+ RelocationValueRef TOCValue;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
+ return std::move(Err);
+ if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
+ llvm_unreachable("Unsupported TOC relocation.");
+ Value.Addend -= TOCValue.Addend;
+ resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
+ } else {
+ // There are two ways to refer to the TOC address directly: either
+ // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
+ // ignored), or via any relocation that refers to the magic ".TOC."
+ // symbols (in which case the addend is respected).
+ if (RelType == ELF::R_PPC64_TOC) {
+ RelType = ELF::R_PPC64_ADDR64;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else if (TargetName == ".TOC.") {
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ Value.Addend += Addend;
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ } else if (Arch == Triple::systemz &&
+ (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
+ // Create function stubs for both PLT and GOT references, regardless of
+ // whether the GOT reference is to data or code. The stub contains the
+ // full address of the symbol, as needed by GOT references, and the
+ // executable part only adds an overhead of 8 bytes.
+ //
+ // We could try to conserve space by allocating the code and data
+ // parts of the stub separately. However, as things stand, we allocate
+ // a stub for every relocation, so using a GOT in JIT code should be
+ // no less space efficient than using an explicit constant pool.
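+    // A sketch of the stub layout assumed here, inferred from the offsets
+    // used below: the 16-byte stub starts with a short code sequence and
+    // keeps the full 64-bit symbol address at offset 8. Hence the R_390_64
+    // entry below is placed at StubOffset + 8, and R_390_GOTENT references
+    // resolve to StubAddress + 8, the address slot itself.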
+ LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment();
+ StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
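+      // Align-up idiom: adding StubAlignment - 1 and masking with
+      // -StubAlignment rounds up to the next multiple of the alignment,
+      // e.g. 0x1001 with alignment 8 becomes 0x1008.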
+ unsigned StubOffset = StubAddress - BaseAddress;
+
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+ RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
+ Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+
+ if (RelType == ELF::R_390_GOTENT)
+ resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
+ Addend);
+ else
+ resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
+ } else if (Arch == Triple::x86_64) {
+ if (RelType == ELF::R_X86_64_PLT32) {
+      // The way the PLT relocations normally work is that the linker
+      // allocates the PLT and this relocation makes a PC-relative call into
+      // the PLT. The PLT entry will then jump to an address provided by the
+      // GOT. On first call, the GOT address will point back into PLT code
+      // that resolves the symbol. After the first call, the GOT entry points
+      // to the actual function.
+ //
+ // For local functions we're ignoring all of that here and just replacing
+ // the PLT32 relocation type with PC32, which will translate the relocation
+ // into a PC-relative call directly to the function. For external symbols we
+ // can't be sure the function will be within 2^32 bytes of the call site, so
+ // we need to create a stub, which calls into the GOT. This case is
+ // equivalent to the usual PLT implementation except that we use the stub
+ // mechanism in RuntimeDyld (which puts stubs at the end of the section)
+ // rather than allocating a PLT section.
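+      // A sketch of the stub shape these offsets assume (inferred from the
+      // fixups below): the x86-64 stub is a 6-byte RIP-relative indirect
+      // jump,
+      //
+      //   ff 25 xx xx xx xx    jmpq *disp32(%rip)
+      //
+      // so the PC32 fixup is written at StubOffset + 2, past the ff 25
+      // opcode/ModRM bytes, and carries addend -4 because RIP-relative
+      // displacements are measured from the end of the instruction.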
+ if (Value.SymbolName) {
+ // This is a call to an external function.
+ // Look for an existing stub.
+ SectionEntry &Section = Sections[SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section.getAddress()) + i->second;
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function (equivalent to a PLT entry).
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment();
+ StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+
+ // Bump our stub offset counter
+ Section.advanceStubOffset(getMaxStubSize());
+
+ // Allocate a GOT Entry
+ uint64_t GOTOffset = allocateGOTEntries(1);
+
+ // The load of the GOT address has an addend of -4
+ resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ addRelocationForSymbol(
+ computeGOTOffsetRE(GOTOffset, 0, ELF::R_X86_64_64),
+ Value.SymbolName);
+ }
+
+ // Make the target call a call into the stub table.
+ resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
+ Addend);
+ } else {
+ RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
+ Value.Offset);
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ } else if (RelType == ELF::R_X86_64_GOTPCREL ||
+ RelType == ELF::R_X86_64_GOTPCRELX ||
+ RelType == ELF::R_X86_64_REX_GOTPCRELX) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOT64) {
+ // Fill in a 64-bit GOT offset.
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveRelocation(Sections[SectionID], Offset, GOTOffset,
+ ELF::R_X86_64_64, 0);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOTPC64) {
+ // Materialize the address of the base of the GOT relative to the PC.
+ // This doesn't create a GOT entry, but it does mean we need a GOT
+ // section.
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC64);
+ } else if (RelType == ELF::R_X86_64_GOTOFF64) {
+ // GOTOFF relocations ultimately require a section difference relocation.
+ (void)allocateGOTEntries(0);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC32) {
+      Value.Addend += support::ulittle32_t::ref(
+          computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC64) {
+      Value.Addend += support::ulittle64_t::ref(
+          computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else {
+ if (Arch == Triple::x86) {
+      Value.Addend += support::ulittle32_t::ref(
+          computePlaceholderAddress(SectionID, Offset));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ return ++RelI;
+}
+
+size_t RuntimeDyldELF::getGOTEntrySize() {
+ // We don't use the GOT in all of these cases, but it's essentially free
+ // to put them all here.
+ size_t Result = 0;
+ switch (Arch) {
+ case Triple::x86_64:
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::ppc64:
+ case Triple::ppc64le:
+ case Triple::systemz:
+ Result = sizeof(uint64_t);
+ break;
+ case Triple::x86:
+ case Triple::arm:
+ case Triple::thumb:
+ Result = sizeof(uint32_t);
+ break;
+ case Triple::mips:
+ case Triple::mipsel:
+ case Triple::mips64:
+ case Triple::mips64el:
+ if (IsMipsO32ABI || IsMipsN32ABI)
+ Result = sizeof(uint32_t);
+ else if (IsMipsN64ABI)
+ Result = sizeof(uint64_t);
+ else
+ llvm_unreachable("Mips ABI not handled");
+ break;
+ default:
+ llvm_unreachable("Unsupported CPU type!");
+ }
+ return Result;
+}
+
+uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
+ if (GOTSectionID == 0) {
+ GOTSectionID = Sections.size();
+    // Reserve a section ID; we'll allocate the section later,
+    // once we know the total size.
+ Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
+ }
+ uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
+ CurrentGOTIndex += no;
+ return StartOffset;
+}
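+
+// Note that GOT slots are handed out here only as offsets within a single
+// tentative ".got" section (GOTSectionID); the backing memory is allocated
+// later, in finalizeLoad, once CurrentGOTIndex reflects every requested
+// entry.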
+
+uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
+ unsigned GOTRelType) {
+ auto E = GOTOffsetMap.insert({Value, 0});
+ if (E.second) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+
+ // Create relocation for newly created GOT entry
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, GOTRelType);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ E.first->second = GOTOffset;
+ }
+
+ return E.first->second;
+}
+
+void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
+ uint64_t Offset,
+ uint64_t GOTOffset,
+ uint32_t Type) {
+ // Fill in the relative address of the GOT Entry into the stub
+ RelocationEntry GOTRE(SectionID, Offset, Type, GOTOffset);
+ addRelocationForSection(GOTRE, GOTSectionID);
+}
+
+RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
+ uint64_t SymbolOffset,
+ uint32_t Type) {
+ return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
+}
+
+Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) {
+ if (IsMipsO32ABI)
+ if (!PendingRelocs.empty())
+ return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
+
+ // If necessary, allocate the global offset table
+ if (GOTSectionID != 0) {
+ // Allocate memory for the section
+ size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
+ uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
+ GOTSectionID, ".got", false);
+ if (!Addr)
+ return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
+
+ Sections[GOTSectionID] =
+ SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
+
+ // For now, initialize all GOT entries to zero. We'll fill them in as
+ // needed when GOT-based relocations are applied.
+ memset(Addr, 0, TotalSize);
+ if (IsMipsN32ABI || IsMipsN64ABI) {
+      // To correctly resolve Mips GOT relocations, we need a mapping from
+      // the object's sections to GOTs.
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+ if (SI->relocation_begin() != SI->relocation_end()) {
+ Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+ if (!RelSecOrErr)
+ return make_error<RuntimeDyldError>(
+ toString(RelSecOrErr.takeError()));
+
+ section_iterator RelocatedSection = *RelSecOrErr;
+ ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
+        assert(i != SectionMap.end());
+ SectionToGOTMap[i->second] = GOTSectionID;
+ }
+ }
+ GOTSymbolOffsets.clear();
+ }
+ }
+
+ // Look for and record the EH frame section.
+ ObjSectionToIDMap::iterator i, e;
+ for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
+ const SectionRef &Section = i->first;
+
+ StringRef Name;
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (NameOrErr)
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == ".eh_frame") {
+ UnregisteredEHFrameSections.push_back(i->second);
+ break;
+ }
+ }
+
+ GOTSectionID = 0;
+ CurrentGOTIndex = 0;
+
+ return Error::success();
+}
+
+bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
+ return Obj.isELF();
+}
+
+bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
+ unsigned RelTy = R.getType();
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+ return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
+ RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
+
+ if (Arch == Triple::x86_64)
+ return RelTy == ELF::R_X86_64_GOTPCREL ||
+ RelTy == ELF::R_X86_64_GOTPCRELX ||
+ RelTy == ELF::R_X86_64_GOT64 ||
+ RelTy == ELF::R_X86_64_REX_GOTPCRELX;
+ return false;
+}
+
+bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
+ if (Arch != Triple::x86_64)
+ return true; // Conservative answer
+
+ switch (R.getType()) {
+ default:
+ return true; // Conservative answer
+
+ case ELF::R_X86_64_GOTPCREL:
+ case ELF::R_X86_64_GOTPCRELX:
+ case ELF::R_X86_64_REX_GOTPCRELX:
+ case ELF::R_X86_64_GOTPC64:
+ case ELF::R_X86_64_GOT64:
+ case ELF::R_X86_64_GOTOFF64:
+ case ELF::R_X86_64_PC32:
+ case ELF::R_X86_64_PC64:
+ case ELF::R_X86_64_64:
+    // We know that these relocation types won't need a stub function. This
+    // list can be extended as needed.
+ return false;
+ }
+}
+
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
new file mode 100644
index 0000000000000..ef0784e2273b8
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -0,0 +1,189 @@
+//===-- RuntimeDyldELF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace llvm;
+
+namespace llvm {
+namespace object {
+class ELFObjectFileBase;
+}
+
+class RuntimeDyldELF : public RuntimeDyldImpl {
+
+ void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset = 0, SID SectionID = 0);
+
+ void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset);
+
+ void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ bool resolveAArch64ShortBranch(unsigned SectionID, relocation_iterator RelI,
+ const RelocationValueRef &Value);
+
+ void resolveAArch64Branch(unsigned SectionID, const RelocationValueRef &Value,
+ relocation_iterator RelI, StubMap &Stubs);
+
+ void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolvePPC32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveBPFRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ unsigned getMaxStubSize() const override {
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+ return 20; // movz; movk; movk; movk; br
+ if (Arch == Triple::arm || Arch == Triple::thumb)
+ return 8; // 32-bit instruction and 32-bit address
+    if (IsMipsO32ABI || IsMipsN32ABI)
+      return 16;
+    if (IsMipsN64ABI)
+      return 32;
+    if (Arch == Triple::ppc64 || Arch == Triple::ppc64le)
+      return 44;
+    if (Arch == Triple::x86_64)
+      return 6; // 2-byte jmp instruction + 32-bit relative address
+    if (Arch == Triple::systemz)
+      return 16;
+    return 0;
+ }
+
+ unsigned getStubAlignment() override {
+ if (Arch == Triple::systemz)
+ return 8;
+ else
+ return 1;
+ }
+
+ void setMipsABI(const ObjectFile &Obj) override;
+
+ Error findPPC64TOCSection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+ Error findOPDEntrySection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+protected:
+ size_t getGOTEntrySize() override;
+
+private:
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+  // Allocate the given number ('no') of GOT entries.
+ uint64_t allocateGOTEntries(unsigned no);
+
+ // Find GOT entry corresponding to relocation or create new one.
+ uint64_t findOrAllocGOTEntry(const RelocationValueRef &Value,
+ unsigned GOTRelType);
+
+  // Resolve the relative address of GOTOffset in the section with the
+  // given SectionID and place it at the given Offset.
+ void resolveGOTOffsetRelocation(unsigned SectionID, uint64_t Offset,
+ uint64_t GOTOffset, uint32_t Type);
+
+ // For a GOT entry referenced from SectionID, compute a relocation entry
+ // that will place the final resolved value in the GOT slot
+ RelocationEntry computeGOTOffsetRE(uint64_t GOTOffset, uint64_t SymbolOffset,
+ unsigned Type);
+
+ // Compute the address in memory where we can find the placeholder
+ void *computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const;
+
+  // Split out the common case of creating the RelocationEntry for when the
+  // relocation requires no particular advanced processing.
+  void processSimpleRelocation(unsigned SectionID, uint64_t Offset,
+                               unsigned RelType, RelocationValueRef Value);
+
+ // Return matching *LO16 relocation (Mips specific)
+ uint32_t getMatchingLoRelocation(uint32_t RelType,
+ bool IsLocal = false) const;
+
+ // The tentative ID for the GOT section
+ unsigned GOTSectionID;
+
+ // Records the current number of allocated slots in the GOT
+ // (This would be equivalent to GOTEntries.size() were it not for relocations
+ // that consume more than one slot)
+ unsigned CurrentGOTIndex;
+
+protected:
+ // A map from section to a GOT section that has entries for section's GOT
+ // relocations. (Mips64 specific)
+ DenseMap<SID, SID> SectionToGOTMap;
+
+private:
+  // A map to avoid duplicate GOT entries (Mips64 specific).
+ StringMap<uint64_t> GOTSymbolOffsets;
+
+  // *HI16 relocations are parked here for resolving when we find the
+  // matching *LO16 part. (Mips specific)
+ SmallVector<std::pair<RelocationValueRef, RelocationEntry>, 8> PendingRelocs;
+
+ // When a module is loaded we save the SectionID of the EH frame section
+ // in a table until we receive a request to register all unregistered
+ // EH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+
+ // Map between GOT relocation value and corresponding GOT offset
+ std::map<RelocationValueRef, uint64_t> GOTOffsetMap;
+
+ bool relocationNeedsGot(const RelocationRef &R) const override;
+ bool relocationNeedsStub(const RelocationRef &R) const override;
+
+public:
+ RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+ ~RuntimeDyldELF() override;
+
+ static std::unique_ptr<RuntimeDyldELF>
+ create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &O) override;
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override;
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+ void registerEHFrames() override;
+ Error finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
new file mode 100644
index 0000000000000..cec7b92b8c48e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -0,0 +1,586 @@
+//===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the implementations of runtime dynamic linker facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <map>
+#include <system_error>
+#include <unordered_map>
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+
+class Twine;
+
+#define UNIMPLEMENTED_RELOC(RelType) \
+ case RelType: \
+ return make_error<RuntimeDyldError>("Unimplemented relocation: " #RelType)
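+
+// Typical use (an illustrative sketch, assuming an enclosing function that
+// returns Error): list known-but-unhandled relocation types in a switch:
+//
+//   switch (RelType) {
+//     UNIMPLEMENTED_RELOC(ELF::R_X86_64_TLSGD);
+//   default:
+//     break;
+//   }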
+
+/// SectionEntry - represents a section emitted into memory by the dynamic
+/// linker.
+class SectionEntry {
+ /// Name - section name.
+ std::string Name;
+
+ /// Address - address in the linker's memory where the section resides.
+ uint8_t *Address;
+
+ /// Size - section size. Doesn't include the stubs.
+ size_t Size;
+
+ /// LoadAddress - the address of the section in the target process's memory.
+ /// Used for situations in which JIT-ed code is being executed in the address
+ /// space of a separate process. If the code executes in the same address
+ /// space where it was JIT-ed, this just equals Address.
+ uint64_t LoadAddress;
+
+ /// StubOffset - used for architectures with stub functions for far
+ /// relocations (like ARM).
+ uintptr_t StubOffset;
+
+ /// The total amount of space allocated for this section. This includes the
+ /// section size and the maximum amount of space that the stubs can occupy.
+ size_t AllocationSize;
+
+ /// ObjAddress - address of the section in the in-memory object file. Used
+ /// for calculating relocations in some object formats (like MachO).
+ uintptr_t ObjAddress;
+
+public:
+ SectionEntry(StringRef name, uint8_t *address, size_t size,
+ size_t allocationSize, uintptr_t objAddress)
+ : Name(name), Address(address), Size(size),
+ LoadAddress(reinterpret_cast<uintptr_t>(address)), StubOffset(size),
+ AllocationSize(allocationSize), ObjAddress(objAddress) {
+ // AllocationSize is used only in asserts, prevent an "unused private field"
+ // warning:
+ (void)AllocationSize;
+ }
+
+ StringRef getName() const { return Name; }
+
+ uint8_t *getAddress() const { return Address; }
+
+ /// Return the address of this section with an offset.
+ uint8_t *getAddressWithOffset(unsigned OffsetBytes) const {
+ assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+ return Address + OffsetBytes;
+ }
+
+ size_t getSize() const { return Size; }
+
+ uint64_t getLoadAddress() const { return LoadAddress; }
+ void setLoadAddress(uint64_t LA) { LoadAddress = LA; }
+
+ /// Return the load address of this section with an offset.
+ uint64_t getLoadAddressWithOffset(unsigned OffsetBytes) const {
+ assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+ return LoadAddress + OffsetBytes;
+ }
+
+ uintptr_t getStubOffset() const { return StubOffset; }
+
+ void advanceStubOffset(unsigned StubSize) {
+ StubOffset += StubSize;
+ assert(StubOffset <= AllocationSize && "Not enough space allocated!");
+ }
+
+ uintptr_t getObjAddress() const { return ObjAddress; }
+};
+
+/// RelocationEntry - used to represent relocations internally in the dynamic
+/// linker.
+class RelocationEntry {
+public:
+ /// SectionID - the section this relocation points to.
+ unsigned SectionID;
+
+ /// Offset - offset into the section.
+ uint64_t Offset;
+
+ /// RelType - relocation type.
+ uint32_t RelType;
+
+ /// Addend - the relocation addend encoded in the instruction itself. Also
+ /// used to make a relocation section relative instead of symbol relative.
+ int64_t Addend;
+
+ struct SectionPair {
+ uint32_t SectionA;
+ uint32_t SectionB;
+ };
+
+ /// SymOffset - Section offset of the relocation entry's symbol (used for GOT
+ /// lookup).
+ union {
+ uint64_t SymOffset;
+ SectionPair Sections;
+ };
+
+ /// True if this is a PCRel relocation (MachO specific).
+ bool IsPCRel;
+
+ /// The size of this relocation (MachO specific).
+ unsigned Size;
+
+ // ARM (MachO and COFF) specific.
+ bool IsTargetThumbFunc = false;
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(false), Size(0), IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ uint64_t symoffset)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(symoffset), IsPCRel(false), Size(0),
+ IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ bool IsPCRel, unsigned Size)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(IsPCRel), Size(Size), IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
+ : SectionID(id), Offset(offset), RelType(type),
+ Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
+ Size(Size), IsTargetThumbFunc(false) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size,
+ bool IsTargetThumbFunc)
+ : SectionID(id), Offset(offset), RelType(type),
+ Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
+ Size(Size), IsTargetThumbFunc(IsTargetThumbFunc) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
+};
+
+class RelocationValueRef {
+public:
+ unsigned SectionID;
+ uint64_t Offset;
+ int64_t Addend;
+ const char *SymbolName;
+ bool IsStubThumb = false;
+ RelocationValueRef() : SectionID(0), Offset(0), Addend(0),
+ SymbolName(nullptr) {}
+
+ inline bool operator==(const RelocationValueRef &Other) const {
+ return SectionID == Other.SectionID && Offset == Other.Offset &&
+ Addend == Other.Addend && SymbolName == Other.SymbolName &&
+ IsStubThumb == Other.IsStubThumb;
+ }
+ inline bool operator<(const RelocationValueRef &Other) const {
+ if (SectionID != Other.SectionID)
+ return SectionID < Other.SectionID;
+ if (Offset != Other.Offset)
+ return Offset < Other.Offset;
+ if (Addend != Other.Addend)
+ return Addend < Other.Addend;
+ if (IsStubThumb != Other.IsStubThumb)
+ return IsStubThumb < Other.IsStubThumb;
+ return SymbolName < Other.SymbolName;
+ }
+};
+
+/// Symbol info for RuntimeDyld.
+class SymbolTableEntry {
+public:
+ SymbolTableEntry() = default;
+
+ SymbolTableEntry(unsigned SectionID, uint64_t Offset, JITSymbolFlags Flags)
+ : Offset(Offset), SectionID(SectionID), Flags(Flags) {}
+
+ unsigned getSectionID() const { return SectionID; }
+ uint64_t getOffset() const { return Offset; }
+ void setOffset(uint64_t NewOffset) { Offset = NewOffset; }
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ uint64_t Offset = 0;
+ unsigned SectionID = 0;
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+};
+
+typedef StringMap<SymbolTableEntry> RTDyldSymbolTable;
+
+class RuntimeDyldImpl {
+ friend class RuntimeDyld::LoadedObjectInfo;
+protected:
+ static const unsigned AbsoluteSymbolSection = ~0U;
+
+ // The MemoryManager to load objects into.
+ RuntimeDyld::MemoryManager &MemMgr;
+
+ // The symbol resolver to use for external symbols.
+ JITSymbolResolver &Resolver;
+
+ // A list of all sections emitted by the dynamic linker. These sections are
+ // referenced in the code by means of their index in this list - SectionID.
+ typedef SmallVector<SectionEntry, 64> SectionList;
+ SectionList Sections;
+
+ typedef unsigned SID; // Type for SectionIDs
+#define RTDYLD_INVALID_SECTION_ID ((RuntimeDyldImpl::SID)(-1))
+
+ // Keep a map from each section of the object file to the SectionID that
+ // references it.
+ typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
+
+ // A global symbol table for symbols from all loaded modules.
+ RTDyldSymbolTable GlobalSymbolTable;
+
+ // A list of common symbols discovered in the object file.
+ typedef std::vector<SymbolRef> CommonSymbolList;
+
+ // For each symbol, keep a list of relocations based on it. Any time
+ // its address is reassigned (e.g. because the JIT re-compiled the function),
+ // the relocations get re-resolved.
+ // The symbol (or section) the relocation is sourced from is the Key
+ // in the relocation list where it's stored.
+ typedef SmallVector<RelocationEntry, 64> RelocationList;
+ // Relocations to sections already loaded. Indexed by SectionID which is the
+ // source of the address. The target where the address will be written is
+ // SectionID/Offset in the relocation itself.
+ std::unordered_map<unsigned, RelocationList> Relocations;
+
+ // Relocations to external symbols that are not yet resolved. Symbols are
+ // external when they aren't found in the global symbol table of all loaded
+ // modules. This map is indexed by symbol name.
+ StringMap<RelocationList> ExternalSymbolRelocations;
+
+ typedef std::map<RelocationValueRef, uintptr_t> StubMap;
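+
+ // A sketch of how subclasses typically use StubMap inside
+ // processRelocationRef, ensuring at most one stub per distinct target
+ // (the surrounding code is hypothetical):
+ //
+ //   RelocationValueRef Value = ...;  // filled in from the relocation
+ //   auto I = Stubs.find(Value);      // RelocationValueRef::operator< keys it
+ //   if (I == Stubs.end())
+ //     Stubs[Value] = Section.getStubOffset();  // record the new stub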
+
+ Triple::ArchType Arch;
+ bool IsTargetLittleEndian;
+ bool IsMipsO32ABI;
+ bool IsMipsN32ABI;
+ bool IsMipsN64ABI;
+
+ // True if all sections should be passed to the memory manager, false if only
+ // sections containing relocations should be. Defaults to 'false'.
+ bool ProcessAllSections;
+
+ // This mutex prevents simultaneously loading objects from two different
+ // threads. This keeps us from having to protect individual data structures
+ // and guarantees that section allocation requests to the memory manager
+ // won't be interleaved between modules. It is also used in mapSectionAddress
+ // and resolveRelocations to protect write access to internal data structures.
+ //
+ // loadObject may be called on the same thread during the handling of
+ // processRelocations, and that's OK. The handling of the relocation lists
+ // is written in such a way as to work correctly if new elements are added to
+ // the end of the list while the list is being processed.
+ sys::Mutex lock;
+
+ using NotifyStubEmittedFunction =
+ RuntimeDyld::NotifyStubEmittedFunction;
+ NotifyStubEmittedFunction NotifyStubEmitted;
+
+ virtual unsigned getMaxStubSize() const = 0;
+ virtual unsigned getStubAlignment() = 0;
+
+ bool HasError;
+ std::string ErrorStr;
+
+ void writeInt16BE(uint8_t *Addr, uint16_t Value) {
+ if (IsTargetLittleEndian)
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 8) & 0xFF;
+ *(Addr + 1) = Value & 0xFF;
+ }
+
+ void writeInt32BE(uint8_t *Addr, uint32_t Value) {
+ if (IsTargetLittleEndian)
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 24) & 0xFF;
+ *(Addr + 1) = (Value >> 16) & 0xFF;
+ *(Addr + 2) = (Value >> 8) & 0xFF;
+ *(Addr + 3) = Value & 0xFF;
+ }
+
+ void writeInt64BE(uint8_t *Addr, uint64_t Value) {
+ if (IsTargetLittleEndian)
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 56) & 0xFF;
+ *(Addr + 1) = (Value >> 48) & 0xFF;
+ *(Addr + 2) = (Value >> 40) & 0xFF;
+ *(Addr + 3) = (Value >> 32) & 0xFF;
+ *(Addr + 4) = (Value >> 24) & 0xFF;
+ *(Addr + 5) = (Value >> 16) & 0xFF;
+ *(Addr + 6) = (Value >> 8) & 0xFF;
+ *(Addr + 7) = Value & 0xFF;
+ }
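+
+ // For example, on a little-endian target, writeInt32BE(Addr, 0x11223344)
+ // byte-swaps the value and then stores it MSB-first, so memory ends up as
+ // 44 33 22 11; on a big-endian target it stores 11 22 33 44. Despite their
+ // names, these helpers therefore always produce target-endian bytes.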
+
+ virtual void setMipsABI(const ObjectFile &Obj) {
+ IsMipsO32ABI = false;
+ IsMipsN32ABI = false;
+ IsMipsN64ABI = false;
+ }
+
+ /// Endian-aware read. Read the least significant Size bytes from Src.
+ uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const;
+
+ /// Endian-aware write. Write the least significant Size bytes from Value to
+ /// Dst.
+ void writeBytesUnaligned(uint64_t Value, uint8_t *Dst, unsigned Size) const;
+
+ /// Generate JITSymbolFlags from a libObject symbol.
+ virtual Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &Sym);
+
+ /// Modify the given target address based on the given symbol flags.
+ /// This can be used by subclasses to tweak addresses based on symbol flags.
+ /// For example, the MachO/ARM target uses it to set the low bit if the
+ /// target is a Thumb symbol.
+ virtual uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+ JITSymbolFlags Flags) const {
+ return Addr;
+ }
+
+ /// Given the common symbols discovered in the object file, emit a
+ /// new section for them and update the symbol mappings in the object and
+ /// symbol table.
+ Error emitCommonSymbols(const ObjectFile &Obj,
+ CommonSymbolList &CommonSymbols, uint64_t CommonSize,
+ uint32_t CommonAlign);
+
+ /// Emits section data from the object file to the MemoryManager.
+ /// \param IsCode if it's true then allocateCodeSection() will be
+ /// used for emission, else allocateDataSection() will be used.
+ /// \return SectionID.
+ Expected<unsigned> emitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode);
+
+ /// Find Section in LocalSections. If the section is not found, emit it
+ /// and store it in LocalSections.
+ /// \param IsCode if it's true then allocateCodeSection() will be
+ /// used for emission, else allocateDataSection() will be used.
+ /// \return SectionID.
+ Expected<unsigned> findOrEmitSection(const ObjectFile &Obj,
+ const SectionRef &Section, bool IsCode,
+ ObjSectionToIDMap &LocalSections);
+
+ // Add a relocation entry that uses the given section.
+ void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
+
+ // Add a relocation entry that uses the given symbol. This symbol may
+ // be found in the global symbol table, or it may be external.
+ void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
+
+ /// Emits a long jump instruction to Addr.
+ /// \return Pointer to the memory area for emitting target address.
+ uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
+
+ /// Resolves the relocations in the Relocs list using the address in Value.
+ void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
+
+ /// An object-file-specific relocation resolver.
+ /// \param RE The relocation to be resolved.
+ /// \param Value Target symbol address used to apply the relocation.
+ virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
+
+ /// Parses one or more object file relocations (some object files use
+ /// relocation pairs) and stores them to Relocations or SymbolRelocations
+ /// (this depends on the object file type).
+ /// \return Iterator to the next relocation that needs to be parsed.
+ virtual Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) = 0;
+
+ void applyExternalSymbolRelocations(
+ const StringMap<JITEvaluatedSymbol> ExternalSymbolMap);
+
+ /// Resolve relocations to external symbols.
+ Error resolveExternalSymbols();
+
+ // Compute an upper bound of the memory that is required to load all
+ // sections
+ Error computeTotalAllocSize(const ObjectFile &Obj,
+ uint64_t &CodeSize, uint32_t &CodeAlign,
+ uint64_t &RODataSize, uint32_t &RODataAlign,
+ uint64_t &RWDataSize, uint32_t &RWDataAlign);
+
+ // Compute GOT size
+ unsigned computeGOTSize(const ObjectFile &Obj);
+
+ // Compute the stub buffer size required for a section
+ unsigned computeSectionStubBufSize(const ObjectFile &Obj,
+ const SectionRef &Section);
+
+ // Implementation of the generic part of the loadObject algorithm.
+ Expected<ObjSectionToIDMap> loadObjectImpl(const object::ObjectFile &Obj);
+
+ // Return the size of a Global Offset Table (GOT) entry.
+ virtual size_t getGOTEntrySize() { return 0; }
+
+ // Return true if the relocation R may require allocating a GOT entry.
+ virtual bool relocationNeedsGot(const RelocationRef &R) const {
+ return false;
+ }
+
+ // Return true if the relocation R may require allocating a stub.
+ virtual bool relocationNeedsStub(const RelocationRef &R) const {
+ return true; // Conservative answer
+ }
+
+public:
+ RuntimeDyldImpl(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : MemMgr(MemMgr), Resolver(Resolver),
+ ProcessAllSections(false), HasError(false) {
+ }
+
+ virtual ~RuntimeDyldImpl();
+
+ void setProcessAllSections(bool ProcessAllSections) {
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ virtual std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &Obj) = 0;
+
+ uint64_t getSectionLoadAddress(unsigned SectionID) const {
+ return Sections[SectionID].getLoadAddress();
+ }
+
+ uint8_t *getSectionAddress(unsigned SectionID) const {
+ return Sections[SectionID].getAddress();
+ }
+
+ StringRef getSectionContent(unsigned SectionID) const {
+ return StringRef(reinterpret_cast<char *>(Sections[SectionID].getAddress()),
+ Sections[SectionID].getStubOffset() + getMaxStubSize());
+ }
+
+ uint8_t* getSymbolLocalAddress(StringRef Name) const {
+ // FIXME: Just look up as a function for now. Overly simple of course.
+ // Work in progress.
+ RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
+ if (pos == GlobalSymbolTable.end())
+ return nullptr;
+ const auto &SymInfo = pos->second;
+ // Absolute symbols do not have a local address.
+ if (SymInfo.getSectionID() == AbsoluteSymbolSection)
+ return nullptr;
+ return getSectionAddress(SymInfo.getSectionID()) + SymInfo.getOffset();
+ }
+
+ unsigned getSymbolSectionID(StringRef Name) const {
+ auto GSTItr = GlobalSymbolTable.find(Name);
+ if (GSTItr == GlobalSymbolTable.end())
+ return ~0U;
+ return GSTItr->second.getSectionID();
+ }
+
+ JITEvaluatedSymbol getSymbol(StringRef Name) const {
+ // FIXME: Just look up as a function for now. Overly simple of course.
+ // Work in progress.
+ RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
+ if (pos == GlobalSymbolTable.end())
+ return nullptr;
+ const auto &SymEntry = pos->second;
+ uint64_t SectionAddr = 0;
+ if (SymEntry.getSectionID() != AbsoluteSymbolSection)
+ SectionAddr = getSectionLoadAddress(SymEntry.getSectionID());
+ uint64_t TargetAddr = SectionAddr + SymEntry.getOffset();
+
+ // FIXME: Have getSymbol return the actual address and have the client
+ // modify it based on the flags. This will require clients to be
+ // aware of the target architecture, which we should build
+ // infrastructure for.
+ TargetAddr = modifyAddressBasedOnFlags(TargetAddr, SymEntry.getFlags());
+ return JITEvaluatedSymbol(TargetAddr, SymEntry.getFlags());
+ }
+
+ std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const {
+ std::map<StringRef, JITEvaluatedSymbol> Result;
+
+ for (auto &KV : GlobalSymbolTable) {
+ auto SectionID = KV.second.getSectionID();
+ uint64_t SectionAddr = 0;
+ if (SectionID != AbsoluteSymbolSection)
+ SectionAddr = getSectionLoadAddress(SectionID);
+ Result[KV.first()] =
+ JITEvaluatedSymbol(SectionAddr + KV.second.getOffset(), KV.second.getFlags());
+ }
+
+ return Result;
+ }
+
+ void resolveRelocations();
+
+ void resolveLocalRelocations();
+
+ static void finalizeAsync(std::unique_ptr<RuntimeDyldImpl> This,
+ unique_function<void(Error)> OnEmitted,
+ std::unique_ptr<MemoryBuffer> UnderlyingBuffer);
+
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+ // Is the linker in an error state?
+ bool hasError() { return HasError; }
+
+ // Mark the error condition as handled and continue.
+ void clearError() { HasError = false; }
+
+ // Get the error message.
+ StringRef getErrorString() { return ErrorStr; }
+
+ virtual bool isCompatibleFile(const ObjectFile &Obj) const = 0;
+
+ void setNotifyStubEmitted(NotifyStubEmittedFunction NotifyStubEmitted) {
+ this->NotifyStubEmitted = std::move(NotifyStubEmitted);
+ }
+
+ virtual void registerEHFrames();
+
+ void deregisterEHFrames();
+
+ virtual Error finalizeLoad(const ObjectFile &ObjImg,
+ ObjSectionToIDMap &SectionMap) {
+ return Error::success();
+ }
+};
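+
+// A sketch of the typical driver sequence over this interface (the concrete
+// subclass, object, memory manager, and resolver are assumed to exist):
+//
+//   RuntimeDyldImpl &Dyld = ...;             // e.g. a RuntimeDyldMachO
+//   if (auto Info = Dyld.loadObject(Obj)) {  // emit sections, record relocs
+//     Dyld.resolveRelocations();             // apply all pending relocations
+//     Dyld.registerEHFrames();               // hand EH frames to the manager
+//   }
+//   if (Dyld.hasError())
+//     report_fatal_error(Dyld.getErrorString());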
+
+} // end namespace llvm
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
new file mode 100644
index 0000000000000..9ca76602ea18e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -0,0 +1,382 @@
+//===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldMachO.h"
+#include "Targets/RuntimeDyldMachOAArch64.h"
+#include "Targets/RuntimeDyldMachOARM.h"
+#include "Targets/RuntimeDyldMachOI386.h"
+#include "Targets/RuntimeDyldMachOX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+class LoadedMachOObjectInfo final
+ : public LoadedObjectInfoHelper<LoadedMachOObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
+public:
+ LoadedMachOObjectInfo(RuntimeDyldImpl &RTDyld,
+ ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+ OwningBinary<ObjectFile>
+ getObjectForDebug(const ObjectFile &Obj) const override {
+ return OwningBinary<ObjectFile>();
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+
+int64_t RuntimeDyldMachO::memcpyAddend(const RelocationEntry &RE) const {
+ unsigned NumBytes = 1 << RE.Size;
+ uint8_t *Src = Sections[RE.SectionID].getAddress() + RE.Offset;
+
+ return static_cast<int64_t>(readBytesUnaligned(Src, NumBytes));
+}
+
+Expected<relocation_iterator>
+RuntimeDyldMachO::processScatteredVANILLA(
+ unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+ bool TargetIsLocalThumbFunc) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = Obj.getAnyRelocationType(RE);
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+ unsigned SymbolBaseAddr = Obj.getScatteredRelocationValue(RE);
+ section_iterator TargetSI = getSectionByAddress(Obj, SymbolBaseAddr);
+ assert(TargetSI != Obj.section_end() && "Can't find section for symbol");
+ uint64_t SectionBaseAddr = TargetSI->getAddress();
+ SectionRef TargetSection = *TargetSI;
+ bool IsCode = TargetSection.isText();
+ uint32_t TargetSectionID = ~0U;
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+
+ Addend -= SectionBaseAddr;
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+ R.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+ addRelocationForSection(R, TargetSectionID);
+
+ return ++RelI;
+}
+
+Expected<RelocationValueRef>
+RuntimeDyldMachO::getRelocationValueRef(
+ const ObjectFile &BaseTObj, const relocation_iterator &RI,
+ const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID) {
+
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseTObj);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+ RelocationValueRef Value;
+
+ bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
+ if (IsExternal) {
+ symbol_iterator Symbol = RI->getSymbol();
+ StringRef TargetName;
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+ RTDyldSymbolTable::const_iterator SI =
+ GlobalSymbolTable.find(TargetName.data());
+ if (SI != GlobalSymbolTable.end()) {
+ const auto &SymInfo = SI->second;
+ Value.SectionID = SymInfo.getSectionID();
+ Value.Offset = SymInfo.getOffset() + RE.Addend;
+ } else {
+ Value.SymbolName = TargetName.data();
+ Value.Offset = RE.Addend;
+ }
+ } else {
+ SectionRef Sec = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = Sec.isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, Sec, IsCode,
+ ObjSectionToID))
+ Value.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ uint64_t Addr = Sec.getAddress();
+ Value.Offset = RE.Addend - Addr;
+ }
+
+ return Value;
+}
+
+void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
+ const relocation_iterator &RI,
+ unsigned OffsetToNextPC) {
+ auto &O = *cast<MachOObjectFile>(RI->getObject());
+ section_iterator SecI = O.getRelocationRelocatedSection(RI);
+ Value.Offset += RI->getOffset() + OffsetToNextPC + SecI->getAddress();
+}
+
+void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
+ uint64_t Value) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddress() + RE.Offset;
+ uint64_t FinalAddress = Section.getLoadAddress() + RE.Offset;
+
+ dbgs() << "resolveRelocation Section: " << RE.SectionID
+ << " LocalAddress: " << format("%p", LocalAddress)
+ << " FinalAddress: " << format("0x%016" PRIx64, FinalAddress)
+ << " Value: " << format("0x%016" PRIx64, Value) << " Addend: " << RE.Addend
+ << " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
+ << " Size: " << (1 << RE.Size) << "\n";
+}
+
+section_iterator
+RuntimeDyldMachO::getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr) {
+ section_iterator SI = Obj.section_begin();
+ section_iterator SE = Obj.section_end();
+
+ for (; SI != SE; ++SI) {
+ uint64_t SAddr = SI->getAddress();
+ uint64_t SSize = SI->getSize();
+ if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+ return SI;
+ }
+
+ return SE;
+}
+
+// Populate __pointers section.
+Error RuntimeDyldMachO::populateIndirectSymbolPointersSection(
+ const MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "Pointer table section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+ uint32_t PTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ const unsigned PTEntrySize = 4;
+ unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+ unsigned PTEntryOffset = 0;
+
+ assert((PTSectionSize % PTEntrySize) == 0 &&
+ "Pointers section does not contain a whole number of stubs?");
+
+ LLVM_DEBUG(dbgs() << "Populating pointer table section "
+ << Sections[PTSectionID].getName() << ", Section ID "
+ << PTSectionID << ", " << NumPTEntries << " entries, "
+ << PTEntrySize << " bytes each:\n");
+
+ for (unsigned i = 0; i < NumPTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ if (auto IndirectSymbolNameOrErr = SI->getName())
+ IndirectSymbolName = *IndirectSymbolNameOrErr;
+ else
+ return IndirectSymbolNameOrErr.takeError();
+ LLVM_DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
+ << ", PT offset: " << PTEntryOffset << "\n");
+ RelocationEntry RE(PTSectionID, PTEntryOffset,
+ MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ PTEntryOffset += PTEntrySize;
+ }
+ return Error::success();
+}
+
+bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile &Obj) const {
+ return Obj.isMachO();
+}
+
+template <typename Impl>
+Error
+RuntimeDyldMachOCRTPBase<Impl>::finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) {
+ unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
+
+ for (const auto &Section : Obj.sections()) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ // Force emission of the __text, __eh_frame, and __gcc_except_tab sections
+ // if they're present. Otherwise call down to the impl to handle other
+ // sections that have already been emitted.
+ if (Name == "__text") {
+ if (auto TextSIDOrErr = findOrEmitSection(Obj, Section, true, SectionMap))
+ TextSID = *TextSIDOrErr;
+ else
+ return TextSIDOrErr.takeError();
+ } else if (Name == "__eh_frame") {
+ if (auto EHFrameSIDOrErr = findOrEmitSection(Obj, Section, false,
+ SectionMap))
+ EHFrameSID = *EHFrameSIDOrErr;
+ else
+ return EHFrameSIDOrErr.takeError();
+ } else if (Name == "__gcc_except_tab") {
+ if (auto ExceptTabSIDOrErr = findOrEmitSection(Obj, Section, true,
+ SectionMap))
+ ExceptTabSID = *ExceptTabSIDOrErr;
+ else
+ return ExceptTabSIDOrErr.takeError();
+ } else {
+ auto I = SectionMap.find(Section);
+ if (I != SectionMap.end())
+ if (auto Err = impl().finalizeSection(Obj, I->second, Section))
+ return Err;
+ }
+ }
+ UnregisteredEHFrameSections.push_back(
+ EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
+
+ return Error::success();
+}
+
+template <typename Impl>
+unsigned char *RuntimeDyldMachOCRTPBase<Impl>::processFDE(uint8_t *P,
+ int64_t DeltaForText,
+ int64_t DeltaForEH) {
+ typedef typename Impl::TargetPtrT TargetPtrT;
+
+ LLVM_DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+ << ", Delta for EH: " << DeltaForEH << "\n");
+ uint32_t Length = readBytesUnaligned(P, 4);
+ P += 4;
+ uint8_t *Ret = P + Length;
+ uint32_t Offset = readBytesUnaligned(P, 4);
+ if (Offset == 0) // is a CIE
+ return Ret;
+
+ P += 4;
+ TargetPtrT FDELocation = readBytesUnaligned(P, sizeof(TargetPtrT));
+ TargetPtrT NewLocation = FDELocation - DeltaForText;
+ writeBytesUnaligned(NewLocation, P, sizeof(TargetPtrT));
+
+ P += sizeof(TargetPtrT);
+
+ // Skip the FDE address range
+ P += sizeof(TargetPtrT);
+
+ uint8_t Augmentationsize = *P;
+ P += 1;
+ if (Augmentationsize != 0) {
+ TargetPtrT LSDA = readBytesUnaligned(P, sizeof(TargetPtrT));
+ TargetPtrT NewLSDA = LSDA - DeltaForEH;
+ writeBytesUnaligned(NewLSDA, P, sizeof(TargetPtrT));
+ }
+
+ return Ret;
+}
+
+static int64_t computeDelta(SectionEntry *A, SectionEntry *B) {
+ int64_t ObjDistance = static_cast<int64_t>(A->getObjAddress()) -
+ static_cast<int64_t>(B->getObjAddress());
+ int64_t MemDistance = A->getLoadAddress() - B->getLoadAddress();
+ return ObjDistance - MemDistance;
+}
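+
+// For example (hypothetical values): if __text sits 0x100 bytes past
+// __eh_frame in the object (ObjDistance = 0x100) but 0x1000 bytes past it in
+// target memory (MemDistance = 0x1000), DeltaForText is -0xF00, and
+// processFDE rebases each FDE's function pointer by subtracting that delta.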
+
+template <typename Impl>
+void RuntimeDyldMachOCRTPBase<Impl>::registerEHFrames() {
+
+ for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
+ EHFrameRelatedSections &SectionInfo = UnregisteredEHFrameSections[i];
+ if (SectionInfo.EHFrameSID == RTDYLD_INVALID_SECTION_ID ||
+ SectionInfo.TextSID == RTDYLD_INVALID_SECTION_ID)
+ continue;
+ SectionEntry *Text = &Sections[SectionInfo.TextSID];
+ SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
+ SectionEntry *ExceptTab = nullptr;
+ if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
+ ExceptTab = &Sections[SectionInfo.ExceptTabSID];
+
+ int64_t DeltaForText = computeDelta(Text, EHFrame);
+ int64_t DeltaForEH = 0;
+ if (ExceptTab)
+ DeltaForEH = computeDelta(ExceptTab, EHFrame);
+
+ uint8_t *P = EHFrame->getAddress();
+ uint8_t *End = P + EHFrame->getSize();
+ while (P != End) {
+ P = processFDE(P, DeltaForText, DeltaForEH);
+ }
+
+ MemMgr.registerEHFrames(EHFrame->getAddress(), EHFrame->getLoadAddress(),
+ EHFrame->getSize());
+ }
+ UnregisteredEHFrameSections.clear();
+}
+
+std::unique_ptr<RuntimeDyldMachO>
+RuntimeDyldMachO::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default:
+ llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
+ break;
+ case Triple::arm:
+ return std::make_unique<RuntimeDyldMachOARM>(MemMgr, Resolver);
+ case Triple::aarch64:
+ return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+ case Triple::aarch64_32:
+ return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+ case Triple::x86:
+ return std::make_unique<RuntimeDyldMachOI386>(MemMgr, Resolver);
+ case Triple::x86_64:
+ return std::make_unique<RuntimeDyldMachOX86_64>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldMachO::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+ return std::make_unique<LoadedMachOObjectInfo>(*this,
+ *ObjSectionToIDOrErr);
+ else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
+} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
new file mode 100644
index 0000000000000..650e7b79fbb8e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -0,0 +1,167 @@
+//===-- RuntimeDyldMachO.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Support/Format.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+class RuntimeDyldMachO : public RuntimeDyldImpl {
+protected:
+ struct SectionOffsetPair {
+ unsigned SectionID;
+ uint64_t Offset;
+ };
+
+ struct EHFrameRelatedSections {
+ EHFrameRelatedSections()
+ : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
+ TextSID(RTDYLD_INVALID_SECTION_ID),
+ ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
+
+ EHFrameRelatedSections(SID EH, SID T, SID Ex)
+ : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
+ SID EHFrameSID;
+ SID TextSID;
+ SID ExceptTabSID;
+ };
+
+ // When a module is loaded we save the SectionID of the EH frame section
+ // in a table until we receive a request to register all unregistered
+ // EH frame sections with the memory manager.
+ SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
+
+ RuntimeDyldMachO(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldImpl(MemMgr, Resolver) {}
+
+ /// This convenience method uses memcpy to extract a contiguous addend (the
+ /// addend size and offset are taken from the corresponding fields of the RE).
+ int64_t memcpyAddend(const RelocationEntry &RE) const;
+
+ /// Given a relocation_iterator for a non-scattered relocation, construct a
+ /// RelocationEntry and fill in the common fields. The 'Addend' field is *not*
+ /// filled in, since immediate encodings are highly target/opcode specific.
+ /// For targets/opcodes with simple, contiguous immediates (e.g. X86) the
+ /// memcpyAddend method can be used to read the immediate.
+ RelocationEntry getRelocationEntry(unsigned SectionID,
+ const ObjectFile &BaseTObj,
+ const relocation_iterator &RI) const {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseTObj);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+ unsigned Size = Obj.getAnyRelocationLength(RelInfo);
+ uint64_t Offset = RI->getOffset();
+ MachO::RelocationInfoType RelType =
+ static_cast<MachO::RelocationInfoType>(Obj.getAnyRelocationType(RelInfo));
+
+ return RelocationEntry(SectionID, Offset, RelType, 0, IsPCRel, Size);
+ }
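+
+ // A sketch of typical use in a subclass's processRelocationRef, assuming a
+ // simple contiguous immediate (e.g. on X86):
+ //
+ //   RelocationEntry RE = getRelocationEntry(SectionID, Obj, RelI);
+ //   RE.Addend = memcpyAddend(RE);  // read the immediate as the addend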
+
+ /// Process a scattered vanilla relocation.
+ Expected<relocation_iterator>
+ processScatteredVANILLA(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+ bool TargetIsLocalThumbFunc = false);
+
+ /// Construct a RelocationValueRef representing the relocation target.
+ /// For Symbols in known sections, this will return a RelocationValueRef
+ /// representing a (SectionID, Offset) pair.
+ /// For Symbols whose section is not known, this will return a
+ /// (SymbolName, Offset) pair, where the Offset is taken from the instruction
+ /// immediate (held in RE.Addend).
+ /// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
+ /// should be done by the caller where appropriate by calling makePCRel on
+ /// the RelocationValueRef.
+ Expected<RelocationValueRef>
+ getRelocationValueRef(const ObjectFile &BaseTObj,
+ const relocation_iterator &RI,
+ const RelocationEntry &RE,
+ ObjSectionToIDMap &ObjSectionToID);
+
+ /// Make the RelocationValueRef addend PC-relative.
+ void makeValueAddendPCRel(RelocationValueRef &Value,
+ const relocation_iterator &RI,
+ unsigned OffsetToNextPC);
+
+ /// Dump information about the relocation entry (RE) and resolved value.
+ void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
+
+ // Return a section iterator for the section containing the given address.
+ static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr);
+
+ // Populate __pointers section.
+ Error populateIndirectSymbolPointersSection(const MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID);
+
+public:
+
+ /// Create a RuntimeDyldMachO instance for the given target architecture.
+ static std::unique_ptr<RuntimeDyldMachO>
+ create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &O) override;
+
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+};
+
+/// RuntimeDyldMachOCRTPBase - Templated base class for generic MachO linker
+/// algorithms and data structures.
+///
+/// Concrete, target specific sub-classes can be accessed via the impl()
+/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
+/// Recurring Template Idiom). Concrete subclasses for each target
+/// can be found in ./Targets.
+template <typename Impl>
+class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
+private:
+ Impl &impl() { return static_cast<Impl &>(*this); }
+ const Impl &impl() const { return static_cast<const Impl &>(*this); }
+
+ unsigned char *processFDE(uint8_t *P, int64_t DeltaForText,
+ int64_t DeltaForEH);
+
+public:
+ RuntimeDyldMachOCRTPBase(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachO(MemMgr, Resolver) {}
+
+ Error finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override;
+ void registerEHFrames() override;
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
new file mode 100644
index 0000000000000..40910bea0c364
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
@@ -0,0 +1,217 @@
+//===--- RuntimeDyldCOFFI386.h --- COFF/i386 specific code -----*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFI386 : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFI386(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override {
+ return 8; // 2-byte jmp instruction + 32-bit relative address + 2 byte pad
+ }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_I386_DIR32:
+ case COFF::IMAGE_REL_I386_DIR32NB:
+ case COFF::IMAGE_REL_I386_SECREL:
+ case COFF::IMAGE_REL_I386_REL32: {
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ }
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ unsigned TargetSectionID = -1;
+ if (Section == Obj.section_end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_I386_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_I386_DIR32:
+ case COFF::IMAGE_REL_I386_DIR32NB:
+ case COFF::IMAGE_REL_I386_REL32: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ getSymbolOffset(*Symbol), 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECREL: {
+ RelocationEntry RE = RelocationEntry(SectionID, Offset, RelType,
+ getSymbolOffset(*Symbol) + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ case COFF::IMAGE_REL_I386_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_I386_DIR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(
+ RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_DIR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend) -
+ Sections[0].getLoadAddress();
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_REL32: {
+ // 32-bit relative displacement to the target.
+ uint64_t Result = RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddress();
+ Result = Result - Section.getLoadAddress() + RE.Addend - 4 - RE.Offset;
+ assert(static_cast<int64_t>(Result) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_REL32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_I386_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECREL Value: "
+ << RE.Addend << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+} // end namespace llvm
+
+#endif
+
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
new file mode 100644
index 0000000000000..bb2e9626e0b07
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
@@ -0,0 +1,315 @@
+//===--- RuntimeDyldCOFFThumb.h --- COFF/Thumb specific code ---*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF thumb support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+static bool isThumbFunc(object::symbol_iterator Symbol,
+ const object::ObjectFile &Obj,
+ object::section_iterator Section) {
+ Expected<object::SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ OS.flush();
+ report_fatal_error(Buf);
+ }
+
+ if (*SymTypeOrErr != object::SymbolRef::ST_Function)
+ return false;
+
+ // We check the IMAGE_SCN_MEM_16BIT flag in the section of the symbol to
+ // tell whether it's Thumb or not.
+ return cast<object::COFFObjectFile>(Obj)
+ .getCOFFSection(*Section)
+ ->Characteristics &
+ COFF::IMAGE_SCN_MEM_16BIT;
+}
+
+class RuntimeDyldCOFFThumb : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFThumb(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override {
+ return 16; // 8-byte load instructions, 4-byte jump, 4-byte padding
+ }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_ARM_ADDR32:
+ case COFF::IMAGE_REL_ARM_ADDR32NB:
+ case COFF::IMAGE_REL_ARM_SECREL:
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ unsigned TargetSectionID = -1;
+ if (Section == Obj.section_end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+
+ // We need to find out if the relocation is relative to a Thumb function
+ // so that we include the ISA selection bit when resolving the relocation.
+ bool IsTargetThumbFunc = isThumbFunc(Symbol, Obj, Section);
+
+ switch (RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ RelocationEntry RE = RelocationEntry(
+ SectionID, Offset, RelType, Addend, TargetSectionID,
+ getSymbolOffset(*Symbol), 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ getSymbolOffset(*Symbol), 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECREL: {
+ RelocationEntry RE = RelocationEntry(SectionID, Offset, RelType,
+ getSymbolOffset(*Symbol) + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ RelocationEntry RE = RelocationEntry(
+ SectionID, Offset, RelType, Addend, TargetSectionID,
+ getSymbolOffset(*Symbol), 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T:
+ case COFF::IMAGE_REL_ARM_BRANCH24T:
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType,
+ getSymbolOffset(*Symbol) + Addend, true, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ }
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+ int ISASelectionBit = RE.IsTargetThumbFunc ? 1 : 0;
+
+ switch (RE.RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ Result |= ISASelectionBit;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result = Sections[RE.Sections.SectionA].getLoadAddress() -
+ Sections[0].getLoadAddress() + RE.Addend;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ Result |= ISASelectionBit;
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECREL Value: " << RE.Addend
+ << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ // 32-bit VA of the target applied to a contiguous MOVW+MOVT pair.
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_MOV32T"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+
+ // MOVW(T3): |11110|i|10|0|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm32 = zext imm4:i:imm3:imm8
+ // MOVT(T1): |11110|i|10|1|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm16 = imm4:i:imm3:imm8
+
+ auto EncodeImmediate = [](uint8_t *Bytes, uint16_t Immediate) {
+ Bytes[0] |= ((Immediate & 0xf000) >> 12);
+ Bytes[1] |= ((Immediate & 0x0800) >> 11);
+ Bytes[2] |= ((Immediate & 0x00ff) >> 0);
+ Bytes[3] |= (((Immediate & 0x0700) >> 8) << 4);
+ };
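+
+ // For example, Immediate = 0xABCD sets imm4 = 0xA in Bytes[0], i = 1 in
+ // bit 0 of Bytes[1], imm8 = 0xCD in Bytes[2], and imm3 = 0b011 in bits
+ // 6:4 of Bytes[3].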
+
+ EncodeImmediate(&Target[0],
+ (static_cast<uint32_t>(Result) >> 00) | ISASelectionBit);
+ EncodeImmediate(&Target[4], static_cast<uint32_t>(Result) >> 16);
+
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T: {
+ // The most significant 20 bits of the signed 21-bit relative displacement.
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH20T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH24T: {
+ // The most significant 24 bits of the signed 25-bit relative displacement.
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH24T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ // The most significant 24 bits of the signed 25-bit relative displacement.
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BLX23T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
new file mode 100644
index 0000000000000..dc4af08583de7
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
@@ -0,0 +1,305 @@
+//===-- RuntimeDyldCOFFX86_64.h --- COFF/X86_64 specific code ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86_64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFX86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFX86_64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFX86_64 : public RuntimeDyldCOFF {
+
+private:
+ // When a module is loaded we save the SectionID of the unwind
+ // sections in a table until we receive a request to register all
+ // unregistered EH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+ SmallVector<SID, 2> RegisteredEHFrameSections;
+ uint64_t ImageBase;
+
+ // Fake an __ImageBase pointer by returning the section with the lowest address.
+ uint64_t getImageBase() {
+ if (!ImageBase) {
+ ImageBase = std::numeric_limits<uint64_t>::max();
+ for (const SectionEntry &Section : Sections)
+ // The Sections list may contain sections that weren't loaded for
+ // whatever reason: they may be debug sections, and ProcessAllSections
+ // is false, or they may be sections that contain 0 bytes. If the
+ // section isn't loaded, the load address will be 0, and it should not
+ // be included in the ImageBase calculation.
+ if (Section.getLoadAddress() != 0)
+ ImageBase = std::min(ImageBase, Section.getLoadAddress());
+ }
+ return ImageBase;
+ }
+
+ void write32BitOffset(uint8_t *Target, int64_t Addend, uint64_t Delta) {
+ uint64_t Result = Addend + Delta;
+ assert(Result <= UINT32_MAX && "Relocation overflow");
+ writeBytesUnaligned(Result, Target, 4);
+ }
+
+public:
+ RuntimeDyldCOFFX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver), ImageBase(0) {}
+
+ unsigned getStubAlignment() override { return 1; }
+
+ // 2-byte jmp instruction + 32-bit relative address + 64-bit absolute jump
+ unsigned getMaxStubSize() const override { return 14; }
+
+ // The target location for the relocation is described by RE.SectionID and
+ // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+ // SectionEntry has three members describing its location.
+ // SectionEntry::Address is the address at which the section has been loaded
+ // into memory in the current (host) process. SectionEntry::LoadAddress is
+ // the address that the section will have in the target process.
+ // SectionEntry::ObjAddress is the address of the bits for this section in the
+ // original emitted object image (also in the current address space).
+ //
+ // Relocations will be applied as if the section were loaded at
+ // SectionEntry::LoadAddress, but they will be applied at an address based
+ // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer
+ // to Target memory contents if they are required for value calculations.
+ //
+ // The Value parameter here is the load address of the symbol for the
+ // relocation to be applied. For relocations which refer to symbols in the
+ // current object Value will be the LoadAddress of the section in which
+ // the symbol resides (RE.Addend provides additional information about the
+ // symbol location). For external symbols, Value will be the address of the
+ // symbol in the target address space.
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ // Delta is the distance from the start of the reloc to the end of the
+ // instruction with the reloc.
+ uint64_t Delta = 4 + (RE.RelType - COFF::IMAGE_REL_AMD64_REL32);
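+ // For example, IMAGE_REL_AMD64_REL32_4 has four trailing bytes between
+ // the 32-bit displacement field and the end of the instruction, so
+ // Delta = 4 + 4 = 8.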
+ Value -= FinalAddress + Delta;
+ uint64_t Result = Value + RE.Addend;
+ assert(((int64_t)Result <= INT32_MAX) && "Relocation overflow");
+ assert(((int64_t)Result >= INT32_MIN) && "Relocation underflow");
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ // ADDR32NB requires an offset less than 2GB from 'ImageBase'.
+ // The MemoryManager can make sure this is always true by forcing the
+ // memory layout to be: CodeSection < ReadOnlySection < ReadWriteSection.
+ const uint64_t ImageBase = getImageBase();
+ if (Value < ImageBase || ((Value - ImageBase) > UINT32_MAX)) {
+ llvm::errs() << "IMAGE_REL_AMD64_ADDR32NB relocation requires an"
+ << "ordered section layout.\n";
+ write32BitOffset(Target, 0, 0);
+ } else {
+ write32BitOffset(Target, RE.Addend, Value - ImageBase);
+ }
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ writeBytesUnaligned(Value + RE.Addend, Target, 8);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_SECREL: {
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX && "Relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN && "Relocation underflow");
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ }
+ }
+
+ std::tuple<uint64_t, uint64_t, uint64_t>
+ generateRelocationStub(unsigned SectionID, StringRef TargetName,
+ uint64_t Offset, uint64_t RelType, uint64_t Addend,
+ StubMap &Stubs) {
+ uintptr_t StubOffset;
+ SectionEntry &Section = Sections[SectionID];
+
+ RelocationValueRef OriginalRelValueRef;
+ OriginalRelValueRef.SectionID = SectionID;
+ OriginalRelValueRef.Offset = Offset;
+ OriginalRelValueRef.Addend = Addend;
+ OriginalRelValueRef.SymbolName = TargetName.data();
+
+ auto Stub = Stubs.find(OriginalRelValueRef);
+ if (Stub == Stubs.end()) {
+ LLVM_DEBUG(dbgs() << " Create a new stub function for "
+ << TargetName.data() << "\n");
+
+ StubOffset = Section.getStubOffset();
+ Stubs[OriginalRelValueRef] = StubOffset;
+ createStubFunction(Section.getAddressWithOffset(StubOffset));
+ Section.advanceStubOffset(getMaxStubSize());
+ } else {
+ LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+ << "\n");
+ StubOffset = Stub->second;
+ }
+
+ // FIXME: If RelType == COFF::IMAGE_REL_AMD64_ADDR32NB we should be able
+ // to ignore the __ImageBase requirement and just forward to the stub
+ // directly as an offset of this section:
+ // write32BitOffset(Section.getAddressWithOffset(Offset), 0, StubOffset);
+ // .xdata exception handlers don't work with this shortcut, though.
+
+ // Resolve original relocation to stub function.
+ const RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));
+
+ // Adjust the relocation info so resolution writes to the stub function.
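+ // The first 6 bytes of the stub are the indirect jmp; the 64-bit target
+ // slot starts at StubOffset + 6, which is why the returned Offset points
+ // there and the relocation type becomes IMAGE_REL_AMD64_ADDR64.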
+ Addend = 0;
+ Offset = StubOffset + 6;
+ RelType = COFF::IMAGE_REL_AMD64_ADDR64;
+
+ return std::make_tuple(Offset, RelType, Addend);
+ }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ // If possible, find the symbol referred to in the relocation,
+ // and the section that contains it.
+ object::symbol_iterator Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+ auto SectionOrError = Symbol->getSection();
+ if (!SectionOrError)
+ return SectionOrError.takeError();
+ object::section_iterator SecI = *SectionOrError;
+ // If there is no section, this must be an external reference.
+ const bool IsExtern = SecI == Obj.section_end();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+ uint64_t Addend = 0;
+ SectionEntry &Section = Sections[SectionID];
+ uintptr_t ObjTarget = Section.getObjAddress() + Offset;
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ switch (RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5:
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 4);
+
+ if (IsExtern)
+ std::tie(Offset, RelType, Addend) = generateRelocationStub(
+ SectionID, TargetName, Offset, RelType, Addend, Stubs);
+
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 8);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelType << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ bool IsCode = SecI->isText();
+ unsigned TargetSectionID;
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *SecI, IsCode, ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ uint64_t TargetOffset = getSymbolOffset(*Symbol);
+ RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void registerEHFrames() override {
+ for (auto const &EHFrameSID : UnregisteredEHFrameSections) {
+ uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+ uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+ size_t EHFrameSize = Sections[EHFrameSID].getSize();
+ MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+ RegisteredEHFrameSections.push_back(EHFrameSID);
+ }
+ UnregisteredEHFrameSections.clear();
+ }
+
+ Error finalizeLoad(const object::ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override {
+ // Look for and record the EH frame section IDs.
+ for (const auto &SectionPair : SectionMap) {
+ const object::SectionRef &Section = SectionPair.first;
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+
+ // Note unwind info is stored in .pdata but often points to .xdata
+ // with an IMAGE_REL_AMD64_ADDR32NB relocation. Using a memory manager
+ // that keeps sections ordered in relation to __ImageBase is necessary.
+ if ((*NameOrErr) == ".pdata")
+ UnregisteredEHFrameSections.push_back(SectionPair.second);
+ }
+ return Error::success();
+ }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
new file mode 100644
index 0000000000000..17cbe612fb43e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
@@ -0,0 +1,320 @@
+//===-- RuntimeDyldELFMips.cpp ---- ELF/Mips specific code. -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELFMips.h"
+#include "llvm/BinaryFormat/ELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+void RuntimeDyldELFMips::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ if (IsMipsO32ABI)
+ resolveMIPSO32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend);
+ else if (IsMipsN32ABI) {
+ resolveMIPSN32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ } else if (IsMipsN64ABI)
+ resolveMIPSN64Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ else
+ llvm_unreachable("Mips ABI not handled");
+}
+
+uint64_t RuntimeDyldELFMips::evaluateRelocation(const RelocationEntry &RE,
+ uint64_t Value,
+ uint64_t Addend) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ Value = evaluateMIPS64Relocation(Section, RE.Offset, Value, RE.RelType,
+ Addend, RE.SymOffset, RE.SectionID);
+ return Value;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+void RuntimeDyldELFMips::applyRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ applyMIPSRelocation(Section.getAddressWithOffset(RE.Offset), Value,
+ RE.RelType);
+ return;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+int64_t
+RuntimeDyldELFMips::evaluateMIPS32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS32Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ return Value;
+ case ELF::R_MIPS_32:
+ return Value;
+ case ELF::R_MIPS_26:
+ return Value >> 2;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return (Value + 0x8000) >> 16;
+ case ELF::R_MIPS_LO16:
+ return Value;
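+ // Example: for Value = 0x12348000, HI16 yields (0x12348000 + 0x8000) >> 16
+ // = 0x1235 and LO16 yields 0x8000; 0x1235 << 16 plus the sign-extended low
+ // half (-0x8000) reconstructs 0x12348000, which is why HI16 rounds up.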
+ case ELF::R_MIPS_PC32: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ case ELF::R_MIPS_PC16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - (FinalAddress & ~0x3)) >> 2;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress + 0x8000) >> 16;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ }
+}
+
+int64_t RuntimeDyldELFMips::evaluateMIPS64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend)
+ << " Offset: " << format("%llx" PRIx64, Offset)
+ << " SID: " << format("%d", SectionID)
+ << " SymOffset: " << format("%x", SymOffset) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+ break;
+ case ELF::R_MIPS_JALR:
+ case ELF::R_MIPS_NONE:
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_64:
+ return Value + Addend;
+ case ELF::R_MIPS_26:
+ return ((Value + Addend) >> 2) & 0x3ffffff;
+ case ELF::R_MIPS_GPREL16: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
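+ // By convention $gp points 0x7ff0 bytes past the start of the GOT, so
+ // GOTAddr + 0x7ff0 reconstructs the runtime $gp value.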
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_SUB:
+ return Value - Addend;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return ((Value + Addend + 0x8000) >> 16) & 0xffff;
+ case ELF::R_MIPS_LO16:
+ return (Value + Addend) & 0xffff;
+ case ELF::R_MIPS_HIGHER:
+ return ((Value + Addend + 0x80008000) >> 32) & 0xffff;
+ case ELF::R_MIPS_HIGHEST:
+ return ((Value + Addend + 0x800080008000) >> 48) & 0xffff;
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE: {
+ uint8_t *LocalGOTAddr =
+ getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
+ uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, getGOTEntrySize());
+
+ Value += Addend;
+ if (Type == ELF::R_MIPS_GOT_PAGE)
+ Value = (Value + 0x8000) & ~0xffff;
+
+ if (GOTEntry)
+ assert(GOTEntry == Value &&
+ "GOT entry has two different addresses.");
+ else
+ writeBytesUnaligned(Value, LocalGOTAddr, getGOTEntrySize());
+
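+ // The instruction field receives the $gp-relative offset of the GOT slot
+ // itself, again using the $gp = GOT + 0x7ff0 convention.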
+ return (SymOffset - 0x7ff0) & 0xffff;
+ }
+ case ELF::R_MIPS_GOT_OFST: {
+ int64_t page = (Value + Addend + 0x8000) & ~0xffff;
+ return (Value + Addend - page) & 0xffff;
+ }
+ case ELF::R_MIPS_GPREL32: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_PC16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
+ }
+ case ELF::R_MIPS_PC32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value + Addend - FinalAddress;
+ }
+ case ELF::R_MIPS_PC18_S3: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value + Addend - FinalAddress) & 0xffff;
+ }
+ }
+ return 0;
+}
+
+void RuntimeDyldELFMips::applyMIPSRelocation(uint8_t *TargetPtr, int64_t Value,
+ uint32_t Type) {
+ uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ break;
+ case ELF::R_MIPS_GPREL16:
+ case ELF::R_MIPS_HI16:
+ case ELF::R_MIPS_LO16:
+ case ELF::R_MIPS_HIGHER:
+ case ELF::R_MIPS_HIGHEST:
+ case ELF::R_MIPS_PC16:
+ case ELF::R_MIPS_PCHI16:
+ case ELF::R_MIPS_PCLO16:
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE:
+ case ELF::R_MIPS_GOT_OFST:
+ Insn = (Insn & 0xffff0000) | (Value & 0x0000ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC18_S3:
+ Insn = (Insn & 0xfffc0000) | (Value & 0x0003ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC19_S2:
+ Insn = (Insn & 0xfff80000) | (Value & 0x0007ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC21_S2:
+ Insn = (Insn & 0xffe00000) | (Value & 0x001fffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_26:
+ case ELF::R_MIPS_PC26_S2:
+ Insn = (Insn & 0xfc000000) | (Value & 0x03ffffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_GPREL32:
+ case ELF::R_MIPS_PC32:
+ writeBytesUnaligned(Value & 0xffffffff, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_64:
+ case ELF::R_MIPS_SUB:
+ writeBytesUnaligned(Value, TargetPtr, 8);
+ break;
+ }
+}
+
+void RuntimeDyldELFMips::resolveMIPSN32Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ int64_t CalculatedValue = evaluateMIPS64Relocation(
+ Section, Offset, Value, Type, Addend, SymOffset, SectionID);
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ Type);
+}
+
+void RuntimeDyldELFMips::resolveMIPSN64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ uint32_t r_type = Type & 0xff;
+ uint32_t r_type2 = (Type >> 8) & 0xff;
+ uint32_t r_type3 = (Type >> 16) & 0xff;
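+ // N64 ELF packs up to three relocation types into a single r_type word;
+ // they are evaluated in sequence, each feeding its result into the next
+ // (a typical chain is R_MIPS_GPREL16 / R_MIPS_SUB / R_MIPS_HI16).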
+
+ // RelType is used to keep information for which relocation type we are
+ // applying relocation.
+ uint32_t RelType = r_type;
+ int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
+ RelType, Addend,
+ SymOffset, SectionID);
+ if (r_type2 != ELF::R_MIPS_NONE) {
+ RelType = r_type2;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ if (r_type3 != ELF::R_MIPS_NONE) {
+ RelType = r_type3;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ RelType);
+}
+
+void RuntimeDyldELFMips::resolveMIPSO32Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint32_t Value, uint32_t Type,
+ int32_t Addend) {
+ uint8_t *TargetPtr = Section.getAddressWithOffset(Offset);
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveMIPSO32Relocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset) << " FinalAddress: "
+ << format("%p", Section.getLoadAddressWithOffset(Offset))
+ << " Value: " << format("%x", Value) << " Type: "
+ << format("%x", Type) << " Addend: " << format("%x", Addend)
+ << " SymOffset: " << format("%x", Offset) << "\n");
+
+ Value = evaluateMIPS32Relocation(Section, Offset, Value, Type);
+
+ applyMIPSRelocation(TargetPtr, Value, Type);
+}
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
new file mode 100644
index 0000000000000..14fb36f070f8e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
@@ -0,0 +1,67 @@
+//===-- RuntimeDyldELFMips.h ---- ELF/Mips specific code. -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+
+#include "../RuntimeDyldELF.h"
+#include <string>
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldELFMips : public RuntimeDyldELF {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldELFMips(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldELF(MM, Resolver) {}
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+
+protected:
+ void resolveMIPSO32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+ void resolveMIPSN32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+ void resolveMIPSN64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+private:
+ /// An object file specific relocation resolver.
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address used to compute the relocation value
+ uint64_t evaluateRelocation(const RelocationEntry &RE, uint64_t Value,
+ uint64_t Addend);
+
+ /// An object file specific relocation applier.
+ /// \param RE The relocation to be applied
+ /// \param Value The computed value to write into the target location
+ void applyRelocation(const RelocationEntry &RE, uint64_t Value);
+
+ int64_t evaluateMIPS32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type);
+ int64_t evaluateMIPS64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+ void applyMIPSRelocation(uint8_t *TargetPtr, int64_t CalculatedValue,
+ uint32_t Type);
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 0000000000000..f2ee1b06d4943
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,541 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
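+ // Stubs on MachO/AArch64 serve as GOT entries, so a stub is just one
+ // pointer-sized (8-byte) slot; see processGOTRelocation below.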
+ unsigned getMaxStubSize() const override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ /// Extract the addend encoded in the instruction / memory location.
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ unsigned NumBytes = 1 << RE.Size;
+ int64_t Addend = 0;
+ // Verify that the relocation has the correct size and alignment.
+ switch (RE.RelType) {
+ default: {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Unsupported relocation type: "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ if (NumBytes != 4 && NumBytes != 8) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Invalid relocation size for relocation "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
+ else
+ Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ // Verify that the relocation points to a B/BL instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Get the 26 bit addend encoded in the branch instruction and sign-extend
+ // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
+ // (<< 2).
+ Addend = (*p & 0x03FFFFFF) << 2;
+ Addend = SignExtend64(Addend, 28);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Get the 21 bit addend encoded in the adrp instruction and sign-extend
+ // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
+ // therefore implicit (<< 12).
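+ // In the adrp encoding, immlo occupies bits 30:29 and immhi bits 23:5;
+ // together they form a signed 21-bit page (4 KiB) delta.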
+ Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
+ Addend = SignExtend64(Addend, 33);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ (void)p;
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ LLVM_FALLTHROUGH;
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Get the 12 bit addend encoded in the instruction.
+ Addend = (*p & 0x003FFC00) >> 10;
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ if (ImplicitShift == 0) {
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000)
+ ImplicitShift = 4;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend <<= ImplicitShift;
+ break;
+ }
+ }
+ return Addend;
+ }
+
+ /// Encode the addend in the instruction / memory location.
+ void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ MachO::RelocationInfoType RelType, int64_t Addend) const {
+ // Verify that the relocation has the correct alignment.
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
+ else
+ *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ // Verify that the relocation points to the expected branch instruction.
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Verify addend value.
+ assert((Addend & 0x3) == 0 && "Branch target is not aligned");
+ assert(isInt<28>(Addend) && "Branch target is out of range.");
+
+ // Encode the addend as 26 bit immediate in the branch instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Check that the addend fits into 21 bits (+ 12 lower bits).
+ assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
+ assert(isInt<33>(Addend) && "Invalid page reloc value.");
+
+ // Encode the addend into the instruction.
+ uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
+ uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ (void)p;
+ LLVM_FALLTHROUGH;
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction and verify alignment.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Addend & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+ case 1:
+ assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+ break;
+ case 2:
+ assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+ break;
+ case 3:
+ assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+ break;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend >>= ImplicitShift;
+ assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
+
+ // Encode the addend into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
+ break;
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return make_error<RuntimeDyldError>("Scattered relocations not supported "
+ "for MachO AArch64");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit value to 64 bits.
+ ExplicitAddend = SignExtend64(RawAddend, 24);
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ bool Valid =
+ (RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel);
+ if (!Valid)
+ return make_error<StringError>("ARM64_RELOC_POINTER_TO_GOT supports "
+ "32-bit pc-rel or 64-bit absolute only",
+ inconvertibleErrorCode());
+ }
+
+ if (auto Addend = decodeAddend(RE))
+ RE.Addend = *Addend;
+ else
+ return Addend.takeError();
+
+ assert((ExplicitAddend == 0 || RE.Addend == 0) &&
+ "Relocation has ARM64_RELOC_ADDEND and embedded addend in the instruction.");
+ if (ExplicitAddend)
+ RE.Addend = ExplicitAddend;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ // We'll take care of the offset in processGOTRelocation.
+ Value.Offset = 0;
+ } else if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12 ||
+ RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ MachO::RelocationInfoType RelType =
+ static_cast<MachO::RelocationInfoType>(RE.RelType);
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: {
+ assert(((RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel)) &&
+ "ARM64_RELOC_POINTER_TO_GOT only supports 32-bit pc-rel or 64-bit "
+ "absolute");
+ // Addend is the GOT entry address and RE.Offset the target of the
+ // relocation.
+ uint64_t Result =
+ RE.IsPCRel ? (RE.Addend - RE.Offset) : (Value + RE.Addend);
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Result);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
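+ // (-4096) is ~0xFFF, so both addresses are truncated to their 4 KiB page
+ // before taking the difference.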
+ int64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not yet implemented!");
+ case MachO::ARM64_RELOC_ADDEND:
+ llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
+ "processRelocationRef!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert((RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT &&
+ (RE.Size == 2 || RE.Size == 3)) ||
+ RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ int64_t Offset;
+ if (i != Stubs.end())
+ Offset = static_cast<int64_t>(i->second);
+ else {
+ // FIXME: There must be a better way to do this than to check and fix
+ // the alignment every time!!!
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(((StubAddress % getStubAlignment()) == 0) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ Offset = static_cast<int64_t>(StubOffset);
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
+ RE.IsPCRel, RE.Size);
+ addRelocationForSection(TargetRE, RE.SectionID);
+ }
+
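+ // A SUBTRACTOR relocation is the first of a pair: it names the symbol to
+ // subtract (the subtrahend), and the relocation that follows names the
+ // symbol to add (the minuend); the fixup value is minuend - subtrahend
+ // + addend.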
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+ auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+ unsigned SectionBID = SubtrahendI->second.getSectionID();
+ uint64_t SectionBOffset = SubtrahendI->second.getOffset();
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ ++RelI;
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+ auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+ unsigned SectionAID = MinuendI->second.getSectionID();
+ uint64_t SectionAOffset = MinuendI->second.getOffset();
+
+ RelocationEntry R(SectionID, Offset, MachO::ARM64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+ SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+ false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ static const char *getRelocName(uint32_t RelocType) {
+ switch (RelocType) {
+ case MachO::ARM64_RELOC_UNSIGNED: return "ARM64_RELOC_UNSIGNED";
+ case MachO::ARM64_RELOC_SUBTRACTOR: return "ARM64_RELOC_SUBTRACTOR";
+ case MachO::ARM64_RELOC_BRANCH26: return "ARM64_RELOC_BRANCH26";
+ case MachO::ARM64_RELOC_PAGE21: return "ARM64_RELOC_PAGE21";
+ case MachO::ARM64_RELOC_PAGEOFF12: return "ARM64_RELOC_PAGEOFF12";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: return "ARM64_RELOC_GOT_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: return "ARM64_RELOC_GOT_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: return "ARM64_RELOC_POINTER_TO_GOT";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21: return "ARM64_RELOC_TLVP_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12: return "ARM64_RELOC_TLVP_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_ADDEND: return "ARM64_RELOC_ADDEND";
+ }
+ return "Unrecognized arm64 addend";
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 0000000000000..a76958a9e2c2a
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,432 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+#include <string>
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+private:
+ typedef RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> ParentT;
+
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOARM(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ unsigned getStubAlignment() override { return 4; }
+
+ Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
+ auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
+ if (!Flags)
+ return Flags.takeError();
+ Flags->getTargetFlags() = ARMJITSymbolFlags::fromObjectSymbol(SR);
+ return Flags;
+ }
+
+ uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+ JITSymbolFlags Flags) const override {
+ if (Flags.getTargetFlags() & ARMJITSymbolFlags::Thumb)
+ Addr |= 0x1;
+ return Addr;
+ }
+
+ bool isAddrTargetThumb(unsigned SectionID, uint64_t Offset) {
+ auto TargetObjAddr = Sections[SectionID].getObjAddress() + Offset;
+ for (auto &KV : GlobalSymbolTable) {
+ auto &Entry = KV.second;
+ auto SymbolObjAddr =
+ Sections[Entry.getSectionID()].getObjAddress() + Entry.getOffset();
+ if (TargetObjAddr == SymbolObjAddr)
+ return (Entry.getFlags().getTargetFlags() & ARMJITSymbolFlags::Thumb);
+ }
+ return false;
+ }
+
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ default:
+ return memcpyAddend(RE);
+ case MachO::ARM_RELOC_BR24: {
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ Temp &= 0x00ffffff; // Mask out the opcode.
+ // Now we've got the shifted immediate, shift by 2, sign extend and ret.
+ return SignExtend32<26>(Temp << 2);
+ }
+
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ // This is a pair of instructions whose operands combine to provide 22
+ // bits of displacement:
+ // Encoding for high bits 1111 0XXX XXXX XXXX
+ // Encoding for low bits 1111 1XXX XXXX XXXX
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ if ((HighInsn & 0xf800) != 0xf000)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 high bits)",
+ inconvertibleErrorCode());
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ if ((LowInsn & 0xf800) != 0xf800)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 low bits)",
+ inconvertibleErrorCode());
+
+ return SignExtend64<23>(((HighInsn & 0x7ff) << 12) |
+ ((LowInsn & 0x7ff) << 1));
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ // Set to true for Thumb functions in this (or a previous) TU.
+ // Will be used to set the IsTargetThumbFunc member on the relocation entry.
+ bool TargetIsLocalThumbFunc = false;
+ if (Obj.getPlainRelocationExternal(RelInfo)) {
+ auto Symbol = RelI->getSymbol();
+ StringRef TargetName;
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+
+ // If the target is external but the value doesn't have a name, then we've
+ // converted the value to a section/offset pair; we still need to set
+ // the IsTargetThumbFunc bit, so look the value up in the global symbol table.
+ auto EntryItr = GlobalSymbolTable.find(TargetName);
+ if (EntryItr != GlobalSymbolTable.end()) {
+ TargetIsLocalThumbFunc =
+ EntryItr->second.getFlags().getTargetFlags() &
+ ARMJITSymbolFlags::Thumb;
+ }
+ }
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::ARM_RELOC_HALF_SECTDIFF)
+ return processHALFSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID,
+ TargetIsLocalThumbFunc);
+ else
+ return ++RelI;
+ }
+
+ // Sanity check relocation type.
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_LOCAL_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_THUMB_32BIT_BRANCH);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_HALF);
+ default:
+ if (RelType > MachO::ARM_RELOC_HALF_SECTDIFF)
+ return make_error<RuntimeDyldError>(("MachO ARM relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ if (auto AddendOrErr = decodeAddend(RE))
+ RE.Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+ RE.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // If this is a branch from a thumb function (BR22) then make sure we mark
+ // the value as being a thumb stub: we don't want to mix it up with an ARM
+ // stub targeting the same function.
+ if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ Value.IsStubThumb = true;
+
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI,
+ (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8);
+
+ // If this is a non-external branch target check whether Value points to a
+ // thumb func.
+ if (!Value.SymbolName && (RelType == MachO::ARM_RELOC_BR24 ||
+ RelType == MachO::ARM_THUMB_RELOC_BR22))
+ RE.IsTargetThumbFunc = isAddrTargetThumb(Value.SectionID, Value.Offset);
+
+ if (RE.RelType == MachO::ARM_RELOC_BR24 ||
+ RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+ // (4 bytes in Thumb mode, 8 bytes in ARM mode).
+ Value -= (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8;
+ }
+
+ switch (RE.RelType) {
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ Value += RE.Addend;
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ assert((HighInsn & 0xf800) == 0xf000 &&
+ "Unrecognized thumb branch encoding (BR22 high bits)");
+ HighInsn = (HighInsn & 0xf800) | ((Value >> 12) & 0x7ff);
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ assert((LowInsn & 0xf800) == 0xf800 &&
+ "Unrecognized thumb branch encoding (BR22 low bits)");
+ LowInsn = (LowInsn & 0xf800) | ((Value >> 1) & 0x7ff);
+
+ writeBytesUnaligned(HighInsn, LocalAddress, 2);
+ writeBytesUnaligned(LowInsn, LocalAddress + 2, 2);
+ break;
+ }
+
+ case MachO::ARM_RELOC_VANILLA:
+ if (RE.IsTargetThumbFunc)
+ Value |= 0x01;
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ Value += RE.Addend;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ writeBytesUnaligned((Temp & ~0xffffff) | FinalValue, LocalAddress, 4);
+
+ break;
+ }
+ case MachO::ARM_RELOC_HALF_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected HALFSECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ if (RE.Size & 0x1) // :upper16:
+ Value = (Value >> 16);
+
+ bool IsThumb = RE.Size & 0x2;
+
+ Value &= 0xffff;
+
+ uint32_t Insn = readBytesUnaligned(LocalAddress, 4);
+
+ if (IsThumb)
+ Insn = (Insn & 0x8f00fbf0) | ((Value & 0xf000) >> 12) |
+ ((Value & 0x0800) >> 1) | ((Value & 0x0700) << 20) |
+ ((Value & 0x00ff) << 16);
+ else
+ Insn = (Insn & 0xfff0f000) | ((Value & 0xf000) << 4) | (Value & 0x0fff);
+ writeBytesUnaligned(Insn, LocalAddress, 4);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Invalid relocation type");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__nl_symbol_ptr")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+ // This is an ARM branch relocation; it needs to go through a stub function.
+ // Look for an existing stub first.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ // Create a new stub function.
+ assert(Section.getStubOffset() % 4 == 0 && "Misaligned stub");
+ Stubs[Value] = Section.getStubOffset();
+ uint32_t StubOpcode = 0;
+ if (RE.RelType == MachO::ARM_RELOC_BR24)
+ StubOpcode = 0xe51ff004; // ldr pc, [pc, #-4]
+ else if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ StubOpcode = 0xf000f8df; // ldr pc, [pc]
+ else
+ llvm_unreachable("Unrecognized relocation");
+ Addr = Section.getAddressWithOffset(Section.getStubOffset());
+ writeBytesUnaligned(StubOpcode, Addr, 4);
+ uint8_t *StubTargetAddr = Addr + 4;
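+ // The word after the ldr is a literal pool slot; the VANILLA relocation
+ // created below fills it with the branch target's address.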
+ RelocationEntry StubRE(
+ RE.SectionID, StubTargetAddr - Section.getAddress(),
+ MachO::GENERIC_RELOC_VANILLA, Value.Offset, false, 2);
+ StubRE.IsTargetThumbFunc = RE.IsTargetThumbFunc;
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processHALFSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseTObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &MachO =
+ static_cast<const MachOObjectFile&>(BaseTObj);
+ MachO::any_relocation_info RE =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+
+ // For a half-diff relocation the length bits actually record whether this
+ // is a movw/movt, and whether this is arm or thumb.
+ // Bit 0 indicates movw (b0 == 0) or movt (b0 == 1).
+ // Bit 1 indicates arm (b1 == 0) or thumb (b1 == 1).
+ unsigned HalfDiffKindBits = MachO.getAnyRelocationLength(RE);
+ bool IsThumb = HalfDiffKindBits & 0x2;
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO.getAnyRelocationType(RE);
+ bool IsPCRel = MachO.getAnyRelocationPCRel(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ int64_t Immediate = readBytesUnaligned(LocalAddress, 4); // Copy the whole instruction out.
+
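+ // Extract the 16-bit immediate from the movw/movt encoding: Thumb2 splits
+ // it as imm4:i:imm3:imm8, while ARM uses imm4:imm12.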
+ if (IsThumb)
+ Immediate = ((Immediate & 0x0000000f) << 12) |
+ ((Immediate & 0x00000400) << 1) |
+ ((Immediate & 0x70000000) >> 20) |
+ ((Immediate & 0x00ff0000) >> 16);
+ else
+ Immediate = ((Immediate >> 4) & 0xf000) | (Immediate & 0xfff);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t AddrA = MachO.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(MachO, AddrA);
+ assert(SAI != MachO.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(MachO, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = MachO.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(MachO, AddrB);
+ assert(SBI != MachO.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(MachO, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
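+    // The trailing PAIR relocation carries the other 16-bit half of the full
+    // 32-bit value in its r_address field.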
+ uint32_t OtherHalf = MachO.getAnyRelocationAddress(RE2) & 0xffff;
+ unsigned Shift = (HalfDiffKindBits & 0x1) ? 16 : 0;
+ uint32_t FullImmVal = (Immediate << Shift) | (OtherHalf << (16 - Shift));
+    // addend = Encoded - Expected
+    //        = Encoded - (AddrA - AddrB)
+    int64_t Addend = FullImmVal - (AddrA - AddrB);
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ HalfDiffKindBits);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 0000000000000..523deb29b723e
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,251 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+#include <string>
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOI386
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOI386(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 0; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+ RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+ return processSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID);
+ return make_error<RuntimeDyldError>(("Unhandled I386 scattered relocation "
+ "type: " + Twine(RelType)).str());
+ }
+
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_TLV);
+ default:
+ if (RelType > MachO::GENERIC_RELOC_TLV)
+ return make_error<RuntimeDyldError>(("MachO I386 relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ RE.Addend = memcpyAddend(RE);
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // Addends for external, PC-rel relocations on i386 point back to the zero
+ // offset. Calculate the final offset from the relocation target instead.
+ // This allows us to use the same logic for both external and internal
+    // relocations in resolveRelocation.
+ // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ // if (IsExtern && RE.IsPCRel) {
+ // uint64_t RelocAddr = 0;
+ // RelI->getAddress(RelocAddr);
+ // Value.Addend += RelocAddr + 4;
+ // }
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
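+      // The 32-bit displacement is relative to the end of the fixup, which is
+      // 4 bytes past its start.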
+ Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+ }
+
+ switch (RE.RelType) {
+ case MachO::GENERIC_RELOC_VANILLA:
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::GENERIC_RELOC_SECTDIFF:
+ case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__jump_table")
+ return populateJumpTable(cast<MachOObjectFile>(Obj), Section, SectionID);
+ else if (Name == "__pointers")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+ Expected<relocation_iterator>
+ processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = Obj.getAnyRelocationType(RE);
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ uint64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ uint32_t AddrA = Obj.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(Obj, AddrA);
+ assert(SAI != Obj.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = Obj.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(Obj, AddrB);
+ assert(SBI != Obj.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
+ // Compute the addend 'C' from the original expression 'A - B + C'.
+ Addend -= AddrA - AddrB;
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset,
+ IsPCRel, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ // Populate stubs in __jump_table section.
+ Error populateJumpTable(const MachOObjectFile &Obj,
+ const SectionRef &JTSection,
+ unsigned JTSectionID) {
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+ uint32_t JTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ unsigned JTEntrySize = Sec32.reserved2;
+    uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+    unsigned JTEntryOffset = 0;
+
+    if (JTEntrySize == 0 || JTSectionSize % JTEntrySize != 0)
+      return make_error<RuntimeDyldError>("Jump-table section does not contain "
+                                          "a whole number of stubs?");
+
+    unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+
+ for (unsigned i = 0; i < NumJTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ Expected<StringRef> IndirectSymbolName = SI->getName();
+ if (!IndirectSymbolName)
+ return IndirectSymbolName.takeError();
+ uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+ createStubFunction(JTEntryAddr);
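+      // createStubFunction emits a 32-bit pc-relative jmp (opcode 0xE9) on
+      // i386, so the fixup for the branch target starts one byte past the
+      // opcode; hence the JTEntryOffset + 1 below.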
+ RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+ MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+ addRelocationForSymbol(RE, *IndirectSymbolName);
+ JTEntryOffset += JTEntrySize;
+ }
+
+ return Error::success();
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 0000000000000..28febbdb948c3
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,239 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+#include <string>
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOX86_64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (RelType == MachO::X86_64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on X86_64");
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ RE.Addend = memcpyAddend(RE);
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::X86_64_RELOC_TLV);
+ default:
+ if (RelType > MachO::X86_64_RELOC_TLV)
+ return make_error<RuntimeDyldError>(("MachO X86_64 relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+ RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ // FIXME: It seems this value needs to be adjusted by 4 for an effective
+ // PC address. Is that expected? Only for branches, perhaps?
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress + 4;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::X86_64_RELOC_SIGNED_1:
+ case MachO::X86_64_RELOC_SIGNED_2:
+ case MachO::X86_64_RELOC_SIGNED_4:
+ case MachO::X86_64_RELOC_SIGNED:
+ case MachO::X86_64_RELOC_UNSIGNED:
+ case MachO::X86_64_RELOC_BRANCH:
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::X86_64_RELOC_SUBTRACTOR: {
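+      // A SUBTRACTOR pair encodes SectionA - SectionB + C; recompute it from
+      // the final load addresses of the two sections.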
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ SectionEntry &Section = Sections[RE.SectionID];
+ assert(RE.IsPCRel);
+ assert(RE.Size == 2);
+ Value.Offset -= RE.Addend;
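+    // The addend applies at the load site, not in the GOT entry itself, so
+    // back it out before looking up or creating the GOT stub.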
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *GOTEntry = Section.getAddressWithOffset(Section.getStubOffset());
+ RelocationEntry GOTRE(RE.SectionID, Section.getStubOffset(),
+ MachO::X86_64_RELOC_UNSIGNED, Value.Offset, false,
+ 3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(8);
+ Addr = GOTEntry;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+ MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const MachOObjectFile &BaseObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+    const MachOObjectFile &Obj = BaseObj;
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ unsigned SectionBID = ~0U;
+ uint64_t SectionBOffset = 0;
+
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    // The first relocation of the pair names the subtrahend (B in A - B + C);
+    // the relocation that follows names the minuend (A).
+    bool SubtrahendIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+
+    if (SubtrahendIsExternal) {
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+      auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+      assert(SubtrahendI != GlobalSymbolTable.end() &&
+             "Subtrahend symbol missing from the global symbol table");
+      SectionBID = SubtrahendI->second.getSectionID();
+      SectionBOffset = SubtrahendI->second.getOffset();
+ } else {
+ SectionRef SecB = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = SecB.isText();
+ Expected<unsigned> SectionBIDOrErr =
+ findOrEmitSection(Obj, SecB, IsCode, ObjSectionToID);
+ if (!SectionBIDOrErr)
+ return SectionBIDOrErr.takeError();
+ SectionBID = *SectionBIDOrErr;
+ Addend += SecB.getAddress();
+ }
+
+ ++RelI;
+
+ unsigned SectionAID = ~0U;
+ uint64_t SectionAOffset = 0;
+
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    bool MinuendIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+    if (MinuendIsExternal) {
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+      auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+      assert(MinuendI != GlobalSymbolTable.end() &&
+             "Minuend symbol missing from the global symbol table");
+      SectionAID = MinuendI->second.getSectionID();
+      SectionAOffset = MinuendI->second.getOffset();
+ } else {
+ SectionRef SecA = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = SecA.isText();
+ Expected<unsigned> SectionAIDOrErr =
+ findOrEmitSection(Obj, SecA, IsCode, ObjSectionToID);
+ if (!SectionAIDOrErr)
+ return SectionAIDOrErr.takeError();
+ SectionAID = *SectionAIDOrErr;
+ Addend -= SecA.getAddress();
+ }
+
+    RelocationEntry R(SectionID, Offset, MachO::X86_64_RELOC_SUBTRACTOR,
+                      (uint64_t)Addend, SectionAID, SectionAOffset,
+                      SectionBID, SectionBOffset, false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
new file mode 100644
index 0000000000000..925049b2a1b4f
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -0,0 +1,267 @@
+//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the section-based memory manager used by the MCJIT
+// execution engine and RuntimeDyld.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+
+uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName,
+ bool IsReadOnly) {
+ if (IsReadOnly)
+ return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
+ Size, Alignment);
+ return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
+ Alignment);
+}
+
+uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) {
+ return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
+ Alignment);
+}
+
+uint8_t *SectionMemoryManager::allocateSection(
+ SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
+ unsigned Alignment) {
+ if (!Alignment)
+ Alignment = 16;
+
+ assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
+
+ uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
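+  // Round Size up to a multiple of Alignment and over-allocate by one extra
+  // alignment chunk, so an aligned start address plus Size always fits.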
+ uintptr_t Addr = 0;
+
+ MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
+ switch (Purpose) {
+ case AllocationPurpose::Code:
+ return CodeMem;
+ case AllocationPurpose::ROData:
+ return RODataMem;
+ case AllocationPurpose::RWData:
+ return RWDataMem;
+ }
+ llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
+ }();
+
+ // Look in the list of free memory regions and use a block there if one
+ // is available.
+ for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+ if (FreeMB.Free.allocatedSize() >= RequiredSize) {
+ Addr = (uintptr_t)FreeMB.Free.base();
+ uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
+ // Align the address.
+ Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+
+ if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
+ // The part of the block we're giving out to the user is now pending
+ MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
+
+        // Remember this pending block so that future allocations can just
+        // modify it rather than creating a new one.
+ FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
+ } else {
+ sys::MemoryBlock &PendingMB =
+ MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
+ PendingMB = sys::MemoryBlock(PendingMB.base(),
+ Addr + Size - (uintptr_t)PendingMB.base());
+ }
+
+ // Remember how much free space is now left in this block
+ FreeMB.Free =
+ sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
+ return (uint8_t *)Addr;
+ }
+ }
+
+ // No pre-allocated free block was large enough. Allocate a new memory region.
+ // Note that all sections get allocated as read-write. The permissions will
+ // be updated later based on memory group.
+ //
+ // FIXME: It would be useful to define a default allocation size (or add
+ // it as a constructor parameter) to minimize the number of allocations.
+ //
+ // FIXME: Initialize the Near member for each memory group to avoid
+ // interleaving.
+ std::error_code ec;
+ sys::MemoryBlock MB = MMapper.allocateMappedMemory(
+ Purpose, RequiredSize, &MemGroup.Near,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
+ if (ec) {
+ // FIXME: Add error propagation to the interface.
+ return nullptr;
+ }
+
+ // Save this address as the basis for our next request
+ MemGroup.Near = MB;
+
+ // Remember that we allocated this memory
+ MemGroup.AllocatedMem.push_back(MB);
+ Addr = (uintptr_t)MB.base();
+ uintptr_t EndOfBlock = Addr + MB.allocatedSize();
+
+ // Align the address.
+ Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+
+ // The part of the block we're giving out to the user is now pending
+ MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
+
+  // allocateMappedMemory may allocate much more memory than we need. In that
+  // case, store the unused tail as a free memory block.
+ unsigned FreeSize = EndOfBlock - Addr - Size;
+ if (FreeSize > 16) {
+ FreeMemBlock FreeMB;
+ FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+ MemGroup.FreeMem.push_back(FreeMB);
+ }
+
+ // Return aligned address
+ return (uint8_t *)Addr;
+}
+
+bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
+ // FIXME: Should in-progress permissions be reverted if an error occurs?
+ std::error_code ec;
+
+ // Make code memory executable.
+ ec = applyMemoryGroupPermissions(CodeMem,
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ if (ec) {
+ if (ErrMsg) {
+ *ErrMsg = ec.message();
+ }
+ return true;
+ }
+
+  // Make read-only data memory read-only.
+  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
+ if (ec) {
+ if (ErrMsg) {
+ *ErrMsg = ec.message();
+ }
+ return true;
+ }
+
+ // Read-write data memory already has the correct permissions
+
+ // Some platforms with separate data cache and instruction cache require
+ // explicit cache flush, otherwise JIT code manipulations (like resolved
+ // relocations) will get to the data cache but not to the instruction cache.
+ invalidateInstructionCache();
+
+ return false;
+}
+
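+// Trim a block to whole pages, dropping any partial page at either end. For
+// example, with 4 KiB pages a block [0x1234, 0x5678) trims to [0x2000,
+// 0x5000).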
+static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
+ static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+ size_t StartOverlap =
+ (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
+
+ size_t TrimmedSize = M.allocatedSize();
+ TrimmedSize -= StartOverlap;
+ TrimmedSize -= TrimmedSize % PageSize;
+
+ sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
+ TrimmedSize);
+
+ assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
+ assert((Trimmed.allocatedSize() % PageSize) == 0);
+ assert(M.base() <= Trimmed.base() &&
+ Trimmed.allocatedSize() <= M.allocatedSize());
+
+ return Trimmed;
+}
+
+std::error_code
+SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions) {
+ for (sys::MemoryBlock &MB : MemGroup.PendingMem)
+ if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
+ return EC;
+
+ MemGroup.PendingMem.clear();
+
+  // Now go through the free blocks and trim each down to whole pages, since a
+  // just-finalized pending block may have shared a page with it.
+ for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+ FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
+    // We cleared the PendingMem list, so all these indices are now invalid.
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+ }
+
+ // Remove all blocks which are now empty
+ MemGroup.FreeMem.erase(remove_if(MemGroup.FreeMem,
+ [](FreeMemBlock &FreeMB) {
+ return FreeMB.Free.allocatedSize() == 0;
+ }),
+ MemGroup.FreeMem.end());
+
+ return std::error_code();
+}
+
+void SectionMemoryManager::invalidateInstructionCache() {
+ for (sys::MemoryBlock &Block : CodeMem.PendingMem)
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
+}
+
+SectionMemoryManager::~SectionMemoryManager() {
+ for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
+ for (sys::MemoryBlock &Block : Group->AllocatedMem)
+ MMapper.releaseMappedMemory(Block);
+ }
+}
+
+SectionMemoryManager::MemoryMapper::~MemoryMapper() {}
+
+void SectionMemoryManager::anchor() {}
+
+namespace {
+// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
+// into sys::Memory.
+class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
+public:
+ sys::MemoryBlock
+ allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
+ size_t NumBytes, const sys::MemoryBlock *const NearBlock,
+ unsigned Flags, std::error_code &EC) override {
+ return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
+ }
+
+ std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
+ unsigned Flags) override {
+ return sys::Memory::protectMappedMemory(Block, Flags);
+ }
+
+ std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
+ return sys::Memory::releaseMappedMemory(M);
+ }
+};
+
+DefaultMMapper DefaultMMapperInstance;
+} // namespace
+
+SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
+ : MMapper(MM ? *MM : DefaultMMapperInstance) {}
+
+} // namespace llvm
diff --git a/llvm/lib/ExecutionEngine/TargetSelect.cpp b/llvm/lib/ExecutionEngine/TargetSelect.cpp
new file mode 100644
index 0000000000000..0d9c6cfa09087
--- /dev/null
+++ b/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -0,0 +1,103 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetRegistry for the appropriate target to use, and
+// allows the user to specify a specific one on the commandline with -march=x,
+// -mcpu=y, and -mattr=a,-b,+c. Clients should initialize targets prior to
+// calling selectTarget().
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+TargetMachine *EngineBuilder::selectTarget() {
+ Triple TT;
+
+ // MCJIT can generate code for remote targets, but the old JIT and Interpreter
+ // must use the host architecture.
+ if (WhichEngine != EngineKind::Interpreter && M)
+ TT.setTriple(M->getTargetTriple());
+
+ return selectTarget(TT, MArch, MCPU, MAttrs);
+}
+
+/// selectTarget - Pick a target either via -march or by guessing the native
+/// arch. Add any CPU features specified via -mcpu or -mattr.
+TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) {
+ Triple TheTriple(TargetTriple);
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getProcessTriple());
+
+ // Adjust the triple to match what the user requested.
+ const Target *TheTarget = nullptr;
+ if (!MArch.empty()) {
+ auto I = find_if(TargetRegistry::targets(),
+ [&](const Target &T) { return MArch == T.getName(); });
+
+ if (I == TargetRegistry::targets().end()) {
+ if (ErrorStr)
+ *ErrorStr = "No available targets are compatible with this -march, "
+ "see -version for the available targets.\n";
+ return nullptr;
+ }
+
+ TheTarget = &*I;
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // requested/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ std::string Error;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
+ if (!TheTarget) {
+ if (ErrorStr)
+ *ErrorStr = Error;
+ return nullptr;
+ }
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (!MAttrs.empty()) {
+ SubtargetFeatures Features;
+    for (const std::string &Attr : MAttrs)
+      Features.AddFeature(Attr);
+ FeaturesStr = Features.getString();
+ }
+
+ // FIXME: non-iOS ARM FastISel is broken with MCJIT.
+ if (TheTriple.getArch() == Triple::arm &&
+ !TheTriple.isiOS() &&
+ OptLevel == CodeGenOpt::None) {
+ OptLevel = CodeGenOpt::Less;
+ }
+
+ // Allocate a target...
+ TargetMachine *Target =
+ TheTarget->createTargetMachine(TheTriple.getTriple(), MCPU, FeaturesStr,
+ Options, RelocModel, CMModel, OptLevel,
+ /*JIT*/ true);
+  assert(Target && "Could not allocate target machine!");
+  Target->Options.EmulatedTLS = EmulatedTLS;
+  Target->Options.ExplicitEmulatedTLS = true;
+
+  return Target;
+}