aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld')
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp169
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp295
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp1477
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp122
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h61
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp1061
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h85
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp2572
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h236
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h594
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp382
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h167
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h377
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h228
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h348
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h322
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp320
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h66
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h541
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h431
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h250
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h238
22 files changed, 10342 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
new file mode 100644
index 000000000000..c153b4464568
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
@@ -0,0 +1,169 @@
+//===----------- JITSymbol.cpp - JITSymbol class implementation -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// JITSymbol class implementation plus helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Object/ObjectFile.h"
+
+using namespace llvm;
+
+JITSymbolFlags llvm::JITSymbolFlags::fromGlobalValue(const GlobalValue &GV) {
+  assert(GV.hasName() && "Can't get flags for anonymous symbol");
+
+  JITSymbolFlags Result = JITSymbolFlags::None;
+
+  // Weak / link-once definitions may be superseded by a stronger one.
+  if (GV.hasWeakLinkage() || GV.hasLinkOnceLinkage())
+    Result |= JITSymbolFlags::Weak;
+  // Common symbols are merged and allocated by the linker.
+  if (GV.hasCommonLinkage())
+    Result |= JITSymbolFlags::Common;
+  // Anything neither local nor hidden is visible outside this module.
+  if (!GV.hasLocalLinkage() && !GV.hasHiddenVisibility())
+    Result |= JITSymbolFlags::Exported;
+
+  // Functions, and aliases whose aliasee is a function, are callable.
+  if (isa<Function>(GV))
+    Result |= JITSymbolFlags::Callable;
+  else if (auto *GA = dyn_cast<GlobalAlias>(&GV))
+    if (isa<Function>(GA->getAliasee()))
+      Result |= JITSymbolFlags::Callable;
+
+  // Check for a linker-private-global-prefix on the symbol name, in which
+  // case it must be marked as non-exported.
+  if (auto *M = GV.getParent()) {
+    StringRef Prefix = M->getDataLayout().getLinkerPrivateGlobalPrefix();
+    StringRef Name = GV.getName();
+    if (!Prefix.empty() && Name.front() == '\01' &&
+        Name.substr(1).starts_with(Prefix))
+      Result &= ~JITSymbolFlags::Exported;
+  }
+
+  return Result;
+}
+
+JITSymbolFlags llvm::JITSymbolFlags::fromSummary(GlobalValueSummary *S) {
+  // Derive JIT symbol flags from a summary entry's linkage and kind.
+  const auto Linkage = S->linkage();
+
+  JITSymbolFlags Result = JITSymbolFlags::None;
+  if (GlobalValue::isWeakLinkage(Linkage) ||
+      GlobalValue::isLinkOnceLinkage(Linkage))
+    Result |= JITSymbolFlags::Weak;
+  if (GlobalValue::isCommonLinkage(Linkage))
+    Result |= JITSymbolFlags::Common;
+  if (GlobalValue::isExternalLinkage(Linkage) ||
+      GlobalValue::isExternalWeakLinkage(Linkage))
+    Result |= JITSymbolFlags::Exported;
+
+  // Only function summaries yield callable symbols.
+  if (isa<FunctionSummary>(S))
+    Result |= JITSymbolFlags::Callable;
+
+  return Result;
+}
+
+Expected<JITSymbolFlags>
+llvm::JITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+  // Translate object-file symbol flags into JIT symbol flags.
+  auto RawFlags = Symbol.getFlags();
+  if (!RawFlags)
+    // TODO: Test this error.
+    return RawFlags.takeError();
+
+  JITSymbolFlags Result = JITSymbolFlags::None;
+  if (*RawFlags & object::BasicSymbolRef::SF_Weak)
+    Result |= JITSymbolFlags::Weak;
+  if (*RawFlags & object::BasicSymbolRef::SF_Common)
+    Result |= JITSymbolFlags::Common;
+  if (*RawFlags & object::BasicSymbolRef::SF_Exported)
+    Result |= JITSymbolFlags::Exported;
+
+  // Function-typed symbols are callable.
+  auto TypeOrErr = Symbol.getType();
+  if (!TypeOrErr)
+    return TypeOrErr.takeError();
+  if (*TypeOrErr == object::SymbolRef::ST_Function)
+    Result |= JITSymbolFlags::Callable;
+
+  return Result;
+}
+
+ARMJITSymbolFlags
+llvm::ARMJITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+  auto RawFlags = Symbol.getFlags();
+  if (!RawFlags)
+    // TODO: Actually report errors helpfully.
+    report_fatal_error(RawFlags.takeError());
+
+  // Record Thumb-ness so calls to this symbol can use the right ISA.
+  ARMJITSymbolFlags Result;
+  if (*RawFlags & object::BasicSymbolRef::SF_Thumb)
+    Result |= ARMJITSymbolFlags::Thumb;
+  return Result;
+}
+
+/// Performs lookup by, for each symbol, first calling
+/// findSymbolInLogicalDylib and if that fails calling
+/// findSymbol.
+///
+/// On the first failure the error is passed to OnResolved and the remaining
+/// symbols are not queried; otherwise OnResolved receives the complete map
+/// of resolved addresses.
+void LegacyJITSymbolResolver::lookup(const LookupSet &Symbols,
+                                     OnResolvedFunction OnResolved) {
+  JITSymbolResolver::LookupResult Result;
+  for (auto &Symbol : Symbols) {
+    std::string SymName = Symbol.str();
+    // A symbol that tests false here may still hold an error rather than
+    // "not found"; the takeError() branch below distinguishes the two.
+    if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+      if (auto AddrOrErr = Sym.getAddress())
+        Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+      else {
+        OnResolved(AddrOrErr.takeError());
+        return;
+      }
+    } else if (auto Err = Sym.takeError()) {
+      OnResolved(std::move(Err));
+      return;
+    } else {
+      // findSymbolInLogicalDylib failed. Let's try findSymbol.
+      if (auto Sym = findSymbol(SymName)) {
+        if (auto AddrOrErr = Sym.getAddress())
+          Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+        else {
+          OnResolved(AddrOrErr.takeError());
+          return;
+        }
+      } else if (auto Err = Sym.takeError()) {
+        OnResolved(std::move(Err));
+        return;
+      } else {
+        // Neither lookup produced a definition or an error: report not-found.
+        OnResolved(make_error<StringError>("Symbol not found: " + Symbol,
+                                           inconvertibleErrorCode()));
+        return;
+      }
+    }
+  }
+
+  OnResolved(std::move(Result));
+}
+
+/// Performs flags lookup by calling findSymbolInLogicalDylib and
+/// returning the flags value for that symbol.
+///
+/// A symbol is added to the result set (i.e. becomes the caller's
+/// responsibility) when the logical dylib has no definition for it, or has
+/// only a non-strong one.
+Expected<JITSymbolResolver::LookupSet>
+LegacyJITSymbolResolver::getResponsibilitySet(const LookupSet &Symbols) {
+  JITSymbolResolver::LookupSet Result;
+
+  for (auto &Symbol : Symbols) {
+    std::string SymName = Symbol.str();
+    if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+      // If there's an existing def but it is not strong, then the caller is
+      // responsible for it.
+      if (!Sym.getFlags().isStrong())
+        Result.insert(Symbol);
+    } else if (auto Err = Sym.takeError())
+      return std::move(Err);
+    else {
+      // If there is no existing definition then the caller is responsible for
+      // it.
+      Result.insert(Symbol);
+    }
+  }
+
+  return std::move(Result);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
new file mode 100644
index 000000000000..fd11450b635b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
@@ -0,0 +1,295 @@
+//===-- RTDyldMemoryManager.cpp - Memory manager for MC-JIT -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdlib>
+
+#ifdef __linux__
+ // These includes used by RTDyldMemoryManager::getPointerToNamedFunction()
+ // for Glibc trickery. See comments in this function for more information.
+ #ifdef HAVE_SYS_STAT_H
+ #include <sys/stat.h>
+ #endif
+ #include <fcntl.h>
+ #include <unistd.h>
+#endif
+
+namespace llvm {
+
+// Out-of-line destructor acts as the key function for the vtable.
+RTDyldMemoryManager::~RTDyldMemoryManager() = default;
+
+#if defined(HAVE_REGISTER_FRAME) && defined(HAVE_DEREGISTER_FRAME) && \
+    !defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+extern "C" void __register_frame(void *);
+extern "C" void __deregister_frame(void *);
+#else
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MingW runtime.
+static void __register_frame(void *p) {
+  // Look the routine up once and cache the result (may stay null if the
+  // runtime does not provide it, in which case registration is a no-op).
+  static bool Searched = false;
+  static void((*rf)(void *)) = 0;
+
+  if (!Searched) {
+    Searched = true;
+    *(void **)&rf =
+        llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+  }
+  if (rf)
+    rf(p);
+}
+
+static void __deregister_frame(void *p) {
+  // Same lookup-once caching scheme as __register_frame above.
+  static bool Searched = false;
+  static void((*df)(void *)) = 0;
+
+  if (!Searched) {
+    Searched = true;
+    *(void **)&df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+        "__deregister_frame");
+  }
+  if (df)
+    df(p);
+}
+#endif
+
+/* libgcc and libunwind __register_frame behave differently. We use the presence
+ * of __unw_add_dynamic_fde to detect libunwind. */
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+
+// Walk one record of the .eh_frame data: register/deregister it only when its
+// second word (the CIE-pointer field) is nonzero — i.e. presumably when it is
+// an FDE rather than a CIE — and return a pointer to the next record.
+static const char *processFDE(const char *Entry, bool isDeregister) {
+  const char *P = Entry;
+  uint32_t Length = *((const uint32_t *)P);
+  P += 4;
+  uint32_t Offset = *((const uint32_t *)P);
+  if (Offset != 0) {
+    if (isDeregister)
+      __deregister_frame(const_cast<char *>(Entry));
+    else
+      __register_frame(const_cast<char *>(Entry));
+  }
+  return P + Length;
+}
+
+// This implementation handles frame registration for local targets.
+// Memory managers for remote targets should re-implement this function
+// and use the LoadAddr parameter.
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+                                                    size_t Size) {
+  // On OS X __register_frame takes a single FDE as an argument.
+  // See http://lists.llvm.org/pipermail/llvm-dev/2013-April/061737.html
+  // and projects/libunwind/src/UnwindLevel1-gcc-ext.c.
+  const char *P = (const char *)Addr;
+  const char *End = P + Size;
+  while (P != End)
+    P = processFDE(P, false);
+}
+
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+                                                      size_t Size) {
+  const char *P = (const char *)Addr;
+  const char *End = P + Size;
+  while (P != End)
+    P = processFDE(P, true);
+}
+
+#else
+
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+                                                    size_t Size) {
+  // On Linux __register_frame takes a single argument:
+  // a pointer to the start of the .eh_frame section.
+
+  // How can it find the end? Because crtendS.o is linked
+  // in and it has an .eh_frame section with four zero chars.
+  __register_frame(Addr);
+}
+
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+                                                      size_t Size) {
+  __deregister_frame(Addr);
+}
+
+#endif
+
+void RTDyldMemoryManager::registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+                                           size_t Size) {
+  // Register with the in-process unwinder and remember the range so that
+  // deregisterEHFrames() can undo the registration later.  LoadAddr is
+  // unused here: this base implementation assumes an in-process target.
+  registerEHFramesInProcess(Addr, Size);
+  EHFrames.push_back({Addr, Size});
+}
+
+void RTDyldMemoryManager::deregisterEHFrames() {
+  // Undo every registration recorded by registerEHFrames, then forget them.
+  for (auto &Frame : EHFrames)
+    deregisterEHFramesInProcess(Frame.Addr, Frame.Size);
+  EHFrames.clear();
+}
+
+// Do-nothing stand-in returned for "__main" lookups (see below); returning
+// zero reports success to the generated caller.
+static int jit_noop() { return 0; }
+
+// ARM math functions are statically linked on Android from libgcc.a, but not
+// available at runtime for dynamic linking. On Linux these are usually placed
+// in libgcc_s.so so can be found by normal dynamic lookup.
+#if defined(__BIONIC__) && defined(__arm__)
+// List of functions which are statically linked on Android and can be generated
+// by LLVM. This is done as a nested macro which is used once to declare the
+// imported functions with ARM_MATH_DECL and once to compare them to the
+// user-requested symbol in getSymbolAddress with ARM_MATH_CHECK. The test
+// assumes that all functions start with __aeabi_ and getSymbolAddress must be
+// modified if that changes.
+#define ARM_MATH_IMPORTS(PP) \
+  PP(__aeabi_d2f) \
+  PP(__aeabi_d2iz) \
+  PP(__aeabi_d2lz) \
+  PP(__aeabi_d2uiz) \
+  PP(__aeabi_d2ulz) \
+  PP(__aeabi_dadd) \
+  PP(__aeabi_dcmpeq) \
+  PP(__aeabi_dcmpge) \
+  PP(__aeabi_dcmpgt) \
+  PP(__aeabi_dcmple) \
+  PP(__aeabi_dcmplt) \
+  PP(__aeabi_dcmpun) \
+  PP(__aeabi_ddiv) \
+  PP(__aeabi_dmul) \
+  PP(__aeabi_dsub) \
+  PP(__aeabi_f2d) \
+  PP(__aeabi_f2iz) \
+  PP(__aeabi_f2lz) \
+  PP(__aeabi_f2uiz) \
+  PP(__aeabi_f2ulz) \
+  PP(__aeabi_fadd) \
+  PP(__aeabi_fcmpeq) \
+  PP(__aeabi_fcmpge) \
+  PP(__aeabi_fcmpgt) \
+  PP(__aeabi_fcmple) \
+  PP(__aeabi_fcmplt) \
+  PP(__aeabi_fcmpun) \
+  PP(__aeabi_fdiv) \
+  PP(__aeabi_fmul) \
+  PP(__aeabi_fsub) \
+  PP(__aeabi_i2d) \
+  PP(__aeabi_i2f) \
+  PP(__aeabi_idiv) \
+  PP(__aeabi_idivmod) \
+  PP(__aeabi_l2d) \
+  PP(__aeabi_l2f) \
+  PP(__aeabi_lasr) \
+  PP(__aeabi_ldivmod) \
+  PP(__aeabi_llsl) \
+  PP(__aeabi_llsr) \
+  PP(__aeabi_lmul) \
+  PP(__aeabi_ui2d) \
+  PP(__aeabi_ui2f) \
+  PP(__aeabi_uidiv) \
+  PP(__aeabi_uidivmod) \
+  PP(__aeabi_ul2d) \
+  PP(__aeabi_ul2f) \
+  PP(__aeabi_uldivmod)
+
+// Declare statically linked math functions on ARM. The function declarations
+// here do not have the correct prototypes for each function in
+// ARM_MATH_IMPORTS, but it doesn't matter because only the symbol addresses are
+// needed. In particular the __aeabi_*divmod functions do not have calling
+// conventions which match any C prototype.
+#define ARM_MATH_DECL(name) extern "C" void name();
+ARM_MATH_IMPORTS(ARM_MATH_DECL)
+#undef ARM_MATH_DECL
+#endif
+
+// __morestack lives in static libgcc and may be absent at runtime, hence the
+// weak declaration; its address is tested before use in
+// getSymbolAddressInProcess below.
+#if defined(__linux__) && defined(__GLIBC__) && \
+    (defined(__i386__) || defined(__x86_64__))
+extern "C" LLVM_ATTRIBUTE_WEAK void __morestack();
+#endif
+
+/// Resolve \p Name to an address within the current process, special-casing a
+/// handful of symbols that plain dynamic lookup cannot find (glibc nonshared
+/// stubs, Android ARM math helpers, __main), then falling back to
+/// DynamicLibrary::SearchForAddressOfSymbol.  Returns 0 if unresolved.
+uint64_t
+RTDyldMemoryManager::getSymbolAddressInProcess(const std::string &Name) {
+  // This implementation assumes that the host program is the target.
+  // Clients generating code for a remote target should implement their own
+  // memory manager.
+#if defined(__linux__) && defined(__GLIBC__)
+  //===--------------------------------------------------------------------===//
+  // Function stubs that are invoked instead of certain library calls
+  //
+  // Force the following functions to be linked in to anything that uses the
+  // JIT. This is a hack designed to work around the all-too-clever Glibc
+  // strategy of making these functions work differently when inlined vs. when
+  // not inlined, and hiding their real definitions in a separate archive file
+  // that the dynamic linker can't see. For more info, search for
+  // 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+  if (Name == "stat") return (uint64_t)&stat;
+  if (Name == "fstat") return (uint64_t)&fstat;
+  if (Name == "lstat") return (uint64_t)&lstat;
+  if (Name == "stat64") return (uint64_t)&stat64;
+  if (Name == "fstat64") return (uint64_t)&fstat64;
+  if (Name == "lstat64") return (uint64_t)&lstat64;
+  if (Name == "atexit") return (uint64_t)&atexit;
+  if (Name == "mknod") return (uint64_t)&mknod;
+
+#if defined(__i386__) || defined(__x86_64__)
+  // __morestack lives in libgcc, a static library.
+  if (&__morestack && Name == "__morestack")
+    return (uint64_t)&__morestack;
+#endif
+#endif // __linux__ && __GLIBC__
+
+  // See ARM_MATH_IMPORTS definition for explanation
+#if defined(__BIONIC__) && defined(__arm__)
+  if (Name.compare(0, 8, "__aeabi_") == 0) {
+    // Check if the user has requested any of the functions listed in
+    // ARM_MATH_IMPORTS, and if so redirect to the statically linked symbol.
+#define ARM_MATH_CHECK(fn) if (Name == #fn) return (uint64_t)&fn;
+    ARM_MATH_IMPORTS(ARM_MATH_CHECK)
+#undef ARM_MATH_CHECK
+  }
+#endif
+
+  // We should not invoke parent's ctors/dtors from generated main()!
+  // On Mingw and Cygwin, the symbol __main is resolved to
+  // callee's(eg. tools/lli) one, to invoke wrong duplicated ctors
+  // (and register wrong callee's dtors with atexit(3)).
+  // We expect ExecutionEngine::runStaticConstructorsDestructors()
+  // is called before ExecutionEngine::runFunctionAsMain() is called.
+  if (Name == "__main") return (uint64_t)&jit_noop;
+
+  const char *NameStr = Name.c_str();
+
+  // DynamicLibrary::SearchForAddressOfSymbol expects an unmangled 'C' symbol
+  // name, so if we're on Darwin, strip the leading '_' off.
+#ifdef __APPLE__
+  if (NameStr[0] == '_')
+    ++NameStr;
+#endif
+
+  return (uint64_t)sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+}
+
+void *RTDyldMemoryManager::getPointerToNamedFunction(const std::string &Name,
+                                                     bool AbortOnFailure) {
+  // Resolve through the regular symbol-lookup path; optionally treat an
+  // unresolved name as fatal.
+  const uint64_t Addr = getSymbolAddress(Name);
+  if (Addr == 0 && AbortOnFailure)
+    report_fatal_error(Twine("Program used external function '") + Name +
+                       "' which could not be resolved!");
+  return reinterpret_cast<void *>(Addr);
+}
+
+// Out-of-line anchor methods pin these classes' vtables to this file.
+void RTDyldMemoryManager::anchor() {}
+void MCJITMemoryManager::anchor() {}
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
new file mode 100644
index 000000000000..7eb7da0138c9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -0,0 +1,1477 @@
+//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "RuntimeDyldCOFF.h"
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldImpl.h"
+#include "RuntimeDyldMachO.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/MathExtras.h"
+#include <mutex>
+
+#include <future>
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+// Single error code used when converting a RuntimeDyldError to error_code.
+enum RuntimeDyldErrorCode {
+  GenericRTDyldError = 1
+};
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class RuntimeDyldErrorCategory : public std::error_category {
+public:
+  const char *name() const noexcept override { return "runtimedyld"; }
+
+  std::string message(int Condition) const override {
+    if (static_cast<RuntimeDyldErrorCode>(Condition) == GenericRTDyldError)
+      return "Generic RuntimeDyld error";
+    llvm_unreachable("Unrecognized RuntimeDyldErrorCode");
+  }
+};
+
+} // end anonymous namespace
+
+// Static ID used by the llvm::Error RTTI mechanism.
+char RuntimeDyldError::ID = 0;
+
+// Print the stored message; invoked by the llvm::Error infrastructure.
+void RuntimeDyldError::log(raw_ostream &OS) const {
+  OS << ErrMsg << "\n";
+}
+
+// All RuntimeDyld errors currently collapse to one generic error_code; see
+// the FIXME on RuntimeDyldErrorCategory.
+std::error_code RuntimeDyldError::convertToErrorCode() const {
+  static RuntimeDyldErrorCategory RTDyldErrorCategory;
+  return std::error_code(GenericRTDyldError, RTDyldErrorCategory);
+}
+
+// Empty out-of-line virtual destructor as the key function.
+RuntimeDyldImpl::~RuntimeDyldImpl() = default;
+
+// Pin LoadedObjectInfo's vtables to this file.
+void RuntimeDyld::LoadedObjectInfo::anchor() {}
+
+namespace llvm {
+
+// Base-class default does nothing; format-specific subclasses override to
+// register their EH frames.
+void RuntimeDyldImpl::registerEHFrames() {}
+
+// Deregistration is delegated to the memory manager, which tracked the
+// registered ranges.
+void RuntimeDyldImpl::deregisterEHFrames() {
+  MemMgr.deregisterEHFrames();
+}
+
+#ifndef NDEBUG
+// Emit a hex dump of section S to dbgs(), labelled with State (for example
+// "before relocations").  Rows are aligned on 16-byte load-address
+// boundaries, padding the first row as needed.
+static void dumpSectionMemory(const SectionEntry &S, StringRef State) {
+  dbgs() << "----- Contents of section " << S.getName() << " " << State
+         << " -----";
+
+  if (S.getAddress() == nullptr) {
+    dbgs() << "\n <section not emitted>\n";
+    return;
+  }
+
+  const unsigned ColsPerRow = 16;
+
+  uint8_t *Byte = S.getAddress();
+  uint64_t Addr = S.getLoadAddress();
+  unsigned Remaining = S.getSize();
+
+  // Pad the first row so columns line up with the row's base address.
+  if (unsigned Pad = Addr & (ColsPerRow - 1)) {
+    dbgs() << "\n"
+           << format("0x%016" PRIx64, Addr & ~(uint64_t)(ColsPerRow - 1))
+           << ":";
+    while (Pad--)
+      dbgs() << " ";
+  }
+
+  while (Remaining > 0) {
+    // Start a fresh labelled row at each aligned address.
+    if ((Addr & (ColsPerRow - 1)) == 0)
+      dbgs() << "\n" << format("0x%016" PRIx64, Addr) << ":";
+
+    dbgs() << " " << format("%02x", *Byte);
+
+    ++Byte;
+    ++Addr;
+    --Remaining;
+  }
+
+  dbgs() << "\n";
+}
+#endif
+
+// Resolve the relocations for all symbols we currently know about.
+void RuntimeDyldImpl::resolveRelocations() {
+  std::lock_guard<sys::Mutex> Guard(lock);
+
+  // Dump section contents prior to relocation (debug builds only).
+  LLVM_DEBUG({
+    for (SectionEntry &S : Sections)
+      dumpSectionMemory(S, "before relocations");
+  });
+
+  // External symbols first; a failure here is recorded rather than fatal.
+  if (auto Err = resolveExternalSymbols()) {
+    HasError = true;
+    ErrorStr = toString(std::move(Err));
+  }
+
+  // Then relocations that refer to symbols within this instance.
+  resolveLocalRelocations();
+
+  // Dump section contents after relocation (debug builds only).
+  LLVM_DEBUG({
+    for (SectionEntry &S : Sections)
+      dumpSectionMemory(S, "after relocations");
+  });
+}
+
+void RuntimeDyldImpl::resolveLocalRelocations() {
+  // Apply every outstanding relocation, then drop the bookkeeping.
+  for (const auto &Entry : Relocations) {
+    // Entry.first identifies the section containing the relocated symbol;
+    // each relocation in the list names the section it is applied to.
+    const unsigned SectionIdx = Entry.first;
+    const uint64_t SectionAddr = getSectionLoadAddress(SectionIdx);
+    LLVM_DEBUG(dbgs() << "Resolving relocations Section #" << SectionIdx
+                      << "\t" << format("%p", (uintptr_t)SectionAddr)
+                      << "\n");
+    resolveRelocationList(Entry.second, SectionAddr);
+  }
+  Relocations.clear();
+}
+
+void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
+                                        uint64_t TargetAddress) {
+  // Locate the section whose local buffer is LocalAddress and reassign it to
+  // the target address; the section is required to exist.
+  std::lock_guard<sys::Mutex> Guard(lock);
+  for (unsigned SecID = 0, NumSecs = Sections.size(); SecID != NumSecs;
+       ++SecID) {
+    if (Sections[SecID].getAddress() == LocalAddress) {
+      reassignSectionAddress(SecID, TargetAddress);
+      return;
+    }
+  }
+  llvm_unreachable("Attempting to remap address of unknown section!");
+}
+
+// Compute Sym's offset from the start of Sec, storing it in Result.
+static Error getOffset(const SymbolRef &Sym, SectionRef Sec,
+                       uint64_t &Result) {
+  if (auto AddressOrErr = Sym.getAddress()) {
+    Result = *AddressOrErr - Sec.getAddress();
+    return Error::success();
+  } else
+    return AddressOrErr.takeError();
+}
+
+/// Load \p Obj: allocates/registers its sections with the memory manager,
+/// populates the global symbol table (resolving weak/common responsibility
+/// via the resolver), records relocations for later resolution, and lets the
+/// format-specific subclass finalize.  Returns the object-section -> section
+/// ID mapping built during the load.
+Expected<RuntimeDyldImpl::ObjSectionToIDMap>
+RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Save information about our target
+  Arch = (Triple::ArchType)Obj.getArch();
+  IsTargetLittleEndian = Obj.isLittleEndian();
+  setMipsABI(Obj);
+
+  // Compute the memory size required to load all sections to be loaded
+  // and pass this information to the memory manager
+  if (MemMgr.needsToReserveAllocationSpace()) {
+    uint64_t CodeSize = 0, RODataSize = 0, RWDataSize = 0;
+    Align CodeAlign, RODataAlign, RWDataAlign;
+    if (auto Err = computeTotalAllocSize(Obj, CodeSize, CodeAlign, RODataSize,
+                                         RODataAlign, RWDataSize, RWDataAlign))
+      return std::move(Err);
+    MemMgr.reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign,
+                                  RWDataSize, RWDataAlign);
+  }
+
+  // Used sections from the object file
+  ObjSectionToIDMap LocalSections;
+
+  // Common symbols requiring allocation, with their sizes and alignments
+  CommonSymbolList CommonSymbolsToAllocate;
+
+  uint64_t CommonSize = 0;
+  uint32_t CommonAlign = 0;
+
+  // First, collect all weak and common symbols. We need to know if stronger
+  // definitions occur elsewhere.
+  JITSymbolResolver::LookupSet ResponsibilitySet;
+  {
+    JITSymbolResolver::LookupSet Symbols;
+    for (auto &Sym : Obj.symbols()) {
+      Expected<uint32_t> FlagsOrErr = Sym.getFlags();
+      if (!FlagsOrErr)
+        // TODO: Test this error.
+        return FlagsOrErr.takeError();
+      if ((*FlagsOrErr & SymbolRef::SF_Common) ||
+          (*FlagsOrErr & SymbolRef::SF_Weak)) {
+        // Get symbol name.
+        if (auto NameOrErr = Sym.getName())
+          Symbols.insert(*NameOrErr);
+        else
+          return NameOrErr.takeError();
+      }
+    }
+
+    if (auto ResultOrErr = Resolver.getResponsibilitySet(Symbols))
+      ResponsibilitySet = std::move(*ResultOrErr);
+    else
+      return ResultOrErr.takeError();
+  }
+
+  // Parse symbols
+  LLVM_DEBUG(dbgs() << "Parse symbols:\n");
+  for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+       ++I) {
+    Expected<uint32_t> FlagsOrErr = I->getFlags();
+    if (!FlagsOrErr)
+      // TODO: Test this error.
+      return FlagsOrErr.takeError();
+
+    // Skip undefined symbols.
+    if (*FlagsOrErr & SymbolRef::SF_Undefined)
+      continue;
+
+    // Get the symbol type.
+    object::SymbolRef::Type SymType;
+    if (auto SymTypeOrErr = I->getType())
+      SymType = *SymTypeOrErr;
+    else
+      return SymTypeOrErr.takeError();
+
+    // Get symbol name.
+    StringRef Name;
+    if (auto NameOrErr = I->getName())
+      Name = *NameOrErr;
+    else
+      return NameOrErr.takeError();
+
+    // Compute JIT symbol flags.
+    auto JITSymFlags = getJITSymbolFlags(*I);
+    if (!JITSymFlags)
+      return JITSymFlags.takeError();
+
+    // If this is a weak definition, check to see if there's a strong one.
+    // If there is, skip this symbol (we won't be providing it: the strong
+    // definition will). If there's no strong definition, make this definition
+    // strong.
+    if (JITSymFlags->isWeak() || JITSymFlags->isCommon()) {
+      // First check whether there's already a definition in this instance.
+      if (GlobalSymbolTable.count(Name))
+        continue;
+
+      // If we're not responsible for this symbol, skip it.
+      if (!ResponsibilitySet.count(Name))
+        continue;
+
+      // Otherwise update the flags on the symbol to make this definition
+      // strong.
+      if (JITSymFlags->isWeak())
+        *JITSymFlags &= ~JITSymbolFlags::Weak;
+      if (JITSymFlags->isCommon()) {
+        *JITSymFlags &= ~JITSymbolFlags::Common;
+        uint32_t Align = I->getAlignment();
+        uint64_t Size = I->getCommonSize();
+        // Running total of the common block: each symbol is placed at its
+        // own alignment within it.
+        if (!CommonAlign)
+          CommonAlign = Align;
+        CommonSize = alignTo(CommonSize, Align) + Size;
+        CommonSymbolsToAllocate.push_back(*I);
+      }
+    }
+
+    if (*FlagsOrErr & SymbolRef::SF_Absolute &&
+        SymType != object::SymbolRef::ST_File) {
+      uint64_t Addr = 0;
+      if (auto AddrOrErr = I->getAddress())
+        Addr = *AddrOrErr;
+      else
+        return AddrOrErr.takeError();
+
+      unsigned SectionID = AbsoluteSymbolSection;
+
+      LLVM_DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
+                        << " SID: " << SectionID
+                        << " Offset: " << format("%p", (uintptr_t)Addr)
+                        << " flags: " << *FlagsOrErr << "\n");
+      // Skip absolute symbol relocations.
+      if (!Name.empty()) {
+        auto Result = GlobalSymbolTable.insert_or_assign(
+            Name, SymbolTableEntry(SectionID, Addr, *JITSymFlags));
+        processNewSymbol(*I, Result.first->getValue());
+      }
+    } else if (SymType == object::SymbolRef::ST_Function ||
+               SymType == object::SymbolRef::ST_Data ||
+               SymType == object::SymbolRef::ST_Unknown ||
+               SymType == object::SymbolRef::ST_Other) {
+
+      section_iterator SI = Obj.section_end();
+      if (auto SIOrErr = I->getSection())
+        SI = *SIOrErr;
+      else
+        return SIOrErr.takeError();
+
+      if (SI == Obj.section_end())
+        continue;
+
+      // Get symbol offset.
+      uint64_t SectOffset;
+      if (auto Err = getOffset(*I, *SI, SectOffset))
+        return std::move(Err);
+
+      bool IsCode = SI->isText();
+      unsigned SectionID;
+      if (auto SectionIDOrErr =
+              findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+        SectionID = *SectionIDOrErr;
+      else
+        return SectionIDOrErr.takeError();
+
+      LLVM_DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
+                        << " SID: " << SectionID
+                        << " Offset: " << format("%p", (uintptr_t)SectOffset)
+                        << " flags: " << *FlagsOrErr << "\n");
+      // Skip absolute symbol relocations.
+      if (!Name.empty()) {
+        auto Result = GlobalSymbolTable.insert_or_assign(
+            Name, SymbolTableEntry(SectionID, SectOffset, *JITSymFlags));
+        processNewSymbol(*I, Result.first->getValue());
+      }
+    }
+  }
+
+  // Allocate common symbols
+  if (auto Err = emitCommonSymbols(Obj, CommonSymbolsToAllocate, CommonSize,
+                                   CommonAlign))
+    return std::move(Err);
+
+  // Parse and process relocations
+  LLVM_DEBUG(dbgs() << "Parse relocations:\n");
+  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+       SI != SE; ++SI) {
+    StubMap Stubs;
+
+    Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+    if (!RelSecOrErr)
+      return RelSecOrErr.takeError();
+
+    section_iterator RelocatedSection = *RelSecOrErr;
+    if (RelocatedSection == SE)
+      continue;
+
+    relocation_iterator I = SI->relocation_begin();
+    relocation_iterator E = SI->relocation_end();
+
+    if (I == E && !ProcessAllSections)
+      continue;
+
+    bool IsCode = RelocatedSection->isText();
+    unsigned SectionID = 0;
+    if (auto SectionIDOrErr = findOrEmitSection(Obj, *RelocatedSection, IsCode,
+                                                LocalSections))
+      SectionID = *SectionIDOrErr;
+    else
+      return SectionIDOrErr.takeError();
+
+    LLVM_DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+
+    // processRelocationRef advances the iterator past however many entries
+    // it consumed.
+    for (; I != E;)
+      if (auto IOrErr =
+              processRelocationRef(SectionID, I, Obj, LocalSections, Stubs))
+        I = *IOrErr;
+      else
+        return IOrErr.takeError();
+
+    // If there is a NotifyStubEmitted callback set, call it to register any
+    // stubs created for this section.
+    if (NotifyStubEmitted) {
+      StringRef FileName = Obj.getFileName();
+      StringRef SectionName = Sections[SectionID].getName();
+      for (auto &KV : Stubs) {
+
+        auto &VR = KV.first;
+        uint64_t StubAddr = KV.second;
+
+        // If this is a named stub, just call NotifyStubEmitted.
+        if (VR.SymbolName) {
+          NotifyStubEmitted(FileName, SectionName, VR.SymbolName, SectionID,
+                            StubAddr);
+          continue;
+        }
+
+        // Otherwise we will have to try a reverse lookup on the global
+        // symbol table.
+        for (auto &GSTMapEntry : GlobalSymbolTable) {
+          StringRef SymbolName = GSTMapEntry.first();
+          auto &GSTEntry = GSTMapEntry.second;
+          if (GSTEntry.getSectionID() == VR.SectionID &&
+              GSTEntry.getOffset() == VR.Offset) {
+            NotifyStubEmitted(FileName, SectionName, SymbolName, SectionID,
+                              StubAddr);
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  // Process remaining sections
+  if (ProcessAllSections) {
+    LLVM_DEBUG(dbgs() << "Process remaining sections:\n");
+    for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+         SI != SE; ++SI) {
+
+      /* Ignore already loaded sections */
+      if (LocalSections.find(*SI) != LocalSections.end())
+        continue;
+
+      bool IsCode = SI->isText();
+      if (auto SectionIDOrErr =
+              findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+        LLVM_DEBUG(dbgs() << "\tSectionID: " << (*SectionIDOrErr) << "\n");
+      else
+        return SectionIDOrErr.takeError();
+    }
+  }
+
+  // Give the subclasses a chance to tie-up any loose ends.
+  if (auto Err = finalizeLoad(Obj, LocalSections))
+    return std::move(Err);
+
+// for (auto E : LocalSections)
+//   llvm::dbgs() << "Added: " << E.first.getRawDataRefImpl() << " -> " << E.second << "\n";
+
+  return LocalSections;
+}
+
+// A helper method for computeTotalAllocSize.
+// Computes the memory size required to allocate sections with the given
+// sizes, assuming that all sections are allocated with the given alignment.
+//
+// \param SectionSizes sizes of the individual sections to be allocated.
+// \param Alignment    alignment applied to the start of every section.
+// \returns the total size with each section rounded up to \p Alignment.
+//
+// The parameter is read-only, so take it by const reference instead of a
+// mutable reference (avoids implying mutation; callers pass lvalue vectors).
+static uint64_t
+computeAllocationSizeForSections(const std::vector<uint64_t> &SectionSizes,
+                                 Align Alignment) {
+  uint64_t TotalSize = 0;
+  for (uint64_t SectionSize : SectionSizes)
+    TotalSize += alignTo(SectionSize, Alignment);
+  return TotalSize;
+}
+
+// Decide whether a section must be mapped into memory for the program to run.
+static bool isRequiredForExecution(const SectionRef Section) {
+  const ObjectFile *Obj = Section.getObject();
+
+  if (isa<object::ELFObjectFileBase>(Obj))
+    return ELFSectionRef(Section).getFlags() & ELF::SHF_ALLOC;
+
+  if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj)) {
+    const coff_section *CS = COFFObj->getCOFFSection(Section);
+    // Avoid loading zero-sized COFF sections. In PE files VirtualSize carries
+    // the section size and SizeOfRawData may be zero for sections with
+    // content; in object files SizeOfRawData carries the size and VirtualSize
+    // is always zero. Hence both fields must be checked.
+    bool Empty = CS->VirtualSize == 0 && CS->SizeOfRawData == 0;
+    bool Discardable =
+        CS->Characteristics &
+        (COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
+    return !Empty && !Discardable;
+  }
+
+  // Mach-O: every section is treated as required.
+  assert(isa<MachOObjectFile>(Obj));
+  return true;
+}
+
+// Decide whether a section holds read-only data.
+static bool isReadOnlyData(const SectionRef Section) {
+  const ObjectFile *Obj = Section.getObject();
+
+  if (isa<object::ELFObjectFileBase>(Obj))
+    return !(ELFSectionRef(Section).getFlags() &
+             (ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
+
+  if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj)) {
+    // Read-only COFF data is initialized and readable but not writable.
+    const uint32_t Mask = COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                          COFF::IMAGE_SCN_MEM_READ |
+                          COFF::IMAGE_SCN_MEM_WRITE;
+    const uint32_t Want =
+        COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | COFF::IMAGE_SCN_MEM_READ;
+    return (COFFObj->getCOFFSection(Section)->Characteristics & Mask) == Want;
+  }
+
+  // Mach-O sections are never classified as read-only data here.
+  assert(isa<MachOObjectFile>(Obj));
+  return false;
+}
+
+// Decide whether a section's contents are zero-initialized (bss-like).
+static bool isZeroInit(const SectionRef Section) {
+  const ObjectFile *Obj = Section.getObject();
+
+  if (isa<object::ELFObjectFileBase>(Obj))
+    return ELFSectionRef(Section).getType() == ELF::SHT_NOBITS;
+
+  if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
+    return COFFObj->getCOFFSection(Section)->Characteristics &
+           COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+
+  // Mach-O: the zerofill section types are zero-initialized.
+  unsigned SecType = cast<MachOObjectFile>(Obj)->getSectionType(Section);
+  return SecType == MachO::S_ZEROFILL || SecType == MachO::S_GB_ZEROFILL;
+}
+
+// Decide whether a section is thread-local. Only ELF carries a TLS flag that
+// this code honours; all other formats report false.
+static bool isTLS(const SectionRef Section) {
+  const ObjectFile *Obj = Section.getObject();
+  if (!isa<object::ELFObjectFileBase>(Obj))
+    return false;
+  return ELFSectionRef(Section).getFlags() & ELF::SHF_TLS;
+}
+
+// Compute an upper bound of the memory size that is required to load all
+// sections, grouped into code, read-only data, and read-write data, and the
+// maximum alignment seen within each group. The results are upper bounds:
+// every section is assumed to be padded to the group's maximum alignment
+// (see computeAllocationSizeForSections).
+Error RuntimeDyldImpl::computeTotalAllocSize(
+    const ObjectFile &Obj, uint64_t &CodeSize, Align &CodeAlign,
+    uint64_t &RODataSize, Align &RODataAlign, uint64_t &RWDataSize,
+    Align &RWDataAlign) {
+  // Compute the size of all sections required for execution
+  std::vector<uint64_t> CodeSectionSizes;
+  std::vector<uint64_t> ROSectionSizes;
+  std::vector<uint64_t> RWSectionSizes;
+
+  // Collect sizes of all sections to be loaded;
+  // also determine the max alignment of all sections
+  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+       SI != SE; ++SI) {
+    const SectionRef &Section = *SI;
+
+    bool IsRequired = isRequiredForExecution(Section) || ProcessAllSections;
+
+    // Consider only the sections that are required to be loaded for execution
+    if (IsRequired) {
+      uint64_t DataSize = Section.getSize();
+      Align Alignment = Section.getAlignment();
+      bool IsCode = Section.isText();
+      bool IsReadOnly = isReadOnlyData(Section);
+      bool IsTLS = isTLS(Section);
+
+      Expected<StringRef> NameOrErr = Section.getName();
+      if (!NameOrErr)
+        return NameOrErr.takeError();
+      StringRef Name = *NameOrErr;
+
+      uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+      uint64_t PaddingSize = 0;
+      if (Name == ".eh_frame")
+        PaddingSize += 4;
+      // Worst-case padding needed to realign the stub buffer after the data.
+      if (StubBufSize != 0)
+        PaddingSize += getStubAlignment().value() - 1;
+
+      uint64_t SectionSize = DataSize + PaddingSize + StubBufSize;
+
+      // The .eh_frame section (at least on Linux) needs an extra four bytes
+      // padded
+      // with zeroes added at the end. For MachO objects, this section has a
+      // slightly different name, so this won't have any effect for MachO
+      // objects.
+      // NOTE(review): together with the PaddingSize bump above, this reserves
+      // 8 extra bytes for .eh_frame. Harmless for an upper bound, but worth
+      // confirming that both additions are intentional.
+      if (Name == ".eh_frame")
+        SectionSize += 4;
+
+      // Even empty sections get a non-zero reservation so each one has a
+      // distinct address.
+      if (!SectionSize)
+        SectionSize = 1;
+
+      if (IsCode) {
+        CodeAlign = std::max(CodeAlign, Alignment);
+        CodeSectionSizes.push_back(SectionSize);
+      } else if (IsReadOnly) {
+        RODataAlign = std::max(RODataAlign, Alignment);
+        ROSectionSizes.push_back(SectionSize);
+      } else if (!IsTLS) {
+        // TLS sections are allocated through a separate path and are not
+        // counted against the RW budget.
+        RWDataAlign = std::max(RWDataAlign, Alignment);
+        RWSectionSizes.push_back(SectionSize);
+      }
+    }
+  }
+
+  // Compute Global Offset Table size. If it is not zero we
+  // also update alignment, which is equal to a size of a
+  // single GOT entry.
+  if (unsigned GotSize = computeGOTSize(Obj)) {
+    RWSectionSizes.push_back(GotSize);
+    RWDataAlign = std::max(RWDataAlign, Align(getGOTEntrySize()));
+  }
+
+  // Compute the size of all common symbols
+  uint64_t CommonSize = 0;
+  Align CommonAlign;
+  for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+       ++I) {
+    Expected<uint32_t> FlagsOrErr = I->getFlags();
+    if (!FlagsOrErr)
+      // TODO: Test this error.
+      return FlagsOrErr.takeError();
+    if (*FlagsOrErr & SymbolRef::SF_Common) {
+      // Add the common symbols to a list. We'll allocate them all below.
+      uint64_t Size = I->getCommonSize();
+      Align Alignment = Align(I->getAlignment());
+      // If this is the first common symbol, use its alignment as the alignment
+      // for the common symbols section.
+      if (CommonSize == 0)
+        CommonAlign = Alignment;
+      CommonSize = alignTo(CommonSize, Alignment) + Size;
+    }
+  }
+  if (CommonSize != 0) {
+    RWSectionSizes.push_back(CommonSize);
+    RWDataAlign = std::max(RWDataAlign, CommonAlign);
+  }
+
+  if (!CodeSectionSizes.empty()) {
+    // Add 64 bytes for a potential IFunc resolver stub
+    CodeSectionSizes.push_back(64);
+  }
+
+  // Compute the required allocation space for each different type of sections
+  // (code, read-only data, read-write data) assuming that all sections are
+  // allocated with the max alignment. Note that we cannot compute with the
+  // individual alignments of the sections, because then the required size
+  // depends on the order, in which the sections are allocated.
+  CodeSize = computeAllocationSizeForSections(CodeSectionSizes, CodeAlign);
+  RODataSize = computeAllocationSizeForSections(ROSectionSizes, RODataAlign);
+  RWDataSize = computeAllocationSizeForSections(RWSectionSizes, RWDataAlign);
+
+  return Error::success();
+}
+
+// Compute the total size of the GOT this object will need: one entry per
+// relocation that requires GOT indirection.
+unsigned RuntimeDyldImpl::computeGOTSize(const ObjectFile &Obj) {
+  size_t EntrySize = getGOTEntrySize();
+  // A zero entry size means the target does not use a GOT.
+  if (!EntrySize)
+    return 0;
+
+  size_t NumEntries = 0;
+  for (const SectionRef &Sec : Obj.sections())
+    for (const RelocationRef &Reloc : Sec.relocations())
+      if (relocationNeedsGot(Reloc))
+        ++NumEntries;
+
+  return NumEntries * EntrySize;
+}
+
+// Compute the size of the stub buffer that must be appended to the given
+// section: worst case one stub per relocation targeting it, plus padding so
+// the first stub starts at stub alignment.
+unsigned RuntimeDyldImpl::computeSectionStubBufSize(const ObjectFile &Obj,
+                                                    const SectionRef &Section) {
+  if (!MemMgr.allowStubAllocation())
+    return 0;
+
+  unsigned StubSize = getMaxStubSize();
+  if (StubSize == 0)
+    return 0;
+
+  // FIXME: this is an inefficient way to handle this. We should compute the
+  // necessary section allocation size in loadObject by walking all the
+  // sections once.
+  unsigned StubBufSize = 0;
+  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+       SI != SE; ++SI) {
+    Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+    if (!RelSecOrErr)
+      report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
+
+    // Only count relocation sections whose target is Section.
+    if (!(*RelSecOrErr == Section))
+      continue;
+
+    for (const RelocationRef &Reloc : SI->relocations())
+      if (relocationNeedsStub(Reloc))
+        StubBufSize += StubSize;
+  }
+
+  // Pad so the stub area can be realigned after the section data ends.
+  uint64_t DataSize = Section.getSize();
+  Align StubAlignment = getStubAlignment();
+  Align EndAlignment = commonAlignment(Section.getAlignment(), DataSize);
+  if (StubAlignment > EndAlignment)
+    StubBufSize += StubAlignment.value() - EndAlignment.value();
+  return StubBufSize;
+}
+
+// Read an unaligned Size-byte integer from Src in the target's byte order.
+uint64_t RuntimeDyldImpl::readBytesUnaligned(uint8_t *Src,
+                                             unsigned Size) const {
+  // Accumulate most-significant byte first; for little-endian targets walk
+  // the buffer backwards so the highest-addressed byte is consumed first.
+  uint64_t Result = 0;
+  const int Step = IsTargetLittleEndian ? -1 : 1;
+  uint8_t *P = IsTargetLittleEndian ? Src + (Size - 1) : Src;
+  for (unsigned I = 0; I != Size; ++I, P += Step)
+    Result = (Result << 8) | *P;
+  return Result;
+}
+
+// Write the low Size bytes of Value to Dst in the target's byte order,
+// without any alignment requirement on Dst.
+void RuntimeDyldImpl::writeBytesUnaligned(uint64_t Value, uint8_t *Dst,
+                                          unsigned Size) const {
+  // Emit least-significant byte first; for big-endian targets start at the
+  // end of the buffer and walk backwards.
+  const int Step = IsTargetLittleEndian ? 1 : -1;
+  uint8_t *P = IsTargetLittleEndian ? Dst : Dst + (Size - 1);
+  for (unsigned I = 0; I != Size; ++I, P += Step) {
+    *P = static_cast<uint8_t>(Value & 0xFF);
+    Value >>= 8;
+  }
+}
+
+// Translate an object-file symbol's flags into JIT symbol flags.
+Expected<JITSymbolFlags>
+RuntimeDyldImpl::getJITSymbolFlags(const SymbolRef &SR) {
+  return JITSymbolFlags::fromObjectSymbol(SR);
+}
+
+// Allocate one "<common symbols>" data section and lay out every common
+// (tentative) symbol inside it, honouring each symbol's alignment, then
+// register the symbols in the global symbol table.
+//
+// CommonSize must already account for inter-symbol alignment padding (the
+// caller computes it with the same alignTo logic used below).
+Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
+                                         CommonSymbolList &SymbolsToAllocate,
+                                         uint64_t CommonSize,
+                                         uint32_t CommonAlign) {
+  if (SymbolsToAllocate.empty())
+    return Error::success();
+
+  // Allocate memory for the section
+  unsigned SectionID = Sections.size();
+  uint8_t *Addr = MemMgr.allocateDataSection(CommonSize, CommonAlign, SectionID,
+                                             "<common symbols>", false);
+  if (!Addr)
+    report_fatal_error("Unable to allocate memory for common symbols!");
+  uint64_t Offset = 0;
+  Sections.push_back(
+      SectionEntry("<common symbols>", Addr, CommonSize, CommonSize, 0));
+  // Common symbols are zero-initialized.
+  memset(Addr, 0, CommonSize);
+
+  LLVM_DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
+                    << " new addr: " << format("%p", Addr)
+                    << " DataSize: " << CommonSize << "\n");
+
+  // Assign the address of each symbol. Addr and Offset advance in lockstep:
+  // Addr is the host pointer, Offset the symbol's offset within the section.
+  for (auto &Sym : SymbolsToAllocate) {
+    uint32_t Alignment = Sym.getAlignment();
+    uint64_t Size = Sym.getCommonSize();
+    StringRef Name;
+    if (auto NameOrErr = Sym.getName())
+      Name = *NameOrErr;
+    else
+      return NameOrErr.takeError();
+    if (Alignment) {
+      // This symbol has an alignment requirement.
+      uint64_t AlignOffset =
+          offsetToAlignment((uint64_t)Addr, Align(Alignment));
+      Addr += AlignOffset;
+      Offset += AlignOffset;
+    }
+    auto JITSymFlags = getJITSymbolFlags(Sym);
+
+    if (!JITSymFlags)
+      return JITSymFlags.takeError();
+
+    LLVM_DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+                      << format("%p", Addr) << "\n");
+    if (!Name.empty()) // Skip absolute symbol relocations.
+      GlobalSymbolTable[Name] =
+          SymbolTableEntry(SectionID, Offset, std::move(*JITSymFlags));
+    Offset += Size;
+    Addr += Size;
+  }
+
+  return Error::success();
+}
+
+// Allocate memory for one object-file section, copy (or zero-fill) its
+// contents, and record it in the Sections table. Returns the new SectionID.
+//
+// Sections not required for execution are still recorded (with a null
+// address) unless ProcessAllSections is set, so later passes can refer to
+// them by ID without loading them.
+Expected<unsigned>
+RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
+                             const SectionRef &Section,
+                             bool IsCode) {
+  StringRef data;
+  Align Alignment = Section.getAlignment();
+
+  unsigned PaddingSize = 0;
+  unsigned StubBufSize = 0;
+  bool IsRequired = isRequiredForExecution(Section);
+  bool IsVirtual = Section.isVirtual();
+  bool IsZeroInit = isZeroInit(Section);
+  bool IsReadOnly = isReadOnlyData(Section);
+  bool IsTLS = isTLS(Section);
+  uint64_t DataSize = Section.getSize();
+
+  Expected<StringRef> NameOrErr = Section.getName();
+  if (!NameOrErr)
+    return NameOrErr.takeError();
+  StringRef Name = *NameOrErr;
+
+  StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+  // The .eh_frame section (at least on Linux) needs an extra four bytes padded
+  // with zeroes added at the end. For MachO objects, this section has a
+  // slightly different name, so this won't have any effect for MachO objects.
+  if (Name == ".eh_frame")
+    PaddingSize = 4;
+
+  uintptr_t Allocate;
+  unsigned SectionID = Sections.size();
+  uint8_t *Addr;
+  uint64_t LoadAddress = 0;
+  const char *pData = nullptr;
+
+  // If this section contains any bits (i.e. isn't a virtual or bss section),
+  // grab a reference to them.
+  if (!IsVirtual && !IsZeroInit) {
+    // In either case, set the location of the unrelocated section in memory,
+    // since we still process relocations for it even if we're not applying them.
+    if (Expected<StringRef> E = Section.getContents())
+      data = *E;
+    else
+      return E.takeError();
+    pData = data.data();
+  }
+
+  // If there are any stubs then the section alignment needs to be at least as
+  // high as stub alignment or padding calculations may be incorrect when the
+  // section is remapped.
+  if (StubBufSize != 0) {
+    Alignment = std::max(Alignment, getStubAlignment());
+    PaddingSize += getStubAlignment().value() - 1;
+  }
+
+  // Some sections, such as debug info, don't need to be loaded for execution.
+  // Process those only if explicitly requested.
+  if (IsRequired || ProcessAllSections) {
+    Allocate = DataSize + PaddingSize + StubBufSize;
+    if (!Allocate)
+      Allocate = 1;
+    // Dispatch to the appropriate allocator: TLS, code, or data.
+    if (IsTLS) {
+      auto TLSSection = MemMgr.allocateTLSSection(Allocate, Alignment.value(),
+                                                  SectionID, Name);
+      Addr = TLSSection.InitializationImage;
+      LoadAddress = TLSSection.Offset;
+    } else if (IsCode) {
+      Addr = MemMgr.allocateCodeSection(Allocate, Alignment.value(), SectionID,
+                                        Name);
+    } else {
+      Addr = MemMgr.allocateDataSection(Allocate, Alignment.value(), SectionID,
+                                        Name, IsReadOnly);
+    }
+    if (!Addr)
+      report_fatal_error("Unable to allocate section memory!");
+
+    // Zero-initialize or copy the data from the image
+    if (IsZeroInit || IsVirtual)
+      memset(Addr, 0, DataSize);
+    else
+      memcpy(Addr, pData, DataSize);
+
+    // Fill in any extra bytes we allocated for padding
+    if (PaddingSize != 0) {
+      memset(Addr + DataSize, 0, PaddingSize);
+      // Update the DataSize variable to include padding.
+      DataSize += PaddingSize;
+
+      // Align DataSize to stub alignment if we have any stubs (PaddingSize will
+      // have been increased above to account for this). Rounds DOWN via the
+      // two's-complement mask so the stubs start at an aligned offset.
+      if (StubBufSize > 0)
+        DataSize &= -(uint64_t)getStubAlignment().value();
+    }
+
+    LLVM_DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: "
+                      << Name << " obj addr: " << format("%p", pData)
+                      << " new addr: " << format("%p", Addr) << " DataSize: "
+                      << DataSize << " StubBufSize: " << StubBufSize
+                      << " Allocate: " << Allocate << "\n");
+  } else {
+    // Even if we didn't load the section, we need to record an entry for it
+    // to handle later processing (and by 'handle' I mean don't do anything
+    // with these sections).
+    Allocate = 0;
+    Addr = nullptr;
+    LLVM_DEBUG(
+        dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+               << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+               << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+               << " Allocate: " << Allocate << "\n");
+  }
+
+  Sections.push_back(
+      SectionEntry(Name, Addr, DataSize, Allocate, (uintptr_t)pData));
+
+  // The load address of a TLS section is not equal to the address of its
+  // initialization image
+  if (IsTLS)
+    Sections.back().setLoadAddress(LoadAddress);
+  // Debug info sections are linked as if their load address was zero
+  if (!IsRequired)
+    Sections.back().setLoadAddress(0);
+
+  return SectionID;
+}
+
+// Return the SectionID for Section, emitting the section first if this is
+// the first time it has been seen for this object.
+Expected<unsigned>
+RuntimeDyldImpl::findOrEmitSection(const ObjectFile &Obj,
+                                   const SectionRef &Section,
+                                   bool IsCode,
+                                   ObjSectionToIDMap &LocalSections) {
+  // Fast path: the section was already emitted for this object.
+  auto Existing = LocalSections.find(Section);
+  if (Existing != LocalSections.end())
+    return Existing->second;
+
+  auto SectionIDOrErr = emitSection(Obj, Section, IsCode);
+  if (!SectionIDOrErr)
+    return SectionIDOrErr.takeError();
+
+  // Remember the mapping so subsequent lookups hit the fast path.
+  LocalSections[Section] = *SectionIDOrErr;
+  return *SectionIDOrErr;
+}
+
+// Record a relocation whose target section is already known by ID; it is
+// resolved later, once section load addresses are final.
+void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
+                                              unsigned SectionID) {
+  Relocations[SectionID].push_back(RE);
+}
+
+// Record a relocation against a named symbol. If the symbol is already in
+// the global symbol table, convert it immediately into a section-relative
+// relocation; otherwise queue it for external resolution.
+void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
+                                             StringRef SymbolName) {
+  auto Entry = GlobalSymbolTable.find(SymbolName);
+  if (Entry == GlobalSymbolTable.end()) {
+    ExternalSymbolRelocations[SymbolName].push_back(RE);
+    return;
+  }
+
+  assert(!SymbolName.empty() &&
+         "Empty symbol should not be in GlobalSymbolTable");
+
+  // Copy the entry so the symbol's offset within its section can be folded
+  // into the addend.
+  const auto &SymInfo = Entry->second;
+  RelocationEntry RECopy = RE;
+  RECopy.Addend += SymInfo.getOffset();
+  Relocations[SymInfo.getSectionID()].push_back(RECopy);
+}
+
+// Emit a far-call stub for the current target architecture at Addr.
+// Returns the address within the stub that relocations should target (for
+// most targets the start of the stub; ARM returns Addr+4 because the target
+// address is stored in the literal word following the load). For x86/x86-64
+// only the opcode bytes are written here; the caller fills in the address
+// field afterwards.
+uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
+                                             unsigned AbiVariant) {
+  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+      Arch == Triple::aarch64_32) {
+    // This stub has to be able to access the full address space,
+    // since symbol lookup won't necessarily find a handy, in-range,
+    // PLT stub for functions which could be anywhere.
+    // Stub can use ip0 (== x16) to calculate address
+    writeBytesUnaligned(0xd2e00010, Addr, 4); // movz ip0, #:abs_g3:<addr>
+    writeBytesUnaligned(0xf2c00010, Addr+4, 4); // movk ip0, #:abs_g2_nc:<addr>
+    writeBytesUnaligned(0xf2a00010, Addr+8, 4); // movk ip0, #:abs_g1_nc:<addr>
+    writeBytesUnaligned(0xf2800010, Addr+12, 4); // movk ip0, #:abs_g0_nc:<addr>
+    writeBytesUnaligned(0xd61f0200, Addr+16, 4); // br ip0
+
+    return Addr;
+  } else if (Arch == Triple::arm || Arch == Triple::armeb) {
+    // TODO: There is only ARM far stub now. We should add the Thumb stub,
+    // and stubs for branches Thumb - ARM and ARM - Thumb.
+    writeBytesUnaligned(0xe51ff004, Addr, 4); // ldr pc, [pc, #-4]
+    return Addr + 4;
+  } else if (IsMipsO32ABI || IsMipsN32ABI) {
+    // 0:   3c190000        lui     t9,%hi(addr).
+    // 4:   27390000        addiu   t9,t9,%lo(addr).
+    // 8:   03200008        jr      t9.
+    // c:   00000000        nop.
+    const unsigned LuiT9Instr = 0x3c190000, AdduiT9Instr = 0x27390000;
+    const unsigned NopInstr = 0x0;
+    // R6 replaces jr with jalr-style encoding (jic/jr.hb family).
+    unsigned JrT9Instr = 0x03200008;
+    if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_32R6 ||
+        (AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
+      JrT9Instr = 0x03200009;
+
+    writeBytesUnaligned(LuiT9Instr, Addr, 4);
+    writeBytesUnaligned(AdduiT9Instr, Addr + 4, 4);
+    writeBytesUnaligned(JrT9Instr, Addr + 8, 4);
+    writeBytesUnaligned(NopInstr, Addr + 12, 4);
+    return Addr;
+  } else if (IsMipsN64ABI) {
+    // 0:   3c190000        lui     t9,%highest(addr).
+    // 4:   67390000        daddiu  t9,t9,%higher(addr).
+    // 8:   0019CC38        dsll    t9,t9,16.
+    // c:   67390000        daddiu  t9,t9,%hi(addr).
+    // 10:  0019CC38        dsll    t9,t9,16.
+    // 14:  67390000        daddiu  t9,t9,%lo(addr).
+    // 18:  03200008        jr      t9.
+    // 1c:  00000000        nop.
+    const unsigned LuiT9Instr = 0x3c190000, DaddiuT9Instr = 0x67390000,
+                   DsllT9Instr = 0x19CC38;
+    const unsigned NopInstr = 0x0;
+    unsigned JrT9Instr = 0x03200008;
+    if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
+      JrT9Instr = 0x03200009;
+
+    writeBytesUnaligned(LuiT9Instr, Addr, 4);
+    writeBytesUnaligned(DaddiuT9Instr, Addr + 4, 4);
+    writeBytesUnaligned(DsllT9Instr, Addr + 8, 4);
+    writeBytesUnaligned(DaddiuT9Instr, Addr + 12, 4);
+    writeBytesUnaligned(DsllT9Instr, Addr + 16, 4);
+    writeBytesUnaligned(DaddiuT9Instr, Addr + 20, 4);
+    writeBytesUnaligned(JrT9Instr, Addr + 24, 4);
+    writeBytesUnaligned(NopInstr, Addr + 28, 4);
+    return Addr;
+  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+    // Depending on which version of the ELF ABI is in use, we need to
+    // generate one of two variants of the stub. They both start with
+    // the same sequence to load the target address into r12.
+    writeInt32BE(Addr,    0x3D800000); // lis   r12, highest(addr)
+    writeInt32BE(Addr+4,  0x618C0000); // ori   r12, higher(addr)
+    writeInt32BE(Addr+8,  0x798C07C6); // sldi  r12, r12, 32
+    writeInt32BE(Addr+12, 0x658C0000); // oris  r12, r12, h(addr)
+    writeInt32BE(Addr+16, 0x618C0000); // ori   r12, r12, l(addr)
+    if (AbiVariant == 2) {
+      // PowerPC64 stub ELFv2 ABI: The address points to the function itself.
+      // The address is already in r12 as required by the ABI.  Branch to it.
+      writeInt32BE(Addr+20, 0xF8410018); // std   r2,  24(r1)
+      writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
+      writeInt32BE(Addr+28, 0x4E800420); // bctr
+    } else {
+      // PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
+      // Load the function address on r11 and sets it to control register. Also
+      // loads the function TOC in r2 and environment pointer to r11.
+      writeInt32BE(Addr+20, 0xF8410028); // std   r2,  40(r1)
+      writeInt32BE(Addr+24, 0xE96C0000); // ld    r11, 0(r12)
+      writeInt32BE(Addr+28, 0xE84C0008); // ld    r2,  0(r12)
+      writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
+      writeInt32BE(Addr+36, 0xE96C0010); // ld    r11, 16(r2)
+      writeInt32BE(Addr+40, 0x4E800420); // bctr
+    }
+    return Addr;
+  } else if (Arch == Triple::systemz) {
+    writeInt16BE(Addr,    0xC418);     // lgrl %r1,.+8
+    writeInt16BE(Addr+2,  0x0000);
+    writeInt16BE(Addr+4,  0x0004);
+    writeInt16BE(Addr+6,  0x07F1);     // brc 15,%r1
+    // 8-byte address stored at Addr + 8
+    return Addr;
+  } else if (Arch == Triple::x86_64) {
+    *Addr      = 0xFF; // jmp
+    *(Addr+1)  = 0x25; // rip
+    // 32-bit PC-relative address of the GOT entry will be stored at Addr+2
+  } else if (Arch == Triple::x86) {
+    *Addr      = 0xE9; // 32-bit pc-relative jump.
+  }
+  return Addr;
+}
+
+// Assign a new target load address to the given section; relocations against
+// it will be resolved relative to this address.
+void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
+                                             uint64_t Addr) {
+  // The address to use for relocation resolution is not
+  // the address of the local section buffer. We must be doing
+  // a remote execution environment of some sort. Relocations can't
+  // be applied until all the sections have been moved. The client must
+  // trigger this with a call to MCJIT::finalize() or
+  // RuntimeDyld::resolveRelocations().
+  //
+  // Addr is a uint64_t because we can't assume the pointer width
+  // of the target is the same as that of the host. Just use a generic
+  // "big enough" type.
+  LLVM_DEBUG(
+      dbgs() << "Reassigning address for section " << SectionID << " ("
+             << Sections[SectionID].getName() << "): "
+             << format("0x%016" PRIx64, Sections[SectionID].getLoadAddress())
+             << " -> " << format("0x%016" PRIx64, Addr) << "\n");
+  Sections[SectionID].setLoadAddress(Addr);
+}
+
+// Resolve every relocation in Relocs against the given target value.
+void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
+                                            uint64_t Value) {
+  for (const RelocationEntry &RE : Relocs) {
+    // Skip relocations that live in sections which were never loaded (null
+    // address); absolute-symbol relocations are always processed.
+    const bool TargetLoaded =
+        RE.SectionID == AbsoluteSymbolSection ||
+        Sections[RE.SectionID].getAddress() != nullptr;
+    if (TargetLoaded)
+      resolveRelocation(RE, Value);
+  }
+}
+
+// Resolve all queued external-symbol relocations using the addresses in
+// ExternalSymbolMap (falling back to the global symbol table for symbols
+// defined by previously loaded objects), then clear the queue.
+//
+// NOTE(review): the parameter is a const value, so the StringMap is copied
+// at every call; the signature is declared in the header, so changing it to
+// a const reference must be coordinated there.
+void RuntimeDyldImpl::applyExternalSymbolRelocations(
+    const StringMap<JITEvaluatedSymbol> ExternalSymbolMap) {
+  for (auto &RelocKV : ExternalSymbolRelocations) {
+    StringRef Name = RelocKV.first();
+    RelocationList &Relocs = RelocKV.second;
+    if (Name.empty()) {
+      // This is an absolute symbol, use an address of zero.
+      LLVM_DEBUG(dbgs() << "Resolving absolute relocations."
+                        << "\n");
+      resolveRelocationList(Relocs, 0);
+    } else {
+      uint64_t Addr = 0;
+      JITSymbolFlags Flags;
+      RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(Name);
+      if (Loc == GlobalSymbolTable.end()) {
+        auto RRI = ExternalSymbolMap.find(Name);
+        assert(RRI != ExternalSymbolMap.end() && "No result for symbol");
+        Addr = RRI->second.getAddress();
+        Flags = RRI->second.getFlags();
+      } else {
+        // We found the symbol in our global table. It was probably in a
+        // Module that we loaded previously.
+        const auto &SymInfo = Loc->second;
+        Addr = getSectionLoadAddress(SymInfo.getSectionID()) +
+               SymInfo.getOffset();
+        Flags = SymInfo.getFlags();
+      }
+
+      // FIXME: Implement error handling that doesn't kill the host program!
+      if (!Addr && !Resolver.allowsZeroSymbols())
+        report_fatal_error(Twine("Program used external function '") + Name +
+                           "' which could not be resolved!");
+
+      // If Resolver returned UINT64_MAX, the client wants to handle this symbol
+      // manually and we shouldn't resolve its relocations.
+      if (Addr != UINT64_MAX) {
+
+        // Tweak the address based on the symbol flags if necessary.
+        // For example, this is used by RuntimeDyldMachOARM to toggle the low bit
+        // if the target symbol is Thumb.
+        Addr = modifyAddressBasedOnFlags(Addr, Flags);
+
+        // Use PRIx64 so the 64-bit address prints correctly even where
+        // 'long' is 32 bits (the previous "%lx" was non-portable).
+        LLVM_DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+                          << format("0x%" PRIx64, Addr) << "\n");
+        resolveRelocationList(Relocs, Addr);
+      }
+    }
+  }
+  ExternalSymbolRelocations.clear();
+}
+
+// Look up every still-unresolved external symbol through the Resolver and
+// apply the queued relocations. Lookup is asynchronous, so a promise/future
+// pair bridges it into this synchronous call.
+Error RuntimeDyldImpl::resolveExternalSymbols() {
+  StringMap<JITEvaluatedSymbol> ExternalSymbolMap;
+
+  // Resolution can trigger emission of more symbols, so iterate until
+  // we've resolved *everything*.
+  {
+    JITSymbolResolver::LookupSet ResolvedSymbols;
+
+    while (true) {
+      JITSymbolResolver::LookupSet NewSymbols;
+
+      // Gather symbols that are neither locally defined nor already resolved.
+      for (auto &RelocKV : ExternalSymbolRelocations) {
+        StringRef Name = RelocKV.first();
+        if (!Name.empty() && !GlobalSymbolTable.count(Name) &&
+            !ResolvedSymbols.count(Name))
+          NewSymbols.insert(Name);
+      }
+
+      if (NewSymbols.empty())
+        break;
+
+// MSVC's std::promise rejects Expected's move-only semantics; use the
+// MSVCPExpected shim there.
+#ifdef _MSC_VER
+      using ExpectedLookupResult =
+          MSVCPExpected<JITSymbolResolver::LookupResult>;
+#else
+      using ExpectedLookupResult = Expected<JITSymbolResolver::LookupResult>;
+#endif
+
+      auto NewSymbolsP = std::make_shared<std::promise<ExpectedLookupResult>>();
+      auto NewSymbolsF = NewSymbolsP->get_future();
+      Resolver.lookup(NewSymbols,
+                      [=](Expected<JITSymbolResolver::LookupResult> Result) {
+                        NewSymbolsP->set_value(std::move(Result));
+                      });
+
+      // Block until the resolver delivers its result.
+      auto NewResolverResults = NewSymbolsF.get();
+
+      if (!NewResolverResults)
+        return NewResolverResults.takeError();
+
+      assert(NewResolverResults->size() == NewSymbols.size() &&
+             "Should have errored on unresolved symbols");
+
+      for (auto &RRKV : *NewResolverResults) {
+        assert(!ResolvedSymbols.count(RRKV.first) && "Redundant resolution?");
+        ExternalSymbolMap.insert(RRKV);
+        ResolvedSymbols.insert(RRKV.first);
+      }
+    }
+  }
+
+  applyExternalSymbolRelocations(ExternalSymbolMap);
+
+  return Error::success();
+}
+
+// Asynchronously resolve external symbols, apply relocations, register EH
+// frames and finalize memory, then invoke OnEmitted with the outcome.
+// Takes ownership of the RuntimeDyldImpl; it is kept alive via a shared_ptr
+// captured by the continuation because Resolver.lookup may complete on
+// another thread after this frame returns.
+void RuntimeDyldImpl::finalizeAsync(
+    std::unique_ptr<RuntimeDyldImpl> This,
+    unique_function<void(object::OwningBinary<object::ObjectFile>,
+                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
+        OnEmitted,
+    object::OwningBinary<object::ObjectFile> O,
+    std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info) {
+
+  auto SharedThis = std::shared_ptr<RuntimeDyldImpl>(std::move(This));
+  auto PostResolveContinuation =
+      [SharedThis, OnEmitted = std::move(OnEmitted), O = std::move(O),
+       Info = std::move(Info)](
+          Expected<JITSymbolResolver::LookupResult> Result) mutable {
+        if (!Result) {
+          OnEmitted(std::move(O), std::move(Info), Result.takeError());
+          return;
+        }
+
+        /// Copy the result into a StringMap, where the keys are held by value.
+        StringMap<JITEvaluatedSymbol> Resolved;
+        for (auto &KV : *Result)
+          Resolved[KV.first] = KV.second;
+
+        SharedThis->applyExternalSymbolRelocations(Resolved);
+        SharedThis->resolveLocalRelocations();
+        SharedThis->registerEHFrames();
+        std::string ErrMsg;
+        // finalizeMemory returns true on failure and fills in ErrMsg.
+        if (SharedThis->MemMgr.finalizeMemory(&ErrMsg))
+          OnEmitted(std::move(O), std::move(Info),
+                    make_error<StringError>(std::move(ErrMsg),
+                                            inconvertibleErrorCode()));
+        else
+          OnEmitted(std::move(O), std::move(Info), Error::success());
+      };
+
+  JITSymbolResolver::LookupSet Symbols;
+
+  for (auto &RelocKV : SharedThis->ExternalSymbolRelocations) {
+    StringRef Name = RelocKV.first();
+    if (Name.empty()) // Skip absolute symbol relocations.
+      continue;
+    assert(!SharedThis->GlobalSymbolTable.count(Name) &&
+           "Name already processed. RuntimeDyld instances can not be re-used "
+           "when finalizing with finalizeAsync.");
+    Symbols.insert(Name);
+  }
+
+  if (!Symbols.empty()) {
+    SharedThis->Resolver.lookup(Symbols, std::move(PostResolveContinuation));
+  } else
+    // Nothing to look up: run the continuation inline with an empty result.
+    PostResolveContinuation(std::map<StringRef, JITEvaluatedSymbol>());
+}
+
+//===----------------------------------------------------------------------===//
+// RuntimeDyld class implementation
+
+// Map an object-file section back to the load address RuntimeDyld assigned
+// it; sections that were never loaded report zero.
+uint64_t RuntimeDyld::LoadedObjectInfo::getSectionLoadAddress(
+    const object::SectionRef &Sec) const {
+  auto Entry = ObjSecToIDMap.find(Sec);
+  if (Entry == ObjSecToIDMap.end())
+    return 0;
+  return RTDyld.Sections[Entry->second].getLoadAddress();
+}
+
+// Default implementation: TLS allocation is an optional memory-manager
+// capability; without an override, loading a TLS section is a fatal error.
+RuntimeDyld::MemoryManager::TLSSection
+RuntimeDyld::MemoryManager::allocateTLSSection(uintptr_t Size,
+                                               unsigned Alignment,
+                                               unsigned SectionID,
+                                               StringRef SectionName) {
+  report_fatal_error("allocation of TLS not implemented");
+}
+
+// Out-of-line anchor methods: pin these classes' vtables to this file.
+void RuntimeDyld::MemoryManager::anchor() {}
+void JITSymbolResolver::anchor() {}
+void LegacyJITSymbolResolver::anchor() {}
+
+// Construct a RuntimeDyld front-end; the format-specific impl (Dyld) is
+// created lazily on the first loadObject call.
+RuntimeDyld::RuntimeDyld(RuntimeDyld::MemoryManager &MemMgr,
+                         JITSymbolResolver &Resolver)
+    : MemMgr(MemMgr), Resolver(Resolver) {
+  // FIXME: There's a potential issue lurking here if a single instance of
+  // RuntimeDyld is used to load multiple objects. The current implementation
+  // associates a single memory manager with a RuntimeDyld instance. Even
+  // though the public class spawns a new 'impl' instance for each load,
+  // they share a single memory manager. This can become a problem when page
+  // permissions are applied.
+  Dyld = nullptr;
+  ProcessAllSections = false;
+}
+
+// Out-of-line so the unique_ptr<RuntimeDyldImpl> member is destroyed where
+// the full RuntimeDyldImpl type is visible.
+RuntimeDyld::~RuntimeDyld() = default;
+
+// Build a COFF dyld and propagate the RuntimeDyld-level settings to it.
+static std::unique_ptr<RuntimeDyldCOFF>
+createRuntimeDyldCOFF(
+    Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+    JITSymbolResolver &Resolver, bool ProcessAllSections,
+    RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+  auto Dyld = RuntimeDyldCOFF::create(Arch, MM, Resolver);
+  Dyld->setProcessAllSections(ProcessAllSections);
+  Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+  return Dyld;
+}
+
+// Build an ELF dyld and propagate the RuntimeDyld-level settings to it.
+static std::unique_ptr<RuntimeDyldELF>
+createRuntimeDyldELF(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+                     JITSymbolResolver &Resolver, bool ProcessAllSections,
+                     RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+  auto Dyld = RuntimeDyldELF::create(Arch, MM, Resolver);
+  Dyld->setProcessAllSections(ProcessAllSections);
+  Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+  return Dyld;
+}
+
+// Build a Mach-O dyld and propagate the RuntimeDyld-level settings to it.
+static std::unique_ptr<RuntimeDyldMachO>
+createRuntimeDyldMachO(
+    Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+    JITSymbolResolver &Resolver,
+    bool ProcessAllSections,
+    RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+  auto Dyld = RuntimeDyldMachO::create(Arch, MM, Resolver);
+  Dyld->setProcessAllSections(ProcessAllSections);
+  Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+  return Dyld;
+}
+
+// Load one object file, lazily creating the format-specific dyld on the
+// first call, and notify the memory manager afterwards.
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyld::loadObject(const ObjectFile &Obj) {
+  if (!Dyld) {
+    auto Arch = static_cast<Triple::ArchType>(Obj.getArch());
+    if (Obj.isELF())
+      Dyld = createRuntimeDyldELF(Arch, MemMgr, Resolver, ProcessAllSections,
+                                  std::move(NotifyStubEmitted));
+    else if (Obj.isMachO())
+      Dyld = createRuntimeDyldMachO(Arch, MemMgr, Resolver, ProcessAllSections,
+                                    std::move(NotifyStubEmitted));
+    else if (Obj.isCOFF())
+      Dyld = createRuntimeDyldCOFF(Arch, MemMgr, Resolver, ProcessAllSections,
+                                   std::move(NotifyStubEmitted));
+    else
+      report_fatal_error("Incompatible object format!");
+  }
+
+  // Even with a dyld in place, later objects must match its format/arch.
+  if (!Dyld->isCompatibleFile(Obj))
+    report_fatal_error("Incompatible object format!");
+
+  auto LoadedObjInfo = Dyld->loadObject(Obj);
+  MemMgr.notifyObjectLoaded(*this, Obj);
+  return LoadedObjInfo;
+}
+
+// Symbol accessors. Before the first loadObject there is no impl, so the
+// non-asserting variants return empty results.
+void *RuntimeDyld::getSymbolLocalAddress(StringRef Name) const {
+  return Dyld ? Dyld->getSymbolLocalAddress(Name) : nullptr;
+}
+
+unsigned RuntimeDyld::getSymbolSectionID(StringRef Name) const {
+  assert(Dyld && "No RuntimeDyld instance attached");
+  return Dyld->getSymbolSectionID(Name);
+}
+
+JITEvaluatedSymbol RuntimeDyld::getSymbol(StringRef Name) const {
+  return Dyld ? Dyld->getSymbol(Name) : JITEvaluatedSymbol(nullptr);
+}
+
+std::map<StringRef, JITEvaluatedSymbol> RuntimeDyld::getSymbolTable() const {
+  if (!Dyld)
+    return {};
+  return Dyld->getSymbolTable();
+}
+
+// The following methods forward directly to the format-specific dyld; they
+// require that loadObject() has already been called to create one.
+void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
+
+void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
+  Dyld->reassignSectionAddress(SectionID, Addr);
+}
+
+void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
+                                    uint64_t TargetAddress) {
+  Dyld->mapSectionAddress(LocalAddress, TargetAddress);
+}
+
+bool RuntimeDyld::hasError() { return Dyld->hasError(); }
+
+StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
+
+// Resolve relocations and register EH frames, finalizing memory exactly once:
+// if the memory manager was already finalization-locked by an outer caller,
+// leave the lock in place and skip finalizeMemory here.
+void RuntimeDyld::finalizeWithMemoryManagerLocking() {
+  bool MemoryFinalizationLocked = MemMgr.FinalizationLocked;
+  MemMgr.FinalizationLocked = true;
+  resolveRelocations();
+  registerEHFrames();
+  if (!MemoryFinalizationLocked) {
+    MemMgr.finalizeMemory();
+    MemMgr.FinalizationLocked = false;
+  }
+}
+
+// Section accessors; both require that loadObject() has created the impl.
+StringRef RuntimeDyld::getSectionContent(unsigned SectionID) const {
+  assert(Dyld && "No Dyld instance attached");
+  return Dyld->getSectionContent(SectionID);
+}
+
+uint64_t RuntimeDyld::getSectionLoadAddress(unsigned SectionID) const {
+  assert(Dyld && "No Dyld instance attached");
+  return Dyld->getSectionLoadAddress(SectionID);
+}
+
+// EH-frame registration is a no-op until the impl exists.
+void RuntimeDyld::registerEHFrames() {
+  if (!Dyld)
+    return;
+  Dyld->registerEHFrames();
+}
+
+void RuntimeDyld::deregisterEHFrames() {
+  if (!Dyld)
+    return;
+  Dyld->deregisterEHFrames();
+}
+// FIXME: Kill this with fire once we have a new JIT linker: this is only here
+// so that we can re-use RuntimeDyld's implementation without twisting the
+// interface any further for ORC's purposes.
+//
+// Load the object, hand the symbol table to OnLoaded, then finish linking
+// asynchronously via finalizeAsync (which takes ownership of the impl and
+// eventually calls OnEmitted exactly once).
+void jitLinkForORC(
+    object::OwningBinary<object::ObjectFile> O,
+    RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver,
+    bool ProcessAllSections,
+    unique_function<Error(const object::ObjectFile &Obj,
+                          RuntimeDyld::LoadedObjectInfo &LoadedObj,
+                          std::map<StringRef, JITEvaluatedSymbol>)>
+        OnLoaded,
+    unique_function<void(object::OwningBinary<object::ObjectFile>,
+                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
+        OnEmitted) {
+
+  RuntimeDyld RTDyld(MemMgr, Resolver);
+  RTDyld.setProcessAllSections(ProcessAllSections);
+
+  auto Info = RTDyld.loadObject(*O.getBinary());
+
+  // Loading failures are reported through OnEmitted, not thrown.
+  if (RTDyld.hasError()) {
+    OnEmitted(std::move(O), std::move(Info),
+              make_error<StringError>(RTDyld.getErrorString(),
+                                      inconvertibleErrorCode()));
+    return;
+  }
+
+  if (auto Err = OnLoaded(*O.getBinary(), *Info, RTDyld.getSymbolTable())) {
+    OnEmitted(std::move(O), std::move(Info), std::move(Err));
+    return;
+  }
+
+  RuntimeDyldImpl::finalizeAsync(std::move(RTDyld.Dyld), std::move(OnEmitted),
+                                 std::move(O), std::move(Info));
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
new file mode 100644
index 000000000000..25a2d8780fb5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
@@ -0,0 +1,122 @@
+//===-- RuntimeDyldCOFF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of COFF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldCOFF.h"
+#include "Targets/RuntimeDyldCOFFAArch64.h"
+#include "Targets/RuntimeDyldCOFFI386.h"
+#include "Targets/RuntimeDyldCOFFThumb.h"
+#include "Targets/RuntimeDyldCOFFX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
// LoadedObjectInfo flavor for COFF objects. No debug-adjusted object is
// produced for COFF, so getObjectForDebug returns an empty binary.
class LoadedCOFFObjectInfo final
    : public LoadedObjectInfoHelper<LoadedCOFFObjectInfo,
                                    RuntimeDyld::LoadedObjectInfo> {
public:
  LoadedCOFFObjectInfo(
      RuntimeDyldImpl &RTDyld,
      RuntimeDyld::LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}

  OwningBinary<ObjectFile>
  getObjectForDebug(const ObjectFile &Obj) const override {
    // Intentionally empty: debuggers get no relocated copy for COFF.
    return OwningBinary<ObjectFile>();
  }
};
+}
+
+namespace llvm {
+
+std::unique_ptr<RuntimeDyldCOFF>
+llvm::RuntimeDyldCOFF::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default: llvm_unreachable("Unsupported target for RuntimeDyldCOFF.");
+ case Triple::x86:
+ return std::make_unique<RuntimeDyldCOFFI386>(MemMgr, Resolver);
+ case Triple::thumb:
+ return std::make_unique<RuntimeDyldCOFFThumb>(MemMgr, Resolver);
+ case Triple::x86_64:
+ return std::make_unique<RuntimeDyldCOFFX86_64>(MemMgr, Resolver);
+ case Triple::aarch64:
+ return std::make_unique<RuntimeDyldCOFFAArch64>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldCOFF::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O)) {
+ return std::make_unique<LoadedCOFFObjectInfo>(*this, *ObjSectionToIDOrErr);
+ } else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
uint64_t RuntimeDyldCOFF::getSymbolOffset(const SymbolRef &Sym) {
  // The value in a relocatable COFF object is the offset.
  // getValue() is wrapped in cantFail: it does not fail for COFF symbols.
  return cantFail(Sym.getValue());
}
+
// Return the offset (within section SectionID) of the pointer-sized entry
// for the DLL-import symbol Name, creating the entry and its relocation on
// first use. Entries are memoized in Stubs so each symbol gets one slot.
uint64_t RuntimeDyldCOFF::getDLLImportOffset(unsigned SectionID, StubMap &Stubs,
                                             StringRef Name,
                                             bool SetSectionIDMinus1) {
  LLVM_DEBUG(dbgs() << "Getting DLLImport entry for " << Name << "... ");
  assert(Name.starts_with(getImportSymbolPrefix()) &&
         "Not a DLLImport symbol?");
  RelocationValueRef Reloc;
  // NOTE(review): stores Name's underlying pointer; assumes Name outlives
  // the StubMap entry — TODO confirm against callers.
  Reloc.SymbolName = Name.data();
  auto I = Stubs.find(Reloc);
  if (I != Stubs.end()) {
    // Already emitted an entry for this symbol; reuse it.
    LLVM_DEBUG(dbgs() << format("{0:x8}", I->second) << "\n");
    return I->second;
  }

  assert(SectionID < Sections.size() && "SectionID out of range");
  auto &Sec = Sections[SectionID];
  // Align the new entry to the pointer size, then bump the stub offset past
  // it (advanceStubOffset takes a delta relative to the current offset).
  auto EntryOffset = alignTo(Sec.getStubOffset(), PointerSize);
  Sec.advanceStubOffset(EntryOffset + PointerSize - Sec.getStubOffset());
  Stubs[Reloc] = EntryOffset;

  // Emit a pointer-sized absolute relocation so the entry is filled in with
  // the imported symbol's address at resolve time.
  RelocationEntry RE(SectionID, EntryOffset, PointerReloc, 0, false,
                     Log2_64(PointerSize));
  // Hack to tell I386/Thumb resolveRelocation that this isn't section relative.
  if (SetSectionIDMinus1)
    RE.Sections.SectionA = -1;
  // Strip the "__imp_" prefix: the relocation targets the real symbol.
  addRelocationForSymbol(RE, Name.drop_front(getImportSymbolPrefix().size()));

  LLVM_DEBUG({
    dbgs() << "Creating entry at "
           << formatv("{0:x16} + {1:x8} ( {2:x16} )", Sec.getLoadAddress(),
                      EntryOffset, Sec.getLoadAddress() + EntryOffset)
           << "\n";
  });
  return EntryOffset;
}
+
// This linker handles exactly the COFF object format.
bool RuntimeDyldCOFF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isCOFF();
}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
new file mode 100644
index 000000000000..41ee06c15448
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
@@ -0,0 +1,61 @@
+//===-- RuntimeDyldCOFF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIME_DYLD_COFF_H
+#define LLVM_RUNTIME_DYLD_COFF_H
+
+#include "RuntimeDyldImpl.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+// Common base class for COFF dynamic linker support.
+// Concrete subclasses for each target can be found in ./Targets.
class RuntimeDyldCOFF : public RuntimeDyldImpl {

public:
  std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
  loadObject(const object::ObjectFile &Obj) override;
  bool isCompatibleFile(const object::ObjectFile &Obj) const override;

  // Factory: construct the target-specific COFF dyld for Arch
  // (x86, thumb, x86_64, aarch64).
  static std::unique_ptr<RuntimeDyldCOFF>
  create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
         JITSymbolResolver &Resolver);

protected:
  // PointerSize: target pointer width in bytes (must be 4 or 8).
  // PointerReloc: target's pointer-sized relocation type, used when emitting
  // DLL-import entries in getDLLImportOffset.
  RuntimeDyldCOFF(RuntimeDyld::MemoryManager &MemMgr,
                  JITSymbolResolver &Resolver, unsigned PointerSize,
                  uint32_t PointerReloc)
      : RuntimeDyldImpl(MemMgr, Resolver), PointerSize(PointerSize),
        PointerReloc(PointerReloc) {
    assert((PointerSize == 4 || PointerSize == 8) && "Unexpected pointer size");
  }

  // Offset of Sym within its section (COFF symbol values are offsets).
  uint64_t getSymbolOffset(const SymbolRef &Sym);
  // Offset of the (lazily created) pointer entry for a DLL-import symbol.
  uint64_t getDLLImportOffset(unsigned SectionID, StubMap &Stubs,
                              StringRef Name, bool SetSectionIDMinus1 = false);

  // Prefix marking symbols that refer to DLL import-table entries.
  static constexpr StringRef getImportSymbolPrefix() { return "__imp_"; }

private:
  unsigned PointerSize;
  uint32_t PointerReloc;
};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
new file mode 100644
index 000000000000..b98d455cea37
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -0,0 +1,1061 @@
+//===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include <cctype>
+#include <memory>
+#include <utility>
+
+#define DEBUG_TYPE "rtdyld"
+
+using namespace llvm;
+
+namespace {
// Bundle of MC-layer objects needed to disassemble and pretty-print
// instructions for one target triple; built by getTargetInfo().
struct TargetInfo {
  const Target *TheTarget;
  std::unique_ptr<MCSubtargetInfo> STI;
  std::unique_ptr<MCRegisterInfo> MRI;
  std::unique_ptr<MCAsmInfo> MAI;
  std::unique_ptr<MCContext> Ctx;
  std::unique_ptr<MCDisassembler> Disassembler;
  std::unique_ptr<MCInstrInfo> MII;
  std::unique_ptr<MCInstPrinter> InstPrinter;
};
+} // anonymous namespace
+
+namespace llvm {
+
+// Helper class that implements the language evaluated by RuntimeDyldChecker.
+class RuntimeDyldCheckerExprEval {
+public:
  // Note: the ErrStream parameter is currently unused — diagnostics are
  // written to Checker.ErrStream instead.
  RuntimeDyldCheckerExprEval(const RuntimeDyldCheckerImpl &Checker,
                             raw_ostream &ErrStream)
      : Checker(Checker) {}
+
+ bool evaluate(StringRef Expr) const {
+ // Expect equality expression of the form 'LHS = RHS'.
+ Expr = Expr.trim();
+ size_t EQIdx = Expr.find('=');
+
+ ParseContext OutsideLoad(false);
+
+ // Evaluate LHS.
+ StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
+ StringRef RemainingExpr;
+ EvalResult LHSResult;
+ std::tie(LHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(LHSExpr, OutsideLoad), OutsideLoad);
+ if (LHSResult.hasError())
+ return handleError(Expr, LHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
+
+ // Evaluate RHS.
+ StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RHSExpr, OutsideLoad), OutsideLoad);
+ if (RHSResult.hasError())
+ return handleError(Expr, RHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
+
+ if (LHSResult.getValue() != RHSResult.getValue()) {
+ Checker.ErrStream << "Expression '" << Expr << "' is false: "
+ << format("0x%" PRIx64, LHSResult.getValue())
+ << " != " << format("0x%" PRIx64, RHSResult.getValue())
+ << "\n";
+ return false;
+ }
+ return true;
+ }
+
+private:
+ // RuntimeDyldCheckerExprEval requires some context when parsing exprs. In
+ // particular, it needs to know whether a symbol is being evaluated in the
+ // context of a load, in which case we want the linker's local address for
+ // the symbol, or outside of a load, in which case we want the symbol's
+ // address in the remote target.
+
  // Parsing context: selects whether symbols resolve to the linker's local
  // address (inside a load expression) or the remote target address.
  struct ParseContext {
    bool IsInsideLoad;
    ParseContext(bool IsInsideLoad) : IsInsideLoad(IsInsideLoad) {}
  };
+
+ const RuntimeDyldCheckerImpl &Checker;
+
+ enum class BinOpToken : unsigned {
+ Invalid,
+ Add,
+ Sub,
+ BitwiseAnd,
+ BitwiseOr,
+ ShiftLeft,
+ ShiftRight
+ };
+
+ class EvalResult {
+ public:
+ EvalResult() : Value(0) {}
+ EvalResult(uint64_t Value) : Value(Value) {}
+ EvalResult(std::string ErrorMsg)
+ : Value(0), ErrorMsg(std::move(ErrorMsg)) {}
+ uint64_t getValue() const { return Value; }
+ bool hasError() const { return ErrorMsg != ""; }
+ const std::string &getErrorMsg() const { return ErrorMsg; }
+
+ private:
+ uint64_t Value;
+ std::string ErrorMsg;
+ };
+
  // Extract the leading token of Expr for use in an error message: a whole
  // symbol, a whole number, or a one/two-character operator.
  StringRef getTokenForError(StringRef Expr) const {
    if (Expr.empty())
      return "";

    StringRef Token, Remaining;
    if (isalpha(Expr[0]))
      std::tie(Token, Remaining) = parseSymbol(Expr);
    else if (isdigit(Expr[0]))
      std::tie(Token, Remaining) = parseNumberString(Expr);
    else {
      // Operators are single characters except the two shift tokens.
      unsigned TokLen = 1;
      if (Expr.starts_with("<<") || Expr.starts_with(">>"))
        TokLen = 2;
      Token = Expr.substr(0, TokLen);
    }
    return Token;
  }
+
+ EvalResult unexpectedToken(StringRef TokenStart, StringRef SubExpr,
+ StringRef ErrText) const {
+ std::string ErrorMsg("Encountered unexpected token '");
+ ErrorMsg += getTokenForError(TokenStart);
+ if (SubExpr != "") {
+ ErrorMsg += "' while parsing subexpression '";
+ ErrorMsg += SubExpr;
+ }
+ ErrorMsg += "'";
+ if (ErrText != "") {
+ ErrorMsg += " ";
+ ErrorMsg += ErrText;
+ }
+ return EvalResult(std::move(ErrorMsg));
+ }
+
  // Report an error result on Checker.ErrStream and return false so callers
  // can 'return handleError(...)' directly from evaluate().
  bool handleError(StringRef Expr, const EvalResult &R) const {
    assert(R.hasError() && "Not an error result.");
    Checker.ErrStream << "Error evaluating expression '" << Expr
                      << "': " << R.getErrorMsg() << "\n";
    return false;
  }
+
+ std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
+ if (Expr.empty())
+ return std::make_pair(BinOpToken::Invalid, "");
+
+ // Handle the two 2-character tokens.
+ if (Expr.starts_with("<<"))
+ return std::make_pair(BinOpToken::ShiftLeft, Expr.substr(2).ltrim());
+ if (Expr.starts_with(">>"))
+ return std::make_pair(BinOpToken::ShiftRight, Expr.substr(2).ltrim());
+
+ // Handle one-character tokens.
+ BinOpToken Op;
+ switch (Expr[0]) {
+ default:
+ return std::make_pair(BinOpToken::Invalid, Expr);
+ case '+':
+ Op = BinOpToken::Add;
+ break;
+ case '-':
+ Op = BinOpToken::Sub;
+ break;
+ case '&':
+ Op = BinOpToken::BitwiseAnd;
+ break;
+ case '|':
+ Op = BinOpToken::BitwiseOr;
+ break;
+ }
+
+ return std::make_pair(Op, Expr.substr(1).ltrim());
+ }
+
+ EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
+ const EvalResult &RHSResult) const {
+ switch (Op) {
+ default:
+ llvm_unreachable("Tried to evaluate unrecognized operation.");
+ case BinOpToken::Add:
+ return EvalResult(LHSResult.getValue() + RHSResult.getValue());
+ case BinOpToken::Sub:
+ return EvalResult(LHSResult.getValue() - RHSResult.getValue());
+ case BinOpToken::BitwiseAnd:
+ return EvalResult(LHSResult.getValue() & RHSResult.getValue());
+ case BinOpToken::BitwiseOr:
+ return EvalResult(LHSResult.getValue() | RHSResult.getValue());
+ case BinOpToken::ShiftLeft:
+ return EvalResult(LHSResult.getValue() << RHSResult.getValue());
+ case BinOpToken::ShiftRight:
+ return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
+ }
+ }
+
+ // Parse a symbol and return a (string, string) pair representing the symbol
+ // name and expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
+ size_t FirstNonSymbol = Expr.find_first_not_of("0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ ":_.$");
+ return std::make_pair(Expr.substr(0, FirstNonSymbol),
+ Expr.substr(FirstNonSymbol).ltrim());
+ }
+
+ // Evaluate a call to decode_operand. Decode the instruction operand at the
+ // given symbol and get the value of the requested operand.
+ // Returns an error if the instruction cannot be decoded, or the requested
+ // operand is not an immediate.
+ // On success, returns a pair containing the value of the operand, plus
+ // the expression remaining to be evaluated.
  std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
    // Syntax: decode_operand(<symbol>[ + <offset>], <operand-index>)
    if (!Expr.starts_with("("))
      return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
    StringRef RemainingExpr = Expr.substr(1).ltrim();
    StringRef Symbol;
    std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);

    if (!Checker.isSymbolValid(Symbol))
      return std::make_pair(
          EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
          "");

    // Optional '+ <number>' byte offset into the symbol's content.
    int64_t Offset = 0;
    BinOpToken BinOp;
    std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
    switch (BinOp) {
    case BinOpToken::Add: {
      EvalResult Number;
      std::tie(Number, RemainingExpr) = evalNumberExpr(RemainingExpr);
      Offset = Number.getValue();
      break;
    }
    case BinOpToken::Invalid:
      break;
    default:
      return std::make_pair(
          unexpectedToken(RemainingExpr, RemainingExpr,
                          "expected '+' for offset or ',' if no offset"),
          "");
    }

    if (!RemainingExpr.starts_with(","))
      return std::make_pair(
          unexpectedToken(RemainingExpr, RemainingExpr, "expected ','"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    EvalResult OpIdxExpr;
    std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
    if (OpIdxExpr.hasError())
      return std::make_pair(OpIdxExpr, "");

    if (!RemainingExpr.starts_with(")"))
      return std::make_pair(
          unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    MCInst Inst;
    uint64_t Size;
    if (!decodeInst(Symbol, Inst, Size, Offset))
      return std::make_pair(
          EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
          "");

    unsigned OpIdx = OpIdxExpr.getValue();

    // Shared error path: append a pretty-printed dump of Inst to the message
    // already staged in ErrMsgStream and return it as an error result.
    auto printInst = [this](StringRef Symbol, MCInst Inst,
                            raw_string_ostream &ErrMsgStream) {
      auto TT = Checker.getTripleForSymbol(Checker.getTargetFlag(Symbol));
      auto TI = getTargetInfo(TT, Checker.getCPU(), Checker.getFeatures());
      if (auto E = TI.takeError()) {
        errs() << "Error obtaining instruction printer: "
               << toString(std::move(E)) << "\n";
        return std::make_pair(EvalResult(ErrMsgStream.str()), "");
      }
      Inst.dump_pretty(ErrMsgStream, TI->InstPrinter.get());
      return std::make_pair(EvalResult(ErrMsgStream.str()), "");
    };

    if (OpIdx >= Inst.getNumOperands()) {
      std::string ErrMsg;
      raw_string_ostream ErrMsgStream(ErrMsg);
      ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
                   << "' for instruction '" << Symbol
                   << "'. Instruction has only "
                   << format("%i", Inst.getNumOperands())
                   << " operands.\nInstruction is:\n  ";

      return printInst(Symbol, Inst, ErrMsgStream);
    }

    const MCOperand &Op = Inst.getOperand(OpIdx);
    // Only immediate operands have a value this language can use.
    if (!Op.isImm()) {
      std::string ErrMsg;
      raw_string_ostream ErrMsgStream(ErrMsg);
      ErrMsgStream << "Operand '" << format("%i", OpIdx) << "' of instruction '"
                   << Symbol << "' is not an immediate.\nInstruction is:\n  ";

      return printInst(Symbol, Inst, ErrMsgStream);
    }

    return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
  }
+
+ // Evaluate a call to next_pc.
+ // Decode the instruction at the given symbol and return the following program
+ // counter.
+ // Returns an error if the instruction cannot be decoded.
+ // On success, returns a pair containing the next PC, plus of the
+ // expression remaining to be evaluated.
  std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr,
                                              ParseContext PCtx) const {
    // Syntax: next_pc(<symbol>)
    if (!Expr.starts_with("("))
      return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
    StringRef RemainingExpr = Expr.substr(1).ltrim();
    StringRef Symbol;
    std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);

    if (!Checker.isSymbolValid(Symbol))
      return std::make_pair(
          EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
          "");

    if (!RemainingExpr.starts_with(")"))
      return std::make_pair(
          unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    MCInst Inst;
    uint64_t InstSize;
    if (!decodeInst(Symbol, Inst, InstSize, 0))
      return std::make_pair(
          EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
          "");

    // Symbol address in the appropriate address space for the context.
    uint64_t SymbolAddr = PCtx.IsInsideLoad
                              ? Checker.getSymbolLocalAddr(Symbol)
                              : Checker.getSymbolRemoteAddr(Symbol);

    // On ARM the observable PC runs 8 bytes ahead of the executing
    // instruction; the extra 4 below combines with the 4-byte InstSize to
    // produce that 8-byte bias. Other targets use InstSize alone.
    auto TT = Checker.getTripleForSymbol(Checker.getTargetFlag(Symbol));
    uint64_t PCOffset = TT.getArch() == Triple::ArchType::arm ? 4 : 0;

    uint64_t NextPC = SymbolAddr + InstSize + PCOffset;

    return std::make_pair(EvalResult(NextPC), RemainingExpr);
  }
+
+ // Evaluate a call to stub_addr/got_addr.
+ // Look up and return the address of the stub for the given
+ // (<file name>, <section name>, <symbol name>) tuple.
+ // On success, returns a pair containing the stub address, plus the expression
+ // remaining to be evaluated.
  std::pair<EvalResult, StringRef>
  evalStubOrGOTAddr(StringRef Expr, ParseContext PCtx, bool IsStubAddr) const {
    // Syntax: stub_addr(<container>, <symbol>[, <kind-filter>]) — likewise
    // got_addr with IsStubAddr == false.
    if (!Expr.starts_with("("))
      return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
    StringRef RemainingExpr = Expr.substr(1).ltrim();

    // Handle file-name specially, as it may contain characters that aren't
    // legal for symbols.
    StringRef StubContainerName;
    size_t ComaIdx = RemainingExpr.find(',');
    // If there is no comma, find() returns npos and substr clamps; the
    // "expected ','" check below then reports the error.
    StubContainerName = RemainingExpr.substr(0, ComaIdx).rtrim();
    RemainingExpr = RemainingExpr.substr(ComaIdx).ltrim();

    if (!RemainingExpr.starts_with(","))
      return std::make_pair(
          unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    StringRef Symbol;
    std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);

    // Parse optional parameter to filter by stub kind
    StringRef KindNameFilter;
    if (RemainingExpr.starts_with(",")) {
      RemainingExpr = RemainingExpr.substr(1).ltrim();
      size_t ClosingBracket = RemainingExpr.find(")");
      KindNameFilter = RemainingExpr.substr(0, ClosingBracket);
      RemainingExpr = RemainingExpr.substr(ClosingBracket);
    }

    if (!RemainingExpr.starts_with(")"))
      return std::make_pair(
          unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    // Delegate the actual lookup to the checker implementation; it reports
    // failure via a non-empty error string.
    uint64_t StubAddr;
    std::string ErrorMsg;
    std::tie(StubAddr, ErrorMsg) =
        Checker.getStubOrGOTAddrFor(StubContainerName, Symbol, KindNameFilter,
                                    PCtx.IsInsideLoad, IsStubAddr);

    if (ErrorMsg != "")
      return std::make_pair(EvalResult(ErrorMsg), "");

    return std::make_pair(EvalResult(StubAddr), RemainingExpr);
  }
+
  // Evaluate section_addr(<file-name>, <section-name>): look up the address
  // of the named section in the named file, local or remote depending on
  // the parse context.
  std::pair<EvalResult, StringRef> evalSectionAddr(StringRef Expr,
                                                   ParseContext PCtx) const {
    if (!Expr.starts_with("("))
      return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
    StringRef RemainingExpr = Expr.substr(1).ltrim();

    // Handle file-name specially, as it may contain characters that aren't
    // legal for symbols.
    StringRef FileName;
    size_t ComaIdx = RemainingExpr.find(',');
    FileName = RemainingExpr.substr(0, ComaIdx).rtrim();
    RemainingExpr = RemainingExpr.substr(ComaIdx).ltrim();

    if (!RemainingExpr.starts_with(","))
      return std::make_pair(
          unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    // Section names may also contain non-symbol characters; scan to ')'.
    StringRef SectionName;
    size_t CloseParensIdx = RemainingExpr.find(')');
    SectionName = RemainingExpr.substr(0, CloseParensIdx).rtrim();
    RemainingExpr = RemainingExpr.substr(CloseParensIdx).ltrim();

    if (!RemainingExpr.starts_with(")"))
      return std::make_pair(
          unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    uint64_t StubAddr;
    std::string ErrorMsg;
    std::tie(StubAddr, ErrorMsg) = Checker.getSectionAddr(
        FileName, SectionName, PCtx.IsInsideLoad);

    if (ErrorMsg != "")
      return std::make_pair(EvalResult(ErrorMsg), "");

    return std::make_pair(EvalResult(StubAddr), RemainingExpr);
  }
+
+ // Evaluate an identifier expr, which may be a symbol, or a call to
+ // one of the builtin functions: get_insn_opcode or get_insn_length.
+ // Return the result, plus the expression remaining to be parsed.
  std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr,
                                                      ParseContext PCtx) const {
    StringRef Symbol;
    StringRef RemainingExpr;
    std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);

    // Check for builtin function calls.
    if (Symbol == "decode_operand")
      return evalDecodeOperand(RemainingExpr);
    else if (Symbol == "next_pc")
      return evalNextPC(RemainingExpr, PCtx);
    else if (Symbol == "stub_addr")
      return evalStubOrGOTAddr(RemainingExpr, PCtx, true);
    else if (Symbol == "got_addr")
      return evalStubOrGOTAddr(RemainingExpr, PCtx, false);
    else if (Symbol == "section_addr")
      return evalSectionAddr(RemainingExpr, PCtx);

    if (!Checker.isSymbolValid(Symbol)) {
      std::string ErrMsg("No known address for symbol '");
      ErrMsg += Symbol;
      ErrMsg += "'";
      // Assembler-local labels ('L...') are not visible to the checker;
      // give a targeted hint for that common mistake.
      if (Symbol.starts_with("L"))
        ErrMsg += " (this appears to be an assembler local label - "
                  " perhaps drop the 'L'?)";

      return std::make_pair(EvalResult(ErrMsg), "");
    }

    // The value for the symbol depends on the context we're evaluating in:
    // Inside a load this is the address in the linker's memory, outside a
    // load it's the address in the target processes memory.
    uint64_t Value = PCtx.IsInsideLoad ? Checker.getSymbolLocalAddr(Symbol)
                                       : Checker.getSymbolRemoteAddr(Symbol);

    // Looks like a plain symbol reference.
    return std::make_pair(EvalResult(Value), RemainingExpr);
  }
+
+ // Parse a number (hexadecimal or decimal) and return a (string, string)
+ // pair representing the number and the expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
+ size_t FirstNonDigit = StringRef::npos;
+ if (Expr.starts_with("0x")) {
+ FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ } else {
+ FirstNonDigit = Expr.find_first_not_of("0123456789");
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ }
+ return std::make_pair(Expr.substr(0, FirstNonDigit),
+ Expr.substr(FirstNonDigit));
+ }
+
+ // Evaluate a constant numeric expression (hexadecimal or decimal) and
+ // return a pair containing the result, and the expression remaining to be
+ // evaluated.
+ std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
+ StringRef ValueStr;
+ StringRef RemainingExpr;
+ std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
+
+ if (ValueStr.empty() || !isdigit(ValueStr[0]))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected number"), "");
+ uint64_t Value;
+ ValueStr.getAsInteger(0, Value);
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Evaluate an expression of the form "(<expr>)" and return a pair
+ // containing the result of evaluating <expr>, plus the expression
+ // remaining to be parsed.
  std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr,
                                                  ParseContext PCtx) const {
    assert(Expr.starts_with("(") && "Not a parenthesized expression");
    EvalResult SubExprResult;
    StringRef RemainingExpr;
    // Evaluate the full (possibly binary) expression inside the parens.
    std::tie(SubExprResult, RemainingExpr) =
        evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim(), PCtx), PCtx);
    if (SubExprResult.hasError())
      return std::make_pair(SubExprResult, "");
    if (!RemainingExpr.starts_with(")"))
      return std::make_pair(
          unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();
    return std::make_pair(SubExprResult, RemainingExpr);
  }
+
+ // Evaluate an expression in one of the following forms:
+ // *{<number>}<expr>
+ // Return a pair containing the result, plus the expression remaining to be
+ // parsed.
  std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
    assert(Expr.starts_with("*") && "Not a load expression");
    StringRef RemainingExpr = Expr.substr(1).ltrim();

    // Parse read size.
    if (!RemainingExpr.starts_with("{"))
      return std::make_pair(EvalResult("Expected '{' following '*'."), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();
    EvalResult ReadSizeExpr;
    std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
    if (ReadSizeExpr.hasError())
      return std::make_pair(ReadSizeExpr, RemainingExpr);
    uint64_t ReadSize = ReadSizeExpr.getValue();
    // Reads are limited to 1..8 bytes (at most one uint64_t).
    if (ReadSize < 1 || ReadSize > 8)
      return std::make_pair(EvalResult("Invalid size for dereference."), "");
    if (!RemainingExpr.starts_with("}"))
      return std::make_pair(EvalResult("Missing '}' for dereference."), "");
    RemainingExpr = RemainingExpr.substr(1).ltrim();

    // Evaluate the expression representing the load address. Note the
    // inside-load context: symbols resolve to the linker's local copy.
    ParseContext LoadCtx(true);
    EvalResult LoadAddrExprResult;
    std::tie(LoadAddrExprResult, RemainingExpr) =
        evalComplexExpr(evalSimpleExpr(RemainingExpr, LoadCtx), LoadCtx);

    if (LoadAddrExprResult.hasError())
      return std::make_pair(LoadAddrExprResult, "");

    uint64_t LoadAddr = LoadAddrExprResult.getValue();

    // If there is no error but the content pointer is null then this is a
    // zero-fill symbol/section: its bytes read as zero (EvalResult(0)).
    if (LoadAddr == 0)
      return std::make_pair(0, RemainingExpr);

    return std::make_pair(
        EvalResult(Checker.readMemoryAtAddr(LoadAddr, ReadSize)),
        RemainingExpr);
  }
+
+ // Evaluate a "simple" expression. This is any expression that _isn't_ an
+ // un-parenthesized binary expression.
+ //
+ // "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
+ //
+ // Returns a pair containing the result of the evaluation, plus the
+ // expression remaining to be parsed.
  std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr,
                                                  ParseContext PCtx) const {
    EvalResult SubExprResult;
    StringRef RemainingExpr;

    if (Expr.empty())
      return std::make_pair(EvalResult("Unexpected end of expression"), "");

    // Dispatch on the first character: parens, load, identifier, or number.
    if (Expr[0] == '(')
      std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr, PCtx);
    else if (Expr[0] == '*')
      std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
    else if (isalpha(Expr[0]) || Expr[0] == '_')
      std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr, PCtx);
    else if (isdigit(Expr[0]))
      std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
    else
      return std::make_pair(
          unexpectedToken(Expr, Expr,
                          "expected '(', '*', identifier, or number"), "");

    if (SubExprResult.hasError())
      return std::make_pair(SubExprResult, RemainingExpr);

    // Evaluate bit-slice if present.
    if (RemainingExpr.starts_with("["))
      std::tie(SubExprResult, RemainingExpr) =
          evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));

    return std::make_pair(SubExprResult, RemainingExpr);
  }
+
+ // Evaluate a bit-slice of an expression.
+ // A bit-slice has the form "<expr>[high:low]". The result of evaluating a
+ // slice is the bits between high and low (inclusive) in the original
+ // expression, right shifted so that the "low" bit is in position 0 in the
+ // result.
+ // Returns a pair containing the result of the slice operation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef>
+ evalSliceExpr(const std::pair<EvalResult, StringRef> &Ctx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) = Ctx;
+
+ assert(RemainingExpr.starts_with("[") && "Not a slice expr.");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult HighBitExpr;
+ std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (HighBitExpr.hasError())
+ return std::make_pair(HighBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.starts_with(":"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ':'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult LowBitExpr;
+ std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (LowBitExpr.hasError())
+ return std::make_pair(LowBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.starts_with("]"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ']'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ unsigned HighBit = HighBitExpr.getValue();
+ unsigned LowBit = LowBitExpr.getValue();
+ uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
+ uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
+ return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
+ }
+
+ // Evaluate a "complex" expression.
+ // Takes an already evaluated subexpression and checks for the presence of a
+ // binary operator, computing the result of the binary operation if one is
+ // found. Used to make arithmetic expressions left-associative.
+ // Returns a pair containing the ultimate result of evaluating the
+ // expression, plus the expression remaining to be evaluated.
  std::pair<EvalResult, StringRef>
  evalComplexExpr(const std::pair<EvalResult, StringRef> &LHSAndRemaining,
                  ParseContext PCtx) const {
    EvalResult LHSResult;
    StringRef RemainingExpr;
    std::tie(LHSResult, RemainingExpr) = LHSAndRemaining;

    // If there was an error, or there's nothing left to evaluate, return the
    // result.
    if (LHSResult.hasError() || RemainingExpr == "")
      return std::make_pair(LHSResult, RemainingExpr);

    // Otherwise check if this is a binary expression.
    BinOpToken BinOp;
    std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);

    // If this isn't a recognized expression just return.
    if (BinOp == BinOpToken::Invalid)
      return std::make_pair(LHSResult, RemainingExpr);

    // This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
    EvalResult RHSResult;
    std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr, PCtx);

    // If there was an error evaluating the RHS, return it.
    if (RHSResult.hasError())
      return std::make_pair(RHSResult, RemainingExpr);

    // This is a binary expression - evaluate and try to continue as a
    // complex expr. The recursion folds each operator as soon as its RHS is
    // known, which is what makes the grammar left-associative.
    EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));

    return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr), PCtx);
  }
+
+  // Disassemble one instruction from Symbol's content, starting Offset bytes
+  // into the symbol. On success Inst holds the decoded instruction and Size
+  // its length in bytes; returns false if a disassembler could not be built
+  // or decoding failed.
+  bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size,
+                  int64_t Offset) const {
+    // The triple is chosen per-symbol: the symbol's target flag can select
+    // the arm vs. thumb variant of the base triple (see getTripleForSymbol).
+    auto TT = Checker.getTripleForSymbol(Checker.getTargetFlag(Symbol));
+    auto TI = getTargetInfo(TT, Checker.getCPU(), Checker.getFeatures());
+
+    if (auto E = TI.takeError()) {
+      errs() << "Error obtaining disassembler: " << toString(std::move(E))
+             << "\n";
+      return false;
+    }
+
+    StringRef SymbolMem = Checker.getSymbolContent(Symbol);
+    ArrayRef<uint8_t> SymbolBytes(SymbolMem.bytes_begin() + Offset,
+                                  SymbolMem.size() - Offset);
+
+    MCDisassembler::DecodeStatus S =
+        TI->Disassembler->getInstruction(Inst, Size, SymbolBytes, 0, nulls());
+
+    return (S == MCDisassembler::Success);
+  }
+
+  // Build the full set of MC components (subtarget info, register info, asm
+  // info, MC context, disassembler, instruction info and printer) needed to
+  // disassemble code for the given triple/CPU/feature set.
+  // Returns a StringError if any component cannot be created.
+  Expected<TargetInfo> getTargetInfo(const Triple &TT, const StringRef &CPU,
+                                     const SubtargetFeatures &TF) const {
+
+    auto TripleName = TT.str();
+    std::string ErrorStr;
+    const Target *TheTarget =
+        TargetRegistry::lookupTarget(TripleName, ErrorStr);
+    if (!TheTarget)
+      return make_error<StringError>("Error accessing target '" + TripleName +
+                                     "': " + ErrorStr,
+                                     inconvertibleErrorCode());
+
+    std::unique_ptr<MCSubtargetInfo> STI(
+        TheTarget->createMCSubtargetInfo(TripleName, CPU, TF.getString()));
+    if (!STI)
+      return make_error<StringError>("Unable to create subtarget for " +
+                                     TripleName,
+                                     inconvertibleErrorCode());
+
+    std::unique_ptr<MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
+    if (!MRI)
+      return make_error<StringError>("Unable to create target register info "
+                                     "for " +
+                                     TripleName,
+                                     inconvertibleErrorCode());
+
+    MCTargetOptions MCOptions;
+    std::unique_ptr<MCAsmInfo> MAI(
+        TheTarget->createMCAsmInfo(*MRI, TripleName, MCOptions));
+    if (!MAI)
+      return make_error<StringError>("Unable to create target asm info " +
+                                     TripleName,
+                                     inconvertibleErrorCode());
+
+    auto Ctx = std::make_unique<MCContext>(Triple(TripleName), MAI.get(),
+                                           MRI.get(), STI.get());
+
+    std::unique_ptr<MCDisassembler> Disassembler(
+        TheTarget->createMCDisassembler(*STI, *Ctx));
+    if (!Disassembler)
+      return make_error<StringError>("Unable to create disassembler for " +
+                                     TripleName,
+                                     inconvertibleErrorCode());
+
+    std::unique_ptr<MCInstrInfo> MII(TheTarget->createMCInstrInfo());
+    if (!MII)
+      // Note: trailing space added so the triple isn't fused to the message.
+      return make_error<StringError>("Unable to create instruction info for " +
+                                     TripleName,
+                                     inconvertibleErrorCode());
+
+    std::unique_ptr<MCInstPrinter> InstPrinter(TheTarget->createMCInstPrinter(
+        Triple(TripleName), 0, *MAI, *MII, *MRI));
+    if (!InstPrinter)
+      // Note: trailing space added so the triple isn't fused to the message.
+      return make_error<StringError>(
+          "Unable to create instruction printer for " + TripleName,
+          inconvertibleErrorCode());
+
+    return TargetInfo({TheTarget, std::move(STI), std::move(MRI),
+                       std::move(MAI), std::move(Ctx), std::move(Disassembler),
+                       std::move(MII), std::move(InstPrinter)});
+  }
+};
+} // namespace llvm
+
+// Construct the checker implementation, capturing the JIT-introspection
+// callbacks (symbol/section/stub/GOT lookup) plus the target configuration
+// (endianness, triple, CPU, features) used when evaluating check expressions.
+RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(
+    IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+    GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+    GetGOTInfoFunction GetGOTInfo, llvm::endianness Endianness, Triple TT,
+    StringRef CPU, SubtargetFeatures TF, raw_ostream &ErrStream)
+    : IsSymbolValid(std::move(IsSymbolValid)),
+      GetSymbolInfo(std::move(GetSymbolInfo)),
+      GetSectionInfo(std::move(GetSectionInfo)),
+      GetStubInfo(std::move(GetStubInfo)), GetGOTInfo(std::move(GetGOTInfo)),
+      Endianness(Endianness), TT(std::move(TT)), CPU(std::move(CPU)),
+      TF(std::move(TF)), ErrStream(ErrStream) {}
+
+// Evaluate a single rtdyld-check expression. Returns true if the expression
+// evaluated successfully and held.
+bool RuntimeDyldCheckerImpl::check(StringRef CheckExpr) const {
+  CheckExpr = CheckExpr.trim();
+  LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+                    << "'...\n");
+  RuntimeDyldCheckerExprEval P(*this, ErrStream);
+  bool Result = P.evaluate(CheckExpr);
+  (void)Result;
+  LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+                    << (Result ? "passed" : "FAILED") << ".\n");
+  return Result;
+}
+
+// Scan MemBuf line-by-line for lines beginning with RulePrefix, splice
+// backslash-continued lines into a single expression, and run each complete
+// expression through check(). Returns true only if at least one rule was
+// found and every rule passed.
+bool RuntimeDyldCheckerImpl::checkAllRulesInBuffer(StringRef RulePrefix,
+                                                   MemoryBuffer *MemBuf) const {
+  bool DidAllTestsPass = true;
+  unsigned NumRules = 0;
+
+  std::string CheckExpr;
+  const char *LineStart = MemBuf->getBufferStart();
+
+  // Eat whitespace.
+  while (LineStart != MemBuf->getBufferEnd() && isSpace(*LineStart))
+    ++LineStart;
+
+  while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
+    const char *LineEnd = LineStart;
+    while (LineEnd != MemBuf->getBufferEnd() && *LineEnd != '\r' &&
+           *LineEnd != '\n')
+      ++LineEnd;
+
+    StringRef Line(LineStart, LineEnd - LineStart);
+    if (Line.starts_with(RulePrefix))
+      CheckExpr += Line.substr(RulePrefix.size()).str();
+
+    // If there's a check expr string...
+    if (!CheckExpr.empty()) {
+      // ... and it's complete then run it, otherwise remove the trailing '\'
+      // so the next prefixed line is appended as a continuation.
+      if (CheckExpr.back() != '\\') {
+        DidAllTestsPass &= check(CheckExpr);
+        CheckExpr.clear();
+        ++NumRules;
+      } else
+        CheckExpr.pop_back();
+    }
+
+    // Eat whitespace.
+    LineStart = LineEnd;
+    while (LineStart != MemBuf->getBufferEnd() && isSpace(*LineStart))
+      ++LineStart;
+  }
+  // An input with zero rules is treated as a failure (likely a bad prefix).
+  return DidAllTestsPass && (NumRules != 0);
+}
+
+// Forward symbol-validity queries to the client-supplied callback.
+bool RuntimeDyldCheckerImpl::isSymbolValid(StringRef Symbol) const {
+  return IsSymbolValid(Symbol);
+}
+
+// Return the address of the symbol's content in the checker's own (host)
+// address space, or 0 on lookup failure or for zero-fill symbols (which have
+// no backing content).
+uint64_t RuntimeDyldCheckerImpl::getSymbolLocalAddr(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return 0;
+  }
+
+  if (SymInfo->isZeroFill())
+    return 0;
+
+  return static_cast<uint64_t>(
+      reinterpret_cast<uintptr_t>(SymInfo->getContent().data()));
+}
+
+// Return the symbol's target (executor-side) address, or 0 on lookup failure.
+uint64_t RuntimeDyldCheckerImpl::getSymbolRemoteAddr(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return 0;
+  }
+
+  return SymInfo->getTargetAddress();
+}
+
+// Read Size bytes (1, 2, 4 or 8) from the given host address, interpreting
+// them with the checker's configured target endianness. SrcAddr must be a
+// valid pointer in this process.
+uint64_t RuntimeDyldCheckerImpl::readMemoryAtAddr(uint64_t SrcAddr,
+                                                  unsigned Size) const {
+  uintptr_t PtrSizedAddr = static_cast<uintptr_t>(SrcAddr);
+  assert(PtrSizedAddr == SrcAddr && "Linker memory pointer out-of-range.");
+  void *Ptr = reinterpret_cast<void*>(PtrSizedAddr);
+
+  switch (Size) {
+  case 1:
+    return support::endian::read<uint8_t>(Ptr, Endianness);
+  case 2:
+    return support::endian::read<uint16_t>(Ptr, Endianness);
+  case 4:
+    return support::endian::read<uint32_t>(Ptr, Endianness);
+  case 8:
+    return support::endian::read<uint64_t>(Ptr, Endianness);
+  }
+  llvm_unreachable("Unsupported read size");
+}
+
+// Return a view of the symbol's content bytes, or an empty StringRef on
+// lookup failure. The returned view aliases the symbol-info buffer.
+StringRef RuntimeDyldCheckerImpl::getSymbolContent(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return StringRef();
+  }
+  return {SymInfo->getContent().data(), SymInfo->getContent().size()};
+}
+
+// Return the symbol's target flags (used e.g. to distinguish arm/thumb in
+// getTripleForSymbol), or a default-constructed value on lookup failure.
+TargetFlagsType RuntimeDyldCheckerImpl::getTargetFlag(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return TargetFlagsType{};
+  }
+  return SymInfo->getTargetFlags();
+}
+
+// Derive the triple to disassemble a symbol with. On ARM targets bit 0 of
+// the symbol's target flag selects the thumb variant of the base triple (and
+// vice versa on thumb base triples); all other architectures use the base
+// triple unchanged.
+Triple
+RuntimeDyldCheckerImpl::getTripleForSymbol(TargetFlagsType Flag) const {
+  Triple TheTriple = TT;
+
+  switch (TT.getArch()) {
+  case Triple::ArchType::arm:
+    // Flag bit 0 clear => plain arm symbol; keep the base triple.
+    if (~Flag & 0x1)
+      return TT;
+    // Rewrite "arm..." -> "thumb..." (substr(3) drops the "arm" prefix).
+    TheTriple.setArchName((Twine("thumb") + TT.getArchName().substr(3)).str());
+    return TheTriple;
+  case Triple::ArchType::thumb:
+    if (Flag & 0x1)
+      return TT;
+    // Rewrite "thumb..." -> "arm..." (substr(5) drops the "thumb" prefix).
+    TheTriple.setArchName((Twine("arm") + TT.getArchName().substr(5)).str());
+    return TheTriple;
+
+  default:
+    return TT;
+  }
+}
+
+// Look up a section's address. With IsInsideLoad set, return the host-side
+// content pointer (0 for zero-fill sections); otherwise return the target
+// address. On failure returns {0, error-message}.
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getSectionAddr(
+    StringRef FileName, StringRef SectionName, bool IsInsideLoad) const {
+
+  auto SecInfo = GetSectionInfo(FileName, SectionName);
+  if (!SecInfo) {
+    std::string ErrMsg;
+    {
+      // Scope the stream so ErrMsg is fully flushed before it is returned.
+      raw_string_ostream ErrMsgStream(ErrMsg);
+      logAllUnhandledErrors(SecInfo.takeError(), ErrMsgStream,
+                            "RTDyldChecker: ");
+    }
+    return std::make_pair(0, std::move(ErrMsg));
+  }
+
+  // If this address is being looked up in "load" mode, return the content
+  // pointer, otherwise return the target address.
+
+  uint64_t Addr = 0;
+
+  if (IsInsideLoad) {
+    if (SecInfo->isZeroFill())
+      Addr = 0;
+    else
+      Addr = pointerToJITTargetAddress(SecInfo->getContent().data());
+  } else
+    Addr = SecInfo->getTargetAddress();
+
+  return std::make_pair(Addr, "");
+}
+
+// Look up the address of a stub (IsStubAddr) or GOT entry for SymbolName in
+// the given container. StubKindFilter is only meaningful for stubs. With
+// IsInsideLoad set, returns the host-side content pointer; otherwise the
+// target address. On failure returns {0, error-message}.
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getStubOrGOTAddrFor(
+    StringRef StubContainerName, StringRef SymbolName, StringRef StubKindFilter,
+    bool IsInsideLoad, bool IsStubAddr) const {
+
+  assert((StubKindFilter.empty() || IsStubAddr) &&
+         "Kind name filter only supported for stubs");
+  auto StubInfo =
+      IsStubAddr ? GetStubInfo(StubContainerName, SymbolName, StubKindFilter)
+                 : GetGOTInfo(StubContainerName, SymbolName);
+
+  if (!StubInfo) {
+    std::string ErrMsg;
+    {
+      // Scope the stream so ErrMsg is fully flushed before it is returned.
+      raw_string_ostream ErrMsgStream(ErrMsg);
+      logAllUnhandledErrors(StubInfo.takeError(), ErrMsgStream,
+                            "RTDyldChecker: ");
+    }
+    return std::make_pair((uint64_t)0, std::move(ErrMsg));
+  }
+
+  uint64_t Addr = 0;
+
+  if (IsInsideLoad) {
+    // Zero-fill entries have no host content to point at - report an error.
+    if (StubInfo->isZeroFill())
+      return std::make_pair((uint64_t)0, "Detected zero-filled stub/GOT entry");
+    Addr = pointerToJITTargetAddress(StubInfo->getContent().data());
+  } else
+    Addr = StubInfo->getTargetAddress();
+
+  return std::make_pair(Addr, "");
+}
+
+// Public-facing checker: a thin pimpl wrapper that forwards everything to
+// RuntimeDyldCheckerImpl.
+RuntimeDyldChecker::RuntimeDyldChecker(
+    IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+    GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+    GetGOTInfoFunction GetGOTInfo, llvm::endianness Endianness, Triple TT,
+    StringRef CPU, SubtargetFeatures TF, raw_ostream &ErrStream)
+    : Impl(::std::make_unique<RuntimeDyldCheckerImpl>(
+          std::move(IsSymbolValid), std::move(GetSymbolInfo),
+          std::move(GetSectionInfo), std::move(GetStubInfo),
+          std::move(GetGOTInfo), Endianness, std::move(TT), std::move(CPU),
+          std::move(TF), ErrStream)) {}
+
+// Out-of-line so the unique_ptr<Impl> destructor sees the complete type.
+RuntimeDyldChecker::~RuntimeDyldChecker() = default;
+
+// Forward a single check expression to the implementation.
+bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
+  return Impl->check(CheckExpr);
+}
+
+// Forward whole-buffer rule checking to the implementation.
+bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
+                                               MemoryBuffer *MemBuf) const {
+  return Impl->checkAllRulesInBuffer(RulePrefix, MemBuf);
+}
+
+// Forward section-address queries to the implementation. LocalAddress
+// selects the host-side content pointer over the target address.
+std::pair<uint64_t, std::string>
+RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
+                                   bool LocalAddress) {
+  return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
new file mode 100644
index 000000000000..bda554e9e5b6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -0,0 +1,85 @@
+//===-- RuntimeDyldCheckerImpl.h -- RuntimeDyld test framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+
+#include "RuntimeDyldImpl.h"
+
+namespace llvm {
+
+/// Holds target-specific properties for a symbol (e.g. bit 0 distinguishes
+/// arm vs. thumb code on ARM targets).
+using TargetFlagsType = uint8_t;
+
+// Implementation of the rtdyld-check expression checker. Stores the
+// client-supplied introspection callbacks plus target configuration, and
+// exposes the query surface used by RuntimeDyldCheckerExprEval.
+class RuntimeDyldCheckerImpl {
+  friend class RuntimeDyldChecker;
+  friend class RuntimeDyldCheckerExprEval;
+
+  using IsSymbolValidFunction =
+      RuntimeDyldChecker::IsSymbolValidFunction;
+  using GetSymbolInfoFunction = RuntimeDyldChecker::GetSymbolInfoFunction;
+  using GetSectionInfoFunction = RuntimeDyldChecker::GetSectionInfoFunction;
+  using GetStubInfoFunction = RuntimeDyldChecker::GetStubInfoFunction;
+  using GetGOTInfoFunction = RuntimeDyldChecker::GetGOTInfoFunction;
+
+public:
+  RuntimeDyldCheckerImpl(IsSymbolValidFunction IsSymbolValid,
+                         GetSymbolInfoFunction GetSymbolInfo,
+                         GetSectionInfoFunction GetSectionInfo,
+                         GetStubInfoFunction GetStubInfo,
+                         GetGOTInfoFunction GetGOTInfo,
+                         llvm::endianness Endianness, Triple TT, StringRef CPU,
+                         SubtargetFeatures TF, llvm::raw_ostream &ErrStream);
+
+  bool check(StringRef CheckExpr) const;
+  bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+private:
+
+  // Batch symbol lookup used by the expression evaluator.
+
+  Expected<JITSymbolResolver::LookupResult>
+  lookup(const JITSymbolResolver::LookupSet &Symbols) const;
+
+  // Symbol queries (forwarded to the client callbacks).
+  bool isSymbolValid(StringRef Symbol) const;
+  uint64_t getSymbolLocalAddr(StringRef Symbol) const;
+  uint64_t getSymbolRemoteAddr(StringRef Symbol) const;
+  uint64_t readMemoryAtAddr(uint64_t Addr, unsigned Size) const;
+
+  StringRef getSymbolContent(StringRef Symbol) const;
+
+  // Target-flag queries (e.g. arm/thumb selection).
+  TargetFlagsType getTargetFlag(StringRef Symbol) const;
+  Triple getTripleForSymbol(TargetFlagsType Flag) const;
+  StringRef getCPU() const { return CPU; }
+  SubtargetFeatures getFeatures() const { return TF; }
+
+  std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+                                                  StringRef SectionName,
+                                                  bool IsInsideLoad) const;
+
+  std::pair<uint64_t, std::string>
+  getStubOrGOTAddrFor(StringRef StubContainerName, StringRef Symbol,
+                      StringRef StubKindFilter, bool IsInsideLoad,
+                      bool IsStubAddr) const;
+
+  std::optional<uint64_t> getSectionLoadAddress(void *LocalAddr) const;
+
+  // Client-supplied introspection callbacks.
+  IsSymbolValidFunction IsSymbolValid;
+  GetSymbolInfoFunction GetSymbolInfo;
+  GetSectionInfoFunction GetSectionInfo;
+  GetStubInfoFunction GetStubInfo;
+  GetGOTInfoFunction GetGOTInfo;
+  // Target configuration for memory reads and disassembly.
+  llvm::endianness Endianness;
+  Triple TT;
+  std::string CPU;
+  SubtargetFeatures TF;
+  llvm::raw_ostream &ErrStream;
+};
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
new file mode 100644
index 000000000000..736d9a3e056f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -0,0 +1,2572 @@
+//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of ELF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "Targets/RuntimeDyldELFMips.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+
+#define DEBUG_TYPE "dyld"
+
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+// OR the low 12 bits of Imm into bits [21:10] of the little-endian AArch64
+// instruction word at L (the imm12 field of ADD/LD/ST immediate forms).
+static void or32AArch64Imm(void *L, uint64_t Imm) {
+  or32le(L, (Imm & 0xFFF) << 10);
+}
+
+// Write V at P using the requested byte order: big-endian when isBE is set,
+// little-endian otherwise.
+template <class T> static void write(bool isBE, void *P, T V) {
+  isBE ? write<T, llvm::endianness::big>(P, V)
+       : write<T, llvm::endianness::little>(P, V);
+}
+
+// Patch a 21-bit ADR/ADRP-style immediate: Imm[1:0] is placed in instruction
+// bits [30:29] (immlo) and Imm[20:2] in bits [23:5] (immhi). All other bits
+// of the little-endian instruction word at L are preserved.
+static void write32AArch64Addr(void *L, uint64_t Imm) {
+  uint32_t ImmLo = (Imm & 0x3) << 29;
+  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+}
+
+// Return the bits [Start, End] from Val shifted Start bits.
+// For instance, getBits(0xF0, 4, 8) returns 0xF.
+// Note: callers in this file only extract narrow fields, so the shift width
+// (End + 1 - Start) stays well below 64 and cannot overflow.
+static uint64_t getBits(uint64_t Val, int Start, int End) {
+  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
+  return (Val >> Start) & Mask;
+}
+
+namespace {
+
+// An ELF object wrapper whose section and symbol addresses can be rewritten
+// in place to their JIT load addresses. Instances are produced via create()
+// (the constructor is private) and identified through classof/isDyldType.
+template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+  typedef typename ELFT::uint addr_type;
+
+  // Private: use the create() factory, which reports parse errors.
+  DyldELFObject(ELFObjectFile<ELFT> &&Obj);
+
+public:
+  static Expected<std::unique_ptr<DyldELFObject>>
+  create(MemoryBufferRef Wrapper);
+
+  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
+
+  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
+
+  // Methods for type inquiry through isa, cast and dyn_cast
+  static bool classof(const Binary *v) {
+    return (isa<ELFObjectFile<ELFT>>(v) &&
+            classof(cast<ELFObjectFile<ELFT>>(v)));
+  }
+  static bool classof(const ELFObjectFile<ELFT> *v) {
+    return v->isDyldType();
+  }
+};
+
+
+
+// The MemoryBuffer passed into this constructor is just a wrapper around the
+// actual memory. Ultimately, the Binary parent class will take ownership of
+// this MemoryBuffer object but not the underlying memory.
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
+    : ELFObjectFile<ELFT>(std::move(Obj)) {
+  // Mark the object so classof()/isDyldType() can identify it later.
+  this->isDyldELFObject = true;
+}
+
+// Factory: parse Wrapper as an ELF object and wrap it in a DyldELFObject.
+// Propagates any ELF parse error from the underlying ELFObjectFile.
+template <class ELFT>
+Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
+  auto Obj = ELFObjectFile<ELFT>::create(Wrapper);
+  if (auto E = Obj.takeError())
+    return std::move(E);
+  std::unique_ptr<DyldELFObject<ELFT>> Ret(
+      new DyldELFObject<ELFT>(std::move(*Obj)));
+  return std::move(Ret);
+}
+
+// Rewrite the section header's sh_addr in place. The const_cast is needed
+// because SectionRef only exposes a const view of the underlying header.
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
+                                               uint64_t Addr) {
+  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+  Elf_Shdr *shdr =
+      const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+  // This assumes the address passed in matches the target address bitness
+  // The template-based type cast handles everything else.
+  shdr->sh_addr = static_cast<addr_type>(Addr);
+}
+
+// Rewrite the symbol's st_value in place. The const_cast is needed because
+// SymbolRef only exposes a const view of the underlying symbol entry.
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
+                                              uint64_t Addr) {
+
+  Elf_Sym *sym = const_cast<Elf_Sym *>(
+      ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
+
+  // This assumes the address passed in matches the target address bitness
+  // The template-based type cast handles everything else.
+  sym->st_value = static_cast<addr_type>(Addr);
+}
+
+// Load information for an ELF object, capable of producing a debug object
+// whose section addresses reflect the final JIT load addresses.
+class LoadedELFObjectInfo final
+    : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
+                                    RuntimeDyld::LoadedObjectInfo> {
+public:
+  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+  OwningBinary<ObjectFile>
+  getObjectForDebug(const ObjectFile &Obj) const override;
+};
+
+// Create a DyldELFObject from Buffer (a copy of SourceObject's bytes) and
+// patch each named section's sh_addr to the load address recorded in L,
+// walking SourceObject's sections in parallel to look up those addresses.
+template <typename ELFT>
+static Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
+                      const LoadedELFObjectInfo &L) {
+  typedef typename ELFT::Shdr Elf_Shdr;
+  typedef typename ELFT::uint addr_type;
+
+  Expected<std::unique_ptr<DyldELFObject<ELFT>>> ObjOrErr =
+      DyldELFObject<ELFT>::create(Buffer);
+  if (Error E = ObjOrErr.takeError())
+    return std::move(E);
+
+  std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
+
+  // Iterate over all sections in the object.
+  auto SI = SourceObject.section_begin();
+  for (const auto &Sec : Obj->sections()) {
+    Expected<StringRef> NameOrErr = Sec.getName();
+    if (!NameOrErr) {
+      // NOTE(review): a section with an unreadable name is skipped WITHOUT
+      // advancing SI, keeping the two iterators in sync only if the name
+      // lookup fails for neither or both objects identically.
+      consumeError(NameOrErr.takeError());
+      continue;
+    }
+
+    if (*NameOrErr != "") {
+      DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+      Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
+          reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+      if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
+        // This assumes that the address passed in matches the target address
+        // bitness. The template-based type cast handles everything else.
+        shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
+      }
+    }
+    ++SI;
+  }
+
+  return std::move(Obj);
+}
+
+// Build a debugger-consumable copy of Obj: duplicate its bytes, then patch
+// the copy's section addresses to the JIT load addresses recorded in L,
+// dispatching on the object's address size and endianness.
+static OwningBinary<ObjectFile>
+createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
+  assert(Obj.isELF() && "Not an ELF object file.");
+
+  std::unique_ptr<MemoryBuffer> Buffer =
+      MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
+
+  // Mark the initial (null) Expected as checked before it is overwritten.
+  Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
+  handleAllErrors(DebugObj.takeError());
+  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
+  else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
+  else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
+  else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
+  else
+    llvm_unreachable("Unexpected ELF format");
+
+  // NOTE(review): creation errors are swallowed here; *DebugObj would be
+  // null in that case. Upstream relies on creation succeeding for a buffer
+  // that already parsed once as Obj.
+  handleAllErrors(DebugObj.takeError());
+  return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
+}
+
+// Produce the address-patched debug object for Obj (see createELFDebugObject).
+OwningBinary<ObjectFile>
+LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
+  return createELFDebugObject(Obj, *this);
+}
+
+} // anonymous namespace
+
+namespace llvm {
+
+// ELF dynamic linker: starts with no GOT section allocated (GOTSectionID 0)
+// and an empty GOT (CurrentGOTIndex 0).
+RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+                               JITSymbolResolver &Resolver)
+    : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
+RuntimeDyldELF::~RuntimeDyldELF() = default;
+
+// Register every pending .eh_frame section with the memory manager (for
+// exception handling / unwinding), then clear the pending list so sections
+// are not registered twice.
+void RuntimeDyldELF::registerEHFrames() {
+  for (SID EHFrameSID : UnregisteredEHFrameSections) {
+    uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+    size_t EHFrameSize = Sections[EHFrameSID].getSize();
+    MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+  }
+  UnregisteredEHFrameSections.clear();
+}
+
+// Factory: MIPS variants get the specialized RuntimeDyldELFMips linker; all
+// other architectures use the generic RuntimeDyldELF.
+std::unique_ptr<RuntimeDyldELF>
+llvm::RuntimeDyldELF::create(Triple::ArchType Arch,
+                             RuntimeDyld::MemoryManager &MemMgr,
+                             JITSymbolResolver &Resolver) {
+  switch (Arch) {
+  default:
+    return std::make_unique<RuntimeDyldELF>(MemMgr, Resolver);
+  case Triple::mips:
+  case Triple::mipsel:
+  case Triple::mips64:
+  case Triple::mips64el:
+    return std::make_unique<RuntimeDyldELFMips>(MemMgr, Resolver);
+  }
+}
+
+// Load an ELF object. On success, returns load info capable of producing a
+// debug object; on failure, records the error string, sets the HasError
+// flag, and returns null.
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
+  if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+    return std::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
+  else {
+    HasError = true;
+    raw_string_ostream ErrStream(ErrorStr);
+    logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+    return nullptr;
+  }
+}
+
+// Apply a single x86-64 ELF relocation: patch the bytes at Section+Offset
+// using the resolved symbol Value and Addend. Unsupported relocation types
+// are a fatal error. (SymOffset is unused for the types handled here.)
+void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
+                                             uint64_t Offset, uint64_t Value,
+                                             uint32_t Type, int64_t Addend,
+                                             uint64_t SymOffset) {
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  case ELF::R_X86_64_NONE:
+    break;
+  case ELF::R_X86_64_8: {
+    // Absolute 8-bit: S + A, asserted to fit in a signed byte.
+    Value += Addend;
+    assert((int64_t)Value <= INT8_MAX && (int64_t)Value >= INT8_MIN);
+    uint8_t TruncatedAddr = (Value & 0xFF);
+    *Section.getAddressWithOffset(Offset) = TruncatedAddr;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_16: {
+    // Absolute 16-bit: S + A, asserted to fit in a signed 16-bit value.
+    Value += Addend;
+    assert((int64_t)Value <= INT16_MAX && (int64_t)Value >= INT16_MIN);
+    uint16_t TruncatedAddr = (Value & 0xFFFF);
+    support::ulittle16_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncatedAddr;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_64: {
+    // Absolute 64-bit: S + A.
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+        Value + Addend;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_32:
+  case ELF::R_X86_64_32S: {
+    // Absolute 32-bit: zero-extended (32) or sign-extended (32S) range check.
+    Value += Addend;
+    assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
+           (Type == ELF::R_X86_64_32S &&
+            ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
+    uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncatedAddr;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_PC8: {
+    // PC-relative 8-bit: S + A - P.
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t RealOffset = Value + Addend - FinalAddress;
+    assert(isInt<8>(RealOffset));
+    int8_t TruncOffset = (RealOffset & 0xFF);
+    Section.getAddress()[Offset] = TruncOffset;
+    break;
+  }
+  case ELF::R_X86_64_PC32: {
+    // PC-relative 32-bit: S + A - P.
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t RealOffset = Value + Addend - FinalAddress;
+    assert(isInt<32>(RealOffset));
+    int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncOffset;
+    break;
+  }
+  case ELF::R_X86_64_PC64: {
+    // PC-relative 64-bit: S + A - P.
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t RealOffset = Value + Addend - FinalAddress;
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+        RealOffset;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
+                      << format("%p\n", FinalAddress));
+    break;
+  }
+  case ELF::R_X86_64_GOTOFF64: {
+    // Compute Value - GOTBase.
+    uint64_t GOTBase = 0;
+    // NOTE: the loop variable deliberately shadows the Section parameter;
+    // the write below the loop uses the outer Section again.
+    for (const auto &Section : Sections) {
+      if (Section.getName() == ".got") {
+        GOTBase = Section.getLoadAddressWithOffset(0);
+        break;
+      }
+    }
+    assert(GOTBase != 0 && "missing GOT");
+    int64_t GOTOffset = Value - GOTBase + Addend;
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
+    break;
+  }
+  case ELF::R_X86_64_DTPMOD64: {
+    // We only have one DSO, so the module id is always 1.
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 1;
+    break;
+  }
+  case ELF::R_X86_64_DTPOFF64:
+  case ELF::R_X86_64_TPOFF64: {
+    // DTPOFF64 should resolve to the offset in the TLS block, TPOFF64 to the
+    // offset in the *initial* TLS block. Since we are statically linking, all
+    // TLS blocks already exist in the initial block, so resolve both
+    // relocations equally.
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+        Value + Addend;
+    break;
+  }
+  case ELF::R_X86_64_DTPOFF32:
+  case ELF::R_X86_64_TPOFF32: {
+    // As for the (D)TPOFF64 relocations above, both DTPOFF32 and TPOFF32 can
+    // be resolved equally.
+    int64_t RealValue = Value + Addend;
+    assert(RealValue >= INT32_MIN && RealValue <= INT32_MAX);
+    int32_t TruncValue = RealValue;
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncValue;
+    break;
+  }
+  }
+}
+
+// Apply a single 32-bit x86 (i386) ELF relocation at Section+Offset.
+// Only the types the LLVM ELF writer emits are supported; anything else is
+// a fatal error.
+void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
+                                          uint64_t Offset, uint32_t Value,
+                                          uint32_t Type, int32_t Addend) {
+  switch (Type) {
+  case ELF::R_386_32: {
+    // Absolute 32-bit: S + A.
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        Value + Addend;
+    break;
+  }
+  // Handle R_386_PLT32 like R_386_PC32 since it should be able to
+  // reach any 32 bit address.
+  case ELF::R_386_PLT32:
+  case ELF::R_386_PC32: {
+    // PC-relative 32-bit: S + A - P.
+    uint32_t FinalAddress =
+        Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+    uint32_t RealOffset = Value + Addend - FinalAddress;
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        RealOffset;
+    break;
+  }
+  default:
+    // There are other relocation types, but it appears these are the
+    // only ones currently used by the LLVM ELF object writer
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  }
+}
+
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ // Data should use target endian. Code should always use little endian.
+ bool isBE = Arch == Triple::aarch64_be;
+
+ LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend) << "\n");
+
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_AARCH64_NONE:
+ break;
+ case ELF::R_AARCH64_ABS16: {
+ uint64_t Result = Value + Addend;
+ assert(Result == static_cast<uint64_t>(llvm::SignExtend64(Result, 16)) ||
+ (Result >> 16) == 0);
+ write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
+ break;
+ }
+ case ELF::R_AARCH64_ABS32: {
+ uint64_t Result = Value + Addend;
+ assert(Result == static_cast<uint64_t>(llvm::SignExtend64(Result, 32)) ||
+ (Result >> 32) == 0);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+ break;
+ }
+ case ELF::R_AARCH64_ABS64:
+ write(isBE, TargetPtr, Value + Addend);
+ break;
+ case ELF::R_AARCH64_PLT32: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ static_cast<int64_t>(Result) <= INT32_MAX);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result));
+ break;
+ }
+ case ELF::R_AARCH64_PREL16: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT16_MIN &&
+ static_cast<int64_t>(Result) <= UINT16_MAX);
+ write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
+ break;
+ }
+ case ELF::R_AARCH64_PREL32: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ static_cast<int64_t>(Result) <= UINT32_MAX);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+ break;
+ }
+ case ELF::R_AARCH64_PREL64:
+ write(isBE, TargetPtr, Value + Addend - FinalAddress);
+ break;
+ case ELF::R_AARCH64_CONDBR19: {
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ assert(isInt<21>(BranchImm));
+ *TargetPtr &= 0xff00001fU;
+ // Immediate:20:2 goes in bits 23:5 of Bcc, CBZ, CBNZ
+ or32le(TargetPtr, (BranchImm & 0x001FFFFC) << 3);
+ break;
+ }
+ case ELF::R_AARCH64_TSTBR14: {
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ assert(isInt<16>(BranchImm));
+
+ uint32_t RawInstr = *(support::little32_t *)TargetPtr;
+ *(support::little32_t *)TargetPtr = RawInstr & 0xfff8001fU;
+
+ // Immediate:15:2 goes in bits 18:5 of TBZ, TBNZ
+ or32le(TargetPtr, (BranchImm & 0x0000FFFC) << 3);
+ break;
+ }
+ case ELF::R_AARCH64_CALL26: // fallthrough
+ case ELF::R_AARCH64_JUMP26: {
+ // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
+ // calculation.
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ // "Check that -2^27 <= result < 2^27".
+ assert(isInt<28>(BranchImm));
+ or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
+ break;
+ }
+ case ELF::R_AARCH64_MOVW_UABS_G3:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G2_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G1_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G0_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
+ break;
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+ // Operation: Page(S+A) - Page(P)
+ uint64_t Result =
+ ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+ // Check that -2^32 <= X < 2^32
+ assert(isInt<33>(Result) && "overflow check failed for relocation");
+
+ // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
+ // from bits 32:12 of X.
+ write32AArch64Addr(TargetPtr, Result >> 12);
+ break;
+ }
+ case ELF::R_AARCH64_ADD_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:0 of X
+ or32AArch64Imm(TargetPtr, Value + Addend);
+ break;
+ case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:0 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
+ break;
+ case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:1 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
+ break;
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:2 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
+ break;
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:3 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
+ break;
+ case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:4 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
+ break;
+ case ELF::R_AARCH64_LD_PREL_LO19: {
+ // Operation: S + A - P
+ uint64_t Result = Value + Addend - FinalAddress;
+
+ // "Check that -2^20 <= result < 2^20".
+ assert(isInt<21>(Result));
+
+ *TargetPtr &= 0xff00001fU;
+ // Immediate goes in bits 23:5 of LD imm instruction, taken
+ // from bits 20:2 of X
+ *TargetPtr |= ((Result & 0xffc) << (5 - 2));
+ break;
+ }
+ case ELF::R_AARCH64_ADR_PREL_LO21: {
+ // Operation: S + A - P
+ uint64_t Result = Value + Addend - FinalAddress;
+
+ // "Check that -2^20 <= result < 2^20".
+ assert(isInt<21>(Result));
+
+ *TargetPtr &= 0x9f00001fU;
+ // Immediate goes in bits 23:5, 30:29 of ADR imm instruction, taken
+ // from bits 20:0 of X
+ *TargetPtr |= ((Result & 0xffc) << (5 - 2));
+ *TargetPtr |= (Result & 0x3) << 29;
+ break;
+ }
+ }
+}
+
+void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ // Apply a 32-bit ARM (AArch32) relocation of kind `Type` at Section+Offset.
+ // `Value` is the resolved symbol address; the addend is folded in up front.
+ // TODO: Add Thumb relocations.
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ // P (the place being relocated) in the target address space, truncated to
+ // 32 bits since ARM addresses are 32-bit.
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%x", Value)
+ << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+
+ case ELF::R_ARM_NONE:
+ break;
+ // Write a 31-bit signed PC-relative offset, preserving the instruction's
+ // top bit.
+ case ELF::R_ARM_PREL31:
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
+ ((Value - FinalAddress) & ~0x80000000);
+ break;
+ case ELF::R_ARM_TARGET1:
+ case ELF::R_ARM_ABS32:
+ support::ulittle32_t::ref{TargetPtr} = Value;
+ break;
+ // Write first 16 bit of 32 bit value to the mov instruction.
+ // Last 4 bit should be shifted.
+ case ELF::R_ARM_MOVW_ABS_NC:
+ case ELF::R_ARM_MOVT_ABS:
+ // MOVW takes the low half of the address, MOVT the high half; the 16-bit
+ // value is split across the instruction's imm12 field and a 4-bit field
+ // at bit 16.
+ if (Type == ELF::R_ARM_MOVW_ABS_NC)
+ Value = Value & 0xFFFF;
+ else if (Type == ELF::R_ARM_MOVT_ABS)
+ Value = (Value >> 16) & 0xFFFF;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
+ (((Value >> 12) & 0xF) << 16);
+ break;
+ // Write 24 bit relative value to the branch instruction.
+ case ELF::R_ARM_PC24: // Fall through.
+ case ELF::R_ARM_CALL: // Fall through.
+ case ELF::R_ARM_JUMP24:
+ // The -8 accounts for the ARM pipeline: PC reads as the instruction
+ // address plus 8 at execution time.
+ int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
+ RelValue = (RelValue & 0x03FFFFFC) >> 2;
+ // The branch's imm24 field is expected to hold the placeholder -2 (-8>>2).
+ assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
+ break;
+ }
+}
+
+void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
+ // Record which MIPS ABI (O32 / N32 / N64) the object targets, from the ELF
+ // header flags. For non-MIPS targets all three flags are cleared.
+ if (Arch == Triple::UnknownArch ||
+ Triple::getArchTypePrefix(Arch) != "mips") {
+ IsMipsO32ABI = false;
+ IsMipsN32ABI = false;
+ IsMipsN64ABI = false;
+ return;
+ }
+ if (auto *E = dyn_cast<ELFObjectFileBase>(&Obj)) {
+ unsigned AbiVariant = E->getPlatformFlags();
+ IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
+ IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
+ }
+ // N64 has no dedicated e_flags bit; infer it from the 64-bit file format.
+ IsMipsN64ABI = Obj.getFileFormatName() == "elf64-mips";
+}
+
+// Return the .TOC. section and offset.
+Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Set a default SectionID in case we do not find a TOC section below.
+ // This may happen for references to TOC base (sym@toc, .odp
+ // relocation) without a .toc directive. In this case just use the
+ // first section (which is usually the .odp) since the code won't
+ // reference the .toc base directly.
+ Rel.SymbolName = nullptr;
+ Rel.SectionID = 0;
+
+ // The TOC consists of sections .got, .toc, .tocbss, .plt in that
+ // order. The TOC starts where the first of these sections starts.
+ for (auto &Section : Obj.sections()) {
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef SectionName = *NameOrErr;
+
+ if (SectionName == ".got"
+ || SectionName == ".toc"
+ || SectionName == ".tocbss"
+ || SectionName == ".plt") {
+ // Emit (or reuse) the section so it has a SectionID we can point at.
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, Section, false, LocalSections))
+ Rel.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ break;
+ }
+ }
+
+ // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
+ // thus permitting a full 64 Kbytes segment.
+ Rel.Addend = 0x8000;
+
+ return Error::success();
+}
+
+// Returns the sections and offset associated with the OPD entry referenced
+// by Symbol.
+Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Get the ELF symbol value (st_value) to compare with Relocation offset in
+ // .opd entries
+ for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
+ si != se; ++si) {
+
+ // We are looking for the relocation section that applies to .opd; skip
+ // any section that doesn't relocate .opd.
+ Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
+ if (!RelSecOrErr)
+ report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
+
+ section_iterator RelSecI = *RelSecOrErr;
+ if (RelSecI == Obj.section_end())
+ continue;
+
+ Expected<StringRef> NameOrErr = RelSecI->getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef RelSectionName = *NameOrErr;
+
+ if (RelSectionName != ".opd")
+ continue;
+
+ // Each .opd entry is described by a pair of relocations: an
+ // R_PPC64_ADDR64 (the function's entry address) followed by an
+ // R_PPC64_TOC. Walk the relocations looking for such pairs.
+ for (elf_relocation_iterator i = si->relocation_begin(),
+ e = si->relocation_end();
+ i != e;) {
+ // The R_PPC64_ADDR64 relocation indicates the first field
+ // of a .opd entry
+ uint64_t TypeFunc = i->getType();
+ if (TypeFunc != ELF::R_PPC64_ADDR64) {
+ ++i;
+ continue;
+ }
+
+ uint64_t TargetSymbolOffset = i->getOffset();
+ symbol_iterator TargetSymbol = i->getSymbol();
+ int64_t Addend;
+ if (auto AddendOrErr = i->getAddend())
+ Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+
+ ++i;
+ if (i == e)
+ break;
+
+ // Just check if following relocation is a R_PPC64_TOC
+ uint64_t TypeTOC = i->getType();
+ if (TypeTOC != ELF::R_PPC64_TOC)
+ continue;
+
+ // Finally compares the Symbol value and the target symbol offset
+ // to check if this .opd entry refers to the symbol the relocation
+ // points to.
+ if (Rel.Addend != (int64_t)TargetSymbolOffset)
+ continue;
+
+ section_iterator TSI = Obj.section_end();
+ if (auto TSIOrErr = TargetSymbol->getSection())
+ TSI = *TSIOrErr;
+ else
+ return TSIOrErr.takeError();
+ assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
+
+ bool IsCode = TSI->isText();
+ // Rewrite Rel to point at the function's real code section/offset
+ // rather than the descriptor.
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
+ LocalSections))
+ Rel.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Rel.Addend = (intptr_t)Addend;
+ return Error::success();
+ }
+ }
+ llvm_unreachable("Attempting to get address of ODP entry!");
+}
+
+// Relocation masks following the #lo(value), #hi(value), #ha(value),
+// #higher(value), #highera(value), #highest(value), and #highesta(value)
+// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
+// document.
+
+// Bits 15:0 of value.
+static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
+
+// Bits 31:16 of value.
+static inline uint16_t applyPPChi(uint64_t value) {
+ return (value >> 16) & 0xffff;
+}
+
+// Bits 31:16, adjusted so that #ha + #lo (sign-extended) reconstructs the
+// low 32 bits; hence the +0x8000 rounding.
+static inline uint16_t applyPPCha (uint64_t value) {
+ return ((value + 0x8000) >> 16) & 0xffff;
+}
+
+// Bits 47:32 of value.
+static inline uint16_t applyPPChigher(uint64_t value) {
+ return (value >> 32) & 0xffff;
+}
+
+// Bits 47:32, rounded for use with a sign-extended low part.
+static inline uint16_t applyPPChighera (uint64_t value) {
+ return ((value + 0x8000) >> 32) & 0xffff;
+}
+
+// Bits 63:48 of value.
+static inline uint16_t applyPPChighest(uint64_t value) {
+ return (value >> 48) & 0xffff;
+}
+
+// Bits 63:48, rounded for use with a sign-extended low part.
+static inline uint16_t applyPPChighesta (uint64_t value) {
+ return ((value + 0x8000) >> 48) & 0xffff;
+}
+
+void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ // Apply a 32-bit PowerPC relocation: write the selected 16-bit slice of
+ // S+A (big-endian) at Section+Offset.
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_PPC_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC_ADDR16_HI:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC_ADDR16_HA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ // Apply a 64-bit PowerPC (ELFv1/v2) relocation at Section+Offset. ADDR*
+ // kinds encode S+A; REL* kinds encode S+A-P. All stores are big-endian.
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_PPC64_ADDR16:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_DS:
+ // DS-form instructions use the low 14 bits; the bottom 2 bits of the
+ // field must stay clear.
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+ break;
+ case ELF::R_PPC64_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_LO_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+ break;
+ case ELF::R_PPC64_ADDR16_HI:
+ case ELF::R_PPC64_ADDR16_HIGH:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HA:
+ case ELF::R_PPC64_ADDR16_HIGHA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHER:
+ writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHERA:
+ writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHEST:
+ writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHESTA:
+ writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR14: {
+ assert(((Value + Addend) & 3) == 0);
+ // Preserve the AA/LK bits in the branch instruction
+ uint8_t aalk = *(LocalAddress + 3);
+ writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
+ } break;
+ case ELF::R_PPC64_REL16_LO: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPClo(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HI: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPChi(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HA: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPCha(Delta));
+ } break;
+ case ELF::R_PPC64_ADDR32: {
+ int64_t Result = static_cast<int64_t>(Value + Addend);
+ if (SignExtend64<32>(Result) != Result)
+ llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
+ writeInt32BE(LocalAddress, Result);
+ } break;
+ case ELF::R_PPC64_REL24: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
+ if (SignExtend64<26>(delta) != delta)
+ llvm_unreachable("Relocation R_PPC64_REL24 overflow");
+ // We preserve bits other than LI field, i.e. PO and AA/LK fields.
+ uint32_t Inst = readBytesUnaligned(LocalAddress, 4);
+ writeInt32BE(LocalAddress, (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
+ } break;
+ case ELF::R_PPC64_REL32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
+ if (SignExtend64<32>(delta) != delta)
+ llvm_unreachable("Relocation R_PPC64_REL32 overflow");
+ writeInt32BE(LocalAddress, delta);
+ } break;
+ case ELF::R_PPC64_REL64: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt64BE(LocalAddress, Delta);
+ } break;
+ case ELF::R_PPC64_ADDR64:
+ writeInt64BE(LocalAddress, Value + Addend);
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ // Apply a SystemZ (s390x) relocation at Section+Offset. *DBL kinds store
+ // the PC-relative delta divided by 2 (instructions are halfword-aligned);
+ // plain PC*/R_390_N kinds store the raw delta or absolute value. All
+ // stores are big-endian.
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_390_PC16DBL:
+ case ELF::R_390_PLT16DBL: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
+ writeInt16BE(LocalAddress, Delta / 2);
+ break;
+ }
+ case ELF::R_390_PC32DBL:
+ case ELF::R_390_PLT32DBL: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
+ writeInt32BE(LocalAddress, Delta / 2);
+ break;
+ }
+ case ELF::R_390_PC16: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
+ writeInt16BE(LocalAddress, Delta);
+ break;
+ }
+ case ELF::R_390_PC32: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
+ writeInt32BE(LocalAddress, Delta);
+ break;
+ }
+ case ELF::R_390_PC64: {
+ int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+ writeInt64BE(LocalAddress, Delta);
+ break;
+ }
+ case ELF::R_390_8:
+ *LocalAddress = (uint8_t)(Value + Addend);
+ break;
+ case ELF::R_390_16:
+ writeInt16BE(LocalAddress, Value + Addend);
+ break;
+ case ELF::R_390_32:
+ writeInt32BE(LocalAddress, Value + Addend);
+ break;
+ case ELF::R_390_64:
+ writeInt64BE(LocalAddress, Value + Addend);
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ // Apply a BPF relocation at Section+Offset. Endianness follows the target
+ // (bpfeb is big-endian, bpfel little-endian).
+ bool isBE = Arch == Triple::bpfeb;
+
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ // These kinds are deliberately no-ops here.
+ case ELF::R_BPF_NONE:
+ case ELF::R_BPF_64_64:
+ case ELF::R_BPF_64_32:
+ case ELF::R_BPF_64_NODYLD32:
+ break;
+ case ELF::R_BPF_64_ABS64: {
+ write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_BPF_64_ABS32: {
+ Value += Addend;
+ assert(Value <= UINT32_MAX);
+ write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ }
+}
+
+// The target location for the relocation is described by RE.SectionID and
+// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+// SectionEntry has three members describing its location.
+// SectionEntry::Address is the address at which the section has been loaded
+// into memory in the current (host) process. SectionEntry::LoadAddress is the
+// address that the section will have in the target process.
+// SectionEntry::ObjAddress is the address of the bits for this section in the
+// original emitted object image (also in the current address space).
+//
+// Relocations will be applied as if the section were loaded at
+// SectionEntry::LoadAddress, but they will be applied at an address based
+// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
+// Target memory contents if they are required for value calculations.
+//
+// The Value parameter here is the load address of the symbol for the
+// relocation to be applied. For relocations which refer to symbols in the
+// current object Value will be the LoadAddress of the section in which
+// the symbol resides (RE.Addend provides additional information about the
+// symbol location). For external symbols, Value will be the address of the
+// symbol in the target address space.
+void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ // Thin convenience overload: unpack the RelocationEntry and forward to the
+ // per-architecture dispatcher below.
+ const SectionEntry &Section = Sections[RE.SectionID];
+ return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+}
+
+void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID) {
+ // Dispatch to the architecture-specific resolver. 32-bit targets receive
+ // Value/Addend truncated to 32 bits.
+ switch (Arch) {
+ case Triple::x86_64:
+ resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
+ break;
+ case Triple::x86:
+ resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::arm: // Fall through.
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::ppc: // Fall through.
+ case Triple::ppcle:
+ resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::ppc64: // Fall through.
+ case Triple::ppc64le:
+ resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::systemz:
+ resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::bpfel:
+ case Triple::bpfeb:
+ resolveBPFRelocation(Section, Offset, Value, Type, Addend);
+ break;
+ default:
+ llvm_unreachable("Unsupported CPU type!");
+ }
+}
+
+// Address of the relocation target within the ORIGINAL object image (not the
+// loaded copy), used to read placeholder/implicit-addend bytes.
+void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const {
+ return (void *)(Sections[SectionID].getObjAddress() + Offset);
+}
+
+// Record a relocation that needs no stub or GOT machinery: queue it against
+// the referenced symbol if named, otherwise against the referenced section.
+void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+}
+
+// For a MIPS HI16/GOT16/PCHI16 relocation, return the LO16-style relocation
+// kind it pairs with, or R_MIPS_NONE if the kind does not pair (GOT16 only
+// pairs for local symbols).
+uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
+ bool IsLocal) const {
+ switch (RelType) {
+ case ELF::R_MICROMIPS_GOT16:
+ if (IsLocal)
+ return ELF::R_MICROMIPS_LO16;
+ break;
+ case ELF::R_MICROMIPS_HI16:
+ return ELF::R_MICROMIPS_LO16;
+ case ELF::R_MIPS_GOT16:
+ if (IsLocal)
+ return ELF::R_MIPS_LO16;
+ break;
+ case ELF::R_MIPS_HI16:
+ return ELF::R_MIPS_LO16;
+ case ELF::R_MIPS_PCHI16:
+ return ELF::R_MIPS_PCLO16;
+ default:
+ break;
+ }
+ return ELF::R_MIPS_NONE;
+}
+
+// Sometimes we don't need to create a thunk for a branch.
+// This typically happens when the branch target is located
+// in the same object file. In such a case the target is either
+// a weak symbol or a symbol in a different executable section.
+// This function checks whether the branch target is located in the
+// same object file and whether the distance between source and target
+// fits the R_AARCH64_CALL26 relocation. If both conditions are
+// met, it emits a direct jump to the target and returns true.
+// Otherwise false is returned and a thunk is created.
+bool RuntimeDyldELF::resolveAArch64ShortBranch(
+ unsigned SectionID, relocation_iterator RelI,
+ const RelocationValueRef &Value) {
+ uint64_t TargetOffset;
+ unsigned TargetSectionID;
+ if (Value.SymbolName) {
+ auto Loc = GlobalSymbolTable.find(Value.SymbolName);
+
+ // Don't create direct branch for external symbols.
+ if (Loc == GlobalSymbolTable.end())
+ return false;
+
+ const auto &SymInfo = Loc->second;
+
+ TargetSectionID = SymInfo.getSectionID();
+ TargetOffset = SymInfo.getOffset();
+ } else {
+ TargetSectionID = Value.SectionID;
+ TargetOffset = 0;
+ }
+
+ // We don't actually know the load addresses at this point, so if the
+ // branch is cross-section, we don't know exactly how far away it is.
+ if (TargetSectionID != SectionID)
+ return false;
+
+ uint64_t SourceOffset = RelI->getOffset();
+
+ // R_AARCH64_CALL26 requires immediate to be in range -2^27 <= imm < 2^27
+ // If distance between source and target is out of range then we should
+ // create thunk.
+ if (!isInt<28>(TargetOffset + Value.Addend - SourceOffset))
+ return false;
+
+ // In range: queue the branch relocation directly against the target.
+ RelocationEntry RE(SectionID, SourceOffset, RelI->getType(), Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return true;
+}
+
+// Handle an AArch64 CALL26/JUMP26 branch: reuse an existing stub, branch
+// directly when the target is provably in range, or otherwise emit a new
+// far-call stub and point the branch at it.
+void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
+ const RelocationValueRef &Value,
+ relocation_iterator RelI,
+ StubMap &Stubs) {
+
+ LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ uint64_t Offset = RelI->getOffset();
+ unsigned RelType = RelI->getType();
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(Section, Offset,
+ Section.getLoadAddressWithOffset(i->second), RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()));
+
+ // The stub materializes the full 64-bit target address with a
+ // movz/movk sequence: one MOVW_UABS relocation per 16-bit slice, at
+ // consecutive 4-byte instruction slots.
+ RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
+ RelocationEntry REmovk_g2(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
+ RelocationEntry REmovk_g1(SectionID,
+ StubTargetAddr - Section.getAddress() + 8,
+ ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
+ RelocationEntry REmovk_g0(SectionID,
+ StubTargetAddr - Section.getAddress() + 12,
+ ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REmovz_g3, Value.SymbolName);
+ addRelocationForSymbol(REmovk_g2, Value.SymbolName);
+ addRelocationForSymbol(REmovk_g1, Value.SymbolName);
+ addRelocationForSymbol(REmovk_g0, Value.SymbolName);
+ } else {
+ addRelocationForSection(REmovz_g3, Value.SectionID);
+ addRelocationForSection(REmovk_g2, Value.SectionID);
+ addRelocationForSection(REmovk_g1, Value.SectionID);
+ addRelocationForSection(REmovk_g0, Value.SectionID);
+ }
+ // Point the original branch at the stub, then reserve the stub's space.
+ resolveRelocation(Section, Offset,
+ Section.getLoadAddressWithOffset(Section.getStubOffset()),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+}
+
+Expected<relocation_iterator>
+RuntimeDyldELF::processRelocationRef(
+ unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
+ ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
+ const auto &Obj = cast<ELFObjectFileBase>(O);
+ uint64_t RelType = RelI->getType();
+ int64_t Addend = 0;
+ if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
+ Addend = *AddendOrErr;
+ else
+ consumeError(AddendOrErr.takeError());
+ elf_symbol_iterator Symbol = RelI->getSymbol();
+
+ // Obtain the symbol name which is referenced in the relocation
+ StringRef TargetName;
+ if (Symbol != Obj.symbol_end()) {
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+ }
+ LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
+ RelocationValueRef Value;
+ // First search for the symbol in the local symbol table
+ SymbolRef::Type SymType = SymbolRef::ST_Unknown;
+
+ // Search for the symbol in the global symbol table
+ RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
+ if (Symbol != Obj.symbol_end()) {
+ gsi = GlobalSymbolTable.find(TargetName.data());
+ Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ SymType = *SymTypeOrErr;
+ }
+ if (gsi != GlobalSymbolTable.end()) {
+ const auto &SymInfo = gsi->second;
+ Value.SectionID = SymInfo.getSectionID();
+ Value.Offset = SymInfo.getOffset();
+ Value.Addend = SymInfo.getOffset() + Addend;
+ } else {
+ switch (SymType) {
+ case SymbolRef::ST_Debug: {
+ // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
+ // and can be changed by another developers. Maybe best way is add
+ // a new symbol type ST_Section to SymbolRef and use it.
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SectionOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ section_iterator si = *SectionOrErr;
+ if (si == Obj.section_end())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ bool isCode = si->isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
+ ObjSectionToID))
+ Value.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Data:
+ case SymbolRef::ST_Function:
+ case SymbolRef::ST_Other:
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+
+ // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
+ // will manifest here as a NULL symbol name.
+ // We can set this as a valid (but empty) symbol name, and rely
+ // on addRelocationForSymbol to handle this.
+ if (!Value.SymbolName)
+ Value.SymbolName = "";
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
+ }
+ }
+
+ uint64_t Offset = RelI->getOffset();
+
+ LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
+ << "\n");
+ if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be)) {
+ if ((RelType == ELF::R_AARCH64_CALL26 ||
+ RelType == ELF::R_AARCH64_JUMP26) &&
+ MemMgr.allowStubAllocation()) {
+ resolveAArch64Branch(SectionID, Value, RelI, Stubs);
+ } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
+ // Create new GOT entry or find existing one. If GOT entry is
+ // to be created, then we also emit ABS64 relocation for it.
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_ADR_PREL_PG_HI21);
+
+ } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_LDST64_ABS_LO12_NC);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (Arch == Triple::arm) {
+ if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24) {
+ // This is an ARM branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(Section, Offset,
+ Section.getLoadAddressWithOffset(i->second), RelType,
+ 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()));
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_ARM_ABS32, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ resolveRelocation(
+ Section, Offset,
+ Section.getLoadAddressWithOffset(Section.getStubOffset()), RelType,
+ 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
+ if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
+ RelType == ELF::R_ARM_ABS32) {
+ Value.Addend += *Placeholder;
+ } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
+ // See ELF for ARM documentation
+ Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsO32ABI) {
+ uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
+ computePlaceholderAddress(SectionID, Offset));
+ uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
+ if (RelType == ELF::R_MIPS_26) {
+ // This is an Mips branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Extract the addend from the instruction.
+ // We shift up by two since the Value will be down shifted again
+ // when applying the relocation.
+ uint32_t Addend = (Opcode & 0x03ffffff) << 2;
+
+ Value.Addend += Addend;
+
+ // Look up for existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
+ int64_t Addend = (Opcode & 0x0000ffff) << 16;
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ PendingRelocs.push_back(std::make_pair(Value, RE));
+ } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
+ int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
+ for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
+ const RelocationValueRef &MatchingValue = I->first;
+ RelocationEntry &Reloc = I->second;
+ if (MatchingValue == Value &&
+ RelType == getMatchingLoRelocation(Reloc.RelType) &&
+ SectionID == Reloc.SectionID) {
+ Reloc.Addend += Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(Reloc, Value.SymbolName);
+ else
+ addRelocationForSection(Reloc, Value.SectionID);
+ I = PendingRelocs.erase(I);
+ } else
+ ++I;
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else {
+ if (RelType == ELF::R_MIPS_32)
+ Value.Addend += Opcode;
+ else if (RelType == ELF::R_MIPS_PC16)
+ Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC19_S2)
+ Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC21_S2)
+ Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC26_S2)
+ Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsN32ABI || IsMipsN64ABI) {
+ uint32_t r_type = RelType & 0xff;
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
+ || r_type == ELF::R_MIPS_GOT_DISP) {
+ StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
+ if (i != GOTSymbolOffsets.end())
+ RE.SymOffset = i->second;
+ else {
+ RE.SymOffset = allocateGOTEntries(1);
+ GOTSymbolOffsets[TargetName] = RE.SymOffset;
+ }
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_MIPS_26) {
+ // This is an Mips branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look up for existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ if (IsMipsN32ABI) {
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ } else {
+ // Creating Highest, Higher, Hi and Lo relocations for the filled stub
+ // instructions.
+ RelocationEntry REHighest(SectionID,
+ StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HIGHEST, Value.Addend);
+ RelocationEntry REHigher(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_HIGHER, Value.Addend);
+ RelocationEntry REHi(SectionID,
+ StubTargetAddr - Section.getAddress() + 12,
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 20,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHighest, Value.SymbolName);
+ addRelocationForSymbol(REHigher, Value.SymbolName);
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHighest, Value.SectionID);
+ addRelocationForSection(REHigher, Value.SectionID);
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+
+ } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+ if (RelType == ELF::R_PPC64_REL24) {
+ // Determine ABI variant in use for this object.
+ unsigned AbiVariant = Obj.getPlatformFlags();
+ AbiVariant &= ELF::EF_PPC64_ABI;
+ // A PPC branch relocation will need a stub function if the target is
+ // an external symbol (either Value.SymbolName is set, or SymType is
+ // Symbol::ST_Unknown) or if the target address is not within the
+ // signed 24-bits branch address.
+ SectionEntry &Section = Sections[SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(Offset);
+ bool RangeOverflow = false;
+ bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
+ if (!IsExtern) {
+ if (AbiVariant != 2) {
+ // In the ELFv1 ABI, a function call may point to the .opd entry,
+ // so the final symbol value is calculated based on the relocation
+ // values in the .opd section.
+ if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else {
+ // In the ELFv2 ABI, a function symbol may provide a local entry
+ // point, which must be used for direct calls.
+ if (Value.SectionID == SectionID){
+ uint8_t SymOther = Symbol->getOther();
+ Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
+ }
+ }
+ uint8_t *RelocTarget =
+ Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
+ int64_t delta = static_cast<int64_t>(Target - RelocTarget);
+ // If it is within 26-bits branch range, just set the branch target
+ if (SignExtend64<26>(delta) != delta) {
+ RangeOverflow = true;
+ } else if ((AbiVariant != 2) ||
+ (AbiVariant == 2 && Value.SectionID == SectionID)) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
+ RangeOverflow) {
+ // It is an external symbol (either Value.SymbolName is set, or
+ // SymType is SymbolRef::ST_Unknown) or out of range.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ // Symbol function stub already created, just relocate to it
+ resolveRelocation(Section, Offset,
+ Section.getLoadAddressWithOffset(i->second),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()),
+ AbiVariant);
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_PPC64_ADDR64, Value.Addend);
+
+ // Generates the 64-bits address loads as exemplified in section
+ // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
+ // apply to the low part of the instructions, so we have to update
+ // the offset according to the target endianness.
+ uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
+ if (!IsTargetLittleEndian)
+ StubRelocOffset += 2;
+
+ RelocationEntry REhst(SectionID, StubRelocOffset + 0,
+ ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
+ RelocationEntry REhr(SectionID, StubRelocOffset + 4,
+ ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
+ RelocationEntry REh(SectionID, StubRelocOffset + 12,
+ ELF::R_PPC64_ADDR16_HI, Value.Addend);
+ RelocationEntry REl(SectionID, StubRelocOffset + 16,
+ ELF::R_PPC64_ADDR16_LO, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REhst, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
+ } else {
+ addRelocationForSection(REhst, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
+ }
+
+ resolveRelocation(
+ Section, Offset,
+ Section.getLoadAddressWithOffset(Section.getStubOffset()),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
+ // Restore the TOC for external calls
+ if (AbiVariant == 2)
+ writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
+ else
+ writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
+ }
+ }
+ } else if (RelType == ELF::R_PPC64_TOC16 ||
+ RelType == ELF::R_PPC64_TOC16_DS ||
+ RelType == ELF::R_PPC64_TOC16_LO ||
+ RelType == ELF::R_PPC64_TOC16_LO_DS ||
+ RelType == ELF::R_PPC64_TOC16_HI ||
+ RelType == ELF::R_PPC64_TOC16_HA) {
+ // These relocations are supposed to subtract the TOC address from
+ // the final value. This does not fit cleanly into the RuntimeDyld
+ // scheme, since there may be *two* sections involved in determining
+ // the relocation value (the section of the symbol referred to by the
+ // relocation, and the TOC section associated with the current module).
+ //
+ // Fortunately, these relocations are currently only ever generated
+ // referring to symbols that themselves reside in the TOC, which means
+ // that the two sections are actually the same. Thus they cancel out
+ // and we can immediately resolve the relocation right now.
+ switch (RelType) {
+ case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
+ case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
+ case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
+ case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
+ case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
+ case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
+ default: llvm_unreachable("Wrong relocation type.");
+ }
+
+ RelocationValueRef TOCValue;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
+ return std::move(Err);
+ if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
+ llvm_unreachable("Unsupported TOC relocation.");
+ Value.Addend -= TOCValue.Addend;
+ resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
+ } else {
+ // There are two ways to refer to the TOC address directly: either
+ // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
+ // ignored), or via any relocation that refers to the magic ".TOC."
+ // symbols (in which case the addend is respected).
+ if (RelType == ELF::R_PPC64_TOC) {
+ RelType = ELF::R_PPC64_ADDR64;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else if (TargetName == ".TOC.") {
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ Value.Addend += Addend;
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ } else if (Arch == Triple::systemz &&
+ (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
+ // Create function stubs for both PLT and GOT references, regardless of
+ // whether the GOT reference is to data or code. The stub contains the
+ // full address of the symbol, as needed by GOT references, and the
+ // executable part only adds an overhead of 8 bytes.
+ //
+ // We could try to conserve space by allocating the code and data
+ // parts of the stub separately. However, as things stand, we allocate
+ // a stub for every relocation, so using a GOT in JIT code should be
+ // no less space efficient than using an explicit constant pool.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ StubAddress =
+ alignTo(BaseAddress + Section.getStubOffset(), getStubAlignment());
+ unsigned StubOffset = StubAddress - BaseAddress;
+
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+ RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
+ Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+
+ if (RelType == ELF::R_390_GOTENT)
+ resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
+ Addend);
+ else
+ resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
+ } else if (Arch == Triple::x86_64) {
+ if (RelType == ELF::R_X86_64_PLT32) {
+ // The way the PLT relocations normally work is that the linker allocates
+ // the
+ // PLT and this relocation makes a PC-relative call into the PLT. The PLT
+ // entry will then jump to an address provided by the GOT. On first call,
+ // the
+ // GOT address will point back into PLT code that resolves the symbol. After
+ // the first call, the GOT entry points to the actual function.
+ //
+ // For local functions we're ignoring all of that here and just replacing
+ // the PLT32 relocation type with PC32, which will translate the relocation
+ // into a PC-relative call directly to the function. For external symbols we
+ // can't be sure the function will be within 2^32 bytes of the call site, so
+ // we need to create a stub, which calls into the GOT. This case is
+ // equivalent to the usual PLT implementation except that we use the stub
+ // mechanism in RuntimeDyld (which puts stubs at the end of the section)
+ // rather than allocating a PLT section.
+ if (Value.SymbolName && MemMgr.allowStubAllocation()) {
+ // This is a call to an external function.
+ // Look for an existing stub.
+ SectionEntry *Section = &Sections[SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section->getAddress()) + i->second;
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function (equivalent to a PLT entry).
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section->getAddress());
+ StubAddress = alignTo(BaseAddress + Section->getStubOffset(),
+ getStubAlignment());
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+
+ // Bump our stub offset counter
+ Section->advanceStubOffset(getMaxStubSize());
+
+ // Allocate a GOT Entry
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ // This potentially creates a new Section which potentially
+ // invalidates the Section pointer, so reload it.
+ Section = &Sections[SectionID];
+
+ // The load of the GOT address has an addend of -4
+ resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ addRelocationForSymbol(
+ computeGOTOffsetRE(GOTOffset, 0, ELF::R_X86_64_64),
+ Value.SymbolName);
+ }
+
+ // Make the target call a call into the stub table.
+ resolveRelocation(*Section, Offset, StubAddress, ELF::R_X86_64_PC32,
+ Addend);
+ } else {
+ Value.Addend += support::ulittle32_t::ref(
+ computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, ELF::R_X86_64_PC32, Value);
+ }
+ } else if (RelType == ELF::R_X86_64_GOTPCREL ||
+ RelType == ELF::R_X86_64_GOTPCRELX ||
+ RelType == ELF::R_X86_64_REX_GOTPCRELX) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOT64) {
+ // Fill in a 64-bit GOT offset.
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveRelocation(Sections[SectionID], Offset, GOTOffset,
+ ELF::R_X86_64_64, 0);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOTPC32) {
+ // Materialize the address of the base of the GOT relative to the PC.
+ // This doesn't create a GOT entry, but it does mean we need a GOT
+ // section.
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC32);
+ } else if (RelType == ELF::R_X86_64_GOTPC64) {
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC64);
+ } else if (RelType == ELF::R_X86_64_GOTOFF64) {
+ // GOTOFF relocations ultimately require a section difference relocation.
+ (void)allocateGOTEntries(0);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC32) {
+ Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC64) {
+ Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_GOTTPOFF) {
+ processX86_64GOTTPOFFRelocation(SectionID, Offset, Value, Addend);
+ } else if (RelType == ELF::R_X86_64_TLSGD ||
+ RelType == ELF::R_X86_64_TLSLD) {
+ // The next relocation must be the relocation for __tls_get_addr.
+ ++RelI;
+ auto &GetAddrRelocation = *RelI;
+ processX86_64TLSRelocation(SectionID, Offset, RelType, Value, Addend,
+ GetAddrRelocation);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else {
+ if (Arch == Triple::x86) {
+ Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ return ++RelI;
+}
+
+// Relax an R_X86_64_GOTTPOFF (Initial Exec TLS model) relocation.
+//
+// If the instruction bytes around the relocation site match one of the two
+// known Initial Exec code sequences, the code is rewritten in place to use a
+// direct %fs-relative offset and an R_X86_64_TPOFF32 relocation is recorded
+// instead, so no GOT entry is required.  Otherwise a GOT entry holding an
+// R_X86_64_TPOFF64 value is allocated as a fallback.
+//
+// SectionID/Offset identify the relocation site; Value names the TLS symbol
+// (plus its addend); Addend is the original PC-relative relocation addend.
+void RuntimeDyldELF::processX86_64GOTTPOFFRelocation(unsigned SectionID,
+                                                     uint64_t Offset,
+                                                     RelocationValueRef Value,
+                                                     int64_t Addend) {
+  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
+  // to replace the GOTTPOFF relocation with a TPOFF relocation. The spec
+  // only mentions one optimization even though there are two different
+  // code sequences for the Initial Exec TLS Model. We match the code to
+  // find out which one was used.
+
+  // A possible TLS code sequence and its replacement
+  struct CodeSequence {
+    // The expected code sequence
+    ArrayRef<uint8_t> ExpectedCodeSequence;
+    // The negative offset of the GOTTPOFF relocation to the beginning of
+    // the sequence
+    uint64_t TLSSequenceOffset;
+    // The new code sequence
+    ArrayRef<uint8_t> NewCodeSequence;
+    // The offset of the new TPOFF relocation
+    uint64_t TpoffRelocationOffset;
+  };
+
+  std::array<CodeSequence, 2> CodeSequences;
+
+  // Initial Exec Code Model Sequence
+  {
+    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
+        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+        0x00, // mov %fs:0, %rax
+        0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // add x@gotpoff(%rip),
+                                                 // %rax
+    };
+    CodeSequences[0].ExpectedCodeSequence =
+        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
+    CodeSequences[0].TLSSequenceOffset = 12;
+
+    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
+        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0, %rax
+        0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax), %rax
+    };
+    CodeSequences[0].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
+    CodeSequences[0].TpoffRelocationOffset = 12;
+  }
+
+  // Initial Exec Code Model Sequence, II
+  {
+    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
+        0x48, 0x8b, 0x05, 0x00, 0x00, 0x00, 0x00, // mov x@gotpoff(%rip), %rax
+        0x64, 0x48, 0x8b, 0x00, 0x00, 0x00, 0x00 // mov %fs:(%rax), %rax
+    };
+    CodeSequences[1].ExpectedCodeSequence =
+        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
+    CodeSequences[1].TLSSequenceOffset = 3;
+
+    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
+        0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00, // 6 byte nop
+        0x64, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:x@tpoff, %rax
+    };
+    CodeSequences[1].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
+    CodeSequences[1].TpoffRelocationOffset = 10;
+  }
+
+  bool Resolved = false;
+  auto &Section = Sections[SectionID];
+  // Try each known sequence; the first byte-exact match wins.
+  for (const auto &C : CodeSequences) {
+    assert(C.ExpectedCodeSequence.size() == C.NewCodeSequence.size() &&
+           "Old and new code sequences must have the same size");
+
+    if (Offset < C.TLSSequenceOffset ||
+        (Offset - C.TLSSequenceOffset + C.NewCodeSequence.size()) >
+            Section.getSize()) {
+      // This can't be a matching sequence as it doesn't fit in the current
+      // section
+      continue;
+    }
+
+    auto TLSSequenceStartOffset = Offset - C.TLSSequenceOffset;
+    auto *TLSSequence = Section.getAddressWithOffset(TLSSequenceStartOffset);
+    if (ArrayRef<uint8_t>(TLSSequence, C.ExpectedCodeSequence.size()) !=
+        C.ExpectedCodeSequence) {
+      continue;
+    }
+
+    // Matched: patch the instruction bytes in place.
+    memcpy(TLSSequence, C.NewCodeSequence.data(), C.NewCodeSequence.size());
+
+    // The original GOTTPOFF relocation has an addend as it is PC relative,
+    // so it needs to be corrected. The TPOFF32 relocation is used as an
+    // absolute value (which is an offset from %fs:0), so remove the addend
+    // again.
+    RelocationEntry RE(SectionID,
+                       TLSSequenceStartOffset + C.TpoffRelocationOffset,
+                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
+
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+
+    Resolved = true;
+    break;
+  }
+
+  if (!Resolved) {
+    // The GOTTPOFF relocation was not used in one of the sequences
+    // described in the spec, so we can't optimize it to a TPOFF
+    // relocation.
+    uint64_t GOTOffset = allocateGOTEntries(1);
+    resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+                               ELF::R_X86_64_PC32);
+    RelocationEntry RE =
+        computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_TPOFF64);
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+  }
+}
+
+// Relax an R_X86_64_TLSGD / R_X86_64_TLSLD (General/Local Dynamic TLS model)
+// relocation to the Local Exec model.
+//
+// RuntimeDyld links statically with no additional DSOs, so the call to
+// __tls_get_addr can be replaced by a direct %fs-relative access, following
+// "x86-64 Linker Optimizations" from the TLS spec.  GetAddrRelocation is the
+// relocation that immediately follows this one (the one for the
+// __tls_get_addr reference); its type reveals whether the small or large
+// code model was used.  Calls report_fatal_error if the instruction bytes do
+// not match a known compiler-generated sequence.
+void RuntimeDyldELF::processX86_64TLSRelocation(
+    unsigned SectionID, uint64_t Offset, uint64_t RelType,
+    RelocationValueRef Value, int64_t Addend,
+    const RelocationRef &GetAddrRelocation) {
+  // Since we are statically linking and have no additional DSOs, we can resolve
+  // the relocation directly without using __tls_get_addr.
+  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
+  // to replace it with the Local Exec relocation variant.
+
+  // Find out whether the code was compiled with the large or small memory
+  // model. For this we look at the next relocation which is the relocation
+  // for the __tls_get_addr function. If it's a 32 bit relocation, it's the
+  // small code model, with a 64 bit relocation it's the large code model.
+  bool IsSmallCodeModel;
+  // Is the relocation for the __tls_get_addr a PC-relative GOT relocation?
+  bool IsGOTPCRel = false;
+
+  switch (GetAddrRelocation.getType()) {
+  case ELF::R_X86_64_GOTPCREL:
+  case ELF::R_X86_64_REX_GOTPCRELX:
+  case ELF::R_X86_64_GOTPCRELX:
+    IsGOTPCRel = true;
+    [[fallthrough]];
+  case ELF::R_X86_64_PLT32:
+    IsSmallCodeModel = true;
+    break;
+  case ELF::R_X86_64_PLTOFF64:
+    IsSmallCodeModel = false;
+    break;
+  default:
+    report_fatal_error(
+        "invalid TLS relocations for General/Local Dynamic TLS Model: "
+        "expected PLT or GOT relocation for __tls_get_addr function");
+  }
+
+  // The negative offset to the start of the TLS code sequence relative to
+  // the offset of the TLSGD/TLSLD relocation
+  uint64_t TLSSequenceOffset;
+  // The expected start of the code sequence
+  ArrayRef<uint8_t> ExpectedCodeSequence;
+  // The new TLS code sequence that will replace the existing code
+  ArrayRef<uint8_t> NewCodeSequence;
+
+  if (RelType == ELF::R_X86_64_TLSGD) {
+    // The offset of the new TPOFF32 relocation (offset starting from the
+    // beginning of the whole TLS sequence)
+    uint64_t TpoffRelocOffset;
+
+    if (IsSmallCodeModel) {
+      if (!IsGOTPCRel) {
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x66, // data16 (no-op prefix)
+            0x48, 0x8d, 0x3d, 0x00, 0x00,
+            0x00, 0x00, // lea <disp32>(%rip), %rdi
+            0x66, 0x66, // two data16 prefixes
+            0x48,       // rex64 (no-op prefix)
+            0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 4;
+      } else {
+        // This code sequence is not described in the TLS spec but gcc
+        // generates it sometimes.
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x66, // data16 (no-op prefix)
+            0x48, 0x8d, 0x3d, 0x00, 0x00,
+            0x00, 0x00, // lea <disp32>(%rip), %rdi
+            0x66, // data16 prefix (no-op prefix)
+            0x48, // rex64 (no-op prefix)
+            0xff, 0x15, 0x00, 0x00, 0x00,
+            0x00 // call *__tls_get_addr@gotpcrel(%rip)
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 4;
+      }
+
+      // The replacement code for the small code model. It's the same for
+      // both sequences.
+      static const std::initializer_list<uint8_t> SmallSequence = {
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+          0x00, // mov %fs:0, %rax
+          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),
+                                                   // %rax
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      TpoffRelocOffset = 12;
+    } else {
+      static const std::initializer_list<uint8_t> CodeSequence = {
+          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
+                                                    // %rdi
+          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+          0x00, // movabs $__tls_get_addr@pltoff, %rax
+          0x48, 0x01, 0xd8, // add %rbx, %rax
+          0xff, 0xd0        // call *%rax
+      };
+      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+      TLSSequenceOffset = 3;
+
+      // The replacement code for the large code model
+      static const std::initializer_list<uint8_t> LargeSequence = {
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+          0x00, // mov %fs:0, %rax
+          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, // lea x@tpoff(%rax),
+                                                    // %rax
+          0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 // nopw 0x0(%rax,%rax,1)
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
+      TpoffRelocOffset = 12;
+    }
+
+    // The TLSGD/TLSLD relocations are PC-relative, so they have an addend.
+    // The new TPOFF32 relocations is used as an absolute offset from
+    // %fs:0, so remove the TLSGD/TLSLD addend again.
+    RelocationEntry RE(SectionID, Offset - TLSSequenceOffset + TpoffRelocOffset,
+                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+  } else if (RelType == ELF::R_X86_64_TLSLD) {
+    // TLSLD resolves to the base of the module's TLS block; no TPOFF
+    // relocation is added here, only the code sequence is replaced.
+    if (IsSmallCodeModel) {
+      if (!IsGOTPCRel) {
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
+            0x00, 0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 3;
+
+        // The replacement code for the small code model
+        static const std::initializer_list<uint8_t> SmallSequence = {
+            0x66, 0x66, 0x66, // three data16 prefixes (no-op)
+            0x64, 0x48, 0x8b, 0x04, 0x25,
+            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
+        };
+        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      } else {
+        // This code sequence is not described in the TLS spec but gcc
+        // generates it sometimes.
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x48, 0x8d, 0x3d, 0x00,
+            0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
+            0xff, 0x15, 0x00, 0x00,
+            0x00, 0x00 // call
+                       // *__tls_get_addr@gotpcrel(%rip)
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 3;
+
+        // The replacement is code is just like above but it needs to be
+        // one byte longer.
+        static const std::initializer_list<uint8_t> SmallSequence = {
+            0x0f, 0x1f, 0x40, 0x00, // 4 byte nop
+            0x64, 0x48, 0x8b, 0x04, 0x25,
+            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
+        };
+        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      }
+    } else {
+      // This is the same sequence as for the TLSGD sequence with the large
+      // memory model above
+      static const std::initializer_list<uint8_t> CodeSequence = {
+          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
+                                                    // %rdi
+          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+          0x48, // movabs $__tls_get_addr@pltoff, %rax
+          0x01, 0xd8, // add %rbx, %rax
+          0xff, 0xd0  // call *%rax
+      };
+      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+      TLSSequenceOffset = 3;
+
+      // The replacement code for the large code model
+      static const std::initializer_list<uint8_t> LargeSequence = {
+          0x66, 0x66, 0x66, // three data16 prefixes (no-op)
+          0x66, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
+          0x00, // 10 byte nop
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
+    }
+  } else {
+    llvm_unreachable("both TLS relocations handled above");
+  }
+
+  assert(ExpectedCodeSequence.size() == NewCodeSequence.size() &&
+         "Old and new code sequences must have the same size");
+
+  auto &Section = Sections[SectionID];
+  // Unlike GOTTPOFF there is no GOT fallback here, so a mismatch is fatal.
+  if (Offset < TLSSequenceOffset ||
+      (Offset - TLSSequenceOffset + NewCodeSequence.size()) >
+          Section.getSize()) {
+    report_fatal_error("unexpected end of section in TLS sequence");
+  }
+
+  auto *TLSSequence = Section.getAddressWithOffset(Offset - TLSSequenceOffset);
+  if (ArrayRef<uint8_t>(TLSSequence, ExpectedCodeSequence.size()) !=
+      ExpectedCodeSequence) {
+    report_fatal_error(
+        "invalid TLS sequence for Global/Local Dynamic TLS Model");
+  }
+
+  memcpy(TLSSequence, NewCodeSequence.data(), NewCodeSequence.size());
+}
+
+size_t RuntimeDyldELF::getGOTEntrySize() {
+ // We don't use the GOT in all of these cases, but it's essentially free
+ // to put them all here.
+ size_t Result = 0;
+ switch (Arch) {
+ case Triple::x86_64:
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::ppc64:
+ case Triple::ppc64le:
+ case Triple::systemz:
+ Result = sizeof(uint64_t);
+ break;
+ case Triple::x86:
+ case Triple::arm:
+ case Triple::thumb:
+ Result = sizeof(uint32_t);
+ break;
+ case Triple::mips:
+ case Triple::mipsel:
+ case Triple::mips64:
+ case Triple::mips64el:
+ if (IsMipsO32ABI || IsMipsN32ABI)
+ Result = sizeof(uint32_t);
+ else if (IsMipsN64ABI)
+ Result = sizeof(uint64_t);
+ else
+ llvm_unreachable("Mips ABI not handled");
+ break;
+ default:
+ llvm_unreachable("Unsupported CPU type!");
+ }
+ return Result;
+}
+
+uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
+ if (GOTSectionID == 0) {
+ GOTSectionID = Sections.size();
+ // Reserve a section id. We'll allocate the section later
+ // once we know the total size
+ Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
+ }
+ uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
+ CurrentGOTIndex += no;
+ return StartOffset;
+}
+
+uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
+ unsigned GOTRelType) {
+ auto E = GOTOffsetMap.insert({Value, 0});
+ if (E.second) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+
+ // Create relocation for newly created GOT entry
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, GOTRelType);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ E.first->second = GOTOffset;
+ }
+
+ return E.first->second;
+}
+
+void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
+ uint64_t Offset,
+ uint64_t GOTOffset,
+ uint32_t Type) {
+ // Fill in the relative address of the GOT Entry into the stub
+ RelocationEntry GOTRE(SectionID, Offset, Type, GOTOffset);
+ addRelocationForSection(GOTRE, GOTSectionID);
+}
+
+RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
+ uint64_t SymbolOffset,
+ uint32_t Type) {
+ return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
+}
+
+void RuntimeDyldELF::processNewSymbol(const SymbolRef &ObjSymbol, SymbolTableEntry& Symbol) {
+ // This should never return an error as `processNewSymbol` wouldn't have been
+ // called if getFlags() returned an error before.
+ auto ObjSymbolFlags = cantFail(ObjSymbol.getFlags());
+
+ if (ObjSymbolFlags & SymbolRef::SF_Indirect) {
+ if (IFuncStubSectionID == 0) {
+ // Create a dummy section for the ifunc stubs. It will be actually
+ // allocated in finalizeLoad() below.
+ IFuncStubSectionID = Sections.size();
+ Sections.push_back(
+ SectionEntry(".text.__llvm_IFuncStubs", nullptr, 0, 0, 0));
+      // First 64B are reserved for the IFunc resolver
+ IFuncStubOffset = 64;
+ }
+
+ IFuncStubs.push_back(IFuncStub{IFuncStubOffset, Symbol});
+ // Modify the symbol so that it points to the ifunc stub instead of to the
+ // resolver function.
+ Symbol = SymbolTableEntry(IFuncStubSectionID, IFuncStubOffset,
+ Symbol.getFlags());
+ IFuncStubOffset += getMaxIFuncStubSize();
+ }
+}
+
+Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) {
+ if (IsMipsO32ABI)
+ if (!PendingRelocs.empty())
+ return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
+
+ // Create the IFunc stubs if necessary. This must be done before processing
+ // the GOT entries, as the IFunc stubs may create some.
+ if (IFuncStubSectionID != 0) {
+ uint8_t *IFuncStubsAddr = MemMgr.allocateCodeSection(
+ IFuncStubOffset, 1, IFuncStubSectionID, ".text.__llvm_IFuncStubs");
+ if (!IFuncStubsAddr)
+ return make_error<RuntimeDyldError>(
+ "Unable to allocate memory for IFunc stubs!");
+ Sections[IFuncStubSectionID] =
+ SectionEntry(".text.__llvm_IFuncStubs", IFuncStubsAddr, IFuncStubOffset,
+ IFuncStubOffset, 0);
+
+ createIFuncResolver(IFuncStubsAddr);
+
+ LLVM_DEBUG(dbgs() << "Creating IFunc stubs SectionID: "
+ << IFuncStubSectionID << " Addr: "
+ << Sections[IFuncStubSectionID].getAddress() << '\n');
+ for (auto &IFuncStub : IFuncStubs) {
+ auto &Symbol = IFuncStub.OriginalSymbol;
+ LLVM_DEBUG(dbgs() << "\tSectionID: " << Symbol.getSectionID()
+ << " Offset: " << format("%p", Symbol.getOffset())
+ << " IFuncStubOffset: "
+ << format("%p\n", IFuncStub.StubOffset));
+ createIFuncStub(IFuncStubSectionID, 0, IFuncStub.StubOffset,
+ Symbol.getSectionID(), Symbol.getOffset());
+ }
+
+ IFuncStubSectionID = 0;
+ IFuncStubOffset = 0;
+ IFuncStubs.clear();
+ }
+
+ // If necessary, allocate the global offset table
+ if (GOTSectionID != 0) {
+ // Allocate memory for the section
+ size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
+ uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
+ GOTSectionID, ".got", false);
+ if (!Addr)
+ return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
+
+ Sections[GOTSectionID] =
+ SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
+
+ // For now, initialize all GOT entries to zero. We'll fill them in as
+ // needed when GOT-based relocations are applied.
+ memset(Addr, 0, TotalSize);
+ if (IsMipsN32ABI || IsMipsN64ABI) {
+ // To correctly resolve Mips GOT relocations, we need a mapping from
+ // object's sections to GOTs.
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+ if (SI->relocation_begin() != SI->relocation_end()) {
+ Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+ if (!RelSecOrErr)
+ return make_error<RuntimeDyldError>(
+ toString(RelSecOrErr.takeError()));
+
+ section_iterator RelocatedSection = *RelSecOrErr;
+ ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
+ assert(i != SectionMap.end());
+ SectionToGOTMap[i->second] = GOTSectionID;
+ }
+ }
+ GOTSymbolOffsets.clear();
+ }
+ }
+
+ // Look for and record the EH frame section.
+ ObjSectionToIDMap::iterator i, e;
+ for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
+ const SectionRef &Section = i->first;
+
+ StringRef Name;
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (NameOrErr)
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == ".eh_frame") {
+ UnregisteredEHFrameSections.push_back(i->second);
+ break;
+ }
+ }
+
+ GOTOffsetMap.clear();
+ GOTSectionID = 0;
+ CurrentGOTIndex = 0;
+
+ return Error::success();
+}
+
+bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
+ return Obj.isELF();
+}
+
+void RuntimeDyldELF::createIFuncResolver(uint8_t *Addr) const {
+ if (Arch == Triple::x86_64) {
+    // The address of the GOT1 entry is in %r11, the GOT2 entry is in %r11+8
+ // (see createIFuncStub() for details)
+ // The following code first saves all registers that contain the original
+ // function arguments as those registers are not saved by the resolver
+ // function. %r11 is saved as well so that the GOT2 entry can be updated
+ // afterwards. Then it calls the actual IFunc resolver function whose
+ // address is stored in GOT2. After the resolver function returns, all
+ // saved registers are restored and the return value is written to GOT1.
+ // Finally, jump to the now resolved function.
+ // clang-format off
+ const uint8_t StubCode[] = {
+ 0x57, // push %rdi
+ 0x56, // push %rsi
+ 0x52, // push %rdx
+ 0x51, // push %rcx
+ 0x41, 0x50, // push %r8
+ 0x41, 0x51, // push %r9
+ 0x41, 0x53, // push %r11
+ 0x41, 0xff, 0x53, 0x08, // call *0x8(%r11)
+ 0x41, 0x5b, // pop %r11
+ 0x41, 0x59, // pop %r9
+ 0x41, 0x58, // pop %r8
+ 0x59, // pop %rcx
+ 0x5a, // pop %rdx
+ 0x5e, // pop %rsi
+ 0x5f, // pop %rdi
+ 0x49, 0x89, 0x03, // mov %rax,(%r11)
+ 0xff, 0xe0 // jmp *%rax
+ };
+ // clang-format on
+ static_assert(sizeof(StubCode) <= 64,
+ "maximum size of the IFunc resolver is 64B");
+ memcpy(Addr, StubCode, sizeof(StubCode));
+ } else {
+ report_fatal_error(
+ "IFunc resolver is not supported for target architecture");
+ }
+}
+
+void RuntimeDyldELF::createIFuncStub(unsigned IFuncStubSectionID,
+ uint64_t IFuncResolverOffset,
+ uint64_t IFuncStubOffset,
+ unsigned IFuncSectionID,
+ uint64_t IFuncOffset) {
+ auto &IFuncStubSection = Sections[IFuncStubSectionID];
+ auto *Addr = IFuncStubSection.getAddressWithOffset(IFuncStubOffset);
+
+ if (Arch == Triple::x86_64) {
+ // The first instruction loads a PC-relative address into %r11 which is a
+ // GOT entry for this stub. This initially contains the address to the
+ // IFunc resolver. We can use %r11 here as it's caller saved but not used
+ // to pass any arguments. In fact, x86_64 ABI even suggests using %r11 for
+ // code in the PLT. The IFunc resolver will use %r11 to update the GOT
+ // entry.
+ //
+ // The next instruction just jumps to the address contained in the GOT
+ // entry. As mentioned above, we do this two-step jump by first setting
+ // %r11 so that the IFunc resolver has access to it.
+ //
+ // The IFunc resolver of course also needs to know the actual address of
+ // the actual IFunc resolver function. This will be stored in a GOT entry
+ // right next to the first one for this stub. So, the IFunc resolver will
+ // be able to call it with %r11+8.
+ //
+ // In total, two adjacent GOT entries (+relocation) and one additional
+ // relocation are required:
+ // GOT1: Address of the IFunc resolver.
+ // GOT2: Address of the IFunc resolver function.
+ // IFuncStubOffset+3: 32-bit PC-relative address of GOT1.
+ uint64_t GOT1 = allocateGOTEntries(2);
+ uint64_t GOT2 = GOT1 + getGOTEntrySize();
+
+ RelocationEntry RE1(GOTSectionID, GOT1, ELF::R_X86_64_64,
+ IFuncResolverOffset, {});
+ addRelocationForSection(RE1, IFuncStubSectionID);
+ RelocationEntry RE2(GOTSectionID, GOT2, ELF::R_X86_64_64, IFuncOffset, {});
+ addRelocationForSection(RE2, IFuncSectionID);
+
+ const uint8_t StubCode[] = {
+ 0x4c, 0x8d, 0x1d, 0x00, 0x00, 0x00, 0x00, // leaq 0x0(%rip),%r11
+ 0x41, 0xff, 0x23 // jmpq *(%r11)
+ };
+ assert(sizeof(StubCode) <= getMaxIFuncStubSize() &&
+ "IFunc stub size must not exceed getMaxIFuncStubSize()");
+ memcpy(Addr, StubCode, sizeof(StubCode));
+
+ // The PC-relative value starts 4 bytes from the end of the leaq
+ // instruction, so the addend is -4.
+ resolveGOTOffsetRelocation(IFuncStubSectionID, IFuncStubOffset + 3,
+ GOT1 - 4, ELF::R_X86_64_PC32);
+ } else {
+ report_fatal_error("IFunc stub is not supported for target architecture");
+ }
+}
+
+unsigned RuntimeDyldELF::getMaxIFuncStubSize() const {
+ if (Arch == Triple::x86_64) {
+ return 10;
+ }
+ return 0;
+}
+
+bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
+ unsigned RelTy = R.getType();
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+ return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
+ RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
+
+ if (Arch == Triple::x86_64)
+ return RelTy == ELF::R_X86_64_GOTPCREL ||
+ RelTy == ELF::R_X86_64_GOTPCRELX ||
+ RelTy == ELF::R_X86_64_GOT64 ||
+ RelTy == ELF::R_X86_64_REX_GOTPCRELX;
+ return false;
+}
+
+bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
+ if (Arch != Triple::x86_64)
+ return true; // Conservative answer
+
+ switch (R.getType()) {
+ default:
+ return true; // Conservative answer
+
+
+ case ELF::R_X86_64_GOTPCREL:
+ case ELF::R_X86_64_GOTPCRELX:
+ case ELF::R_X86_64_REX_GOTPCRELX:
+ case ELF::R_X86_64_GOTPC64:
+ case ELF::R_X86_64_GOT64:
+ case ELF::R_X86_64_GOTOFF64:
+ case ELF::R_X86_64_PC32:
+ case ELF::R_X86_64_PC64:
+ case ELF::R_X86_64_64:
+    // We know that these relocation types won't need a stub function. This list
+ // can be extended as needed.
+ return false;
+ }
+}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
new file mode 100644
index 000000000000..b73d2af8c0c4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -0,0 +1,236 @@
+//===-- RuntimeDyldELF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace llvm;
+
+namespace llvm {
+namespace object {
+class ELFObjectFileBase;
+}
+
+class RuntimeDyldELF : public RuntimeDyldImpl {
+
+ void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset = 0, SID SectionID = 0);
+
+ void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset);
+
+ void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ bool resolveAArch64ShortBranch(unsigned SectionID, relocation_iterator RelI,
+ const RelocationValueRef &Value);
+
+ void resolveAArch64Branch(unsigned SectionID, const RelocationValueRef &Value,
+ relocation_iterator RelI, StubMap &Stubs);
+
+ void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolvePPC32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveBPFRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ unsigned getMaxStubSize() const override {
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+ return 20; // movz; movk; movk; movk; br
+ if (Arch == Triple::arm || Arch == Triple::thumb)
+ return 8; // 32-bit instruction and 32-bit address
+ else if (IsMipsO32ABI || IsMipsN32ABI)
+ return 16;
+ else if (IsMipsN64ABI)
+ return 32;
+ else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le)
+ return 44;
+ else if (Arch == Triple::x86_64)
+ return 6; // 2-byte jmp instruction + 32-bit relative address
+ else if (Arch == Triple::systemz)
+ return 16;
+ else
+ return 0;
+ }
+
+ Align getStubAlignment() override {
+ if (Arch == Triple::systemz)
+ return Align(8);
+ else
+ return Align(1);
+ }
+
+ void setMipsABI(const ObjectFile &Obj) override;
+
+ Error findPPC64TOCSection(const object::ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+ Error findOPDEntrySection(const object::ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+
+protected:
+ size_t getGOTEntrySize() override;
+
+private:
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+ // Allocate no GOT entries for use in the given section.
+ uint64_t allocateGOTEntries(unsigned no);
+
+ // Find GOT entry corresponding to relocation or create new one.
+ uint64_t findOrAllocGOTEntry(const RelocationValueRef &Value,
+ unsigned GOTRelType);
+
+ // Resolve the relative address of GOTOffset in Section ID and place
+ // it at the given Offset
+ void resolveGOTOffsetRelocation(unsigned SectionID, uint64_t Offset,
+ uint64_t GOTOffset, uint32_t Type);
+
+ // For a GOT entry referenced from SectionID, compute a relocation entry
+ // that will place the final resolved value in the GOT slot
+ RelocationEntry computeGOTOffsetRE(uint64_t GOTOffset, uint64_t SymbolOffset,
+ unsigned Type);
+
+ // Compute the address in memory where we can find the placeholder
+ void *computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const;
+
+ // Split out common case for creating the RelocationEntry for when the
+ // relocation requires no particular advanced processing.
+ void processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value);
+
+ // Return matching *LO16 relocation (Mips specific)
+ uint32_t getMatchingLoRelocation(uint32_t RelType,
+ bool IsLocal = false) const;
+
+ // The tentative ID for the GOT section
+ unsigned GOTSectionID;
+
+ // Records the current number of allocated slots in the GOT
+ // (This would be equivalent to GOTEntries.size() were it not for relocations
+ // that consume more than one slot)
+ unsigned CurrentGOTIndex;
+
+protected:
+ // A map from section to a GOT section that has entries for section's GOT
+ // relocations. (Mips64 specific)
+ DenseMap<SID, SID> SectionToGOTMap;
+
+private:
+ // A map to avoid duplicate got entries (Mips64 specific)
+ StringMap<uint64_t> GOTSymbolOffsets;
+
+ // *HI16 relocations will be added for resolving when we find matching
+ // *LO16 part. (Mips specific)
+ SmallVector<std::pair<RelocationValueRef, RelocationEntry>, 8> PendingRelocs;
+
+ // When a module is loaded we save the SectionID of the EH frame section
+ // in a table until we receive a request to register all unregistered
+ // EH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+
+ // Map between GOT relocation value and corresponding GOT offset
+ std::map<RelocationValueRef, uint64_t> GOTOffsetMap;
+
+ /// The ID of the current IFunc stub section
+ unsigned IFuncStubSectionID = 0;
+ /// The current offset into the IFunc stub section
+ uint64_t IFuncStubOffset = 0;
+
+  /// An IFunc stub and its original symbol
+ struct IFuncStub {
+ /// The offset of this stub in the IFunc stub section
+ uint64_t StubOffset;
+ /// The symbol table entry of the original symbol
+ SymbolTableEntry OriginalSymbol;
+ };
+
+ /// The IFunc stubs
+ SmallVector<IFuncStub, 2> IFuncStubs;
+
+ /// Create the code for the IFunc resolver at the given address. This code
+ /// works together with the stubs created in createIFuncStub() to call the
+ /// resolver function and then jump to the real function address.
+ /// It must not be larger than 64B.
+ void createIFuncResolver(uint8_t *Addr) const;
+ /// Create the code for an IFunc stub for the IFunc that is defined in
+ /// section IFuncSectionID at offset IFuncOffset. The IFunc resolver created
+ /// by createIFuncResolver() is defined in the section IFuncStubSectionID at
+ /// offset IFuncResolverOffset. The code should be written into the section
+ /// with the id IFuncStubSectionID at the offset IFuncStubOffset.
+ void createIFuncStub(unsigned IFuncStubSectionID,
+ uint64_t IFuncResolverOffset, uint64_t IFuncStubOffset,
+ unsigned IFuncSectionID, uint64_t IFuncOffset);
+ /// Return the maximum size of a stub created by createIFuncStub()
+ unsigned getMaxIFuncStubSize() const;
+
+ void processNewSymbol(const SymbolRef &ObjSymbol,
+ SymbolTableEntry &Entry) override;
+ bool relocationNeedsGot(const RelocationRef &R) const override;
+ bool relocationNeedsStub(const RelocationRef &R) const override;
+
+ // Process a GOTTPOFF TLS relocation for x86-64
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void processX86_64GOTTPOFFRelocation(unsigned SectionID, uint64_t Offset,
+ RelocationValueRef Value,
+ int64_t Addend);
+ // Process a TLSLD/TLSGD relocation for x86-64
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void processX86_64TLSRelocation(unsigned SectionID, uint64_t Offset,
+ uint64_t RelType, RelocationValueRef Value,
+ int64_t Addend,
+ const RelocationRef &GetAddrRelocation);
+
+public:
+ RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+ ~RuntimeDyldELF() override;
+
+ static std::unique_ptr<RuntimeDyldELF>
+ create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &O) override;
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override;
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+ void registerEHFrames() override;
+ Error finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
new file mode 100644
index 000000000000..e09c632842d6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -0,0 +1,594 @@
+//===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the implementations of runtime dynamic linker facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
+#include <deque>
+#include <map>
+#include <system_error>
+#include <unordered_map>
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+
+#define UNIMPLEMENTED_RELOC(RelType) \
+ case RelType: \
+ return make_error<RuntimeDyldError>("Unimplemented relocation: " #RelType)
+
+/// SectionEntry - represents a section emitted into memory by the dynamic
+/// linker.
+class SectionEntry {
+ /// Name - section name.
+ std::string Name;
+
+ /// Address - address in the linker's memory where the section resides.
+ uint8_t *Address;
+
+ /// Size - section size. Doesn't include the stubs.
+ size_t Size;
+
+ /// LoadAddress - the address of the section in the target process's memory.
+ /// Used for situations in which JIT-ed code is being executed in the address
+ /// space of a separate process. If the code executes in the same address
+ /// space where it was JIT-ed, this just equals Address.
+ uint64_t LoadAddress;
+
+ /// StubOffset - used for architectures with stub functions for far
+ /// relocations (like ARM).
+ uintptr_t StubOffset;
+
+ /// The total amount of space allocated for this section. This includes the
+ /// section size and the maximum amount of space that the stubs can occupy.
+ size_t AllocationSize;
+
+ /// ObjAddress - address of the section in the in-memory object file. Used
+ /// for calculating relocations in some object formats (like MachO).
+ uintptr_t ObjAddress;
+
+public:
+ SectionEntry(StringRef name, uint8_t *address, size_t size,
+ size_t allocationSize, uintptr_t objAddress)
+ : Name(std::string(name)), Address(address), Size(size),
+ LoadAddress(reinterpret_cast<uintptr_t>(address)), StubOffset(size),
+ AllocationSize(allocationSize), ObjAddress(objAddress) {
+ // AllocationSize is used only in asserts, prevent an "unused private field"
+ // warning:
+ (void)AllocationSize;
+ }
+
+ StringRef getName() const { return Name; }
+
+ uint8_t *getAddress() const { return Address; }
+
+ /// Return the address of this section with an offset.
+ uint8_t *getAddressWithOffset(unsigned OffsetBytes) const {
+ assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+ return Address + OffsetBytes;
+ }
+
+ size_t getSize() const { return Size; }
+
+ uint64_t getLoadAddress() const { return LoadAddress; }
+ void setLoadAddress(uint64_t LA) { LoadAddress = LA; }
+
+ /// Return the load address of this section with an offset.
+ uint64_t getLoadAddressWithOffset(unsigned OffsetBytes) const {
+ assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+ return LoadAddress + OffsetBytes;
+ }
+
+ uintptr_t getStubOffset() const { return StubOffset; }
+
+ void advanceStubOffset(unsigned StubSize) {
+ StubOffset += StubSize;
+ assert(StubOffset <= AllocationSize && "Not enough space allocated!");
+ }
+
+ uintptr_t getObjAddress() const { return ObjAddress; }
+};
+
+/// RelocationEntry - used to represent relocations internally in the dynamic
+/// linker.
+class RelocationEntry {
+public:
+ /// Offset - offset into the section.
+ uint64_t Offset;
+
+ /// Addend - the relocation addend encoded in the instruction itself. Also
+ /// used to make a relocation section relative instead of symbol relative.
+ int64_t Addend;
+
+ /// SectionID - the section this relocation points to.
+ unsigned SectionID;
+
+ /// RelType - relocation type.
+ uint32_t RelType;
+
+ struct SectionPair {
+ uint32_t SectionA;
+ uint32_t SectionB;
+ };
+
+ /// SymOffset - Section offset of the relocation entry's symbol (used for GOT
+ /// lookup).
+ union {
+ uint64_t SymOffset;
+ SectionPair Sections;
+ };
+
+ /// The size of this relocation (MachO specific).
+ unsigned Size;
+
+ /// True if this is a PCRel relocation (MachO specific).
+ bool IsPCRel : 1;
+
+ // ARM (MachO and COFF) specific.
+ bool IsTargetThumbFunc : 1;
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
+ : Offset(offset), Addend(addend), SectionID(id), RelType(type),
+ SymOffset(0), Size(0), IsPCRel(false), IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ uint64_t symoffset)
+ : Offset(offset), Addend(addend), SectionID(id), RelType(type),
+ SymOffset(symoffset), Size(0), IsPCRel(false),
+ IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ bool IsPCRel, unsigned Size)
+ : Offset(offset), Addend(addend), SectionID(id), RelType(type),
+ SymOffset(0), Size(Size), IsPCRel(IsPCRel), IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
+ : Offset(offset), Addend(SectionAOffset - SectionBOffset + addend),
+ SectionID(id), RelType(type), Size(Size), IsPCRel(IsPCRel),
+ IsTargetThumbFunc(false) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size,
+ bool IsTargetThumbFunc)
+ : Offset(offset), Addend(SectionAOffset - SectionBOffset + addend),
+ SectionID(id), RelType(type), Size(Size), IsPCRel(IsPCRel),
+ IsTargetThumbFunc(IsTargetThumbFunc) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
+};
+
+class RelocationValueRef {
+public:
+ unsigned SectionID = 0;
+ uint64_t Offset = 0;
+ int64_t Addend = 0;
+ const char *SymbolName = nullptr;
+ bool IsStubThumb = false;
+
+ inline bool operator==(const RelocationValueRef &Other) const {
+ return SectionID == Other.SectionID && Offset == Other.Offset &&
+ Addend == Other.Addend && SymbolName == Other.SymbolName &&
+ IsStubThumb == Other.IsStubThumb;
+ }
+ inline bool operator<(const RelocationValueRef &Other) const {
+ if (SectionID != Other.SectionID)
+ return SectionID < Other.SectionID;
+ if (Offset != Other.Offset)
+ return Offset < Other.Offset;
+ if (Addend != Other.Addend)
+ return Addend < Other.Addend;
+ if (IsStubThumb != Other.IsStubThumb)
+ return IsStubThumb < Other.IsStubThumb;
+ return SymbolName < Other.SymbolName;
+ }
+};
+
+/// Symbol info for RuntimeDyld.
+class SymbolTableEntry {
+public:
+ SymbolTableEntry() = default;
+
+ SymbolTableEntry(unsigned SectionID, uint64_t Offset, JITSymbolFlags Flags)
+ : Offset(Offset), SectionID(SectionID), Flags(Flags) {}
+
+ unsigned getSectionID() const { return SectionID; }
+ uint64_t getOffset() const { return Offset; }
+ void setOffset(uint64_t NewOffset) { Offset = NewOffset; }
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ uint64_t Offset = 0;
+ unsigned SectionID = 0;
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+};
+
+typedef StringMap<SymbolTableEntry> RTDyldSymbolTable;
+
+class RuntimeDyldImpl {
+ friend class RuntimeDyld::LoadedObjectInfo;
+protected:
+ static const unsigned AbsoluteSymbolSection = ~0U;
+
+ // The MemoryManager to load objects into.
+ RuntimeDyld::MemoryManager &MemMgr;
+
+ // The symbol resolver to use for external symbols.
+ JITSymbolResolver &Resolver;
+
+ // A list of all sections emitted by the dynamic linker. These sections are
+ // referenced in the code by means of their index in this list - SectionID.
+ // Because references may be kept while the list grows, use a container that
+ // guarantees reference stability.
+ typedef std::deque<SectionEntry> SectionList;
+ SectionList Sections;
+
+ typedef unsigned SID; // Type for SectionIDs
+#define RTDYLD_INVALID_SECTION_ID ((RuntimeDyldImpl::SID)(-1))
+
+ // Keep a map of sections from object file to the SectionID which
+ // references it.
+ typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
+
+ // A global symbol table for symbols from all loaded modules.
+ RTDyldSymbolTable GlobalSymbolTable;
+
+ // Keep a map of common symbols to their info pairs
+ typedef std::vector<SymbolRef> CommonSymbolList;
+
+ // For each symbol, keep a list of relocations based on it. Anytime
+ // its address is reassigned (the JIT re-compiled the function, e.g.),
+ // the relocations get re-resolved.
+ // The symbol (or section) the relocation is sourced from is the Key
+ // in the relocation list where it's stored.
+ typedef SmallVector<RelocationEntry, 64> RelocationList;
+ // Relocations to sections already loaded. Indexed by SectionID which is the
+ // source of the address. The target where the address will be written is
+ // SectionID/Offset in the relocation itself.
+ std::unordered_map<unsigned, RelocationList> Relocations;
+
+ // Relocations to external symbols that are not yet resolved. Symbols are
+ // external when they aren't found in the global symbol table of all loaded
+ // modules. This map is indexed by symbol name.
+ StringMap<RelocationList> ExternalSymbolRelocations;
+
+
+ typedef std::map<RelocationValueRef, uintptr_t> StubMap;
+
+ Triple::ArchType Arch;
+ bool IsTargetLittleEndian;
+ bool IsMipsO32ABI;
+ bool IsMipsN32ABI;
+ bool IsMipsN64ABI;
+
+ // True if all sections should be passed to the memory manager, false if only
+ // sections containing relocations should be. Defaults to 'false'.
+ bool ProcessAllSections;
+
+ // This mutex prevents simultaneously loading objects from two different
+ // threads. This keeps us from having to protect individual data structures
+ // and guarantees that section allocation requests to the memory manager
+ // won't be interleaved between modules. It is also used in mapSectionAddress
+ // and resolveRelocations to protect write access to internal data structures.
+ //
+ // loadObject may be called on the same thread during the handling of
+ // processRelocations, and that's OK. The handling of the relocation lists
+ // is written in such a way as to work correctly if new elements are added to
+ // the end of the list while the list is being processed.
+ sys::Mutex lock;
+
+ using NotifyStubEmittedFunction =
+ RuntimeDyld::NotifyStubEmittedFunction;
+ NotifyStubEmittedFunction NotifyStubEmitted;
+
+ virtual unsigned getMaxStubSize() const = 0;
+ virtual Align getStubAlignment() = 0;
+
+ bool HasError;
+ std::string ErrorStr;
+
+  /// Write a 16-bit value at Addr in the target's byte order.
+  /// (Despite the BE suffix, IsTargetLittleEndian decides the layout.)
+  void writeInt16BE(uint8_t *Addr, uint16_t Value) {
+    const llvm::endianness E = IsTargetLittleEndian ? llvm::endianness::little
+                                                    : llvm::endianness::big;
+    llvm::support::endian::write<uint16_t>(Addr, Value, E);
+  }
+
+  /// Write a 32-bit value at Addr in the target's byte order.
+  void writeInt32BE(uint8_t *Addr, uint32_t Value) {
+    const llvm::endianness E = IsTargetLittleEndian ? llvm::endianness::little
+                                                    : llvm::endianness::big;
+    llvm::support::endian::write<uint32_t>(Addr, Value, E);
+  }
+
+  /// Write a 64-bit value at Addr in the target's byte order.
+  void writeInt64BE(uint8_t *Addr, uint64_t Value) {
+    const llvm::endianness E = IsTargetLittleEndian ? llvm::endianness::little
+                                                    : llvm::endianness::big;
+    llvm::support::endian::write<uint64_t>(Addr, Value, E);
+  }
+
+  /// Reset all MIPS ABI flags. Subclasses that understand MIPS objects
+  /// override this to detect the ABI from the object file.
+  virtual void setMipsABI(const ObjectFile &Obj) {
+    IsMipsO32ABI = IsMipsN32ABI = IsMipsN64ABI = false;
+  }
+
+  /// Endian-aware read. Read the least significant Size bytes from Src.
+ uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const;
+
+ /// Endian-aware write. Write the least significant Size bytes from Value to
+ /// Dst.
+ void writeBytesUnaligned(uint64_t Value, uint8_t *Dst, unsigned Size) const;
+
+ /// Generate JITSymbolFlags from a libObject symbol.
+ virtual Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &Sym);
+
+ /// Modify the given target address based on the given symbol flags.
+ /// This can be used by subclasses to tweak addresses based on symbol flags,
+ /// For example: the MachO/ARM target uses it to set the low bit if the target
+ /// is a thumb symbol.
+  virtual uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+                                             JITSymbolFlags Flags) const {
+    // Default: flags do not affect the address.
+    return Addr;
+  }
+
+ /// Given the common symbols discovered in the object file, emit a
+ /// new section for them and update the symbol mappings in the object and
+ /// symbol table.
+ Error emitCommonSymbols(const ObjectFile &Obj,
+ CommonSymbolList &CommonSymbols, uint64_t CommonSize,
+ uint32_t CommonAlign);
+
+ /// Emits section data from the object file to the MemoryManager.
+ /// \param IsCode if it's true then allocateCodeSection() will be
+ /// used for emits, else allocateDataSection() will be used.
+ /// \return SectionID.
+ Expected<unsigned> emitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode);
+
+  /// Find Section in LocalSections. If the section is not found - emit
+  /// it and store in LocalSections.
+  /// \param IsCode if it's true then allocateCodeSection() will be
+  /// used for emits, else allocateDataSection() will be used.
+ /// \return SectionID.
+ Expected<unsigned> findOrEmitSection(const ObjectFile &Obj,
+ const SectionRef &Section, bool IsCode,
+ ObjSectionToIDMap &LocalSections);
+
+ // Add a relocation entry that uses the given section.
+ void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
+
+ // Add a relocation entry that uses the given symbol. This symbol may
+ // be found in the global symbol table, or it may be external.
+ void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
+
+ /// Emits long jump instruction to Addr.
+ /// \return Pointer to the memory area for emitting target address.
+ uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
+
+ /// Resolves relocations from Relocs list with address from Value.
+ void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
+
+ /// A object file specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address to apply the relocation action
+ virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
+
+ /// Parses one or more object file relocations (some object files use
+ /// relocation pairs) and stores it to Relocations or SymbolRelocations
+ /// (this depends on the object file type).
+ /// \return Iterator to the next relocation that needs to be parsed.
+ virtual Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) = 0;
+
+ void applyExternalSymbolRelocations(
+ const StringMap<JITEvaluatedSymbol> ExternalSymbolMap);
+
+ /// Resolve relocations to external symbols.
+ Error resolveExternalSymbols();
+
+ // Compute an upper bound of the memory that is required to load all
+ // sections
+ Error computeTotalAllocSize(const ObjectFile &Obj, uint64_t &CodeSize,
+ Align &CodeAlign, uint64_t &RODataSize,
+ Align &RODataAlign, uint64_t &RWDataSize,
+ Align &RWDataAlign);
+
+ // Compute GOT size
+ unsigned computeGOTSize(const ObjectFile &Obj);
+
+ // Compute the stub buffer size required for a section
+ unsigned computeSectionStubBufSize(const ObjectFile &Obj,
+ const SectionRef &Section);
+
+ // Implementation of the generic part of the loadObject algorithm.
+ Expected<ObjSectionToIDMap> loadObjectImpl(const object::ObjectFile &Obj);
+
+  // Return size of a Global Offset Table (GOT) entry. The default of 0
+  // means the subclass does not use a GOT.
+  virtual size_t getGOTEntrySize() { return 0; }
+
+  // Hook for the subclasses to do further processing when a symbol is added to
+  // the global symbol table. This function may modify the symbol table entry.
+  virtual void processNewSymbol(const SymbolRef &ObjSymbol, SymbolTableEntry& Entry) {}
+
+  // Return true if the relocation R may require allocating a GOT entry.
+  // Overestimating is safe; this only sizes the reservation.
+  virtual bool relocationNeedsGot(const RelocationRef &R) const {
+    return false;
+  }
+
+  // Return true if the relocation R may require allocating a stub.
+  virtual bool relocationNeedsStub(const RelocationRef &R) const {
+    return true; // Conservative answer
+  }
+
+public:
+  // Construct with the memory manager that owns section allocations and the
+  // resolver used for external symbols. Starts with no error recorded and
+  // with only relocated sections passed to the memory manager.
+  RuntimeDyldImpl(RuntimeDyld::MemoryManager &MemMgr,
+                  JITSymbolResolver &Resolver)
+    : MemMgr(MemMgr), Resolver(Resolver),
+      ProcessAllSections(false), HasError(false) {
+  }
+
+  virtual ~RuntimeDyldImpl();
+
+  // When true, all sections are passed to the memory manager, not just the
+  // ones containing relocations (see ProcessAllSections above).
+  void setProcessAllSections(bool ProcessAllSections) {
+    this->ProcessAllSections = ProcessAllSections;
+  }
+
+ virtual std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &Obj) = 0;
+
+  /// Target load address of the given section; the absolute-symbol
+  /// pseudo-section always maps to address zero.
+  uint64_t getSectionLoadAddress(unsigned SectionID) const {
+    return SectionID == AbsoluteSymbolSection
+               ? 0
+               : Sections[SectionID].getLoadAddress();
+  }
+
+  /// Host-local address of the given section's storage, or null for the
+  /// absolute-symbol pseudo-section.
+  uint8_t *getSectionAddress(unsigned SectionID) const {
+    return SectionID == AbsoluteSymbolSection
+               ? nullptr
+               : Sections[SectionID].getAddress();
+  }
+
+  /// Bytes of the given section, including the trailing stub area.
+  /// Empty for the absolute-symbol pseudo-section.
+  StringRef getSectionContent(unsigned SectionID) const {
+    if (SectionID == AbsoluteSymbolSection)
+      return {};
+    const auto &Sec = Sections[SectionID];
+    size_t Len = Sec.getStubOffset() + getMaxStubSize();
+    return StringRef(reinterpret_cast<char *>(Sec.getAddress()), Len);
+  }
+
+  /// Host-local address of a named symbol, or null when the symbol is
+  /// unknown or absolute (absolute symbols have no local storage).
+  uint8_t* getSymbolLocalAddress(StringRef Name) const {
+    // FIXME: Just look up as a function for now. Overly simple of course.
+    // Work in progress.
+    auto It = GlobalSymbolTable.find(Name);
+    if (It == GlobalSymbolTable.end())
+      return nullptr;
+    unsigned SecID = It->second.getSectionID();
+    // Absolute symbols do not have a local address.
+    if (SecID == AbsoluteSymbolSection)
+      return nullptr;
+    return getSectionAddress(SecID) + It->second.getOffset();
+  }
+
+  /// SectionID containing the named symbol, or ~0U when unknown.
+  unsigned getSymbolSectionID(StringRef Name) const {
+    auto It = GlobalSymbolTable.find(Name);
+    return It == GlobalSymbolTable.end() ? ~0U : It->second.getSectionID();
+  }
+
+  /// Look up a symbol by name and return its target address plus flags.
+  /// Returns a null symbol when the name is not in the global table.
+  JITEvaluatedSymbol getSymbol(StringRef Name) const {
+    // FIXME: Just look up as a function for now. Overly simple of course.
+    // Work in progress.
+    auto It = GlobalSymbolTable.find(Name);
+    if (It == GlobalSymbolTable.end())
+      return nullptr;
+
+    const auto &Entry = It->second;
+    uint64_t TargetAddr = Entry.getOffset();
+    // Absolute symbols carry their full address in the offset field.
+    if (Entry.getSectionID() != AbsoluteSymbolSection)
+      TargetAddr += getSectionLoadAddress(Entry.getSectionID());
+
+    // FIXME: Have getSymbol should return the actual address and the client
+    //        modify it based on the flags. This will require clients to be
+    //        aware of the target architecture, which we should build
+    //        infrastructure for.
+    TargetAddr = modifyAddressBasedOnFlags(TargetAddr, Entry.getFlags());
+    return JITEvaluatedSymbol(TargetAddr, Entry.getFlags());
+  }
+
+  /// Snapshot of the global symbol table as name -> evaluated symbol,
+  /// with every offset rebased onto its section's load address.
+  std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const {
+    std::map<StringRef, JITEvaluatedSymbol> Result;
+    for (const auto &KV : GlobalSymbolTable) {
+      const auto &Entry = KV.second;
+      uint64_t Addr =
+          getSectionLoadAddress(Entry.getSectionID()) + Entry.getOffset();
+      Result[KV.first()] = JITEvaluatedSymbol(Addr, Entry.getFlags());
+    }
+    return Result;
+  }
+
+ void resolveRelocations();
+
+ void resolveLocalRelocations();
+
+ static void finalizeAsync(
+ std::unique_ptr<RuntimeDyldImpl> This,
+ unique_function<void(object::OwningBinary<object::ObjectFile>,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>,
+ Error)>
+ OnEmitted,
+ object::OwningBinary<object::ObjectFile> O,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info);
+
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+  // Is the linker in an error state?
+  bool hasError() { return HasError; }
+
+  // Mark the error condition as handled and continue.
+  void clearError() { HasError = false; }
+
+  // Get the error message (only meaningful while hasError() is true).
+  StringRef getErrorString() { return ErrorStr; }
+
+ virtual bool isCompatibleFile(const ObjectFile &Obj) const = 0;
+
+  // Install the callback to be invoked whenever a stub is emitted.
+  void setNotifyStubEmitted(NotifyStubEmittedFunction NotifyStubEmitted) {
+    this->NotifyStubEmitted = std::move(NotifyStubEmitted);
+  }
+
+ virtual void registerEHFrames();
+
+ void deregisterEHFrames();
+
+  // Per-format post-load hook; the default implementation does nothing.
+  virtual Error finalizeLoad(const ObjectFile &ObjImg,
+                             ObjSectionToIDMap &SectionMap) {
+    return Error::success();
+  }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
new file mode 100644
index 000000000000..9ca76602ea18
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -0,0 +1,382 @@
+//===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldMachO.h"
+#include "Targets/RuntimeDyldMachOAArch64.h"
+#include "Targets/RuntimeDyldMachOARM.h"
+#include "Targets/RuntimeDyldMachOI386.h"
+#include "Targets/RuntimeDyldMachOX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+// Per-object load information returned to RuntimeDyldMachO::loadObject
+// callers; MachO provides no separate debug object.
+class LoadedMachOObjectInfo final
+    : public LoadedObjectInfoHelper<LoadedMachOObjectInfo,
+                                    RuntimeDyld::LoadedObjectInfo> {
+public:
+  LoadedMachOObjectInfo(RuntimeDyldImpl &RTDyld,
+                        ObjSectionToIDMap ObjSecToIDMap)
+      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+  // Return an empty binary: there is no debug-side copy of a MachO object.
+  OwningBinary<ObjectFile>
+  getObjectForDebug(const ObjectFile &Obj) const override {
+    return OwningBinary<ObjectFile>();
+  }
+};
+
+}
+
+namespace llvm {
+
+// Extract a contiguous addend stored in the instruction stream: read the
+// 2^RE.Size bytes at the relocation's fixup address.
+int64_t RuntimeDyldMachO::memcpyAddend(const RelocationEntry &RE) const {
+  const unsigned NumBytes = 1u << RE.Size;
+  uint8_t *FixupAddr = Sections[RE.SectionID].getAddress() + RE.Offset;
+  return static_cast<int64_t>(readBytesUnaligned(FixupAddr, NumBytes));
+}
+
+// Process a "scattered vanilla" MachO relocation. Scattered relocations
+// identify their target by an address inside the object image rather than
+// by symbol index, so the target section must be found by address search.
+Expected<relocation_iterator>
+RuntimeDyldMachO::processScatteredVANILLA(
+    unsigned SectionID, relocation_iterator RelI,
+    const ObjectFile &BaseObjT,
+    RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+    bool TargetIsLocalThumbFunc) {
+  const MachOObjectFile &Obj =
+    static_cast<const MachOObjectFile&>(BaseObjT);
+  MachO::any_relocation_info RE =
+    Obj.getRelocation(RelI->getRawDataRefImpl());
+
+  SectionEntry &Section = Sections[SectionID];
+  uint32_t RelocType = Obj.getAnyRelocationType(RE);
+  bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+  unsigned Size = Obj.getAnyRelocationLength(RE);
+  uint64_t Offset = RelI->getOffset();
+  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+  // The addend is embedded at the fixup address, 2^Size bytes wide.
+  unsigned NumBytes = 1 << Size;
+  int64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+  // Map the scattered relocation's target address back to its section.
+  unsigned SymbolBaseAddr = Obj.getScatteredRelocationValue(RE);
+  section_iterator TargetSI = getSectionByAddress(Obj, SymbolBaseAddr);
+  assert(TargetSI != Obj.section_end() && "Can't find section for symbol");
+  uint64_t SectionBaseAddr = TargetSI->getAddress();
+  SectionRef TargetSection = *TargetSI;
+  bool IsCode = TargetSection.isText();
+  uint32_t TargetSectionID = ~0U;
+  if (auto TargetSectionIDOrErr =
+        findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID))
+    TargetSectionID = *TargetSectionIDOrErr;
+  else
+    return TargetSectionIDOrErr.takeError();
+
+  // Rebase the addend to be relative to the target section's start.
+  Addend -= SectionBaseAddr;
+  RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+  R.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+  addRelocationForSection(R, TargetSectionID);
+
+  return ++RelI;
+}
+
+
+// Build a RelocationValueRef for the relocation's target: either a
+// (SectionID, Offset) pair when the target is known, or a (SymbolName,
+// Offset) pair for still-external symbols. See the declaration in
+// RuntimeDyldMachO.h for the full contract.
+Expected<RelocationValueRef>
+RuntimeDyldMachO::getRelocationValueRef(
+    const ObjectFile &BaseTObj, const relocation_iterator &RI,
+    const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID) {
+
+  const MachOObjectFile &Obj =
+      static_cast<const MachOObjectFile &>(BaseTObj);
+  MachO::any_relocation_info RelInfo =
+      Obj.getRelocation(RI->getRawDataRefImpl());
+  RelocationValueRef Value;
+
+  bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
+  if (IsExternal) {
+    // External relocation: the target is named by a symbol. If the symbol
+    // is already in the global table, resolve to (SectionID, Offset) now;
+    // otherwise record the name for later resolution.
+    symbol_iterator Symbol = RI->getSymbol();
+    StringRef TargetName;
+    if (auto TargetNameOrErr = Symbol->getName())
+      TargetName = *TargetNameOrErr;
+    else
+      return TargetNameOrErr.takeError();
+    // NOTE(review): TargetName.data() relies on the name being
+    // null-terminated in the object's string table — confirm.
+    RTDyldSymbolTable::const_iterator SI =
+      GlobalSymbolTable.find(TargetName.data());
+    if (SI != GlobalSymbolTable.end()) {
+      const auto &SymInfo = SI->second;
+      Value.SectionID = SymInfo.getSectionID();
+      Value.Offset = SymInfo.getOffset() + RE.Addend;
+    } else {
+      Value.SymbolName = TargetName.data();
+      Value.Offset = RE.Addend;
+    }
+  } else {
+    // Internal relocation: the target lies inside a section of this
+    // object. Rebase the addend to be section-relative.
+    SectionRef Sec = Obj.getAnyRelocationSection(RelInfo);
+    bool IsCode = Sec.isText();
+    if (auto SectionIDOrErr = findOrEmitSection(Obj, Sec, IsCode,
+                                                ObjSectionToID))
+      Value.SectionID = *SectionIDOrErr;
+    else
+      return SectionIDOrErr.takeError();
+    uint64_t Addr = Sec.getAddress();
+    Value.Offset = RE.Addend - Addr;
+  }
+
+  return Value;
+}
+
+// Make Value's addend PC-relative by adding the fixup's address in the
+// object image (relocated section address + relocation offset) plus the
+// distance from the fixup to the PC the instruction uses (OffsetToNextPC).
+void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
+                                            const relocation_iterator &RI,
+                                            unsigned OffsetToNextPC) {
+  auto &O = *cast<MachOObjectFile>(RI->getObject());
+  section_iterator SecI = O.getRelocationRelocatedSection(RI);
+  Value.Offset += RI->getOffset() + OffsetToNextPC + SecI->getAddress();
+}
+
+// Debug helper: print a relocation entry together with the value it is
+// about to be resolved against.
+void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
+                                               uint64_t Value) const {
+  const SectionEntry &Section = Sections[RE.SectionID];
+  uint8_t *LocalAddress = Section.getAddress() + RE.Offset;
+  uint64_t FinalAddress = Section.getLoadAddress() + RE.Offset;
+
+  dbgs() << "resolveRelocation Section: " << RE.SectionID
+         << " LocalAddress: " << format("%p", LocalAddress)
+         << " FinalAddress: " << format("0x%016" PRIx64, FinalAddress)
+         << " Value: " << format("0x%016" PRIx64, Value) << " Addend: " << RE.Addend
+         << " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
+         << " Size: " << (1 << RE.Size) << "\n";
+}
+
+// Return an iterator to the section whose [address, address + size) range
+// contains Addr, or section_end() when no section covers it.
+section_iterator
+RuntimeDyldMachO::getSectionByAddress(const MachOObjectFile &Obj,
+                                      uint64_t Addr) {
+  section_iterator SI = Obj.section_begin();
+  section_iterator SE = Obj.section_end();
+
+  for (; SI != SE; ++SI) {
+    uint64_t SAddr = SI->getAddress();
+    uint64_t SSize = SI->getSize();
+    if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+      return SI;
+  }
+
+  return SE;
+}
+
+
+// Populate __pointers section.
+// Populate __pointers section: add one relocation per 4-byte pointer-table
+// entry, binding it to the indirect symbol named by the corresponding
+// indirect symbol table entry (starting at the section's reserved1 index).
+Error RuntimeDyldMachO::populateIndirectSymbolPointersSection(
+                                                    const MachOObjectFile &Obj,
+                                                    const SectionRef &PTSection,
+                                                    unsigned PTSectionID) {
+  assert(!Obj.is64Bit() &&
+         "Pointer table section not supported in 64-bit MachO.");
+
+  MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+  MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+  uint32_t PTSectionSize = Sec32.size;
+  unsigned FirstIndirectSymbol = Sec32.reserved1;
+  const unsigned PTEntrySize = 4;
+  unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+  unsigned PTEntryOffset = 0;
+
+  assert((PTSectionSize % PTEntrySize) == 0 &&
+         "Pointers section does not contain a whole number of stubs?");
+
+  LLVM_DEBUG(dbgs() << "Populating pointer table section "
+                    << Sections[PTSectionID].getName() << ", Section ID "
+                    << PTSectionID << ", " << NumPTEntries << " entries, "
+                    << PTEntrySize << " bytes each:\n");
+
+  for (unsigned i = 0; i < NumPTEntries; ++i) {
+    // Each pointer-table slot i is described by indirect symbol table
+    // entry FirstIndirectSymbol + i.
+    unsigned SymbolIndex =
+      Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+    symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+    StringRef IndirectSymbolName;
+    if (auto IndirectSymbolNameOrErr = SI->getName())
+      IndirectSymbolName = *IndirectSymbolNameOrErr;
+    else
+      return IndirectSymbolNameOrErr.takeError();
+    LLVM_DEBUG(dbgs() << "  " << IndirectSymbolName << ": index " << SymbolIndex
+                      << ", PT offset: " << PTEntryOffset << "\n");
+    // Size = 2 (i.e. 2^2 = 4 bytes) matches the 32-bit pointer slot.
+    RelocationEntry RE(PTSectionID, PTEntryOffset,
+                       MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+    addRelocationForSymbol(RE, IndirectSymbolName);
+    PTEntryOffset += PTEntrySize;
+  }
+  return Error::success();
+}
+
+// Accept any MachO object; the architecture-specific subclass was already
+// selected when this instance was created.
+bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile &Obj) const {
+  return Obj.isMachO();
+}
+
+// Post-load pass: force emission of the EH-related sections, let the
+// concrete impl finalize every other already-emitted section, and queue
+// the EH sections for a later registerEHFrames() call.
+template <typename Impl>
+Error
+RuntimeDyldMachOCRTPBase<Impl>::finalizeLoad(const ObjectFile &Obj,
+                                             ObjSectionToIDMap &SectionMap) {
+  unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
+  unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
+  unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
+
+  for (const auto &Section : Obj.sections()) {
+    StringRef Name;
+    if (Expected<StringRef> NameOrErr = Section.getName())
+      Name = *NameOrErr;
+    else
+      consumeError(NameOrErr.takeError());
+
+    // Force emission of the __text, __eh_frame, and __gcc_except_tab sections
+    // if they're present. Otherwise call down to the impl to handle other
+    // sections that have already been emitted.
+    if (Name == "__text") {
+      if (auto TextSIDOrErr = findOrEmitSection(Obj, Section, true, SectionMap))
+        TextSID = *TextSIDOrErr;
+      else
+        return TextSIDOrErr.takeError();
+    } else if (Name == "__eh_frame") {
+      if (auto EHFrameSIDOrErr = findOrEmitSection(Obj, Section, false,
+                                                   SectionMap))
+        EHFrameSID = *EHFrameSIDOrErr;
+      else
+        return EHFrameSIDOrErr.takeError();
+    } else if (Name == "__gcc_except_tab") {
+      if (auto ExceptTabSIDOrErr = findOrEmitSection(Obj, Section, true,
+                                                     SectionMap))
+        ExceptTabSID = *ExceptTabSIDOrErr;
+      else
+        return ExceptTabSIDOrErr.takeError();
+    } else {
+      // Already-emitted section: give the target-specific impl a chance to
+      // do its own per-section fixups.
+      auto I = SectionMap.find(Section);
+      if (I != SectionMap.end())
+        if (auto Err = impl().finalizeSection(Obj, I->second, Section))
+          return Err;
+    }
+  }
+  UnregisteredEHFrameSections.push_back(
+    EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
+
+  return Error::success();
+}
+
+// Process one CIE/FDE record in __eh_frame: rewrite the FDE's function
+// pointer (by DeltaForText) and, when augmentation data is present, its
+// LSDA pointer (by DeltaForEH). Returns a pointer to the next record.
+// NOTE(review): assumes a 4-byte length field (no 64-bit DWARF extended
+// length) and absolute pointer encodings — confirm against the producer.
+template <typename Impl>
+unsigned char *RuntimeDyldMachOCRTPBase<Impl>::processFDE(uint8_t *P,
+                                                          int64_t DeltaForText,
+                                                          int64_t DeltaForEH) {
+  typedef typename Impl::TargetPtrT TargetPtrT;
+
+  LLVM_DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+                    << ", Delta for EH: " << DeltaForEH << "\n");
+  uint32_t Length = readBytesUnaligned(P, 4);
+  P += 4;
+  uint8_t *Ret = P + Length;
+  // A zero CIE-pointer field marks a CIE record; nothing to rewrite.
+  uint32_t Offset = readBytesUnaligned(P, 4);
+  if (Offset == 0) // is a CIE
+    return Ret;
+
+  P += 4;
+  TargetPtrT FDELocation = readBytesUnaligned(P, sizeof(TargetPtrT));
+  TargetPtrT NewLocation = FDELocation - DeltaForText;
+  writeBytesUnaligned(NewLocation, P, sizeof(TargetPtrT));
+
+  P += sizeof(TargetPtrT);
+
+  // Skip the FDE address range
+  P += sizeof(TargetPtrT);
+
+  uint8_t Augmentationsize = *P;
+  P += 1;
+  if (Augmentationsize != 0) {
+    // First augmentation datum is treated as the LSDA pointer.
+    TargetPtrT LSDA = readBytesUnaligned(P, sizeof(TargetPtrT));
+    TargetPtrT NewLSDA = LSDA - DeltaForEH;
+    writeBytesUnaligned(NewLSDA, P, sizeof(TargetPtrT));
+  }
+
+  return Ret;
+}
+
+// How far section A's contents moved relative to section B between the
+// object image and the loaded image: (A-B in object) - (A-B in memory).
+static int64_t computeDelta(SectionEntry *A, SectionEntry *B) {
+  int64_t ObjDist = static_cast<int64_t>(A->getObjAddress()) -
+                    static_cast<int64_t>(B->getObjAddress());
+  int64_t MemDist = static_cast<int64_t>(A->getLoadAddress()) -
+                    static_cast<int64_t>(B->getLoadAddress());
+  return ObjDist - MemDist;
+}
+
+// Rewrite every queued __eh_frame section so its pointers refer to loaded
+// addresses, then hand it to the memory manager for registration.
+template <typename Impl>
+void RuntimeDyldMachOCRTPBase<Impl>::registerEHFrames() {
+
+  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
+    EHFrameRelatedSections &SectionInfo = UnregisteredEHFrameSections[i];
+    // Without both an __eh_frame and a __text section there is nothing to
+    // rebase against; skip this entry.
+    if (SectionInfo.EHFrameSID == RTDYLD_INVALID_SECTION_ID ||
+        SectionInfo.TextSID == RTDYLD_INVALID_SECTION_ID)
+      continue;
+    SectionEntry *Text = &Sections[SectionInfo.TextSID];
+    SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
+    SectionEntry *ExceptTab = nullptr;
+    if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
+      ExceptTab = &Sections[SectionInfo.ExceptTabSID];
+
+    int64_t DeltaForText = computeDelta(Text, EHFrame);
+    int64_t DeltaForEH = 0;
+    if (ExceptTab)
+      DeltaForEH = computeDelta(ExceptTab, EHFrame);
+
+    // Walk the section record by record, fixing up each FDE in place.
+    uint8_t *P = EHFrame->getAddress();
+    uint8_t *End = P + EHFrame->getSize();
+    while (P != End) {
+      P = processFDE(P, DeltaForText, DeltaForEH);
+    }
+
+    MemMgr.registerEHFrames(EHFrame->getAddress(), EHFrame->getLoadAddress(),
+                            EHFrame->getSize());
+  }
+  UnregisteredEHFrameSections.clear();
+}
+
+/// Create the architecture-specific MachO linker for Arch.
+std::unique_ptr<RuntimeDyldMachO>
+RuntimeDyldMachO::create(Triple::ArchType Arch,
+                         RuntimeDyld::MemoryManager &MemMgr,
+                         JITSymbolResolver &Resolver) {
+  switch (Arch) {
+  case Triple::arm:
+    return std::make_unique<RuntimeDyldMachOARM>(MemMgr, Resolver);
+  case Triple::aarch64:
+  case Triple::aarch64_32:
+    // Both 64-bit and ILP32 AArch64 use the same concrete linker.
+    return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+  case Triple::x86:
+    return std::make_unique<RuntimeDyldMachOI386>(MemMgr, Resolver);
+  case Triple::x86_64:
+    return std::make_unique<RuntimeDyldMachOX86_64>(MemMgr, Resolver);
+  default:
+    llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
+  }
+}
+
+// Load O via the generic loader. On success return its LoadedObjectInfo;
+// on failure record the message and enter the error state (see
+// hasError() / getErrorString()).
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldMachO::loadObject(const object::ObjectFile &O) {
+  if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+    return std::make_unique<LoadedMachOObjectInfo>(*this,
+                                                   *ObjSectionToIDOrErr);
+  else {
+    HasError = true;
+    raw_string_ostream ErrStream(ErrorStr);
+    logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+    return nullptr;
+  }
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
new file mode 100644
index 000000000000..650e7b79fbb8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -0,0 +1,167 @@
+//===-- RuntimeDyldMachO.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Support/Format.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+/// MachO-specific base of the run-time dynamic linker. Holds the MachO
+/// relocation helpers shared by all architectures; concrete behavior lives
+/// in the CRTP subclasses under ./Targets (see RuntimeDyldMachOCRTPBase).
+class RuntimeDyldMachO : public RuntimeDyldImpl {
+protected:
+  /// A (SectionID, Offset) address inside a loaded section.
+  struct SectionOffsetPair {
+    unsigned SectionID;
+    uint64_t Offset;
+  };
+
+  /// SectionIDs of one object's EH-related sections, queued between
+  /// finalizeLoad() and registerEHFrames().
+  struct EHFrameRelatedSections {
+    EHFrameRelatedSections()
+        : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
+          TextSID(RTDYLD_INVALID_SECTION_ID),
+          ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
+
+    EHFrameRelatedSections(SID EH, SID T, SID Ex)
+        : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
+    SID EHFrameSID;
+    SID TextSID;
+    SID ExceptTabSID;
+  };
+
+  // When a module is loaded we save the SectionID of the EH frame section
+  // in a table until we receive a request to register all unregistered
+  // EH frame sections with the memory manager.
+  SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
+
+  RuntimeDyldMachO(RuntimeDyld::MemoryManager &MemMgr,
+                   JITSymbolResolver &Resolver)
+      : RuntimeDyldImpl(MemMgr, Resolver) {}
+
+  /// This convenience method uses memcpy to extract a contiguous addend (the
+  /// addend size and offset are taken from the corresponding fields of the RE).
+  int64_t memcpyAddend(const RelocationEntry &RE) const;
+
+  /// Given a relocation_iterator for a non-scattered relocation, construct a
+  /// RelocationEntry and fill in the common fields. The 'Addend' field is *not*
+  /// filled in, since immediate encodings are highly target/opcode specific.
+  /// For targets/opcodes with simple, contiguous immediates (e.g. X86) the
+  /// memcpyAddend method can be used to read the immediate.
+  RelocationEntry getRelocationEntry(unsigned SectionID,
+                                     const ObjectFile &BaseTObj,
+                                     const relocation_iterator &RI) const {
+    const MachOObjectFile &Obj =
+      static_cast<const MachOObjectFile &>(BaseTObj);
+    MachO::any_relocation_info RelInfo =
+      Obj.getRelocation(RI->getRawDataRefImpl());
+
+    bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+    unsigned Size = Obj.getAnyRelocationLength(RelInfo);
+    uint64_t Offset = RI->getOffset();
+    MachO::RelocationInfoType RelType =
+      static_cast<MachO::RelocationInfoType>(Obj.getAnyRelocationType(RelInfo));
+
+    // Addend deliberately left 0 — see the doc comment above.
+    return RelocationEntry(SectionID, Offset, RelType, 0, IsPCRel, Size);
+  }
+
+  /// Process a scattered vanilla relocation.
+  Expected<relocation_iterator>
+  processScatteredVANILLA(unsigned SectionID, relocation_iterator RelI,
+                          const ObjectFile &BaseObjT,
+                          RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+                          bool TargetIsLocalThumbFunc = false);
+
+  /// Construct a RelocationValueRef representing the relocation target.
+  /// For Symbols in known sections, this will return a RelocationValueRef
+  /// representing a (SectionID, Offset) pair.
+  /// For Symbols whose section is not known, this will return a
+  /// (SymbolName, Offset) pair, where the Offset is taken from the instruction
+  /// immediate (held in RE.Addend).
+  /// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
+  /// should be done by the caller where appropriate by calling makePCRel on
+  /// the RelocationValueRef.
+  Expected<RelocationValueRef>
+  getRelocationValueRef(const ObjectFile &BaseTObj,
+                        const relocation_iterator &RI,
+                        const RelocationEntry &RE,
+                        ObjSectionToIDMap &ObjSectionToID);
+
+  /// Make the RelocationValueRef addend PC-relative.
+  void makeValueAddendPCRel(RelocationValueRef &Value,
+                            const relocation_iterator &RI,
+                            unsigned OffsetToNextPC);
+
+  /// Dump information about the relocation entry (RE) and resolved value.
+  void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
+
+  // Return a section iterator for the section containing the given address.
+  static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+                                              uint64_t Addr);
+
+
+  // Populate __pointers section.
+  Error populateIndirectSymbolPointersSection(const MachOObjectFile &Obj,
+                                              const SectionRef &PTSection,
+                                              unsigned PTSectionID);
+
+public:
+
+  /// Create a RuntimeDyldMachO instance for the given target architecture.
+  static std::unique_ptr<RuntimeDyldMachO>
+  create(Triple::ArchType Arch,
+         RuntimeDyld::MemoryManager &MemMgr,
+         JITSymbolResolver &Resolver);
+
+  std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+  loadObject(const object::ObjectFile &O) override;
+
+  /// Direct access to a loaded section's bookkeeping entry.
+  SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+  bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+};
+
+/// RuntimeDyldMachOTarget - Templated base class for generic MachO linker
+/// algorithms and data structures.
+///
+/// Concrete, target specific sub-classes can be accessed via the impl()
+/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
+/// Recurring Template Idiom). Concrete subclasses for each target
+/// can be found in ./Targets.
+template <typename Impl>
+class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
+private:
+  // CRTP downcasts to the concrete target implementation.
+  Impl &impl() { return static_cast<Impl &>(*this); }
+  const Impl &impl() const { return static_cast<const Impl &>(*this); }
+
+  // Rewrite one CIE/FDE record in __eh_frame; returns the next record.
+  unsigned char *processFDE(uint8_t *P, int64_t DeltaForText,
+                            int64_t DeltaForEH);
+
+public:
+  RuntimeDyldMachOCRTPBase(RuntimeDyld::MemoryManager &MemMgr,
+                           JITSymbolResolver &Resolver)
+    : RuntimeDyldMachO(MemMgr, Resolver) {}
+
+  Error finalizeLoad(const ObjectFile &Obj,
+                     ObjSectionToIDMap &SectionMap) override;
+  void registerEHFrames() override;
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
new file mode 100644
index 000000000000..66c9753a72fd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
@@ -0,0 +1,377 @@
+//===-- RuntimeDyldCOFFAArch64.h --- COFF/AArch64 specific code -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF AArch64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFAARCH64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm::support::endian;
+
+namespace llvm {
+
// This relocation type is used for handling long branch instruction
// through the Stub. The value (0x111) lies outside the range of real
// IMAGE_REL_ARM64_* relocation types, so it can never collide with a
// relocation read from an object file.
enum InternalRelocationType : unsigned {
  INTERNAL_REL_ARM64_LONG_BRANCH26 = 0x111,
};
+
+static void add16(uint8_t *p, int16_t v) { write16le(p, read16le(p) + v); }
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+static void write32AArch64Imm(uint8_t *T, uint64_t imm, uint32_t rangeLimit) {
+ uint32_t orig = read32le(T);
+ orig &= ~(0xFFF << 10);
+ write32le(T, orig | ((imm & (0xFFF >> rangeLimit)) << 10));
+}
+
+static void write32AArch64Ldr(uint8_t *T, uint64_t imm) {
+ uint32_t orig = read32le(T);
+ uint32_t size = orig >> 30;
+ // 0x04000000 indicates SIMD/FP registers
+ // 0x00800000 indicates 128 bit
+ if ((orig & 0x04800000) == 0x04800000)
+ size += 4;
+ if ((imm & ((1 << size) - 1)) != 0)
+ assert(0 && "misaligned ldr/str offset");
+ write32AArch64Imm(T, imm >> size, size);
+}
+
+static void write32AArch64Addr(void *T, uint64_t s, uint64_t p, int shift) {
+ uint64_t Imm = (s >> shift) - (p >> shift);
+ uint32_t ImmLo = (Imm & 0x3) << 29;
+ uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+ uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+ write32le(T, (read32le(T) & ~Mask) | ImmLo | ImmHi);
+}
+
/// COFF/AArch64 (ARM64 Windows) support for RuntimeDyld.
///
/// processRelocationRef extracts each relocation's implicit addend from the
/// instruction bytes in the object file; resolveRelocation patches the
/// loaded copy. Branches to external symbols are routed through a stub so
/// they cannot go out of B/BL range (see generateRelocationStub).
class RuntimeDyldCOFFAArch64 : public RuntimeDyldCOFF {

private:
  // When a module is loaded we save the SectionID of the unwind
  // sections in a table until we receive a request to register all
  // unregisteredEH frame sections with the memory manager.
  SmallVector<SID, 2> UnregisteredEHFrameSections;
  SmallVector<SID, 2> RegisteredEHFrameSections;
  // Lazily computed in getImageBase(); 0 means "not yet computed".
  uint64_t ImageBase;

  // Fake an __ImageBase pointer by returning the section with the lowest
  // address.
  uint64_t getImageBase() {
    if (!ImageBase) {
      ImageBase = std::numeric_limits<uint64_t>::max();
      for (const SectionEntry &Section : Sections)
        // The Sections list may contain sections that weren't loaded for
        // whatever reason: they may be debug sections, and ProcessAllSections
        // is false, or they may be sections that contain 0 bytes. If the
        // section isn't loaded, the load address will be 0, and it should not
        // be included in the ImageBase calculation.
        if (Section.getLoadAddress() != 0)
          ImageBase = std::min(ImageBase, Section.getLoadAddress());
    }
    return ImageBase;
  }

public:
  RuntimeDyldCOFFAArch64(RuntimeDyld::MemoryManager &MM,
                         JITSymbolResolver &Resolver)
      : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_ARM64_ADDR64),
        ImageBase(0) {}

  Align getStubAlignment() override { return Align(8); }

  // 20 bytes: enough for the four immediate-materialization instructions
  // patched by INTERNAL_REL_ARM64_LONG_BRANCH26 below plus a final branch
  // (the exact sequence is emitted by createStubFunction).
  unsigned getMaxStubSize() const override { return 20; }

  /// Create (or reuse) a long-branch stub for \p TargetName, resolve the
  /// original relocation to point at the stub, and return the rewritten
  /// (Offset, RelType, Addend) triple, which now addresses the stub via the
  /// internal INTERNAL_REL_ARM64_LONG_BRANCH26 relocation type.
  std::tuple<uint64_t, uint64_t, uint64_t>
  generateRelocationStub(unsigned SectionID, StringRef TargetName,
                         uint64_t Offset, uint64_t RelType, uint64_t Addend,
                         StubMap &Stubs) {
    uintptr_t StubOffset;
    SectionEntry &Section = Sections[SectionID];

    // Stubs are keyed by (section, offset, addend, symbol) so an identical
    // reference reuses the same stub instead of emitting a new one.
    RelocationValueRef OriginalRelValueRef;
    OriginalRelValueRef.SectionID = SectionID;
    OriginalRelValueRef.Offset = Offset;
    OriginalRelValueRef.Addend = Addend;
    OriginalRelValueRef.SymbolName = TargetName.data();

    auto Stub = Stubs.find(OriginalRelValueRef);
    if (Stub == Stubs.end()) {
      LLVM_DEBUG(dbgs() << " Create a new stub function for "
                        << TargetName.data() << "\n");

      StubOffset = Section.getStubOffset();
      Stubs[OriginalRelValueRef] = StubOffset;
      createStubFunction(Section.getAddressWithOffset(StubOffset));
      Section.advanceStubOffset(getMaxStubSize());
    } else {
      LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
                        << "\n");
      StubOffset = Stub->second;
    }

    // Resolve original relocation to stub function.
    const RelocationEntry RE(SectionID, Offset, RelType, Addend);
    resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));

    // adjust relocation info so resolution writes to the stub function
    // Here an internal relocation type is used for resolving long branch via
    // stub instruction.
    Addend = 0;
    Offset = StubOffset;
    RelType = INTERNAL_REL_ARM64_LONG_BRANCH26;

    return std::make_tuple(Offset, RelType, Addend);
  }

  /// Record one relocation from the object file. Computes the implicit
  /// addend from the instruction/data bytes, decides whether the target is
  /// external or section-local (DLL-import pseudo-symbols are redirected to
  /// an import stub in this section), and queues a RelocationEntry for later
  /// resolution.
  Expected<object::relocation_iterator>
  processRelocationRef(unsigned SectionID, object::relocation_iterator RelI,
                       const object::ObjectFile &Obj,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {

    auto Symbol = RelI->getSymbol();
    if (Symbol == Obj.symbol_end())
      report_fatal_error("Unknown symbol in relocation");

    Expected<StringRef> TargetNameOrErr = Symbol->getName();
    if (!TargetNameOrErr)
      return TargetNameOrErr.takeError();
    StringRef TargetName = *TargetNameOrErr;

    auto SectionOrErr = Symbol->getSection();
    if (!SectionOrErr)
      return SectionOrErr.takeError();
    auto Section = *SectionOrErr;

    uint64_t RelType = RelI->getType();
    uint64_t Offset = RelI->getOffset();

    // If there is no section, this must be an external reference.
    bool IsExtern = Section == Obj.section_end();

    // Determine the Addend used to adjust the relocation value.
    uint64_t Addend = 0;
    SectionEntry &AddendSection = Sections[SectionID];
    uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
    uint8_t *Displacement = (uint8_t *)ObjTarget;

    unsigned TargetSectionID = -1;
    uint64_t TargetOffset = -1;

    if (TargetName.starts_with(getImportSymbolPrefix())) {
      // __imp_ symbol: materialize a pointer slot in this section and treat
      // the relocation as section-local against that slot.
      TargetSectionID = SectionID;
      TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName);
      TargetName = StringRef();
      IsExtern = false;
    } else if (!IsExtern) {
      if (auto TargetSectionIDOrErr = findOrEmitSection(
              Obj, *Section, Section->isText(), ObjSectionToID))
        TargetSectionID = *TargetSectionIDOrErr;
      else
        return TargetSectionIDOrErr.takeError();

      TargetOffset = getSymbolOffset(*Symbol);
    }

    // Extract the addend encoded in the instruction / data bytes.
    switch (RelType) {
    case COFF::IMAGE_REL_ARM64_ADDR32:
    case COFF::IMAGE_REL_ARM64_ADDR32NB:
    case COFF::IMAGE_REL_ARM64_REL32:
    case COFF::IMAGE_REL_ARM64_SECREL:
      Addend = read32le(Displacement);
      break;
    case COFF::IMAGE_REL_ARM64_BRANCH26: {
      // imm26 holds the displacement in words; scale back to bytes.
      uint32_t orig = read32le(Displacement);
      Addend = (orig & 0x03FFFFFF) << 2;

      // External branches may exceed the +/-128 MiB B/BL range, so route
      // them through a long-branch stub.
      if (IsExtern)
        std::tie(Offset, RelType, Addend) = generateRelocationStub(
            SectionID, TargetName, Offset, RelType, Addend, Stubs);
      break;
    }
    case COFF::IMAGE_REL_ARM64_BRANCH19: {
      uint32_t orig = read32le(Displacement);
      Addend = (orig & 0x00FFFFE0) >> 3;
      break;
    }
    case COFF::IMAGE_REL_ARM64_BRANCH14: {
      uint32_t orig = read32le(Displacement);
      Addend = (orig & 0x000FFFE0) >> 3;
      break;
    }
    case COFF::IMAGE_REL_ARM64_REL21:
    case COFF::IMAGE_REL_ARM64_PAGEBASE_REL21: {
      // ADR/ADRP: immlo is bits 29..30, immhi is bits 5..23.
      uint32_t orig = read32le(Displacement);
      Addend = ((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC);
      break;
    }
    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L:
    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A: {
      uint32_t orig = read32le(Displacement);
      Addend = ((orig >> 10) & 0xFFF);
      break;
    }
    case COFF::IMAGE_REL_ARM64_ADDR64: {
      Addend = read64le(Displacement);
      break;
    }
    default:
      break;
    }

#if !defined(NDEBUG)
    SmallString<32> RelTypeName;
    RelI->getTypeName(RelTypeName);

    LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
                      << " RelType: " << RelTypeName << " TargetName: "
                      << TargetName << " Addend " << Addend << "\n");
#endif

    if (IsExtern) {
      RelocationEntry RE(SectionID, Offset, RelType, Addend);
      addRelocationForSymbol(RE, TargetName);
    } else {
      RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
      addRelocationForSection(RE, TargetSectionID);
    }
    return ++RelI;
  }

  /// Patch the instruction/data at the relocation site so it refers to
  /// \p Value (the target's load address; the entry's addend is folded in
  /// per relocation type).
  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    const auto Section = Sections[RE.SectionID];
    uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);

    switch (RE.RelType) {
    default:
      llvm_unreachable("unsupported relocation type");
    case COFF::IMAGE_REL_ARM64_ABSOLUTE: {
      // This relocation is ignored.
      break;
    }
    case COFF::IMAGE_REL_ARM64_PAGEBASE_REL21: {
      // The page base of the target, for ADRP instruction.
      Value += RE.Addend;
      write32AArch64Addr(Target, Value, FinalAddress, 12);
      break;
    }
    case COFF::IMAGE_REL_ARM64_REL21: {
      // The 12-bit relative displacement to the target, for instruction ADR
      Value += RE.Addend;
      write32AArch64Addr(Target, Value, FinalAddress, 0);
      break;
    }
    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A: {
      // The 12-bit page offset of the target,
      // for instructions ADD/ADDS (immediate) with zero shift.
      Value += RE.Addend;
      write32AArch64Imm(Target, Value & 0xFFF, 0);
      break;
    }
    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L: {
      // The 12-bit page offset of the target,
      // for instruction LDR (indexed, unsigned immediate).
      Value += RE.Addend;
      write32AArch64Ldr(Target, Value & 0xFFF);
      break;
    }
    case COFF::IMAGE_REL_ARM64_ADDR32: {
      // The 32-bit VA of the target.
      uint32_t VA = Value + RE.Addend;
      write32le(Target, VA);
      break;
    }
    case COFF::IMAGE_REL_ARM64_ADDR32NB: {
      // The target's 32-bit RVA.
      uint64_t RVA = Value + RE.Addend - getImageBase();
      write32le(Target, RVA);
      break;
    }
    case INTERNAL_REL_ARM64_LONG_BRANCH26: {
      // Encode the immediate value for generated Stub instruction (MOVZ)
      // Each 16-bit chunk of the 64-bit target lands in the shifted imm16
      // field (bits 5..20) of one of the four stub instructions.
      or32le(Target + 12, ((Value + RE.Addend) & 0xFFFF) << 5);
      or32le(Target + 8, ((Value + RE.Addend) & 0xFFFF0000) >> 11);
      or32le(Target + 4, ((Value + RE.Addend) & 0xFFFF00000000) >> 27);
      or32le(Target + 0, ((Value + RE.Addend) & 0xFFFF000000000000) >> 43);
      break;
    }
    case COFF::IMAGE_REL_ARM64_BRANCH26: {
      // The 26-bit relative displacement to the target, for B and BL
      // instructions.
      uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
      assert(isInt<28>(PCRelVal) && "Branch target is out of range.");
      write32le(Target, (read32le(Target) & ~(0x03FFFFFF)) |
                            (PCRelVal & 0x0FFFFFFC) >> 2);
      break;
    }
    case COFF::IMAGE_REL_ARM64_BRANCH19: {
      // The 19-bit offset to the relocation target,
      // for conditional B instruction.
      uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
      assert(isInt<21>(PCRelVal) && "Branch target is out of range.");
      write32le(Target, (read32le(Target) & ~(0x00FFFFE0)) |
                            (PCRelVal & 0x001FFFFC) << 3);
      break;
    }
    case COFF::IMAGE_REL_ARM64_BRANCH14: {
      // The 14-bit offset to the relocation target,
      // for instructions TBZ and TBNZ.
      uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
      assert(isInt<16>(PCRelVal) && "Branch target is out of range.");
      write32le(Target, (read32le(Target) & ~(0x000FFFE0)) |
                            (PCRelVal & 0x0000FFFC) << 3);
      break;
    }
    case COFF::IMAGE_REL_ARM64_ADDR64: {
      // The 64-bit VA of the relocation target.
      write64le(Target, Value + RE.Addend);
      break;
    }
    case COFF::IMAGE_REL_ARM64_SECTION: {
      // 16-bit section index of the section that contains the target.
      assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
             "relocation overflow");
      add16(Target, RE.SectionID);
      break;
    }
    case COFF::IMAGE_REL_ARM64_SECREL: {
      // 32-bit offset of the target from the beginning of its section.
      assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
             "Relocation overflow");
      assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
             "Relocation underflow");
      write32le(Target, RE.Addend);
      break;
    }
    case COFF::IMAGE_REL_ARM64_REL32: {
      // The 32-bit relative address from the byte following the relocation.
      uint64_t Result = Value - FinalAddress - 4;
      write32le(Target, Result + RE.Addend);
      break;
    }
    }
  }

  // No EH-frame handling is implemented for COFF/AArch64 here.
  void registerEHFrames() override {}
};
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
new file mode 100644
index 000000000000..0d5afc289b8c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
@@ -0,0 +1,228 @@
+//===--- RuntimeDyldCOFFI386.h --- COFF/I386 specific code -----*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
/// COFF/i386 support for RuntimeDyld.
///
/// processRelocationRef reads each relocation's implicit addend out of the
/// object bytes and queues a RelocationEntry; resolveRelocation patches the
/// loaded image. DLL-import pseudo-symbols (__imp_*) are redirected to a
/// pointer slot emitted into the referencing section.
class RuntimeDyldCOFFI386 : public RuntimeDyldCOFF {
public:
  RuntimeDyldCOFFI386(RuntimeDyld::MemoryManager &MM,
                      JITSymbolResolver &Resolver)
      : RuntimeDyldCOFF(MM, Resolver, 4, COFF::IMAGE_REL_I386_DIR32) {}

  unsigned getMaxStubSize() const override {
    return 8; // 2-byte jmp instruction + 32-bit relative address + 2 byte pad
  }

  Align getStubAlignment() override { return Align(1); }

  /// Record one relocation: classify the target (import stub, section-local,
  /// or external), extract the implicit addend, and queue a RelocationEntry
  /// for later resolution.
  Expected<object::relocation_iterator>
  processRelocationRef(unsigned SectionID,
                       object::relocation_iterator RelI,
                       const object::ObjectFile &Obj,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {

    auto Symbol = RelI->getSymbol();
    if (Symbol == Obj.symbol_end())
      report_fatal_error("Unknown symbol in relocation");

    Expected<StringRef> TargetNameOrErr = Symbol->getName();
    if (!TargetNameOrErr)
      return TargetNameOrErr.takeError();
    StringRef TargetName = *TargetNameOrErr;

    auto SectionOrErr = Symbol->getSection();
    if (!SectionOrErr)
      return SectionOrErr.takeError();
    auto Section = *SectionOrErr;
    // A symbol with no section is an external reference.
    bool IsExtern = Section == Obj.section_end();

    uint64_t RelType = RelI->getType();
    uint64_t Offset = RelI->getOffset();

    unsigned TargetSectionID = -1;
    uint64_t TargetOffset = -1;
    if (TargetName.starts_with(getImportSymbolPrefix())) {
      // __imp_ symbol: emit a pointer slot in this section and target it.
      TargetSectionID = SectionID;
      TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName, true);
      TargetName = StringRef();
      IsExtern = false;
    } else if (!IsExtern) {
      if (auto TargetSectionIDOrErr = findOrEmitSection(
              Obj, *Section, Section->isText(), ObjSectionToID))
        TargetSectionID = *TargetSectionIDOrErr;
      else
        return TargetSectionIDOrErr.takeError();
      // SECTION relocations only need the section index, not an offset.
      if (RelType != COFF::IMAGE_REL_I386_SECTION)
        TargetOffset = getSymbolOffset(*Symbol);
    }

    // Determine the Addend used to adjust the relocation value.
    uint64_t Addend = 0;
    SectionEntry &AddendSection = Sections[SectionID];
    uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
    uint8_t *Displacement = (uint8_t *)ObjTarget;

    switch (RelType) {
    case COFF::IMAGE_REL_I386_DIR32:
    case COFF::IMAGE_REL_I386_DIR32NB:
    case COFF::IMAGE_REL_I386_SECREL:
    case COFF::IMAGE_REL_I386_REL32: {
      Addend = readBytesUnaligned(Displacement, 4);
      break;
    }
    default:
      break;
    }

#if !defined(NDEBUG)
    SmallString<32> RelTypeName;
    RelI->getTypeName(RelTypeName);
#endif
    // Safe outside the #if: LLVM_DEBUG compiles away entirely under NDEBUG,
    // so RelTypeName is only referenced in asserts builds.
    LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
                      << " RelType: " << RelTypeName << " TargetName: "
                      << TargetName << " Addend " << Addend << "\n");

    if (IsExtern) {
      RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
      addRelocationForSymbol(RE, TargetName);
    } else {

      switch (RelType) {
      case COFF::IMAGE_REL_I386_ABSOLUTE:
        // This relocation is ignored.
        break;
      case COFF::IMAGE_REL_I386_DIR32:
      case COFF::IMAGE_REL_I386_DIR32NB:
      case COFF::IMAGE_REL_I386_REL32: {
        RelocationEntry RE =
            RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
                            TargetOffset, 0, 0, false, 0);
        addRelocationForSection(RE, TargetSectionID);
        break;
      }
      case COFF::IMAGE_REL_I386_SECTION: {
        RelocationEntry RE =
            RelocationEntry(TargetSectionID, Offset, RelType, 0);
        addRelocationForSection(RE, TargetSectionID);
        break;
      }
      case COFF::IMAGE_REL_I386_SECREL: {
        RelocationEntry RE =
            RelocationEntry(SectionID, Offset, RelType, TargetOffset + Addend);
        addRelocationForSection(RE, TargetSectionID);
        break;
      }
      default:
        llvm_unreachable("unsupported relocation type");
      }
    }

    return ++RelI;
  }

  /// Patch the bytes at the relocation site so they refer to \p Value
  /// (used for external symbols; section-local entries read the target
  /// section's load address out of RE.Sections.SectionA).
  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    const auto Section = Sections[RE.SectionID];
    uint8_t *Target = Section.getAddressWithOffset(RE.Offset);

    switch (RE.RelType) {
    case COFF::IMAGE_REL_I386_ABSOLUTE:
      // This relocation is ignored.
      break;
    case COFF::IMAGE_REL_I386_DIR32: {
      // The target's 32-bit VA.
      uint64_t Result =
          RE.Sections.SectionA == static_cast<uint32_t>(-1)
              ? Value
              : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(
                    RE.Addend);
      assert(Result <= UINT32_MAX && "relocation overflow");
      LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
                        << " RelType: IMAGE_REL_I386_DIR32"
                        << " TargetSection: " << RE.Sections.SectionA
                        << " Value: " << format("0x%08" PRIx32, Result)
                        << '\n');
      writeBytesUnaligned(Result, Target, 4);
      break;
    }
    case COFF::IMAGE_REL_I386_DIR32NB: {
      // The target's 32-bit RVA.
      // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
      uint64_t Result =
          Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend) -
          Sections[0].getLoadAddress();
      assert(Result <= UINT32_MAX && "relocation overflow");
      LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
                        << " RelType: IMAGE_REL_I386_DIR32NB"
                        << " TargetSection: " << RE.Sections.SectionA
                        << " Value: " << format("0x%08" PRIx32, Result)
                        << '\n');
      writeBytesUnaligned(Result, Target, 4);
      break;
    }
    case COFF::IMAGE_REL_I386_REL32: {
      // 32-bit relative displacement to the target.
      uint64_t Result = RE.Sections.SectionA == static_cast<uint32_t>(-1)
                            ? Value
                            : Sections[RE.Sections.SectionA].getLoadAddress();
      // Displacement is measured from the end of the 4-byte field.
      Result = Result - Section.getLoadAddress() + RE.Addend - 4 - RE.Offset;
      assert(static_cast<int64_t>(Result) <= INT32_MAX &&
             "relocation overflow");
      assert(static_cast<int64_t>(Result) >= INT32_MIN &&
             "relocation underflow");
      LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
                        << " RelType: IMAGE_REL_I386_REL32"
                        << " TargetSection: " << RE.Sections.SectionA
                        << " Value: " << format("0x%08" PRIx32, Result)
                        << '\n');
      writeBytesUnaligned(Result, Target, 4);
      break;
    }
    case COFF::IMAGE_REL_I386_SECTION:
      // 16-bit section index of the section that contains the target.
      assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
             "relocation overflow");
      LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
                        << " RelType: IMAGE_REL_I386_SECTION Value: "
                        << RE.SectionID << '\n');
      writeBytesUnaligned(RE.SectionID, Target, 2);
      break;
    case COFF::IMAGE_REL_I386_SECREL:
      // 32-bit offset of the target from the beginning of its section.
      assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
             "relocation overflow");
      LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
                        << " RelType: IMAGE_REL_I386_SECREL Value: "
                        << RE.Addend << '\n');
      writeBytesUnaligned(RE.Addend, Target, 4);
      break;
    default:
      llvm_unreachable("unsupported relocation type");
    }
  }

  // No EH-frame handling is implemented for COFF/i386 here.
  void registerEHFrames() override {}
};
+
+}
+
+#endif
+
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
new file mode 100644
index 000000000000..c079d8896c1d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
@@ -0,0 +1,348 @@
+//===--- RuntimeDyldCOFFThumb.h --- COFF/Thumb specific code ---*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF thumb support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+static bool isThumbFunc(object::symbol_iterator Symbol,
+ const object::ObjectFile &Obj,
+ object::section_iterator Section) {
+ Expected<object::SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+
+ if (*SymTypeOrErr != object::SymbolRef::ST_Function)
+ return false;
+
+ // We check the IMAGE_SCN_MEM_16BIT flag in the section of the symbol to tell
+ // if it's thumb or not
+ return cast<object::COFFObjectFile>(Obj)
+ .getCOFFSection(*Section)
+ ->Characteristics &
+ COFF::IMAGE_SCN_MEM_16BIT;
+}
+
+class RuntimeDyldCOFFThumb : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFThumb(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 4, COFF::IMAGE_REL_ARM_ADDR32) {}
+
+ unsigned getMaxStubSize() const override {
+ return 16; // 8-byte load instructions, 4-byte jump, 4-byte padding
+ }
+
+ Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
+
+ auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
+
+ if (!Flags) {
+ return Flags.takeError();
+ }
+ auto SectionIterOrErr = SR.getSection();
+ if (!SectionIterOrErr) {
+ return SectionIterOrErr.takeError();
+ }
+ SectionRef Sec = *SectionIterOrErr.get();
+ const object::COFFObjectFile *COFFObjPtr =
+ cast<object::COFFObjectFile>(Sec.getObject());
+ const coff_section *CoffSec = COFFObjPtr->getCOFFSection(Sec);
+ bool isThumb = CoffSec->Characteristics & COFF::IMAGE_SCN_MEM_16BIT;
+
+ Flags->getTargetFlags() = isThumb;
+
+ return Flags;
+ }
+
+ Align getStubAlignment() override { return Align(1); }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_ARM_ADDR32:
+ case COFF::IMAGE_REL_ARM_ADDR32NB:
+ case COFF::IMAGE_REL_ARM_SECREL:
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ bool IsExtern = Section == Obj.section_end();
+ unsigned TargetSectionID = -1;
+ uint64_t TargetOffset = -1;
+
+ if (TargetName.starts_with(getImportSymbolPrefix())) {
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName, true);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ if (RelType != COFF::IMAGE_REL_ARM_SECTION)
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+
+ // We need to find out if the relocation is relative to a thumb function
+ // so that we include the ISA selection bit when resolve the relocation
+ bool IsTargetThumbFunc = isThumbFunc(Symbol, Obj, Section);
+
+ switch (RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECREL: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T:
+ case COFF::IMAGE_REL_ARM_BRANCH24T:
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ RelocationEntry RE = RelocationEntry(SectionID, Offset, RelType,
+ TargetOffset + Addend, true, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ }
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+ int ISASelectionBit = RE.IsTargetThumbFunc ? 1 : 0;
+
+ switch (RE.RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ Result |= ISASelectionBit;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result = Sections[RE.Sections.SectionA].getLoadAddress() -
+ Sections[0].getLoadAddress() + RE.Addend;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ Result |= ISASelectionBit;
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECREL Value: " << RE.Addend
+ << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ // 32-bit VA of the target applied to a contiguous MOVW+MOVT pair.
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_MOV32T"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+
+ // MOVW(T3): |11110|i|10|0|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm32 = zext imm4:i:imm3:imm8
+ // MOVT(T1): |11110|i|10|1|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm16 = imm4:i:imm3:imm8
+
+ auto EncodeImmediate = [](uint8_t *Bytes, uint16_t Immediate) {
+ Bytes[0] |= ((Immediate & 0xf000) >> 12);
+ Bytes[1] |= ((Immediate & 0x0800) >> 11);
+ Bytes[2] |= ((Immediate & 0x00ff) >> 0);
+ Bytes[3] |= (((Immediate & 0x0700) >> 8) << 4);
+ };
+
+ EncodeImmediate(&Target[0],
+ (static_cast<uint32_t>(Result) >> 00) | ISASelectionBit);
+ EncodeImmediate(&Target[4], static_cast<uint32_t>(Result) >> 16);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T: {
+ // The most significant 20-bits of the signed 21-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH20T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH24T: {
+ // The most significant 24-bits of the signed 25-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH24T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ // The most significant 24-bits of the signed 25-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BLX23T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
new file mode 100644
index 000000000000..984a8d765c84
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
@@ -0,0 +1,322 @@
+//===-- RuntimeDyldCOFFX86_64.h --- COFF/X86_64 specific code ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86_64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFF86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFF86_64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFX86_64 : public RuntimeDyldCOFF {
+
+private:
+ // When a module is loaded we save the SectionID of the unwind
+ // sections in a table until we receive a request to register all
+ // unregisteredEH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+ SmallVector<SID, 2> RegisteredEHFrameSections;
+ uint64_t ImageBase;
+
+ // Fake an __ImageBase pointer by returning the section with the lowest address
+ uint64_t getImageBase() {
+ if (!ImageBase) {
+ ImageBase = std::numeric_limits<uint64_t>::max();
+ for (const SectionEntry &Section : Sections)
+ // The Sections list may contain sections that weren't loaded for
+ // whatever reason: they may be debug sections, and ProcessAllSections
+ // is false, or they may be sections that contain 0 bytes. If the
+ // section isn't loaded, the load address will be 0, and it should not
+ // be included in the ImageBase calculation.
+ if (Section.getLoadAddress() != 0)
+ ImageBase = std::min(ImageBase, Section.getLoadAddress());
+ }
+ return ImageBase;
+ }
+
+ void write32BitOffset(uint8_t *Target, int64_t Addend, uint64_t Delta) {
+ uint64_t Result = Addend + Delta;
+ assert(Result <= UINT32_MAX && "Relocation overflow");
+ writeBytesUnaligned(Result, Target, 4);
+ }
+
+public:
+ RuntimeDyldCOFFX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_AMD64_ADDR64),
+ ImageBase(0) {}
+
+ Align getStubAlignment() override { return Align(1); }
+
+ // 2-byte jmp instruction + 32-bit relative address + 64-bit absolute jump
+ unsigned getMaxStubSize() const override { return 14; }
+
+ // The target location for the relocation is described by RE.SectionID and
+ // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+ // SectionEntry has three members describing its location.
+ // SectionEntry::Address is the address at which the section has been loaded
+ // into memory in the current (host) process. SectionEntry::LoadAddress is
+ // the address that the section will have in the target process.
+ // SectionEntry::ObjAddress is the address of the bits for this section in the
+ // original emitted object image (also in the current address space).
+ //
+ // Relocations will be applied as if the section were loaded at
+ // SectionEntry::LoadAddress, but they will be applied at an address based
+ // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer
+ // to Target memory contents if they are required for value calculations.
+ //
+ // The Value parameter here is the load address of the symbol for the
+ // relocation to be applied. For relocations which refer to symbols in the
+ // current object Value will be the LoadAddress of the section in which
+ // the symbol resides (RE.Addend provides additional information about the
+ // symbol location). For external symbols, Value will be the address of the
+ // symbol in the target address space.
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ // Delta is the distance from the start of the reloc to the end of the
+ // instruction with the reloc.
+ uint64_t Delta = 4 + (RE.RelType - COFF::IMAGE_REL_AMD64_REL32);
+ Value -= FinalAddress + Delta;
+ uint64_t Result = Value + RE.Addend;
+ assert(((int64_t)Result <= INT32_MAX) && "Relocation overflow");
+ assert(((int64_t)Result >= INT32_MIN) && "Relocation underflow");
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ // ADDR32NB requires an offset less than 2GB from 'ImageBase'.
+ // The MemoryManager can make sure this is always true by forcing the
+ // memory layout to be: CodeSection < ReadOnlySection < ReadWriteSection.
+ const uint64_t ImageBase = getImageBase();
+ if (Value < ImageBase || ((Value - ImageBase) > UINT32_MAX))
+ report_fatal_error("IMAGE_REL_AMD64_ADDR32NB relocation requires an "
+ "ordered section layout");
+ else {
+ write32BitOffset(Target, RE.Addend, Value - ImageBase);
+ }
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ writeBytesUnaligned(Value + RE.Addend, Target, 8);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_SECREL: {
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX && "Relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN && "Relocation underflow");
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_SECTION: {
+ assert(static_cast<int16_t>(RE.SectionID) <= INT16_MAX && "Relocation overflow");
+ assert(static_cast<int16_t>(RE.SectionID) >= INT16_MIN && "Relocation underflow");
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ }
+ }
+
+ std::tuple<uint64_t, uint64_t, uint64_t>
+ generateRelocationStub(unsigned SectionID, StringRef TargetName,
+ uint64_t Offset, uint64_t RelType, uint64_t Addend,
+ StubMap &Stubs) {
+ uintptr_t StubOffset;
+ SectionEntry &Section = Sections[SectionID];
+
+ RelocationValueRef OriginalRelValueRef;
+ OriginalRelValueRef.SectionID = SectionID;
+ OriginalRelValueRef.Offset = Offset;
+ OriginalRelValueRef.Addend = Addend;
+ OriginalRelValueRef.SymbolName = TargetName.data();
+
+ auto Stub = Stubs.find(OriginalRelValueRef);
+ if (Stub == Stubs.end()) {
+ LLVM_DEBUG(dbgs() << " Create a new stub function for "
+ << TargetName.data() << "\n");
+
+ StubOffset = Section.getStubOffset();
+ Stubs[OriginalRelValueRef] = StubOffset;
+ createStubFunction(Section.getAddressWithOffset(StubOffset));
+ Section.advanceStubOffset(getMaxStubSize());
+ } else {
+ LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+ << "\n");
+ StubOffset = Stub->second;
+ }
+
+ // FIXME: If RelType == COFF::IMAGE_REL_AMD64_ADDR32NB we should be able
+ // to ignore the __ImageBase requirement and just forward to the stub
+ // directly as an offset of this section:
+ // write32BitOffset(Section.getAddressWithOffset(Offset), 0, StubOffset);
+ // .xdata exception handlers don't work with this, though.
+
+ // Resolve original relocation to stub function.
+ const RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));
+
+ // adjust relocation info so resolution writes to the stub function
+ Addend = 0;
+ Offset = StubOffset + 6;
+ RelType = COFF::IMAGE_REL_AMD64_ADDR64;
+
+ return std::make_tuple(Offset, RelType, Addend);
+ }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ // If possible, find the symbol referred to in the relocation,
+ // and the section that contains it.
+ object::symbol_iterator Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+ auto SectionOrError = Symbol->getSection();
+ if (!SectionOrError)
+ return SectionOrError.takeError();
+ object::section_iterator SecI = *SectionOrError;
+ // If there is no section, this must be an external reference.
+ bool IsExtern = SecI == Obj.section_end();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+ uint64_t Addend = 0;
+ SectionEntry &Section = Sections[SectionID];
+ uintptr_t ObjTarget = Section.getObjAddress() + Offset;
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+
+ StringRef TargetName = *TargetNameOrErr;
+ unsigned TargetSectionID = 0;
+ uint64_t TargetOffset = 0;
+
+ if (TargetName.starts_with(getImportSymbolPrefix())) {
+ assert(IsExtern && "DLLImport not marked extern?");
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *SecI, SecI->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ switch (RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5:
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 4);
+
+ if (IsExtern)
+ std::tie(Offset, RelType, Addend) = generateRelocationStub(
+ SectionID, TargetName, Offset, RelType, Addend, Stubs);
+
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 8);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelType << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void registerEHFrames() override {
+ for (auto const &EHFrameSID : UnregisteredEHFrameSections) {
+ uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+ uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+ size_t EHFrameSize = Sections[EHFrameSID].getSize();
+ MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+ RegisteredEHFrameSections.push_back(EHFrameSID);
+ }
+ UnregisteredEHFrameSections.clear();
+ }
+
+ Error finalizeLoad(const object::ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override {
+ // Look for and record the EH frame section IDs.
+ for (const auto &SectionPair : SectionMap) {
+ const object::SectionRef &Section = SectionPair.first;
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+
+ // Note unwind info is stored in .pdata but often points to .xdata
+ // with an IMAGE_REL_AMD64_ADDR32NB relocation. Using a memory manager
+ // that keeps sections ordered in relation to __ImageBase is necessary.
+ if ((*NameOrErr) == ".pdata")
+ UnregisteredEHFrameSections.push_back(SectionPair.second);
+ }
+ return Error::success();
+ }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
new file mode 100644
index 000000000000..17cbe612fb43
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
@@ -0,0 +1,320 @@
+//===-- RuntimeDyldELFMips.cpp ---- ELF/Mips specific code. -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELFMips.h"
+#include "llvm/BinaryFormat/ELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+void RuntimeDyldELFMips::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ if (IsMipsO32ABI)
+ resolveMIPSO32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend);
+ else if (IsMipsN32ABI) {
+ resolveMIPSN32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ } else if (IsMipsN64ABI)
+ resolveMIPSN64Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ else
+ llvm_unreachable("Mips ABI not handled");
+}
+
+uint64_t RuntimeDyldELFMips::evaluateRelocation(const RelocationEntry &RE,
+ uint64_t Value,
+ uint64_t Addend) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ Value = evaluateMIPS64Relocation(Section, RE.Offset, Value, RE.RelType,
+ Addend, RE.SymOffset, RE.SectionID);
+ return Value;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+void RuntimeDyldELFMips::applyRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ applyMIPSRelocation(Section.getAddressWithOffset(RE.Offset), Value,
+ RE.RelType);
+ return;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+int64_t
+RuntimeDyldELFMips::evaluateMIPS32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS32Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ return Value;
+ case ELF::R_MIPS_32:
+ return Value;
+ case ELF::R_MIPS_26:
+ return Value >> 2;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return (Value + 0x8000) >> 16;
+ case ELF::R_MIPS_LO16:
+ return Value;
+ case ELF::R_MIPS_PC32: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ case ELF::R_MIPS_PC16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - (FinalAddress & ~0x3)) >> 2;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress + 0x8000) >> 16;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ }
+}
+
+int64_t RuntimeDyldELFMips::evaluateMIPS64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend)
+ << " Offset: " << format("%llx" PRIx64, Offset)
+ << " SID: " << format("%d", SectionID)
+ << " SymOffset: " << format("%x", SymOffset) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+ break;
+ case ELF::R_MIPS_JALR:
+ case ELF::R_MIPS_NONE:
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_64:
+ return Value + Addend;
+ case ELF::R_MIPS_26:
+ return ((Value + Addend) >> 2) & 0x3ffffff;
+ case ELF::R_MIPS_GPREL16: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_SUB:
+ return Value - Addend;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return ((Value + Addend + 0x8000) >> 16) & 0xffff;
+ case ELF::R_MIPS_LO16:
+ return (Value + Addend) & 0xffff;
+ case ELF::R_MIPS_HIGHER:
+ return ((Value + Addend + 0x80008000) >> 32) & 0xffff;
+ case ELF::R_MIPS_HIGHEST:
+ return ((Value + Addend + 0x800080008000) >> 48) & 0xffff;
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE: {
+ uint8_t *LocalGOTAddr =
+ getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
+ uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, getGOTEntrySize());
+
+ Value += Addend;
+ if (Type == ELF::R_MIPS_GOT_PAGE)
+ Value = (Value + 0x8000) & ~0xffff;
+
+ if (GOTEntry)
+ assert(GOTEntry == Value &&
+ "GOT entry has two different addresses.");
+ else
+ writeBytesUnaligned(Value, LocalGOTAddr, getGOTEntrySize());
+
+ return (SymOffset - 0x7ff0) & 0xffff;
+ }
+ case ELF::R_MIPS_GOT_OFST: {
+ int64_t page = (Value + Addend + 0x8000) & ~0xffff;
+ return (Value + Addend - page) & 0xffff;
+ }
+ case ELF::R_MIPS_GPREL32: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_PC16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
+ }
+ case ELF::R_MIPS_PC32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value + Addend - FinalAddress;
+ }
+ case ELF::R_MIPS_PC18_S3: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value + Addend - FinalAddress) & 0xffff;
+ }
+ }
+ return 0;
+}
+
+void RuntimeDyldELFMips::applyMIPSRelocation(uint8_t *TargetPtr, int64_t Value,
+ uint32_t Type) {
+ uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ break;
+ case ELF::R_MIPS_GPREL16:
+ case ELF::R_MIPS_HI16:
+ case ELF::R_MIPS_LO16:
+ case ELF::R_MIPS_HIGHER:
+ case ELF::R_MIPS_HIGHEST:
+ case ELF::R_MIPS_PC16:
+ case ELF::R_MIPS_PCHI16:
+ case ELF::R_MIPS_PCLO16:
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE:
+ case ELF::R_MIPS_GOT_OFST:
+ Insn = (Insn & 0xffff0000) | (Value & 0x0000ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC18_S3:
+ Insn = (Insn & 0xfffc0000) | (Value & 0x0003ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC19_S2:
+ Insn = (Insn & 0xfff80000) | (Value & 0x0007ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC21_S2:
+ Insn = (Insn & 0xffe00000) | (Value & 0x001fffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_26:
+ case ELF::R_MIPS_PC26_S2:
+ Insn = (Insn & 0xfc000000) | (Value & 0x03ffffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_GPREL32:
+ case ELF::R_MIPS_PC32:
+ writeBytesUnaligned(Value & 0xffffffff, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_64:
+ case ELF::R_MIPS_SUB:
+ writeBytesUnaligned(Value, TargetPtr, 8);
+ break;
+ }
+}
+
+void RuntimeDyldELFMips::resolveMIPSN32Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ int64_t CalculatedValue = evaluateMIPS64Relocation(
+ Section, Offset, Value, Type, Addend, SymOffset, SectionID);
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ Type);
+}
+
+void RuntimeDyldELFMips::resolveMIPSN64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ uint32_t r_type = Type & 0xff;
+ uint32_t r_type2 = (Type >> 8) & 0xff;
+ uint32_t r_type3 = (Type >> 16) & 0xff;
+
+ // RelType is used to keep information for which relocation type we are
+ // applying relocation.
+ uint32_t RelType = r_type;
+ int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
+ RelType, Addend,
+ SymOffset, SectionID);
+ if (r_type2 != ELF::R_MIPS_NONE) {
+ RelType = r_type2;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ if (r_type3 != ELF::R_MIPS_NONE) {
+ RelType = r_type3;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ RelType);
+}
+
+void RuntimeDyldELFMips::resolveMIPSO32Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint32_t Value, uint32_t Type,
+ int32_t Addend) {
+ uint8_t *TargetPtr = Section.getAddressWithOffset(Offset);
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveMIPSO32Relocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset) << " FinalAddress: "
+ << format("%p", Section.getLoadAddressWithOffset(Offset))
+ << " Value: " << format("%x", Value) << " Type: "
+ << format("%x", Type) << " Addend: " << format("%x", Addend)
+ << " SymOffset: " << format("%x", Offset) << "\n");
+
+ Value = evaluateMIPS32Relocation(Section, Offset, Value, Type);
+
+ applyMIPSRelocation(TargetPtr, Value, Type);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
new file mode 100644
index 000000000000..f03acb41d670
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
@@ -0,0 +1,66 @@
+//===-- RuntimeDyldELFMips.h ---- ELF/Mips specific code. -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+
+#include "../RuntimeDyldELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldELFMips : public RuntimeDyldELF {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldELFMips(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldELF(MM, Resolver) {}
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+
+protected:
+ void resolveMIPSO32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+ void resolveMIPSN32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+ void resolveMIPSN64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+private:
+ /// An object-file-specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address at which to apply the relocation action
+ uint64_t evaluateRelocation(const RelocationEntry &RE, uint64_t Value,
+ uint64_t Addend);
+
+ /// An object-file-specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address at which to apply the relocation action
+ void applyRelocation(const RelocationEntry &RE, uint64_t Value);
+
+ int64_t evaluateMIPS32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type);
+ int64_t evaluateMIPS64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+ void applyMIPSRelocation(uint8_t *TargetPtr, int64_t CalculatedValue,
+ uint32_t Type);
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 000000000000..701cc3a88149
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,541 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ Align getStubAlignment() override { return Align(8); }
+
+ /// Extract the addend encoded in the instruction / memory location.
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ unsigned NumBytes = 1 << RE.Size;
+ int64_t Addend = 0;
+ // Verify that the relocation has the correct size and alignment.
+ switch (RE.RelType) {
+ default: {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Unsupported relocation type: "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ if (NumBytes != 4 && NumBytes != 8) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Invalid relocation size for relocation "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
+ else
+ Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ // Verify that the relocation points to a B/BL instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Get the 26 bit addend encoded in the branch instruction and sign-extend
+ // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
+ // (<< 2).
+ Addend = (*p & 0x03FFFFFF) << 2;
+ Addend = SignExtend64(Addend, 28);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Get the 21 bit addend encoded in the adrp instruction and sign-extend
+ // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
+ // therefore implicit (<< 12).
+ Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
+ Addend = SignExtend64(Addend, 33);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ (void)p;
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ [[fallthrough]];
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Get the 12 bit addend encoded in the instruction.
+ Addend = (*p & 0x003FFC00) >> 10;
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ if (ImplicitShift == 0) {
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000)
+ ImplicitShift = 4;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend <<= ImplicitShift;
+ break;
+ }
+ }
+ return Addend;
+ }
+
+  /// Encode the addend into the instruction / memory location.
+ void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ MachO::RelocationInfoType RelType, int64_t Addend) const {
+ // Verify that the relocation has the correct alignment.
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
+ else
+ *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ // Verify that the relocation points to the expected branch instruction.
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Verify addend value.
+ assert((Addend & 0x3) == 0 && "Branch target is not aligned");
+ assert(isInt<28>(Addend) && "Branch target is out of range.");
+
+ // Encode the addend as 26 bit immediate in the branch instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Check that the addend fits into 21 bits (+ 12 lower bits).
+ assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
+ assert(isInt<33>(Addend) && "Invalid page reloc value.");
+
+ // Encode the addend into the instruction.
+ uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
+ uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ (void)p;
+ [[fallthrough]];
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction and verify alignment.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Addend & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+ case 1:
+ assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+ break;
+ case 2:
+ assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+ break;
+ case 3:
+ assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+ break;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend >>= ImplicitShift;
+ assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
+
+ // Encode the addend into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
+ break;
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return make_error<RuntimeDyldError>("Scattered relocations not supported "
+ "for MachO AArch64");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit to 64-bit.
+ ExplicitAddend = SignExtend64(RawAddend, 24);
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ bool Valid =
+ (RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel);
+ if (!Valid)
+ return make_error<StringError>("ARM64_RELOC_POINTER_TO_GOT supports "
+ "32-bit pc-rel or 64-bit absolute only",
+ inconvertibleErrorCode());
+ }
+
+ if (auto Addend = decodeAddend(RE))
+ RE.Addend = *Addend;
+ else
+ return Addend.takeError();
+
+ assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
+ "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
+ if (ExplicitAddend)
+ RE.Addend = ExplicitAddend;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ // We'll take care of the offset in processGOTRelocation.
+ Value.Offset = 0;
+ } else if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12 ||
+ RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ MachO::RelocationInfoType RelType =
+ static_cast<MachO::RelocationInfoType>(RE.RelType);
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: {
+ assert(((RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel)) &&
+ "ARM64_RELOC_POINTER_TO_GOT only supports 32-bit pc-rel or 64-bit "
+ "absolute");
+ // Addend is the GOT entry address and RE.Offset the target of the
+ // relocation.
+ uint64_t Result =
+ RE.IsPCRel ? (RE.Addend - RE.Offset) : (Value + RE.Addend);
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Result);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not yet implemented!");
+ case MachO::ARM64_RELOC_ADDEND:
+ llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
+ "processRelocationRef!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert((RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT &&
+ (RE.Size == 2 || RE.Size == 3)) ||
+ RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ int64_t Offset;
+ if (i != Stubs.end())
+ Offset = static_cast<int64_t>(i->second);
+ else {
+      // FIXME: There must be a better way to do this than to check and fix the
+      // alignment every time!!!
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment().value();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(isAligned(getStubAlignment(), StubAddress) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ Offset = static_cast<int64_t>(StubOffset);
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
+ RE.IsPCRel, RE.Size);
+ addRelocationForSection(TargetRE, RE.SectionID);
+ }
+
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+ auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+ unsigned SectionBID = SubtrahendI->second.getSectionID();
+ uint64_t SectionBOffset = SubtrahendI->second.getOffset();
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ ++RelI;
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+ auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+ unsigned SectionAID = MinuendI->second.getSectionID();
+ uint64_t SectionAOffset = MinuendI->second.getOffset();
+
+ RelocationEntry R(SectionID, Offset, MachO::ARM64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+ SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+ false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ static const char *getRelocName(uint32_t RelocType) {
+ switch (RelocType) {
+ case MachO::ARM64_RELOC_UNSIGNED: return "ARM64_RELOC_UNSIGNED";
+ case MachO::ARM64_RELOC_SUBTRACTOR: return "ARM64_RELOC_SUBTRACTOR";
+ case MachO::ARM64_RELOC_BRANCH26: return "ARM64_RELOC_BRANCH26";
+ case MachO::ARM64_RELOC_PAGE21: return "ARM64_RELOC_PAGE21";
+ case MachO::ARM64_RELOC_PAGEOFF12: return "ARM64_RELOC_PAGEOFF12";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: return "ARM64_RELOC_GOT_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: return "ARM64_RELOC_GOT_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: return "ARM64_RELOC_POINTER_TO_GOT";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21: return "ARM64_RELOC_TLVP_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12: return "ARM64_RELOC_TLVP_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_ADDEND: return "ARM64_RELOC_ADDEND";
+ }
+ return "Unrecognized arm64 addend";
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 000000000000..79b558eb7796
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,431 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+private:
+ typedef RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> ParentT;
+
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOARM(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ Align getStubAlignment() override { return Align(4); }
+
+ Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
+ auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
+ if (!Flags)
+ return Flags.takeError();
+ Flags->getTargetFlags() = ARMJITSymbolFlags::fromObjectSymbol(SR);
+ return Flags;
+ }
+
+ uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+ JITSymbolFlags Flags) const override {
+ if (Flags.getTargetFlags() & ARMJITSymbolFlags::Thumb)
+ Addr |= 0x1;
+ return Addr;
+ }
+
+ bool isAddrTargetThumb(unsigned SectionID, uint64_t Offset) {
+ auto TargetObjAddr = Sections[SectionID].getObjAddress() + Offset;
+ for (auto &KV : GlobalSymbolTable) {
+ auto &Entry = KV.second;
+ auto SymbolObjAddr =
+ Sections[Entry.getSectionID()].getObjAddress() + Entry.getOffset();
+ if (TargetObjAddr == SymbolObjAddr)
+ return (Entry.getFlags().getTargetFlags() & ARMJITSymbolFlags::Thumb);
+ }
+ return false;
+ }
+
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ default:
+ return memcpyAddend(RE);
+ case MachO::ARM_RELOC_BR24: {
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ Temp &= 0x00ffffff; // Mask out the opcode.
+ // Now we've got the shifted immediate, shift by 2, sign extend and ret.
+ return SignExtend32<26>(Temp << 2);
+ }
+
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ // This is a pair of instructions whose operands combine to provide 22
+ // bits of displacement:
+ // Encoding for high bits 1111 0XXX XXXX XXXX
+ // Encoding for low bits 1111 1XXX XXXX XXXX
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ if ((HighInsn & 0xf800) != 0xf000)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 high bits)",
+ inconvertibleErrorCode());
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ if ((LowInsn & 0xf800) != 0xf800)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 low bits)",
+ inconvertibleErrorCode());
+
+ return SignExtend64<23>(((HighInsn & 0x7ff) << 12) |
+ ((LowInsn & 0x7ff) << 1));
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ // Set to true for thumb functions in this (or previous) TUs.
+ // Will be used to set the TargetIsThumbFunc member on the relocation entry.
+ bool TargetIsLocalThumbFunc = false;
+ if (Obj.getPlainRelocationExternal(RelInfo)) {
+ auto Symbol = RelI->getSymbol();
+ StringRef TargetName;
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+
+ // If the target is external but the value doesn't have a name then we've
+ // converted the value to a section/offset pair, but we still need to set
+      // the IsTargetThumbFunc bit, so look the value up in the global symbol table.
+ auto EntryItr = GlobalSymbolTable.find(TargetName);
+ if (EntryItr != GlobalSymbolTable.end()) {
+ TargetIsLocalThumbFunc =
+ EntryItr->second.getFlags().getTargetFlags() &
+ ARMJITSymbolFlags::Thumb;
+ }
+ }
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::ARM_RELOC_HALF_SECTDIFF)
+ return processHALFSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID,
+ TargetIsLocalThumbFunc);
+ else
+ return ++RelI;
+ }
+
+ // Validate the relocation type.
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_LOCAL_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_THUMB_32BIT_BRANCH);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_HALF);
+ default:
+ if (RelType > MachO::ARM_RELOC_HALF_SECTDIFF)
+ return make_error<RuntimeDyldError>(("MachO ARM relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ if (auto AddendOrErr = decodeAddend(RE))
+ RE.Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+ RE.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // If this is a branch from a thumb function (BR22) then make sure we mark
+ // the value as being a thumb stub: we don't want to mix it up with an ARM
+ // stub targeting the same function.
+ if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ Value.IsStubThumb = true;
+
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI,
+ (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8);
+
+ // If this is a non-external branch target check whether Value points to a
+ // thumb func.
+ if (!Value.SymbolName && (RelType == MachO::ARM_RELOC_BR24 ||
+ RelType == MachO::ARM_THUMB_RELOC_BR22))
+ RE.IsTargetThumbFunc = isAddrTargetThumb(Value.SectionID, Value.Offset);
+
+ if (RE.RelType == MachO::ARM_RELOC_BR24 ||
+ RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+      // (4 bytes in Thumb mode, 8 bytes in ARM mode).
+ Value -= (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8;
+ }
+
+ switch (RE.RelType) {
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ Value += RE.Addend;
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ assert((HighInsn & 0xf800) == 0xf000 &&
+ "Unrecognized thumb branch encoding (BR22 high bits)");
+ HighInsn = (HighInsn & 0xf800) | ((Value >> 12) & 0x7ff);
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ assert((LowInsn & 0xf800) == 0xf800 &&
+ "Unrecognized thumb branch encoding (BR22 low bits)");
+ LowInsn = (LowInsn & 0xf800) | ((Value >> 1) & 0x7ff);
+
+ writeBytesUnaligned(HighInsn, LocalAddress, 2);
+ writeBytesUnaligned(LowInsn, LocalAddress + 2, 2);
+ break;
+ }
+
+ case MachO::ARM_RELOC_VANILLA:
+ if (RE.IsTargetThumbFunc)
+ Value |= 0x01;
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ Value += RE.Addend;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ writeBytesUnaligned((Temp & ~0xffffff) | FinalValue, LocalAddress, 4);
+
+ break;
+ }
+ case MachO::ARM_RELOC_HALF_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected HALFSECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ if (RE.Size & 0x1) // :upper16:
+ Value = (Value >> 16);
+
+ bool IsThumb = RE.Size & 0x2;
+
+ Value &= 0xffff;
+
+ uint32_t Insn = readBytesUnaligned(LocalAddress, 4);
+
+ if (IsThumb)
+ Insn = (Insn & 0x8f00fbf0) | ((Value & 0xf000) >> 12) |
+ ((Value & 0x0800) >> 1) | ((Value & 0x0700) << 20) |
+ ((Value & 0x00ff) << 16);
+ else
+ Insn = (Insn & 0xfff0f000) | ((Value & 0xf000) << 4) | (Value & 0x0fff);
+ writeBytesUnaligned(Insn, LocalAddress, 4);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Invalid relocation type");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__nl_symbol_ptr")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+ // This is an ARM branch relocation, need to use a stub function.
+ // Look up for existing stub.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ // Create a new stub function.
+ assert(Section.getStubOffset() % 4 == 0 && "Misaligned stub");
+ Stubs[Value] = Section.getStubOffset();
+ uint32_t StubOpcode = 0;
+ if (RE.RelType == MachO::ARM_RELOC_BR24)
+ StubOpcode = 0xe51ff004; // ldr pc, [pc, #-4]
+ else if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ StubOpcode = 0xf000f8df; // ldr pc, [pc]
+ else
+ llvm_unreachable("Unrecognized relocation");
+ Addr = Section.getAddressWithOffset(Section.getStubOffset());
+ writeBytesUnaligned(StubOpcode, Addr, 4);
+ uint8_t *StubTargetAddr = Addr + 4;
+ RelocationEntry StubRE(
+ RE.SectionID, StubTargetAddr - Section.getAddress(),
+ MachO::GENERIC_RELOC_VANILLA, Value.Offset, false, 2);
+ StubRE.IsTargetThumbFunc = RE.IsTargetThumbFunc;
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processHALFSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseTObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &MachO =
+ static_cast<const MachOObjectFile&>(BaseTObj);
+ MachO::any_relocation_info RE =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+
+ // For a half-diff relocation the length bits actually record whether this
+ // is a movw/movt, and whether this is arm or thumb.
+ // Bit 0 indicates movw (b0 == 0) or movt (b0 == 1).
+ // Bit 1 indicates arm (b1 == 0) or thumb (b1 == 1).
+ unsigned HalfDiffKindBits = MachO.getAnyRelocationLength(RE);
+ bool IsThumb = HalfDiffKindBits & 0x2;
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO.getAnyRelocationType(RE);
+ bool IsPCRel = MachO.getAnyRelocationPCRel(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ int64_t Immediate = readBytesUnaligned(LocalAddress, 4); // Copy the whole instruction out.
+
+ if (IsThumb)
+ Immediate = ((Immediate & 0x0000000f) << 12) |
+ ((Immediate & 0x00000400) << 1) |
+ ((Immediate & 0x70000000) >> 20) |
+ ((Immediate & 0x00ff0000) >> 16);
+ else
+ Immediate = ((Immediate >> 4) & 0xf000) | (Immediate & 0xfff);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t AddrA = MachO.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(MachO, AddrA);
+ assert(SAI != MachO.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(MachO, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = MachO.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(MachO, AddrB);
+ assert(SBI != MachO.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(MachO, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
+ uint32_t OtherHalf = MachO.getAnyRelocationAddress(RE2) & 0xffff;
+ unsigned Shift = (HalfDiffKindBits & 0x1) ? 16 : 0;
+ uint32_t FullImmVal = (Immediate << Shift) | (OtherHalf << (16 - Shift));
+ int64_t Addend = FullImmVal - (AddrA - AddrB);
+
+ // addend = Encoded - Expected
+ // = Encoded - (AddrA - AddrB)
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ HalfDiffKindBits);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 000000000000..a983e22671b2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,250 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+// RuntimeDyldMachOI386 implements MachO/i386 support for RuntimeDyld:
+// decoding GENERIC_RELOC_* relocations (including the scattered SECTDIFF
+// pair forms), resolving them at finalization time, and materializing
+// stubs for __jump_table / __pointers sections.
+class RuntimeDyldMachOI386
+    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+
+  typedef uint32_t TargetPtrT;
+
+  RuntimeDyldMachOI386(RuntimeDyld::MemoryManager &MM,
+                       JITSymbolResolver &Resolver)
+      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+  // i386 allocates no per-relocation stubs; jump-table stubs are created
+  // separately in populateJumpTable().
+  unsigned getMaxStubSize() const override { return 0; }
+
+  Align getStubAlignment() override { return Align(1); }
+
+  // Decode one relocation (consuming the paired entry for SECTDIFF forms)
+  // and record it for later resolution. Returns the iterator advanced past
+  // everything consumed, or an error for unsupported relocation types.
+  Expected<relocation_iterator>
+  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+                       const ObjectFile &BaseObjT,
+                       ObjSectionToIDMap &ObjSectionToID,
+                       StubMap &Stubs) override {
+    const MachOObjectFile &Obj =
+        static_cast<const MachOObjectFile &>(BaseObjT);
+    MachO::any_relocation_info RelInfo =
+        Obj.getRelocation(RelI->getRawDataRefImpl());
+    uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+    // Scattered relocations carry the target address inline; only the
+    // SECTDIFF pair forms and scattered VANILLA are handled here.
+    if (Obj.isRelocationScattered(RelInfo)) {
+      if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+          RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+        return processSECTDIFFRelocation(SectionID, RelI, Obj,
+                                         ObjSectionToID);
+      else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+        return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID);
+      return make_error<RuntimeDyldError>(("Unhandled I386 scattered relocation "
+                                           "type: " + Twine(RelType)).str());
+    }
+
+    switch (RelType) {
+    UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PAIR);
+    UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PB_LA_PTR);
+    UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_TLV);
+    default:
+      if (RelType > MachO::GENERIC_RELOC_TLV)
+        return make_error<RuntimeDyldError>(("MachO I386 relocation type " +
+                                             Twine(RelType) +
+                                             " is out of range").str());
+      break;
+    }
+
+    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+    RE.Addend = memcpyAddend(RE);
+    RelocationValueRef Value;
+    if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+      Value = *ValueOrErr;
+    else
+      return ValueOrErr.takeError();
+
+    // Addends for external, PC-rel relocations on i386 point back to the zero
+    // offset. Calculate the final offset from the relocation target instead.
+    // This allows us to use the same logic for both external and internal
+    // relocations in resolveI386RelocationRef.
+    // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+    // if (IsExtern && RE.IsPCRel) {
+    //   uint64_t RelocAddr = 0;
+    //   RelI->getAddress(RelocAddr);
+    //   Value.Addend += RelocAddr + 4;
+    // }
+    if (RE.IsPCRel)
+      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+    RE.Addend = Value.Offset;
+
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+
+    return ++RelI;
+  }
+
+  // Patch the relocation site with its final value. PC-relative entries are
+  // biased by 4 bytes (address of the following instruction) before writing.
+  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+    LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+    const SectionEntry &Section = Sections[RE.SectionID];
+    uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+    if (RE.IsPCRel) {
+      uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+      Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+    }
+
+    switch (RE.RelType) {
+    case MachO::GENERIC_RELOC_VANILLA:
+      writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+      break;
+    case MachO::GENERIC_RELOC_SECTDIFF:
+    case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+      // SECTDIFF encodes 'A - B + C': the two section load addresses supply
+      // A and B, and RE.Addend holds C (computed in
+      // processSECTDIFFRelocation).
+      uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+      uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+      assert((Value == SectionABase || Value == SectionBBase) &&
+             "Unexpected SECTDIFF relocation value.");
+      Value = SectionABase - SectionBBase + RE.Addend;
+      writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+      break;
+    }
+    default:
+      llvm_unreachable("Invalid relocation type!");
+    }
+  }
+
+  // Post-emission hook: stub sections ("__jump_table", "__pointers") need
+  // their contents synthesized from the indirect symbol table.
+  Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+                        const SectionRef &Section) {
+    StringRef Name;
+    if (Expected<StringRef> NameOrErr = Section.getName())
+      Name = *NameOrErr;
+    else
+      consumeError(NameOrErr.takeError());
+
+    if (Name == "__jump_table")
+      return populateJumpTable(cast<MachOObjectFile>(Obj), Section, SectionID);
+    else if (Name == "__pointers")
+      return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+                                                   Section, SectionID);
+    return Error::success();
+  }
+
+private:
+  // Handle a scattered SECTDIFF/LOCAL_SECTDIFF relocation together with its
+  // mandatory PAIR successor. The encoded bytes hold 'A - B + C'; the addend
+  // C is recovered by subtracting (AddrA - AddrB), and a two-section
+  // RelocationEntry is queued against section A.
+  Expected<relocation_iterator>
+  processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+                            const ObjectFile &BaseObjT,
+                            ObjSectionToIDMap &ObjSectionToID) {
+    const MachOObjectFile &Obj =
+        static_cast<const MachOObjectFile&>(BaseObjT);
+    MachO::any_relocation_info RE =
+        Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    SectionEntry &Section = Sections[SectionID];
+    uint32_t RelocType = Obj.getAnyRelocationType(RE);
+    bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+    unsigned Size = Obj.getAnyRelocationLength(RE);
+    uint64_t Offset = RelI->getOffset();
+    uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+    unsigned NumBytes = 1 << Size;
+    uint64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+    // Consume the paired relocation that carries address B.
+    ++RelI;
+    MachO::any_relocation_info RE2 =
+        Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    uint32_t AddrA = Obj.getScatteredRelocationValue(RE);
+    section_iterator SAI = getSectionByAddress(Obj, AddrA);
+    assert(SAI != Obj.section_end() && "Can't find section for address A");
+    uint64_t SectionABase = SAI->getAddress();
+    uint64_t SectionAOffset = AddrA - SectionABase;
+    SectionRef SectionA = *SAI;
+    bool IsCode = SectionA.isText();
+    uint32_t SectionAID = ~0U;
+    if (auto SectionAIDOrErr =
+            findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID))
+      SectionAID = *SectionAIDOrErr;
+    else
+      return SectionAIDOrErr.takeError();
+
+    uint32_t AddrB = Obj.getScatteredRelocationValue(RE2);
+    section_iterator SBI = getSectionByAddress(Obj, AddrB);
+    assert(SBI != Obj.section_end() && "Can't find section for address B");
+    uint64_t SectionBBase = SBI->getAddress();
+    uint64_t SectionBOffset = AddrB - SectionBBase;
+    SectionRef SectionB = *SBI;
+    uint32_t SectionBID = ~0U;
+    if (auto SectionBIDOrErr =
+            findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID))
+      SectionBID = *SectionBIDOrErr;
+    else
+      return SectionBIDOrErr.takeError();
+
+    // Compute the addend 'C' from the original expression 'A - B + C'.
+    Addend -= AddrA - AddrB;
+
+    LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+                      << ", AddrB: " << AddrB << ", Addend: " << Addend
+                      << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+                      << SectionAOffset << ", SectionB ID: " << SectionBID
+                      << ", SectionBOffset: " << SectionBOffset << "\n");
+    RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+                      SectionAOffset, SectionBID, SectionBOffset,
+                      IsPCRel, Size);
+
+    addRelocationForSection(R, SectionAID);
+
+    return ++RelI;
+  }
+
+  // Populate stubs in __jump_table section.
+  // Each table entry becomes a stub (via createStubFunction) plus a VANILLA
+  // relocation binding it to the indirect symbol for that slot.
+  // NOTE(review): NumJTEntries divides by JTEntrySize before the
+  // divisibility check below — a zero reserved2 field would divide by zero;
+  // confirm the loader guarantees JTEntrySize != 0.
+  Error populateJumpTable(const MachOObjectFile &Obj,
+                          const SectionRef &JTSection,
+                          unsigned JTSectionID) {
+    MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+    MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+    uint32_t JTSectionSize = Sec32.size;
+    unsigned FirstIndirectSymbol = Sec32.reserved1;
+    unsigned JTEntrySize = Sec32.reserved2;
+    unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+    uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+    unsigned JTEntryOffset = 0;
+
+    if (JTSectionSize % JTEntrySize != 0)
+      return make_error<RuntimeDyldError>("Jump-table section does not contain "
+                                          "a whole number of stubs?");
+
+    for (unsigned i = 0; i < NumJTEntries; ++i) {
+      unsigned SymbolIndex =
+          Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+      symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+      Expected<StringRef> IndirectSymbolName = SI->getName();
+      if (!IndirectSymbolName)
+        return IndirectSymbolName.takeError();
+      uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+      createStubFunction(JTEntryAddr);
+      // Offset + 1: presumably the operand of the jump emitted by
+      // createStubFunction, skipping its opcode byte — confirm against
+      // the stub layout.
+      RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+                         MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+      addRelocationForSymbol(RE, *IndirectSymbolName);
+      JTEntryOffset += JTEntrySize;
+    }
+
+    return Error::success();
+  }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 000000000000..bd0d72f9e117
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,238 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+// RuntimeDyldMachOX86_64 implements MachO/x86-64 support for RuntimeDyld:
+// decoding X86_64_RELOC_* relocations (including SUBTRACTOR pairs),
+// synthesizing 8-byte GOT entries for GOT/GOT_LOAD references, and
+// resolving all entries at finalization time.
+class RuntimeDyldMachOX86_64
+    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+
+  typedef uint64_t TargetPtrT;
+
+  RuntimeDyldMachOX86_64(RuntimeDyld::MemoryManager &MM,
+                         JITSymbolResolver &Resolver)
+      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+  // One 8-byte GOT entry per stub (see processGOTRelocation).
+  unsigned getMaxStubSize() const override { return 8; }
+
+  Align getStubAlignment() override { return Align(8); }
+
+  // Decode one relocation (consuming the paired entry for SUBTRACTOR) and
+  // record it for later resolution. Returns the iterator advanced past
+  // everything consumed, or an error for unsupported relocation types.
+  Expected<relocation_iterator>
+  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+                       const ObjectFile &BaseObjT,
+                       ObjSectionToIDMap &ObjSectionToID,
+                       StubMap &Stubs) override {
+    const MachOObjectFile &Obj =
+      static_cast<const MachOObjectFile &>(BaseObjT);
+    MachO::any_relocation_info RelInfo =
+        Obj.getRelocation(RelI->getRawDataRefImpl());
+    uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+    if (RelType == MachO::X86_64_RELOC_SUBTRACTOR)
+      return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+    assert(!Obj.isRelocationScattered(RelInfo) &&
+           "Scattered relocations not supported on X86_64");
+
+    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+    RE.Addend = memcpyAddend(RE);
+    RelocationValueRef Value;
+    if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+      Value = *ValueOrErr;
+    else
+      return ValueOrErr.takeError();
+
+    // Internal PC-relative relocations encode the delta from the fixup site;
+    // fold that back into the value so resolution is uniform.
+    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+    if (!IsExtern && RE.IsPCRel)
+      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+    switch (RelType) {
+    UNIMPLEMENTED_RELOC(MachO::X86_64_RELOC_TLV);
+    default:
+      if (RelType > MachO::X86_64_RELOC_TLV)
+        return make_error<RuntimeDyldError>(("MachO X86_64 relocation type " +
+                                             Twine(RelType) +
+                                             " is out of range").str());
+      break;
+    }
+
+    if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+        RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+      processGOTRelocation(RE, Value, Stubs);
+    else {
+      RE.Addend = Value.Offset;
+      if (Value.SymbolName)
+        addRelocationForSymbol(RE, Value.SymbolName);
+      else
+        addRelocationForSection(RE, Value.SectionID);
+    }
+
+    return ++RelI;
+  }
+
+  // Patch the relocation site with its final value. PC-relative entries are
+  // biased by 4 bytes before writing (see FIXME below).
+  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+    LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+    const SectionEntry &Section = Sections[RE.SectionID];
+    uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+    // If the relocation is PC-relative, the value to be encoded is the
+    // pointer difference.
+    if (RE.IsPCRel) {
+      // FIXME: It seems this value needs to be adjusted by 4 for an effective
+      // PC address. Is that expected? Only for branches, perhaps?
+      uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+      Value -= FinalAddress + 4;
+    }
+
+    switch (RE.RelType) {
+    default:
+      llvm_unreachable("Invalid relocation type!");
+    case MachO::X86_64_RELOC_SIGNED_1:
+    case MachO::X86_64_RELOC_SIGNED_2:
+    case MachO::X86_64_RELOC_SIGNED_4:
+    case MachO::X86_64_RELOC_SIGNED:
+    case MachO::X86_64_RELOC_UNSIGNED:
+    case MachO::X86_64_RELOC_BRANCH:
+      writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+      break;
+    case MachO::X86_64_RELOC_SUBTRACTOR: {
+      // SUBTRACTOR encodes 'A - B + C': the two section load addresses
+      // supply A and B, and RE.Addend holds C (computed in
+      // processSubtractRelocation).
+      uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+      uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+      assert((Value == SectionABase || Value == SectionBBase) &&
+             "Unexpected SUBTRACTOR relocation value.");
+      Value = SectionABase - SectionBBase + RE.Addend;
+      writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+      break;
+    }
+    }
+  }
+
+  // x86-64 has no per-section finalization work.
+  Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+                        const SectionRef &Section) {
+    return Error::success();
+  }
+
+private:
+  // Redirect a GOT/GOT_LOAD relocation through an 8-byte GOT entry in the
+  // section's stub area (one entry per distinct target, cached in Stubs),
+  // then immediately resolve the original site as a PC-rel32 reference to
+  // that entry.
+  void processGOTRelocation(const RelocationEntry &RE,
+                            RelocationValueRef &Value, StubMap &Stubs) {
+    SectionEntry &Section = Sections[RE.SectionID];
+    assert(RE.IsPCRel);
+    assert(RE.Size == 2);
+    Value.Offset -= RE.Addend;
+    RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+    uint8_t *Addr;
+    if (i != Stubs.end()) {
+      Addr = Section.getAddressWithOffset(i->second);
+    } else {
+      // No entry for this target yet: reserve 8 bytes of stub space and
+      // queue an absolute (UNSIGNED, 8-byte) relocation to fill it.
+      Stubs[Value] = Section.getStubOffset();
+      uint8_t *GOTEntry = Section.getAddressWithOffset(Section.getStubOffset());
+      RelocationEntry GOTRE(RE.SectionID, Section.getStubOffset(),
+                            MachO::X86_64_RELOC_UNSIGNED, Value.Offset, false,
+                            3);
+      if (Value.SymbolName)
+        addRelocationForSymbol(GOTRE, Value.SymbolName);
+      else
+        addRelocationForSection(GOTRE, Value.SectionID);
+      Section.advanceStubOffset(8);
+      Addr = GOTEntry;
+    }
+    RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+                             MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+    resolveRelocation(TargetRE, (uint64_t)Addr);
+  }
+
+  // Handle a SUBTRACTOR relocation and its paired successor. The first
+  // entry names the subtrahend (B), the second the minuend (A); the encoded
+  // bytes hold 'A - B + C'. Section-relative offsets for both operands are
+  // folded into a two-section RelocationEntry queued against section A.
+  // NOTE(review): the AIsExternal/BIsExternal names are swapped relative to
+  // the operands they guard (AIsExternal comes from the subtrahend/B entry);
+  // also GlobalSymbolTable.find() results are dereferenced without an end()
+  // check — confirm external operands are always in the table.
+  Expected<relocation_iterator>
+  processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+                            const MachOObjectFile &BaseObj,
+                            ObjSectionToIDMap &ObjSectionToID) {
+    const MachOObjectFile &Obj =
+        static_cast<const MachOObjectFile&>(BaseObj);
+    MachO::any_relocation_info RE =
+        Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    unsigned Size = Obj.getAnyRelocationLength(RE);
+    uint64_t Offset = RelI->getOffset();
+    uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+    unsigned NumBytes = 1 << Size;
+    int64_t Addend =
+      SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+    unsigned SectionBID = ~0U;
+    uint64_t SectionBOffset = 0;
+
+    MachO::any_relocation_info RelInfo =
+        Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    bool AIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+
+    if (AIsExternal) {
+      Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+      if (!SubtrahendNameOrErr)
+        return SubtrahendNameOrErr.takeError();
+      auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+      SectionBID = SubtrahendI->second.getSectionID();
+      SectionBOffset = SubtrahendI->second.getOffset();
+    } else {
+      SectionRef SecB = Obj.getAnyRelocationSection(RelInfo);
+      bool IsCode = SecB.isText();
+      Expected<unsigned> SectionBIDOrErr =
+        findOrEmitSection(Obj, SecB, IsCode, ObjSectionToID);
+      if (!SectionBIDOrErr)
+        return SectionBIDOrErr.takeError();
+      SectionBID = *SectionBIDOrErr;
+      Addend += SecB.getAddress();
+    }
+
+    // Consume the paired relocation that names the minuend (A).
+    ++RelI;
+
+    unsigned SectionAID = ~0U;
+    uint64_t SectionAOffset = 0;
+
+    RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+
+    bool BIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+    if (BIsExternal) {
+      Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+      if (!MinuendNameOrErr)
+        return MinuendNameOrErr.takeError();
+      auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+      SectionAID = MinuendI->second.getSectionID();
+      SectionAOffset = MinuendI->second.getOffset();
+    } else {
+      SectionRef SecA = Obj.getAnyRelocationSection(RelInfo);
+      bool IsCode = SecA.isText();
+      Expected<unsigned> SectionAIDOrErr =
+        findOrEmitSection(Obj, SecA, IsCode, ObjSectionToID);
+      if (!SectionAIDOrErr)
+        return SectionAIDOrErr.takeError();
+      SectionAID = *SectionAIDOrErr;
+      Addend -= SecA.getAddress();
+    }
+
+    RelocationEntry R(SectionID, Offset, MachO::X86_64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+                      SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+                      false, Size);
+
+    addRelocationForSection(R, SectionAID);
+
+    return ++RelI;
+  }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif