aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/llvm/lib/ExecutionEngine
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/llvm/lib/ExecutionEngine')
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngine.cpp1332
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp448
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp252
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp401
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/IntelJITEventsWrapper.h110
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_config.h453
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_types.h69
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.c480
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.h258
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp2086
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp533
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp102
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h233
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF.cpp136
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp78
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.h48
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp631
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h220
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFOptions.td21
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp353
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp117
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h158
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp717
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h131
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF.cpp154
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h698
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp334
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp644
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_i386.cpp265
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp213
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_ppc64.cpp543
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp997
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp386
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp521
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp354
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h193
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp494
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO.cpp90
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp860
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h253
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp625
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp550
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h126
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h62
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch32.cpp993
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp81
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/i386.cpp91
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/loongarch.cpp60
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ppc64.cpp144
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp92
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp197
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp684
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h335
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp188
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp267
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp912
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp184
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp382
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp96
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp3777
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp522
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugUtils.cpp362
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebugInfoSupport.cpp120
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupport.cpp61
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp423
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/LLJITUtilsCBindings.cpp22
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.cpp303
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/VTuneSupportPlugin.cpp185
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp843
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp59
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp95
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp49
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp121
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp173
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp315
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp427
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp614
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp221
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp48
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp423
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp147
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp1288
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp229
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp243
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp82
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp1834
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Mangling.cpp84
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp189
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MemoryMapper.cpp466
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp292
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp888
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp1242
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp1181
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp443
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SectCreate.cpp52
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/ObjectFormats.cpp113
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcError.cpp122
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp66
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp250
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp446
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp305
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Speculation.cpp142
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp371
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp109
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp457
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderVTune.cpp224
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp108
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h36
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp183
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp124
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp262
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp299
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp47
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TaskDispatch.cpp85
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp64
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp505
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp169
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp295
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp1477
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp122
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h61
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp1061
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h85
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp2572
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h236
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h594
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp382
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h167
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h377
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h228
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h348
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h322
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp320
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h66
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h541
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h431
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h250
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h238
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp276
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/TargetSelect.cpp95
144 files changed, 55443 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
new file mode 100644
index 000000000000..8297d15b1580
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -0,0 +1,1332 @@
+//===-- ExecutionEngine.cpp - Common Implementation shared by EEs ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common interface used by the various execution engine
+// subclasses.
+//
+// FIXME: This file needs to be updated to support scalable vectors
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/TargetParser/Host.h"
+#include <cmath>
+#include <cstring>
+#include <mutex>
+using namespace llvm;
+
+#define DEBUG_TYPE "jit"
+
+STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
+STATISTIC(NumGlobals , "Number of global vars initialized");
+
+ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
+ std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) = nullptr;
+
+ExecutionEngine *(*ExecutionEngine::InterpCtor)(std::unique_ptr<Module> M,
+ std::string *ErrorStr) =nullptr;
+
+void JITEventListener::anchor() {}
+
+void ObjectCache::anchor() {}
+
+void ExecutionEngine::Init(std::unique_ptr<Module> M) {
+ CompilingLazily = false;
+ GVCompilationDisabled = false;
+ SymbolSearchingDisabled = false;
+
+ // IR module verification is enabled by default in debug builds, and disabled
+ // by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+
+ assert(M && "Module is null?");
+ Modules.push_back(std::move(M));
+}
+
+ExecutionEngine::ExecutionEngine(std::unique_ptr<Module> M)
+ : DL(M->getDataLayout()), LazyFunctionCreator(nullptr) {
+ Init(std::move(M));
+}
+
+ExecutionEngine::ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M)
+ : DL(std::move(DL)), LazyFunctionCreator(nullptr) {
+ Init(std::move(M));
+}
+
+ExecutionEngine::~ExecutionEngine() {
+ clearAllGlobalMappings();
+}
+
+namespace {
+/// Helper class which uses a value handler to automatically deletes the
+/// memory block when the GlobalVariable is destroyed.
+class GVMemoryBlock final : public CallbackVH {
+ GVMemoryBlock(const GlobalVariable *GV)
+ : CallbackVH(const_cast<GlobalVariable*>(GV)) {}
+
+public:
+ /// Returns the address the GlobalVariable should be written into. The
+ /// GVMemoryBlock object prefixes that.
+ static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
+ Type *ElTy = GV->getValueType();
+ size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
+ void *RawMemory = ::operator new(
+ alignTo(sizeof(GVMemoryBlock), TD.getPreferredAlign(GV)) + GVSize);
+ new(RawMemory) GVMemoryBlock(GV);
+ return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
+ }
+
+ void deleted() override {
+ // We allocated with operator new and with some extra memory hanging off the
+ // end, so don't just delete this. I'm not sure if this is actually
+ // required.
+ this->~GVMemoryBlock();
+ ::operator delete(this);
+ }
+};
+} // anonymous namespace
+
+char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
+ return GVMemoryBlock::Create(GV, getDataLayout());
+}
+
+void ExecutionEngine::addObjectFile(std::unique_ptr<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
+void
+ExecutionEngine::addObjectFile(object::OwningBinary<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
+void ExecutionEngine::addArchive(object::OwningBinary<object::Archive> A) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addArchive.");
+}
+
+bool ExecutionEngine::removeModule(Module *M) {
+ for (auto I = Modules.begin(), E = Modules.end(); I != E; ++I) {
+ Module *Found = I->get();
+ if (Found == M) {
+ I->release();
+ Modules.erase(I);
+ clearGlobalMappingsFromModule(M);
+ return true;
+ }
+ }
+ return false;
+}
+
+Function *ExecutionEngine::FindFunctionNamed(StringRef FnName) {
+ for (const auto &M : Modules) {
+ Function *F = M->getFunction(FnName);
+ if (F && !F->isDeclaration())
+ return F;
+ }
+ return nullptr;
+}
+
+GlobalVariable *ExecutionEngine::FindGlobalVariableNamed(StringRef Name, bool AllowInternal) {
+ for (const auto &M : Modules) {
+ GlobalVariable *GV = M->getGlobalVariable(Name, AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
+
+uint64_t ExecutionEngineState::RemoveMapping(StringRef Name) {
+ GlobalAddressMapTy::iterator I = GlobalAddressMap.find(Name);
+ uint64_t OldVal;
+
+ // FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
+ // GlobalAddressMap.
+ if (I == GlobalAddressMap.end())
+ OldVal = 0;
+ else {
+ GlobalAddressReverseMap.erase(I->second);
+ OldVal = I->second;
+ GlobalAddressMap.erase(I);
+ }
+
+ return OldVal;
+}
+
+std::string ExecutionEngine::getMangledName(const GlobalValue *GV) {
+ assert(GV->hasName() && "Global must have name.");
+
+ std::lock_guard<sys::Mutex> locked(lock);
+ SmallString<128> FullName;
+
+ const DataLayout &DL =
+ GV->getDataLayout().isDefault()
+ ? getDataLayout()
+ : GV->getDataLayout();
+
+ Mangler::getNameWithPrefix(FullName, GV->getName(), DL);
+ return std::string(FullName);
+}
+
+void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ addGlobalMapping(getMangledName(GV), (uint64_t) Addr);
+}
+
+void ExecutionEngine::addGlobalMapping(StringRef Name, uint64_t Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ assert(!Name.empty() && "Empty GlobalMapping symbol name!");
+
+ LLVM_DEBUG(dbgs() << "JIT: Map \'" << Name << "\' to [" << Addr << "]\n";);
+ uint64_t &CurVal = EEState.getGlobalAddressMap()[Name];
+ assert((!CurVal || !Addr) && "GlobalMapping already established!");
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too.
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
+ std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
+ assert((!V.empty() || !Name.empty()) &&
+ "GlobalMapping already established!");
+ V = std::string(Name);
+ }
+}
+
+void ExecutionEngine::clearAllGlobalMappings() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ EEState.getGlobalAddressMap().clear();
+ EEState.getGlobalAddressReverseMap().clear();
+}
+
+void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ for (GlobalObject &GO : M->global_objects())
+ EEState.RemoveMapping(getMangledName(&GO));
+}
+
+uint64_t ExecutionEngine::updateGlobalMapping(const GlobalValue *GV,
+ void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return updateGlobalMapping(getMangledName(GV), (uint64_t) Addr);
+}
+
+uint64_t ExecutionEngine::updateGlobalMapping(StringRef Name, uint64_t Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ ExecutionEngineState::GlobalAddressMapTy &Map =
+ EEState.getGlobalAddressMap();
+
+ // Deleting from the mapping?
+ if (!Addr)
+ return EEState.RemoveMapping(Name);
+
+ uint64_t &CurVal = Map[Name];
+ uint64_t OldVal = CurVal;
+
+ if (CurVal && !EEState.getGlobalAddressReverseMap().empty())
+ EEState.getGlobalAddressReverseMap().erase(CurVal);
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too.
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
+ std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
+ assert((!V.empty() || !Name.empty()) &&
+ "GlobalMapping already established!");
+ V = std::string(Name);
+ }
+ return OldVal;
+}
+
+uint64_t ExecutionEngine::getAddressToGlobalIfAvailable(StringRef S) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Address = 0;
+ ExecutionEngineState::GlobalAddressMapTy::iterator I =
+ EEState.getGlobalAddressMap().find(S);
+ if (I != EEState.getGlobalAddressMap().end())
+ Address = I->second;
+ return Address;
+}
+
+
+void *ExecutionEngine::getPointerToGlobalIfAvailable(StringRef S) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ if (void* Address = (void *) getAddressToGlobalIfAvailable(S))
+ return Address;
+ return nullptr;
+}
+
+void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return getPointerToGlobalIfAvailable(getMangledName(GV));
+}
+
+const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // If we haven't computed the reverse mapping yet, do so first.
+ if (EEState.getGlobalAddressReverseMap().empty()) {
+ for (ExecutionEngineState::GlobalAddressMapTy::iterator
+ I = EEState.getGlobalAddressMap().begin(),
+ E = EEState.getGlobalAddressMap().end(); I != E; ++I) {
+ StringRef Name = I->first();
+ uint64_t Addr = I->second;
+ EEState.getGlobalAddressReverseMap().insert(
+ std::make_pair(Addr, std::string(Name)));
+ }
+ }
+
+ std::map<uint64_t, std::string>::iterator I =
+ EEState.getGlobalAddressReverseMap().find((uint64_t) Addr);
+
+ if (I != EEState.getGlobalAddressReverseMap().end()) {
+ StringRef Name = I->second;
+ for (const auto &M : Modules)
+ if (GlobalValue *GV = M->getNamedValue(Name))
+ return GV;
+ }
+ return nullptr;
+}
+
+namespace {
+class ArgvArray {
+ std::unique_ptr<char[]> Array;
+ std::vector<std::unique_ptr<char[]>> Values;
+public:
+ /// Turn a vector of strings into a nice argv style array of pointers to null
+ /// terminated strings.
+ void *reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv);
+};
+} // anonymous namespace
+void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv) {
+ Values.clear(); // Free the old contents.
+ Values.reserve(InputArgv.size());
+ unsigned PtrSize = EE->getDataLayout().getPointerSize();
+ Array = std::make_unique<char[]>((InputArgv.size()+1)*PtrSize);
+
+ LLVM_DEBUG(dbgs() << "JIT: ARGV = " << (void *)Array.get() << "\n");
+ Type *SBytePtr = PointerType::getUnqual(C);
+
+ for (unsigned i = 0; i != InputArgv.size(); ++i) {
+ unsigned Size = InputArgv[i].size()+1;
+ auto Dest = std::make_unique<char[]>(Size);
+ LLVM_DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void *)Dest.get()
+ << "\n");
+
+ std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest.get());
+ Dest[Size-1] = 0;
+
+ // Endian safe: Array[i] = (PointerTy)Dest;
+ EE->StoreValueToMemory(PTOGV(Dest.get()),
+ (GenericValue*)(&Array[i*PtrSize]), SBytePtr);
+ Values.push_back(std::move(Dest));
+ }
+
+ // Null terminate it
+ EE->StoreValueToMemory(PTOGV(nullptr),
+ (GenericValue*)(&Array[InputArgv.size()*PtrSize]),
+ SBytePtr);
+ return Array.get();
+}
+
+void ExecutionEngine::runStaticConstructorsDestructors(Module &module,
+ bool isDtors) {
+ StringRef Name(isDtors ? "llvm.global_dtors" : "llvm.global_ctors");
+ GlobalVariable *GV = module.getNamedGlobal(Name);
+
+ // If this global has internal linkage, or if it has a use, then it must be
+ // an old-style (llvmgcc3) static ctor with __main linked in and in use. If
+ // this is the case, don't execute any of the global ctors, __main will do
+ // it.
+ if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
+
+ // Should be an array of '{ i32, void ()* }' structs. The first value is
+ // the init priority, which we ignore.
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList)
+ return;
+ for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
+ if (!CS) continue;
+
+ Constant *FP = CS->getOperand(1);
+ if (FP->isNullValue())
+ continue; // Found a sentinel value, ignore.
+
+ // Strip off constant expression casts.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+ if (CE->isCast())
+ FP = CE->getOperand(0);
+
+ // Execute the ctor/dtor function!
+ if (Function *F = dyn_cast<Function>(FP))
+ runFunction(F, std::nullopt);
+
+ // FIXME: It is marginally lame that we just do nothing here if we see an
+ // entry we don't recognize. It might not be unreasonable for the verifier
+ // to not even allow this and just assert here.
+ }
+}
+
+void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
+ // Execute global ctors/dtors for each module in the program.
+ for (std::unique_ptr<Module> &M : Modules)
+ runStaticConstructorsDestructors(*M, isDtors);
+}
+
+#ifndef NDEBUG
+/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
+static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
+ unsigned PtrSize = EE->getDataLayout().getPointerSize();
+ for (unsigned i = 0; i < PtrSize; ++i)
+ if (*(i + (uint8_t*)Loc))
+ return false;
+ return true;
+}
+#endif
+
+int ExecutionEngine::runFunctionAsMain(Function *Fn,
+ const std::vector<std::string> &argv,
+ const char * const * envp) {
+ std::vector<GenericValue> GVArgs;
+ GenericValue GVArgc;
+ GVArgc.IntVal = APInt(32, argv.size());
+
+ // Check main() type
+ unsigned NumArgs = Fn->getFunctionType()->getNumParams();
+ FunctionType *FTy = Fn->getFunctionType();
+ Type *PPInt8Ty = PointerType::get(Fn->getContext(), 0);
+
+ // Check the argument types.
+ if (NumArgs > 3)
+ report_fatal_error("Invalid number of arguments of main() supplied");
+ if (NumArgs >= 3 && FTy->getParamType(2) != PPInt8Ty)
+ report_fatal_error("Invalid type for third argument of main() supplied");
+ if (NumArgs >= 2 && FTy->getParamType(1) != PPInt8Ty)
+ report_fatal_error("Invalid type for second argument of main() supplied");
+ if (NumArgs >= 1 && !FTy->getParamType(0)->isIntegerTy(32))
+ report_fatal_error("Invalid type for first argument of main() supplied");
+ if (!FTy->getReturnType()->isIntegerTy() &&
+ !FTy->getReturnType()->isVoidTy())
+ report_fatal_error("Invalid return type of main() supplied");
+
+ ArgvArray CArgv;
+ ArgvArray CEnv;
+ if (NumArgs) {
+ GVArgs.push_back(GVArgc); // Arg #0 = argc.
+ if (NumArgs > 1) {
+ // Arg #1 = argv.
+ GVArgs.push_back(PTOGV(CArgv.reset(Fn->getContext(), this, argv)));
+ assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) &&
+ "argv[0] was null after CreateArgv");
+ if (NumArgs > 2) {
+ std::vector<std::string> EnvVars;
+ for (unsigned i = 0; envp[i]; ++i)
+ EnvVars.emplace_back(envp[i]);
+ // Arg #2 = envp.
+ GVArgs.push_back(PTOGV(CEnv.reset(Fn->getContext(), this, EnvVars)));
+ }
+ }
+ }
+
+ return runFunction(Fn, GVArgs).IntVal.getZExtValue();
+}
+
+EngineBuilder::EngineBuilder() : EngineBuilder(nullptr) {}
+
+EngineBuilder::EngineBuilder(std::unique_ptr<Module> M)
+ : M(std::move(M)), WhichEngine(EngineKind::Either), ErrorStr(nullptr),
+ OptLevel(CodeGenOptLevel::Default), MemMgr(nullptr), Resolver(nullptr) {
+// IR module verification is enabled by default in debug builds, and disabled
+// by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+}
+
+EngineBuilder::~EngineBuilder() = default;
+
+EngineBuilder &EngineBuilder::setMCJITMemoryManager(
+ std::unique_ptr<RTDyldMemoryManager> mcjmm) {
+ auto SharedMM = std::shared_ptr<RTDyldMemoryManager>(std::move(mcjmm));
+ MemMgr = SharedMM;
+ Resolver = SharedMM;
+ return *this;
+}
+
+EngineBuilder&
+EngineBuilder::setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM) {
+ MemMgr = std::shared_ptr<MCJITMemoryManager>(std::move(MM));
+ return *this;
+}
+
+EngineBuilder &
+EngineBuilder::setSymbolResolver(std::unique_ptr<LegacyJITSymbolResolver> SR) {
+ Resolver = std::shared_ptr<LegacyJITSymbolResolver>(std::move(SR));
+ return *this;
+}
+
+ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
+ std::unique_ptr<TargetMachine> TheTM(TM); // Take ownership.
+
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ if (sys::DynamicLibrary::LoadLibraryPermanently(nullptr, ErrorStr))
+ return nullptr;
+
+ // If the user specified a memory manager but didn't specify which engine to
+ // create, we assume they only want the JIT, and we fail if they only want
+ // the interpreter.
+ if (MemMgr) {
+ if (WhichEngine & EngineKind::JIT)
+ WhichEngine = EngineKind::JIT;
+ else {
+ if (ErrorStr)
+ *ErrorStr = "Cannot create an interpreter with a memory manager.";
+ return nullptr;
+ }
+ }
+
+ // Unless the interpreter was explicitly selected or the JIT is not linked,
+ // try making a JIT.
+ if ((WhichEngine & EngineKind::JIT) && TheTM) {
+ if (!TM->getTarget().hasJIT()) {
+ errs() << "WARNING: This target JIT is not designed for the host"
+ << " you are running. If bad things happen, please choose"
+ << " a different -march switch.\n";
+ }
+
+ ExecutionEngine *EE = nullptr;
+ if (ExecutionEngine::MCJITCtor)
+ EE = ExecutionEngine::MCJITCtor(std::move(M), ErrorStr, std::move(MemMgr),
+ std::move(Resolver), std::move(TheTM));
+
+ if (EE) {
+ EE->setVerifyModules(VerifyModules);
+ return EE;
+ }
+ }
+
+ // If we can't make a JIT and we didn't request one specifically, try making
+ // an interpreter instead.
+ if (WhichEngine & EngineKind::Interpreter) {
+ if (ExecutionEngine::InterpCtor)
+ return ExecutionEngine::InterpCtor(std::move(M), ErrorStr);
+ if (ErrorStr)
+ *ErrorStr = "Interpreter has not been linked in.";
+ return nullptr;
+ }
+
+ if ((WhichEngine & EngineKind::JIT) && !ExecutionEngine::MCJITCtor) {
+ if (ErrorStr)
+ *ErrorStr = "JIT has not been linked in.";
+ }
+
+ return nullptr;
+}
+
+// Return the host address of a global value. Functions are delegated to
+// getPointerToFunction(); globals are looked up in the address map and
+// emitted on demand if missing (the lock guards the shared address map).
+void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
+ if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV)))
+ return getPointerToFunction(F);
+
+ std::lock_guard<sys::Mutex> locked(lock);
+ if (void* P = getPointerToGlobalIfAvailable(GV))
+ return P;
+
+ // Global variable might have been added since interpreter started.
+ if (GlobalVariable *GVar =
+ const_cast<GlobalVariable *>(dyn_cast<GlobalVariable>(GV)))
+ emitGlobalVariable(GVar);
+ else
+ llvm_unreachable("Global hasn't had an address allocated yet!");
+
+ return getPointerToGlobalIfAvailable(GV);
+}
+
+/// Converts a Constant* into a GenericValue, including handling of
+/// ConstantExpr values. Unhandled constant kinds (e.g. unknown ConstantExpr
+/// opcodes, scalable vectors) abort via report_fatal_error rather than
+/// returning a partial result.
+GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
+ // If its undefined, return the garbage.
+ if (isa<UndefValue>(C)) {
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ default:
+ break;
+ case Type::IntegerTyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ // Although the value is undefined, we still have to construct an APInt
+ // with the correct bit width.
+ Result.IntVal = APInt(C->getType()->getPrimitiveSizeInBits(), 0);
+ break;
+ case Type::StructTyID: {
+ // if the whole struct is 'undef' just reserve memory for the value.
+ if(StructType *STy = dyn_cast<StructType>(C->getType())) {
+ unsigned int elemNum = STy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ // Integer members get zero-width-correct APInts; nested aggregates
+ // recurse with a fresh UndefValue of the element type.
+ for (unsigned int i = 0; i < elemNum; ++i) {
+ Type *ElemTy = STy->getElementType(i);
+ if (ElemTy->isIntegerTy())
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ else if (ElemTy->isAggregateType()) {
+ const Constant *ElemUndef = UndefValue::get(ElemTy);
+ Result.AggregateVal[i] = getConstantValue(ElemUndef);
+ }
+ }
+ }
+ }
+ break;
+ case Type::ScalableVectorTyID:
+ report_fatal_error(
+ "Scalable vector support not yet implemented in ExecutionEngine");
+ case Type::ArrayTyID: {
+ auto *ArrTy = cast<ArrayType>(C->getType());
+ Type *ElemTy = ArrTy->getElementType();
+ unsigned int elemNum = ArrTy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ if (ElemTy->isIntegerTy())
+ for (unsigned int i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ break;
+ }
+ case Type::FixedVectorTyID: {
+ // if the whole vector is 'undef' just reserve memory for the value.
+ auto *VTy = cast<FixedVectorType>(C->getType());
+ Type *ElemTy = VTy->getElementType();
+ unsigned int elemNum = VTy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ if (ElemTy->isIntegerTy())
+ for (unsigned int i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ break;
+ }
+ }
+ return Result;
+ }
+
+ // Otherwise, if the value is a ConstantExpr...
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ Constant *Op0 = CE->getOperand(0);
+ switch (CE->getOpcode()) {
+ case Instruction::GetElementPtr: {
+ // Compute the index
+ GenericValue Result = getConstantValue(Op0);
+ APInt Offset(DL.getPointerSizeInBits(), 0);
+ cast<GEPOperator>(CE)->accumulateConstantOffset(DL, Offset);
+
+ // Apply the accumulated byte offset to the base pointer.
+ char* tmp = (char*) Result.PointerVal;
+ Result = PTOGV(tmp + Offset.getSExtValue());
+ return Result;
+ }
+ case Instruction::Trunc: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.trunc(BitWidth);
+ return GV;
+ }
+ case Instruction::ZExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.zext(BitWidth);
+ return GV;
+ }
+ case Instruction::SExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.sext(BitWidth);
+ return GV;
+ }
+ case Instruction::FPTrunc: {
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.FloatVal = float(GV.DoubleVal);
+ return GV;
+ }
+ case Instruction::FPExt:{
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.DoubleVal = double(GV.FloatVal);
+ return GV;
+ }
+ case Instruction::UIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.roundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.roundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ // x87 80-bit results are carried in IntVal as the raw bit pattern.
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended());
+ (void)apf.convertFromAPInt(GV.IntVal,
+ false,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::SIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.signedRoundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.signedRoundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended());
+ (void)apf.convertFromAPInt(GV.IntVal,
+ true,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::FPToUI: // double->APInt conversion handles sign
+ case Instruction::FPToSI: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ if (Op0->getType()->isFloatTy())
+ GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth);
+ else if (Op0->getType()->isDoubleTy())
+ GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth);
+ else if (Op0->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat(APFloat::x87DoubleExtended(), GV.IntVal);
+ uint64_t v;
+ bool ignored;
+ (void)apf.convertToInteger(MutableArrayRef(v), BitWidth,
+ CE->getOpcode()==Instruction::FPToSI,
+ APFloat::rmTowardZero, &ignored);
+ GV.IntVal = v; // endian?
+ }
+ return GV;
+ }
+ case Instruction::PtrToInt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = DL.getTypeSizeInBits(Op0->getType());
+ assert(PtrWidth <= 64 && "Bad pointer width");
+ GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
+ uint32_t IntWidth = DL.getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
+ return GV;
+ }
+ case Instruction::IntToPtr: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = DL.getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
+ assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
+ GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
+ return GV;
+ }
+ case Instruction::BitCast: {
+ GenericValue GV = getConstantValue(Op0);
+ Type* DestTy = CE->getType();
+ // Bit-for-bit reinterpretation between same-width int/float types.
+ switch (Op0->getType()->getTypeID()) {
+ default: llvm_unreachable("Invalid bitcast operand");
+ case Type::IntegerTyID:
+ assert(DestTy->isFloatingPointTy() && "invalid bitcast");
+ if (DestTy->isFloatTy())
+ GV.FloatVal = GV.IntVal.bitsToFloat();
+ else if (DestTy->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.bitsToDouble();
+ break;
+ case Type::FloatTyID:
+ assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
+ GV.IntVal = APInt::floatToBits(GV.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
+ GV.IntVal = APInt::doubleToBits(GV.DoubleVal);
+ break;
+ case Type::PointerTyID:
+ assert(DestTy->isPointerTy() && "Invalid bitcast");
+ break; // getConstantValue(Op0) above already converted it
+ }
+ return GV;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ // Binary operators: dispatch on the operand type, then the opcode.
+ GenericValue LHS = getConstantValue(Op0);
+ GenericValue RHS = getConstantValue(CE->getOperand(1));
+ GenericValue GV;
+ switch (CE->getOperand(0)->getType()->getTypeID()) {
+ default: llvm_unreachable("Bad add type!");
+ case Type::IntegerTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid integer opcode");
+ case Instruction::Add: GV.IntVal = LHS.IntVal + RHS.IntVal; break;
+ case Instruction::Sub: GV.IntVal = LHS.IntVal - RHS.IntVal; break;
+ case Instruction::Mul: GV.IntVal = LHS.IntVal * RHS.IntVal; break;
+ case Instruction::UDiv:GV.IntVal = LHS.IntVal.udiv(RHS.IntVal); break;
+ case Instruction::SDiv:GV.IntVal = LHS.IntVal.sdiv(RHS.IntVal); break;
+ case Instruction::URem:GV.IntVal = LHS.IntVal.urem(RHS.IntVal); break;
+ case Instruction::SRem:GV.IntVal = LHS.IntVal.srem(RHS.IntVal); break;
+ case Instruction::And: GV.IntVal = LHS.IntVal & RHS.IntVal; break;
+ case Instruction::Or: GV.IntVal = LHS.IntVal | RHS.IntVal; break;
+ case Instruction::Xor: GV.IntVal = LHS.IntVal ^ RHS.IntVal; break;
+ }
+ break;
+ case Type::FloatTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid float opcode");
+ case Instruction::FAdd:
+ GV.FloatVal = LHS.FloatVal + RHS.FloatVal; break;
+ case Instruction::FSub:
+ GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break;
+ case Instruction::FMul:
+ GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break;
+ case Instruction::FDiv:
+ GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
+ case Instruction::FRem:
+ GV.FloatVal = std::fmod(LHS.FloatVal,RHS.FloatVal); break;
+ }
+ break;
+ case Type::DoubleTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid double opcode");
+ case Instruction::FAdd:
+ GV.DoubleVal = LHS.DoubleVal + RHS.DoubleVal; break;
+ case Instruction::FSub:
+ GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break;
+ case Instruction::FMul:
+ GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break;
+ case Instruction::FDiv:
+ GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
+ case Instruction::FRem:
+ GV.DoubleVal = std::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
+ }
+ break;
+ case Type::X86_FP80TyID:
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID: {
+ // Long-double-like types are computed via APFloat on the raw bits
+ // stored in IntVal.
+ const fltSemantics &Sem = CE->getOperand(0)->getType()->getFltSemantics();
+ APFloat apfLHS = APFloat(Sem, LHS.IntVal);
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid long double opcode");
+ case Instruction::FAdd:
+ apfLHS.add(APFloat(Sem, RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FSub:
+ apfLHS.subtract(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FMul:
+ apfLHS.multiply(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FDiv:
+ apfLHS.divide(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FRem:
+ apfLHS.mod(APFloat(Sem, RHS.IntVal));
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ }
+ }
+ break;
+ }
+ return GV;
+ }
+ default:
+ break;
+ }
+
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ConstantExpr not handled: " << *CE;
+ report_fatal_error(OS.str());
+ }
+
+ // Target extension types: only zero-initializable null constants are
+ // supported; substitute the null value of the layout type (asserted below).
+ if (auto *TETy = dyn_cast<TargetExtType>(C->getType())) {
+ assert(TETy->hasProperty(TargetExtType::HasZeroInit) && C->isNullValue() &&
+ "TargetExtType only supports null constant value");
+ C = Constant::getNullValue(TETy->getLayoutType());
+ }
+
+ // Otherwise, we have a simple constant.
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ case Type::FloatTyID:
+ Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble();
+ break;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ Result.IntVal = cast <ConstantFP>(C)->getValueAPF().bitcastToAPInt();
+ break;
+ case Type::IntegerTyID:
+ Result.IntVal = cast<ConstantInt>(C)->getValue();
+ break;
+ case Type::PointerTyID:
+ // Follow alias chains to the underlying aliasee before resolving.
+ while (auto *A = dyn_cast<GlobalAlias>(C)) {
+ C = A->getAliasee();
+ }
+ if (isa<ConstantPointerNull>(C))
+ Result.PointerVal = nullptr;
+ else if (const Function *F = dyn_cast<Function>(C))
+ Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
+ else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
+ Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV)));
+ else
+ llvm_unreachable("Unknown constant pointer type!");
+ break;
+ case Type::ScalableVectorTyID:
+ report_fatal_error(
+ "Scalable vector support not yet implemented in ExecutionEngine");
+ case Type::FixedVectorTyID: {
+ unsigned elemNum;
+ Type* ElemTy;
+ const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
+ const ConstantVector *CV = dyn_cast<ConstantVector>(C);
+ const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(C);
+
+ if (CDV) {
+ elemNum = CDV->getNumElements();
+ ElemTy = CDV->getElementType();
+ } else if (CV || CAZ) {
+ auto *VTy = cast<FixedVectorType>(C->getType());
+ elemNum = VTy->getNumElements();
+ ElemTy = VTy->getElementType();
+ } else {
+ llvm_unreachable("Unknown constant vector type!");
+ }
+
+ Result.AggregateVal.resize(elemNum);
+ // Check if vector holds floats.
+ if(ElemTy->isFloatTy()) {
+ if (CAZ) {
+ GenericValue floatZero;
+ floatZero.FloatVal = 0.f;
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ floatZero);
+ break;
+ }
+ if(CV) {
+ // Undef elements are left default-constructed.
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].FloatVal = cast<ConstantFP>(
+ CV->getOperand(i))->getValueAPF().convertToFloat();
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].FloatVal = CDV->getElementAsFloat(i);
+
+ break;
+ }
+ // Check if vector holds doubles.
+ if (ElemTy->isDoubleTy()) {
+ if (CAZ) {
+ GenericValue doubleZero;
+ doubleZero.DoubleVal = 0.0;
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ doubleZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].DoubleVal = cast<ConstantFP>(
+ CV->getOperand(i))->getValueAPF().convertToDouble();
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].DoubleVal = CDV->getElementAsDouble(i);
+
+ break;
+ }
+ // Check if vector holds integers.
+ if (ElemTy->isIntegerTy()) {
+ if (CAZ) {
+ GenericValue intZero;
+ intZero.IntVal = APInt(ElemTy->getScalarSizeInBits(), 0ull);
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ intZero);
+ break;
+ }
+ if(CV) {
+ // Undef integer lanes still get a correctly-sized zero APInt.
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].IntVal = cast<ConstantInt>(
+ CV->getOperand(i))->getValue();
+ else {
+ Result.AggregateVal[i].IntVal =
+ APInt(CV->getOperand(i)->getType()->getPrimitiveSizeInBits(), 0);
+ }
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal = APInt(
+ CDV->getElementType()->getPrimitiveSizeInBits(),
+ CDV->getElementAsInteger(i));
+
+ break;
+ }
+ llvm_unreachable("Unknown constant pointer type!");
+ } break;
+
+ default:
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ERROR: Constant unimplemented for type: " << *C->getType();
+ report_fatal_error(OS.str());
+ }
+
+ return Result;
+}
+
+// Store a GenericValue of type Ty into host memory at Ptr, using the
+// target DataLayout's store size, and byte-swapping afterwards when host
+// and target endianness differ.
+void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
+ GenericValue *Ptr, Type *Ty) {
+ // It is safe to treat TargetExtType as its layout type since the underlying
+ // bits are only copied and are not inspected.
+ if (auto *TETy = dyn_cast<TargetExtType>(Ty))
+ Ty = TETy->getLayoutType();
+
+ const unsigned StoreBytes = getDataLayout().getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ default:
+ dbgs() << "Cannot store value of type " << *Ty << "!\n";
+ break;
+ case Type::IntegerTyID:
+ StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes);
+ break;
+ case Type::FloatTyID:
+ *((float*)Ptr) = Val.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ *((double*)Ptr) = Val.DoubleVal;
+ break;
+ case Type::X86_FP80TyID:
+ // 80-bit x87 value: copy the 10 significant bytes of the APInt storage.
+ memcpy(Ptr, Val.IntVal.getRawData(), 10);
+ break;
+ case Type::PointerTyID:
+ // Ensure 64 bit target pointers are fully initialized on 32 bit hosts.
+ if (StoreBytes != sizeof(PointerTy))
+ memset(&(Ptr->PointerVal), 0, StoreBytes);
+
+ *((PointerTy*)Ptr) = Val.PointerVal;
+ break;
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ // Elements are stored contiguously at their natural element size.
+ for (unsigned i = 0; i < Val.AggregateVal.size(); ++i) {
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ *(((double*)Ptr)+i) = Val.AggregateVal[i].DoubleVal;
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ *(((float*)Ptr)+i) = Val.AggregateVal[i].FloatVal;
+ if (cast<VectorType>(Ty)->getElementType()->isIntegerTy()) {
+ unsigned numOfBytes =(Val.AggregateVal[i].IntVal.getBitWidth()+7)/8;
+ StoreIntToMemory(Val.AggregateVal[i].IntVal,
+ (uint8_t*)Ptr + numOfBytes*i, numOfBytes);
+ }
+ }
+ break;
+ }
+
+ if (sys::IsLittleEndianHost != getDataLayout().isLittleEndian())
+ // Host and target are different endian - reverse the stored bytes.
+ std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
+}
+
+/// Load a value of type Ty from host memory at Ptr into Result, using the
+/// target DataLayout's store size. The inverse of StoreValueToMemory;
+/// unsupported types abort via report_fatal_error.
+void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
+ GenericValue *Ptr,
+ Type *Ty) {
+ // Like the store path, TargetExtType bits are copied, not interpreted.
+ if (auto *TETy = dyn_cast<TargetExtType>(Ty))
+ Ty = TETy->getLayoutType();
+
+ const unsigned LoadBytes = getDataLayout().getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ // An APInt with all words initially zero.
+ Result.IntVal = APInt(cast<IntegerType>(Ty)->getBitWidth(), 0);
+ LoadIntFromMemory(Result.IntVal, (uint8_t*)Ptr, LoadBytes);
+ break;
+ case Type::FloatTyID:
+ Result.FloatVal = *((float*)Ptr);
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = *((double*)Ptr);
+ break;
+ case Type::PointerTyID:
+ Result.PointerVal = *((PointerTy*)Ptr);
+ break;
+ case Type::X86_FP80TyID: {
+ // This is endian dependent, but it will only work on x86 anyway.
+ // FIXME: Will not trap if loading a signaling NaN.
+ uint64_t y[2];
+ memcpy(y, Ptr, 10);
+ Result.IntVal = APInt(80, y);
+ break;
+ }
+ case Type::ScalableVectorTyID:
+ report_fatal_error(
+ "Scalable vector support not yet implemented in ExecutionEngine");
+ case Type::FixedVectorTyID: {
+ auto *VT = cast<FixedVectorType>(Ty);
+ Type *ElemT = VT->getElementType();
+ const unsigned numElems = VT->getNumElements();
+ if (ElemT->isFloatTy()) {
+ Result.AggregateVal.resize(numElems);
+ for (unsigned i = 0; i < numElems; ++i)
+ Result.AggregateVal[i].FloatVal = *((float*)Ptr+i);
+ }
+ if (ElemT->isDoubleTy()) {
+ Result.AggregateVal.resize(numElems);
+ for (unsigned i = 0; i < numElems; ++i)
+ Result.AggregateVal[i].DoubleVal = *((double*)Ptr+i);
+ }
+ if (ElemT->isIntegerTy()) {
+ // Pre-size with correctly-wide zero APInts, then fill each lane from
+ // its byte offset ((elemBitWidth+7)/8 bytes per element).
+ GenericValue intZero;
+ const unsigned elemBitWidth = cast<IntegerType>(ElemT)->getBitWidth();
+ intZero.IntVal = APInt(elemBitWidth, 0);
+ Result.AggregateVal.resize(numElems, intZero);
+ for (unsigned i = 0; i < numElems; ++i)
+ LoadIntFromMemory(Result.AggregateVal[i].IntVal,
+ (uint8_t*)Ptr+((elemBitWidth+7)/8)*i, (elemBitWidth+7)/8);
+ }
+ break;
+ }
+ default:
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "Cannot load value of type " << *Ty << "!";
+ report_fatal_error(OS.str());
+ }
+}
+
+// Recursively copy the constant initializer Init into host memory at Addr,
+// laying out aggregates per the target DataLayout (alloc sizes and struct
+// offsets). Undef initializers leave the memory untouched.
+void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
+ LLVM_DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
+ LLVM_DEBUG(Init->dump());
+ if (isa<UndefValue>(Init))
+ return;
+
+ if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
+ unsigned ElementSize =
+ getDataLayout().getTypeAllocSize(CP->getType()->getElementType());
+ for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
+ InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ }
+
+ if (isa<ConstantAggregateZero>(Init)) {
+ memset(Addr, 0, (size_t)getDataLayout().getTypeAllocSize(Init->getType()));
+ return;
+ }
+
+ if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
+ unsigned ElementSize =
+ getDataLayout().getTypeAllocSize(CPA->getType()->getElementType());
+ for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ }
+
+ if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
+ // Struct members are placed at their DataLayout-computed offsets.
+ const StructLayout *SL =
+ getDataLayout().getStructLayout(cast<StructType>(CPS->getType()));
+ for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
+ return;
+ }
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(Init)) {
+ // CDS is already laid out in host memory order.
+ StringRef Data = CDS->getRawDataValues();
+ memcpy(Addr, Data.data(), Data.size());
+ return;
+ }
+
+ if (Init->getType()->isFirstClassType()) {
+ // Scalar (or first-class) leaf: evaluate then store.
+ GenericValue Val = getConstantValue(Init);
+ StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
+ return;
+ }
+
+ LLVM_DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
+ llvm_unreachable("Unknown constant type to initialize memory with!");
+}
+
+/// EmitGlobals - Emit all of the global variables to memory, storing their
+/// addresses into GlobalAddress. This must make sure to copy the contents of
+/// their initializers into the memory.
+void ExecutionEngine::emitGlobals() {
+ // Loop over all of the global variables in the program, allocating the memory
+ // to hold them. If there is more than one module, do a prepass over globals
+ // to figure out how the different modules should link together.
+ // Globals are keyed by (name, type) so same-named globals of different
+ // types are not merged.
+ std::map<std::pair<std::string, Type*>,
+ const GlobalValue*> LinkedGlobalsMap;
+
+ if (Modules.size() != 1) {
+ for (const auto &M : Modules) {
+ for (const auto &GV : M->globals()) {
+ if (GV.hasLocalLinkage() || GV.isDeclaration() ||
+ GV.hasAppendingLinkage() || !GV.hasName())
+ continue;// Ignore external globals and globals with internal linkage.
+
+ const GlobalValue *&GVEntry = LinkedGlobalsMap[std::make_pair(
+ std::string(GV.getName()), GV.getType())];
+
+ // If this is the first time we've seen this global, it is the canonical
+ // version.
+ if (!GVEntry) {
+ GVEntry = &GV;
+ continue;
+ }
+
+ // If the existing global is strong, never replace it.
+ if (GVEntry->hasExternalLinkage())
+ continue;
+
+ // Otherwise, we know it's linkonce/weak, replace it if this is a strong
+ // symbol. FIXME is this right for common?
+ if (GV.hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
+ GVEntry = &GV;
+ }
+ }
+ }
+
+ std::vector<const GlobalValue*> NonCanonicalGlobals;
+ for (const auto &M : Modules) {
+ for (const auto &GV : M->globals()) {
+ // In the multi-module case, see what this global maps to.
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry = LinkedGlobalsMap[std::make_pair(
+ std::string(GV.getName()), GV.getType())]) {
+ // If something else is the canonical global, ignore this one.
+ if (GVEntry != &GV) {
+ NonCanonicalGlobals.push_back(&GV);
+ continue;
+ }
+ }
+ }
+
+ if (!GV.isDeclaration()) {
+ addGlobalMapping(&GV, getMemoryForGV(&GV));
+ } else {
+ // External variable reference. Try to use the dynamic loader to
+ // get a pointer to it.
+ if (void *SymAddr = sys::DynamicLibrary::SearchForAddressOfSymbol(
+ std::string(GV.getName())))
+ addGlobalMapping(&GV, SymAddr);
+ else {
+ report_fatal_error("Could not resolve external global address: "
+ +GV.getName());
+ }
+ }
+ }
+
+ // If there are multiple modules, map the non-canonical globals to their
+ // canonical location.
+ if (!NonCanonicalGlobals.empty()) {
+ for (const GlobalValue *GV : NonCanonicalGlobals) {
+ const GlobalValue *CGV = LinkedGlobalsMap[std::make_pair(
+ std::string(GV->getName()), GV->getType())];
+ void *Ptr = getPointerToGlobalIfAvailable(CGV);
+ assert(Ptr && "Canonical global wasn't codegen'd!");
+ addGlobalMapping(GV, Ptr);
+ }
+ }
+
+ // Now that all of the globals are set up in memory, loop through them all
+ // and initialize their contents.
+ for (const auto &GV : M->globals()) {
+ if (!GV.isDeclaration()) {
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry = LinkedGlobalsMap[std::make_pair(
+ std::string(GV.getName()), GV.getType())])
+ if (GVEntry != &GV) // Not the canonical variable.
+ continue;
+ }
+ emitGlobalVariable(&GV);
+ }
+ }
+ }
+}
+
+// EmitGlobalVariable - This method emits the specified global variable to the
+// address specified in GlobalAddresses, or allocates new memory if it's not
+// already in the map.
+void ExecutionEngine::emitGlobalVariable(const GlobalVariable *GV) {
+ void *GA = getPointerToGlobalIfAvailable(GV);
+
+ if (!GA) {
+ // If it's not already specified, allocate memory for the global.
+ GA = getMemoryForGV(GV);
+
+ // If we failed to allocate memory for this global, return.
+ if (!GA) return;
+
+ addGlobalMapping(GV, GA);
+ }
+
+ // Don't initialize if it's thread local, let the client do it.
+ if (!GV->isThreadLocal())
+ InitializeMemory(GV->getInitializer(), GA);
+
+ // Bookkeeping statistics: bytes initialized and globals emitted.
+ Type *ElTy = GV->getValueType();
+ size_t GVSize = (size_t)getDataLayout().getTypeAllocSize(ElTy);
+ NumInitBytes += (unsigned)GVSize;
+ ++NumGlobals;
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
new file mode 100644
index 000000000000..772a3fa93c51
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -0,0 +1,448 @@
+//===-- ExecutionEngineBindings.cpp - C bindings for EEs ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C bindings for the ExecutionEngine library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/CodeGenCWrappers.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cstring>
+#include <optional>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jit"
+
+// Wrapping the C bindings types. Generates unwrap()/wrap() conversions
+// between GenericValue* and the opaque LLVMGenericValueRef handle.
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
+
+
+// Wrap a TargetMachine pointer as an opaque C-API handle (const is cast
+// away because the C API has no const handles).
+static LLVMTargetMachineRef wrap(const TargetMachine *P) {
+ return
+ reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P));
+}
+
+/*===-- Operations on generic values --------------------------------------===*/
+
+// Box an integer of the given LLVM integer type into a heap-allocated
+// GenericValue. Caller owns the result (free with LLVMDisposeGenericValue).
+LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
+ unsigned long long N,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->IntVal = APInt(unwrap<IntegerType>(Ty)->getBitWidth(), N, IsSigned);
+ return wrap(GenVal);
+}
+
+// Box a host pointer into a heap-allocated GenericValue. Caller owns the
+// result (free with LLVMDisposeGenericValue).
+LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->PointerVal = P;
+ return wrap(GenVal);
+}
+
+// Box a host double into a heap-allocated GenericValue, narrowing to float
+// when the given LLVM type is 'float'. Only float and double types are
+// accepted. Caller owns the result (free with LLVMDisposeGenericValue).
+LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef TyRef, double N) {
+ GenericValue *GenVal = new GenericValue();
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ GenVal->FloatVal = N;
+ break;
+ case Type::DoubleTyID:
+ GenVal->DoubleVal = N;
+ break;
+ default:
+ // Fixed copy-paste in the diagnostic: this is the *create* entry point,
+ // not LLVMGenericValueToFloat.
+ llvm_unreachable(
+ "LLVMCreateGenericValueOfFloat supports only float and double.");
+ }
+ return wrap(GenVal);
+}
+
+// Return the bit width of the APInt stored in the generic value.
+unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef) {
+ return unwrap(GenValRef)->IntVal.getBitWidth();
+}
+
+// Extract the stored integer, sign- or zero-extended per IsSigned.
+// NOTE(review): APInt::get[SZ]ExtValue presumably requires the value to fit
+// in 64 bits — confirm for wider integer types.
+unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenValRef,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = unwrap(GenValRef);
+ if (IsSigned)
+ return GenVal->IntVal.getSExtValue();
+ else
+ return GenVal->IntVal.getZExtValue();
+}
+
+// Extract the stored host pointer from a generic value.
+void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal) {
+ return unwrap(GenVal)->PointerVal;
+}
+
+// Extract the stored floating-point value, widening float to double.
+// Only float and double LLVM types are accepted.
+double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) {
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ return unwrap(GenVal)->FloatVal;
+ case Type::DoubleTyID:
+ return unwrap(GenVal)->DoubleVal;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ }
+}
+
+// Free a generic value created by one of the LLVMCreateGenericValueOf*
+// functions or returned by LLVMRunFunction.
+void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) {
+ delete unwrap(GenVal);
+}
+
+/*===-- Operations on execution engines -----------------------------------===*/
+
+// Create an execution engine (JIT or interpreter, whichever is available)
+// for the module, taking ownership of it. Returns 0 on success; on failure
+// returns 1 and sets *OutError to a malloc'd message the caller must free.
+LLVMBool LLVMCreateExecutionEngineForModule(LLVMExecutionEngineRef *OutEE,
+ LLVMModuleRef M,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::Either)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *EE = builder.create()){
+ *OutEE = wrap(EE);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+// Create an interpreter-only execution engine for the module, taking
+// ownership of it. Same out-parameter/return conventions as
+// LLVMCreateExecutionEngineForModule.
+LLVMBool LLVMCreateInterpreterForModule(LLVMExecutionEngineRef *OutInterp,
+ LLVMModuleRef M,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::Interpreter)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *Interp = builder.create()) {
+ *OutInterp = wrap(Interp);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+// Create a JIT-only execution engine at the given optimization level,
+// taking ownership of the module. Same out-parameter/return conventions as
+// LLVMCreateExecutionEngineForModule.
+LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
+ LLVMModuleRef M,
+ unsigned OptLevel,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOptLevel)OptLevel);
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+// Fill the caller's options struct with defaults. Only min(sizeof) bytes
+// are copied so callers built against older/newer headers stay safe.
+void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions,
+ size_t SizeOfPassedOptions) {
+ LLVMMCJITCompilerOptions options;
+ memset(&options, 0, sizeof(options)); // Most fields are zero by default.
+ options.CodeModel = LLVMCodeModelJITDefault;
+
+ memcpy(PassedOptions, &options,
+ std::min(sizeof(options), SizeOfPassedOptions));
+}
+
+// Create an MCJIT execution engine with the given options, taking ownership
+// of the module (and, if set, the options' memory manager). Returns 0 on
+// success; on failure returns 1 and sets *OutError to a malloc'd message.
+LLVMBool LLVMCreateMCJITCompilerForModule(
+ LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
+ LLVMMCJITCompilerOptions *PassedOptions, size_t SizeOfPassedOptions,
+ char **OutError) {
+ LLVMMCJITCompilerOptions options;
+ // If the user passed a larger sized options struct, then they were compiled
+ // against a newer LLVM. Tell them that something is wrong.
+ if (SizeOfPassedOptions > sizeof(options)) {
+ *OutError = strdup(
+ "Refusing to use options struct that is larger than my own; assuming "
+ "LLVM library mismatch.");
+ return 1;
+ }
+
+ // Defend against the user having an old version of the API by ensuring that
+ // any fields they didn't see are cleared. We must defend against fields being
+ // set to the bitwise equivalent of zero, and assume that this means "do the
+ // default" as if that option hadn't been available.
+ LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
+ memcpy(&options, PassedOptions, SizeOfPassedOptions);
+
+ TargetOptions targetOptions;
+ targetOptions.EnableFastISel = options.EnableFastISel;
+ std::unique_ptr<Module> Mod(unwrap(M));
+
+ if (Mod)
+ // Set function attribute "frame-pointer" based on
+ // NoFramePointerElim.
+ for (auto &F : *Mod) {
+ auto Attrs = F.getAttributes();
+ StringRef Value = options.NoFramePointerElim ? "all" : "none";
+ Attrs = Attrs.addFnAttribute(F.getContext(), "frame-pointer", Value);
+ F.setAttributes(Attrs);
+ }
+
+ std::string Error;
+ EngineBuilder builder(std::move(Mod));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOptLevel)options.OptLevel)
+ .setTargetOptions(targetOptions);
+ // 'JIT' receives whether the code model is a JIT default; it is otherwise
+ // unused here. NOTE(review): the bool is shadowed by ExecutionEngine *JIT
+ // below — consider renaming one of them.
+ bool JIT;
+ if (std::optional<CodeModel::Model> CM = unwrap(options.CodeModel, JIT))
+ builder.setCodeModel(*CM);
+ if (options.MCJMM)
+ builder.setMCJITMemoryManager(
+ std::unique_ptr<RTDyldMemoryManager>(unwrap(options.MCJMM)));
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+// Destroy an execution engine created through the C API, releasing
+// everything the engine owns (modules added to it, its memory manager, etc.).
+void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE) {
+  delete unwrap(EE);
+}
+
+// Finalize generated code, then run the module-level static constructors
+// (the `false` argument selects constructors, not destructors).
+void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE) {
+  unwrap(EE)->finalizeObject();
+  unwrap(EE)->runStaticConstructorsDestructors(false);
+}
+
+// Finalize generated code, then run the module-level static destructors
+// (the `true` argument selects destructors).
+void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) {
+  unwrap(EE)->finalizeObject();
+  unwrap(EE)->runStaticConstructorsDestructors(true);
+}
+
+// Execute F as if it were main(argc, argv, envp).  Object code is finalized
+// first so the entry point is runnable.  Returns the program's exit code.
+int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
+                          unsigned ArgC, const char * const *ArgV,
+                          const char * const *EnvP) {
+  unwrap(EE)->finalizeObject();
+
+  // Copy the C argv array into the std::vector<std::string> form expected by
+  // ExecutionEngine::runFunctionAsMain.
+  std::vector<std::string> ArgVec(ArgV, ArgV + ArgC);
+  return unwrap(EE)->runFunctionAsMain(unwrap<Function>(F), ArgVec, EnvP);
+}
+
+// Invoke F with NumArgs generic-value arguments and return the result as a
+// heap-allocated GenericValue.  The caller owns the returned value and is
+// responsible for disposing of it (presumably via LLVMDisposeGenericValue —
+// its definition is outside this file).
+LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
+                                    unsigned NumArgs,
+                                    LLVMGenericValueRef *Args) {
+  unwrap(EE)->finalizeObject();
+
+  // Copy the argument handles into the by-value vector runFunction expects.
+  std::vector<GenericValue> ArgVec;
+  ArgVec.reserve(NumArgs);
+  for (unsigned I = 0; I != NumArgs; ++I)
+    ArgVec.push_back(*unwrap(Args[I]));
+
+  GenericValue *Result = new GenericValue();
+  *Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
+  return wrap(Result);
+}
+
+// Deliberately a no-op in this implementation; the entry point is kept so
+// existing C API callers continue to link.
+void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) {
+}
+
+// Transfer ownership of module M to the execution engine.  The caller must
+// not dispose of M afterwards.
+void LLVMAddModule(LLVMExecutionEngineRef EE, LLVMModuleRef M){
+  unwrap(EE)->addModule(std::unique_ptr<Module>(unwrap(M)));
+}
+
+// Detach module M from the engine and hand ownership back to the caller via
+// *OutMod.  This implementation always succeeds: it returns 0 and never
+// writes to OutError (the parameter exists for API compatibility).
+LLVMBool LLVMRemoveModule(LLVMExecutionEngineRef EE, LLVMModuleRef M,
+                          LLVMModuleRef *OutMod, char **OutError) {
+  Module *Mod = unwrap(M);
+  unwrap(EE)->removeModule(Mod);
+  *OutMod = wrap(Mod);
+  return 0;
+}
+
+// Look up a function by name across the engine's modules.  On success stores
+// the function in *OutFn and returns 0; on failure returns 1 and leaves
+// *OutFn untouched.
+LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
+                          LLVMValueRef *OutFn) {
+  if (Function *F = unwrap(EE)->FindFunctionNamed(Name)) {
+    *OutFn = wrap(F);
+    return 0;
+  }
+  return 1;
+}
+
+// Not supported by this implementation: always returns nullptr.  Kept so
+// existing C API callers continue to link.
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
+                                     LLVMValueRef Fn) {
+  return nullptr;
+}
+
+// Return a borrowed reference to the engine's DataLayout; the engine retains
+// ownership, so the caller must not dispose of the returned target data.
+LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
+  return wrap(&unwrap(EE)->getDataLayout());
+}
+
+// Return a borrowed reference to the engine's TargetMachine; the engine
+// retains ownership of the returned object.
+LLVMTargetMachineRef
+LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE) {
+  return wrap(unwrap(EE)->getTargetMachine());
+}
+
+// Map the given global value to a caller-provided address, overriding the
+// engine's own symbol resolution for that global.
+void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
+                          void* Addr) {
+  unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
+}
+
+// Finalize generated code, then return the runtime address of the given
+// global value.
+void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
+  unwrap(EE)->finalizeObject();
+
+  return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
+}
+
+// Look up the runtime address of a global value by name.
+uint64_t LLVMGetGlobalValueAddress(LLVMExecutionEngineRef EE, const char *Name) {
+  return unwrap(EE)->getGlobalValueAddress(Name);
+}
+
+// Look up the runtime address of a function by name.
+uint64_t LLVMGetFunctionAddress(LLVMExecutionEngineRef EE, const char *Name) {
+  return unwrap(EE)->getFunctionAddress(Name);
+}
+
+// If the engine has a pending error, copy its message into *OutError (as a
+// malloc'd string the caller frees), clear the engine's error state, and
+// return true.  Otherwise return false and leave *OutError untouched.
+LLVMBool LLVMExecutionEngineGetErrMsg(LLVMExecutionEngineRef EE,
+                                      char **OutError) {
+  assert(OutError && "OutError must be non-null");
+  auto *ExecEngine = unwrap(EE);
+  if (ExecEngine->hasError()) {
+    *OutError = strdup(ExecEngine->getErrorMessage().c_str());
+    // Reading the message consumes it: subsequent calls report no error.
+    ExecEngine->clearErrorMessage();
+    return true;
+  }
+  return false;
+}
+
+/*===-- Operations on memory managers -------------------------------------===*/
+
+namespace {
+
+// Bundle of client-supplied callbacks that implement a memory manager via
+// the C API.  All four pointers must be non-null (asserted in the
+// SimpleBindingMemoryManager constructor).
+struct SimpleBindingMMFunctions {
+  LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection;
+  LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection;
+  LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory;
+  LLVMMemoryManagerDestroyCallback Destroy;
+};
+
+// RTDyldMemoryManager that forwards every operation to C callbacks supplied
+// through the C API, passing the caller's opaque context pointer along.
+class SimpleBindingMemoryManager : public RTDyldMemoryManager {
+public:
+  SimpleBindingMemoryManager(const SimpleBindingMMFunctions& Functions,
+                             void *Opaque);
+  // Invokes the client's Destroy callback with the opaque context.
+  ~SimpleBindingMemoryManager() override;
+
+  uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+                               unsigned SectionID,
+                               StringRef SectionName) override;
+
+  uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+                               unsigned SectionID, StringRef SectionName,
+                               bool isReadOnly) override;
+
+  bool finalizeMemory(std::string *ErrMsg) override;
+
+private:
+  SimpleBindingMMFunctions Functions; // client callbacks (all non-null)
+  void *Opaque;                       // client context, passed to every callback
+};
+
+// Store the callback table and context.  The public factory
+// (LLVMCreateSimpleMCJITMemoryManager) rejects null callbacks before
+// construction, so these asserts only guard direct internal misuse.
+SimpleBindingMemoryManager::SimpleBindingMemoryManager(
+  const SimpleBindingMMFunctions& Functions,
+  void *Opaque)
+  : Functions(Functions), Opaque(Opaque) {
+  assert(Functions.AllocateCodeSection &&
+         "No AllocateCodeSection function provided!");
+  assert(Functions.AllocateDataSection &&
+         "No AllocateDataSection function provided!");
+  assert(Functions.FinalizeMemory &&
+         "No FinalizeMemory function provided!");
+  assert(Functions.Destroy &&
+         "No Destroy function provided!");
+}
+
+// Give the client a chance to release its resources tied to Opaque.
+SimpleBindingMemoryManager::~SimpleBindingMemoryManager() {
+  Functions.Destroy(Opaque);
+}
+
+// Forward to the client's code-section allocator.  The SectionName temporary
+// std::string lives until the end of the full expression, so the char*
+// remains valid for the duration of the callback.
+uint8_t *SimpleBindingMemoryManager::allocateCodeSection(
+  uintptr_t Size, unsigned Alignment, unsigned SectionID,
+  StringRef SectionName) {
+  return Functions.AllocateCodeSection(Opaque, Size, Alignment, SectionID,
+                                       SectionName.str().c_str());
+}
+
+// Forward to the client's data-section allocator; see allocateCodeSection
+// regarding the lifetime of the section-name C string.
+uint8_t *SimpleBindingMemoryManager::allocateDataSection(
+  uintptr_t Size, unsigned Alignment, unsigned SectionID,
+  StringRef SectionName, bool isReadOnly) {
+  return Functions.AllocateDataSection(Opaque, Size, Alignment, SectionID,
+                                       SectionName.str().c_str(),
+                                       isReadOnly);
+}
+
+// Forward to the client's FinalizeMemory callback.  Any error message the
+// client hands back is copied into *ErrMsg and then freed here — the client
+// is expected to allocate it with malloc/strdup (TODO confirm against the
+// llvm-c/ExecutionEngine.h callback contract).
+bool SimpleBindingMemoryManager::finalizeMemory(std::string *ErrMsg) {
+  char *errMsgCString = nullptr;
+  bool result = Functions.FinalizeMemory(Opaque, &errMsgCString);
+  assert((result || !errMsgCString) &&
+         "Did not expect an error message if FinalizeMemory succeeded");
+  if (errMsgCString) {
+    if (ErrMsg)
+      *ErrMsg = errMsgCString; // copies into the std::string
+    free(errMsgCString);
+  }
+  return result;
+}
+
+} // anonymous namespace
+
+// Build a callback-backed MCJIT memory manager.  All four callbacks are
+// required; returns nullptr (with no further diagnostics) if any is missing.
+LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
+  void *Opaque,
+  LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
+  LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
+  LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
+  LLVMMemoryManagerDestroyCallback Destroy) {
+
+  if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
+      !Destroy)
+    return nullptr;
+
+  SimpleBindingMMFunctions functions;
+  functions.AllocateCodeSection = AllocateCodeSection;
+  functions.AllocateDataSection = AllocateDataSection;
+  functions.FinalizeMemory = FinalizeMemory;
+  functions.Destroy = Destroy;
+  return wrap(new SimpleBindingMemoryManager(functions, Opaque));
+}
+
+// Destroy a memory manager created by LLVMCreateSimpleMCJITMemoryManager;
+// this runs the client's Destroy callback via the destructor.
+void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM) {
+  delete unwrap(MM);
+}
+
+/*===-- JIT Event Listener functions -------------------------------------===*/
+
+
+// Stub used when LLVM is built without Intel JIT events support; the real
+// definition lives in the IntelJITEvents component.
+#if !LLVM_USE_INTEL_JITEVENTS
+LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void)
+{
+  return nullptr;
+}
+#endif
+
+// Stub used when LLVM is built without OProfile support.
+#if !LLVM_USE_OPROFILE
+LLVMJITEventListenerRef LLVMCreateOProfileJITEventListener(void)
+{
+  return nullptr;
+}
+#endif
+
+// Stub used when LLVM is built without perf JIT event support.
+#if !LLVM_USE_PERF
+LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void)
+{
+  return nullptr;
+}
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
new file mode 100644
index 000000000000..1250c0defd31
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp
@@ -0,0 +1,252 @@
+//===----- GDBRegistrationListener.cpp - Registers objects with GDB -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Mutex.h"
+#include <mutex>
+
+using namespace llvm;
+using namespace llvm::object;
+
+// This must be kept in sync with gdb/gdb/jit.h .
+extern "C" {
+
+  // Action the debugger should take for the entry currently pointed to by
+  // jit_descriptor::relevant_entry.
+  typedef enum {
+    JIT_NOACTION = 0,
+    JIT_REGISTER_FN,
+    JIT_UNREGISTER_FN
+  } jit_actions_t;
+
+  // One node of the doubly-linked list of in-memory object files whose
+  // symbols the debugger may read.
+  struct jit_code_entry {
+    struct jit_code_entry *next_entry;
+    struct jit_code_entry *prev_entry;
+    const char *symfile_addr;
+    uint64_t symfile_size;
+  };
+
+  struct jit_descriptor {
+    uint32_t version;
+    // This should be jit_actions_t, but we want to be specific about the
+    // bit-width.
+    uint32_t action_flag;
+    struct jit_code_entry *relevant_entry;
+    struct jit_code_entry *first_entry;
+  };
+
+  // We put information about the JITed function in this global, which the
+  // debugger reads. Make sure to specify the version statically, because the
+  // debugger checks the version before we can set it during runtime.
+  extern struct jit_descriptor __jit_debug_descriptor;
+
+  // Debuggers put a breakpoint in this function.
+  extern "C" void __jit_debug_register_code();
+}
+
+namespace {
+
+// FIXME: lli aims to provide both, RuntimeDyld and JITLink, as the dynamic
+// loaders for its JIT implementations. And they both offer debugging via the
+// GDB JIT interface, which builds on the two well-known symbol names below.
+// As these symbols must be unique across the linked executable, we can only
+// define them in one of the libraries and make the other depend on it.
+// OrcTargetProcess is a minimal stub for embedding a JIT client in remote
+// executors. For the moment it seems reasonable to have the definition there
+// and let ExecutionEngine depend on it, until we find a better solution.
+//
+// Never called for its output; merely taking the addresses of the two GDB
+// interface symbols forces a link-time dependency on their definitions in
+// OrcTargetProcess (see the FIXME above).
+LLVM_ATTRIBUTE_USED void requiredSymbolDefinitionsFromOrcTargetProcess() {
+  errs() << (void *)&__jit_debug_register_code
+         << (void *)&__jit_debug_descriptor;
+}
+
+// Bookkeeping for one object registered with the debugger: the debug-object
+// size, the jit_code_entry linked into __jit_debug_descriptor, and ownership
+// of the debug object's backing memory.
+struct RegisteredObjectInfo {
+  RegisteredObjectInfo() = default;
+
+  RegisteredObjectInfo(std::size_t Size, jit_code_entry *Entry,
+                       OwningBinary<ObjectFile> Obj)
+    : Size(Size), Entry(Entry), Obj(std::move(Obj)) {}
+
+  std::size_t Size;           // byte size of the debug object buffer
+  jit_code_entry *Entry;      // owned; freed in deregisterObjectInternal
+  OwningBinary<ObjectFile> Obj; // keeps the debug object's memory alive
+};
+
+// Buffer for an in-memory object file in executable memory
+typedef llvm::DenseMap<JITEventListener::ObjectKey, RegisteredObjectInfo>
+ RegisteredObjectBufferMap;
+
+/// Global access point for the JIT debugging interface designed for use with a
+/// singleton toolbox. Handles thread-safe registration and deregistration of
+/// object files that are in executable memory managed by the client of this
+/// class.
+class GDBJITRegistrationListener : public JITEventListener {
+  /// Lock used to serialize all jit registration events, since they
+  /// modify global variables.
+  ///
+  /// Only a single instance of GDBJITRegistrationListener is ever created,
+  /// and so the lock can be a member variable of that instance. This ensures
+  /// destructors are run in the correct order.
+  sys::Mutex JITDebugLock;
+
+  /// A map of in-memory object files that have been registered with the
+  /// JIT interface.
+  RegisteredObjectBufferMap ObjectBufferMap;
+
+  /// Instantiates the JIT service.
+  GDBJITRegistrationListener() = default;
+
+  /// Unregisters each object that was previously registered and releases all
+  /// internal resources.
+  ~GDBJITRegistrationListener() override;
+
+public:
+  /// Returns the lazily-constructed, process-wide singleton.  Callers never
+  /// delete it; it is destroyed during static destruction.
+  static GDBJITRegistrationListener &instance() {
+    static GDBJITRegistrationListener Instance;
+    return Instance;
+  }
+
+  /// Creates an entry in the JIT registry for the buffer @p Object,
+  /// which must contain an object file in executable memory with any
+  /// debug information for the debugger.
+  void notifyObjectLoaded(ObjectKey K, const ObjectFile &Obj,
+                          const RuntimeDyld::LoadedObjectInfo &L) override;
+
+  /// Removes the internal registration of @p Object, and
+  /// frees associated resources.
+  /// Has no effect if @p Object was never registered.
+  void notifyFreeingObject(ObjectKey K) override;
+
+private:
+  /// Deregister the debug info for the given object file from the debugger
+  /// and delete any temporary copies. This private method does not remove
+  /// the function from Map so that it can be called while iterating over Map.
+  void deregisterObjectInternal(RegisteredObjectBufferMap::iterator I);
+};
+
+/// Do the registration: splice @p JITCodeEntry onto the head of the global
+/// __jit_debug_descriptor list and hit the debugger's breakpoint function.
+/// The caller must hold JITDebugLock, as this mutates process-wide state.
+void NotifyDebugger(jit_code_entry* JITCodeEntry) {
+  __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+
+  // Insert this entry at the head of the list.
+  JITCodeEntry->prev_entry = nullptr;
+  jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
+  JITCodeEntry->next_entry = NextEntry;
+  if (NextEntry) {
+    NextEntry->prev_entry = JITCodeEntry;
+  }
+  __jit_debug_descriptor.first_entry = JITCodeEntry;
+  __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+  // The debugger has a breakpoint planted here; calling it lets the debugger
+  // observe the updated descriptor.
+  __jit_debug_register_code();
+}
+
+GDBJITRegistrationListener::~GDBJITRegistrationListener() {
+  // Free all registered object files.
+  std::lock_guard<llvm::sys::Mutex> locked(JITDebugLock);
+  for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(),
+                                           E = ObjectBufferMap.end();
+       I != E; ++I) {
+    // Call the private method that doesn't update the map so our iterator
+    // doesn't break.
+    deregisterObjectInternal(I);
+  }
+  ObjectBufferMap.clear();
+}
+
+// Build a debug object for Obj, record it under key K, and announce it to
+// the attached debugger via the GDB JIT interface.
+void GDBJITRegistrationListener::notifyObjectLoaded(
+    ObjectKey K, const ObjectFile &Obj,
+    const RuntimeDyld::LoadedObjectInfo &L) {
+
+  OwningBinary<ObjectFile> DebugObj = L.getObjectForDebug(Obj);
+
+  // Bail out if debug objects aren't supported.
+  if (!DebugObj.getBinary())
+    return;
+
+  const char *Buffer = DebugObj.getBinary()->getMemoryBufferRef().getBufferStart();
+  size_t      Size = DebugObj.getBinary()->getMemoryBufferRef().getBufferSize();
+
+  std::lock_guard<llvm::sys::Mutex> locked(JITDebugLock);
+  assert(!ObjectBufferMap.contains(K) &&
+         "Second attempt to perform debug registration.");
+  jit_code_entry* JITCodeEntry = new jit_code_entry();
+
+  // NOTE(review): plain `new` throws std::bad_alloc rather than returning
+  // null, so this branch appears to be purely defensive / unreachable in a
+  // default build.
+  if (!JITCodeEntry) {
+    llvm::report_fatal_error(
+      "Allocation failed when registering a JIT entry!\n");
+  } else {
+    JITCodeEntry->symfile_addr = Buffer;
+    JITCodeEntry->symfile_size = Size;
+
+    // The map entry keeps DebugObj alive for as long as the debugger may
+    // read from Buffer.
+    ObjectBufferMap[K] =
+        RegisteredObjectInfo(Size, JITCodeEntry, std::move(DebugObj));
+    NotifyDebugger(JITCodeEntry);
+  }
+}
+
+// Unregister the object keyed by K from the debugger and drop our record of
+// it.  Silently does nothing if K was never registered.
+void GDBJITRegistrationListener::notifyFreeingObject(ObjectKey K) {
+  std::lock_guard<llvm::sys::Mutex> locked(JITDebugLock);
+  RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(K);
+
+  if (I != ObjectBufferMap.end()) {
+    deregisterObjectInternal(I);
+    ObjectBufferMap.erase(I);
+  }
+}
+
+// Unlink I's jit_code_entry from the global descriptor list, notify the
+// debugger, and free the entry.  Callers must hold JITDebugLock; the map
+// entry itself is left in place so callers can keep iterating.
+void GDBJITRegistrationListener::deregisterObjectInternal(
+    RegisteredObjectBufferMap::iterator I) {
+
+  // Reference so the map's pointer can be nulled out after deletion.
+  jit_code_entry*& JITCodeEntry = I->second.Entry;
+
+  // Do the unregistration.
+  {
+    __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
+
+    // Remove the jit_code_entry from the linked list.
+    jit_code_entry* PrevEntry = JITCodeEntry->prev_entry;
+    jit_code_entry* NextEntry = JITCodeEntry->next_entry;
+
+    if (NextEntry) {
+      NextEntry->prev_entry = PrevEntry;
+    }
+    if (PrevEntry) {
+      PrevEntry->next_entry = NextEntry;
+    }
+    else {
+      // No predecessor means this entry must have been the list head.
+      assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
+      __jit_debug_descriptor.first_entry = NextEntry;
+    }
+
+    // Tell the debugger which entry we removed, and unregister the code.
+    __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+    __jit_debug_register_code();
+  }
+
+  delete JITCodeEntry;
+  JITCodeEntry = nullptr;
+}
+
+} // end namespace
+
+namespace llvm {
+
+// Returns the process-wide singleton GDB registration listener.  The caller
+// must NOT delete the returned pointer; ownership stays with the singleton.
+JITEventListener* JITEventListener::createGDBRegistrationListener() {
+  return &GDBJITRegistrationListener::instance();
+}
+
+} // namespace llvm
+
+// C API wrapper around the singleton GDB registration listener; the returned
+// handle refers to shared state and must not be disposed by the caller.
+LLVMJITEventListenerRef LLVMCreateGDBRegistrationListener(void)
+{
+  return wrap(JITEventListener::createGDBRegistrationListener());
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
new file mode 100644
index 000000000000..b1e99df73b48
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -0,0 +1,401 @@
+//===-- IntelJITEventListener.cpp - Tell Intel profiler about JITed code --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object to tell Intel(R) VTune(TM)
+// Amplifier XE 2011 about JITted functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IntelJITProfiling/IntelJITEventsWrapper.h"
+#include "ittnotify.h"
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "amplifier-jit-event-listener"
+
+namespace {
+
+// Per-module state for the ittnotify-based (non-legacy) VTune reporting
+// path: holds the module name, its __itt_module_object, and the section
+// descriptors handed to __itt_module_load_with_sections.
+class IntelIttnotifyInfo {
+  std::string ModuleName;
+  // Owns the section-name strings; SectionInfoVector stores raw c_str()
+  // pointers into these, so this vector must not reallocate after
+  // fillSectionInformation wires the names up.
+  std::vector<std::string> SectionNamesVector;
+  std::vector<__itt_section_info> SectionInfoVector;
+  __itt_module_object *ModuleObject; // owned; deleted in the destructor
+  IntelJITEventsWrapper &WrapperRef; // not owned
+
+public:
+  IntelIttnotifyInfo(IntelJITEventsWrapper &Wrapper)
+      : ModuleObject(NULL), WrapperRef(Wrapper){};
+  ~IntelIttnotifyInfo() { delete ModuleObject; };
+
+  // Copies Name; the pointer need not outlive the call.
+  void setModuleName(const char *Name) { ModuleName = std::string(Name); }
+
+  // Valid only while this object is alive and the name is unchanged.
+  const char *getModuleName() { return ModuleName.c_str(); }
+
+  // Takes ownership of ModuleObj.
+  void setModuleObject(__itt_module_object *ModuleObj) {
+    ModuleObject = ModuleObj;
+  }
+
+  __itt_module_object *getModuleObject() { return ModuleObject; }
+
+  // Returns a pointer to the first section descriptor, or NULL when no
+  // sections were recorded.
+  __itt_section_info *getSectionInfoVectorBegin() {
+    if (SectionInfoVector.size())
+      return &SectionInfoVector[0];
+    return NULL;
+  }
+
+  // Forward a section load/unload notification to the wrapper.
+  void reportSection(llvm::IttEventType EventType, const char *SectionName,
+                     unsigned int SectionSize) {
+    WrapperRef.iJitIttNotifyInfo(EventType, SectionName, SectionSize);
+  }
+
+  // Record every section of Obj that RuntimeDyld actually loaded (nonzero
+  // load address), reporting each one, and return the number recorded.
+  int fillSectionInformation(const ObjectFile &Obj,
+                             const RuntimeDyld::LoadedObjectInfo &L) {
+
+    int SectionCounter = 0;
+
+    for (auto &Section : Obj.sections()) {
+      uint64_t SectionLoadAddr = L.getSectionLoadAddress(Section);
+      if (SectionLoadAddr) {
+        object::ELFSectionRef ElfSection(Section);
+
+        __itt_section_info SectionInfo;
+        memset(&SectionInfo, 0, sizeof(SectionInfo));
+        SectionInfo.start_addr = reinterpret_cast<void *>(SectionLoadAddr);
+        SectionInfo.file_offset = ElfSection.getOffset();
+        SectionInfo.flags = ElfSection.getFlags();
+
+        // A nameless section is tolerated: the error is dropped and the
+        // empty string is used.
+        StringRef SectionName("");
+        auto SectionNameOrError = ElfSection.getName();
+        if (SectionNameOrError)
+          SectionName = *SectionNameOrError;
+
+        SectionNamesVector.push_back(SectionName.str());
+        SectionInfo.size = ElfSection.getSize();
+        reportSection(llvm::LoadBinarySection, SectionName.str().c_str(),
+                      SectionInfo.size);
+
+        if (ElfSection.isBSS()) {
+          SectionInfo.type = itt_section_type_bss;
+        } else if (ElfSection.isData()) {
+          SectionInfo.type = itt_section_type_data;
+        } else if (ElfSection.isText()) {
+          SectionInfo.type = itt_section_type_text;
+        }
+        SectionInfoVector.push_back(SectionInfo);
+        ++SectionCounter;
+      }
+    }
+    // Hereinafter: don't change SectionNamesVector content to avoid vector
+    // reallocation - reallocation invalidates all the references, pointers, and
+    // iterators referring to the elements in the sequence.
+    for (int I = 0; I < SectionCounter; ++I) {
+      SectionInfoVector[I].name = SectionNamesVector[I].c_str();
+    }
+    return SectionCounter;
+  }
+};
+
+// JITEventListener that reports JITted code to Intel VTune.  Supports two
+// reporting paths, selected per call by getBackwardCompatibilityMode():
+// the ittnotify module/section API (mode 0) and the legacy per-method
+// iJIT_NotifyEvent API (mode 1).
+class IntelJITEventListener : public JITEventListener {
+  // Maps a function's load address to the VTune method id it was registered
+  // under (legacy path), so it can be unregistered later.
+  typedef DenseMap<void*, unsigned int> MethodIDMap;
+
+  std::unique_ptr<IntelJITEventsWrapper> Wrapper; // owned
+  MethodIDMap MethodIDs;
+
+  typedef SmallVector<const void *, 64> MethodAddressVector;
+  typedef DenseMap<const void *, MethodAddressVector> ObjectMap;
+
+  // Legacy path: function addresses registered per debug-object buffer.
+  ObjectMap LoadedObjectMap;
+  std::map<ObjectKey, OwningBinary<ObjectFile>> DebugObjects;
+
+  // ittnotify path: per-object module bookkeeping.
+  std::map<ObjectKey, std::unique_ptr<IntelIttnotifyInfo>> KeyToIttnotify;
+
+public:
+  // Takes ownership of libraryWrapper.
+  IntelJITEventListener(IntelJITEventsWrapper* libraryWrapper) {
+    Wrapper.reset(libraryWrapper);
+  }
+
+  ~IntelJITEventListener() {
+  }
+
+  void notifyObjectLoaded(ObjectKey Key, const ObjectFile &Obj,
+                          const RuntimeDyld::LoadedObjectInfo &L) override;
+
+  void notifyFreeingObject(ObjectKey Key) override;
+};
+
+// Convert one DWARF line-table row to the JIT API's LineNumberInfo: the
+// offset is relative to the function start; only the line number is kept.
+static LineNumberInfo DILineInfoToIntelJITFormat(uintptr_t StartAddress,
+                                                 uintptr_t Address,
+                                                 DILineInfo Line) {
+  LineNumberInfo Result;
+
+  Result.Offset = Address - StartAddress;
+  Result.LineNumber = Line.Line;
+
+  return Result;
+}
+
+// Build an iJIT_Method_Load record for one JITted function, allocating a
+// fresh method id from the wrapper.  Line-number and source-file fields are
+// left zeroed for the caller to fill in.
+static iJIT_Method_Load FunctionDescToIntelJITFormat(
+    IntelJITEventsWrapper& Wrapper,
+    const char* FnName,
+    uintptr_t FnStart,
+    size_t FnSize) {
+  iJIT_Method_Load Result;
+  memset(&Result, 0, sizeof(iJIT_Method_Load));
+
+  Result.method_id = Wrapper.iJIT_GetNewMethodID();
+  // Note: the struct stores a non-const char*; FnName must stay alive while
+  // the record is in use.
+  Result.method_name = const_cast<char*>(FnName);
+  Result.method_load_address = reinterpret_cast<void*>(FnStart);
+  Result.method_size = FnSize;
+
+  Result.class_id = 0;
+  Result.class_file_name = NULL;
+  Result.user_data = NULL;
+  Result.user_data_size = 0;
+  Result.env = iJDE_JittingAPI;
+
+  return Result;
+}
+
+// Read the INTEL_JIT_BACKWARD_COMPATIBILITY environment variable: 0 (default)
+// selects the ittnotify path, 1 the legacy iJIT path.  Re-read on every call.
+// NOTE(review): if the variable is set but not a valid base-10 integer,
+// getAsInteger reports failure and the mode presumably stays 0 — confirm
+// against StringRef::getAsInteger's behavior on parse failure.
+int getBackwardCompatibilityMode() {
+
+  char *BackwardCompatibilityEnv = getenv("INTEL_JIT_BACKWARD_COMPATIBILITY");
+  int BackwardCompatibilityMode = 0;
+  if (BackwardCompatibilityEnv) {
+    StringRef(BackwardCompatibilityEnv)
+        .getAsInteger(10, BackwardCompatibilityMode);
+  }
+  return BackwardCompatibilityMode;
+}
+
+// Report a newly loaded object to VTune.  Mode 0 (ittnotify): ELF objects
+// only — register the whole module with its sections.  Mode 1 (legacy):
+// walk the debug object's symbols and register each function with its
+// DWARF line table.  Any other mode value reports nothing.
+void IntelJITEventListener::notifyObjectLoaded(
+    ObjectKey Key, const ObjectFile &Obj,
+    const RuntimeDyld::LoadedObjectInfo &L) {
+
+  int BackwardCompatibilityMode = getBackwardCompatibilityMode();
+  if (BackwardCompatibilityMode == 0) {
+    if (Obj.isELF()) {
+      std::unique_ptr<IntelIttnotifyInfo> ModuleIttnotify =
+          std::make_unique<IntelIttnotifyInfo>(*Wrapper);
+      // Module "name" is the MD5 of the object's bytes rendered as hex, so
+      // identical modules get identical names.  setModuleName copies the
+      // string, so the temporary's lifetime is sufficient.
+      ModuleIttnotify->setModuleName(
+          StringRef(llvm::utohexstr(
+                        MD5Hash(Obj.getMemoryBufferRef().getBuffer()), true))
+              .str()
+              .c_str());
+
+      // Ownership of ModuleObject passes to ModuleIttnotify below via
+      // setModuleObject.
+      __itt_module_object *ModuleObject = new __itt_module_object();
+      ModuleObject->module_name = ModuleIttnotify->getModuleName();
+      ModuleObject->module_size = Obj.getMemoryBufferRef().getBufferSize();
+      Wrapper->iJitIttNotifyInfo(llvm::LoadBinaryModule,
+                                 ModuleObject->module_name,
+                                 ModuleObject->module_size);
+      ModuleObject->module_type = __itt_module_type_elf;
+      ModuleObject->section_number =
+          ModuleIttnotify->fillSectionInformation(Obj, L);
+      ModuleObject->module_buffer =
+          (void *)const_cast<char *>(Obj.getMemoryBufferRef().getBufferStart());
+      ModuleObject->module_id =
+          __itt_id_make((void *)&(*ModuleObject), ModuleObject->module_size);
+      ModuleObject->section_array =
+          ModuleIttnotify->getSectionInfoVectorBegin();
+      ModuleIttnotify->setModuleObject(ModuleObject);
+
+      __itt_module_load_with_sections(ModuleObject);
+
+      KeyToIttnotify[Key] = std::move(ModuleIttnotify);
+    }
+  } else if (BackwardCompatibilityMode == 1) {
+
+    OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+    const ObjectFile *DebugObj = DebugObjOwner.getBinary();
+    if (!DebugObj)
+      return;
+
+    // Get the address of the object image for use as a unique identifier
+    const void *ObjData = DebugObj->getData().data();
+    std::unique_ptr<DIContext> Context = DWARFContext::create(*DebugObj);
+    MethodAddressVector Functions;
+
+    // Use symbol info to iterate functions in the object.
+    for (const std::pair<SymbolRef, uint64_t> &P :
+         computeSymbolSizes(*DebugObj)) {
+      SymbolRef Sym = P.first;
+      std::vector<LineNumberInfo> LineInfo;
+      std::string SourceFileName;
+
+      // Symbols whose type/name/address/section can't be read are skipped
+      // rather than aborting the whole object.
+      Expected<SymbolRef::Type> SymTypeOrErr = Sym.getType();
+      if (!SymTypeOrErr) {
+        // TODO: Actually report errors helpfully.
+        consumeError(SymTypeOrErr.takeError());
+        continue;
+      }
+      SymbolRef::Type SymType = *SymTypeOrErr;
+      if (SymType != SymbolRef::ST_Function)
+        continue;
+
+      Expected<StringRef> Name = Sym.getName();
+      if (!Name) {
+        // TODO: Actually report errors helpfully.
+        consumeError(Name.takeError());
+        continue;
+      }
+
+      Expected<uint64_t> AddrOrErr = Sym.getAddress();
+      if (!AddrOrErr) {
+        // TODO: Actually report errors helpfully.
+        consumeError(AddrOrErr.takeError());
+        continue;
+      }
+      uint64_t Addr = *AddrOrErr;
+      uint64_t Size = P.second;
+
+      auto SecOrErr = Sym.getSection();
+      if (!SecOrErr) {
+        // TODO: Actually report errors helpfully.
+        consumeError(SecOrErr.takeError());
+        continue;
+      }
+      object::section_iterator Sec = *SecOrErr;
+      if (Sec == Obj.section_end())
+        continue;
+      uint64_t Index = Sec->getIndex();
+
+      // Record this address in a local vector
+      Functions.push_back((void *)Addr);
+
+      // Build the function loaded notification message
+      iJIT_Method_Load FunctionMessage =
+          FunctionDescToIntelJITFormat(*Wrapper, Name->data(), Addr, Size);
+      DILineInfoTable Lines =
+          Context->getLineInfoForAddressRange({Addr, Index}, Size);
+      DILineInfoTable::iterator Begin = Lines.begin();
+      DILineInfoTable::iterator End = Lines.end();
+      for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+        LineInfo.push_back(
+            DILineInfoToIntelJITFormat((uintptr_t)Addr, It->first, It->second));
+      }
+      if (LineInfo.size() == 0) {
+        FunctionMessage.source_file_name = 0;
+        FunctionMessage.line_number_size = 0;
+        FunctionMessage.line_number_table = 0;
+      } else {
+        // Source line information for the address range is provided as
+        // a code offset for the start of the corresponding sub-range and
+        // a source line. JIT API treats offsets in LineNumberInfo structures
+        // as the end of the corresponding code region. The start of the code
+        // is taken from the previous element. Need to shift the elements.
+
+        LineNumberInfo last = LineInfo.back();
+        last.Offset = FunctionMessage.method_size;
+        LineInfo.push_back(last);
+        for (size_t i = LineInfo.size() - 2; i > 0; --i)
+          LineInfo[i].LineNumber = LineInfo[i - 1].LineNumber;
+
+        SourceFileName = Lines.front().second.FileName;
+        FunctionMessage.source_file_name =
+            const_cast<char *>(SourceFileName.c_str());
+        FunctionMessage.line_number_size = LineInfo.size();
+        FunctionMessage.line_number_table = &*LineInfo.begin();
+      }
+
+      Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+                                &FunctionMessage);
+      MethodIDs[(void *)Addr] = FunctionMessage.method_id;
+    }
+
+    // To support object unload notification, we need to keep a list of
+    // registered function addresses for each loaded object. We will
+    // use the MethodIDs map to get the registered ID for each function.
+    LoadedObjectMap[ObjData] = Functions;
+    DebugObjects[Key] = std::move(DebugObjOwner);
+  }
+}
+
+// Report to VTune that the object keyed by Key is being freed, using the
+// same mode selection as notifyObjectLoaded, and drop the matching
+// bookkeeping.  Silently does nothing for unknown keys.
+void IntelJITEventListener::notifyFreeingObject(ObjectKey Key) {
+
+  int BackwardCompatibilityMode = getBackwardCompatibilityMode();
+  if (BackwardCompatibilityMode == 0) {
+    if (KeyToIttnotify.find(Key) == KeyToIttnotify.end())
+      return;
+    __itt_module_unload_with_sections(KeyToIttnotify[Key]->getModuleObject());
+    Wrapper->iJitIttNotifyInfo(
+        llvm::UnloadBinaryModule,
+        KeyToIttnotify[Key]->getModuleObject()->module_name,
+        KeyToIttnotify[Key]->getModuleObject()->module_size);
+    KeyToIttnotify.erase(Key);
+  } else if (BackwardCompatibilityMode == 1) {
+    // This object may not have been registered with the listener. If it wasn't,
+    // bail out.
+    if (DebugObjects.find(Key) == DebugObjects.end())
+      return;
+
+    // Get the address of the object image for use as a unique identifier
+    const ObjectFile &DebugObj = *DebugObjects[Key].getBinary();
+    const void *ObjData = DebugObj.getData().data();
+
+    // Get the object's function list from LoadedObjectMap
+    ObjectMap::iterator OI = LoadedObjectMap.find(ObjData);
+    if (OI == LoadedObjectMap.end())
+      return;
+    MethodAddressVector &Functions = OI->second;
+
+    // Walk the function list, unregistering each function
+    for (MethodAddressVector::iterator FI = Functions.begin(),
+                                       FE = Functions.end();
+         FI != FE; ++FI) {
+      void *FnStart = const_cast<void *>(*FI);
+      MethodIDMap::iterator MI = MethodIDs.find(FnStart);
+      if (MI != MethodIDs.end()) {
+        Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+                                  &MI->second);
+        MethodIDs.erase(MI);
+      }
+    }
+
+    // Erase the object from LoadedObjectMap
+    LoadedObjectMap.erase(OI);
+    DebugObjects.erase(Key);
+  }
+}
+
+} // anonymous namespace.
+
+namespace llvm {
+// Create a listener backed by the real Intel jitprofiling library.  The
+// caller owns the returned listener.
+JITEventListener *JITEventListener::createIntelJITEventListener() {
+  return new IntelJITEventListener(new IntelJITEventsWrapper);
+}
+
+// for testing: inject an alternative wrapper implementation.  Takes
+// ownership of TestImpl; the caller owns the returned listener.
+JITEventListener *JITEventListener::createIntelJITEventListener(
+    IntelJITEventsWrapper* TestImpl) {
+  return new IntelJITEventListener(TestImpl);
+}
+
+} // namespace llvm
+
+// C API wrapper: create a new Intel JIT event listener (caller owns it).
+LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void)
+{
+  return wrap(JITEventListener::createIntelJITEventListener());
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/IntelJITEventsWrapper.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/IntelJITEventsWrapper.h
new file mode 100644
index 000000000000..dfe208f2ccfd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/IntelJITEventsWrapper.h
@@ -0,0 +1,110 @@
+//===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper for the Intel JIT Events API. It allows for the
+// implementation of the jitprofiling library to be swapped with an alternative
+// implementation (for testing). To include this file, you must have the
+// jitprofiling.h header available; it is available in Intel(R) VTune(TM)
+// Amplifier XE 2011.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INTEL_JIT_EVENTS_WRAPPER_H
+#define INTEL_JIT_EVENTS_WRAPPER_H
+
+#include "jitprofiling.h"
+
+namespace llvm {
+
+typedef enum {
+ LoadBinaryModule,
+ LoadBinarySection,
+ UnloadBinaryModule,
+ UnloadBinarySection
+} IttEventType;
+
+// Thin dispatch layer over the Intel jitprofiling C entry points.  Every
+// call is routed through a stored function pointer so tests can substitute
+// fakes; a null pointer makes the corresponding call a benign no-op.
+class IntelJITEventsWrapper {
+  // Function pointer types for testing implementation of Intel jitprofiling
+  // library
+  typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*);
+  typedef int (*IttnotifyInfoPtr)(IttEventType, const char *, unsigned int);
+  typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx );
+  typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void);
+  typedef void (*FinalizeThreadPtr)(void);
+  typedef void (*FinalizeProcessPtr)(void);
+  typedef unsigned int (*GetNewMethodIDPtr)(void);
+
+  NotifyEventPtr NotifyEventFunc;
+  IttnotifyInfoPtr IttnotifyInfoFunc;
+  RegisterCallbackExPtr RegisterCallbackExFunc;
+  IsProfilingActivePtr IsProfilingActiveFunc;
+  GetNewMethodIDPtr GetNewMethodIDFunc;
+
+public:
+  // True when VTune sampling is currently collecting for this process.
+  bool isAmplifierRunning() {
+    return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON;
+  }
+
+  // Default wrapper: bind directly to the real jitprofiling entry points.
+  // No ittnotify-info hook exists in the C library, so that slot stays null.
+  IntelJITEventsWrapper()
+      : NotifyEventFunc(::iJIT_NotifyEvent), IttnotifyInfoFunc(0),
+        RegisterCallbackExFunc(::iJIT_RegisterCallbackEx),
+        IsProfilingActiveFunc(::iJIT_IsProfilingActive),
+        GetNewMethodIDFunc(::iJIT_GetNewMethodID) {}
+
+  // Test constructor.  NOTE(review): FinalizeThreadImpl and
+  // FinalizeProcessImpl are accepted but never stored or called — the
+  // parameters appear to be retained only for signature compatibility;
+  // confirm against the listener unit tests before relying on them.
+  IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl,
+                        IttnotifyInfoPtr IttnotifyInfoImpl,
+                        RegisterCallbackExPtr RegisterCallbackExImpl,
+                        IsProfilingActivePtr IsProfilingActiveImpl,
+                        FinalizeThreadPtr FinalizeThreadImpl,
+                        FinalizeProcessPtr FinalizeProcessImpl,
+                        GetNewMethodIDPtr GetNewMethodIDImpl)
+      : NotifyEventFunc(NotifyEventImpl), IttnotifyInfoFunc(IttnotifyInfoImpl),
+        RegisterCallbackExFunc(RegisterCallbackExImpl),
+        IsProfilingActiveFunc(IsProfilingActiveImpl),
+        GetNewMethodIDFunc(GetNewMethodIDImpl) {}
+
+  // Sends an event announcing that a function has been emitted
+  // return values are event-specific. See Intel documentation for details.
+  // Returns -1 when no implementation is bound.
+  int iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) {
+    if (!NotifyEventFunc)
+      return -1;
+    return NotifyEventFunc(EventType, EventSpecificData);
+  }
+
+  // Forward an ittnotify module/section event; -1 when no hook is bound
+  // (the default for the production wrapper).
+  int iJitIttNotifyInfo(IttEventType EventType, const char *Name,
+                        unsigned int Size) {
+    if (!IttnotifyInfoFunc)
+      return -1;
+    return IttnotifyInfoFunc(EventType, Name, Size);
+  }
+
+  // Registers a callback function to receive notice of profiling state changes
+  void iJIT_RegisterCallbackEx(void *UserData,
+                               iJIT_ModeChangedEx NewModeCallBackFuncEx) {
+    if (RegisterCallbackExFunc)
+      RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx);
+  }
+
+  // Returns the current profiler mode; iJIT_NOTHING_RUNNING when no
+  // implementation is bound.
+  iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) {
+    if (!IsProfilingActiveFunc)
+      return iJIT_NOTHING_RUNNING;
+    return IsProfilingActiveFunc();
+  }
+
+  // Generates a locally unique method ID for use in code registration.
+  // Note: returns -1 converted to unsigned (UINT_MAX) when unbound.
+  unsigned int iJIT_GetNewMethodID(void) {
+    if (!GetNewMethodIDFunc)
+      return -1;
+    return GetNewMethodIDFunc();
+  }
+};
+
+} // namespace llvm
+
+#endif //INTEL_JIT_EVENTS_WRAPPER_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_config.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_config.h
new file mode 100644
index 000000000000..16ce672150cc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_config.h
@@ -0,0 +1,453 @@
+/*===-- ittnotify_config.h - JIT Profiling API internal config-----*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API internal config.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef _ITTNOTIFY_CONFIG_H_
+#define _ITTNOTIFY_CONFIG_H_
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+# define ITT_OS_WIN 1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+# define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+# define ITT_OS_MAC 3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+# if defined WIN32 || defined _WIN32
+# define ITT_OS ITT_OS_WIN
+# elif defined( __APPLE__ ) && defined( __MACH__ )
+# define ITT_OS ITT_OS_MAC
+# else
+# define ITT_OS ITT_OS_LINUX
+# endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+# define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+# define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+# if ITT_OS==ITT_OS_WIN
+# define ITT_PLATFORM ITT_PLATFORM_WIN
+# else
+# define ITT_PLATFORM ITT_PLATFORM_POSIX
+# endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#if defined(_UNICODE) && !defined(UNICODE)
+#define UNICODE
+#endif
+
+#include <stddef.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdint.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE || _UNICODE */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define CDECL __cdecl
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not actual on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define STDCALL __stdcall
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define STDCALL /* not supported on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define STDCALL __attribute__ ((stdcall))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI CDECL
+#define LIBITTAPI CDECL
+
+/* TODO: Temporary for compatibility! */
+#define ITTAPI_CALL CDECL
+#define LIBITTAPI_CALL CDECL
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+/* use __forceinline (VC++ specific) */
+#define ITT_INLINE __forceinline
+#define ITT_INLINE_ATTRIBUTE /* nothing */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/*
+ * Generally, functions are not inlined unless optimization is specified.
+ * For functions declared inline, this attribute inlines the function even
+ * if no optimization level was specified.
+ */
+#ifdef __STRICT_ANSI__
+#define ITT_INLINE static
+#else /* __STRICT_ANSI__ */
+#define ITT_INLINE static inline
+#endif /* __STRICT_ANSI__ */
+#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/** @endcond */
+
+#ifndef ITT_ARCH_IA32
+# define ITT_ARCH_IA32 1
+#endif /* ITT_ARCH_IA32 */
+
+#ifndef ITT_ARCH_IA32E
+# define ITT_ARCH_IA32E 2
+#endif /* ITT_ARCH_IA32E */
+
+#ifndef ITT_ARCH_IA64
+# define ITT_ARCH_IA64 3
+#endif /* ITT_ARCH_IA64 */
+
+#ifndef ITT_ARCH
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define ITT_ARCH ITT_ARCH_IA32E
+# elif defined _M_IA64 || defined __ia64
+# define ITT_ARCH ITT_ARCH_IA64
+# else
+# define ITT_ARCH ITT_ARCH_IA32
+# endif
+#endif
+
+#ifdef __cplusplus
+# define ITT_EXTERN_C extern "C"
+#else
+# define ITT_EXTERN_C /* nothing */
+#endif /* __cplusplus */
+
+#define ITT_TO_STR_AUX(x) #x
+#define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
+
+#define __ITT_BUILD_ASSERT(expr, suffix) do { \
+ static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \
+ __itt_build_check_##suffix[0] = 0; \
+} while(0)
+#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
+#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
+
+#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
+
+/* Replace with snapshot date YYYYMMDD for promotion build. */
+#define API_VERSION_BUILD 20111111
+
+#ifndef API_VERSION_NUM
+#define API_VERSION_NUM 0.0.0
+#endif /* API_VERSION_NUM */
+
+#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \
+ " (" ITT_TO_STR(API_VERSION_BUILD) ")"
+
+/* OS communication functions */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+typedef HMODULE lib_t;
+typedef DWORD TIDT;
+typedef CRITICAL_SECTION mutex_t;
+#define MUTEX_INITIALIZER { 0 }
+#define strong_alias(name, aliasname) /* empty for Windows */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */
+#endif /* _GNU_SOURCE */
+#include <pthread.h>
+typedef void* lib_t;
+typedef pthread_t TIDT;
+typedef pthread_mutex_t mutex_t;
+#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
+#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
+#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
+#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
+#define __itt_load_lib(name) LoadLibraryA(name)
+#define __itt_unload_lib(handle) FreeLibrary(handle)
+#define __itt_system_error() (int)GetLastError()
+#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
+#define __itt_fstrlen(s) lstrlenA(s)
+#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
+#define __itt_fstrdup(s) _strdup(s)
+#define __itt_thread_id() GetCurrentThreadId()
+#define __itt_thread_yield() SwitchToThread()
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE long
+__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
+{
+ return InterlockedIncrement(ptr);
+}
+#endif /* ITT_SIMPLE_INIT */
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define __itt_get_proc(lib, name) dlsym(lib, name)
+#define __itt_mutex_init(mutex) {\
+ pthread_mutexattr_t mutex_attr; \
+ int error_code = pthread_mutexattr_init(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_init", \
+ error_code); \
+ error_code = pthread_mutexattr_settype(&mutex_attr, \
+ PTHREAD_MUTEX_RECURSIVE); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", \
+ error_code); \
+ error_code = pthread_mutex_init(mutex, &mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutex_init", \
+ error_code); \
+ error_code = pthread_mutexattr_destroy(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", \
+ error_code); \
+}
+#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
+#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
+#define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
+#define __itt_unload_lib(handle) dlclose(handle)
+#define __itt_system_error() errno
+#define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
+#define __itt_fstrlen(s) strlen(s)
+#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
+#define __itt_fstrdup(s) strdup(s)
+#define __itt_thread_id() pthread_self()
+#define __itt_thread_yield() sched_yield()
+#if ITT_ARCH==ITT_ARCH_IA64
+#ifdef __INTEL_COMPILER
+#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
+#else /* __INTEL_COMPILER */
+/* TODO: Add Support for not Intel compilers for IA64 */
+#endif /* __INTEL_COMPILER */
+#else /* ITT_ARCH!=ITT_ARCH_IA64 */
+ITT_INLINE long
+__TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
+{
+ long result;
+ __asm__ __volatile__("lock\nxadd %0,%1"
+ : "=r"(result),"=m"(*(long*)ptr)
+ : "0"(addend), "m"(*(long*)ptr)
+ : "memory");
+ return result;
+}
+#endif /* ITT_ARCH==ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE long
+__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
+{
+ return __TBB_machine_fetchadd4(ptr, 1) + 1L;
+}
+#endif /* ITT_SIMPLE_INIT */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+typedef enum {
+ __itt_collection_normal = 0,
+ __itt_collection_paused = 1
+} __itt_collection_state;
+
+typedef enum {
+ __itt_thread_normal = 0,
+ __itt_thread_ignored = 1
+} __itt_thread_state;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_thread_info
+{
+ const char* nameA; /*!< Copy of original name in ASCII. */
+#if defined(UNICODE) || defined(_UNICODE)
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
+#else /* UNICODE || _UNICODE */
+ void* nameW;
+#endif /* UNICODE || _UNICODE */
+ TIDT tid;
+ __itt_thread_state state; /*!< Thread state (paused or normal) */
+ int extra1; /*!< Reserved to the runtime */
+ void* extra2; /*!< Reserved to the runtime */
+ struct ___itt_thread_info* next;
+} __itt_thread_info;
+
+#include "ittnotify_types.h" /* For __itt_group_id definition */
+
+typedef struct ___itt_api_info_20101001
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ __itt_group_id group;
+} __itt_api_info_20101001;
+
+typedef struct ___itt_api_info
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ void* null_func;
+ __itt_group_id group;
+} __itt_api_info;
+
+struct ___itt_domain;
+struct ___itt_string_handle;
+
+typedef struct ___itt_global
+{
+ unsigned char magic[8];
+ unsigned long version_major;
+ unsigned long version_minor;
+ unsigned long version_build;
+ volatile long api_initialized;
+ volatile long mutex_initialized;
+ volatile long atomic_counter;
+ mutex_t mutex;
+ lib_t lib;
+ void* error_handler;
+ const char** dll_path_ptr;
+ __itt_api_info* api_list_ptr;
+ struct ___itt_global* next;
+ /* Joinable structures below */
+ __itt_thread_info* thread_list;
+ struct ___itt_domain* domain_list;
+ struct ___itt_string_handle* string_list;
+ __itt_collection_state state;
+} __itt_global;
+
+#pragma pack(pop)
+
+#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = NULL; \
+ h->nameW = n ? _wcsdup(n) : NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = n ? __itt_fstrdup(n) : NULL; \
+ h->nameW = NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = NULL; \
+ h->nameW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = name ? __itt_fstrdup(name) : NULL; \
+ h->nameW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = NULL; \
+ h->strW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = name ? __itt_fstrdup(name) : NULL; \
+ h->strW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#endif /* _ITTNOTIFY_CONFIG_H_ */
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_types.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_types.h
new file mode 100644
index 000000000000..15008fe93e60
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/ittnotify_types.h
@@ -0,0 +1,69 @@
+/*===-- ittnotify_types.h - JIT Profiling API internal types--------*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef _ITTNOTIFY_TYPES_H_
+#define _ITTNOTIFY_TYPES_H_
+
+typedef enum ___itt_group_id
+{
+ __itt_group_none = 0,
+ __itt_group_legacy = 1<<0,
+ __itt_group_control = 1<<1,
+ __itt_group_thread = 1<<2,
+ __itt_group_mark = 1<<3,
+ __itt_group_sync = 1<<4,
+ __itt_group_fsync = 1<<5,
+ __itt_group_jit = 1<<6,
+ __itt_group_model = 1<<7,
+ __itt_group_splitter_min = 1<<7,
+ __itt_group_counter = 1<<8,
+ __itt_group_frame = 1<<9,
+ __itt_group_stitch = 1<<10,
+ __itt_group_heap = 1<<11,
+ __itt_group_splitter_max = 1<<12,
+ __itt_group_structure = 1<<12,
+ __itt_group_suppress = 1<<13,
+ __itt_group_all = -1
+} __itt_group_id;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_group_list
+{
+ __itt_group_id id;
+ const char* name;
+} __itt_group_list;
+
+#pragma pack(pop)
+
+#define ITT_GROUP_LIST(varname) \
+ static __itt_group_list varname[] = { \
+ { __itt_group_all, "all" }, \
+ { __itt_group_control, "control" }, \
+ { __itt_group_thread, "thread" }, \
+ { __itt_group_mark, "mark" }, \
+ { __itt_group_sync, "sync" }, \
+ { __itt_group_fsync, "fsync" }, \
+ { __itt_group_jit, "jit" }, \
+ { __itt_group_model, "model" }, \
+ { __itt_group_counter, "counter" }, \
+ { __itt_group_frame, "frame" }, \
+ { __itt_group_stitch, "stitch" }, \
+ { __itt_group_heap, "heap" }, \
+ { __itt_group_structure, "structure" }, \
+ { __itt_group_suppress, "suppress" }, \
+ { __itt_group_none, NULL } \
+ }
+
+#endif /* _ITTNOTIFY_TYPES_H_ */
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.c b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.c
new file mode 100644
index 000000000000..50d64d70c98a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.c
@@ -0,0 +1,480 @@
+/*===-- jitprofiling.c - JIT (Just-In-Time) Profiling API----------*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API implementation.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base easier.
+ *
+ *===----------------------------------------------------------------------===*/
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+#pragma optimize("", off)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#include <pthread.h>
+#include <stdint.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdlib.h>
+
+#include "jitprofiling.h"
+
+static const char rcsid[] = "\n@(#) $Revision: 243501 $\n";
+
+#define DLL_ENVIRONMENT_VAR "VS_PROFILER"
+
+#ifndef NEW_DLL_ENVIRONMENT_VAR
+#if ITT_ARCH==ITT_ARCH_IA32
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32"
+#else
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64"
+#endif
+#endif /* NEW_DLL_ENVIRONMENT_VAR */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define DEFAULT_DLLNAME "JitPI.dll"
+HINSTANCE m_libHandle = NULL;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define DEFAULT_DLLNAME "libJitPI.so"
+void* m_libHandle = NULL;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* default location of JIT profiling agent on Android */
+#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so"
+
+/* the function pointers */
+typedef unsigned int(*TPInitialize)(void);
+static TPInitialize FUNC_Initialize=NULL;
+
+typedef unsigned int(*TPNotify)(unsigned int, void*);
+static TPNotify FUNC_NotifyEvent=NULL;
+
+static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
+
+/* end collector dll part. */
+
+/* loadiJIT_Funcs() : this function is called just in the beginning
+ * and is responsible to load the functions from BistroJavaCollector.dll
+ * result:
+ * on success: the functions loads, iJIT_DLL_is_missing=0, return value = 1
+ * on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0
+ */
+static int loadiJIT_Funcs(void);
+
+/* global representing whether the BistroJavaCollector can't be loaded */
+static int iJIT_DLL_is_missing = 0;
+
+/* Virtual stack - the struct is used as a virtual stack for each thread.
+ * Every thread initializes with a stack of size INIT_TOP_STACK.
+ * Every method entry decreases from the current stack point,
+ * and when a thread stack reaches its top of stack (return from the global
+ * function), the top of stack and the current stack increase. Notice that
+ * when returning from a function the stack pointer is the address of
+ * the function return.
+*/
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+static DWORD threadLocalStorageHandle = 0;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#define INIT_TOP_Stack 10000
+
+typedef struct
+{
+ unsigned int TopStack;
+ unsigned int CurrentStack;
+} ThreadStack, *pThreadStack;
+
+/* end of virtual stack. */
+
+/*
+ * The function for reporting virtual-machine related events to VTune.
+ * Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill
+ * in the stack_id field in the iJIT_Method_NIDS structure, as VTune fills it.
+ * The return value in iJVM_EVENT_TYPE_ENTER_NIDS &&
+ * iJVM_EVENT_TYPE_LEAVE_NIDS events will be 0 in case of failure.
+ * in iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event
+ * it will be -1 if EventSpecificData == 0 otherwise it will be 0.
+*/
+
+ITT_EXTERN_C int JITAPI
+iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData)
+{
+ int ReturnValue;
+
+ /*
+ * This section is for debugging outside of VTune.
+ * It creates the environment variables that indicates call graph mode.
+ * If running outside of VTune remove the remark.
+ *
+ *
+ * static int firstTime = 1;
+ * char DoCallGraph[12] = "DoCallGraph";
+ * if (firstTime)
+ * {
+ * firstTime = 0;
+ * SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph);
+ * }
+ *
+ * end of section.
+ */
+
+ /* initialization part - the functions have not been loaded yet. This part
+ * will load the functions, and check if we are in Call Graph mode.
+ * (for special treatment).
+ */
+ if (!FUNC_NotifyEvent)
+ {
+ if (iJIT_DLL_is_missing)
+ return 0;
+
+ /* load the Function from the DLL */
+ if (!loadiJIT_Funcs())
+ return 0;
+
+ /* Call Graph initialization. */
+ }
+
+ /* If the event is method entry/exit, check that in the current mode
+ * VTune is allowed to receive it
+ */
+ if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS ||
+ event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
+ (executionMode != iJIT_CALLGRAPH_ON))
+ {
+ return 0;
+ }
+ /* This section is performed when method enter event occurs.
+ * It updates the virtual stack, or creates it if this is the first
+ * method entry in the thread. The stack pointer is decreased.
+ */
+ if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* initialize the stack. */
+ threadStack = (pThreadStack) calloc (sizeof(ThreadStack), 1);
+ threadStack->TopStack = INIT_TOP_Stack;
+ threadStack->CurrentStack = INIT_TOP_Stack;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue(threadLocalStorageHandle,(void*)threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle,(void*)threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ /* decrease the stack. */
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ (threadStack->CurrentStack)--;
+ }
+
+ /* This section is performed when method leave event occurs
+ * It updates the virtual stack.
+ * Increases the stack pointer.
+ * If the stack pointer reached the top (left the global function)
+ * increase the pointer and the top pointer.
+ */
+ if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* Error: first report in this thread is method exit */
+ exit (1);
+ }
+
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ ++(threadStack->CurrentStack) + 1;
+
+ if (((piJIT_Method_NIDS) EventSpecificData)->stack_id
+ > threadStack->TopStack)
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id =
+ (unsigned int)-1;
+ }
+
+ if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED)
+ {
+ /* check for use of reserved method IDs */
+ if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 )
+ return 0;
+ }
+
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+
+ return ReturnValue;
+}
+
+/* The new mode call back routine */
+ITT_EXTERN_C void JITAPI
+iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx
+ NewModeCallBackFuncEx)
+{
+ /* is it already missing... or the load of functions from the DLL failed */
+ if (iJIT_DLL_is_missing || !loadiJIT_Funcs())
+ {
+ /* then do not bother with notifications */
+ NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS);
+ /* Error: could not load JIT functions. */
+ return;
+ }
+ /* nothing to do with the callback */
+}
+
+/*
+ * This function allows the user to query in which mode, if at all,
+ *VTune is running
+ */
+ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void)
+{
+ if (!iJIT_DLL_is_missing)
+ {
+ loadiJIT_Funcs();
+ }
+
+ return executionMode;
+}
+
+/* this function loads the collector dll (BistroJavaCollector)
+ * and the relevant functions.
+ * on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1
+ * on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0
+ */
+static int loadiJIT_Funcs(void)
+{
+ static int bDllWasLoaded = 0;
+ char *dllName = (char*)rcsid; /* !! Just to avoid unused code elimination */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ DWORD dNameLength = 0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if(bDllWasLoaded)
+ {
+ /* dll was already loaded, no need to do it for the second time */
+ return 1;
+ }
+
+ /* Assumes that the DLL will not be found */
+ iJIT_DLL_is_missing = 1;
+ FUNC_NotifyEvent = NULL;
+
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ /* Try to get the dll name from the environment */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR,
+ dllName, dNameLength);
+ if (envret)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = LoadLibraryExA(dllName,
+ NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+ }
+ free(dllName);
+ } else {
+ /* Try to use old VS_PROFILER variable */
+ dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR,
+ dllName, dNameLength);
+ if (envret)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = LoadLibraryA(dllName);
+ }
+ free(dllName);
+ }
+ }
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dllName = getenv(NEW_DLL_ENVIRONMENT_VAR);
+ if (!dllName)
+ dllName = getenv(DLL_ENVIRONMENT_VAR);
+#ifdef ANDROID
+ if (!dllName)
+ dllName = ANDROID_JIT_AGENT_PATH;
+#endif
+ if (dllName)
+ {
+ /* Try to load the dll from the PATH... */
+ m_libHandle = dlopen(dllName, RTLD_LAZY);
+ }
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if (!m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ m_libHandle = LoadLibraryA(DEFAULT_DLLNAME);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ /* if the dll wasn't loaded - exit. */
+ if (!m_libHandle)
+ {
+ iJIT_DLL_is_missing = 1; /* don't try to initialize
+ * JIT agent the second time
+ */
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_NotifyEvent = (TPNotify)(intptr_t)dlsym(m_libHandle, "NotifyEvent");
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_NotifyEvent)
+ {
+ FUNC_Initialize = NULL;
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_Initialize = (TPInitialize)(intptr_t)dlsym(m_libHandle, "Initialize");
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_Initialize)
+ {
+ FUNC_NotifyEvent = NULL;
+ return 0;
+ }
+
+ executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize();
+
+ bDllWasLoaded = 1;
+ iJIT_DLL_is_missing = 0; /* DLL is ok. */
+
+ /*
+ * Call Graph mode: init the thread local storage
+ * (need to store the virtual stack there).
+ */
+ if ( executionMode == iJIT_CALLGRAPH_ON )
+ {
+ /* Allocate a thread local storage slot for the thread "stack" */
+ if (!threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ threadLocalStorageHandle = TlsAlloc();
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_create(&threadLocalStorageHandle, NULL);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ return 1;
+}
+
+/*
+ * This function should be called by the user whenever a thread ends,
+ * to free the thread "virtual stack" storage
+ */
+ITT_EXTERN_C void JITAPI FinalizeThread(void)
+{
+ if (threadLocalStorageHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack =
+ (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack =
+ (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (threadStack)
+ {
+ free (threadStack);
+ threadStack = NULL;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue (threadLocalStorageHandle, threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle, threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+ }
+}
+
+/*
+ * This function should be called by the user when the process ends,
+ * to free the local storage index
+*/
+ITT_EXTERN_C void JITAPI FinalizeProcess(void)
+{
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ if (threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsFree (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_delete(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+}
+
+/*
+ * This function should be called by the user for any method once.
+ * The function will return a unique method ID, the user should maintain
+ * the ID for each method
+ */
+ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID(void)
+{
+ static unsigned int methodID = 0x100000;
+
+ if (methodID == 0)
+ return 0; /* ERROR : this is not a valid value */
+
+ return methodID++;
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.h
new file mode 100644
index 000000000000..ba627b430ff1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/IntelJITProfiling/jitprofiling.h
@@ -0,0 +1,258 @@
+/*===-- jitprofiling.h - JIT Profiling API-------------------------*- C -*-===*
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * Profiling API declaration.
+ *
+ * NOTE: This file comes in a style different from the rest of LLVM
+ * source base since this is a piece of code shared from Intel(R)
+ * products. Please do not reformat / re-style this code to make
+ * subsequent merges and contributions from the original source base eaiser.
+ *
+ *===----------------------------------------------------------------------===*/
+#ifndef __JITPROFILING_H__
+#define __JITPROFILING_H__
+
+/*
+ * Various constants used by functions
+ */
+
+/* event notification: values passed as the first argument of
+ * iJIT_NotifyEvent(); EventSpecificData depends on the event. */
+typedef enum iJIT_jvm_event
+{
+
+    /* shutdown */
+
+    /*
+     * Program exiting EventSpecificData NA
+     */
+    iJVM_EVENT_TYPE_SHUTDOWN = 2,
+
+    /* JIT profiling */
+
+    /*
+     * issued after method code jitted into memory but before code is executed
+     * EventSpecificData is an iJIT_Method_Load
+     */
+    iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+
+    /* issued before unload. Method code will no longer be executed, but code
+     * and info are still in memory. The VTune profiler may capture method
+     * code only at this point EventSpecificData is iJIT_Method_Id
+     * (implicit value 14, following METHOD_LOAD_FINISHED)
+     */
+    iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+
+    /* Method Profiling */
+
+    /* method name, Id and stack is supplied
+     * issued when a method is about to be entered EventSpecificData is
+     * iJIT_Method_NIDS
+     */
+    iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+
+    /* method name, Id and stack is supplied
+     * issued when a method is about to be left EventSpecificData is
+     * iJIT_Method_NIDS (implicit value 20)
+     */
+    iJVM_EVENT_TYPE_LEAVE_NIDS
+} iJIT_JVM_EVENT;
+
+/* Bit flags (OR-able) delivered to the JIT via the iJIT_ModeChangedEx
+ * callback; they tell the JIT which notifications the profiler wants. */
+typedef enum _iJIT_ModeFlags
+{
+    /* No need to Notify VTune, since VTune is not running */
+    iJIT_NO_NOTIFICATIONS = 0x0000,
+
+    /* when turned on the jit must call
+     * iJIT_NotifyEvent
+     * (
+     * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+     * )
+     * for all the method already jitted
+     */
+    iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+
+    /* when turned on the jit must call
+     * iJIT_NotifyEvent
+     * (
+     * iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
+     * ) for all the method that are unloaded
+     * NOTE(review): only ..._METHOD_UNLOAD_START exists in the event
+     * enum above; "..._UNLOAD_FINISHED" looks like a stale name kept
+     * from the fuller upstream API -- confirm against Intel's docs.
+     */
+    iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+
+    /* when turned on the jit must instrument all
+     * the currently jitted code with calls on
+     * method entries
+     */
+    iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+
+    /* when turned on the jit must instrument all
+     * the currently jitted code with calls
+     * on method exit
+     */
+    iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+
+} iJIT_ModeFlags;
+
+
+ /* Flags used by iJIT_IsProfilingActive(); describe which collector
+  * (if any) is currently attached to the process. */
+typedef enum _iJIT_IsProfilingActiveFlags
+{
+    /* No profiler is running. Currently not used */
+    iJIT_NOTHING_RUNNING = 0x0000,
+
+    /* Sampling is running. This is the default value
+     * returned by iJIT_IsProfilingActive()
+     */
+    iJIT_SAMPLING_ON = 0x0001,
+
+    /* Call Graph is running */
+    iJIT_CALLGRAPH_ON = 0x0002
+
+} iJIT_IsProfilingActiveFlags;
+
+/* Enumerator for the environment of methods; filled into
+ * iJIT_Method_Load::env by VTune (only the JIT-API value survives
+ * in this trimmed copy of the header). */
+typedef enum _iJDEnvironmentType
+{
+    iJDE_JittingAPI = 2
+} iJDEnvironmentType;
+
+/**********************************
+ * Data structures for the events *
+ **********************************/
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_START
+ */
+
+typedef struct _iJIT_Method_Id
+{
+    /* Id of the method (same as the one passed in
+     * the iJIT_Method_Load struct)
+     */
+    unsigned int method_id;
+
+} *piJIT_Method_Id, iJIT_Method_Id;
+
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_ENTER_NIDS,
+ * iJVM_EVENT_TYPE_LEAVE_NIDS,
+ * iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS
+ * NOTE(review): EXCEPTION_OCCURRED_NIDS is not declared in the event
+ * enum above -- likely inherited from the fuller upstream API; confirm.
+ */
+
+typedef struct _iJIT_Method_NIDS
+{
+    /* unique method ID (from iJIT_GetNewMethodID) */
+    unsigned int method_id;
+
+    /* NOTE: no need to fill this field, it's filled by VTune */
+    unsigned int stack_id;
+
+    /* method name (just the method, without the class) */
+    char* method_name;
+} *piJIT_Method_NIDS, iJIT_Method_NIDS;
+
+/* structures for the events:
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
+ */
+
+/* One entry of iJIT_Method_Load::line_number_table, mapping a code
+ * offset inside the jitted method to a source line. */
+typedef struct _LineNumberInfo
+{
+    /* x86 Offset from the beginning of the method*/
+    unsigned int Offset;
+
+    /* source line number from the beginning of the source file */
+    unsigned int LineNumber;
+
+} *pLineNumberInfo, LineNumberInfo;
+
+/* Method descriptor passed as EventSpecificData with
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED. */
+typedef struct _iJIT_Method_Load
+{
+    /* unique method ID - can be any unique value, (except 0 - 999) */
+    unsigned int method_id;
+
+    /* method name (can be with or without the class and signature, in any case
+     * the class name will be added to it)
+     */
+    char* method_name;
+
+    /* virtual address of that method - This determines the method range for the
+     * iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
+     */
+    void* method_load_address;
+
+    /* Size in memory - Must be exact */
+    unsigned int method_size;
+
+    /* Line Table size in number of entries - Zero if none */
+    unsigned int line_number_size;
+
+    /* Pointer to the beginning of the line numbers info array */
+    pLineNumberInfo line_number_table;
+
+    /* unique class ID */
+    unsigned int class_id;
+
+    /* class file name */
+    char* class_file_name;
+
+    /* source file name */
+    char* source_file_name;
+
+    /* bits supplied by the user for saving in the JIT file */
+    void* user_data;
+
+    /* the size of the user data buffer */
+    unsigned int user_data_size;
+
+    /* NOTE: no need to fill this field, it's filled by VTune */
+    iJDEnvironmentType env;
+
+} *piJIT_Method_Load, iJIT_Method_Load;
+
+/* API Functions */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CDECL
+# if defined WIN32 || defined _WIN32
+# define CDECL __cdecl
+# else /* defined WIN32 || defined _WIN32 */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not actual on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* defined WIN32 || defined _WIN32 */
+#endif /* CDECL */
+
+#define JITAPI CDECL
+
+/* called when the settings are changed with new settings */
+typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
+
+int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
+
+/* The new mode call back routine */
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx);
+
+iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
+
+void JITAPI FinalizeThread(void);
+
+void JITAPI FinalizeProcess(void);
+
+unsigned int JITAPI iJIT_GetNewMethodID(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JITPROFILING_H__ */
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
new file mode 100644
index 000000000000..2d69edef878e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -0,0 +1,2086 @@
+//===-- Execution.cpp - Implement code to simulate the program ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the actual instruction interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cmath>
+using namespace llvm;
+
+#define DEBUG_TYPE "interpreter"
+
+STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
+
+static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
+ cl::desc("make the interpreter print every volatile load and store"));
+
+//===----------------------------------------------------------------------===//
+// Various Helper Functions
+//===----------------------------------------------------------------------===//
+
+// Bind the runtime value Val to the IR value V in stack frame SF,
+// overwriting any previous binding.
+static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
+  SF.Values[V] = Val;
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// Scalar floating-point negation into Dest; only float and double are
+// valid here (vector FNeg is expanded lane-by-lane by the caller).
+static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
+  switch (Ty->getTypeID()) {
+  case Type::FloatTyID:
+    Dest.FloatVal = -Src.FloatVal;
+    break;
+  case Type::DoubleTyID:
+    Dest.DoubleVal = -Src.DoubleVal;
+    break;
+  default:
+    llvm_unreachable("Unhandled type for FNeg instruction");
+  }
+}
+
+// Interpret a unary operator (FNeg is the only one today): vectors are
+// negated lane-by-lane, scalars via executeFNegInst; result stored in SF.
+void Interpreter::visitUnaryOperator(UnaryOperator &I) {
+  ExecutionContext &SF = ECStack.back();
+  Type *Ty = I.getOperand(0)->getType();
+  GenericValue Src = getOperandValue(I.getOperand(0), SF);
+  GenericValue R; // Result
+
+  // First process vector operation
+  if (Ty->isVectorTy()) {
+    R.AggregateVal.resize(Src.AggregateVal.size());
+
+    switch(I.getOpcode()) {
+    default:
+      llvm_unreachable("Don't know how to handle this unary operator");
+      break;
+    case Instruction::FNeg:
+      // Element type decides which union member of each lane is live.
+      if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+          R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
+      } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
+        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+          R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
+      } else {
+        llvm_unreachable("Unhandled type for FNeg instruction");
+      }
+      break;
+    }
+  } else {
+    switch (I.getOpcode()) {
+    default:
+      llvm_unreachable("Don't know how to handle this unary operator");
+      break;
+    case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
+    }
+  }
+  SetValue(&I, R, SF);
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
+ break
+
+// Scalar fadd on float/double (dispatch via IMPLEMENT_BINARY_OPERATOR).
+static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
+                            GenericValue Src2, Type *Ty) {
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_BINARY_OPERATOR(+, Float);
+    IMPLEMENT_BINARY_OPERATOR(+, Double);
+  default:
+    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+}
+
+// Scalar fsub on float/double.
+static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
+                            GenericValue Src2, Type *Ty) {
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_BINARY_OPERATOR(-, Float);
+    IMPLEMENT_BINARY_OPERATOR(-, Double);
+  default:
+    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+}
+
+// Scalar fmul on float/double.
+static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
+                            GenericValue Src2, Type *Ty) {
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_BINARY_OPERATOR(*, Float);
+    IMPLEMENT_BINARY_OPERATOR(*, Double);
+  default:
+    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+}
+
+// Scalar fdiv on float/double (IEEE semantics: div-by-zero gives inf/NaN).
+static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
+                            GenericValue Src2, Type *Ty) {
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_BINARY_OPERATOR(/, Float);
+    IMPLEMENT_BINARY_OPERATOR(/, Double);
+  default:
+    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+}
+
+// Scalar frem on float/double, implemented with C fmod (same sign rules).
+static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
+                            GenericValue Src2, Type *Ty) {
+  switch (Ty->getTypeID()) {
+  case Type::FloatTyID:
+    Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
+    break;
+  case Type::DoubleTyID:
+    Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
+    break;
+  default:
+    dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+}
+
+#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
+ case Type::IntegerTyID: \
+ Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
+ break;
+
+#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
+ case Type::FixedVectorTyID: \
+ case Type::ScalableVectorTyID: { \
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
+ Dest.AggregateVal.resize(Src1.AggregateVal.size()); \
+ for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
+ Dest.AggregateVal[_i].IntVal = APInt( \
+ 1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal)); \
+ } break;
+
+// Handle pointers specially because they must be compared with only as much
+// width as the host has. We _do not_ want to be comparing 64 bit values when
+// running on a 32-bit target, otherwise the upper 32 bits might mess up
+// comparisons if they contain garbage.
+#define IMPLEMENT_POINTER_ICMP(OP) \
+ case Type::PointerTyID: \
+ Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
+ (void*)(intptr_t)Src2.PointerVal); \
+ break;
+
+// icmp eq: scalar integers, per-lane integer vectors, or pointers
+// (pointers compared at host width, see IMPLEMENT_POINTER_ICMP above).
+static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
+                                   Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(eq,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
+    IMPLEMENT_POINTER_ICMP(==);
+  default:
+    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp ne on integers, integer vectors, or pointers; returns i1 lanes.
+static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
+                                   Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(ne,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
+    IMPLEMENT_POINTER_ICMP(!=);
+  default:
+    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp ult: unsigned < via APInt::ult; pointers compared with raw <.
+static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(ult,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
+    IMPLEMENT_POINTER_ICMP(<);
+  default:
+    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp slt: signed < via APInt::slt; pointer branch uses plain < on the
+// host pointer, which ignores signedness.
+static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(slt,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
+    IMPLEMENT_POINTER_ICMP(<);
+  default:
+    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp ugt: unsigned > via APInt::ugt.
+static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
+    IMPLEMENT_POINTER_ICMP(>);
+  default:
+    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp sgt: signed > via APInt::sgt.
+static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
+    IMPLEMENT_POINTER_ICMP(>);
+  default:
+    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp ule: unsigned <= via APInt::ule.
+static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(ule,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
+    IMPLEMENT_POINTER_ICMP(<=);
+  default:
+    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp sle: signed <= via APInt::sle.
+static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(sle,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
+    IMPLEMENT_POINTER_ICMP(<=);
+  default:
+    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp uge: unsigned >= via APInt::uge.
+static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(uge,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
+    IMPLEMENT_POINTER_ICMP(>=);
+  default:
+    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// icmp sge: signed >= via APInt::sge.
+static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_INTEGER_ICMP(sge,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
+    IMPLEMENT_POINTER_ICMP(>=);
+  default:
+    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// Interpret an icmp instruction: dispatch on the predicate to the
+// executeICMP_* helper and store the i1 (or i1-vector) result in SF.
+void Interpreter::visitICmpInst(ICmpInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  Type *Ty = I.getOperand(0)->getType();
+  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue R; // Result
+
+  switch (I.getPredicate()) {
+  case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
+  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
+  default:
+    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
+    llvm_unreachable(nullptr);
+  }
+
+  SetValue(&I, R, SF);
+}
+
+#define IMPLEMENT_FCMP(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
+ break
+
+#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, \
+ Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
+ break;
+
+#define IMPLEMENT_VECTOR_FCMP(OP) \
+ case Type::FixedVectorTyID: \
+ case Type::ScalableVectorTyID: \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
+ IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
+ } else { \
+ IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
+ }
+
+// fcmp oeq: C's == is naturally "ordered equal" (false when NaN), so no
+// explicit NaN handling is needed here.
+static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_FCMP(==, Float);
+    IMPLEMENT_FCMP(==, Double);
+    IMPLEMENT_VECTOR_FCMP(==);
+  default:
+    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,false); \
+ return Dest; \
+ } \
+ } else { \
+ if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,false); \
+ return Dest; \
+ } \
+ }
+
+#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
+ assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
+ Dest.AggregateVal.resize( X.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
+ if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
+ Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
+ Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
+ else { \
+ Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
+ } \
+ }
+
+#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
+ if (TY->isVectorTy()) { \
+ if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
+ MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
+ } else { \
+ MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
+ } \
+ } \
+
+
+
+// fcmp one: ordered and not-equal.  C's != alone would be true for NaN,
+// so scalars with a NaN operand return false early, and for vectors the
+// NaN lanes are recorded into DestMask first and forced to false after
+// the != compare.
+static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty)
+{
+  GenericValue Dest;
+  // if input is scalar value and Src1 or Src2 is NaN return false
+  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
+  // if vector input detect NaNs and fill mask
+  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
+  GenericValue DestMask = Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_FCMP(!=, Float);
+    IMPLEMENT_FCMP(!=, Double);
+    IMPLEMENT_VECTOR_FCMP(!=);
+  default:
+    dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  // in vector case mask out NaN elements
+  if (Ty->isVectorTy())
+    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+      if (DestMask.AggregateVal[_i].IntVal == false)
+        Dest.AggregateVal[_i].IntVal = APInt(1,false);
+
+  return Dest;
+}
+
+// fcmp ole: ordered <= (C's <= is false when either operand is NaN).
+static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_FCMP(<=, Float);
+    IMPLEMENT_FCMP(<=, Double);
+    IMPLEMENT_VECTOR_FCMP(<=);
+  default:
+    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// fcmp oge: ordered >=.
+static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_FCMP(>=, Float);
+    IMPLEMENT_FCMP(>=, Double);
+    IMPLEMENT_VECTOR_FCMP(>=);
+  default:
+    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// fcmp olt: ordered <.
+static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_FCMP(<, Float);
+    IMPLEMENT_FCMP(<, Double);
+    IMPLEMENT_VECTOR_FCMP(<);
+  default:
+    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+// fcmp ogt: ordered >.
+static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  switch (Ty->getTypeID()) {
+    IMPLEMENT_FCMP(>, Float);
+    IMPLEMENT_FCMP(>, Double);
+    IMPLEMENT_VECTOR_FCMP(>);
+  default:
+    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+  return Dest;
+}
+
+#define IMPLEMENT_UNORDERED(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ } \
+ } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ }
+
+#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
+ if (TY->isVectorTy()) { \
+ GenericValue DestMask = Dest; \
+ Dest = FUNC(Src1, Src2, Ty); \
+ for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
+ if (DestMask.AggregateVal[_i].IntVal == true) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, true); \
+ return Dest; \
+ }
+
+// fcmp ueq: unordered-or-equal.  Scalars short-circuit to true on NaN
+// (IMPLEMENT_UNORDERED); vectors build a NaN mask, run the ordered
+// compare, then force NaN lanes to true (IMPLEMENT_VECTOR_UNORDERED).
+static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
+  return executeFCMP_OEQ(Src1, Src2, Ty);
+
+}
+
+// fcmp une: unordered-or-not-equal (true on NaN, else like ONE).
+static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
+  return executeFCMP_ONE(Src1, Src2, Ty);
+}
+
+// fcmp ule: unordered-or-<= (true on NaN, else like OLE).
+static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
+  return executeFCMP_OLE(Src1, Src2, Ty);
+}
+
+// fcmp uge: unordered-or->= (true on NaN, else like OGE).
+static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
+  return executeFCMP_OGE(Src1, Src2, Ty);
+}
+
+// fcmp ult: unordered-or-< (true on NaN, else like OLT).
+static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
+  return executeFCMP_OLT(Src1, Src2, Ty);
+}
+
+// fcmp ugt: unordered-or-> (true on NaN, else like OGT).
+static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
+  return executeFCMP_OGT(Src1, Src2, Ty);
+}
+
+// fcmp ord: true when neither operand is NaN.  Uses the self-comparison
+// idiom (x == x is false only for NaN), per lane for vectors.
+static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  if(Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].FloatVal ==
+        Src1.AggregateVal[_i].FloatVal) &&
+        (Src2.AggregateVal[_i].FloatVal ==
+        Src2.AggregateVal[_i].FloatVal)));
+    } else {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].DoubleVal ==
+        Src1.AggregateVal[_i].DoubleVal) &&
+        (Src2.AggregateVal[_i].DoubleVal ==
+        Src2.AggregateVal[_i].DoubleVal)));
+    }
+  } else if (Ty->isFloatTy())
+    Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
+                           Src2.FloatVal == Src2.FloatVal));
+  else {
+    Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
+                           Src2.DoubleVal == Src2.DoubleVal));
+  }
+  return Dest;
+}
+
+// fcmp uno: true when either operand is NaN (complement of ORD); same
+// self-comparison idiom with != and ||, per lane for vectors.
+static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
+                                    Type *Ty) {
+  GenericValue Dest;
+  if(Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].FloatVal !=
+        Src1.AggregateVal[_i].FloatVal) ||
+        (Src2.AggregateVal[_i].FloatVal !=
+        Src2.AggregateVal[_i].FloatVal)));
+    } else {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].DoubleVal !=
+        Src1.AggregateVal[_i].DoubleVal) ||
+        (Src2.AggregateVal[_i].DoubleVal !=
+        Src2.AggregateVal[_i].DoubleVal)));
+    }
+  } else if (Ty->isFloatTy())
+    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
+                           Src2.FloatVal != Src2.FloatVal));
+  else {
+    Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
+                           Src2.DoubleVal != Src2.DoubleVal));
+  }
+  return Dest;
+}
+
+// Constant-result compare backing FCMP_FALSE/FCMP_TRUE: fills every
+// vector lane (or the scalar result) with 'val'; operands only supply
+// the lane count.
+static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
+                                     Type *Ty, const bool val) {
+  GenericValue Dest;
+  if(Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+      Dest.AggregateVal[_i].IntVal = APInt(1,val);
+  } else {
+    Dest.IntVal = APInt(1, val);
+  }
+
+  return Dest;
+}
+
+// Interpret an fcmp instruction: dispatch on the (ordered/unordered)
+// predicate to the executeFCMP_* helper and store the result in SF.
+void Interpreter::visitFCmpInst(FCmpInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  Type *Ty = I.getOperand(0)->getType();
+  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue R; // Result
+
+  switch (I.getPredicate()) {
+  default:
+    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
+    llvm_unreachable(nullptr);
+    break;
+  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
+  break;
+  case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
+  break;
+  case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
+  case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
+  }
+
+  SetValue(&I, R, SF);
+}
+
+void Interpreter::visitBinaryOperator(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ // First process vector operation
+ if (Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ R.AggregateVal.resize(Src1.AggregateVal.size());
+
+ // Macros to execute binary operation 'OP' over integer vectors
+#define INTEGER_VECTOR_OPERATION(OP) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].IntVal = \
+ Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
+
+ // Additional macros to execute binary operations udiv/sdiv/urem/srem since
+ // they have different notation.
+#define INTEGER_VECTOR_FUNCTION(OP) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].IntVal = \
+ Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
+
+ // Macros to execute binary operation 'OP' over floating point type TY
+ // (float or double) vectors
+#define FLOAT_VECTOR_FUNCTION(OP, TY) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].TY = \
+ Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
+
+ // Macros to choose appropriate TY: float or double and run operation
+ // execution
+#define FLOAT_VECTOR_OP(OP) { \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
+ FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
+ else { \
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
+ FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
+ else { \
+ dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
+ llvm_unreachable(0); \
+ } \
+ } \
+}
+
+ switch(I.getOpcode()){
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
+ case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
+ case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break;
+ case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break;
+ case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break;
+ case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break;
+ case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break;
+ case Instruction::And: INTEGER_VECTOR_OPERATION(&) break;
+ case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break;
+ case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break;
+ case Instruction::FAdd: FLOAT_VECTOR_OP(+) break;
+ case Instruction::FSub: FLOAT_VECTOR_OP(-) break;
+ case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
+ case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
+ case Instruction::FRem:
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].FloatVal =
+ fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
+ else {
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].DoubleVal =
+ fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
+ else {
+ dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ }
+ break;
+ }
+ } else {
+ switch (I.getOpcode()) {
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
+ case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
+ case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
+ case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
+ case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
+ case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
+ case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
+ case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
+ case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
+ case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
+ case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
+ case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
+ case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
+ case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
+ }
+ }
+ SetValue(&I, R, SF);
+}
+
+// Implements the 'select' instruction: the result is Src2 when the condition
+// Src1 is true (non-zero) and Src3 otherwise.  Vector selects choose per lane
+// using each lane's condition bit.
+static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
+ GenericValue Src3, Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
+ // A zero lane condition selects the "false" operand (Src3).
+ Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
+ Src3.AggregateVal[i] : Src2.AggregateVal[i];
+ } else {
+ Dest = (Src1.IntVal == 0) ? Src3 : Src2;
+ }
+ return Dest;
+}
+
+// Evaluate the condition and both value operands in the current frame, then
+// record the selected value as this instruction's result.
+void Interpreter::visitSelectInst(SelectInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type * Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
+ SetValue(&I, R, SF);
+}
+
+//===----------------------------------------------------------------------===//
+// Terminator Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// Terminate interpretation as if the interpreted program called exit():
+// discard every stack frame, run registered atexit handlers, then exit the
+// host process with the given status.
+void Interpreter::exitCalled(GenericValue GV) {
+ // runAtExitHandlers() assumes there are no stack frames, but
+ // if exit() was called, then it had a stack frame. Blow away
+ // the stack before interpreting atexit handlers.
+ ECStack.clear();
+ runAtExitHandlers();
+ // Exit status is conventionally 32 bits; wider values are truncated.
+ exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
+}
+
+/// Pop the last stack frame off of ECStack and then copy the result
+/// back into the result variable if we are not returning void. The
+/// result variable may be the ExitValue, or the Value of the calling
+/// CallInst if there was a previous stack frame. This method may
+/// invalidate any ECStack iterators you have. This method also takes
+/// care of switching to the normal destination BB, if we are returning
+/// from an invoke.
+///
+void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
+ GenericValue Result) {
+ // Pop the current stack frame.
+ ECStack.pop_back();
+
+ if (ECStack.empty()) { // Finished main. Put result into exit code...
+ if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
+ ExitValue = Result; // Capture the exit value of the program
+ } else {
+ // Void return from the outermost frame: zero out the exit value so no
+ // stale data leaks through the union.
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ }
+ } else {
+ // If we have a previous stack frame, and we have a previous call,
+ // fill in the return value...
+ ExecutionContext &CallingSF = ECStack.back();
+ if (CallingSF.Caller) {
+ // Save result...
+ if (!CallingSF.Caller->getType()->isVoidTy())
+ SetValue(CallingSF.Caller, Result, CallingSF);
+ // Returning from an invoke resumes at its normal destination block.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(CallingSF.Caller))
+ SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
+ CallingSF.Caller = nullptr; // We returned from the call...
+ }
+ }
+}
+
+// Handle 'ret': capture the returned value (if any) and unwind one frame,
+// delivering the value to the caller via popStackAndReturnValueToCaller.
+void Interpreter::visitReturnInst(ReturnInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *RetTy = Type::getVoidTy(I.getContext());
+ GenericValue Result;
+
+ // Save away the return value... (if we are not 'ret void')
+ if (I.getNumOperands()) {
+ RetTy = I.getReturnValue()->getType();
+ Result = getOperandValue(I.getReturnValue(), SF);
+ }
+
+ popStackAndReturnValueToCaller(RetTy, Result);
+}
+
+// Executing 'unreachable' is a hard error: the program has undefined behavior.
+void Interpreter::visitUnreachableInst(UnreachableInst &I) {
+ report_fatal_error("Program executed an 'unreachable' instruction!");
+}
+
+// Handle conditional and unconditional 'br' by jumping to the chosen
+// successor (PHI nodes in the destination are handled by the helper).
+void Interpreter::visitBranchInst(BranchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ BasicBlock *Dest;
+
+ Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
+ if (!I.isUnconditional()) {
+ Value *Cond = I.getCondition();
+ if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
+ Dest = I.getSuccessor(1);
+ }
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+// Handle 'switch': linearly compare the condition against each case value and
+// branch to the first match, or to the default destination if none match.
+void Interpreter::visitSwitchInst(SwitchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Value* Cond = I.getCondition();
+ Type *ElTy = Cond->getType();
+ GenericValue CondVal = getOperandValue(Cond, SF);
+
+ // Check to see if any of the cases match...
+ BasicBlock *Dest = nullptr;
+ for (auto Case : I.cases()) {
+ GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
+ if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
+ Dest = cast<BasicBlock>(Case.getCaseSuccessor());
+ break;
+ }
+ }
+ if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+// Handle 'indirectbr': the operand's pointer value is interpreted directly as
+// a BasicBlock address (as produced by blockaddress).
+void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
+ SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
+}
+
+
+// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
+// This function handles the actual updating of block and instruction iterators
+// as well as execution of all of the PHI nodes in the destination block.
+//
+// This method does this because all of the PHI nodes must be executed
+// atomically, reading their inputs before any of the results are updated. Not
+// doing this can cause problems if the PHI nodes depend on other PHI nodes for
+// their inputs. If the input PHI node is updated before it is read, incorrect
+// results can happen. Thus we use a two phase approach.
+//
+void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
+ BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
+ SF.CurBB = Dest; // Update CurBB to branch destination
+ SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
+
+ if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
+
+ // Loop over all of the PHI nodes in the current block, reading their inputs.
+ // Phase 1: read every PHI's incoming value before writing any result, so
+ // PHIs that feed each other see the pre-branch values.
+ std::vector<GenericValue> ResultValues;
+
+ for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
+ // Search for the value corresponding to this previous bb...
+ int i = PN->getBasicBlockIndex(PrevBB);
+ assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
+ Value *IncomingValue = PN->getIncomingValue(i);
+
+ // Save the incoming value for this PHI node...
+ ResultValues.push_back(getOperandValue(IncomingValue, SF));
+ }
+
+ // Now loop over all of the PHI nodes setting their values...
+ // Phase 2: commit the saved values; execution resumes after the last PHI.
+ SF.CurInst = SF.CurBB->begin();
+ for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
+ PHINode *PN = cast<PHINode>(SF.CurInst);
+ SetValue(PN, ResultValues[i], SF);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Memory Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// Handle 'alloca': heap-allocate storage for the requested element count and
+// register it with the frame so it is freed when the frame is popped.
+void Interpreter::visitAllocaInst(AllocaInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ Type *Ty = I.getAllocatedType(); // Type to be allocated
+
+ // Get the number of elements being allocated by the array...
+ unsigned NumElements =
+ getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
+
+ unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
+
+ // Avoid malloc-ing zero bytes, use max()...
+ // NOTE(review): NumElements * TypeSize is a 32-bit multiply and could wrap
+ // for very large allocations — confirm whether that is acceptable here.
+ unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
+
+ // Allocate enough memory to hold the type...
+ void *Memory = safe_malloc(MemToAlloc);
+
+ LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
+ << " bytes) x " << NumElements << " (Total: " << MemToAlloc
+ << ") at " << uintptr_t(Memory) << '\n');
+
+ GenericValue Result = PTOGV(Memory);
+ assert(Result.PointerVal && "Null pointer returned by malloc!");
+ SetValue(&I, Result, SF);
+
+ // Track the allocation for automatic release on frame exit.
+ if (I.getOpcode() == Instruction::Alloca)
+ ECStack.back().Allocas.add(Memory);
+}
+
+// getElementOffset - The workhorse for getelementptr.
+//
+GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E,
+ ExecutionContext &SF) {
+ assert(Ptr->getType()->isPointerTy() &&
+ "Cannot getElementOffset of a nonpointer type!");
+
+ uint64_t Total = 0;
+
+ for (; I != E; ++I) {
+ if (StructType *STy = I.getStructTypeOrNull()) {
+ const StructLayout *SLO = getDataLayout().getStructLayout(STy);
+
+ const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
+ unsigned Index = unsigned(CPU->getZExtValue());
+
+ Total += SLO->getElementOffset(Index);
+ } else {
+ // Get the index number for the array... which must be long type...
+ GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
+
+ int64_t Idx;
+ unsigned BitWidth =
+ cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
+ if (BitWidth == 32)
+ Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
+ else {
+ assert(BitWidth == 64 && "Invalid index type for getelementptr");
+ Idx = (int64_t)IdxGV.IntVal.getZExtValue();
+ }
+ Total += I.getSequentialElementStride(getDataLayout()) * Idx;
+ }
+ }
+
+ GenericValue Result;
+ Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
+ LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
+ return Result;
+}
+
+void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeGEPOperation(I.getPointerOperand(),
+ gep_type_begin(I), gep_type_end(I), SF), SF);
+}
+
+// Handle 'load': read a value of the instruction's type from the pointed-to
+// memory; optionally trace volatile accesses.
+void Interpreter::visitLoadInst(LoadInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
+ GenericValue Result;
+ LoadValueFromMemory(Result, Ptr, I.getType());
+ SetValue(&I, Result, SF);
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile load " << I;
+}
+
+// Handle 'store': write the evaluated operand into the pointed-to memory;
+// optionally trace volatile accesses.
+void Interpreter::visitStoreInst(StoreInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Val = getOperandValue(I.getOperand(0), SF);
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
+ I.getOperand(0)->getType());
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile store: " << I;
+}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// va_start: LLI represents a va_list as an (ec-stack-depth, var-arg-index)
+// pair; initialize it to the current frame's varargs, starting at index 0.
+void Interpreter::visitVAStartInst(VAStartInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue ArgIndex;
+ ArgIndex.UIntPairVal.first = ECStack.size() - 1;
+ ArgIndex.UIntPairVal.second = 0;
+ SetValue(&I, ArgIndex, SF);
+}
+
+void Interpreter::visitVAEndInst(VAEndInst &I) {
+ // va_end is a noop for the interpreter
+}
+
+// va_copy: duplicate the source va_list value into the destination.
+void Interpreter::visitVACopyInst(VACopyInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, getOperandValue(*I.arg_begin(), SF), SF);
+}
+
+// Handle an intrinsic call by lowering it to ordinary LLVM IR in place, then
+// repositioning the frame's instruction pointer at the newly inserted code.
+void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // If it is an unknown intrinsic function, use the intrinsic lowering
+ // class to transform it into hopefully tasty LLVM code.
+ //
+ // Remember our position relative to the lowered call, since lowering
+ // replaces the instruction and would otherwise invalidate CurInst.
+ BasicBlock::iterator Me(&I);
+ BasicBlock *Parent = I.getParent();
+ bool atBegin(Parent->begin() == Me);
+ if (!atBegin)
+ --Me;
+ IL->LowerIntrinsicCall(&I);
+
+ // Restore the CurInst pointer to the first instruction newly inserted, if
+ // any.
+ if (atBegin) {
+ SF.CurInst = Parent->begin();
+ } else {
+ SF.CurInst = Me;
+ ++SF.CurInst;
+ }
+}
+
+// Handle direct and indirect calls: evaluate all arguments, resolve the
+// callee operand to a Function pointer, and push a new frame for it.
+void Interpreter::visitCallBase(CallBase &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Record the pending call so the return value can be delivered to it.
+ SF.Caller = &I;
+ std::vector<GenericValue> ArgVals;
+ const unsigned NumArgs = SF.Caller->arg_size();
+ ArgVals.reserve(NumArgs);
+ for (Value *V : SF.Caller->args())
+ ArgVals.push_back(getOperandValue(V, SF));
+
+ // To handle indirect calls, we must get the pointer value from the argument
+ // and treat it as a function pointer.
+ GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF);
+ callFunction((Function*)GVTOP(SRC), ArgVals);
+}
+
+// auxiliary function for shift operations
+static unsigned getShiftAmount(uint64_t orgShiftAmount,
+ llvm::APInt valueToShift) {
+ unsigned valueWidth = valueToShift.getBitWidth();
+ if (orgShiftAmount < (uint64_t)valueWidth)
+ return orgShiftAmount;
+ // according to the llvm documentation, if orgShiftAmount > valueWidth,
+ // the result is undfeined. but we do shift by this rule:
+ return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
+}
+
+
+// Handle 'shl' (logical shift left) for scalars and element-wise for vectors;
+// over-wide shift amounts are clamped via getShiftAmount.
+void Interpreter::visitShl(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+// Handle 'lshr' (logical shift right, zero-filling) for scalars and vectors.
+void Interpreter::visitLShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+// Handle 'ashr' (arithmetic shift right, sign-filling) for scalars and vectors.
+void Interpreter::visitAShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ size_t src1Size = Src1.AggregateVal.size();
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+// Implements 'trunc': narrow an integer (or each vector element) to the
+// destination bit width.
+GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ Type *SrcTy = SrcVal->getType();
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned NumElts = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(NumElts);
+ for (unsigned i = 0; i < NumElts; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
+ } else {
+ IntegerType *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.trunc(DBitWidth);
+ }
+ return Dest;
+}
+
+// Implements 'sext': sign-extend an integer (or each vector element) to the
+// destination bit width.
+GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
+ } else {
+ auto *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.sext(DBitWidth);
+ }
+ return Dest;
+}
+
+// Implements 'zext': zero-extend an integer (or each vector element) to the
+// destination bit width.
+GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
+ } else {
+ auto *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.zext(DBitWidth);
+ }
+ return Dest;
+}
+
+// Implements 'fptrunc': this interpreter only supports double -> float
+// (scalar or element-wise), enforced by the asserts.
+GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
+ DstTy->getScalarType()->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
+ } else {
+ assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+ Dest.FloatVal = (float)Src.DoubleVal;
+ }
+
+ return Dest;
+}
+
+// Implements 'fpext': this interpreter only supports float -> double
+// (scalar or element-wise), enforced by the asserts.
+GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
+ DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
+ } else {
+ assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
+ "Invalid FPExt instruction");
+ Dest.DoubleVal = (double)Src.FloatVal;
+ }
+
+ return Dest;
+}
+
+// Implements 'fptoui': round float/double (scalar or element-wise) to an
+// unsigned APInt of the destination width via APIntOps::Round*ToAPInt.
+GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy)) {
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
+ uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+
+ if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+ assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+ Src.AggregateVal[i].FloatVal, DBitWidth);
+ } else {
+ // Non-float element type is treated as double here.
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+ Src.AggregateVal[i].DoubleVal, DBitWidth);
+ }
+ } else {
+ // scalar
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else {
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ }
+ }
+
+ return Dest;
+}
+
+// Implements 'fptosi': same conversion machinery as fptoui; the rounding
+// helpers produce the bit pattern placed in the destination APInt.
+GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy)) {
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
+ uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+ assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+ Src.AggregateVal[i].FloatVal, DBitWidth);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+ Src.AggregateVal[i].DoubleVal, DBitWidth);
+ }
+ } else {
+ // scalar
+ unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else {
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ }
+ }
+ return Dest;
+}
+
+// Implements 'uitofp': convert an unsigned integer (scalar or element-wise)
+// to float or double, chosen by the destination's scalar type.
+GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (DstVecTy->getTypeID() == Type::FloatTyID) {
+ assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal =
+ APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
+ }
+ } else {
+ // scalar
+ assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
+ else {
+ Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
+ }
+ }
+ return Dest;
+}
+
+// Implements 'sitofp': like uitofp but uses the signed rounding helpers.
+GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (DstVecTy->getTypeID() == Type::FloatTyID) {
+ assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal =
+ APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
+ }
+ } else {
+ // scalar
+ assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
+ else {
+ Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
+ }
+ }
+
+ return Dest;
+}
+
+// Implements 'ptrtoint': reinterpret the host pointer value as an integer of
+// the destination width.
+GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
+
+ Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
+ return Dest;
+}
+
+// Implements 'inttoptr': resize the integer to the target's pointer width and
+// reinterpret it as a host pointer.
+GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ // NOTE(review): the assert message says "PtrToInt" but this is IntToPtr —
+ // looks like a copy-paste in the message text.
+ assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
+
+ uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
+ if (PtrSize != Src.IntVal.getBitWidth())
+ Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
+
+ Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
+ return Dest;
+}
+
+// Implements 'bitcast'.  Scalar<->scalar casts copy bits directly; when a
+// vector is involved, both operands are normalized to integer vectors, the
+// bits are repacked to the destination element width (endian-aware), and the
+// result is converted back to the destination element type.
+GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+
+ // This instruction supports bitwise conversion of vectors to integers and
+ // to vectors of other types (as long as they have the same size)
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy) || isa<VectorType>(DstTy)) {
+ // vector src bitcast to vector dst or vector src bitcast to scalar dst or
+ // scalar src bitcast to vector dst
+ bool isLittleEndian = getDataLayout().isLittleEndian();
+ GenericValue TempDst, TempSrc, SrcVec;
+ Type *SrcElemTy;
+ Type *DstElemTy;
+ unsigned SrcBitSize;
+ unsigned DstBitSize;
+ unsigned SrcNum;
+ unsigned DstNum;
+
+ if (isa<VectorType>(SrcTy)) {
+ SrcElemTy = SrcTy->getScalarType();
+ SrcBitSize = SrcTy->getScalarSizeInBits();
+ SrcNum = Src.AggregateVal.size();
+ SrcVec = Src;
+ } else {
+ // if src is scalar value, make it vector <1 x type>
+ SrcElemTy = SrcTy;
+ SrcBitSize = SrcTy->getPrimitiveSizeInBits();
+ SrcNum = 1;
+ SrcVec.AggregateVal.push_back(Src);
+ }
+
+ if (isa<VectorType>(DstTy)) {
+ DstElemTy = DstTy->getScalarType();
+ DstBitSize = DstTy->getScalarSizeInBits();
+ DstNum = (SrcNum * SrcBitSize) / DstBitSize;
+ } else {
+ DstElemTy = DstTy;
+ DstBitSize = DstTy->getPrimitiveSizeInBits();
+ DstNum = 1;
+ }
+
+ // Bitcast never changes the total bit count.
+ if (SrcNum * SrcBitSize != DstNum * DstBitSize)
+ llvm_unreachable("Invalid BitCast");
+
+ // If src is floating point, cast to integer first.
+ TempSrc.AggregateVal.resize(SrcNum);
+ if (SrcElemTy->isFloatTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
+
+ } else if (SrcElemTy->isDoubleTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
+ } else if (SrcElemTy->isIntegerTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
+ } else {
+ // Pointers are not allowed as the element type of vector.
+ llvm_unreachable("Invalid Bitcast");
+ }
+
+ // now TempSrc is integer type vector
+ if (DstNum < SrcNum) {
+ // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
+ // Widening: pack Ratio consecutive source elements into each dest elt,
+ // shifting each into its endian-dependent bit position.
+ unsigned Ratio = SrcNum / DstNum;
+ unsigned SrcElt = 0;
+ for (unsigned i = 0; i < DstNum; i++) {
+ GenericValue Elt;
+ Elt.IntVal = 0;
+ Elt.IntVal = Elt.IntVal.zext(DstBitSize);
+ unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ APInt Tmp;
+ Tmp = Tmp.zext(SrcBitSize);
+ Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
+ Tmp = Tmp.zext(DstBitSize);
+ Tmp <<= ShiftAmt;
+ // Little-endian fills from the low bits upward; big-endian from the
+ // high bits downward.
+ ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
+ Elt.IntVal |= Tmp;
+ }
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ } else {
+ // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
+ // Narrowing: slice each source element into Ratio dest elements.
+ unsigned Ratio = DstNum / SrcNum;
+ for (unsigned i = 0; i < SrcNum; i++) {
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ GenericValue Elt;
+ Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
+ Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
+ Elt.IntVal.lshrInPlace(ShiftAmt);
+ // it could be DstBitSize == SrcBitSize, so check it
+ if (DstBitSize < SrcBitSize)
+ Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ }
+ }
+
+ // convert result from integer to specified type
+ if (isa<VectorType>(DstTy)) {
+ if (DstElemTy->isDoubleTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ TempDst.AggregateVal[i].IntVal.bitsToDouble();
+ } else if (DstElemTy->isFloatTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].FloatVal =
+ TempDst.AggregateVal[i].IntVal.bitsToFloat();
+ } else {
+ Dest = TempDst;
+ }
+ } else {
+ if (DstElemTy->isDoubleTy())
+ Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
+ else if (DstElemTy->isFloatTy()) {
+ Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
+ } else {
+ Dest.IntVal = TempDst.AggregateVal[0].IntVal;
+ }
+ }
+ } else { // if (isa<VectorType>(SrcTy)) || isa<VectorType>(DstTy))
+
+ // scalar src bitcast to scalar dst
+ if (DstTy->isPointerTy()) {
+ assert(SrcTy->isPointerTy() && "Invalid BitCast");
+ Dest.PointerVal = Src.PointerVal;
+ } else if (DstTy->isIntegerTy()) {
+ if (SrcTy->isFloatTy())
+ Dest.IntVal = APInt::floatToBits(Src.FloatVal);
+ else if (SrcTy->isDoubleTy()) {
+ Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
+ } else if (SrcTy->isIntegerTy()) {
+ Dest.IntVal = Src.IntVal;
+ } else {
+ llvm_unreachable("Invalid BitCast");
+ }
+ } else if (DstTy->isFloatTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.FloatVal = Src.IntVal.bitsToFloat();
+ else {
+ Dest.FloatVal = Src.FloatVal;
+ }
+ } else if (DstTy->isDoubleTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.DoubleVal = Src.IntVal.bitsToDouble();
+ else {
+ Dest.DoubleVal = Src.DoubleVal;
+ }
+ } else {
+ llvm_unreachable("Invalid Bitcast");
+ }
+ }
+
+ return Dest;
+}
+
+void Interpreter::visitTruncInst(TruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Sign-extend the integer operand to I's wider integer type.
+void Interpreter::visitSExtInst(SExtInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Zero-extend the integer operand to I's wider integer type.
+void Interpreter::visitZExtInst(ZExtInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Truncate the floating-point operand to I's narrower FP type.
+void Interpreter::visitFPTruncInst(FPTruncInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Extend the floating-point operand to I's wider FP type.
+void Interpreter::visitFPExtInst(FPExtInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Convert the operand, interpreted as an unsigned integer, to floating point.
+void Interpreter::visitUIToFPInst(UIToFPInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Convert the operand, interpreted as a signed integer, to floating point.
+void Interpreter::visitSIToFPInst(SIToFPInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Convert the floating-point operand to an unsigned integer.
+void Interpreter::visitFPToUIInst(FPToUIInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Convert the floating-point operand to a signed integer.
+void Interpreter::visitFPToSIInst(FPToSIInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Reinterpret the pointer operand as an integer of I's type.
+void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Reinterpret the integer operand as a pointer of I's type.
+void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Bit-for-bit reinterpretation of the operand as I's type (scalar or vector);
+// the heavy lifting is in executeBitCastInst above.
+void Interpreter::visitBitCastInst(BitCastInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+#define IMPLEMENT_VAARG(TY) \
+ case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
+
+// Fetch the next variadic argument for a va_arg instruction. LLI models a
+// va_list value as a (stack-frame-depth, vararg-index) pair packed into a
+// GenericValue, indexing into the VarArgs vector saved on that frame.
+void Interpreter::visitVAArgInst(VAArgInst &I) {
+  ExecutionContext &SF = ECStack.back();
+
+  // Get the incoming valist parameter. LLI treats the valist as a
+  // (ec-stack-depth var-arg-index) pair.
+  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
+  GenericValue Dest;
+  GenericValue Src = ECStack[VAList.UIntPairVal.first]
+                      .VarArgs[VAList.UIntPairVal.second];
+  Type *Ty = I.getType();
+  // Copy only the union member matching the result type; other members of
+  // GenericValue would be garbage for this type.
+  switch (Ty->getTypeID()) {
+  case Type::IntegerTyID:
+    Dest.IntVal = Src.IntVal;
+    break;
+  IMPLEMENT_VAARG(Pointer);
+  IMPLEMENT_VAARG(Float);
+  IMPLEMENT_VAARG(Double);
+  default:
+    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
+    llvm_unreachable(nullptr);
+  }
+
+  // Set the Value of this Instruction.
+  SetValue(&I, Dest, SF);
+
+  // Move the pointer to the next vararg.
+  // NOTE(review): VAList is a local copy of the operand's value; this
+  // increment is never stored back into the frame, so the advance appears
+  // to have no observable effect — confirm intended va_arg semantics.
+  ++VAList.UIntPairVal.second;
+}
+
+// extractelement: read element Src2 (index) out of vector Src1 and bind the
+// scalar result to I. Only integer/float/double element types are handled.
+void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue Dest;
+
+  Type *Ty = I.getType();
+  const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
+
+  // Guard against an out-of-range index before touching AggregateVal.
+  if(Src1.AggregateVal.size() > indx) {
+    switch (Ty->getTypeID()) {
+    default:
+      dbgs() << "Unhandled destination type for extractelement instruction: "
+      << *Ty << "\n";
+      llvm_unreachable(nullptr);
+      break;
+    case Type::IntegerTyID:
+      Dest.IntVal = Src1.AggregateVal[indx].IntVal;
+      break;
+    case Type::FloatTyID:
+      Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
+      break;
+    case Type::DoubleTyID:
+      Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
+      break;
+    }
+  } else {
+    // Invalid index: only a diagnostic is emitted and Dest is left
+    // default-constructed before being bound below.
+    dbgs() << "Invalid index in extractelement instruction\n";
+  }
+
+  SetValue(&I, Dest, SF);
+}
+
+// insertelement: copy vector Src1, overwrite element Src3 (index) with the
+// scalar Src2, and bind the new vector to I.
+void Interpreter::visitInsertElementInst(InsertElementInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  VectorType *Ty = cast<VectorType>(I.getType());
+
+  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+  GenericValue Dest;
+
+  Type *TyContained = Ty->getElementType();
+
+  const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
+  // Start from a full copy of the source vector; only one slot changes.
+  Dest.AggregateVal = Src1.AggregateVal;
+
+  if(Src1.AggregateVal.size() <= indx)
+      llvm_unreachable("Invalid index in insertelement instruction");
+  switch (TyContained->getTypeID()) {
+    default:
+      llvm_unreachable("Unhandled dest type for insertelement instruction");
+    case Type::IntegerTyID:
+      Dest.AggregateVal[indx].IntVal = Src2.IntVal;
+      break;
+    case Type::FloatTyID:
+      Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
+      break;
+    case Type::DoubleTyID:
+      Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
+      break;
+  }
+  SetValue(&I, Dest, SF);
+}
+
+// shufflevector: build a new vector of getShuffleMask().size() elements,
+// where mask entry i selects element j from Src1 (j < src1Size) or from
+// Src2 (src1Size <= j < src1Size + src2Size).
+void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
+  ExecutionContext &SF = ECStack.back();
+
+  VectorType *Ty = cast<VectorType>(I.getType());
+
+  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue Dest;
+
+  // There is no need to check types of src1 and src2, because the compiled
+  // bytecode can't contain different types for src1 and src2 for a
+  // shufflevector instruction.
+
+  Type *TyContained = Ty->getElementType();
+  unsigned src1Size = (unsigned)Src1.AggregateVal.size();
+  unsigned src2Size = (unsigned)Src2.AggregateVal.size();
+  unsigned src3Size = I.getShuffleMask().size();
+
+  Dest.AggregateVal.resize(src3Size);
+
+  switch (TyContained->getTypeID()) {
+    default:
+      // NOTE(review): message text appears copy-pasted from
+      // visitInsertElementInst; it fires for shufflevector.
+      llvm_unreachable("Unhandled dest type for insertelement instruction");
+      break;
+    case Type::IntegerTyID:
+      for( unsigned i=0; i<src3Size; i++) {
+        // getMaskValue may return -1 (undef lane); clamp it to 0.
+        unsigned j = std::max(0, I.getMaskValue(i));
+        if(j < src1Size)
+          Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
+        else if(j < src1Size + src2Size)
+          Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
+        else
+          // The selector may not be greater than sum of lengths of first and
+          // second operands and llasm should not allow situation like
+          // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
+          //                      <2 x i32> < i32 0, i32 5 >,
+          // where i32 5 is invalid, but let it be additional check here:
+          llvm_unreachable("Invalid mask in shufflevector instruction");
+      }
+      break;
+    case Type::FloatTyID:
+      for( unsigned i=0; i<src3Size; i++) {
+        unsigned j = std::max(0, I.getMaskValue(i));
+        if(j < src1Size)
+          Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
+        else if(j < src1Size + src2Size)
+          Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
+        else
+          llvm_unreachable("Invalid mask in shufflevector instruction");
+        }
+      break;
+    case Type::DoubleTyID:
+      for( unsigned i=0; i<src3Size; i++) {
+        unsigned j = std::max(0, I.getMaskValue(i));
+        if(j < src1Size)
+          Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
+        else if(j < src1Size + src2Size)
+          Dest.AggregateVal[i].DoubleVal =
+            Src2.AggregateVal[j-src1Size].DoubleVal;
+        else
+          llvm_unreachable("Invalid mask in shufflevector instruction");
+      }
+      break;
+  }
+  SetValue(&I, Dest, SF);
+}
+
+// extractvalue: walk the index list into the aggregate operand's nested
+// AggregateVal representation and copy out the addressed sub-value.
+void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  Value *Agg = I.getAggregateOperand();
+  GenericValue Dest;
+  GenericValue Src = getOperandValue(Agg, SF);
+
+  ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+  unsigned Num = I.getNumIndices();
+  GenericValue *pSrc = &Src;
+
+  // Descend one aggregate level per index.
+  for (unsigned i = 0 ; i < Num; ++i) {
+    pSrc = &pSrc->AggregateVal[*IdxBegin];
+    ++IdxBegin;
+  }
+
+  // Copy the union member that matches the indexed type.
+  Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+  switch (IndexedType->getTypeID()) {
+    default:
+      // NOTE(review): message text appears copy-pasted; it fires for
+      // extractvalue, not extractelement.
+      llvm_unreachable("Unhandled dest type for extractelement instruction");
+    break;
+    case Type::IntegerTyID:
+      Dest.IntVal = pSrc->IntVal;
+    break;
+    case Type::FloatTyID:
+      Dest.FloatVal = pSrc->FloatVal;
+    break;
+    case Type::DoubleTyID:
+      Dest.DoubleVal = pSrc->DoubleVal;
+    break;
+    case Type::ArrayTyID:
+    case Type::StructTyID:
+    case Type::FixedVectorTyID:
+    case Type::ScalableVectorTyID:
+      Dest.AggregateVal = pSrc->AggregateVal;
+    break;
+    case Type::PointerTyID:
+      Dest.PointerVal = pSrc->PointerVal;
+    break;
+  }
+
+  SetValue(&I, Dest, SF);
+}
+
+// insertvalue: copy the aggregate operand, walk the index list to the
+// addressed sub-value inside the copy, overwrite it with Src2, and bind
+// the modified aggregate to I.
+void Interpreter::visitInsertValueInst(InsertValueInst &I) {
+
+  ExecutionContext &SF = ECStack.back();
+  Value *Agg = I.getAggregateOperand();
+
+  GenericValue Src1 = getOperandValue(Agg, SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue Dest = Src1; // Dest is a slightly changed Src1
+
+  ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+  unsigned Num = I.getNumIndices();
+
+  GenericValue *pDest = &Dest;
+  for (unsigned i = 0 ; i < Num; ++i) {
+    pDest = &pDest->AggregateVal[*IdxBegin];
+    ++IdxBegin;
+  }
+  // pDest points to the target value in the Dest now
+
+  Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+
+  // Overwrite the union member that matches the indexed type.
+  switch (IndexedType->getTypeID()) {
+    default:
+      // NOTE(review): message text appears copy-pasted; it fires for
+      // insertvalue, not insertelement.
+      llvm_unreachable("Unhandled dest type for insertelement instruction");
+    break;
+    case Type::IntegerTyID:
+      pDest->IntVal = Src2.IntVal;
+    break;
+    case Type::FloatTyID:
+      pDest->FloatVal = Src2.FloatVal;
+    break;
+    case Type::DoubleTyID:
+      pDest->DoubleVal = Src2.DoubleVal;
+    break;
+    case Type::ArrayTyID:
+    case Type::StructTyID:
+    case Type::FixedVectorTyID:
+    case Type::ScalableVectorTyID:
+      pDest->AggregateVal = Src2.AggregateVal;
+    break;
+    case Type::PointerTyID:
+      pDest->PointerVal = Src2.PointerVal;
+    break;
+  }
+
+  SetValue(&I, Dest, SF);
+}
+
+// Evaluate a ConstantExpr by reusing the interpreter's execute* helpers.
+// Cast-like and GEP expressions return directly from the first switch; any
+// other opcode falls through to the two-operand integer path below, which
+// handles the remaining constant-expression opcodes the IR can produce here.
+GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
+                                                ExecutionContext &SF) {
+  switch (CE->getOpcode()) {
+  case Instruction::Trunc:
+      return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
+  case Instruction::PtrToInt:
+      return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
+  case Instruction::IntToPtr:
+      return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
+  case Instruction::BitCast:
+      return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
+  case Instruction::GetElementPtr:
+    return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
+                               gep_type_end(CE), SF);
+    break;
+  }
+
+  // The cases below here require a GenericValue parameter for the result
+  // so we initialize one, compute it and then return it.
+  // Note this path reads operand 1 unconditionally, so it assumes a binary
+  // expression.
+  GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
+  GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
+  GenericValue Dest;
+  switch (CE->getOpcode()) {
+  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
+  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
+  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
+  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
+  case Instruction::Shl:
+    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
+    break;
+  default:
+    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
+    llvm_unreachable("Unhandled ConstantExpr");
+  }
+  return Dest;
+}
+
+// Resolve an operand to a GenericValue: constant expressions are evaluated,
+// plain constants converted, and everything else looked up in the current
+// frame's SSA value map.
+GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+    return getConstantExprValue(CE, SF);
+  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
+    return getConstantValue(CPV);
+  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+    // NOTE(review): GlobalValue derives from Constant, so the Constant branch
+    // above catches globals first and this branch appears unreachable.
+    return PTOGV(getPointerToGlobal(GV));
+  } else {
+    return SF.Values[V];
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Dispatch and Execution Code
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// callFunction - Execute the specified function...
+//
+//===----------------------------------------------------------------------===//
+// callFunction - Execute the specified function...
+//
+// Pushes a new stack frame for F and seeds it with ArgVals. Declarations
+// (external functions) are dispatched to callExternalFunction and their
+// frame is popped immediately, as if they executed a 'ret'.
+void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
+  assert((ECStack.empty() || !ECStack.back().Caller ||
+          ECStack.back().Caller->arg_size() == ArgVals.size()) &&
+         "Incorrect number of arguments passed into function call!");
+  // Make a new stack frame... and fill it in.
+  ECStack.emplace_back();
+  ExecutionContext &StackFrame = ECStack.back();
+  StackFrame.CurFunction = F;
+
+  // Special handling for external functions.
+  if (F->isDeclaration()) {
+    GenericValue Result = callExternalFunction (F, ArgVals);
+    // Simulate a 'ret' instruction of the appropriate type.
+    popStackAndReturnValueToCaller (F->getReturnType (), Result);
+    return;
+  }
+
+  // Get pointers to first LLVM BB & Instruction in function.
+  StackFrame.CurBB     = &F->front();
+  StackFrame.CurInst   = StackFrame.CurBB->begin();
+
+  // Run through the function arguments and initialize their values...
+  assert((ArgVals.size() == F->arg_size() ||
+         (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
+         "Invalid number of values passed to function invocation!");
+
+  // Handle non-varargs arguments...
+  unsigned i = 0;
+  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+       AI != E; ++AI, ++i)
+    SetValue(&*AI, ArgVals[i], StackFrame);
+
+  // Handle varargs arguments...
+  // Any ArgVals beyond the formal parameter count become this frame's
+  // VarArgs, consumed later by va_arg.
+  StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
+}
+
+
+// Main interpreter loop: fetch, advance the PC, then dispatch each
+// instruction via InstVisitor until the call stack is empty.
+void Interpreter::run() {
+  while (!ECStack.empty()) {
+    // Interpret a single instruction & increment the "PC".
+    ExecutionContext &SF = ECStack.back();  // Current stack frame
+    Instruction &I = *SF.CurInst++;         // Increment before execute
+
+    // Track the number of dynamic instructions executed.
+    ++NumDynamicInsts;
+
+    LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
+    visit(I);   // Dispatch to one of the visit* methods...
+  }
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
new file mode 100644
index 000000000000..4f8f883a75f3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -0,0 +1,533 @@
+//===-- ExternalFunctions.cpp - Implement External Functions --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains both code to deal with invoking "external" functions, but
+// also contains code that implements "exported" external functions.
+//
+// There are currently two mechanisms for handling external functions in the
+// Interpreter. The first is to implement lle_* wrapper functions that are
+// specific to well-known library functions which manually translate the
+// arguments from GenericValues and make the call. If such a wrapper does
+// not exist, and libffi is available, then the Interpreter will attempt to
+// invoke the function using libffi, after finding its address.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Config/config.h" // Detect libffi
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cmath>
+#include <csignal>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <map>
+#include <mutex>
+#include <string>
+#include <utility>
+#include <vector>
+
+#ifdef HAVE_FFI_CALL
+#ifdef HAVE_FFI_H
+#include <ffi.h>
+#define USE_LIBFFI
+#elif HAVE_FFI_FFI_H
+#include <ffi/ffi.h>
+#define USE_LIBFFI
+#endif
+#endif
+
+using namespace llvm;
+
+namespace {
+
+// Signature shared by all lle_* wrapper functions.
+typedef GenericValue (*ExFunc)(FunctionType *, ArrayRef<GenericValue>);
+typedef void (*RawFunc)();
+
+// Process-wide registry of resolved external functions, guarded by Lock.
+struct Functions {
+  sys::Mutex Lock;
+  // Function -> lle_* wrapper, cached after the first lookup.
+  std::map<const Function *, ExFunc> ExportedFunctions;
+  // Mangled lle_* name -> wrapper, populated by initializeExternalFunctions.
+  std::map<std::string, ExFunc> FuncNames;
+#ifdef USE_LIBFFI
+  // Function -> raw native entry point, used for libffi-based calls.
+  std::map<const Function *, RawFunc> RawFunctions;
+#endif
+};
+
+// Lazily-constructed singleton holding the registry (Meyers singleton).
+Functions &getFunctions() {
+  static Functions F;
+  return F;
+}
+
+} // anonymous namespace
+
+static Interpreter *TheInterpreter;
+
+// Encode a type as the single character used in mangled lle_* wrapper
+// names (e.g. i32 -> 'I', double -> 'D'); 'U' for anything unrecognized.
+static char getTypeID(Type *Ty) {
+  switch (Ty->getTypeID()) {
+  case Type::VoidTyID:    return 'V';
+  case Type::IntegerTyID:
+    switch (cast<IntegerType>(Ty)->getBitWidth()) {
+    case 1:  return 'o';
+    case 8:  return 'B';
+    case 16: return 'S';
+    case 32: return 'I';
+    case 64: return 'L';
+    default: return 'N';
+    }
+  case Type::FloatTyID:   return 'F';
+  case Type::DoubleTyID:  return 'D';
+  case Type::PointerTyID: return 'P';
+  case Type::FunctionTyID:return 'M';
+  case Type::StructTyID:  return 'T';
+  case Type::ArrayTyID:   return 'A';
+  default: return 'U';
+  }
+}
+
+// Try to find address of external function given a Function object.
+// Please note, that interpreter doesn't know how to assemble a
+// real call in general case (this is JIT job), that's why it assumes,
+// that all external functions has the same (and pretty "general") signature.
+// The typical example of such functions are "lle_X_" ones.
+// Try to find address of external function given a Function object.
+// Please note, that interpreter doesn't know how to assemble a
+// real call in general case (this is JIT job), that's why it assumes,
+// that all external functions has the same (and pretty "general") signature.
+// The typical example of such functions are "lle_X_" ones.
+static ExFunc lookupFunction(const Function *F) {
+  // Function not found, look it up... start by figuring out what the
+  // composite function name should be.
+  // Tries, in order: exact signature-mangled "lle_<sig>_<name>", then the
+  // generic "lle_X_<name>" map entry, then a dynamic-library symbol search.
+  std::string ExtName = "lle_";
+  FunctionType *FT = F->getFunctionType();
+  ExtName += getTypeID(FT->getReturnType());
+  for (Type *T : FT->params())
+    ExtName += getTypeID(T);
+  ExtName += ("_" + F->getName()).str();
+
+  auto &Fns = getFunctions();
+  sys::ScopedLock Writer(Fns.Lock);
+  // operator[] inserts a null entry on miss, which is fine for this cache.
+  ExFunc FnPtr = Fns.FuncNames[ExtName];
+  if (!FnPtr)
+    FnPtr = Fns.FuncNames[("lle_X_" + F->getName()).str()];
+  if (!FnPtr)  // Try calling a generic function... if it exists...
+    FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
+        ("lle_X_" + F->getName()).str());
+  if (FnPtr)
+    Fns.ExportedFunctions.insert(std::make_pair(F, FnPtr));  // Cache for later
+  return FnPtr;
+}
+
+#ifdef USE_LIBFFI
+// Map an LLVM type to the corresponding libffi type descriptor; fatal error
+// for anything libffi marshalling does not support.
+static ffi_type *ffiTypeFor(Type *Ty) {
+  switch (Ty->getTypeID()) {
+    case Type::VoidTyID: return &ffi_type_void;
+    case Type::IntegerTyID:
+      switch (cast<IntegerType>(Ty)->getBitWidth()) {
+        case 8:  return &ffi_type_sint8;
+        case 16: return &ffi_type_sint16;
+        case 32: return &ffi_type_sint32;
+        case 64: return &ffi_type_sint64;
+      }
+      llvm_unreachable("Unhandled integer type bitwidth");
+    case Type::FloatTyID:   return &ffi_type_float;
+    case Type::DoubleTyID:  return &ffi_type_double;
+    case Type::PointerTyID: return &ffi_type_pointer;
+    default: break;
+  }
+  // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+  report_fatal_error("Type could not be mapped for use with libffi.");
+  // Unreachable: report_fatal_error does not return.
+  return NULL;
+}
+
+// Store the GenericValue AV into the caller-provided raw argument slot
+// ArgDataPtr in the native representation libffi expects, returning the
+// slot pointer. Fatal error for unsupported types.
+static void *ffiValueFor(Type *Ty, const GenericValue &AV,
+                         void *ArgDataPtr) {
+  switch (Ty->getTypeID()) {
+    case Type::IntegerTyID:
+      switch (cast<IntegerType>(Ty)->getBitWidth()) {
+        case 8: {
+          int8_t *I8Ptr = (int8_t *) ArgDataPtr;
+          *I8Ptr = (int8_t) AV.IntVal.getZExtValue();
+          return ArgDataPtr;
+        }
+        case 16: {
+          int16_t *I16Ptr = (int16_t *) ArgDataPtr;
+          *I16Ptr = (int16_t) AV.IntVal.getZExtValue();
+          return ArgDataPtr;
+        }
+        case 32: {
+          int32_t *I32Ptr = (int32_t *) ArgDataPtr;
+          *I32Ptr = (int32_t) AV.IntVal.getZExtValue();
+          return ArgDataPtr;
+        }
+        case 64: {
+          int64_t *I64Ptr = (int64_t *) ArgDataPtr;
+          *I64Ptr = (int64_t) AV.IntVal.getZExtValue();
+          return ArgDataPtr;
+        }
+      }
+      llvm_unreachable("Unhandled integer type bitwidth");
+    case Type::FloatTyID: {
+      float *FloatPtr = (float *) ArgDataPtr;
+      *FloatPtr = AV.FloatVal;
+      return ArgDataPtr;
+    }
+    case Type::DoubleTyID: {
+      double *DoublePtr = (double *) ArgDataPtr;
+      *DoublePtr = AV.DoubleVal;
+      return ArgDataPtr;
+    }
+    case Type::PointerTyID: {
+      void **PtrPtr = (void **) ArgDataPtr;
+      *PtrPtr = GVTOP(AV);
+      return ArgDataPtr;
+    }
+    default: break;
+  }
+  // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+  report_fatal_error("Type value could not be mapped for use with libffi.");
+  // Unreachable: report_fatal_error does not return.
+  return NULL;
+}
+
+// Call the native entry point Fn via libffi: build the call interface from
+// F's declared signature, marshal ArgVals into a packed buffer, perform the
+// call, and unmarshal the return value into Result. Returns true on success,
+// false if ffi_prep_cif fails.
+static bool ffiInvoke(RawFunc Fn, Function *F, ArrayRef<GenericValue> ArgVals,
+                      const DataLayout &TD, GenericValue &Result) {
+  ffi_cif cif;
+  FunctionType *FTy = F->getFunctionType();
+  const unsigned NumArgs = F->arg_size();
+
+  // TODO: We don't have type information about the remaining arguments, because
+  // this information is never passed into ExecutionEngine::runFunction().
+  if (ArgVals.size() > NumArgs && F->isVarArg()) {
+    report_fatal_error("Calling external var arg function '" + F->getName()
+                      + "' is not supported by the Interpreter.");
+  }
+
+  unsigned ArgBytes = 0;
+
+  // First pass: collect ffi type descriptors and total marshalling size.
+  std::vector<ffi_type*> args(NumArgs);
+  for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+       A != E; ++A) {
+    const unsigned ArgNo = A->getArgNo();
+    Type *ArgTy = FTy->getParamType(ArgNo);
+    args[ArgNo] = ffiTypeFor(ArgTy);
+    ArgBytes += TD.getTypeStoreSize(ArgTy);
+  }
+
+  // Second pass: write each argument's native bytes into ArgData and record
+  // per-argument slot pointers for ffi_call.
+  SmallVector<uint8_t, 128> ArgData;
+  ArgData.resize(ArgBytes);
+  uint8_t *ArgDataPtr = ArgData.data();
+  SmallVector<void*, 16> values(NumArgs);
+  for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+       A != E; ++A) {
+    const unsigned ArgNo = A->getArgNo();
+    Type *ArgTy = FTy->getParamType(ArgNo);
+    values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
+    ArgDataPtr += TD.getTypeStoreSize(ArgTy);
+  }
+
+  Type *RetTy = FTy->getReturnType();
+  ffi_type *rtype = ffiTypeFor(RetTy);
+
+  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, args.data()) ==
+      FFI_OK) {
+    SmallVector<uint8_t, 128> ret;
+    if (RetTy->getTypeID() != Type::VoidTyID)
+      ret.resize(TD.getTypeStoreSize(RetTy));
+    ffi_call(&cif, Fn, ret.data(), values.data());
+    // Unmarshal the raw return bytes into the matching GenericValue member.
+    switch (RetTy->getTypeID()) {
+      case Type::IntegerTyID:
+        switch (cast<IntegerType>(RetTy)->getBitWidth()) {
+          case 8:  Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
+          case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
+          case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
+          case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
+        }
+        break;
+      case Type::FloatTyID:   Result.FloatVal   = *(float *) ret.data(); break;
+      case Type::DoubleTyID:  Result.DoubleVal  = *(double*) ret.data(); break;
+      case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
+      default: break;
+    }
+    return true;
+  }
+
+  return false;
+}
+#endif // USE_LIBFFI
+
+// Dispatch a call to a function with no IR body: first try a cached or
+// freshly-looked-up lle_* wrapper, then (with libffi) a raw native symbol,
+// and finally report a fatal error for anything unresolved.
+GenericValue Interpreter::callExternalFunction(Function *F,
+                                               ArrayRef<GenericValue> ArgVals) {
+  // lle_* wrappers reach the interpreter through this file-static pointer.
+  TheInterpreter = this;
+
+  auto &Fns = getFunctions();
+  std::unique_lock<sys::Mutex> Guard(Fns.Lock);
+
+  // Do a lookup to see if the function is in our cache... this should just be a
+  // deferred annotation!
+  std::map<const Function *, ExFunc>::iterator FI =
+      Fns.ExportedFunctions.find(F);
+  if (ExFunc Fn = (FI == Fns.ExportedFunctions.end()) ? lookupFunction(F)
+                                                      : FI->second) {
+    // Drop the registry lock before running the wrapper, which may call back
+    // into the interpreter.
+    Guard.unlock();
+    return Fn(F->getFunctionType(), ArgVals);
+  }
+
+#ifdef USE_LIBFFI
+  std::map<const Function *, RawFunc>::iterator RF = Fns.RawFunctions.find(F);
+  RawFunc RawFn;
+  if (RF == Fns.RawFunctions.end()) {
+    RawFn = (RawFunc)(intptr_t)
+      sys::DynamicLibrary::SearchForAddressOfSymbol(std::string(F->getName()));
+    if (!RawFn)
+      RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
+    if (RawFn != 0)
+      Fns.RawFunctions.insert(std::make_pair(F, RawFn));  // Cache for later
+  } else {
+    RawFn = RF->second;
+  }
+
+  Guard.unlock();
+
+  GenericValue Result;
+  if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
+    return Result;
+#endif // USE_LIBFFI
+
+  if (F->getName() == "__main")
+    errs() << "Tried to execute an unknown external function: "
+           << *F->getType() << " __main\n";
+  else
+    report_fatal_error("Tried to execute an unknown external function: " +
+                   F->getName());
+#ifndef USE_LIBFFI
+  errs() << "Recompiling LLVM with --enable-libffi might help.\n";
+#endif
+  return GenericValue();
+}
+
+//===----------------------------------------------------------------------===//
+// Functions "exported" to the running application...
+//
+
+// void atexit(Function*)
+static GenericValue lle_X_atexit(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() == 1);
+ TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// void exit(int)
+// void exit(int)
+// Terminates the interpreted program with the given exit code.
+static GenericValue lle_X_exit(FunctionType *FT, ArrayRef<GenericValue> Args) {
+  TheInterpreter->exitCalled(Args[0]);
+  return GenericValue();
+}
+
+// void abort(void)
+// void abort(void)
+// Raises SIGABRT in the hosting lli process on the interpreted program's
+// behalf.
+static GenericValue lle_X_abort(FunctionType *FT, ArrayRef<GenericValue> Args) {
+  //FIXME: should we report or raise here?
+  //report_fatal_error("Interpreted program raised SIGABRT");
+  raise (SIGABRT);
+  return GenericValue();
+}
+
+// Silence warnings about sprintf. (See also
+// https://github.com/llvm/llvm-project/issues/58086)
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+// int sprintf(char *, const char *, ...) - a very rough implementation to make
+// output useful.
+// int sprintf(char *, const char *, ...) - a very rough implementation to make
+// output useful.
+// Scans the format string by hand, re-dispatching each %-specifier to the
+// host sprintf with the matching GenericValue argument. Fixed-size FmtBuf /
+// Buffer have no bounds checking, and the returned count is strlen of the
+// format string rather than of the produced output (see comment below).
+static GenericValue lle_X_sprintf(FunctionType *FT,
+                                  ArrayRef<GenericValue> Args) {
+  char *OutputBuffer = (char *)GVTOP(Args[0]);
+  const char *FmtStr = (const char *)GVTOP(Args[1]);
+  unsigned ArgNo = 2;
+
+  // printf should return # chars printed.  This is completely incorrect, but
+  // close enough for now.
+  GenericValue GV;
+  GV.IntVal = APInt(32, strlen(FmtStr));
+  while (true) {
+    switch (*FmtStr) {
+    case 0: return GV;             // Null terminator...
+    default:                       // Normal nonspecial character
+      sprintf(OutputBuffer++, "%c", *FmtStr++);
+      break;
+    case '\\': {                   // Handle escape codes
+      sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1));
+      FmtStr += 2; OutputBuffer += 2;
+      break;
+    }
+    case '%': {                    // Handle format specifiers
+      char FmtBuf[100] = "", Buffer[1000] = "";
+      char *FB = FmtBuf;
+      *FB++ = *FmtStr++;
+      char Last = *FB++ = *FmtStr++;
+      unsigned HowLong = 0;
+      // Copy the specifier into FmtBuf until a conversion character is seen.
+      while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
+             Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
+             Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
+             Last != 'p' && Last != 's' && Last != '%') {
+        if (Last == 'l' || Last == 'L') HowLong++;  // Keep track of l's
+        Last = *FB++ = *FmtStr++;
+      }
+      *FB = 0;
+
+      switch (Last) {
+      case '%':
+        memcpy(Buffer, "%", 2); break;
+      case 'c':
+        sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+        break;
+      case 'd': case 'i':
+      case 'u': case 'o':
+      case 'x': case 'X':
+        if (HowLong >= 1) {
+          if (HowLong == 1 &&
+              TheInterpreter->getDataLayout().getPointerSizeInBits() == 64 &&
+              sizeof(long) < sizeof(int64_t)) {
+            // Make sure we use %lld with a 64 bit argument because we might be
+            // compiling LLI on a 32 bit compiler.
+            unsigned Size = strlen(FmtBuf);
+            FmtBuf[Size] = FmtBuf[Size-1];
+            FmtBuf[Size+1] = 0;
+            FmtBuf[Size-1] = 'l';
+          }
+          sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
+        } else
+          sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+        break;
+      case 'e': case 'E': case 'g': case 'G': case 'f':
+        sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
+      case 'p':
+        sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
+      case 's':
+        sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
+      default:
+        errs() << "<unknown printf code '" << *FmtStr << "'!>";
+        ArgNo++; break;
+      }
+      // Append the expanded specifier (including its NUL) to the output.
+      size_t Len = strlen(Buffer);
+      memcpy(OutputBuffer, Buffer, Len + 1);
+      OutputBuffer += Len;
+      }
+      break;
+    }
+  }
+  return GV;
+}
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+// int printf(const char *, ...) - a very rough implementation to make output
+// useful.
+// int printf(const char *, ...) - a very rough implementation to make output
+// useful.
+// Formats into a local fixed-size buffer via lle_X_sprintf, then writes it
+// to stdout. No bounds checking on the 10000-byte buffer.
+static GenericValue lle_X_printf(FunctionType *FT,
+                                 ArrayRef<GenericValue> Args) {
+  char Buffer[10000];
+  std::vector<GenericValue> NewArgs;
+  NewArgs.push_back(PTOGV((void*)&Buffer[0]));
+  llvm::append_range(NewArgs, Args);
+  GenericValue GV = lle_X_sprintf(FT, NewArgs);
+  outs() << Buffer;
+  return GV;
+}
+
+// int sscanf(const char *format, ...);
+// int sscanf(const char *format, ...);
+// Forwards up to 10 pointer arguments directly to the host sscanf; unused
+// trailing slots are passed uninitialized, which host sscanf ignores as long
+// as the format consumes fewer conversions.
+static GenericValue lle_X_sscanf(FunctionType *FT,
+                                 ArrayRef<GenericValue> args) {
+  assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
+
+  char *Args[10];
+  for (unsigned i = 0; i < args.size(); ++i)
+    Args[i] = (char*)GVTOP(args[i]);
+
+  GenericValue GV;
+  GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4],
+                    Args[5], Args[6], Args[7], Args[8], Args[9]));
+  return GV;
+}
+
+// int scanf(const char *format, ...);
+// int scanf(const char *format, ...);
+// Same forwarding scheme as lle_X_sscanf, reading from the host's stdin.
+static GenericValue lle_X_scanf(FunctionType *FT, ArrayRef<GenericValue> args) {
+  assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
+
+  char *Args[10];
+  for (unsigned i = 0; i < args.size(); ++i)
+    Args[i] = (char*)GVTOP(args[i]);
+
+  GenericValue GV;
+  GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4],
+                    Args[5], Args[6], Args[7], Args[8], Args[9]));
+  return GV;
+}
+
+// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
+// output useful.
+// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
+// output useful.
+// Formats into a local buffer via lle_X_sprintf (skipping the FILE* arg),
+// then fputs the result to the given stream.
+static GenericValue lle_X_fprintf(FunctionType *FT,
+                                  ArrayRef<GenericValue> Args) {
+  assert(Args.size() >= 2);
+  char Buffer[10000];
+  std::vector<GenericValue> NewArgs;
+  NewArgs.push_back(PTOGV(Buffer));
+  NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
+  GenericValue GV = lle_X_sprintf(FT, NewArgs);
+
+  fputs(Buffer, (FILE *) GVTOP(Args[0]));
+  return GV;
+}
+
+// Wrapper for llvm.memset.*: fill Args[0] with byte Args[1] for Args[2]
+// bytes using the host memset.
+static GenericValue lle_X_memset(FunctionType *FT,
+                                 ArrayRef<GenericValue> Args) {
+  int val = (int)Args[1].IntVal.getSExtValue();
+  size_t len = (size_t)Args[2].IntVal.getZExtValue();
+  memset((void *)GVTOP(Args[0]), val, len);
+  // llvm.memset.* returns void, lle_X_* returns GenericValue,
+  // so here we return GenericValue with IntVal set to zero
+  GenericValue GV;
+  GV.IntVal = 0;
+  return GV;
+}
+
+// Wrapper for llvm.memcpy.*: copy Args[2] bytes from Args[1] to Args[0]
+// using the host memcpy.
+static GenericValue lle_X_memcpy(FunctionType *FT,
+                                 ArrayRef<GenericValue> Args) {
+  memcpy(GVTOP(Args[0]), GVTOP(Args[1]),
+         (size_t)(Args[2].IntVal.getLimitedValue()));
+
+  // llvm.memcpy* returns void, lle_X_* returns GenericValue,
+  // so here we return GenericValue with IntVal set to zero
+  GenericValue GV;
+  GV.IntVal = 0;
+  return GV;
+}
+
+// Register the built-in lle_X_* wrappers in the shared name->wrapper map so
+// lookupFunction can resolve them by mangled name.
+void Interpreter::initializeExternalFunctions() {
+  auto &Fns = getFunctions();
+  sys::ScopedLock Writer(Fns.Lock);
+  Fns.FuncNames["lle_X_atexit"]       = lle_X_atexit;
+  Fns.FuncNames["lle_X_exit"]         = lle_X_exit;
+  Fns.FuncNames["lle_X_abort"]        = lle_X_abort;
+
+  Fns.FuncNames["lle_X_printf"]       = lle_X_printf;
+  Fns.FuncNames["lle_X_sprintf"]      = lle_X_sprintf;
+  Fns.FuncNames["lle_X_sscanf"]       = lle_X_sscanf;
+  Fns.FuncNames["lle_X_scanf"]        = lle_X_scanf;
+  Fns.FuncNames["lle_X_fprintf"]      = lle_X_fprintf;
+  Fns.FuncNames["lle_X_memset"]       = lle_X_memset;
+  Fns.FuncNames["lle_X_memcpy"]       = lle_X_memcpy;
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
new file mode 100644
index 000000000000..d4235cfa2ccf
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -0,0 +1,102 @@
+//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top-level functionality for the LLVM interpreter.
+// This interpreter is designed to be a very simple, portable, inefficient
+// interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include <cstring>
+using namespace llvm;
+
+namespace {
+
+static struct RegisterInterp {
+ RegisterInterp() { Interpreter::Register(); }
+} InterpRegistrator;
+
+}
+
+extern "C" void LLVMLinkInInterpreter() { }
+
+/// Create a new interpreter object.
+///
+ExecutionEngine *Interpreter::create(std::unique_ptr<Module> M,
+ std::string *ErrStr) {
+ // Tell this Module to materialize everything and release the GVMaterializer.
+ if (Error Err = M->materializeAll()) {
+ std::string Msg;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ Msg = EIB.message();
+ });
+ if (ErrStr)
+ *ErrStr = Msg;
+ // We got an error, just return 0
+ return nullptr;
+ }
+
+ return new Interpreter(std::move(M));
+}
+
+//===----------------------------------------------------------------------===//
+// Interpreter ctor - Initialize stuff
+//
+Interpreter::Interpreter(std::unique_ptr<Module> M)
+ : ExecutionEngine(std::move(M)) {
+
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ // Initialize the "backend"
+ initializeExecutionEngine();
+ initializeExternalFunctions();
+ emitGlobals();
+
+ IL = new IntrinsicLowering(getDataLayout());
+}
+
+Interpreter::~Interpreter() {
+ delete IL;
+}
+
+void Interpreter::runAtExitHandlers () {
+ while (!AtExitHandlers.empty()) {
+ callFunction(AtExitHandlers.back(), std::nullopt);
+ AtExitHandlers.pop_back();
+ run();
+ }
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+GenericValue Interpreter::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
+ assert (F && "Function *F was null at entry to run()");
+
+ // Try extra hard not to pass extra args to a function that isn't
+ // expecting them. C programmers frequently bend the rules and
+ // declare main() with fewer parameters than it actually gets
+ // passed, and the interpreter barfs if you pass a function more
+ // parameters than it is declared to take. This does not attempt to
+ // take into account gratuitous differences in declared types,
+ // though.
+ const size_t ArgCount = F->getFunctionType()->getNumParams();
+ ArrayRef<GenericValue> ActualArgs =
+ ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
+
+ // Set up the function call.
+ callFunction(F, ActualArgs);
+
+ // Start executing the function.
+ run();
+
+ return ExitValue;
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
new file mode 100644
index 000000000000..41a0389442d3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -0,0 +1,233 @@
+//===-- Interpreter.h ------------------------------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines the interpreter structure
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+#define LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+namespace llvm {
+
+class IntrinsicLowering;
+template<typename T> class generic_gep_type_iterator;
+class ConstantExpr;
+typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
+
+
+// AllocaHolder - Object to track all of the blocks of memory allocated by
+// alloca. When the function returns, this object is popped off the execution
+// stack, which causes the dtor to be run, which frees all the alloca'd memory.
+//
+class AllocaHolder {
+ std::vector<void *> Allocations;
+
+public:
+ AllocaHolder() = default;
+
+ // Make this type move-only.
+ AllocaHolder(AllocaHolder &&) = default;
+ AllocaHolder &operator=(AllocaHolder &&RHS) = default;
+
+ ~AllocaHolder() {
+ for (void *Allocation : Allocations)
+ free(Allocation);
+ }
+
+ void add(void *Mem) { Allocations.push_back(Mem); }
+};
+
+typedef std::vector<GenericValue> ValuePlaneTy;
+
+// ExecutionContext struct - This struct represents one stack frame currently
+// executing.
+//
+struct ExecutionContext {
+ Function *CurFunction;// The currently executing function
+ BasicBlock *CurBB; // The currently executing BB
+ BasicBlock::iterator CurInst; // The next instruction to execute
+ CallBase *Caller; // Holds the call that called subframes.
+ // NULL if main func or debugger invoked fn
+ std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
+ std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
+ AllocaHolder Allocas; // Track memory allocated by alloca
+
+ ExecutionContext() : CurFunction(nullptr), CurBB(nullptr), CurInst(nullptr) {}
+};
+
+// Interpreter - This class represents the entirety of the interpreter.
+//
+class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
+ GenericValue ExitValue; // The return value of the called function
+ IntrinsicLowering *IL;
+
+ // The runtime stack of executing code. The top of the stack is the current
+ // function record.
+ std::vector<ExecutionContext> ECStack;
+
+ // AtExitHandlers - List of functions to call when the program exits,
+ // registered with the atexit() library function.
+ std::vector<Function*> AtExitHandlers;
+
+public:
+ explicit Interpreter(std::unique_ptr<Module> M);
+ ~Interpreter() override;
+
+ /// runAtExitHandlers - Run any functions registered by the program's calls to
+ /// atexit(3), which we intercept and store in AtExitHandlers.
+ ///
+ void runAtExitHandlers();
+
+ static void Register() {
+ InterpCtor = create;
+ }
+
+ /// Create an interpreter ExecutionEngine.
+ ///
+ static ExecutionEngine *create(std::unique_ptr<Module> M,
+ std::string *ErrorStr = nullptr);
+
+ /// run - Start execution with the specified function and arguments.
+ ///
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override {
+ // FIXME: not implemented.
+ return nullptr;
+ }
+
+ // Methods used to execute code:
+ // Place a call on the stack
+ void callFunction(Function *F, ArrayRef<GenericValue> ArgVals);
+ void run(); // Execute instructions until nothing left to do
+
+ // Opcode Implementations
+ void visitReturnInst(ReturnInst &I);
+ void visitBranchInst(BranchInst &I);
+ void visitSwitchInst(SwitchInst &I);
+ void visitIndirectBrInst(IndirectBrInst &I);
+
+ void visitUnaryOperator(UnaryOperator &I);
+ void visitBinaryOperator(BinaryOperator &I);
+ void visitICmpInst(ICmpInst &I);
+ void visitFCmpInst(FCmpInst &I);
+ void visitAllocaInst(AllocaInst &I);
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitGetElementPtrInst(GetElementPtrInst &I);
+ void visitPHINode(PHINode &PN) {
+ llvm_unreachable("PHI nodes already handled!");
+ }
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitSelectInst(SelectInst &I);
+
+ void visitVAStartInst(VAStartInst &I);
+ void visitVAEndInst(VAEndInst &I);
+ void visitVACopyInst(VACopyInst &I);
+ void visitIntrinsicInst(IntrinsicInst &I);
+ void visitCallBase(CallBase &I);
+ void visitUnreachableInst(UnreachableInst &I);
+
+ void visitShl(BinaryOperator &I);
+ void visitLShr(BinaryOperator &I);
+ void visitAShr(BinaryOperator &I);
+
+ void visitVAArgInst(VAArgInst &I);
+ void visitExtractElementInst(ExtractElementInst &I);
+ void visitInsertElementInst(InsertElementInst &I);
+ void visitShuffleVectorInst(ShuffleVectorInst &I);
+
+ void visitExtractValueInst(ExtractValueInst &I);
+ void visitInsertValueInst(InsertValueInst &I);
+
+ void visitInstruction(Instruction &I) {
+ errs() << I << "\n";
+ llvm_unreachable("Instruction not interpretable yet!");
+ }
+
+ GenericValue callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals);
+ void exitCalled(GenericValue GV);
+
+ void addAtExitHandler(Function *F) {
+ AtExitHandlers.push_back(F);
+ }
+
+ GenericValue *getFirstVarArg () {
+ return &(ECStack.back ().VarArgs[0]);
+ }
+
+private: // Helper functions
+ GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E, ExecutionContext &SF);
+
+ // SwitchToNewBasicBlock - Start execution in a new basic block and run any
+ // PHI nodes in the top of the block. This is used for intraprocedural
+ // control flow.
+ //
+ void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
+
+ void *getPointerToFunction(Function *F) override { return (void*)F; }
+
+ void initializeExecutionEngine() { }
+ void initializeExternalFunctions();
+ GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
+ GenericValue getOperandValue(Value *V, ExecutionContext &SF);
+ GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF.cpp
new file mode 100644
index 000000000000..f4701bc830d6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF.cpp
@@ -0,0 +1,136 @@
+//===-------------- COFF.cpp - JIT linker function for COFF -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/COFF.h"
+
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/ExecutionEngine/JITLink/COFF_x86_64.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+static StringRef getMachineName(uint16_t Machine) {
+ switch (Machine) {
+ case COFF::IMAGE_FILE_MACHINE_I386:
+ return "i386";
+ case COFF::IMAGE_FILE_MACHINE_AMD64:
+ return "x86_64";
+ case COFF::IMAGE_FILE_MACHINE_ARMNT:
+ return "ARM";
+ case COFF::IMAGE_FILE_MACHINE_ARM64:
+ return "ARM64";
+ default:
+ return "unknown";
+ }
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromCOFFObject(MemoryBufferRef ObjectBuffer) {
+ StringRef Data = ObjectBuffer.getBuffer();
+
+ // Check magic
+ auto Magic = identify_magic(ObjectBuffer.getBuffer());
+ if (Magic != file_magic::coff_object)
+ return make_error<JITLinkError>("Invalid COFF buffer");
+
+ if (Data.size() < sizeof(object::coff_file_header))
+ return make_error<JITLinkError>("Truncated COFF buffer");
+
+ uint64_t CurPtr = 0;
+ bool IsPE = false;
+
+ // Check if this is a PE/COFF file.
+ if (Data.size() >= sizeof(object::dos_header) + sizeof(COFF::PEMagic)) {
+ const auto *DH =
+ reinterpret_cast<const object::dos_header *>(Data.data() + CurPtr);
+ if (DH->Magic[0] == 'M' && DH->Magic[1] == 'Z') {
+ // Check the PE magic bytes. ("PE\0\0")
+ CurPtr = DH->AddressOfNewExeHeader;
+ if (memcmp(Data.data() + CurPtr, COFF::PEMagic, sizeof(COFF::PEMagic)) !=
+ 0) {
+ return make_error<JITLinkError>("Incorrect PE magic");
+ }
+ CurPtr += sizeof(COFF::PEMagic);
+ IsPE = true;
+ }
+ }
+ if (Data.size() < CurPtr + sizeof(object::coff_file_header))
+ return make_error<JITLinkError>("Truncated COFF buffer");
+
+ const object::coff_file_header *COFFHeader =
+ reinterpret_cast<const object::coff_file_header *>(Data.data() + CurPtr);
+ const object::coff_bigobj_file_header *COFFBigObjHeader = nullptr;
+
+ // Deal with bigobj file
+ if (!IsPE && COFFHeader->Machine == COFF::IMAGE_FILE_MACHINE_UNKNOWN &&
+ COFFHeader->NumberOfSections == uint16_t(0xffff) &&
+ Data.size() >= sizeof(object::coff_bigobj_file_header)) {
+ if (Data.size() < sizeof(object::coff_file_header)) {
+ return make_error<JITLinkError>("Truncated COFF buffer");
+ }
+ COFFBigObjHeader =
+ reinterpret_cast<const object::coff_bigobj_file_header *>(Data.data() +
+ CurPtr);
+
+ // Verify that we are dealing with bigobj.
+ if (COFFBigObjHeader->Version >= COFF::BigObjHeader::MinBigObjectVersion &&
+ std::memcmp(COFFBigObjHeader->UUID, COFF::BigObjMagic,
+ sizeof(COFF::BigObjMagic)) == 0) {
+ COFFHeader = nullptr;
+ CurPtr += sizeof(object::coff_bigobj_file_header);
+ } else
+ COFFBigObjHeader = nullptr;
+ }
+
+ uint16_t Machine =
+ COFFHeader ? COFFHeader->Machine : COFFBigObjHeader->Machine;
+ LLVM_DEBUG({
+ dbgs() << "jitLink_COFF: PE = " << (IsPE ? "yes" : "no")
+ << ", bigobj = " << (COFFBigObjHeader ? "yes" : "no")
+ << ", identifier = \"" << ObjectBuffer.getBufferIdentifier() << "\" "
+ << "machine = " << getMachineName(Machine) << "\n";
+ });
+
+ switch (Machine) {
+ case COFF::IMAGE_FILE_MACHINE_AMD64:
+ return createLinkGraphFromCOFFObject_x86_64(ObjectBuffer);
+ default:
+ return make_error<JITLinkError>(
+ "Unsupported target machine architecture in COFF object " +
+ ObjectBuffer.getBufferIdentifier() + ": " + getMachineName(Machine));
+ }
+}
+
+void link_COFF(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ switch (G->getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ link_COFF_x86_64(std::move(G), std::move(Ctx));
+ return;
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>(
+ "Unsupported target machine architecture in COFF link graph " +
+ G->getName()));
+ return;
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp
new file mode 100644
index 000000000000..f23f3ed9406b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.cpp
@@ -0,0 +1,78 @@
+//===-- COFFDirectiveParser.cpp - JITLink coff directive parser --*- C++ -*===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MSVC COFF directive parser
+//
+//===----------------------------------------------------------------------===//
+
+#include "COFFDirectiveParser.h"
+
+#include <array>
+
+using namespace llvm;
+using namespace jitlink;
+
+#define DEBUG_TYPE "jitlink"
+
+// Create prefix string literals used in Options.td
+#define PREFIX(NAME, VALUE) \
+ static constexpr StringLiteral NAME##_init[] = VALUE; \
+ static constexpr ArrayRef<StringLiteral> NAME(NAME##_init, \
+ std::size(NAME##_init) - 1);
+#include "COFFOptions.inc"
+#undef PREFIX
+
+static constexpr const StringLiteral PrefixTable_init[] =
+#define PREFIX_UNION(VALUES) VALUES
+#include "COFFOptions.inc"
+#undef PREFIX_UNION
+ ;
+static constexpr const ArrayRef<StringLiteral>
+ PrefixTable(PrefixTable_init, std::size(PrefixTable_init) - 1);
+
+// Create table mapping all options defined in COFFOptions.td
+using namespace llvm::opt;
+static constexpr opt::OptTable::Info infoTable[] = {
+#define OPTION(...) \
+ LLVM_CONSTRUCT_OPT_INFO_WITH_ID_PREFIX(COFF_OPT_, __VA_ARGS__),
+#include "COFFOptions.inc"
+#undef OPTION
+};
+
+class COFFOptTable : public opt::PrecomputedOptTable {
+public:
+ COFFOptTable() : PrecomputedOptTable(infoTable, PrefixTable, true) {}
+};
+
+static COFFOptTable optTable;
+
+Expected<opt::InputArgList> COFFDirectiveParser::parse(StringRef Str) {
+ SmallVector<StringRef, 16> Tokens;
+ SmallVector<const char *, 16> Buffer;
+ cl::TokenizeWindowsCommandLineNoCopy(Str, saver, Tokens);
+ for (StringRef Tok : Tokens) {
+ bool HasNul = Tok.end() != Str.end() && Tok.data()[Tok.size()] == '\0';
+ Buffer.push_back(HasNul ? Tok.data() : saver.save(Tok).data());
+ }
+
+ unsigned missingIndex;
+ unsigned missingCount;
+
+ auto Result = optTable.ParseArgs(Buffer, missingIndex, missingCount);
+
+ if (missingCount)
+ return make_error<JITLinkError>(Twine("COFF directive parsing failed: ") +
+ Result.getArgString(missingIndex) +
+ " missing argument");
+ LLVM_DEBUG({
+ for (auto *arg : Result.filtered(COFF_OPT_UNKNOWN))
+ dbgs() << "Unknown coff option argument: " << arg->getAsString(Result)
+ << "\n";
+ });
+ return std::move(Result);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.h
new file mode 100644
index 000000000000..21808f0afcb5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFDirectiveParser.h
@@ -0,0 +1,48 @@
+//===--- COFFDirectiveParser.h - JITLink coff directive parser --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MSVC COFF directive parser
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_COFFDIRECTIVEPARSER_H
+#define LLVM_EXECUTIONENGINE_JITLINK_COFFDIRECTIVEPARSER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/StringSaver.h"
+#include "llvm/TargetParser/Triple.h"
+
+namespace llvm {
+namespace jitlink {
+
+enum {
+ COFF_OPT_INVALID = 0,
+#define OPTION(...) LLVM_MAKE_OPT_ID_WITH_ID_PREFIX(COFF_OPT_, __VA_ARGS__),
+#include "COFFOptions.inc"
+#undef OPTION
+};
+
+/// Parser for the MSVC specific preprocessor directives.
+/// https://docs.microsoft.com/en-us/cpp/preprocessor/comment-c-cpp?view=msvc-160
+class COFFDirectiveParser {
+public:
+ Expected<opt::InputArgList> parse(StringRef Str);
+
+private:
+ llvm::BumpPtrAllocator bAlloc;
+ llvm::StringSaver saver{bAlloc};
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_COFFDIRECTIVEPARSER_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp
new file mode 100644
index 000000000000..1fd2a33d3f11
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp
@@ -0,0 +1,631 @@
+//=--------- COFFLinkGraphBuilder.cpp - COFF LinkGraph builder ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic COFF LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+#include "COFFLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+static const char *CommonSectionName = "__common";
+
+namespace llvm {
+namespace jitlink {
+
+static Triple createTripleWithCOFFFormat(Triple T) {
+ T.setObjectFormat(Triple::COFF);
+ return T;
+}
+
+COFFLinkGraphBuilder::COFFLinkGraphBuilder(
+ const object::COFFObjectFile &Obj, Triple TT, SubtargetFeatures Features,
+ LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
+ : Obj(Obj), G(std::make_unique<LinkGraph>(
+ Obj.getFileName().str(), createTripleWithCOFFFormat(TT),
+ std::move(Features), getPointerSize(Obj),
+ getEndianness(Obj), std::move(GetEdgeKindName))) {
+ LLVM_DEBUG({
+ dbgs() << "Created COFFLinkGraphBuilder for \"" << Obj.getFileName()
+ << "\"\n";
+ });
+}
+
+COFFLinkGraphBuilder::~COFFLinkGraphBuilder() = default;
+
+unsigned
+COFFLinkGraphBuilder::getPointerSize(const object::COFFObjectFile &Obj) {
+ return Obj.getBytesInAddress();
+}
+
+llvm::endianness
+COFFLinkGraphBuilder::getEndianness(const object::COFFObjectFile &Obj) {
+ return Obj.isLittleEndian() ? llvm::endianness::little
+ : llvm::endianness::big;
+}
+
+uint64_t COFFLinkGraphBuilder::getSectionSize(const object::COFFObjectFile &Obj,
+ const object::coff_section *Sec) {
+ // Consider the difference between executable form and object form.
+ // More information is inside COFFObjectFile::getSectionSize
+ if (Obj.getDOSHeader())
+ return std::min(Sec->VirtualSize, Sec->SizeOfRawData);
+ return Sec->SizeOfRawData;
+}
+
+uint64_t
+COFFLinkGraphBuilder::getSectionAddress(const object::COFFObjectFile &Obj,
+ const object::coff_section *Section) {
+ return Section->VirtualAddress + Obj.getImageBase();
+}
+
+bool COFFLinkGraphBuilder::isComdatSection(
+ const object::coff_section *Section) {
+ return Section->Characteristics & COFF::IMAGE_SCN_LNK_COMDAT;
+}
+
+Section &COFFLinkGraphBuilder::getCommonSection() {
+ if (!CommonSection)
+ CommonSection = &G->createSection(CommonSectionName,
+ orc::MemProt::Read | orc::MemProt::Write);
+ return *CommonSection;
+}
+
+Expected<std::unique_ptr<LinkGraph>> COFFLinkGraphBuilder::buildGraph() {
+ if (!Obj.isRelocatableObject())
+ return make_error<JITLinkError>("Object is not a relocatable COFF file");
+
+ if (auto Err = graphifySections())
+ return std::move(Err);
+
+ if (auto Err = graphifySymbols())
+ return std::move(Err);
+
+ if (auto Err = addRelocations())
+ return std::move(Err);
+
+ return std::move(G);
+}
+
+StringRef
+COFFLinkGraphBuilder::getCOFFSectionName(COFFSectionIndex SectionIndex,
+ const object::coff_section *Sec,
+ object::COFFSymbolRef Sym) {
+ switch (SectionIndex) {
+ case COFF::IMAGE_SYM_UNDEFINED: {
+ if (Sym.getValue())
+ return "(common)";
+ else
+ return "(external)";
+ }
+ case COFF::IMAGE_SYM_ABSOLUTE:
+ return "(absolute)";
+ case COFF::IMAGE_SYM_DEBUG: {
+ // Used with .file symbol
+ return "(debug)";
+ }
+ default: {
+ // Non-reserved regular section numbers
+ if (Expected<StringRef> SecNameOrErr = Obj.getSectionName(Sec))
+ return *SecNameOrErr;
+ }
+ }
+ return "";
+}
+
+Error COFFLinkGraphBuilder::graphifySections() {
+ LLVM_DEBUG(dbgs() << " Creating graph sections...\n");
+
+ GraphBlocks.resize(Obj.getNumberOfSections() + 1);
+ // For each section...
+ for (COFFSectionIndex SecIndex = 1;
+ SecIndex <= static_cast<COFFSectionIndex>(Obj.getNumberOfSections());
+ SecIndex++) {
+ Expected<const object::coff_section *> Sec = Obj.getSection(SecIndex);
+ if (!Sec)
+ return Sec.takeError();
+
+ StringRef SectionName;
+ if (Expected<StringRef> SecNameOrErr = Obj.getSectionName(*Sec))
+ SectionName = *SecNameOrErr;
+
+ // FIXME: Skip debug info sections
+ if (SectionName == ".voltbl") {
+ LLVM_DEBUG({
+ dbgs() << " "
+ << "Skipping section \"" << SectionName << "\"\n";
+ });
+ continue;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " "
+ << "Creating section for \"" << SectionName << "\"\n";
+ });
+
+ // Get the section's memory protection flags.
+ orc::MemProt Prot = orc::MemProt::Read;
+ if ((*Sec)->Characteristics & COFF::IMAGE_SCN_MEM_EXECUTE)
+ Prot |= orc::MemProt::Exec;
+ if ((*Sec)->Characteristics & COFF::IMAGE_SCN_MEM_READ)
+ Prot |= orc::MemProt::Read;
+ if ((*Sec)->Characteristics & COFF::IMAGE_SCN_MEM_WRITE)
+ Prot |= orc::MemProt::Write;
+
+ // Look for existing sections first.
+ auto *GraphSec = G->findSectionByName(SectionName);
+ if (!GraphSec) {
+ GraphSec = &G->createSection(SectionName, Prot);
+ if ((*Sec)->Characteristics & COFF::IMAGE_SCN_LNK_REMOVE)
+ GraphSec->setMemLifetime(orc::MemLifetime::NoAlloc);
+ }
+ if (GraphSec->getMemProt() != Prot)
+ return make_error<JITLinkError>("MemProt should match");
+
+ Block *B = nullptr;
+ if ((*Sec)->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA)
+ B = &G->createZeroFillBlock(
+ *GraphSec, getSectionSize(Obj, *Sec),
+ orc::ExecutorAddr(getSectionAddress(Obj, *Sec)),
+ (*Sec)->getAlignment(), 0);
+ else {
+ ArrayRef<uint8_t> Data;
+ if (auto Err = Obj.getSectionContents(*Sec, Data))
+ return Err;
+
+ auto CharData = ArrayRef<char>(
+ reinterpret_cast<const char *>(Data.data()), Data.size());
+
+ if (SectionName == getDirectiveSectionName())
+ if (auto Err = handleDirectiveSection(
+ StringRef(CharData.data(), CharData.size())))
+ return Err;
+
+ B = &G->createContentBlock(
+ *GraphSec, CharData, orc::ExecutorAddr(getSectionAddress(Obj, *Sec)),
+ (*Sec)->getAlignment(), 0);
+ }
+
+ setGraphBlock(SecIndex, B);
+ }
+
+ return Error::success();
+}
+
+Error COFFLinkGraphBuilder::graphifySymbols() {
+ LLVM_DEBUG(dbgs() << " Creating graph symbols...\n");
+
+ SymbolSets.resize(Obj.getNumberOfSections() + 1);
+ PendingComdatExports.resize(Obj.getNumberOfSections() + 1);
+ GraphSymbols.resize(Obj.getNumberOfSymbols());
+
+ for (COFFSymbolIndex SymIndex = 0;
+ SymIndex < static_cast<COFFSymbolIndex>(Obj.getNumberOfSymbols());
+ SymIndex++) {
+ Expected<object::COFFSymbolRef> Sym = Obj.getSymbol(SymIndex);
+ if (!Sym)
+ return Sym.takeError();
+
+ StringRef SymbolName;
+ if (Expected<StringRef> SymNameOrErr = Obj.getSymbolName(*Sym))
+ SymbolName = *SymNameOrErr;
+
+ COFFSectionIndex SectionIndex = Sym->getSectionNumber();
+ const object::coff_section *Sec = nullptr;
+
+ if (!COFF::isReservedSectionNumber(SectionIndex)) {
+ auto SecOrErr = Obj.getSection(SectionIndex);
+ if (!SecOrErr)
+ return make_error<JITLinkError>(
+ "Invalid COFF section number:" + formatv("{0:d}: ", SectionIndex) +
+ " (" + toString(SecOrErr.takeError()) + ")");
+ Sec = *SecOrErr;
+ }
+
+ // Create jitlink symbol
+ jitlink::Symbol *GSym = nullptr;
+ if (Sym->isFileRecord())
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex << ": Skipping FileRecord symbol \""
+ << SymbolName << "\" in "
+ << getCOFFSectionName(SectionIndex, Sec, *Sym)
+ << " (index: " << SectionIndex << ") \n";
+ });
+ else if (Sym->isUndefined()) {
+ GSym = createExternalSymbol(SymIndex, SymbolName, *Sym, Sec);
+ } else if (Sym->isWeakExternal()) {
+ auto *WeakExternal = Sym->getAux<object::coff_aux_weak_external>();
+ COFFSymbolIndex TagIndex = WeakExternal->TagIndex;
+ uint32_t Characteristics = WeakExternal->Characteristics;
+ WeakExternalRequests.push_back(
+ {SymIndex, TagIndex, Characteristics, SymbolName});
+ } else {
+ Expected<jitlink::Symbol *> NewGSym =
+ createDefinedSymbol(SymIndex, SymbolName, *Sym, Sec);
+ if (!NewGSym)
+ return NewGSym.takeError();
+ GSym = *NewGSym;
+ if (GSym) {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Creating defined graph symbol for COFF symbol \""
+ << SymbolName << "\" in "
+ << getCOFFSectionName(SectionIndex, Sec, *Sym)
+ << " (index: " << SectionIndex << ") \n";
+ dbgs() << " " << *GSym << "\n";
+ });
+ }
+ }
+
+ // Register the symbol
+ if (GSym)
+ setGraphSymbol(SectionIndex, SymIndex, *GSym);
+ SymIndex += Sym->getNumberOfAuxSymbols();
+ }
+
+ if (auto Err = flushWeakAliasRequests())
+ return Err;
+
+ if (auto Err = handleAlternateNames())
+ return Err;
+
+ if (auto Err = calculateImplicitSizeOfSymbols())
+ return Err;
+
+ return Error::success();
+}
+
+Error COFFLinkGraphBuilder::handleDirectiveSection(StringRef Str) {
+ auto Parsed = DirectiveParser.parse(Str);
+ if (!Parsed)
+ return Parsed.takeError();
+ for (auto *Arg : *Parsed) {
+ StringRef S = Arg->getValue();
+ switch (Arg->getOption().getID()) {
+ case COFF_OPT_alternatename: {
+ StringRef From, To;
+ std::tie(From, To) = S.split('=');
+ if (From.empty() || To.empty())
+ return make_error<JITLinkError>(
+ "Invalid COFF /alternatename directive");
+ AlternateNames[From] = To;
+ break;
+ }
+ case COFF_OPT_incl: {
+ auto DataCopy = G->allocateContent(S);
+ StringRef StrCopy(DataCopy.data(), DataCopy.size());
+ ExternalSymbols[StrCopy] = &G->addExternalSymbol(StrCopy, 0, false);
+ ExternalSymbols[StrCopy]->setLive(true);
+ break;
+ }
+ case COFF_OPT_export:
+ break;
+ default: {
+ LLVM_DEBUG({
+ dbgs() << "Unknown coff directive: " << Arg->getSpelling() << "\n";
+ });
+ break;
+ }
+ }
+ }
+ return Error::success();
+}
+
+Error COFFLinkGraphBuilder::flushWeakAliasRequests() {
+ // Export the weak external symbols and alias it
+ for (auto &WeakExternal : WeakExternalRequests) {
+ if (auto *Target = getGraphSymbol(WeakExternal.Target)) {
+ Expected<object::COFFSymbolRef> AliasSymbol =
+ Obj.getSymbol(WeakExternal.Alias);
+ if (!AliasSymbol)
+ return AliasSymbol.takeError();
+
+ // FIXME: IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY and
+ // IMAGE_WEAK_EXTERN_SEARCH_LIBRARY are handled in the same way.
+ Scope S =
+ WeakExternal.Characteristics == COFF::IMAGE_WEAK_EXTERN_SEARCH_ALIAS
+ ? Scope::Default
+ : Scope::Local;
+
+ auto NewSymbol =
+ createAliasSymbol(WeakExternal.SymbolName, Linkage::Weak, S, *Target);
+ if (!NewSymbol)
+ return NewSymbol.takeError();
+ setGraphSymbol(AliasSymbol->getSectionNumber(), WeakExternal.Alias,
+ **NewSymbol);
+ LLVM_DEBUG({
+ dbgs() << " " << WeakExternal.Alias
+ << ": Creating weak external symbol for COFF symbol \""
+ << WeakExternal.SymbolName << "\" in section "
+ << AliasSymbol->getSectionNumber() << "\n";
+ dbgs() << " " << **NewSymbol << "\n";
+ });
+ } else
+ return make_error<JITLinkError>("Weak symbol alias requested but actual "
+ "symbol not found for symbol " +
+ formatv("{0:d}", WeakExternal.Alias));
+ }
+ return Error::success();
+}
+
+// Apply /alternatename directives: for every "From=To" pair where "To" is a
+// symbol defined in this graph and "From" is still an unresolved external,
+// turn the external into a weak local definition aliasing the target.
+// Pairs whose target or alias is absent are silently skipped (best-effort,
+// matching link.exe semantics for unused alternate names).
+Error COFFLinkGraphBuilder::handleAlternateNames() {
+  for (auto &[From, To] : AlternateNames) {
+    // One find() per map instead of count() followed by operator[] (which
+    // performed four hash lookups per entry).
+    auto DefIt = DefinedSymbols.find(To);
+    if (DefIt == DefinedSymbols.end())
+      continue;
+    auto ExtIt = ExternalSymbols.find(From);
+    if (ExtIt == ExternalSymbols.end())
+      continue;
+    Symbol *Target = DefIt->second;
+    Symbol *Alias = ExtIt->second;
+    // Redefine the external in place at the target's block/offset.
+    G->makeDefined(*Alias, Target->getBlock(), Target->getOffset(),
+                   Target->getSize(), Linkage::Weak, Scope::Local, false);
+  }
+  return Error::success();
+}
+
+// Return the graph's external symbol for \p SymbolName, creating it on first
+// use. Externals are deduplicated by name via the ExternalSymbols map.
+Symbol *COFFLinkGraphBuilder::createExternalSymbol(
+    COFFSymbolIndex SymIndex, StringRef SymbolName,
+    object::COFFSymbolRef Symbol, const object::coff_section *Section) {
+  // Single find/insert replaces the original count() + operator[] pattern,
+  // which performed three hash lookups on the hot path.
+  auto It = ExternalSymbols.find(SymbolName);
+  if (It == ExternalSymbols.end())
+    It = ExternalSymbols
+             .try_emplace(SymbolName, &G->addExternalSymbol(
+                                          SymbolName, Symbol.getValue(), false))
+             .first;
+
+  LLVM_DEBUG({
+    dbgs() << "  " << SymIndex
+           << ": Creating external graph symbol for COFF symbol \""
+           << SymbolName << "\" in "
+           << getCOFFSectionName(Symbol.getSectionNumber(), Section, Symbol)
+           << " (index: " << Symbol.getSectionNumber() << ") \n";
+  });
+  return It->second;
+}
+
+// Create a defined symbol named \p SymbolName that aliases \p Target
+// (same block, offset and size). Only defined targets are supported;
+// aliasing an external target returns an error.
+Expected<Symbol *> COFFLinkGraphBuilder::createAliasSymbol(StringRef SymbolName,
+                                                           Linkage L, Scope S,
+                                                           Symbol &Target) {
+  if (!Target.isDefined()) {
+    // FIXME: Support this when there's a way to handle this.
+    return make_error<JITLinkError>("Weak external symbol with external "
+                                    "symbol as alternative not supported.");
+  }
+  return &G->addDefinedSymbol(Target.getBlock(), Target.getOffset(), SymbolName,
+                              Target.getSize(), L, S, Target.isCallable(),
+                              false);
+}
+
+// In COFF, most of the defined symbols don't contain the size information.
+// Hence, we calculate the "implicit" size of symbol by taking the delta of
+// offsets of consecutive symbols within a block. We maintain a balanced tree
+// set of symbols sorted by offset per each block in order to achieve
+// logarithmic time complexity of sorted symbol insertion. Symbol is inserted to
+// the set once it's processed in graphifySymbols. In this function, we iterate
+// each collected symbol in sorted order and calculate the implicit size.
+Error COFFLinkGraphBuilder::calculateImplicitSizeOfSymbols() {
+  // COFF section numbers are 1-based, hence the loop bounds.
+  for (COFFSectionIndex SecIndex = 1;
+       SecIndex <= static_cast<COFFSectionIndex>(Obj.getNumberOfSections());
+       SecIndex++) {
+    auto &SymbolSet = SymbolSets[SecIndex];
+    if (SymbolSet.empty())
+      continue;
+    jitlink::Block *B = getGraphBlock(SecIndex);
+    // Walk symbols from highest offset to lowest; the first symbol's
+    // candidate size extends to the end of the block.
+    orc::ExecutorAddrDiff LastOffset = B->getSize();
+    orc::ExecutorAddrDiff LastDifferentOffset = B->getSize();
+    orc::ExecutorAddrDiff LastSize = 0;
+    for (auto It = SymbolSet.rbegin(); It != SymbolSet.rend(); It++) {
+      orc::ExecutorAddrDiff Offset = It->first;
+      jitlink::Symbol *Symbol = It->second;
+      orc::ExecutorAddrDiff CandSize;
+      // Last offset can be same when aliasing happened; aliases at the same
+      // offset share the previously computed size.
+      if (Symbol->getOffset() == LastOffset)
+        CandSize = LastSize;
+      else
+        CandSize = LastOffset - Offset;
+
+      LLVM_DEBUG({
+        if (Offset + Symbol->getSize() > LastDifferentOffset)
+          dbgs() << "  Overlapping symbol range generated for the following "
+                    "symbol:"
+                 << "\n"
+                 << "    " << *Symbol << "\n";
+      });
+      // LastDifferentOffset is only read inside LLVM_DEBUG; silence unused
+      // warnings in release builds.
+      (void)LastDifferentOffset;
+      if (LastOffset != Offset)
+        LastDifferentOffset = Offset;
+      LastSize = CandSize;
+      LastOffset = Offset;
+      if (Symbol->getSize()) {
+        // Non empty symbol can happen in COMDAT symbol.
+        // We don't consider the possibility of overlapping symbol range that
+        // could be introduced by disparity between inferred symbol size and
+        // defined symbol size because symbol size information is currently only
+        // used by jitlink-check where we have control to not make overlapping
+        // ranges.
+        continue;
+      }
+
+      LLVM_DEBUG({
+        if (!CandSize)
+          dbgs() << "  Empty implicit symbol size generated for the following "
+                    "symbol:"
+                 << "\n"
+                 << "    " << *Symbol << "\n";
+      });
+
+      // Only symbols without an explicit size receive the inferred one.
+      Symbol->setSize(CandSize);
+    }
+  }
+  return Error::success();
+}
+
+// Create a graph symbol for a defined COFF symbol, classifying it as common,
+// absolute, external (plain or COMDAT) or static/label. May return nullptr
+// without error when no graph symbol is produced: either the owning section
+// was skipped, or this symbol merely initiates a pending COMDAT export
+// (see createCOMDATExportRequest).
+Expected<Symbol *> COFFLinkGraphBuilder::createDefinedSymbol(
+    COFFSymbolIndex SymIndex, StringRef SymbolName,
+    object::COFFSymbolRef Symbol, const object::coff_section *Section) {
+  if (Symbol.isCommon()) {
+    // For common symbols the Value field carries the allocation size; back
+    // the symbol with a zero-fill block in the synthetic common section.
+    // FIXME: correct alignment
+    return &G->addDefinedSymbol(
+        G->createZeroFillBlock(getCommonSection(), Symbol.getValue(),
+                               orc::ExecutorAddr(), Symbol.getValue(), 0),
+        0, SymbolName, Symbol.getValue(), Linkage::Strong, Scope::Default,
+        false, false);
+  }
+  if (Symbol.isAbsolute())
+    return &G->addAbsoluteSymbol(SymbolName,
+                                 orc::ExecutorAddr(Symbol.getValue()), 0,
+                                 Linkage::Strong, Scope::Local, false);
+
+  if (llvm::COFF::isReservedSectionNumber(Symbol.getSectionNumber()))
+    return make_error<JITLinkError>(
+        "Reserved section number used in regular symbol " +
+        formatv("{0:d}", SymIndex));
+
+  Block *B = getGraphBlock(Symbol.getSectionNumber());
+  if (!B) {
+    LLVM_DEBUG({
+      dbgs() << "  " << SymIndex
+             << ": Skipping graph symbol since section was not created for "
+                "COFF symbol \""
+             << SymbolName << "\" in section " << Symbol.getSectionNumber()
+             << "\n";
+    });
+    return nullptr;
+  }
+
+  if (Symbol.isExternal()) {
+    // This is not a comdat sequence, export the symbol as it is
+    if (!isComdatSection(Section)) {
+      auto GSym = &G->addDefinedSymbol(
+          *B, Symbol.getValue(), SymbolName, 0, Linkage::Strong, Scope::Default,
+          Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION, false);
+      DefinedSymbols[SymbolName] = GSym;
+      return GSym;
+    } else {
+      // Second symbol of a COMDAT sequence: the section symbol must already
+      // have recorded an export request.
+      if (!PendingComdatExports[Symbol.getSectionNumber()])
+        return make_error<JITLinkError>("No pending COMDAT export for symbol " +
+                                        formatv("{0:d}", SymIndex));
+
+      return exportCOMDATSymbol(SymIndex, SymbolName, Symbol);
+    }
+  }
+
+  if (Symbol.getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC ||
+      Symbol.getStorageClass() == COFF::IMAGE_SYM_CLASS_LABEL) {
+    const object::coff_aux_section_definition *Definition =
+        Symbol.getSectionDefinition();
+    if (!Definition || !isComdatSection(Section)) {
+      // Handle typical static symbol
+      return &G->addDefinedSymbol(
+          *B, Symbol.getValue(), SymbolName, 0, Linkage::Strong, Scope::Local,
+          Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION, false);
+    }
+    if (Definition->Selection == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
+      // Associative COMDAT: keep this section alive only while the section
+      // it is associated with is alive, modeled as a KeepAlive edge.
+      auto Target = Definition->getNumber(Symbol.isBigObj());
+      auto GSym = &G->addDefinedSymbol(
+          *B, Symbol.getValue(), SymbolName, 0, Linkage::Strong, Scope::Local,
+          Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION, false);
+      getGraphBlock(Target)->addEdge(Edge::KeepAlive, 0, *GSym, 0);
+      return GSym;
+    }
+    if (PendingComdatExports[Symbol.getSectionNumber()])
+      return make_error<JITLinkError>(
+          "COMDAT export request already exists before symbol " +
+          formatv("{0:d}", SymIndex));
+    return createCOMDATExportRequest(SymIndex, Symbol, Definition);
+  }
+  return make_error<JITLinkError>("Unsupported storage class " +
+                                  formatv("{0:d}", Symbol.getStorageClass()) +
+                                  " in symbol " + formatv("{0:d}", SymIndex));
+}
+
+// COMDAT handling:
+// When IMAGE_SCN_LNK_COMDAT flag is set in the flags of a section,
+// the section is called a COMDAT section. It contains two symbols
+// in a sequence that specifes the behavior. First symbol is the section
+// symbol which contains the size and name of the section. It also contains
+// selection type that specifies how duplicate of the symbol is handled.
+// Second symbol is COMDAT symbol which usually defines the external name and
+// data type.
+//
+// Since two symbols always come in a specific order, we initiate pending COMDAT
+// export request when we encounter the first symbol and actually exports it
+// when we process the second symbol.
+//
+// Process the first symbol of COMDAT sequence. Maps the COFF selection type
+// to a graph linkage and records a pending export for the section; always
+// returns nullptr on success (the graph symbol is created later by
+// exportCOMDATSymbol).
+Expected<Symbol *> COFFLinkGraphBuilder::createCOMDATExportRequest(
+    COFFSymbolIndex SymIndex, object::COFFSymbolRef Symbol,
+    const object::coff_aux_section_definition *Definition) {
+  Linkage L = Linkage::Strong;
+  switch (Definition->Selection) {
+  case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES: {
+    // Duplicates are an error: keep strong linkage.
+    L = Linkage::Strong;
+    break;
+  }
+  case COFF::IMAGE_COMDAT_SELECT_ANY: {
+    // Any copy may be chosen: weak linkage lets the linker deduplicate.
+    L = Linkage::Weak;
+    break;
+  }
+  case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH:
+  case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE: {
+    // FIXME: Implement size/content validation when LinkGraph is able to
+    // handle this.
+    L = Linkage::Weak;
+    break;
+  }
+  case COFF::IMAGE_COMDAT_SELECT_LARGEST: {
+    // FIXME: Support IMAGE_COMDAT_SELECT_LARGEST properly when LinkGraph is
+    // able to handle this.
+    LLVM_DEBUG({
+      dbgs() << "  " << SymIndex
+             << ": Partially supported IMAGE_COMDAT_SELECT_LARGEST was used"
+                " in section "
+             << Symbol.getSectionNumber() << " (size: " << Definition->Length
+             << ")\n";
+    });
+    L = Linkage::Weak;
+    break;
+  }
+  case COFF::IMAGE_COMDAT_SELECT_NEWEST: {
+    // Even link.exe doesn't support this selection properly.
+    return make_error<JITLinkError>(
+        "IMAGE_COMDAT_SELECT_NEWEST is not supported.");
+  }
+  default: {
+    return make_error<JITLinkError>("Invalid comdat selection type: " +
+                                    formatv("{0:d}", Definition->Selection));
+  }
+  }
+  // Defer the actual export until the second symbol of the sequence.
+  PendingComdatExports[Symbol.getSectionNumber()] = {SymIndex, L,
+                                                     Definition->Length};
+  return nullptr;
+}
+
+// Process the second symbol of COMDAT sequence. Consumes the pending export
+// recorded by createCOMDATExportRequest for this section, creates the
+// exported graph symbol with the previously chosen linkage, and registers
+// the section symbol's index to point at it as well.
+Expected<Symbol *>
+COFFLinkGraphBuilder::exportCOMDATSymbol(COFFSymbolIndex SymIndex,
+                                         StringRef SymbolName,
+                                         object::COFFSymbolRef Symbol) {
+  Block *B = getGraphBlock(Symbol.getSectionNumber());
+  auto &PendingComdatExport = PendingComdatExports[Symbol.getSectionNumber()];
+  // NOTE: ComdatDef->Length is the size of "section" not size of symbol.
+  // We use zero symbol size to not reach out of bound of block when symbol
+  // offset is non-zero.
+  auto GSym = &G->addDefinedSymbol(
+      *B, Symbol.getValue(), SymbolName, 0, PendingComdatExport->Linkage,
+      Scope::Default, Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION,
+      false);
+  LLVM_DEBUG({
+    dbgs() << "  " << SymIndex
+           << ": Exporting COMDAT graph symbol for COFF symbol \"" << SymbolName
+           << "\" in section " << Symbol.getSectionNumber() << "\n";
+    dbgs() << "    " << *GSym << "\n";
+  });
+  // The first (section) symbol of the sequence resolves to the same graph
+  // symbol as the export.
+  setGraphSymbol(Symbol.getSectionNumber(), PendingComdatExport->SymbolIndex,
+                 *GSym);
+  DefinedSymbols[SymbolName] = GSym;
+  // Clear the request so a stale export can be diagnosed.
+  PendingComdatExport = std::nullopt;
+  return GSym;
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h
new file mode 100644
index 000000000000..e5f3ce8c53f5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h
@@ -0,0 +1,220 @@
+//===----- COFFLinkGraphBuilder.h - COFF LinkGraph builder ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic COFF LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_COFFLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_COFFLINKGRAPHBUILDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/COFF.h"
+
+#include "COFFDirectiveParser.h"
+#include "EHFrameSupportImpl.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+#include <list>
+
+namespace llvm {
+namespace jitlink {
+
+class COFFLinkGraphBuilder {
+public:
+  virtual ~COFFLinkGraphBuilder();
+  Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+protected:
+  using COFFSectionIndex = int32_t;
+  using COFFSymbolIndex = int32_t;
+
+  COFFLinkGraphBuilder(const object::COFFObjectFile &Obj, Triple TT,
+                       SubtargetFeatures Features,
+                       LinkGraph::GetEdgeKindNameFunction GetEdgeKindName);
+
+  LinkGraph &getGraph() const { return *G; }
+
+  const object::COFFObjectFile &getObject() const { return Obj; }
+
+  // Architecture-specific subclasses translate COFF relocation records
+  // into graph edges here.
+  virtual Error addRelocations() = 0;
+
+  Error graphifySections();
+  Error graphifySymbols();
+
+  // Record the graph symbol for a COFF symbol index, and (for non-reserved
+  // sections) add it to the per-section offset-sorted set used for implicit
+  // size calculation.
+  void setGraphSymbol(COFFSectionIndex SecIndex, COFFSymbolIndex SymIndex,
+                      Symbol &Sym) {
+    assert(!GraphSymbols[SymIndex] && "Duplicate symbol at index");
+    GraphSymbols[SymIndex] = &Sym;
+    if (!COFF::isReservedSectionNumber(SecIndex))
+      SymbolSets[SecIndex].insert({Sym.getOffset(), &Sym});
+  }
+
+  Symbol *getGraphSymbol(COFFSymbolIndex SymIndex) const {
+    if (SymIndex < 0 ||
+        SymIndex >= static_cast<COFFSymbolIndex>(GraphSymbols.size()))
+      return nullptr;
+    return GraphSymbols[SymIndex];
+  }
+
+  void setGraphBlock(COFFSectionIndex SecIndex, Block *B) {
+    assert(!GraphBlocks[SecIndex] && "Duplicate section at index");
+    assert(!COFF::isReservedSectionNumber(SecIndex) && "Invalid section index");
+    GraphBlocks[SecIndex] = B;
+  }
+
+  Block *getGraphBlock(COFFSectionIndex SecIndex) const {
+    // Bounds-check against GraphBlocks, not GraphSymbols: the two vectors
+    // have different lengths, and checking the wrong one could let an
+    // out-of-range SecIndex read past the end of GraphBlocks.
+    if (SecIndex <= 0 ||
+        SecIndex >= static_cast<COFFSectionIndex>(GraphBlocks.size()))
+      return nullptr;
+    return GraphBlocks[SecIndex];
+  }
+
+  object::COFFObjectFile::section_iterator_range sections() const {
+    return Obj.sections();
+  }
+
+  /// Traverse all matching relocation records in the given section. The handler
+  /// function Func should be callable with this signature:
+  ///   Error(const object::RelocationRef&,
+  ///         const object::SectionRef&, Section &)
+  ///
+  template <typename RelocHandlerFunction>
+  Error forEachRelocation(const object::SectionRef &RelSec,
+                          RelocHandlerFunction &&Func,
+                          bool ProcessDebugSections = false);
+
+  /// Traverse all matching relocation records in the given section. Convenience
+  /// wrapper to allow passing a member function for the handler.
+  ///
+  template <typename ClassT, typename RelocHandlerMethod>
+  Error forEachRelocation(const object::SectionRef &RelSec, ClassT *Instance,
+                          RelocHandlerMethod &&Method,
+                          bool ProcessDebugSections = false) {
+    return forEachRelocation(
+        RelSec,
+        [Instance, Method](const auto &Rel, const auto &Target, auto &GS) {
+          return (Instance->*Method)(Rel, Target, GS);
+        },
+        ProcessDebugSections);
+  }
+
+private:
+  // Pending comdat symbol export that is initiated by the first symbol of
+  // COMDAT sequence.
+  struct ComdatExportRequest {
+    COFFSymbolIndex SymbolIndex;
+    jitlink::Linkage Linkage;
+    orc::ExecutorAddrDiff Size;
+  };
+  std::vector<std::optional<ComdatExportRequest>> PendingComdatExports;
+
+  // This represents a pending request to create a weak external symbol with a
+  // name.
+  struct WeakExternalRequest {
+    COFFSymbolIndex Alias;
+    COFFSymbolIndex Target;
+    uint32_t Characteristics;
+    StringRef SymbolName;
+  };
+  std::vector<WeakExternalRequest> WeakExternalRequests;
+
+  // Per COFF section jitlink symbol set sorted by offset.
+  // Used for calculating implicit size of defined symbols.
+  using SymbolSet = std::set<std::pair<orc::ExecutorAddrDiff, Symbol *>>;
+  std::vector<SymbolSet> SymbolSets;
+
+  Section &getCommonSection();
+
+  Symbol *createExternalSymbol(COFFSymbolIndex SymIndex, StringRef SymbolName,
+                               object::COFFSymbolRef Symbol,
+                               const object::coff_section *Section);
+  Expected<Symbol *> createAliasSymbol(StringRef SymbolName, Linkage L, Scope S,
+                                       Symbol &Target);
+  Expected<Symbol *> createDefinedSymbol(COFFSymbolIndex SymIndex,
+                                         StringRef SymbolName,
+                                         object::COFFSymbolRef Symbol,
+                                         const object::coff_section *Section);
+  Expected<Symbol *> createCOMDATExportRequest(
+      COFFSymbolIndex SymIndex, object::COFFSymbolRef Symbol,
+      const object::coff_aux_section_definition *Definition);
+  Expected<Symbol *> exportCOMDATSymbol(COFFSymbolIndex SymIndex,
+                                        StringRef SymbolName,
+                                        object::COFFSymbolRef Symbol);
+
+  Error handleDirectiveSection(StringRef Str);
+  Error flushWeakAliasRequests();
+  Error handleAlternateNames();
+  Error calculateImplicitSizeOfSymbols();
+
+  static uint64_t getSectionAddress(const object::COFFObjectFile &Obj,
+                                    const object::coff_section *Section);
+  static uint64_t getSectionSize(const object::COFFObjectFile &Obj,
+                                 const object::coff_section *Section);
+  static bool isComdatSection(const object::coff_section *Section);
+  static unsigned getPointerSize(const object::COFFObjectFile &Obj);
+  static llvm::endianness getEndianness(const object::COFFObjectFile &Obj);
+  static StringRef getDLLImportStubPrefix() { return "__imp_"; }
+  static StringRef getDirectiveSectionName() { return ".drectve"; }
+  StringRef getCOFFSectionName(COFFSectionIndex SectionIndex,
+                               const object::coff_section *Sec,
+                               object::COFFSymbolRef Sym);
+
+  const object::COFFObjectFile &Obj;
+  std::unique_ptr<LinkGraph> G;
+  COFFDirectiveParser DirectiveParser;
+
+  Section *CommonSection = nullptr;
+  // Indexed by 1-based COFF section number / symbol table index respectively.
+  std::vector<Block *> GraphBlocks;
+  std::vector<Symbol *> GraphSymbols;
+
+  DenseMap<StringRef, StringRef> AlternateNames;
+  DenseMap<StringRef, Symbol *> ExternalSymbols;
+  DenseMap<StringRef, Symbol *> DefinedSymbols;
+};
+
+// Iterate all relocation records of \p RelSec and invoke \p Func on each,
+// resolving the section to its graph block first.
+template <typename RelocHandlerFunction>
+Error COFFLinkGraphBuilder::forEachRelocation(const object::SectionRef &RelSec,
+                                              RelocHandlerFunction &&Func,
+                                              bool ProcessDebugSections) {
+
+  auto COFFRelSect = Obj.getCOFFSection(RelSec);
+
+  // Target sections have names in valid COFF object files.
+  Expected<StringRef> Name = Obj.getSectionName(COFFRelSect);
+  if (!Name)
+    return Name.takeError();
+
+  // Skip the unhandled metadata sections.
+  if (*Name == ".voltbl")
+    return Error::success();
+  LLVM_DEBUG(dbgs() << "  " << *Name << ":\n");
+
+  // Lookup the link-graph node corresponding to the target section name.
+  // Object iterators are 0-based while COFF section numbers are 1-based,
+  // hence the +1.
+  auto *BlockToFix = getGraphBlock(RelSec.getIndex() + 1);
+  if (!BlockToFix)
+    return make_error<StringError>(
+        "Referencing a section that wasn't added to the graph: " + *Name,
+        inconvertibleErrorCode());
+
+  // Let the callee process relocation entries one by one.
+  for (const auto &R : RelSec.relocations())
+    if (Error Err = Func(R, RelSec, *BlockToFix))
+      return Err;
+
+  LLVM_DEBUG(dbgs() << "\n");
+  return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_COFFLINKGRAPHBUILDER_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFOptions.td b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFOptions.td
new file mode 100644
index 000000000000..0a0ce2fc76dd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFOptions.td
@@ -0,0 +1,21 @@
+include "llvm/Option/OptParser.td"
+
+// link.exe accepts options starting with either a dash or a slash.
+
+// Flag that takes no arguments.
+class F<string name> : Flag<["/", "-", "/?", "-?"], name>;
+
+// Flag that takes one argument after ":".
+class P<string name> :
+      Joined<["/", "-", "/?", "-?"], name#":">;
+
+// Boolean flag which can be suffixed by ":no". Using it unsuffixed turns the
+// flag on and using it suffixed by ":no" turns it off.
+multiclass B_priv<string name> {
+  def "" : F<name>;
+  def _no : F<name#":no">;
+}
+
+// /export:<spec> — recognized but currently ignored by the directive handler.
+def export : P<"export">;
+// /alternatename:<from>=<to> — registers a weak alias from one name to another.
+def alternatename : P<"alternatename">;
+// /include:<symbol> — forces the named symbol to be treated as live. The
+// record is named "incl" because "include" is a TableGen directive keyword.
+def incl : Joined<["/", "-", "/?", "-?"], "include:">; \ No newline at end of file
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp
new file mode 100644
index 000000000000..3257a2ae94f6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp
@@ -0,0 +1,353 @@
+//===----- COFF_x86_64.cpp - JIT linker implementation for COFF/x86_64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF/x86_64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/COFF_x86_64.h"
+#include "COFFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+#include "SEHFrameSupport.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+
+// COFF/x86_64-specific edge kinds; lowered to generic x86_64 edges by
+// lowerEdges_COFF_x86_64 before fixups are applied.
+enum EdgeKind_coff_x86_64 : Edge::Kind {
+  PCRel32 = x86_64::FirstPlatformRelocation, // IMAGE_REL_AMD64_REL32[_1..5]
+  Pointer32NB,  // IMAGE_REL_AMD64_ADDR32NB (32-bit image-base-relative)
+  Pointer64,    // IMAGE_REL_AMD64_ADDR64 (64-bit absolute)
+  SectionIdx16, // IMAGE_REL_AMD64_SECTION (16-bit section index)
+  SecRel32,     // IMAGE_REL_AMD64_SECREL (32-bit section-relative offset)
+};
+
+// Concrete JITLinker for COFF/x86_64. By the time fixups run, all
+// COFF-specific edges have been lowered, so fixups delegate directly to the
+// generic x86_64 implementation.
+class COFFJITLinker_x86_64 : public JITLinker<COFFJITLinker_x86_64> {
+  friend class JITLinker<COFFJITLinker_x86_64>;
+
+public:
+  COFFJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+                       std::unique_ptr<LinkGraph> G,
+                       PassConfiguration PassConfig)
+      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+  // Apply a single edge fixup using the generic x86_64 rules (no GOT/PLT
+  // table manager is supplied here, hence the nullptr).
+  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+    return x86_64::applyFixup(G, B, E, nullptr);
+  }
+};
+
+// x86_64 specialization of the COFF graph builder: decodes AMD64 relocation
+// records into COFF-specific graph edges.
+class COFFLinkGraphBuilder_x86_64 : public COFFLinkGraphBuilder {
+private:
+  // Walk every section's relocation records and hand each to
+  // addSingleRelocation.
+  Error addRelocations() override {
+    LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+    for (const auto &RelSect : sections())
+      if (Error Err = COFFLinkGraphBuilder::forEachRelocation(
+              RelSect, this, &COFFLinkGraphBuilder_x86_64::addSingleRelocation))
+        return Err;
+
+    return Error::success();
+  }
+
+  // Decode one relocation record: resolve its target symbol, read the
+  // in-place addend from the fixup location, and attach the resulting edge
+  // to BlockToFix.
+  Error addSingleRelocation(const object::RelocationRef &Rel,
+                            const object::SectionRef &FixupSect,
+                            Block &BlockToFix) {
+    const object::coff_relocation *COFFRel = getObject().getCOFFRelocation(Rel);
+    auto SymbolIt = Rel.getSymbol();
+    if (SymbolIt == getObject().symbol_end()) {
+      return make_error<StringError>(
+          formatv("Invalid symbol index in relocation entry. "
+                  "index: {0}, section: {1}",
+                  COFFRel->SymbolTableIndex, FixupSect.getIndex()),
+          inconvertibleErrorCode());
+    }
+
+    object::COFFSymbolRef COFFSymbol = getObject().getCOFFSymbol(*SymbolIt);
+    COFFSymbolIndex SymIndex = getObject().getSymbolIndex(COFFSymbol);
+
+    Symbol *GraphSymbol = getGraphSymbol(SymIndex);
+    if (!GraphSymbol)
+      return make_error<StringError>(
+          formatv("Could not find symbol at given index, did you add it to "
+                  "JITSymbolTable? index: {0}, section: {1}",
+                  SymIndex, FixupSect.getIndex()),
+          inconvertibleErrorCode());
+
+    int64_t Addend = 0;
+    orc::ExecutorAddr FixupAddress =
+        orc::ExecutorAddr(FixupSect.getAddress()) + Rel.getOffset();
+    Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+
+    Edge::Kind Kind = Edge::Invalid;
+    const char *FixupPtr = BlockToFix.getContent().data() + Offset;
+
+    switch (Rel.getType()) {
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_ADDR32NB: {
+      Kind = EdgeKind_coff_x86_64::Pointer32NB;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32: {
+      Kind = EdgeKind_coff_x86_64::PCRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      break;
+    }
+    // REL32_N variants are PC-relative to a point N bytes past the fixup;
+    // fold the distance into the addend so one PCRel32 kind covers them all.
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32_1: {
+      Kind = EdgeKind_coff_x86_64::PCRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      Addend -= 1;
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32_2: {
+      Kind = EdgeKind_coff_x86_64::PCRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      Addend -= 2;
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32_3: {
+      Kind = EdgeKind_coff_x86_64::PCRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      Addend -= 3;
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32_4: {
+      Kind = EdgeKind_coff_x86_64::PCRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      Addend -= 4;
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32_5: {
+      Kind = EdgeKind_coff_x86_64::PCRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      Addend -= 5;
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_ADDR64: {
+      Kind = EdgeKind_coff_x86_64::Pointer64;
+      Addend = *reinterpret_cast<const support::little64_t *>(FixupPtr);
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_SECTION: {
+      // The fixup wants the 16-bit section index itself; synthesize a local
+      // absolute symbol holding that index as the edge target. Absolute
+      // symbols get the pseudo-index one past the last real section.
+      Kind = EdgeKind_coff_x86_64::SectionIdx16;
+      Addend = *reinterpret_cast<const support::little16_t *>(FixupPtr);
+      uint64_t SectionIdx = 0;
+      if (COFFSymbol.isAbsolute())
+        SectionIdx = getObject().getNumberOfSections() + 1;
+      else
+        SectionIdx = COFFSymbol.getSectionNumber();
+      auto *AbsSym = &getGraph().addAbsoluteSymbol(
+          "secidx", orc::ExecutorAddr(SectionIdx), 2, Linkage::Strong,
+          Scope::Local, false);
+      GraphSymbol = AbsSym;
+      break;
+    }
+    case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_SECREL: {
+      // FIXME: SECREL to external symbol should be handled
+      if (!GraphSymbol->isDefined())
+        return Error::success();
+      Kind = EdgeKind_coff_x86_64::SecRel32;
+      Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+      break;
+    }
+    default: {
+      return make_error<JITLinkError>("Unsupported x86_64 relocation:" +
+                                      formatv("{0:d}", Rel.getType()));
+    }
+    };
+
+    Edge GE(Kind, Offset, *GraphSymbol, Addend);
+    LLVM_DEBUG({
+      dbgs() << "    ";
+      printEdge(dbgs(), BlockToFix, GE, getCOFFX86RelocationKindName(Kind));
+      dbgs() << "\n";
+    });
+
+    BlockToFix.addEdge(std::move(GE));
+
+    return Error::success();
+  }
+
+public:
+  COFFLinkGraphBuilder_x86_64(const object::COFFObjectFile &Obj, const Triple T,
+                              const SubtargetFeatures Features)
+      : COFFLinkGraphBuilder(Obj, std::move(T), std::move(Features),
+                             getCOFFX86RelocationKindName) {}
+};
+
+// Rewrites COFF-specific edge kinds into generic x86_64 edges, adjusting
+// addends where the COFF semantics differ (image-base-relative and
+// section-relative forms).
+class COFFLinkGraphLowering_x86_64 {
+public:
+  // Lowers COFF x86_64 specific edges to generic x86_64 edges.
+  Error lowerCOFFRelocationEdges(LinkGraph &G, JITLinkContext &Ctx) {
+    for (auto *B : G.blocks()) {
+      for (auto &E : B->edges()) {
+        switch (E.getKind()) {
+        case EdgeKind_coff_x86_64::Pointer32NB: {
+          // ADDR32NB is relative to __ImageBase: bake the negative image
+          // base into the addend, then treat as a plain 32-bit pointer.
+          auto ImageBase = getImageBaseAddress(G, Ctx);
+          if (!ImageBase)
+            return ImageBase.takeError();
+          E.setAddend(E.getAddend() - ImageBase->getValue());
+          E.setKind(x86_64::Pointer32);
+          break;
+        }
+        case EdgeKind_coff_x86_64::PCRel32: {
+          E.setKind(x86_64::PCRel32);
+          break;
+        }
+        case EdgeKind_coff_x86_64::Pointer64: {
+          E.setKind(x86_64::Pointer64);
+          break;
+        }
+        case EdgeKind_coff_x86_64::SectionIdx16: {
+          E.setKind(x86_64::Pointer16);
+          break;
+        }
+        case EdgeKind_coff_x86_64::SecRel32: {
+          // SECREL is relative to the start of the target's section.
+          E.setAddend(E.getAddend() -
+                      getSectionStart(E.getTarget().getBlock().getSection())
+                          .getValue());
+          E.setKind(x86_64::Pointer32);
+          break;
+        }
+        default:
+          break;
+        }
+      }
+    }
+    return Error::success();
+  }
+
+private:
+  static StringRef getImageBaseSymbolName() { return "__ImageBase"; }
+
+  // Cached per-section start address (SectionRange computation is not free).
+  orc::ExecutorAddr getSectionStart(Section &Sec) {
+    if (!SectionStartCache.count(&Sec)) {
+      SectionRange Range(Sec);
+      SectionStartCache[&Sec] = Range.getStart();
+    }
+    return SectionStartCache[&Sec];
+  }
+
+  // Resolve __ImageBase lazily: first from the graph's own defined symbols,
+  // otherwise via a context lookup. The result is cached for later edges.
+  Expected<orc::ExecutorAddr> getImageBaseAddress(LinkGraph &G,
+                                                  JITLinkContext &Ctx) {
+    if (this->ImageBase)
+      return this->ImageBase;
+    for (auto *S : G.defined_symbols())
+      if (S->getName() == getImageBaseSymbolName()) {
+        this->ImageBase = S->getAddress();
+        return this->ImageBase;
+      }
+
+    JITLinkContext::LookupMap Symbols;
+    Symbols[getImageBaseSymbolName()] = SymbolLookupFlags::RequiredSymbol;
+    orc::ExecutorAddr ImageBase;
+    Error Err = Error::success();
+    // NOTE: the continuation is assumed to run before Ctx.lookup returns
+    // here; Err is declared before the call so ErrorAsOutParameter can mark
+    // it checked either way.
+    Ctx.lookup(Symbols,
+               createLookupContinuation([&](Expected<AsyncLookupResult> LR) {
+                 ErrorAsOutParameter EAO(&Err);
+                 if (!LR) {
+                   Err = LR.takeError();
+                   return;
+                 }
+                 ImageBase = LR->begin()->second.getAddress();
+               }));
+    if (Err)
+      return std::move(Err);
+    this->ImageBase = ImageBase;
+    return ImageBase;
+  }
+
+  DenseMap<Section *, orc::ExecutorAddr> SectionStartCache;
+  orc::ExecutorAddr ImageBase;
+};
+
+// Pre-fixup pass entry point: lower all COFF-specific edge kinds in G to
+// their generic x86_64 equivalents.
+Error lowerEdges_COFF_x86_64(LinkGraph &G, JITLinkContext *Ctx) {
+  LLVM_DEBUG(dbgs() << "Lowering COFF x86_64 edges:\n");
+  COFFLinkGraphLowering_x86_64 Lowering;
+  return Lowering.lowerCOFFRelocationEdges(G, *Ctx);
+}
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+/// Return the string name of the given COFF x86_64 edge kind.
+const char *getCOFFX86RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case PCRel32:
+ return "PCRel32";
+ case Pointer32NB:
+ return "Pointer32NB";
+ case Pointer64:
+ return "Pointer64";
+ case SectionIdx16:
+ return "SectionIdx16";
+ case SecRel32:
+ return "SecRel32";
+ default:
+ return x86_64::getEdgeKindName(R);
+ }
+}
+
+/// Parse \p ObjectBuffer as a COFF/x86_64 object file and build a LinkGraph
+/// from it, propagating any parse or feature-detection errors.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromCOFFObject_x86_64(MemoryBufferRef ObjectBuffer) {
+  LLVM_DEBUG({
+    dbgs() << "Building jitlink graph for new input "
+           << ObjectBuffer.getBufferIdentifier() << "...\n";
+  });
+
+  auto Obj = object::ObjectFile::createCOFFObjectFile(ObjectBuffer);
+  if (!Obj)
+    return Obj.takeError();
+
+  auto Features = (*Obj)->getFeatures();
+  if (!Features)
+    return Features.takeError();
+
+  COFFLinkGraphBuilder_x86_64 Builder(**Obj, (*Obj)->makeTriple(),
+                                      std::move(*Features));
+  return Builder.buildGraph();
+}
+
+// Run the COFF/x86_64 link: install default passes (liveness marking,
+// .pdata keep-alive, COFF edge lowering), let the context adjust the pass
+// configuration, then hand off to the concrete JITLinker.
+void link_COFF_x86_64(std::unique_ptr<LinkGraph> G,
+                      std::unique_ptr<JITLinkContext> Ctx) {
+  PassConfiguration Config;
+  const Triple &TT = G->getTargetTriple();
+  if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+    // Add a mark-live pass.
+    if (auto MarkLive = Ctx->getMarkLivePass(TT)) {
+      Config.PrePrunePasses.push_back(std::move(MarkLive));
+      // Keep SEH unwind info alive alongside the functions it describes.
+      Config.PrePrunePasses.push_back(SEHFrameKeepAlivePass(".pdata"));
+    } else
+      Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+    // Add COFF edge lowering passes.
+    // The raw context pointer is captured because Ctx is moved into the
+    // linker below; the linker owns the context for the pass's lifetime.
+    JITLinkContext *CtxPtr = Ctx.get();
+    Config.PreFixupPasses.push_back(
+        [CtxPtr](LinkGraph &G) { return lowerEdges_COFF_x86_64(G, CtxPtr); });
+  }
+
+  if (auto Err = Ctx->modifyPassConfig(*G, Config))
+    return Ctx->notifyFailed(std::move(Err));
+
+  COFFJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp
new file mode 100644
index 000000000000..2a60d8206f63
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.cpp
@@ -0,0 +1,117 @@
+//===-------- JITLink_DWARFRecordSectionSplitter.cpp - JITLink-------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/Support/BinaryStreamReader.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
// Remember the name of the section whose CFI records should be split into
// one block per record.
DWARFRecordSectionSplitter::DWARFRecordSectionSplitter(StringRef SectionName)
    : SectionName(SectionName) {}
+
+Error DWARFRecordSectionSplitter::operator()(LinkGraph &G) {
+ auto *Section = G.findSectionByName(SectionName);
+
+ if (!Section) {
+ LLVM_DEBUG({
+ dbgs() << "DWARFRecordSectionSplitter: No " << SectionName
+ << " section. Nothing to do\n";
+ });
+ return Error::success();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "DWARFRecordSectionSplitter: Processing " << SectionName
+ << "...\n";
+ });
+
+ DenseMap<Block *, LinkGraph::SplitBlockCache> Caches;
+
+ {
+ // Pre-build the split caches.
+ for (auto *B : Section->blocks())
+ Caches[B] = LinkGraph::SplitBlockCache::value_type();
+ for (auto *Sym : Section->symbols())
+ Caches[&Sym->getBlock()]->push_back(Sym);
+ for (auto *B : Section->blocks())
+ llvm::sort(*Caches[B], [](const Symbol *LHS, const Symbol *RHS) {
+ return LHS->getOffset() > RHS->getOffset();
+ });
+ }
+
+ // Iterate over blocks (we do this by iterating over Caches entries rather
+ // than Section->blocks() as we will be inserting new blocks along the way,
+ // which would invalidate iterators in the latter sequence.
+ for (auto &KV : Caches) {
+ auto &B = *KV.first;
+ auto &BCache = KV.second;
+ if (auto Err = processBlock(G, B, BCache))
+ return Err;
+ }
+
+ return Error::success();
+}
+
// Walk one block's content record-by-record, splitting each leading record
// into its own block. Records are framed by a 32-bit length field, or by a
// 64-bit extended length when the 32-bit field is 0xffffffff.
Error DWARFRecordSectionSplitter::processBlock(
    LinkGraph &G, Block &B, LinkGraph::SplitBlockCache &Cache) {
  LLVM_DEBUG(dbgs() << "  Processing block at " << B.getAddress() << "\n");

  // Section should not contain zero-fill blocks.
  if (B.isZeroFill())
    return make_error<JITLinkError>("Unexpected zero-fill block in " +
                                    SectionName + " section");

  if (B.getSize() == 0) {
    LLVM_DEBUG(dbgs() << "    Block is empty. Skipping.\n");
    return Error::success();
  }

  BinaryStreamReader BlockReader(
      StringRef(B.getContent().data(), B.getContent().size()),
      G.getEndianness());

  while (true) {
    // Start of the record currently being measured.
    uint64_t RecordStartOffset = BlockReader.getOffset();

    LLVM_DEBUG({
      dbgs() << "  Processing CFI record at "
             << formatv("{0:x16}", B.getAddress()) << "\n";
    });

    // Read the length field; 0xffffffff signals a 64-bit extended length.
    uint32_t Length;
    if (auto Err = BlockReader.readInteger(Length))
      return Err;
    if (Length != 0xffffffff) {
      if (auto Err = BlockReader.skip(Length))
        return Err;
    } else {
      uint64_t ExtendedLength;
      if (auto Err = BlockReader.readInteger(ExtendedLength))
        return Err;
      if (auto Err = BlockReader.skip(ExtendedLength))
        return Err;
    }

    // If this was the last block then there's nothing to split
    if (BlockReader.empty()) {
      LLVM_DEBUG(dbgs() << "    Extracted " << B << "\n");
      return Error::success();
    }

    // Split the record just scanned off the front of B. After splitBlock,
    // B holds the remaining (unscanned) records and the loop continues on
    // it -- NOTE(review): relies on LinkGraph::splitBlock keeping the tail
    // in B; confirm against the LinkGraph API.
    uint64_t BlockSize = BlockReader.getOffset() - RecordStartOffset;
    auto &NewBlock = G.splitBlock(B, BlockSize, &Cache);
    (void)NewBlock;
    LLVM_DEBUG(dbgs() << "    Extracted " << NewBlock << "\n");
  }
}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
new file mode 100644
index 000000000000..4a492ee2f7d0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
@@ -0,0 +1,158 @@
+//===--------- DefineExternalSectionStartAndEndSymbols.h --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility class for recognizing external section start and end symbols and
+// transforming them into defined symbols for the start and end blocks of the
+// associated Section.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_DEFINEEXTERNALSECTIONSTARTANDENDSYMBOLS_H
+#define LLVM_EXECUTIONENGINE_JITLINK_DEFINEEXTERNALSECTIONSTARTANDENDSYMBOLS_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
/// Describes a section range symbol: the target section and whether the
/// symbol refers to the section's start or end. A default-constructed value
/// (Sec == nullptr) means "not a section range symbol".
struct SectionRangeSymbolDesc {
  SectionRangeSymbolDesc() = default;
  SectionRangeSymbolDesc(Section &Sec, bool IsStart)
      : Sec(&Sec), IsStart(IsStart) {}
  Section *Sec = nullptr; // Target section, or null if not a range symbol.
  bool IsStart = false;   // True => start-of-section, false => end.
};
+
+/// Pass implementation for the createDefineExternalSectionStartAndEndSymbols
+/// function.
template <typename SymbolIdentifierFunction>
class DefineExternalSectionStartAndEndSymbols {
public:
  // F classifies each external symbol as a section start/end symbol (or not).
  DefineExternalSectionStartAndEndSymbols(SymbolIdentifierFunction F)
      : F(std::move(F)) {}

  // For each external symbol that F identifies as a section range symbol,
  // redefine it: start symbols point at offset 0 of the section's first
  // block, end symbols at the end of its last block. Range symbols for empty
  // sections become absolute symbols at address zero.
  Error operator()(LinkGraph &G) {

    // This pass will affect the external symbols set, so copy them out into a
    // vector and iterate over that.
    std::vector<Symbol *> Externals(G.external_symbols().begin(),
                                    G.external_symbols().end());

    for (auto *Sym : Externals) {
      SectionRangeSymbolDesc D = F(G, *Sym);
      if (D.Sec) {
        auto &SR = getSectionRange(*D.Sec);
        if (D.IsStart) {
          if (SR.empty())
            G.makeAbsolute(*Sym, orc::ExecutorAddr());
          else
            G.makeDefined(*Sym, *SR.getFirstBlock(), 0, 0, Linkage::Strong,
                          Scope::Local, false);
        } else {
          if (SR.empty())
            G.makeAbsolute(*Sym, orc::ExecutorAddr());
          else
            G.makeDefined(*Sym, *SR.getLastBlock(),
                          SR.getLastBlock()->getSize(), 0, Linkage::Strong,
                          Scope::Local, false);
        }
      }
    }
    return Error::success();
  }

private:
  // Compute each section's range at most once, caching the result.
  SectionRange &getSectionRange(Section &Sec) {
    auto I = SectionRanges.find(&Sec);
    if (I == SectionRanges.end())
      I = SectionRanges.insert(std::make_pair(&Sec, SectionRange(Sec))).first;
    return I->second;
  }

  DenseMap<Section *, SectionRange> SectionRanges; // Cache for getSectionRange.
  SymbolIdentifierFunction F;                      // Symbol classifier.
};
+
+/// Returns a JITLink pass (as a function class) that uses the given symbol
+/// identification function to identify external section start and end symbols
+/// (and their associated Section*s) and transform the identified externals
+/// into defined symbols pointing to the start of the first block in the
+/// section and the end of the last (start and end symbols for empty sections
+/// will be transformed into absolute symbols at address 0).
+///
+/// The identification function should be callable as
+///
+/// SectionRangeSymbolDesc (LinkGraph &G, Symbol &Sym)
+///
+/// If Sym is not a section range start or end symbol then a default
+/// constructed SectionRangeSymbolDesc should be returned. If Sym is a start
+/// symbol then SectionRangeSymbolDesc(Sec, true), where Sec is a reference to
+/// the target Section. If Sym is an end symbol then
+/// SectionRangeSymbolDesc(Sec, false) should be returned.
+///
+/// This pass should be run in the PostAllocationPass pipeline, at which point
+/// all blocks should have been assigned their final addresses.
+template <typename SymbolIdentifierFunction>
+DefineExternalSectionStartAndEndSymbols<SymbolIdentifierFunction>
+createDefineExternalSectionStartAndEndSymbolsPass(
+ SymbolIdentifierFunction &&F) {
+ return DefineExternalSectionStartAndEndSymbols<SymbolIdentifierFunction>(
+ std::forward<SymbolIdentifierFunction>(F));
+}
+
+/// ELF section start/end symbol detection.
+inline SectionRangeSymbolDesc
+identifyELFSectionStartAndEndSymbols(LinkGraph &G, Symbol &Sym) {
+ constexpr StringRef StartSymbolPrefix = "__start_";
+ constexpr StringRef EndSymbolPrefix = "__stop_";
+
+ auto SymName = Sym.getName();
+ if (SymName.starts_with(StartSymbolPrefix)) {
+ if (auto *Sec =
+ G.findSectionByName(SymName.drop_front(StartSymbolPrefix.size())))
+ return {*Sec, true};
+ } else if (SymName.starts_with(EndSymbolPrefix)) {
+ if (auto *Sec =
+ G.findSectionByName(SymName.drop_front(EndSymbolPrefix.size())))
+ return {*Sec, false};
+ }
+ return {};
+}
+
+/// MachO section start/end symbol detection.
+inline SectionRangeSymbolDesc
+identifyMachOSectionStartAndEndSymbols(LinkGraph &G, Symbol &Sym) {
+ constexpr StringRef StartSymbolPrefix = "section$start$";
+ constexpr StringRef EndSymbolPrefix = "section$end$";
+
+ auto SymName = Sym.getName();
+ if (SymName.starts_with(StartSymbolPrefix)) {
+ auto [SegName, SecName] =
+ SymName.drop_front(StartSymbolPrefix.size()).split('$');
+ std::string SectionName = (SegName + "," + SecName).str();
+ if (auto *Sec = G.findSectionByName(SectionName))
+ return {*Sec, true};
+ } else if (SymName.starts_with(EndSymbolPrefix)) {
+ auto [SegName, SecName] =
+ SymName.drop_front(EndSymbolPrefix.size()).split('$');
+ std::string SectionName = (SegName + "," + SecName).str();
+ if (auto *Sec = G.findSectionByName(SectionName))
+ return {*Sec, false};
+ }
+ return {};
+}
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_DEFINEEXTERNALSECTIONSTARTANDENDSYMBOLS_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
new file mode 100644
index 000000000000..c11577b03fd7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
@@ -0,0 +1,717 @@
+//===-------- JITLink_EHFrameSupport.cpp - JITLink eh-frame utils ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "EHFrameSupportImpl.h"
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
// Construct an edge fixer for the named eh-frame section. The edge kinds are
// target-specific; per the class contract, Edge::Invalid is passed for kinds
// the target does not support.
EHFrameEdgeFixer::EHFrameEdgeFixer(StringRef EHFrameSectionName,
                                   unsigned PointerSize, Edge::Kind Pointer32,
                                   Edge::Kind Pointer64, Edge::Kind Delta32,
                                   Edge::Kind Delta64, Edge::Kind NegDelta32)
    : EHFrameSectionName(EHFrameSectionName), PointerSize(PointerSize),
      Pointer32(Pointer32), Pointer64(Pointer64), Delta32(Delta32),
      Delta64(Delta64), NegDelta32(NegDelta32) {}
+
// Pass entry point: index the graph's symbols and blocks, then process each
// eh-frame block (CIEs first, since they sort before their FDEs by address).
Error EHFrameEdgeFixer::operator()(LinkGraph &G) {
  auto *EHFrame = G.findSectionByName(EHFrameSectionName);

  if (!EHFrame) {
    LLVM_DEBUG({
      dbgs() << "EHFrameEdgeFixer: No " << EHFrameSectionName
             << " section in \"" << G.getName() << "\". Nothing to do.\n";
    });
    return Error::success();
  }

  // Check that we support the graph's pointer size.
  if (G.getPointerSize() != 4 && G.getPointerSize() != 8)
    return make_error<JITLinkError>(
        "EHFrameEdgeFixer only supports 32 and 64 bit targets");

  LLVM_DEBUG({
    dbgs() << "EHFrameEdgeFixer: Processing " << EHFrameSectionName << " in \""
           << G.getName() << "\"...\n";
  });

  ParseContext PC(G);

  // Build a map of all blocks and symbols in the text sections. We will use
  // these for finding / building edge targets when processing FDEs.
  for (auto &Sec : G.sections()) {
    // Just record the most-canonical symbol (for eh-frame purposes) at each
    // address. The lexicographic tuple compare prefers lower-valued linkage
    // and scope enum values, then named symbols over anonymous ones
    // (!hasName() is false for named symbols), then name order -- making the
    // choice deterministic when several symbols share an address.
    for (auto *Sym : Sec.symbols()) {
      auto &CurSym = PC.AddrToSym[Sym->getAddress()];
      if (!CurSym || (std::make_tuple(Sym->getLinkage(), Sym->getScope(),
                                      !Sym->hasName(), Sym->getName()) <
                      std::make_tuple(CurSym->getLinkage(), CurSym->getScope(),
                                      !CurSym->hasName(), CurSym->getName())))
        CurSym = Sym;
    }
    if (auto Err = PC.AddrToBlock.addBlocks(Sec.blocks(),
                                            BlockAddressMap::includeNonNull))
      return Err;
  }

  // Sort eh-frame blocks into address order to ensure we visit CIEs before
  // their child FDEs.
  std::vector<Block *> EHFrameBlocks;
  for (auto *B : EHFrame->blocks())
    EHFrameBlocks.push_back(B);
  llvm::sort(EHFrameBlocks, [](const Block *LHS, const Block *RHS) {
    return LHS->getAddress() < RHS->getAddress();
  });

  // Loop over the blocks in address order.
  for (auto *B : EHFrameBlocks)
    if (auto Err = processBlock(PC, *B))
      return Err;

  return Error::success();
}
+
+static Expected<size_t> readCFIRecordLength(const Block &B,
+ BinaryStreamReader &R) {
+ uint32_t Length;
+ if (auto Err = R.readInteger(Length))
+ return std::move(Err);
+
+ // If Length < 0xffffffff then use the regular length field, otherwise
+ // read the extended length field.
+ if (Length != 0xffffffff)
+ return Length;
+
+ uint64_t ExtendedLength;
+ if (auto Err = R.readInteger(ExtendedLength))
+ return std::move(Err);
+
+ if (ExtendedLength > std::numeric_limits<size_t>::max())
+ return make_error<JITLinkError>(
+ "In CFI record at " +
+ formatv("{0:x}", B.getAddress() + R.getOffset() - 12) +
+ ", extended length of " + formatv("{0:x}", ExtendedLength) +
+ " exceeds address-range max (" +
+ formatv("{0:x}", std::numeric_limits<size_t>::max()));
+
+ return ExtendedLength;
+}
+
// Classify one eh-frame block (already split to a single CFI record) as a
// CIE or FDE and dispatch to the matching handler. Also collects the block's
// pre-existing relocation edges by offset so the handlers can reuse them.
Error EHFrameEdgeFixer::processBlock(ParseContext &PC, Block &B) {

  LLVM_DEBUG(dbgs() << "  Processing block at " << B.getAddress() << "\n");

  // eh-frame should not contain zero-fill blocks.
  if (B.isZeroFill())
    return make_error<JITLinkError>("Unexpected zero-fill block in " +
                                    EHFrameSectionName + " section");

  if (B.getSize() == 0) {
    LLVM_DEBUG(dbgs() << "    Block is empty. Skipping.\n");
    return Error::success();
  }

  // Find the offsets of any existing edges from this block.
  BlockEdgesInfo BlockEdges;
  for (auto &E : B.edges())
    if (E.isRelocation()) {
      // Check if we already saw more than one relocation at this offset.
      if (BlockEdges.Multiple.contains(E.getOffset()))
        continue;

      // Otherwise check if we previously had exactly one relocation at this
      // offset. If so, we now have a second one and move it from the TargetMap
      // into the Multiple set.
      auto It = BlockEdges.TargetMap.find(E.getOffset());
      if (It != BlockEdges.TargetMap.end()) {
        BlockEdges.TargetMap.erase(It);
        BlockEdges.Multiple.insert(E.getOffset());
      } else {
        BlockEdges.TargetMap[E.getOffset()] = EdgeTarget(E);
      }
    }

  BinaryStreamReader BlockReader(
      StringRef(B.getContent().data(), B.getContent().size()),
      PC.G.getEndianness());

  // Get the record length.
  Expected<size_t> RecordRemaining = readCFIRecordLength(B, BlockReader);
  if (!RecordRemaining)
    return RecordRemaining.takeError();

  // We expect DWARFRecordSectionSplitter to split each CFI record into its own
  // block.
  if (BlockReader.bytesRemaining() != *RecordRemaining)
    return make_error<JITLinkError>("Incomplete CFI record at " +
                                    formatv("{0:x16}", B.getAddress()));

  // Read the CIE delta for this record.
  uint64_t CIEDeltaFieldOffset = BlockReader.getOffset();
  uint32_t CIEDelta;
  if (auto Err = BlockReader.readInteger(CIEDelta))
    return Err;

  // A zero CIE-delta marks a CIE record; any other value marks an FDE.
  if (CIEDelta == 0) {
    if (auto Err = processCIE(PC, B, CIEDeltaFieldOffset, BlockEdges))
      return Err;
  } else {
    if (auto Err = processFDE(PC, B, CIEDeltaFieldOffset, CIEDelta, BlockEdges))
      return Err;
  }

  return Error::success();
}
+
// Parse a CIE record: validate the version, parse the augmentation string,
// record the LSDA/address encodings, add a personality edge if needed, and
// register the resulting CIEInformation in the parse context so later FDEs
// can find it by address.
Error EHFrameEdgeFixer::processCIE(ParseContext &PC, Block &B,
                                   size_t CIEDeltaFieldOffset,
                                   const BlockEdgesInfo &BlockEdges) {

  LLVM_DEBUG(dbgs() << "  Record is CIE\n");

  BinaryStreamReader RecordReader(
      StringRef(B.getContent().data(), B.getContent().size()),
      PC.G.getEndianness());

  // Skip past the CIE delta field: we've already processed this far.
  RecordReader.setOffset(CIEDeltaFieldOffset + 4);

  // Anchor symbol covering the whole CIE; FDEs will point back at it.
  auto &CIESymbol = PC.G.addAnonymousSymbol(B, 0, B.getSize(), false, false);
  CIEInformation CIEInfo(CIESymbol);

  uint8_t Version = 0;
  if (auto Err = RecordReader.readInteger(Version))
    return Err;

  // Only eh-frame CIE version 1 is supported here.
  if (Version != 0x01)
    return make_error<JITLinkError>("Bad CIE version " + Twine(Version) +
                                    " (should be 0x01) in eh-frame");

  auto AugInfo = parseAugmentationString(RecordReader);
  if (!AugInfo)
    return AugInfo.takeError();

  // Skip the EH Data field if present.
  if (AugInfo->EHDataFieldPresent)
    if (auto Err = RecordReader.skip(PC.G.getPointerSize()))
      return Err;

  // Read and validate the code alignment factor.
  {
    uint64_t CodeAlignmentFactor = 0;
    if (auto Err = RecordReader.readULEB128(CodeAlignmentFactor))
      return Err;
  }

  // Read and validate the data alignment factor.
  {
    int64_t DataAlignmentFactor = 0;
    if (auto Err = RecordReader.readSLEB128(DataAlignmentFactor))
      return Err;
  }

  // Skip the return address register field.
  if (auto Err = RecordReader.skip(1))
    return Err;

  if (AugInfo->AugmentationDataPresent) {

    CIEInfo.AugmentationDataPresent = true;

    uint64_t AugmentationDataLength = 0;
    if (auto Err = RecordReader.readULEB128(AugmentationDataLength))
      return Err;

    uint32_t AugmentationDataStartOffset = RecordReader.getOffset();

    // Walk the augmentation field characters ('L'/'P'/'R') recorded by
    // parseAugmentationString, in string order.
    uint8_t *NextField = &AugInfo->Fields[0];
    while (uint8_t Field = *NextField++) {
      switch (Field) {
      case 'L':
        // LSDA pointer encoding for this CIE's FDEs.
        CIEInfo.LSDAPresent = true;
        if (auto PE = readPointerEncoding(RecordReader, B, "LSDA"))
          CIEInfo.LSDAEncoding = *PE;
        else
          return PE.takeError();
        break;
      case 'P': {
        // Personality routine: add (or reuse) an edge to it right here.
        auto PersonalityPointerEncoding =
            readPointerEncoding(RecordReader, B, "personality");
        if (!PersonalityPointerEncoding)
          return PersonalityPointerEncoding.takeError();
        if (auto Err =
                getOrCreateEncodedPointerEdge(
                    PC, BlockEdges, *PersonalityPointerEncoding, RecordReader,
                    B, RecordReader.getOffset(), "personality")
                    .takeError())
          return Err;
        break;
      }
      case 'R':
        // Address (PC-begin) pointer encoding for this CIE's FDEs.
        if (auto PE = readPointerEncoding(RecordReader, B, "address")) {
          CIEInfo.AddressEncoding = *PE;
          if (CIEInfo.AddressEncoding == dwarf::DW_EH_PE_omit)
            return make_error<JITLinkError>(
                "Invalid address encoding DW_EH_PE_omit in CIE at " +
                formatv("{0:x}", B.getAddress().getValue()));
        } else
          return PE.takeError();
        break;
      default:
        llvm_unreachable("Invalid augmentation string field");
      }
    }

    if (RecordReader.getOffset() - AugmentationDataStartOffset >
        AugmentationDataLength)
      return make_error<JITLinkError>("Read past the end of the augmentation "
                                      "data while parsing fields");
  }

  assert(!PC.CIEInfos.count(CIESymbol.getAddress()) &&
         "Multiple CIEs recorded at the same address?");
  PC.CIEInfos[CIESymbol.getAddress()] = std::move(CIEInfo);

  return Error::success();
}
+
// Parse an FDE record: resolve (or add) the edge to its parent CIE, add an
// edge for the PC-begin field plus a keep-alive edge back from the function,
// and add an LSDA edge if the CIE declares one.
Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
                                   size_t CIEDeltaFieldOffset,
                                   uint32_t CIEDelta,
                                   const BlockEdgesInfo &BlockEdges) {
  LLVM_DEBUG(dbgs() << "  Record is FDE\n");

  orc::ExecutorAddr RecordAddress = B.getAddress();

  BinaryStreamReader RecordReader(
      StringRef(B.getContent().data(), B.getContent().size()),
      PC.G.getEndianness());

  // Skip past the CIE delta field: we've already read this far.
  RecordReader.setOffset(CIEDeltaFieldOffset + 4);

  // Anchor symbol covering the whole FDE; keep-alive edges target it.
  auto &FDESymbol = PC.G.addAnonymousSymbol(B, 0, B.getSize(), false, false);

  CIEInformation *CIEInfo = nullptr;

  {
    // Process the CIE pointer field.
    if (BlockEdges.Multiple.contains(CIEDeltaFieldOffset))
      return make_error<JITLinkError>(
          "CIE pointer field already has multiple edges at " +
          formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset));

    auto CIEEdgeItr = BlockEdges.TargetMap.find(CIEDeltaFieldOffset);

    // The CIE delta is a backward offset from the delta field itself.
    orc::ExecutorAddr CIEAddress =
        RecordAddress + orc::ExecutorAddrDiff(CIEDeltaFieldOffset) -
        orc::ExecutorAddrDiff(CIEDelta);
    if (CIEEdgeItr == BlockEdges.TargetMap.end()) {
      // No pre-existing relocation: add a NegDelta32 edge to the CIE symbol.
      LLVM_DEBUG({
        dbgs() << "    Adding edge at "
               << (RecordAddress + CIEDeltaFieldOffset)
               << " to CIE at: " << CIEAddress << "\n";
      });
      if (auto CIEInfoOrErr = PC.findCIEInfo(CIEAddress))
        CIEInfo = *CIEInfoOrErr;
      else
        return CIEInfoOrErr.takeError();
      assert(CIEInfo->CIESymbol && "CIEInfo has no CIE symbol set");
      B.addEdge(NegDelta32, CIEDeltaFieldOffset, *CIEInfo->CIESymbol, 0);
    } else {
      // Reuse the existing relocation; it must target the CIE directly.
      LLVM_DEBUG({
        dbgs() << "    Already has edge at "
               << (RecordAddress + CIEDeltaFieldOffset) << " to CIE at "
               << CIEAddress << "\n";
      });
      auto &EI = CIEEdgeItr->second;
      if (EI.Addend)
        return make_error<JITLinkError>(
            "CIE edge at " +
            formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset) +
            " has non-zero addend");
      if (auto CIEInfoOrErr = PC.findCIEInfo(EI.Target->getAddress()))
        CIEInfo = *CIEInfoOrErr;
      else
        return CIEInfoOrErr.takeError();
    }
  }

  // Process the PC-Begin field.
  LLVM_DEBUG({
    dbgs() << "    Processing PC-begin at "
           << (RecordAddress + RecordReader.getOffset()) << "\n";
  });
  if (auto PCBegin = getOrCreateEncodedPointerEdge(
          PC, BlockEdges, CIEInfo->AddressEncoding, RecordReader, B,
          RecordReader.getOffset(), "PC begin")) {
    assert(*PCBegin && "PC-begin symbol not set");
    if ((*PCBegin)->isDefined()) {
      // Add a keep-alive edge from the FDE target to the FDE to ensure that the
      // FDE is kept alive if its target is.
      LLVM_DEBUG({
        dbgs() << "      Adding keep-alive edge from target at "
               << (*PCBegin)->getBlock().getAddress() << " to FDE at "
               << RecordAddress << "\n";
      });
      (*PCBegin)->getBlock().addEdge(Edge::KeepAlive, 0, FDESymbol, 0);
    } else {
      // External/absolute targets can't carry keep-alive edges here; warn.
      LLVM_DEBUG({
        dbgs() << "      WARNING: Not adding keep-alive edge to FDE at "
               << RecordAddress << ", which points to "
               << ((*PCBegin)->isExternal() ? "external" : "absolute")
               << " symbol \"" << (*PCBegin)->getName()
               << "\" -- FDE must be kept alive manually or it will be "
               << "dead stripped.\n";
      });
    }
  } else
    return PCBegin.takeError();

  // Skip over the PC range size field.
  if (auto Err = skipEncodedPointer(CIEInfo->AddressEncoding, RecordReader))
    return Err;

  if (CIEInfo->AugmentationDataPresent) {
    uint64_t AugmentationDataSize;
    if (auto Err = RecordReader.readULEB128(AugmentationDataSize))
      return Err;

    if (CIEInfo->LSDAPresent)
      if (auto Err = getOrCreateEncodedPointerEdge(
                         PC, BlockEdges, CIEInfo->LSDAEncoding, RecordReader, B,
                         RecordReader.getOffset(), "LSDA")
                         .takeError())
        return Err;
  } else {
    LLVM_DEBUG(dbgs() << "    Record does not have LSDA field.\n");
  }

  return Error::success();
}
+
+Expected<EHFrameEdgeFixer::AugmentationInfo>
+EHFrameEdgeFixer::parseAugmentationString(BinaryStreamReader &RecordReader) {
+ AugmentationInfo AugInfo;
+ uint8_t NextChar;
+ uint8_t *NextField = &AugInfo.Fields[0];
+
+ if (auto Err = RecordReader.readInteger(NextChar))
+ return std::move(Err);
+
+ while (NextChar != 0) {
+ switch (NextChar) {
+ case 'z':
+ AugInfo.AugmentationDataPresent = true;
+ break;
+ case 'e':
+ if (auto Err = RecordReader.readInteger(NextChar))
+ return std::move(Err);
+ if (NextChar != 'h')
+ return make_error<JITLinkError>("Unrecognized substring e" +
+ Twine(NextChar) +
+ " in augmentation string");
+ AugInfo.EHDataFieldPresent = true;
+ break;
+ case 'L':
+ case 'P':
+ case 'R':
+ *NextField++ = NextChar;
+ break;
+ default:
+ return make_error<JITLinkError>("Unrecognized character " +
+ Twine(NextChar) +
+ " in augmentation string");
+ }
+
+ if (auto Err = RecordReader.readInteger(NextChar))
+ return std::move(Err);
+ }
+
+ return std::move(AugInfo);
+}
+
+Expected<uint8_t> EHFrameEdgeFixer::readPointerEncoding(BinaryStreamReader &R,
+ Block &InBlock,
+ const char *FieldName) {
+ using namespace dwarf;
+
+ uint8_t PointerEncoding;
+ if (auto Err = R.readInteger(PointerEncoding))
+ return std::move(Err);
+
+ bool Supported = true;
+ switch (PointerEncoding & 0xf) {
+ case DW_EH_PE_uleb128:
+ case DW_EH_PE_udata2:
+ case DW_EH_PE_sleb128:
+ case DW_EH_PE_sdata2:
+ Supported = false;
+ break;
+ }
+ if (Supported) {
+ switch (PointerEncoding & 0x70) {
+ case DW_EH_PE_textrel:
+ case DW_EH_PE_datarel:
+ case DW_EH_PE_funcrel:
+ case DW_EH_PE_aligned:
+ Supported = false;
+ break;
+ }
+ }
+
+ if (Supported)
+ return PointerEncoding;
+
+ return make_error<JITLinkError>("Unsupported pointer encoding " +
+ formatv("{0:x2}", PointerEncoding) + " for " +
+ FieldName + "in CFI record at " +
+ formatv("{0:x16}", InBlock.getAddress()));
+}
+
+Error EHFrameEdgeFixer::skipEncodedPointer(uint8_t PointerEncoding,
+ BinaryStreamReader &RecordReader) {
+ using namespace dwarf;
+
+ // Switch absptr to corresponding udata encoding.
+ if ((PointerEncoding & 0xf) == DW_EH_PE_absptr)
+ PointerEncoding |= (PointerSize == 8) ? DW_EH_PE_udata8 : DW_EH_PE_udata4;
+
+ switch (PointerEncoding & 0xf) {
+ case DW_EH_PE_udata4:
+ case DW_EH_PE_sdata4:
+ if (auto Err = RecordReader.skip(4))
+ return Err;
+ break;
+ case DW_EH_PE_udata8:
+ case DW_EH_PE_sdata8:
+ if (auto Err = RecordReader.skip(8))
+ return Err;
+ break;
+ default:
+ llvm_unreachable("Unrecognized encoding");
+ }
+ return Error::success();
+}
+
+Expected<Symbol *> EHFrameEdgeFixer::getOrCreateEncodedPointerEdge(
+ ParseContext &PC, const BlockEdgesInfo &BlockEdges, uint8_t PointerEncoding,
+ BinaryStreamReader &RecordReader, Block &BlockToFix,
+ size_t PointerFieldOffset, const char *FieldName) {
+ using namespace dwarf;
+
+ if (PointerEncoding == DW_EH_PE_omit)
+ return nullptr;
+
+ // If there's already an edge here then just skip the encoded pointer and
+ // return the edge's target.
+ {
+ auto EdgeI = BlockEdges.TargetMap.find(PointerFieldOffset);
+ if (EdgeI != BlockEdges.TargetMap.end()) {
+ LLVM_DEBUG({
+ dbgs() << " Existing edge at "
+ << (BlockToFix.getAddress() + PointerFieldOffset) << " to "
+ << FieldName << " at " << EdgeI->second.Target->getAddress();
+ if (EdgeI->second.Target->hasName())
+ dbgs() << " (" << EdgeI->second.Target->getName() << ")";
+ dbgs() << "\n";
+ });
+ if (auto Err = skipEncodedPointer(PointerEncoding, RecordReader))
+ return std::move(Err);
+ return EdgeI->second.Target;
+ }
+
+ if (BlockEdges.Multiple.contains(PointerFieldOffset))
+ return make_error<JITLinkError>("Multiple relocations at offset " +
+ formatv("{0:x16}", PointerFieldOffset));
+ }
+
+ // Switch absptr to corresponding udata encoding.
+ if ((PointerEncoding & 0xf) == DW_EH_PE_absptr)
+ PointerEncoding |= (PointerSize == 8) ? DW_EH_PE_udata8 : DW_EH_PE_udata4;
+
+ // We need to create an edge. Start by reading the field value.
+ uint64_t FieldValue;
+ bool Is64Bit = false;
+ switch (PointerEncoding & 0xf) {
+ case DW_EH_PE_udata4: {
+ uint32_t Val;
+ if (auto Err = RecordReader.readInteger(Val))
+ return std::move(Err);
+ FieldValue = Val;
+ break;
+ }
+ case DW_EH_PE_sdata4: {
+ uint32_t Val;
+ if (auto Err = RecordReader.readInteger(Val))
+ return std::move(Err);
+ FieldValue = Val;
+ break;
+ }
+ case DW_EH_PE_udata8:
+ case DW_EH_PE_sdata8:
+ Is64Bit = true;
+ if (auto Err = RecordReader.readInteger(FieldValue))
+ return std::move(Err);
+ break;
+ default:
+ llvm_unreachable("Unsupported encoding");
+ }
+
+ // Find the edge target and edge kind to use.
+ orc::ExecutorAddr Target;
+ Edge::Kind PtrEdgeKind = Edge::Invalid;
+ if ((PointerEncoding & 0x70) == DW_EH_PE_pcrel) {
+ Target = BlockToFix.getAddress() + PointerFieldOffset;
+ PtrEdgeKind = Is64Bit ? Delta64 : Delta32;
+ } else
+ PtrEdgeKind = Is64Bit ? Pointer64 : Pointer32;
+ Target += FieldValue;
+
+ // Find or create a symbol to point the edge at.
+ auto TargetSym = getOrCreateSymbol(PC, Target);
+ if (!TargetSym)
+ return TargetSym.takeError();
+ BlockToFix.addEdge(PtrEdgeKind, PointerFieldOffset, *TargetSym, 0);
+
+ LLVM_DEBUG({
+ dbgs() << " Adding edge at "
+ << (BlockToFix.getAddress() + PointerFieldOffset) << " to "
+ << FieldName << " at " << TargetSym->getAddress();
+ if (TargetSym->hasName())
+ dbgs() << " (" << TargetSym->getName() << ")";
+ dbgs() << "\n";
+ });
+
+ return &*TargetSym;
+}
+
+Expected<Symbol &> EHFrameEdgeFixer::getOrCreateSymbol(ParseContext &PC,
+ orc::ExecutorAddr Addr) {
+ // See whether we have a canonical symbol for the given address already.
+ auto CanonicalSymI = PC.AddrToSym.find(Addr);
+ if (CanonicalSymI != PC.AddrToSym.end())
+ return *CanonicalSymI->second;
+
+ // Otherwise search for a block covering the address and create a new symbol.
+ auto *B = PC.AddrToBlock.getBlockCovering(Addr);
+ if (!B)
+ return make_error<JITLinkError>("No symbol or block covering address " +
+ formatv("{0:x16}", Addr));
+
+ auto &S =
+ PC.G.addAnonymousSymbol(*B, Addr - B->getAddress(), 0, false, false);
+ PC.AddrToSym[S.getAddress()] = &S;
+ return S;
+}
+
// Four zero bytes: a zero CFI length field, which terminates an eh-frame
// section.
char EHFrameNullTerminator::NullTerminatorBlockContent[4] = {0, 0, 0, 0};

// Remember the name of the eh-frame section to terminate.
EHFrameNullTerminator::EHFrameNullTerminator(StringRef EHFrameSectionName)
    : EHFrameSectionName(EHFrameSectionName) {}
+
// Append a 4-byte all-zero terminator block (plus a live anonymous symbol so
// it survives pruning) to the eh-frame section, if the graph has one.
Error EHFrameNullTerminator::operator()(LinkGraph &G) {
  auto *EHFrame = G.findSectionByName(EHFrameSectionName);

  if (!EHFrame)
    return Error::success();

  LLVM_DEBUG({
    dbgs() << "EHFrameNullTerminator adding null terminator to "
           << EHFrameSectionName << "\n";
  });

  // ~uint64_t(4) is a placeholder address near the top of the address space
  // -- presumably so the terminator block sorts after all real eh-frame
  // blocks during layout; confirm against the layout code.
  auto &NullTerminatorBlock =
      G.createContentBlock(*EHFrame, NullTerminatorBlockContent,
                           orc::ExecutorAddr(~uint64_t(4)), 1, 0);
  // Mark the symbol live (final 'true') so dead-stripping keeps the block.
  G.addAnonymousSymbol(NullTerminatorBlock, 0, 4, false, true);
  return Error::success();
}
+
// Out-of-line destructor definition anchoring EHFrameRegistrar's vtable.
EHFrameRegistrar::~EHFrameRegistrar() = default;
+
// Register an eh-frame range with this process's unwinder by delegating to
// the ORC runtime helper.
Error InProcessEHFrameRegistrar::registerEHFrames(
    orc::ExecutorAddrRange EHFrameSection) {
  return orc::registerEHFrameSection(EHFrameSection.Start.toPtr<void *>(),
                                     EHFrameSection.size());
}
+
// Deregister a previously registered eh-frame range from this process's
// unwinder via the ORC runtime helper.
Error InProcessEHFrameRegistrar::deregisterEHFrames(
    orc::ExecutorAddrRange EHFrameSection) {
  return orc::deregisterEHFrameSection(EHFrameSection.Start.toPtr<void *>(),
                                       EHFrameSection.size());
}
+
+EHFrameCFIBlockInspector EHFrameCFIBlockInspector::FromEdgeScan(Block &B) {
+ if (B.edges_empty())
+ return EHFrameCFIBlockInspector(nullptr);
+ if (B.edges_size() == 1)
+ return EHFrameCFIBlockInspector(&*B.edges().begin());
+ SmallVector<Edge *, 3> Es;
+ for (auto &E : B.edges())
+ Es.push_back(&E);
+ assert(Es.size() >= 2 && Es.size() <= 3 && "Unexpected number of edges");
+ llvm::sort(Es, [](const Edge *LHS, const Edge *RHS) {
+ return LHS->getOffset() < RHS->getOffset();
+ });
+ return EHFrameCFIBlockInspector(*Es[0], *Es[1],
+ Es.size() == 3 ? Es[2] : nullptr);
+ return EHFrameCFIBlockInspector(nullptr);
+}
+
// CIE-form inspector: only a (possibly null) personality edge.
EHFrameCFIBlockInspector::EHFrameCFIBlockInspector(Edge *PersonalityEdge)
    : PersonalityEdge(PersonalityEdge) {}

// FDE-form inspector: CIE and PC-begin edges are required; LSDA is optional.
EHFrameCFIBlockInspector::EHFrameCFIBlockInspector(Edge &CIEEdge,
                                                   Edge &PCBeginEdge,
                                                   Edge *LSDAEdge)
    : CIEEdge(&CIEEdge), PCBeginEdge(&PCBeginEdge), LSDAEdge(LSDAEdge) {}
+
+LinkGraphPassFunction
+createEHFrameRecorderPass(const Triple &TT,
+ StoreFrameRangeFunction StoreRangeAddress) {
+ const char *EHFrameSectionName = nullptr;
+ if (TT.getObjectFormat() == Triple::MachO)
+ EHFrameSectionName = "__TEXT,__eh_frame";
+ else
+ EHFrameSectionName = ".eh_frame";
+
+ auto RecordEHFrame =
+ [EHFrameSectionName,
+ StoreFrameRange = std::move(StoreRangeAddress)](LinkGraph &G) -> Error {
+ // Search for a non-empty eh-frame and record the address of the first
+ // symbol in it.
+ orc::ExecutorAddr Addr;
+ size_t Size = 0;
+ if (auto *S = G.findSectionByName(EHFrameSectionName)) {
+ auto R = SectionRange(*S);
+ Addr = R.getStart();
+ Size = R.getSize();
+ }
+ if (!Addr && Size != 0)
+ return make_error<JITLinkError>(
+ StringRef(EHFrameSectionName) +
+ " section can not have zero address with non-zero size");
+ StoreFrameRange(Addr, Size);
+ return Error::success();
+ };
+
+ return RecordEHFrame;
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
new file mode 100644
index 000000000000..49fbf650e7a7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
@@ -0,0 +1,131 @@
+//===------- EHFrameSupportImpl.h - JITLink eh-frame utils ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// EHFrame registration support for JITLink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/BinaryStreamReader.h"
+
+namespace llvm {
+namespace jitlink {
+
+/// A LinkGraph pass that adds missing FDE-to-CIE, FDE-to-PC and FDE-to-LSDA
+/// edges.
+class EHFrameEdgeFixer {
+public:
+ /// Create an eh-frame edge fixer.
+ /// If a given edge-kind is not supported on the target architecture then
+ /// Edge::Invalid should be used.
+ EHFrameEdgeFixer(StringRef EHFrameSectionName, unsigned PointerSize,
+ Edge::Kind Pointer32, Edge::Kind Pointer64,
+ Edge::Kind Delta32, Edge::Kind Delta64,
+ Edge::Kind NegDelta32);
+ Error operator()(LinkGraph &G);
+
+private:
+
+ struct AugmentationInfo {
+ bool AugmentationDataPresent = false;
+ bool EHDataFieldPresent = false;
+ uint8_t Fields[4] = {0x0, 0x0, 0x0, 0x0};
+ };
+
+ struct CIEInformation {
+ CIEInformation() = default;
+ CIEInformation(Symbol &CIESymbol) : CIESymbol(&CIESymbol) {}
+ Symbol *CIESymbol = nullptr;
+ bool AugmentationDataPresent = false;
+ bool LSDAPresent = false;
+ uint8_t LSDAEncoding = 0;
+ uint8_t AddressEncoding = 0;
+ };
+
+ struct EdgeTarget {
+ EdgeTarget() = default;
+ EdgeTarget(const Edge &E) : Target(&E.getTarget()), Addend(E.getAddend()) {}
+
+ Symbol *Target = nullptr;
+ Edge::AddendT Addend = 0;
+ };
+
+ struct BlockEdgesInfo {
+ DenseMap<Edge::OffsetT, EdgeTarget> TargetMap;
+ DenseSet<Edge::OffsetT> Multiple;
+ };
+
+ using CIEInfosMap = DenseMap<orc::ExecutorAddr, CIEInformation>;
+
+ struct ParseContext {
+ ParseContext(LinkGraph &G) : G(G) {}
+
+ Expected<CIEInformation *> findCIEInfo(orc::ExecutorAddr Address) {
+ auto I = CIEInfos.find(Address);
+ if (I == CIEInfos.end())
+ return make_error<JITLinkError>("No CIE found at address " +
+ formatv("{0:x16}", Address));
+ return &I->second;
+ }
+
+ LinkGraph &G;
+ CIEInfosMap CIEInfos;
+ BlockAddressMap AddrToBlock;
+ DenseMap<orc::ExecutorAddr, Symbol *> AddrToSym;
+ };
+
+ Error processBlock(ParseContext &PC, Block &B);
+ Error processCIE(ParseContext &PC, Block &B, size_t CIEDeltaFieldOffset,
+ const BlockEdgesInfo &BlockEdges);
+ Error processFDE(ParseContext &PC, Block &B, size_t CIEDeltaFieldOffset,
+ uint32_t CIEDelta, const BlockEdgesInfo &BlockEdges);
+
+ Expected<AugmentationInfo>
+ parseAugmentationString(BinaryStreamReader &RecordReader);
+
+ Expected<uint8_t> readPointerEncoding(BinaryStreamReader &RecordReader,
+ Block &InBlock, const char *FieldName);
+ Error skipEncodedPointer(uint8_t PointerEncoding,
+ BinaryStreamReader &RecordReader);
+ Expected<Symbol *> getOrCreateEncodedPointerEdge(
+ ParseContext &PC, const BlockEdgesInfo &BlockEdges,
+ uint8_t PointerEncoding, BinaryStreamReader &RecordReader,
+ Block &BlockToFix, size_t PointerFieldOffset, const char *FieldName);
+
+ Expected<Symbol &> getOrCreateSymbol(ParseContext &PC,
+ orc::ExecutorAddr Addr);
+
+ StringRef EHFrameSectionName;
+ unsigned PointerSize;
+ Edge::Kind Pointer32;
+ Edge::Kind Pointer64;
+ Edge::Kind Delta32;
+ Edge::Kind Delta64;
+ Edge::Kind NegDelta32;
+};
+
+/// Add a 32-bit null-terminator to the end of the eh-frame section.
+class EHFrameNullTerminator {
+public:
+ EHFrameNullTerminator(StringRef EHFrameSectionName);
+ Error operator()(LinkGraph &G);
+
+private:
+ static char NullTerminatorBlockContent[];
+ StringRef EHFrameSectionName;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF.cpp
new file mode 100644
index 000000000000..fdcce20cd2d1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF.cpp
@@ -0,0 +1,154 @@
+//===-------------- ELF.cpp - JIT linker function for ELF -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_aarch32.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_aarch64.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_i386.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_loongarch.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_ppc64.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+Expected<uint16_t> readTargetMachineArch(StringRef Buffer) {
+ const char *Data = Buffer.data();
+
+ if (Data[ELF::EI_DATA] == ELF::ELFDATA2LSB) {
+ if (Data[ELF::EI_CLASS] == ELF::ELFCLASS64) {
+ if (auto File = llvm::object::ELF64LEFile::create(Buffer)) {
+ return File->getHeader().e_machine;
+ } else {
+ return File.takeError();
+ }
+ } else if (Data[ELF::EI_CLASS] == ELF::ELFCLASS32) {
+ if (auto File = llvm::object::ELF32LEFile::create(Buffer)) {
+ return File->getHeader().e_machine;
+ } else {
+ return File.takeError();
+ }
+ }
+ }
+
+ if (Data[ELF::EI_DATA] == ELF::ELFDATA2MSB) {
+ if (Data[ELF::EI_CLASS] == ELF::ELFCLASS64) {
+ if (auto File = llvm::object::ELF64BEFile::create(Buffer)) {
+ return File->getHeader().e_machine;
+ } else {
+ return File.takeError();
+ }
+ } else if (Data[ELF::EI_CLASS] == ELF::ELFCLASS32) {
+ if (auto File = llvm::object::ELF32BEFile::create(Buffer)) {
+ return File->getHeader().e_machine;
+ } else {
+ return File.takeError();
+ }
+ }
+ }
+
+ return ELF::EM_NONE;
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject(MemoryBufferRef ObjectBuffer) {
+ StringRef Buffer = ObjectBuffer.getBuffer();
+ if (Buffer.size() < ELF::EI_NIDENT)
+ return make_error<JITLinkError>("Truncated ELF buffer");
+
+ if (memcmp(Buffer.data(), ELF::ElfMagic, strlen(ELF::ElfMagic)) != 0)
+ return make_error<JITLinkError>("ELF magic not valid");
+
+ uint8_t DataEncoding = Buffer.data()[ELF::EI_DATA];
+ Expected<uint16_t> TargetMachineArch = readTargetMachineArch(Buffer);
+ if (!TargetMachineArch)
+ return TargetMachineArch.takeError();
+
+ switch (*TargetMachineArch) {
+ case ELF::EM_AARCH64:
+ return createLinkGraphFromELFObject_aarch64(ObjectBuffer);
+ case ELF::EM_ARM:
+ return createLinkGraphFromELFObject_aarch32(ObjectBuffer);
+ case ELF::EM_LOONGARCH:
+ return createLinkGraphFromELFObject_loongarch(ObjectBuffer);
+ case ELF::EM_PPC64: {
+ if (DataEncoding == ELF::ELFDATA2LSB)
+ return createLinkGraphFromELFObject_ppc64le(ObjectBuffer);
+ else
+ return createLinkGraphFromELFObject_ppc64(ObjectBuffer);
+ }
+ case ELF::EM_RISCV:
+ return createLinkGraphFromELFObject_riscv(ObjectBuffer);
+ case ELF::EM_X86_64:
+ return createLinkGraphFromELFObject_x86_64(ObjectBuffer);
+ case ELF::EM_386:
+ return createLinkGraphFromELFObject_i386(ObjectBuffer);
+ default:
+ return make_error<JITLinkError>(
+ "Unsupported target machine architecture in ELF object " +
+ ObjectBuffer.getBufferIdentifier());
+ }
+}
+
+void link_ELF(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ switch (G->getTargetTriple().getArch()) {
+ case Triple::aarch64:
+ link_ELF_aarch64(std::move(G), std::move(Ctx));
+ return;
+ case Triple::arm:
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ link_ELF_aarch32(std::move(G), std::move(Ctx));
+ return;
+ case Triple::loongarch32:
+ case Triple::loongarch64:
+ link_ELF_loongarch(std::move(G), std::move(Ctx));
+ return;
+ case Triple::ppc64:
+ link_ELF_ppc64(std::move(G), std::move(Ctx));
+ return;
+ case Triple::ppc64le:
+ link_ELF_ppc64le(std::move(G), std::move(Ctx));
+ return;
+ case Triple::riscv32:
+ case Triple::riscv64:
+ link_ELF_riscv(std::move(G), std::move(Ctx));
+ return;
+ case Triple::x86_64:
+ link_ELF_x86_64(std::move(G), std::move(Ctx));
+ return;
+ case Triple::x86:
+ link_ELF_i386(std::move(G), std::move(Ctx));
+ return;
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>(
+ "Unsupported target machine architecture in ELF link graph " +
+ G->getName()));
+ return;
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp
new file mode 100644
index 000000000000..e081f47ca42f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp
@@ -0,0 +1,33 @@
+//=----------- ELFLinkGraphBuilder.cpp - ELF LinkGraph builder ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic ELF LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ELFLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+static const char *DWSecNames[] = {
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION) \
+ ELF_NAME,
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+};
+
+namespace llvm {
+namespace jitlink {
+
+StringRef ELFLinkGraphBuilderBase::CommonSectionName(".common");
+ArrayRef<const char *> ELFLinkGraphBuilderBase::DwarfSectionNames = DWSecNames;
+
+ELFLinkGraphBuilderBase::~ELFLinkGraphBuilderBase() = default;
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
new file mode 100644
index 000000000000..5dae60062939
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
@@ -0,0 +1,698 @@
+//===------- ELFLinkGraphBuilder.h - ELF LinkGraph builder ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic ELF LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_ELFLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_ELFLINKGRAPHBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+/// Common link-graph building code shared between all ELFFiles.
+class ELFLinkGraphBuilderBase {
+public:
+ ELFLinkGraphBuilderBase(std::unique_ptr<LinkGraph> G) : G(std::move(G)) {}
+ virtual ~ELFLinkGraphBuilderBase();
+
+protected:
+ static bool isDwarfSection(StringRef SectionName) {
+ return llvm::is_contained(DwarfSectionNames, SectionName);
+ }
+
+ Section &getCommonSection() {
+ if (!CommonSection)
+ CommonSection = &G->createSection(
+ CommonSectionName, orc::MemProt::Read | orc::MemProt::Write);
+ return *CommonSection;
+ }
+
+ std::unique_ptr<LinkGraph> G;
+
+private:
+ static StringRef CommonSectionName;
+ static ArrayRef<const char *> DwarfSectionNames;
+
+ Section *CommonSection = nullptr;
+};
+
+/// LinkGraph building code that's specific to the given ELFT, but common
+/// across all architectures.
+template <typename ELFT>
+class ELFLinkGraphBuilder : public ELFLinkGraphBuilderBase {
+ using ELFFile = object::ELFFile<ELFT>;
+
+public:
+ ELFLinkGraphBuilder(const object::ELFFile<ELFT> &Obj, Triple TT,
+ SubtargetFeatures Features, StringRef FileName,
+ LinkGraph::GetEdgeKindNameFunction GetEdgeKindName);
+
+ /// Debug sections are included in the graph by default. Use
+ /// setProcessDebugSections(false) to ignore them if debug info is not
+ /// needed.
+ ELFLinkGraphBuilder &setProcessDebugSections(bool ProcessDebugSections) {
+ this->ProcessDebugSections = ProcessDebugSections;
+ return *this;
+ }
+
+ /// Attempt to construct and return the LinkGraph.
+ Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+ /// Call to derived class to handle relocations. These require
+ /// architecture specific knowledge to map to JITLink edge kinds.
+ virtual Error addRelocations() = 0;
+
+protected:
+ using ELFSectionIndex = unsigned;
+ using ELFSymbolIndex = unsigned;
+
+ bool isRelocatable() const {
+ return Obj.getHeader().e_type == llvm::ELF::ET_REL;
+ }
+
+ void setGraphBlock(ELFSectionIndex SecIndex, Block *B) {
+ assert(!GraphBlocks.count(SecIndex) && "Duplicate section at index");
+ GraphBlocks[SecIndex] = B;
+ }
+
+ Block *getGraphBlock(ELFSectionIndex SecIndex) {
+ return GraphBlocks.lookup(SecIndex);
+ }
+
+ void setGraphSymbol(ELFSymbolIndex SymIndex, Symbol &Sym) {
+ assert(!GraphSymbols.count(SymIndex) && "Duplicate symbol at index");
+ GraphSymbols[SymIndex] = &Sym;
+ }
+
+ Symbol *getGraphSymbol(ELFSymbolIndex SymIndex) {
+ return GraphSymbols.lookup(SymIndex);
+ }
+
+ Expected<std::pair<Linkage, Scope>>
+ getSymbolLinkageAndScope(const typename ELFT::Sym &Sym, StringRef Name);
+
+ /// Set the target flags on the given Symbol.
+ virtual TargetFlagsType makeTargetFlags(const typename ELFT::Sym &Sym) {
+ return TargetFlagsType{};
+ }
+
+ /// Get the physical offset of the symbol on the target platform.
+ virtual orc::ExecutorAddrDiff getRawOffset(const typename ELFT::Sym &Sym,
+ TargetFlagsType Flags) {
+ return Sym.getValue();
+ }
+
+ Error prepare();
+ Error graphifySections();
+ Error graphifySymbols();
+
+ /// Override in derived classes to suppress certain sections in the link
+ /// graph.
+ virtual bool excludeSection(const typename ELFT::Shdr &Sect) const {
+ return false;
+ }
+
+ /// Traverse all matching ELFT::Rela relocation records in the given section.
+ /// The handler function Func should be callable with this signature:
+ /// Error(const typename ELFT::Rela &,
+ /// const typename ELFT::Shdr &, Section &)
+ ///
+ template <typename RelocHandlerMethod>
+ Error forEachRelaRelocation(const typename ELFT::Shdr &RelSect,
+ RelocHandlerMethod &&Func);
+
+ /// Traverse all matching ELFT::Rel relocation records in the given section.
+ /// The handler function Func should be callable with this signature:
+ /// Error(const typename ELFT::Rel &,
+ /// const typename ELFT::Shdr &, Section &)
+ ///
+ template <typename RelocHandlerMethod>
+ Error forEachRelRelocation(const typename ELFT::Shdr &RelSect,
+ RelocHandlerMethod &&Func);
+
+ /// Traverse all matching rela relocation records in the given section.
+ /// Convenience wrapper to allow passing a member function for the handler.
+ ///
+ template <typename ClassT, typename RelocHandlerMethod>
+ Error forEachRelaRelocation(const typename ELFT::Shdr &RelSect,
+ ClassT *Instance, RelocHandlerMethod &&Method) {
+ return forEachRelaRelocation(
+ RelSect,
+ [Instance, Method](const auto &Rel, const auto &Target, auto &GS) {
+ return (Instance->*Method)(Rel, Target, GS);
+ });
+ }
+
+ /// Traverse all matching rel relocation records in the given section.
+ /// Convenience wrapper to allow passing a member function for the handler.
+ ///
+ template <typename ClassT, typename RelocHandlerMethod>
+ Error forEachRelRelocation(const typename ELFT::Shdr &RelSect,
+ ClassT *Instance, RelocHandlerMethod &&Method) {
+ return forEachRelRelocation(
+ RelSect,
+ [Instance, Method](const auto &Rel, const auto &Target, auto &GS) {
+ return (Instance->*Method)(Rel, Target, GS);
+ });
+ }
+
+ const ELFFile &Obj;
+
+ typename ELFFile::Elf_Shdr_Range Sections;
+ const typename ELFFile::Elf_Shdr *SymTabSec = nullptr;
+ StringRef SectionStringTab;
+ bool ProcessDebugSections = true;
+
+ // Maps ELF section indexes to LinkGraph Blocks.
+ // Only SHF_ALLOC sections will have graph blocks.
+ DenseMap<ELFSectionIndex, Block *> GraphBlocks;
+ DenseMap<ELFSymbolIndex, Symbol *> GraphSymbols;
+ DenseMap<const typename ELFFile::Elf_Shdr *,
+ ArrayRef<typename ELFFile::Elf_Word>>
+ ShndxTables;
+};
+
+template <typename ELFT>
+ELFLinkGraphBuilder<ELFT>::ELFLinkGraphBuilder(
+ const ELFFile &Obj, Triple TT, SubtargetFeatures Features,
+ StringRef FileName, LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
+ : ELFLinkGraphBuilderBase(std::make_unique<LinkGraph>(
+ FileName.str(), Triple(std::move(TT)), std::move(Features),
+ ELFT::Is64Bits ? 8 : 4, llvm::endianness(ELFT::Endianness),
+ std::move(GetEdgeKindName))),
+ Obj(Obj) {
+ LLVM_DEBUG(
+ { dbgs() << "Created ELFLinkGraphBuilder for \"" << FileName << "\""; });
+}
+
+template <typename ELFT>
+Expected<std::unique_ptr<LinkGraph>> ELFLinkGraphBuilder<ELFT>::buildGraph() {
+ if (!isRelocatable())
+ return make_error<JITLinkError>("Object is not a relocatable ELF file");
+
+ if (auto Err = prepare())
+ return std::move(Err);
+
+ if (auto Err = graphifySections())
+ return std::move(Err);
+
+ if (auto Err = graphifySymbols())
+ return std::move(Err);
+
+ if (auto Err = addRelocations())
+ return std::move(Err);
+
+ return std::move(G);
+}
+
+template <typename ELFT>
+Expected<std::pair<Linkage, Scope>>
+ELFLinkGraphBuilder<ELFT>::getSymbolLinkageAndScope(
+ const typename ELFT::Sym &Sym, StringRef Name) {
+ Linkage L = Linkage::Strong;
+ Scope S = Scope::Default;
+
+ switch (Sym.getBinding()) {
+ case ELF::STB_LOCAL:
+ S = Scope::Local;
+ break;
+ case ELF::STB_GLOBAL:
+ // Nothing to do here.
+ break;
+ case ELF::STB_WEAK:
+ case ELF::STB_GNU_UNIQUE:
+ L = Linkage::Weak;
+ break;
+ default:
+ return make_error<StringError>(
+ "Unrecognized symbol binding " +
+ Twine(static_cast<int>(Sym.getBinding())) + " for " + Name,
+ inconvertibleErrorCode());
+ }
+
+ switch (Sym.getVisibility()) {
+ case ELF::STV_DEFAULT:
+ case ELF::STV_PROTECTED:
+ // FIXME: Make STV_DEFAULT symbols pre-emptible? This probably needs
+ // Orc support.
+ // Otherwise nothing to do here.
+ break;
+ case ELF::STV_HIDDEN:
+ // Default scope -> Hidden scope. No effect on local scope.
+ if (S == Scope::Default)
+ S = Scope::Hidden;
+ break;
+ case ELF::STV_INTERNAL:
+ return make_error<StringError>(
+ "Unrecognized symbol visibility " +
+ Twine(static_cast<int>(Sym.getVisibility())) + " for " + Name,
+ inconvertibleErrorCode());
+ }
+
+ return std::make_pair(L, S);
+}
+
+template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::prepare() {
+ LLVM_DEBUG(dbgs() << " Preparing to build...\n");
+
+ // Get the sections array.
+ if (auto SectionsOrErr = Obj.sections())
+ Sections = *SectionsOrErr;
+ else
+ return SectionsOrErr.takeError();
+
+ // Get the section string table.
+ if (auto SectionStringTabOrErr = Obj.getSectionStringTable(Sections))
+ SectionStringTab = *SectionStringTabOrErr;
+ else
+ return SectionStringTabOrErr.takeError();
+
+ // Get the SHT_SYMTAB section.
+ for (auto &Sec : Sections) {
+ if (Sec.sh_type == ELF::SHT_SYMTAB) {
+ if (!SymTabSec)
+ SymTabSec = &Sec;
+ else
+ return make_error<JITLinkError>("Multiple SHT_SYMTAB sections in " +
+ G->getName());
+ }
+
+ // Extended table.
+ if (Sec.sh_type == ELF::SHT_SYMTAB_SHNDX) {
+ uint32_t SymtabNdx = Sec.sh_link;
+ if (SymtabNdx >= Sections.size())
+ return make_error<JITLinkError>("sh_link is out of bound");
+
+ auto ShndxTable = Obj.getSHNDXTable(Sec);
+ if (!ShndxTable)
+ return ShndxTable.takeError();
+
+ ShndxTables.insert({&Sections[SymtabNdx], *ShndxTable});
+ }
+ }
+
+ return Error::success();
+}
+
+template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySections() {
+ LLVM_DEBUG(dbgs() << " Creating graph sections...\n");
+
+ // For each section...
+ for (ELFSectionIndex SecIndex = 0; SecIndex != Sections.size(); ++SecIndex) {
+
+ auto &Sec = Sections[SecIndex];
+
+ // Start by getting the section name.
+ auto Name = Obj.getSectionName(Sec, SectionStringTab);
+ if (!Name)
+ return Name.takeError();
+ if (excludeSection(Sec)) {
+ LLVM_DEBUG({
+ dbgs() << " " << SecIndex << ": Skipping section \"" << *Name
+ << "\" explicitly\n";
+ });
+ continue;
+ }
+
+ // Skip null sections.
+ if (Sec.sh_type == ELF::SHT_NULL) {
+ LLVM_DEBUG({
+ dbgs() << " " << SecIndex << ": has type SHT_NULL. Skipping.\n";
+ });
+ continue;
+ }
+
+ // If the name indicates that it's a debug section then skip it: We don't
+ // support those yet.
+ if (!ProcessDebugSections && isDwarfSection(*Name)) {
+ LLVM_DEBUG({
+ dbgs() << " " << SecIndex << ": \"" << *Name
+ << "\" is a debug section: "
+ "No graph section will be created.\n";
+ });
+ continue;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " " << SecIndex << ": Creating section for \"" << *Name
+ << "\"\n";
+ });
+
+ // Get the section's memory protection flags.
+ orc::MemProt Prot = orc::MemProt::Read;
+ if (Sec.sh_flags & ELF::SHF_EXECINSTR)
+ Prot |= orc::MemProt::Exec;
+ if (Sec.sh_flags & ELF::SHF_WRITE)
+ Prot |= orc::MemProt::Write;
+
+ // Look for existing sections first.
+ auto *GraphSec = G->findSectionByName(*Name);
+ if (!GraphSec) {
+ GraphSec = &G->createSection(*Name, Prot);
+ // Non-SHF_ALLOC sections get NoAlloc memory lifetimes.
+ if (!(Sec.sh_flags & ELF::SHF_ALLOC)) {
+ GraphSec->setMemLifetime(orc::MemLifetime::NoAlloc);
+ LLVM_DEBUG({
+ dbgs() << " " << SecIndex << ": \"" << *Name
+ << "\" is not a SHF_ALLOC section. Using NoAlloc lifetime.\n";
+ });
+ }
+ }
+
+ if (GraphSec->getMemProt() != Prot) {
+ std::string ErrMsg;
+ raw_string_ostream(ErrMsg)
+ << "In " << G->getName() << ", section " << *Name
+ << " is present more than once with different permissions: "
+ << GraphSec->getMemProt() << " vs " << Prot;
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ Block *B = nullptr;
+ if (Sec.sh_type != ELF::SHT_NOBITS) {
+ auto Data = Obj.template getSectionContentsAsArray<char>(Sec);
+ if (!Data)
+ return Data.takeError();
+
+ B = &G->createContentBlock(*GraphSec, *Data,
+ orc::ExecutorAddr(Sec.sh_addr),
+ Sec.sh_addralign, 0);
+ } else
+ B = &G->createZeroFillBlock(*GraphSec, Sec.sh_size,
+ orc::ExecutorAddr(Sec.sh_addr),
+ Sec.sh_addralign, 0);
+
+ if (Sec.sh_type == ELF::SHT_ARM_EXIDX) {
+ // Add live symbol to avoid dead-stripping for .ARM.exidx sections
+ G->addAnonymousSymbol(*B, orc::ExecutorAddrDiff(),
+ orc::ExecutorAddrDiff(), false, true);
+ }
+
+ setGraphBlock(SecIndex, B);
+ }
+
+ return Error::success();
+}
+
+template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySymbols() {
+ LLVM_DEBUG(dbgs() << " Creating graph symbols...\n");
+
+ // No SYMTAB -- Bail out early.
+ if (!SymTabSec)
+ return Error::success();
+
+ // Get the section content as a Symbols array.
+ auto Symbols = Obj.symbols(SymTabSec);
+ if (!Symbols)
+ return Symbols.takeError();
+
+ // Get the string table for this section.
+ auto StringTab = Obj.getStringTableForSymtab(*SymTabSec, Sections);
+ if (!StringTab)
+ return StringTab.takeError();
+
+ LLVM_DEBUG({
+ StringRef SymTabName;
+
+ if (auto SymTabNameOrErr = Obj.getSectionName(*SymTabSec, SectionStringTab))
+ SymTabName = *SymTabNameOrErr;
+ else {
+ dbgs() << "Could not get ELF SHT_SYMTAB section name for logging: "
+ << toString(SymTabNameOrErr.takeError()) << "\n";
+ SymTabName = "<SHT_SYMTAB section with invalid name>";
+ }
+
+ dbgs() << " Adding symbols from symtab section \"" << SymTabName
+ << "\"\n";
+ });
+
+ for (ELFSymbolIndex SymIndex = 0; SymIndex != Symbols->size(); ++SymIndex) {
+ auto &Sym = (*Symbols)[SymIndex];
+
+ // Check symbol type.
+ switch (Sym.getType()) {
+ case ELF::STT_FILE:
+ LLVM_DEBUG({
+ if (auto Name = Sym.getName(*StringTab))
+ dbgs() << " " << SymIndex << ": Skipping STT_FILE symbol \""
+ << *Name << "\"\n";
+ else {
+ dbgs() << "Could not get STT_FILE symbol name: "
+ << toString(Name.takeError()) << "\n";
+ dbgs() << " " << SymIndex
+ << ": Skipping STT_FILE symbol with invalid name\n";
+ }
+ });
+ continue;
+ break;
+ }
+
+ // Get the symbol name.
+ auto Name = Sym.getName(*StringTab);
+ if (!Name)
+ return Name.takeError();
+
+ // Handle common symbols specially.
+ if (Sym.isCommon()) {
+ Symbol &GSym = G->addDefinedSymbol(
+ G->createZeroFillBlock(getCommonSection(), Sym.st_size,
+ orc::ExecutorAddr(), Sym.getValue(), 0),
+ 0, *Name, Sym.st_size, Linkage::Strong, Scope::Default, false, false);
+ setGraphSymbol(SymIndex, GSym);
+ continue;
+ }
+
+ if (Sym.isDefined() &&
+ (Sym.getType() == ELF::STT_NOTYPE || Sym.getType() == ELF::STT_FUNC ||
+ Sym.getType() == ELF::STT_OBJECT ||
+ Sym.getType() == ELF::STT_SECTION || Sym.getType() == ELF::STT_TLS)) {
+
+ // Map Visibility and Binding to Scope and Linkage:
+ Linkage L;
+ Scope S;
+ if (auto LSOrErr = getSymbolLinkageAndScope(Sym, *Name))
+ std::tie(L, S) = *LSOrErr;
+ else
+ return LSOrErr.takeError();
+
+ // Handle extended tables.
+ unsigned Shndx = Sym.st_shndx;
+ if (Shndx == ELF::SHN_XINDEX) {
+ auto ShndxTable = ShndxTables.find(SymTabSec);
+ if (ShndxTable == ShndxTables.end())
+ continue;
+ auto NdxOrErr = object::getExtendedSymbolTableIndex<ELFT>(
+ Sym, SymIndex, ShndxTable->second);
+ if (!NdxOrErr)
+ return NdxOrErr.takeError();
+ Shndx = *NdxOrErr;
+ }
+ if (auto *B = getGraphBlock(Shndx)) {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Creating defined graph symbol for ELF symbol \"" << *Name
+ << "\"\n";
+ });
+
+ TargetFlagsType Flags = makeTargetFlags(Sym);
+ orc::ExecutorAddrDiff Offset = getRawOffset(Sym, Flags);
+
+ if (Offset + Sym.st_size > B->getSize()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "In " << G->getName() << ", symbol ";
+ if (!Name->empty())
+ ErrStream << *Name;
+ else
+ ErrStream << "<anon>";
+ ErrStream << " (" << (B->getAddress() + Offset) << " -- "
+ << (B->getAddress() + Offset + Sym.st_size) << ") extends "
+ << formatv("{0:x}", Offset + Sym.st_size - B->getSize())
+ << " bytes past the end of its containing block ("
+ << B->getRange() << ")";
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ // In RISCV, temporary symbols (Used to generate dwarf, eh_frame
+ // sections...) will appear in object code's symbol table, and LLVM does
+ // not use names on these temporary symbols (RISCV gnu toolchain uses
+ // names on these temporary symbols). If the symbol is unnamed, add an
+ // anonymous symbol.
+ auto &GSym =
+ Name->empty()
+ ? G->addAnonymousSymbol(*B, Offset, Sym.st_size,
+ false, false)
+ : G->addDefinedSymbol(*B, Offset, *Name, Sym.st_size, L,
+ S, Sym.getType() == ELF::STT_FUNC,
+ false);
+
+ GSym.setTargetFlags(Flags);
+ setGraphSymbol(SymIndex, GSym);
+ }
+ } else if (Sym.isUndefined() && Sym.isExternal()) {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Creating external graph symbol for ELF symbol \"" << *Name
+ << "\"\n";
+ });
+
+ if (Sym.getBinding() != ELF::STB_GLOBAL &&
+ Sym.getBinding() != ELF::STB_WEAK)
+ return make_error<StringError>(
+ "Invalid symbol binding " +
+ Twine(static_cast<int>(Sym.getBinding())) +
+ " for external symbol " + *Name,
+ inconvertibleErrorCode());
+
+ // If L is Linkage::Weak that means this is a weakly referenced symbol.
+ auto &GSym = G->addExternalSymbol(*Name, Sym.st_size,
+ Sym.getBinding() == ELF::STB_WEAK);
+ setGraphSymbol(SymIndex, GSym);
+ } else if (Sym.isUndefined() && Sym.st_value == 0 && Sym.st_size == 0 &&
+ Sym.getType() == ELF::STT_NOTYPE &&
+ Sym.getBinding() == ELF::STB_LOCAL && Name->empty()) {
+ // Some relocations (e.g., R_RISCV_ALIGN) don't have a target symbol and
+ // use this kind of null symbol as a placeholder.
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex << ": Creating null graph symbol\n";
+ });
+
+ auto SymName =
+ G->allocateContent("__jitlink_ELF_SYM_UND_" + Twine(SymIndex));
+ auto SymNameRef = StringRef(SymName.data(), SymName.size());
+ auto &GSym = G->addAbsoluteSymbol(SymNameRef, orc::ExecutorAddr(0), 0,
+ Linkage::Strong, Scope::Local, false);
+ setGraphSymbol(SymIndex, GSym);
+ } else {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Not creating graph symbol for ELF symbol \"" << *Name
+ << "\" with unrecognized type\n";
+ });
+ }
+ }
+
+ return Error::success();
+}
+
+template <typename ELFT>
+template <typename RelocHandlerFunction>
+Error ELFLinkGraphBuilder<ELFT>::forEachRelaRelocation(
+ const typename ELFT::Shdr &RelSect, RelocHandlerFunction &&Func) {
+ // Only look into sections that store relocation entries.
+ if (RelSect.sh_type != ELF::SHT_RELA)
+ return Error::success();
+
+ // sh_info contains the section header index of the target (FixupSection),
+ // which is the section to which all relocations in RelSect apply.
+ auto FixupSection = Obj.getSection(RelSect.sh_info);
+ if (!FixupSection)
+ return FixupSection.takeError();
+
+ // Target sections have names in valid ELF object files.
+ Expected<StringRef> Name = Obj.getSectionName(**FixupSection);
+ if (!Name)
+ return Name.takeError();
+ LLVM_DEBUG(dbgs() << " " << *Name << ":\n");
+
+ // Consider skipping these relocations.
+ if (!ProcessDebugSections && isDwarfSection(*Name)) {
+ LLVM_DEBUG(dbgs() << " skipped (dwarf section)\n\n");
+ return Error::success();
+ }
+ if (excludeSection(**FixupSection)) {
+ LLVM_DEBUG(dbgs() << " skipped (fixup section excluded explicitly)\n\n");
+ return Error::success();
+ }
+
+ // Lookup the link-graph node corresponding to the target section name.
+ auto *BlockToFix = getGraphBlock(RelSect.sh_info);
+ if (!BlockToFix)
+ return make_error<StringError>(
+ "Refencing a section that wasn't added to the graph: " + *Name,
+ inconvertibleErrorCode());
+
+ auto RelEntries = Obj.relas(RelSect);
+ if (!RelEntries)
+ return RelEntries.takeError();
+
+ // Let the callee process relocation entries one by one.
+ for (const typename ELFT::Rela &R : *RelEntries)
+ if (Error Err = Func(R, **FixupSection, *BlockToFix))
+ return Err;
+
+ LLVM_DEBUG(dbgs() << "\n");
+ return Error::success();
+}
+
+template <typename ELFT>
+template <typename RelocHandlerFunction>
+Error ELFLinkGraphBuilder<ELFT>::forEachRelRelocation(
+ const typename ELFT::Shdr &RelSect, RelocHandlerFunction &&Func) {
+ // Only look into sections that store relocation entries.
+ if (RelSect.sh_type != ELF::SHT_REL)
+ return Error::success();
+
+ // sh_info contains the section header index of the target (FixupSection),
+ // which is the section to which all relocations in RelSect apply.
+ auto FixupSection = Obj.getSection(RelSect.sh_info);
+ if (!FixupSection)
+ return FixupSection.takeError();
+
+ // Target sections have names in valid ELF object files.
+ Expected<StringRef> Name = Obj.getSectionName(**FixupSection);
+ if (!Name)
+ return Name.takeError();
+ LLVM_DEBUG(dbgs() << " " << *Name << ":\n");
+
+ // Consider skipping these relocations.
+ if (!ProcessDebugSections && isDwarfSection(*Name)) {
+ LLVM_DEBUG(dbgs() << " skipped (dwarf section)\n\n");
+ return Error::success();
+ }
+ if (excludeSection(**FixupSection)) {
+ LLVM_DEBUG(dbgs() << " skipped (fixup section excluded explicitly)\n\n");
+ return Error::success();
+ }
+
+ // Lookup the link-graph node corresponding to the target section name.
+ auto *BlockToFix = getGraphBlock(RelSect.sh_info);
+ if (!BlockToFix)
+ return make_error<StringError>(
+ "Refencing a section that wasn't added to the graph: " + *Name,
+ inconvertibleErrorCode());
+
+ auto RelEntries = Obj.rels(RelSect);
+ if (!RelEntries)
+ return RelEntries.takeError();
+
+ // Let the callee process relocation entries one by one.
+ for (const typename ELFT::Rel &R : *RelEntries)
+ if (Error Err = Func(R, **FixupSection, *BlockToFix))
+ return Err;
+
+ LLVM_DEBUG(dbgs() << "\n");
+ return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_ELFLINKGRAPHBUILDER_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp
new file mode 100644
index 000000000000..866de2cb227c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp
@@ -0,0 +1,334 @@
+//===----- ELF_aarch32.cpp - JIT linker implementation for arm/thumb ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/aarch32 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_aarch32.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITLink/aarch32.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm::object;
+
+namespace llvm {
+namespace jitlink {
+
+/// Translate from ELF relocation type to JITLink-internal edge kind.
+Expected<aarch32::EdgeKind_aarch32>
+getJITLinkEdgeKind(uint32_t ELFType, const aarch32::ArmConfig &ArmCfg) {
+ switch (ELFType) {
+ case ELF::R_ARM_ABS32:
+ return aarch32::Data_Pointer32;
+ case ELF::R_ARM_GOT_PREL:
+ return aarch32::Data_RequestGOTAndTransformToDelta32;
+ case ELF::R_ARM_REL32:
+ return aarch32::Data_Delta32;
+ case ELF::R_ARM_CALL:
+ return aarch32::Arm_Call;
+ case ELF::R_ARM_JUMP24:
+ return aarch32::Arm_Jump24;
+ case ELF::R_ARM_MOVW_ABS_NC:
+ return aarch32::Arm_MovwAbsNC;
+ case ELF::R_ARM_MOVT_ABS:
+ return aarch32::Arm_MovtAbs;
+ case ELF::R_ARM_NONE:
+ return aarch32::None;
+ case ELF::R_ARM_PREL31:
+ return aarch32::Data_PRel31;
+ case ELF::R_ARM_TARGET1:
+ return (ArmCfg.Target1Rel) ? aarch32::Data_Delta32
+ : aarch32::Data_Pointer32;
+ case ELF::R_ARM_THM_CALL:
+ return aarch32::Thumb_Call;
+ case ELF::R_ARM_THM_JUMP24:
+ return aarch32::Thumb_Jump24;
+ case ELF::R_ARM_THM_MOVW_ABS_NC:
+ return aarch32::Thumb_MovwAbsNC;
+ case ELF::R_ARM_THM_MOVT_ABS:
+ return aarch32::Thumb_MovtAbs;
+ case ELF::R_ARM_THM_MOVW_PREL_NC:
+ return aarch32::Thumb_MovwPrelNC;
+ case ELF::R_ARM_THM_MOVT_PREL:
+ return aarch32::Thumb_MovtPrel;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported aarch32 relocation " + formatv("{0:d}: ", ELFType) +
+ object::getELFRelocationTypeName(ELF::EM_ARM, ELFType));
+}
+
+/// Translate from JITLink-internal edge kind back to ELF relocation type.
+Expected<uint32_t> getELFRelocationType(Edge::Kind Kind) {
+ switch (static_cast<aarch32::EdgeKind_aarch32>(Kind)) {
+ case aarch32::Data_Delta32:
+ return ELF::R_ARM_REL32;
+ case aarch32::Data_Pointer32:
+ return ELF::R_ARM_ABS32;
+ case aarch32::Data_PRel31:
+ return ELF::R_ARM_PREL31;
+ case aarch32::Data_RequestGOTAndTransformToDelta32:
+ return ELF::R_ARM_GOT_PREL;
+ case aarch32::Arm_Call:
+ return ELF::R_ARM_CALL;
+ case aarch32::Arm_Jump24:
+ return ELF::R_ARM_JUMP24;
+ case aarch32::Arm_MovwAbsNC:
+ return ELF::R_ARM_MOVW_ABS_NC;
+ case aarch32::Arm_MovtAbs:
+ return ELF::R_ARM_MOVT_ABS;
+ case aarch32::Thumb_Call:
+ return ELF::R_ARM_THM_CALL;
+ case aarch32::Thumb_Jump24:
+ return ELF::R_ARM_THM_JUMP24;
+ case aarch32::Thumb_MovwAbsNC:
+ return ELF::R_ARM_THM_MOVW_ABS_NC;
+ case aarch32::Thumb_MovtAbs:
+ return ELF::R_ARM_THM_MOVT_ABS;
+ case aarch32::Thumb_MovwPrelNC:
+ return ELF::R_ARM_THM_MOVW_PREL_NC;
+ case aarch32::Thumb_MovtPrel:
+ return ELF::R_ARM_THM_MOVT_PREL;
+ case aarch32::None:
+ return ELF::R_ARM_NONE;
+ }
+
+ return make_error<JITLinkError>(formatv("Invalid aarch32 edge {0:d}: ",
+ Kind));
+}
+
+/// Get a human-readable name for the given ELF AArch32 edge kind.
+const char *getELFAArch32EdgeKindName(Edge::Kind R) {
+ // No ELF-specific edge kinds yet
+ return aarch32::getEdgeKindName(R);
+}
+
+class ELFJITLinker_aarch32 : public JITLinker<ELFJITLinker_aarch32> {
+ friend class JITLinker<ELFJITLinker_aarch32>;
+
+public:
+ ELFJITLinker_aarch32(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G, PassConfiguration PassCfg,
+ aarch32::ArmConfig ArmCfg)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassCfg)),
+ ArmCfg(std::move(ArmCfg)) {}
+
+private:
+ aarch32::ArmConfig ArmCfg;
+
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return aarch32::applyFixup(G, B, E, ArmCfg);
+ }
+};
+
+template <llvm::endianness DataEndianness>
+class ELFLinkGraphBuilder_aarch32
+ : public ELFLinkGraphBuilder<ELFType<DataEndianness, false>> {
+private:
+ using ELFT = ELFType<DataEndianness, false>;
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+ using Self = ELFLinkGraphBuilder_aarch32<DataEndianness>;
+ for (const auto &RelSect : Base::Sections) {
+ if (Error Err = Base::forEachRelRelocation(RelSect, this,
+ &Self::addSingleRelRelocation))
+ return Err;
+ }
+ return Error::success();
+ }
+
+ Error addSingleRelRelocation(const typename ELFT::Rel &Rel,
+ const typename ELFT::Shdr &FixupSect,
+ Block &BlockToFix) {
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ uint32_t Type = Rel.getType(false);
+ Expected<aarch32::EdgeKind_aarch32> Kind = getJITLinkEdgeKind(Type, ArmCfg);
+ if (!Kind)
+ return Kind.takeError();
+
+ auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+
+ Expected<int64_t> Addend =
+ aarch32::readAddend(*Base::G, BlockToFix, Offset, *Kind, ArmCfg);
+ if (!Addend)
+ return Addend.takeError();
+
+ Edge E(*Kind, Offset, *GraphSymbol, *Addend);
+ LLVM_DEBUG({
+ dbgs() << " ";
+ printEdge(dbgs(), BlockToFix, E, getELFAArch32EdgeKindName(*Kind));
+ dbgs() << "\n";
+ });
+
+ BlockToFix.addEdge(std::move(E));
+ return Error::success();
+ }
+
+ aarch32::ArmConfig ArmCfg;
+
+protected:
+ TargetFlagsType makeTargetFlags(const typename ELFT::Sym &Sym) override {
+ // Only emit target flag for callable symbols
+ if (Sym.getType() != ELF::STT_FUNC)
+ return TargetFlagsType{};
+ if (Sym.getValue() & 0x01)
+ return aarch32::ThumbSymbol;
+ return TargetFlagsType{};
+ }
+
+ orc::ExecutorAddrDiff getRawOffset(const typename ELFT::Sym &Sym,
+ TargetFlagsType Flags) override {
+ assert((makeTargetFlags(Sym) & Flags) == Flags);
+ static constexpr uint64_t ThumbBit = 0x01;
+ if (Sym.getType() == ELF::STT_FUNC)
+ return Sym.getValue() & ~ThumbBit;
+ return Sym.getValue();
+ }
+
+public:
+ ELFLinkGraphBuilder_aarch32(StringRef FileName,
+ const llvm::object::ELFFile<ELFT> &Obj, Triple TT,
+ SubtargetFeatures Features,
+ aarch32::ArmConfig ArmCfg)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
+ FileName, getELFAArch32EdgeKindName),
+ ArmCfg(std::move(ArmCfg)) {}
+};
+
+template <typename StubsManagerType>
+Error buildTables_ELF_aarch32(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+ StubsManagerType StubsManager;
+ visitExistingEdges(G, StubsManager);
+ aarch32::GOTBuilder GOT;
+ visitExistingEdges(G, GOT);
+
+ return Error::success();
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_aarch32(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto Features = (*ELFObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ // Find out what exact AArch32 instruction set and features we target.
+ auto TT = (*ELFObj)->makeTriple();
+ ARM::ArchKind AK = ARM::parseArch(TT.getArchName());
+ if (AK == ARM::ArchKind::INVALID)
+ return make_error<JITLinkError>(
+ "Failed to build ELF link graph: Invalid ARM ArchKind");
+
+ // Resolve our internal configuration for the target. If at some point the
+ // CPUArch alone becomes too imprecise, we can find more details in the
+ // Tag_CPU_arch_profile.
+ auto Arch = static_cast<ARMBuildAttrs::CPUArch>(ARM::getArchAttr(AK));
+ aarch32::ArmConfig ArmCfg = aarch32::getArmConfigForCPUArch(Arch);
+
+ // Populate the link-graph.
+ switch (TT.getArch()) {
+ case Triple::arm:
+ case Triple::thumb: {
+ auto &ELFFile = cast<ELFObjectFile<ELF32LE>>(**ELFObj).getELFFile();
+ return ELFLinkGraphBuilder_aarch32<llvm::endianness::little>(
+ (*ELFObj)->getFileName(), ELFFile, TT, std::move(*Features),
+ ArmCfg)
+ .buildGraph();
+ }
+ case Triple::armeb:
+ case Triple::thumbeb: {
+ auto &ELFFile = cast<ELFObjectFile<ELF32BE>>(**ELFObj).getELFFile();
+ return ELFLinkGraphBuilder_aarch32<llvm::endianness::big>(
+ (*ELFObj)->getFileName(), ELFFile, TT, std::move(*Features),
+ ArmCfg)
+ .buildGraph();
+ }
+ default:
+ return make_error<JITLinkError>(
+ "Failed to build ELF/aarch32 link graph: Invalid target triple " +
+ TT.getTriple());
+ }
+}
+
+void link_ELF_aarch32(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ const Triple &TT = G->getTargetTriple();
+
+ using namespace ARMBuildAttrs;
+ ARM::ArchKind AK = ARM::parseArch(TT.getArchName());
+ auto CPU = static_cast<CPUArch>(ARM::getArchAttr(AK));
+ aarch32::ArmConfig ArmCfg = aarch32::getArmConfigForCPUArch(CPU);
+
+ PassConfiguration PassCfg;
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ PassCfg.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ PassCfg.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ switch (ArmCfg.Stubs) {
+ case aarch32::StubsFlavor::pre_v7:
+ PassCfg.PostPrunePasses.push_back(
+ buildTables_ELF_aarch32<aarch32::StubsManager_prev7>);
+ break;
+ case aarch32::StubsFlavor::v7:
+ PassCfg.PostPrunePasses.push_back(
+ buildTables_ELF_aarch32<aarch32::StubsManager_v7>);
+ break;
+ case aarch32::StubsFlavor::Undefined:
+ llvm_unreachable("Check before building graph");
+ }
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(*G, PassCfg))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_aarch32::link(std::move(Ctx), std::move(G), std::move(PassCfg),
+ std::move(ArmCfg));
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
new file mode 100644
index 000000000000..9ce8aecb717c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
@@ -0,0 +1,644 @@
+//===----- ELF_aarch64.cpp - JIT linker implementation for ELF/aarch64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/aarch64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_aarch64.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
+
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+
+class ELFJITLinker_aarch64 : public JITLinker<ELFJITLinker_aarch64> {
+ friend class JITLinker<ELFJITLinker_aarch64>;
+
+public:
+ ELFJITLinker_aarch64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return aarch64::applyFixup(G, B, E);
+ }
+};
+
+template <typename ELFT>
+class ELFLinkGraphBuilder_aarch64 : public ELFLinkGraphBuilder<ELFT> {
+private:
+ enum ELFAArch64RelocationKind : Edge::Kind {
+ ELFCall26 = Edge::FirstRelocation,
+ ELFLdrLo19,
+ ELFAdrLo21,
+ ELFAdrPage21,
+ ELFAddAbs12,
+ ELFLdSt8Abs12,
+ ELFLdSt16Abs12,
+ ELFLdSt32Abs12,
+ ELFLdSt64Abs12,
+ ELFLdSt128Abs12,
+ ELFMovwAbsG0,
+ ELFMovwAbsG1,
+ ELFMovwAbsG2,
+ ELFMovwAbsG3,
+ ELFTstBr14,
+ ELFCondBr19,
+ ELFAbs32,
+ ELFAbs64,
+ ELFPrel32,
+ ELFPrel64,
+ ELFAdrGOTPage21,
+ ELFLd64GOTLo12,
+ ELFTLSDescAdrPage21,
+ ELFTLSDescAddLo12,
+ ELFTLSDescLd64Lo12,
+ ELFTLSDescCall,
+ };
+
+ static Expected<ELFAArch64RelocationKind>
+ getRelocationKind(const uint32_t Type) {
+ using namespace aarch64;
+ switch (Type) {
+ case ELF::R_AARCH64_CALL26:
+ case ELF::R_AARCH64_JUMP26:
+ return ELFCall26;
+ case ELF::R_AARCH64_LD_PREL_LO19:
+ return ELFLdrLo19;
+ case ELF::R_AARCH64_ADR_PREL_LO21:
+ return ELFAdrLo21;
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21:
+ return ELFAdrPage21;
+ case ELF::R_AARCH64_ADD_ABS_LO12_NC:
+ return ELFAddAbs12;
+ case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
+ return ELFLdSt8Abs12;
+ case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
+ return ELFLdSt16Abs12;
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
+ return ELFLdSt32Abs12;
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
+ return ELFLdSt64Abs12;
+ case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
+ return ELFLdSt128Abs12;
+ case ELF::R_AARCH64_MOVW_UABS_G0_NC:
+ return ELFMovwAbsG0;
+ case ELF::R_AARCH64_MOVW_UABS_G1_NC:
+ return ELFMovwAbsG1;
+ case ELF::R_AARCH64_MOVW_UABS_G2_NC:
+ return ELFMovwAbsG2;
+ case ELF::R_AARCH64_MOVW_UABS_G3:
+ return ELFMovwAbsG3;
+ case ELF::R_AARCH64_TSTBR14:
+ return ELFTstBr14;
+ case ELF::R_AARCH64_CONDBR19:
+ return ELFCondBr19;
+ case ELF::R_AARCH64_ABS32:
+ return ELFAbs32;
+ case ELF::R_AARCH64_ABS64:
+ return ELFAbs64;
+ case ELF::R_AARCH64_PREL32:
+ return ELFPrel32;
+ case ELF::R_AARCH64_PREL64:
+ return ELFPrel64;
+ case ELF::R_AARCH64_ADR_GOT_PAGE:
+ return ELFAdrGOTPage21;
+ case ELF::R_AARCH64_LD64_GOT_LO12_NC:
+ return ELFLd64GOTLo12;
+ case ELF::R_AARCH64_TLSDESC_ADR_PAGE21:
+ return ELFTLSDescAdrPage21;
+ case ELF::R_AARCH64_TLSDESC_ADD_LO12:
+ return ELFTLSDescAddLo12;
+ case ELF::R_AARCH64_TLSDESC_LD64_LO12:
+ return ELFTLSDescLd64Lo12;
+ case ELF::R_AARCH64_TLSDESC_CALL:
+ return ELFTLSDescCall;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported aarch64 relocation " + formatv("{0:d}: ", Type) +
+ object::getELFRelocationTypeName(ELF::EM_AARCH64, Type));
+ }
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ using Base = ELFLinkGraphBuilder<ELFT>;
+ using Self = ELFLinkGraphBuilder_aarch64<ELFT>;
+ for (const auto &RelSect : Base::Sections)
+ if (Error Err = Base::forEachRelaRelocation(RelSect, this,
+ &Self::addSingleRelocation))
+ return Err;
+
+ return Error::success();
+ }
+
+ Error addSingleRelocation(const typename ELFT::Rela &Rel,
+ const typename ELFT::Shdr &FixupSect,
+ Block &BlockToFix) {
+ using support::ulittle32_t;
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ uint32_t Type = Rel.getType(false);
+ Expected<ELFAArch64RelocationKind> RelocKind = getRelocationKind(Type);
+ if (!RelocKind)
+ return RelocKind.takeError();
+
+ int64_t Addend = Rel.r_addend;
+ orc::ExecutorAddr FixupAddress =
+ orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+
+ // Get a pointer to the fixup content.
+ const void *FixupContent = BlockToFix.getContent().data() +
+ (FixupAddress - BlockToFix.getAddress());
+
+ Edge::Kind Kind = Edge::Invalid;
+
+ switch (*RelocKind) {
+ case ELFCall26: {
+ Kind = aarch64::Branch26PCRel;
+ break;
+ }
+ case ELFLdrLo19: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isLDRLiteral(Instr))
+ return make_error<JITLinkError>(
+ "R_AARCH64_LD_PREL_LO19 target is not an LDR Literal instruction");
+
+ Kind = aarch64::LDRLiteral19;
+ break;
+ }
+ case ELFAdrLo21: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isADR(Instr))
+ return make_error<JITLinkError>(
+ "R_AARCH64_ADR_PREL_LO21 target is not an ADR instruction");
+
+ Kind = aarch64::ADRLiteral21;
+ break;
+ }
+ case ELFAdrPage21: {
+ Kind = aarch64::Page21;
+ break;
+ }
+ case ELFAddAbs12: {
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case ELFLdSt8Abs12: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isLoadStoreImm12(Instr) ||
+ aarch64::getPageOffset12Shift(Instr) != 0)
+ return make_error<JITLinkError>(
+ "R_AARCH64_LDST8_ABS_LO12_NC target is not a "
+ "LDRB/STRB (imm12) instruction");
+
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case ELFLdSt16Abs12: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isLoadStoreImm12(Instr) ||
+ aarch64::getPageOffset12Shift(Instr) != 1)
+ return make_error<JITLinkError>(
+ "R_AARCH64_LDST16_ABS_LO12_NC target is not a "
+ "LDRH/STRH (imm12) instruction");
+
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case ELFLdSt32Abs12: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isLoadStoreImm12(Instr) ||
+ aarch64::getPageOffset12Shift(Instr) != 2)
+ return make_error<JITLinkError>(
+ "R_AARCH64_LDST32_ABS_LO12_NC target is not a "
+ "LDR/STR (imm12, 32 bit) instruction");
+
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case ELFLdSt64Abs12: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isLoadStoreImm12(Instr) ||
+ aarch64::getPageOffset12Shift(Instr) != 3)
+ return make_error<JITLinkError>(
+ "R_AARCH64_LDST64_ABS_LO12_NC target is not a "
+ "LDR/STR (imm12, 64 bit) instruction");
+
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case ELFLdSt128Abs12: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isLoadStoreImm12(Instr) ||
+ aarch64::getPageOffset12Shift(Instr) != 4)
+ return make_error<JITLinkError>(
+ "R_AARCH64_LDST128_ABS_LO12_NC target is not a "
+ "LDR/STR (imm12, 128 bit) instruction");
+
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case ELFMovwAbsG0: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isMoveWideImm16(Instr) ||
+ aarch64::getMoveWide16Shift(Instr) != 0)
+ return make_error<JITLinkError>(
+ "R_AARCH64_MOVW_UABS_G0_NC target is not a "
+ "MOVK/MOVZ (imm16, LSL #0) instruction");
+
+ Kind = aarch64::MoveWide16;
+ break;
+ }
+ case ELFMovwAbsG1: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isMoveWideImm16(Instr) ||
+ aarch64::getMoveWide16Shift(Instr) != 16)
+ return make_error<JITLinkError>(
+ "R_AARCH64_MOVW_UABS_G1_NC target is not a "
+ "MOVK/MOVZ (imm16, LSL #16) instruction");
+
+ Kind = aarch64::MoveWide16;
+ break;
+ }
+ case ELFMovwAbsG2: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isMoveWideImm16(Instr) ||
+ aarch64::getMoveWide16Shift(Instr) != 32)
+ return make_error<JITLinkError>(
+ "R_AARCH64_MOVW_UABS_G2_NC target is not a "
+ "MOVK/MOVZ (imm16, LSL #32) instruction");
+
+ Kind = aarch64::MoveWide16;
+ break;
+ }
+ case ELFMovwAbsG3: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isMoveWideImm16(Instr) ||
+ aarch64::getMoveWide16Shift(Instr) != 48)
+ return make_error<JITLinkError>(
+ "R_AARCH64_MOVW_UABS_G3 target is not a "
+ "MOVK/MOVZ (imm16, LSL #48) instruction");
+
+ Kind = aarch64::MoveWide16;
+ break;
+ }
+ case ELFTstBr14: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isTestAndBranchImm14(Instr))
+ return make_error<JITLinkError>("R_AARCH64_TSTBR14 target is not a "
+ "test and branch instruction");
+
+ Kind = aarch64::TestAndBranch14PCRel;
+ break;
+ }
+ case ELFCondBr19: {
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if (!aarch64::isCondBranchImm19(Instr) &&
+ !aarch64::isCompAndBranchImm19(Instr))
+ return make_error<JITLinkError>("R_AARCH64_CONDBR19 target is not a "
+ "conditional branch instruction");
+
+ Kind = aarch64::CondBranch19PCRel;
+ break;
+ }
+ case ELFAbs32: {
+ Kind = aarch64::Pointer32;
+ break;
+ }
+ case ELFAbs64: {
+ Kind = aarch64::Pointer64;
+ break;
+ }
+ case ELFPrel32: {
+ Kind = aarch64::Delta32;
+ break;
+ }
+ case ELFPrel64: {
+ Kind = aarch64::Delta64;
+ break;
+ }
+ case ELFAdrGOTPage21: {
+ Kind = aarch64::RequestGOTAndTransformToPage21;
+ break;
+ }
+ case ELFLd64GOTLo12: {
+ Kind = aarch64::RequestGOTAndTransformToPageOffset12;
+ break;
+ }
+ case ELFTLSDescAdrPage21: {
+ Kind = aarch64::RequestTLSDescEntryAndTransformToPage21;
+ break;
+ }
+ case ELFTLSDescAddLo12:
+ case ELFTLSDescLd64Lo12: {
+ Kind = aarch64::RequestTLSDescEntryAndTransformToPageOffset12;
+ break;
+ }
+ case ELFTLSDescCall: {
+ return Error::success();
+ }
+ };
+
+ Edge GE(Kind, Offset, *GraphSymbol, Addend);
+ LLVM_DEBUG({
+ dbgs() << " ";
+ printEdge(dbgs(), BlockToFix, GE, aarch64::getEdgeKindName(Kind));
+ dbgs() << "\n";
+ });
+
+ BlockToFix.addEdge(std::move(GE));
+
+ return Error::success();
+ }
+
+ /// Return the string name of the given ELF aarch64 edge kind.
+ const char *getELFAArch64RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case ELFCall26:
+ return "ELFCall26";
+ case ELFAdrPage21:
+ return "ELFAdrPage21";
+ case ELFAddAbs12:
+ return "ELFAddAbs12";
+ case ELFLdSt8Abs12:
+ return "ELFLdSt8Abs12";
+ case ELFLdSt16Abs12:
+ return "ELFLdSt16Abs12";
+ case ELFLdSt32Abs12:
+ return "ELFLdSt32Abs12";
+ case ELFLdSt64Abs12:
+ return "ELFLdSt64Abs12";
+ case ELFLdSt128Abs12:
+ return "ELFLdSt128Abs12";
+ case ELFMovwAbsG0:
+ return "ELFMovwAbsG0";
+ case ELFMovwAbsG1:
+ return "ELFMovwAbsG1";
+ case ELFMovwAbsG2:
+ return "ELFMovwAbsG2";
+ case ELFMovwAbsG3:
+ return "ELFMovwAbsG3";
+ case ELFAbs32:
+ return "ELFAbs32";
+ case ELFAbs64:
+ return "ELFAbs64";
+ case ELFPrel32:
+ return "ELFPrel32";
+ case ELFPrel64:
+ return "ELFPrel64";
+ case ELFAdrGOTPage21:
+ return "ELFAdrGOTPage21";
+ case ELFLd64GOTLo12:
+ return "ELFLd64GOTLo12";
+ case ELFTLSDescAdrPage21:
+ return "ELFTLSDescAdrPage21";
+ case ELFTLSDescAddLo12:
+ return "ELFTLSDescAddLo12";
+ case ELFTLSDescLd64Lo12:
+ return "ELFTLSDescLd64Lo12";
+ case ELFTLSDescCall:
+ return "ELFTLSDescCall";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+ }
+
+public:
+ ELFLinkGraphBuilder_aarch64(StringRef FileName,
+ const object::ELFFile<ELFT> &Obj, Triple TT,
+ SubtargetFeatures Features)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
+ FileName, aarch64::getEdgeKindName) {}
+};
+
+// TLS Info Builder.
+class TLSInfoTableManager_ELF_aarch64
+ : public TableManager<TLSInfoTableManager_ELF_aarch64> {
+public:
+ static StringRef getSectionName() { return "$__TLSINFO"; }
+
+ static const uint8_t TLSInfoEntryContent[16];
+
+ bool visitEdge(LinkGraph &G, Block *B, Edge &E) { return false; }
+
+ Symbol &createEntry(LinkGraph &G, Symbol &Target) {
+ // the TLS Info entry's key value will be written by the fixTLVSectionByName
+ // pass, so create mutable content.
+ auto &TLSInfoEntry = G.createMutableContentBlock(
+ getTLSInfoSection(G), G.allocateContent(getTLSInfoEntryContent()),
+ orc::ExecutorAddr(), 8, 0);
+ TLSInfoEntry.addEdge(aarch64::Pointer64, 8, Target, 0);
+ return G.addAnonymousSymbol(TLSInfoEntry, 0, 16, false, false);
+ }
+
+private:
+ Section &getTLSInfoSection(LinkGraph &G) {
+ if (!TLSInfoTable)
+ TLSInfoTable = &G.createSection(getSectionName(), orc::MemProt::Read);
+ return *TLSInfoTable;
+ }
+
+ ArrayRef<char> getTLSInfoEntryContent() const {
+ return {reinterpret_cast<const char *>(TLSInfoEntryContent),
+ sizeof(TLSInfoEntryContent)};
+ }
+
+ Section *TLSInfoTable = nullptr;
+};
+
+const uint8_t TLSInfoTableManager_ELF_aarch64::TLSInfoEntryContent[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*pthread key */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /*data address*/
+};
+
+// TLS Descriptor Builder.
+class TLSDescTableManager_ELF_aarch64
+ : public TableManager<TLSDescTableManager_ELF_aarch64> {
+public:
+ TLSDescTableManager_ELF_aarch64(
+ TLSInfoTableManager_ELF_aarch64 &TLSInfoTableManager)
+ : TLSInfoTableManager(TLSInfoTableManager) {}
+
+ static StringRef getSectionName() { return "$__TLSDESC"; }
+
+ static const uint8_t TLSDescEntryContent[16];
+
+ bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
+ Edge::Kind KindToSet = Edge::Invalid;
+ switch (E.getKind()) {
+ case aarch64::RequestTLSDescEntryAndTransformToPage21: {
+ KindToSet = aarch64::Page21;
+ break;
+ }
+ case aarch64::RequestTLSDescEntryAndTransformToPageOffset12: {
+ KindToSet = aarch64::PageOffset12;
+ break;
+ }
+ default:
+ return false;
+ }
+ assert(KindToSet != Edge::Invalid &&
+ "Fell through switch, but no new kind to set");
+ DEBUG_WITH_TYPE("jitlink", {
+ dbgs() << " Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
+ << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
+ << formatv("{0:x}", E.getOffset()) << ")\n";
+ });
+ E.setKind(KindToSet);
+ E.setTarget(getEntryForTarget(G, E.getTarget()));
+ return true;
+ }
+
+ Symbol &createEntry(LinkGraph &G, Symbol &Target) {
+ auto &EntryBlock =
+ G.createContentBlock(getTLSDescSection(G), getTLSDescBlockContent(),
+ orc::ExecutorAddr(), 8, 0);
+ EntryBlock.addEdge(aarch64::Pointer64, 0, getTLSDescResolver(G), 0);
+ EntryBlock.addEdge(aarch64::Pointer64, 8,
+ TLSInfoTableManager.getEntryForTarget(G, Target), 0);
+ return G.addAnonymousSymbol(EntryBlock, 0, 8, false, false);
+ }
+
+private:
+ Section &getTLSDescSection(LinkGraph &G) {
+ if (!GOTSection)
+ GOTSection = &G.createSection(getSectionName(), orc::MemProt::Read);
+ return *GOTSection;
+ }
+
+ Symbol &getTLSDescResolver(LinkGraph &G) {
+ if (!TLSDescResolver)
+ TLSDescResolver = &G.addExternalSymbol("__tlsdesc_resolver", 8, false);
+ return *TLSDescResolver;
+ }
+
+ ArrayRef<char> getTLSDescBlockContent() {
+ return {reinterpret_cast<const char *>(TLSDescEntryContent),
+ sizeof(TLSDescEntryContent)};
+ }
+
+ Section *GOTSection = nullptr;
+ Symbol *TLSDescResolver = nullptr;
+ TLSInfoTableManager_ELF_aarch64 &TLSInfoTableManager;
+};
+
+const uint8_t TLSDescTableManager_ELF_aarch64::TLSDescEntryContent[16] = {
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, /*resolver function pointer*/
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 /*pointer to tls info*/
+};
+
+Error buildTables_ELF_aarch64(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+ aarch64::GOTTableManager GOT;
+ aarch64::PLTTableManager PLT(GOT);
+ TLSInfoTableManager_ELF_aarch64 TLSInfo;
+ TLSDescTableManager_ELF_aarch64 TLSDesc(TLSInfo);
+ visitExistingEdges(G, GOT, PLT, TLSDesc, TLSInfo);
+ return Error::success();
+}
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_aarch64(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto Features = (*ELFObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ assert((*ELFObj)->getArch() == Triple::aarch64 &&
+ "Only AArch64 (little endian) is supported for now");
+
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_aarch64<object::ELF64LE>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple(), std::move(*Features))
+ .buildGraph();
+}
+
+void link_ELF_aarch64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ const Triple &TT = G->getTargetTriple();
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ // Add eh-frame passes.
+ Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
+ Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
+ ".eh_frame", 8, aarch64::Pointer32, aarch64::Pointer64,
+ aarch64::Delta32, aarch64::Delta64, aarch64::NegDelta32));
+ Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));
+
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Resolve any external section start / end symbols.
+ Config.PostAllocationPasses.push_back(
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ identifyELFSectionStartAndEndSymbols));
+
+ // Add an in-place GOT/TLS/Stubs build pass.
+ Config.PostPrunePasses.push_back(buildTables_ELF_aarch64);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_aarch64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_i386.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_i386.cpp
new file mode 100644
index 000000000000..860165365a7e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_i386.cpp
@@ -0,0 +1,265 @@
+//===----- ELF_i386.cpp - JIT linker implementation for ELF/i386 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/i386 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_i386.h"
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/i386.h"
+#include "llvm/Object/ELFObjectFile.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+constexpr StringRef ELFGOTSymbolName = "_GLOBAL_OFFSET_TABLE_";
+
+Error buildTables_ELF_i386(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+ i386::GOTTableManager GOT;
+ i386::PLTTableManager PLT(GOT);
+ visitExistingEdges(G, GOT, PLT);
+ return Error::success();
+}
+} // namespace
+
+namespace llvm::jitlink {
+
+class ELFJITLinker_i386 : public JITLinker<ELFJITLinker_i386> {
+ friend class JITLinker<ELFJITLinker_i386>;
+
+public:
+ ELFJITLinker_i386(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G, PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {
+ getPassConfig().PostAllocationPasses.push_back(
+ [this](LinkGraph &G) { return getOrCreateGOTSymbol(G); });
+ }
+
+private:
+ Symbol *GOTSymbol = nullptr;
+
+ Error getOrCreateGOTSymbol(LinkGraph &G) {
+ auto DefineExternalGOTSymbolIfPresent =
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ [&](LinkGraph &LG, Symbol &Sym) -> SectionRangeSymbolDesc {
+ if (Sym.getName() == ELFGOTSymbolName)
+ if (auto *GOTSection = G.findSectionByName(
+ i386::GOTTableManager::getSectionName())) {
+ GOTSymbol = &Sym;
+ return {*GOTSection, true};
+ }
+ return {};
+ });
+
+ // Try to attach _GLOBAL_OFFSET_TABLE_ to the GOT if it's defined as an
+ // external.
+ if (auto Err = DefineExternalGOTSymbolIfPresent(G))
+ return Err;
+
+ // If we succeeded then we're done.
+ if (GOTSymbol)
+ return Error::success();
+
+ // Otherwise look for a GOT section: If it already has a start symbol we'll
+ // record it, otherwise we'll create our own.
+ // If there's a GOT section but we didn't find an external GOT symbol...
+ if (auto *GOTSection =
+ G.findSectionByName(i386::GOTTableManager::getSectionName())) {
+
+ // Check for an existing defined symbol.
+ for (auto *Sym : GOTSection->symbols())
+ if (Sym->getName() == ELFGOTSymbolName) {
+ GOTSymbol = Sym;
+ return Error::success();
+ }
+
+ // If there's no defined symbol then create one.
+ SectionRange SR(*GOTSection);
+
+ if (SR.empty()) {
+ GOTSymbol =
+ &G.addAbsoluteSymbol(ELFGOTSymbolName, orc::ExecutorAddr(), 0,
+ Linkage::Strong, Scope::Local, true);
+ } else {
+ GOTSymbol =
+ &G.addDefinedSymbol(*SR.getFirstBlock(), 0, ELFGOTSymbolName, 0,
+ Linkage::Strong, Scope::Local, false, true);
+ }
+ }
+
+ return Error::success();
+ }
+
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return i386::applyFixup(G, B, E, GOTSymbol);
+ }
+};
+
+template <typename ELFT>
+class ELFLinkGraphBuilder_i386 : public ELFLinkGraphBuilder<ELFT> {
+private:
+ static Expected<i386::EdgeKind_i386> getRelocationKind(const uint32_t Type) {
+ using namespace i386;
+ switch (Type) {
+ case ELF::R_386_NONE:
+ return EdgeKind_i386::None;
+ case ELF::R_386_32:
+ return EdgeKind_i386::Pointer32;
+ case ELF::R_386_PC32:
+ return EdgeKind_i386::PCRel32;
+ case ELF::R_386_16:
+ return EdgeKind_i386::Pointer16;
+ case ELF::R_386_PC16:
+ return EdgeKind_i386::PCRel16;
+ case ELF::R_386_GOT32:
+ return EdgeKind_i386::RequestGOTAndTransformToDelta32FromGOT;
+ case ELF::R_386_GOTPC:
+ return EdgeKind_i386::Delta32;
+ case ELF::R_386_GOTOFF:
+ return EdgeKind_i386::Delta32FromGOT;
+ case ELF::R_386_PLT32:
+ return EdgeKind_i386::BranchPCRel32;
+ }
+
+ return make_error<JITLinkError>("Unsupported i386 relocation:" +
+ formatv("{0:d}", Type));
+ }
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Adding relocations\n");
+ using Base = ELFLinkGraphBuilder<ELFT>;
+ using Self = ELFLinkGraphBuilder_i386;
+
+ for (const auto &RelSect : Base::Sections) {
+ // Validate the section to read relocation entries from.
+ if (RelSect.sh_type == ELF::SHT_RELA)
+ return make_error<StringError>(
+ "No SHT_RELA in valid i386 ELF object files",
+ inconvertibleErrorCode());
+
+ if (Error Err = Base::forEachRelRelocation(RelSect, this,
+ &Self::addSingleRelocation))
+ return Err;
+ }
+
+ return Error::success();
+ }
+
+ Error addSingleRelocation(const typename ELFT::Rel &Rel,
+ const typename ELFT::Shdr &FixupSection,
+ Block &BlockToFix) {
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ Expected<i386::EdgeKind_i386> Kind = getRelocationKind(Rel.getType(false));
+ if (!Kind)
+ return Kind.takeError();
+
+ auto FixupAddress = orc::ExecutorAddr(FixupSection.sh_addr) + Rel.r_offset;
+ int64_t Addend = 0;
+
+ switch (*Kind) {
+ case i386::EdgeKind_i386::Delta32: {
+ const char *FixupContent = BlockToFix.getContent().data() +
+ (FixupAddress - BlockToFix.getAddress());
+ Addend = *(const support::ulittle32_t *)FixupContent;
+ break;
+ }
+ default:
+ break;
+ }
+
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+ Edge GE(*Kind, Offset, *GraphSymbol, Addend);
+ LLVM_DEBUG({
+ dbgs() << " ";
+ printEdge(dbgs(), BlockToFix, GE, i386::getEdgeKindName(*Kind));
+ dbgs() << "\n";
+ });
+
+ BlockToFix.addEdge(std::move(GE));
+ return Error::success();
+ }
+
+public:
+ ELFLinkGraphBuilder_i386(StringRef FileName, const object::ELFFile<ELFT> &Obj,
+ Triple TT, SubtargetFeatures Features)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
+ FileName, i386::getEdgeKindName) {}
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_i386(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto Features = (*ELFObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ assert((*ELFObj)->getArch() == Triple::x86 &&
+ "Only i386 (little endian) is supported for now");
+
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF32LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_i386<object::ELF32LE>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple(), std::move(*Features))
+ .buildGraph();
+}
+
+void link_ELF_i386(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ const Triple &TT = G->getTargetTriple();
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT and PLT build pass.
+ Config.PostPrunePasses.push_back(buildTables_ELF_i386);
+
+ // Add GOT/Stubs optimizer pass.
+ Config.PreFixupPasses.push_back(i386::optimizeGOTAndStubAccesses);
+ }
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_i386::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+} // namespace llvm::jitlink
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp
new file mode 100644
index 000000000000..aa9385fcb183
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp
@@ -0,0 +1,213 @@
+//===--- ELF_loongarch.cpp - JIT linker implementation for ELF/loongarch --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/loongarch jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_loongarch.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITLink/loongarch.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::loongarch;
+
+namespace {
+
+class ELFJITLinker_loongarch : public JITLinker<ELFJITLinker_loongarch> {
+ friend class JITLinker<ELFJITLinker_loongarch>;
+
+public:
+ ELFJITLinker_loongarch(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return loongarch::applyFixup(G, B, E);
+ }
+};
+
+template <typename ELFT>
+class ELFLinkGraphBuilder_loongarch : public ELFLinkGraphBuilder<ELFT> {
+private:
+ static Expected<loongarch::EdgeKind_loongarch>
+ getRelocationKind(const uint32_t Type) {
+ using namespace loongarch;
+ switch (Type) {
+ case ELF::R_LARCH_64:
+ return Pointer64;
+ case ELF::R_LARCH_32:
+ return Pointer32;
+ case ELF::R_LARCH_32_PCREL:
+ return Delta32;
+ case ELF::R_LARCH_B26:
+ return Branch26PCRel;
+ case ELF::R_LARCH_PCALA_HI20:
+ return Page20;
+ case ELF::R_LARCH_PCALA_LO12:
+ return PageOffset12;
+ case ELF::R_LARCH_GOT_PC_HI20:
+ return RequestGOTAndTransformToPage20;
+ case ELF::R_LARCH_GOT_PC_LO12:
+ return RequestGOTAndTransformToPageOffset12;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported loongarch relocation:" + formatv("{0:d}: ", Type) +
+ object::getELFRelocationTypeName(ELF::EM_LOONGARCH, Type));
+ }
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ using Base = ELFLinkGraphBuilder<ELFT>;
+ using Self = ELFLinkGraphBuilder_loongarch<ELFT>;
+ for (const auto &RelSect : Base::Sections)
+ if (Error Err = Base::forEachRelaRelocation(RelSect, this,
+ &Self::addSingleRelocation))
+ return Err;
+
+ return Error::success();
+ }
+
+ Error addSingleRelocation(const typename ELFT::Rela &Rel,
+ const typename ELFT::Shdr &FixupSect,
+ Block &BlockToFix) {
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ uint32_t Type = Rel.getType(false);
+ Expected<loongarch::EdgeKind_loongarch> Kind = getRelocationKind(Type);
+ if (!Kind)
+ return Kind.takeError();
+
+ int64_t Addend = Rel.r_addend;
+ auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+ Edge GE(*Kind, Offset, *GraphSymbol, Addend);
+ LLVM_DEBUG({
+ dbgs() << " ";
+ printEdge(dbgs(), BlockToFix, GE, loongarch::getEdgeKindName(*Kind));
+ dbgs() << "\n";
+ });
+
+ BlockToFix.addEdge(std::move(GE));
+
+ return Error::success();
+ }
+
+public:
+ ELFLinkGraphBuilder_loongarch(StringRef FileName,
+ const object::ELFFile<ELFT> &Obj, Triple TT,
+ SubtargetFeatures Features)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
+ FileName, loongarch::getEdgeKindName) {}
+};
+
+Error buildTables_ELF_loongarch(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+ GOTTableManager GOT;
+ PLTTableManager PLT(GOT);
+ visitExistingEdges(G, GOT, PLT);
+ return Error::success();
+}
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_loongarch(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto Features = (*ELFObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ if ((*ELFObj)->getArch() == Triple::loongarch64) {
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_loongarch<object::ELF64LE>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple(), std::move(*Features))
+ .buildGraph();
+ }
+
+ assert((*ELFObj)->getArch() == Triple::loongarch32 &&
+ "Invalid triple for LoongArch ELF object file");
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF32LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_loongarch<object::ELF32LE>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple(), std::move(*Features))
+ .buildGraph();
+}
+
+void link_ELF_loongarch(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ const Triple &TT = G->getTargetTriple();
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ // Add eh-frame passes.
+ Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
+ Config.PrePrunePasses.push_back(
+ EHFrameEdgeFixer(".eh_frame", G->getPointerSize(), Pointer32, Pointer64,
+ Delta32, Delta64, NegDelta32));
+ Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));
+
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/PLTStubs build pass.
+ Config.PostPrunePasses.push_back(buildTables_ELF_loongarch);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_loongarch::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_ppc64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_ppc64.cpp
new file mode 100644
index 000000000000..3b86250b60a4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_ppc64.cpp
@@ -0,0 +1,543 @@
+//===------- ELF_ppc64.cpp -JIT linker implementation for ELF/ppc64 -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/ppc64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_ppc64.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/TableManager.h"
+#include "llvm/ExecutionEngine/JITLink/ppc64.h"
+#include "llvm/Object/ELFObjectFile.h"
+
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace {
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+constexpr StringRef ELFTOCSymbolName = ".TOC.";
+constexpr StringRef TOCSymbolAliasIdent = "__TOC__";
+constexpr uint64_t ELFTOCBaseOffset = 0x8000;
+constexpr StringRef ELFTLSInfoSectionName = "$__TLSINFO";
+
+template <llvm::endianness Endianness>
+class TLSInfoTableManager_ELF_ppc64
+ : public TableManager<TLSInfoTableManager_ELF_ppc64<Endianness>> {
+public:
+ static const uint8_t TLSInfoEntryContent[16];
+
+ static StringRef getSectionName() { return ELFTLSInfoSectionName; }
+
+ bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
+ Edge::Kind K = E.getKind();
+ switch (K) {
+ case ppc64::RequestTLSDescInGOTAndTransformToTOCDelta16HA:
+ E.setKind(ppc64::TOCDelta16HA);
+ E.setTarget(this->getEntryForTarget(G, E.getTarget()));
+ return true;
+ case ppc64::RequestTLSDescInGOTAndTransformToTOCDelta16LO:
+ E.setKind(ppc64::TOCDelta16LO);
+ E.setTarget(this->getEntryForTarget(G, E.getTarget()));
+ return true;
+ case ppc64::RequestTLSDescInGOTAndTransformToDelta34:
+ E.setKind(ppc64::Delta34);
+ E.setTarget(this->getEntryForTarget(G, E.getTarget()));
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ Symbol &createEntry(LinkGraph &G, Symbol &Target) {
+ // The TLS Info entry's key value will be written by
+ // `fixTLVSectionsAndEdges`, so create mutable content.
+ auto &TLSInfoEntry = G.createMutableContentBlock(
+ getTLSInfoSection(G), G.allocateContent(getTLSInfoEntryContent()),
+ orc::ExecutorAddr(), 8, 0);
+ TLSInfoEntry.addEdge(ppc64::Pointer64, 8, Target, 0);
+ return G.addAnonymousSymbol(TLSInfoEntry, 0, 16, false, false);
+ }
+
+private:
+ Section &getTLSInfoSection(LinkGraph &G) {
+ if (!TLSInfoTable)
+ TLSInfoTable =
+ &G.createSection(ELFTLSInfoSectionName, orc::MemProt::Read);
+ return *TLSInfoTable;
+ }
+
+ ArrayRef<char> getTLSInfoEntryContent() const {
+ return {reinterpret_cast<const char *>(TLSInfoEntryContent),
+ sizeof(TLSInfoEntryContent)};
+ }
+
+ Section *TLSInfoTable = nullptr;
+};
+
+template <>
+const uint8_t TLSInfoTableManager_ELF_ppc64<
+ llvm::endianness::little>::TLSInfoEntryContent[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*pthread key */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /*data address*/
+};
+
+template <>
+const uint8_t TLSInfoTableManager_ELF_ppc64<
+ llvm::endianness::big>::TLSInfoEntryContent[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*pthread key */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /*data address*/
+};
+
+template <llvm::endianness Endianness>
+Symbol &createELFGOTHeader(LinkGraph &G,
+ ppc64::TOCTableManager<Endianness> &TOC) {
+ Symbol *TOCSymbol = nullptr;
+
+ for (Symbol *Sym : G.defined_symbols())
+ if (LLVM_UNLIKELY(Sym->getName() == ELFTOCSymbolName)) {
+ TOCSymbol = Sym;
+ break;
+ }
+
+ if (LLVM_LIKELY(TOCSymbol == nullptr)) {
+ for (Symbol *Sym : G.external_symbols())
+ if (Sym->getName() == ELFTOCSymbolName) {
+ TOCSymbol = Sym;
+ break;
+ }
+ }
+
+ if (!TOCSymbol)
+ TOCSymbol = &G.addExternalSymbol(ELFTOCSymbolName, 0, false);
+
+ return TOC.getEntryForTarget(G, *TOCSymbol);
+}
+
+// Register preexisting GOT entries with TOC table manager.
+template <llvm::endianness Endianness>
+inline void
+registerExistingGOTEntries(LinkGraph &G,
+ ppc64::TOCTableManager<Endianness> &TOC) {
+ auto isGOTEntry = [](const Edge &E) {
+ return E.getKind() == ppc64::Pointer64 && E.getTarget().isExternal();
+ };
+ if (Section *dotTOCSection = G.findSectionByName(".toc")) {
+ for (Block *B : dotTOCSection->blocks())
+ for (Edge &E : B->edges())
+ if (isGOTEntry(E))
+ TOC.registerPreExistingEntry(E.getTarget(),
+ G.addAnonymousSymbol(*B, E.getOffset(),
+ G.getPointerSize(),
+ false, false));
+ }
+}
+
+template <llvm::endianness Endianness>
+Error buildTables_ELF_ppc64(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+ ppc64::TOCTableManager<Endianness> TOC;
+ // Before visiting edges, we create a header containing the address of TOC
+ // base as ELFABIv2 suggests:
+ // > The GOT consists of an 8-byte header that contains the TOC base (the
+ // first TOC base when multiple TOCs are present), followed by an array of
+ // 8-byte addresses.
+ createELFGOTHeader(G, TOC);
+
+ // There might be compiler-generated GOT entries in ELF relocatable file.
+ registerExistingGOTEntries(G, TOC);
+
+ ppc64::PLTTableManager<Endianness> PLT(TOC);
+ TLSInfoTableManager_ELF_ppc64<Endianness> TLSInfo;
+ visitExistingEdges(G, TOC, PLT, TLSInfo);
+
+ // After visiting edges in LinkGraph, we have GOT entries built in the
+ // synthesized section.
+ // Merge sections included in TOC into synthesized TOC section,
+ // thus TOC is compact and reducing chances of relocation
+ // overflow.
+ if (Section *TOCSection = G.findSectionByName(TOC.getSectionName())) {
+ // .got and .plt are not normally present in a relocatable object file
+ // because they are linker generated.
+ if (Section *gotSection = G.findSectionByName(".got"))
+ G.mergeSections(*TOCSection, *gotSection);
+ if (Section *tocSection = G.findSectionByName(".toc"))
+ G.mergeSections(*TOCSection, *tocSection);
+ if (Section *sdataSection = G.findSectionByName(".sdata"))
+ G.mergeSections(*TOCSection, *sdataSection);
+ if (Section *sbssSection = G.findSectionByName(".sbss"))
+ G.mergeSections(*TOCSection, *sbssSection);
+ // .tocbss no longer appears in ELFABIv2. Leave it here to be compatible
+ // with rtdyld.
+ if (Section *tocbssSection = G.findSectionByName(".tocbss"))
+ G.mergeSections(*TOCSection, *tocbssSection);
+ if (Section *pltSection = G.findSectionByName(".plt"))
+ G.mergeSections(*TOCSection, *pltSection);
+ }
+
+ return Error::success();
+}
+
+} // namespace
+
+namespace llvm::jitlink {
+
+template <llvm::endianness Endianness>
+class ELFLinkGraphBuilder_ppc64
+ : public ELFLinkGraphBuilder<object::ELFType<Endianness, true>> {
+private:
+ using ELFT = object::ELFType<Endianness, true>;
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ using Base::G; // Use LinkGraph pointer from base class.
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ using Self = ELFLinkGraphBuilder_ppc64<Endianness>;
+ for (const auto &RelSect : Base::Sections) {
+ // Validate the section to read relocation entries from.
+ if (RelSect.sh_type == ELF::SHT_REL)
+ return make_error<StringError>("No SHT_REL in valid " +
+ G->getTargetTriple().getArchName() +
+ " ELF object files",
+ inconvertibleErrorCode());
+
+ if (Error Err = Base::forEachRelaRelocation(RelSect, this,
+ &Self::addSingleRelocation))
+ return Err;
+ }
+
+ return Error::success();
+ }
+
+ Error addSingleRelocation(const typename ELFT::Rela &Rel,
+ const typename ELFT::Shdr &FixupSection,
+ Block &BlockToFix) {
+ using Base = ELFLinkGraphBuilder<ELFT>;
+ auto ELFReloc = Rel.getType(false);
+
+ // R_PPC64_NONE is a no-op.
+ if (LLVM_UNLIKELY(ELFReloc == ELF::R_PPC64_NONE))
+ return Error::success();
+
+ // TLS model markers. We only support global-dynamic model now.
+ if (ELFReloc == ELF::R_PPC64_TLSGD)
+ return Error::success();
+ if (ELFReloc == ELF::R_PPC64_TLSLD)
+ return make_error<StringError>("Local-dynamic TLS model is not supported",
+ inconvertibleErrorCode());
+
+ if (ELFReloc == ELF::R_PPC64_PCREL_OPT)
+ // TODO: Support PCREL optimization, now ignore it.
+ return Error::success();
+
+ if (ELFReloc == ELF::R_PPC64_TPREL34)
+ return make_error<StringError>("Local-exec TLS model is not supported",
+ inconvertibleErrorCode());
+
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ int64_t Addend = Rel.r_addend;
+ orc::ExecutorAddr FixupAddress =
+ orc::ExecutorAddr(FixupSection.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+ Edge::Kind Kind = Edge::Invalid;
+
+ switch (ELFReloc) {
+ default:
+ return make_error<JITLinkError>(
+ "In " + G->getName() + ": Unsupported ppc64 relocation type " +
+ object::getELFRelocationTypeName(ELF::EM_PPC64, ELFReloc));
+ case ELF::R_PPC64_ADDR64:
+ Kind = ppc64::Pointer64;
+ break;
+ case ELF::R_PPC64_ADDR32:
+ Kind = ppc64::Pointer32;
+ break;
+ case ELF::R_PPC64_ADDR16:
+ Kind = ppc64::Pointer16;
+ break;
+ case ELF::R_PPC64_ADDR16_DS:
+ Kind = ppc64::Pointer16DS;
+ break;
+ case ELF::R_PPC64_ADDR16_HA:
+ Kind = ppc64::Pointer16HA;
+ break;
+ case ELF::R_PPC64_ADDR16_HI:
+ Kind = ppc64::Pointer16HI;
+ break;
+ case ELF::R_PPC64_ADDR16_HIGH:
+ Kind = ppc64::Pointer16HIGH;
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHA:
+ Kind = ppc64::Pointer16HIGHA;
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHER:
+ Kind = ppc64::Pointer16HIGHER;
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHERA:
+ Kind = ppc64::Pointer16HIGHERA;
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHEST:
+ Kind = ppc64::Pointer16HIGHEST;
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHESTA:
+ Kind = ppc64::Pointer16HIGHESTA;
+ break;
+ case ELF::R_PPC64_ADDR16_LO:
+ Kind = ppc64::Pointer16LO;
+ break;
+ case ELF::R_PPC64_ADDR16_LO_DS:
+ Kind = ppc64::Pointer16LODS;
+ break;
+ case ELF::R_PPC64_ADDR14:
+ Kind = ppc64::Pointer14;
+ break;
+ case ELF::R_PPC64_TOC:
+ Kind = ppc64::TOC;
+ break;
+ case ELF::R_PPC64_TOC16:
+ Kind = ppc64::TOCDelta16;
+ break;
+ case ELF::R_PPC64_TOC16_HA:
+ Kind = ppc64::TOCDelta16HA;
+ break;
+ case ELF::R_PPC64_TOC16_HI:
+ Kind = ppc64::TOCDelta16HI;
+ break;
+ case ELF::R_PPC64_TOC16_DS:
+ Kind = ppc64::TOCDelta16DS;
+ break;
+ case ELF::R_PPC64_TOC16_LO:
+ Kind = ppc64::TOCDelta16LO;
+ break;
+ case ELF::R_PPC64_TOC16_LO_DS:
+ Kind = ppc64::TOCDelta16LODS;
+ break;
+ case ELF::R_PPC64_REL16:
+ Kind = ppc64::Delta16;
+ break;
+ case ELF::R_PPC64_REL16_HA:
+ Kind = ppc64::Delta16HA;
+ break;
+ case ELF::R_PPC64_REL16_HI:
+ Kind = ppc64::Delta16HI;
+ break;
+ case ELF::R_PPC64_REL16_LO:
+ Kind = ppc64::Delta16LO;
+ break;
+ case ELF::R_PPC64_REL32:
+ Kind = ppc64::Delta32;
+ break;
+ case ELF::R_PPC64_REL24_NOTOC:
+ Kind = ppc64::RequestCallNoTOC;
+ break;
+ case ELF::R_PPC64_REL24:
+ Kind = ppc64::RequestCall;
+ // Determining a target is external or not is deferred in PostPrunePass.
+ // We assume branching to local entry by default, since in PostPrunePass,
+ // we don't have any context to determine LocalEntryOffset. If it finally
+ // turns out to be an external call, we'll have a stub for the external
+ // target, the target of this edge will be the stub and its addend will be
+ // set 0.
+ Addend += ELF::decodePPC64LocalEntryOffset((*ObjSymbol)->st_other);
+ break;
+ case ELF::R_PPC64_REL64:
+ Kind = ppc64::Delta64;
+ break;
+ case ELF::R_PPC64_PCREL34:
+ Kind = ppc64::Delta34;
+ break;
+ case ELF::R_PPC64_GOT_PCREL34:
+ Kind = ppc64::RequestGOTAndTransformToDelta34;
+ break;
+ case ELF::R_PPC64_GOT_TLSGD16_HA:
+ Kind = ppc64::RequestTLSDescInGOTAndTransformToTOCDelta16HA;
+ break;
+ case ELF::R_PPC64_GOT_TLSGD16_LO:
+ Kind = ppc64::RequestTLSDescInGOTAndTransformToTOCDelta16LO;
+ break;
+ case ELF::R_PPC64_GOT_TLSGD_PCREL34:
+ Kind = ppc64::RequestTLSDescInGOTAndTransformToDelta34;
+ break;
+ }
+
+ Edge GE(Kind, Offset, *GraphSymbol, Addend);
+ BlockToFix.addEdge(std::move(GE));
+ return Error::success();
+ }
+
+public:
+ ELFLinkGraphBuilder_ppc64(StringRef FileName,
+ const object::ELFFile<ELFT> &Obj, Triple TT,
+ SubtargetFeatures Features)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
+ FileName, ppc64::getEdgeKindName) {}
+};
+
+template <llvm::endianness Endianness>
+class ELFJITLinker_ppc64 : public JITLinker<ELFJITLinker_ppc64<Endianness>> {
+ using JITLinkerBase = JITLinker<ELFJITLinker_ppc64<Endianness>>;
+ friend JITLinkerBase;
+
+public:
+ ELFJITLinker_ppc64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G, PassConfiguration PassConfig)
+ : JITLinkerBase(std::move(Ctx), std::move(G), std::move(PassConfig)) {
+ JITLinkerBase::getPassConfig().PostAllocationPasses.push_back(
+ [this](LinkGraph &G) { return defineTOCBase(G); });
+ }
+
+private:
+ Symbol *TOCSymbol = nullptr;
+
+ Error defineTOCBase(LinkGraph &G) {
+ for (Symbol *Sym : G.defined_symbols()) {
+ if (LLVM_UNLIKELY(Sym->getName() == ELFTOCSymbolName)) {
+ TOCSymbol = Sym;
+ return Error::success();
+ }
+ }
+
+ assert(TOCSymbol == nullptr &&
+ "TOCSymbol should not be defined at this point");
+
+ for (Symbol *Sym : G.external_symbols()) {
+ if (Sym->getName() == ELFTOCSymbolName) {
+ TOCSymbol = Sym;
+ break;
+ }
+ }
+
+ if (Section *TOCSection = G.findSectionByName(
+ ppc64::TOCTableManager<Endianness>::getSectionName())) {
+ assert(!TOCSection->empty() && "TOC section should have reserved an "
+ "entry for containing the TOC base");
+
+ SectionRange SR(*TOCSection);
+ orc::ExecutorAddr TOCBaseAddr(SR.getFirstBlock()->getAddress() +
+ ELFTOCBaseOffset);
+ assert(TOCSymbol && TOCSymbol->isExternal() &&
+ ".TOC. should be a external symbol at this point");
+ G.makeAbsolute(*TOCSymbol, TOCBaseAddr);
+ // Create an alias of .TOC. so that rtdyld checker can recognize.
+ G.addAbsoluteSymbol(TOCSymbolAliasIdent, TOCSymbol->getAddress(),
+ TOCSymbol->getSize(), TOCSymbol->getLinkage(),
+ TOCSymbol->getScope(), TOCSymbol->isLive());
+ return Error::success();
+ }
+
+ // If TOC section doesn't exist, which means no TOC relocation is found, we
+ // don't need a TOCSymbol.
+ return Error::success();
+ }
+
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return ppc64::applyFixup<Endianness>(G, B, E, TOCSymbol);
+ }
+};
+
+template <llvm::endianness Endianness>
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_ppc64(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto Features = (*ELFObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ using ELFT = object::ELFType<Endianness, true>;
+ auto &ELFObjFile = cast<object::ELFObjectFile<ELFT>>(**ELFObj);
+ return ELFLinkGraphBuilder_ppc64<Endianness>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple(), std::move(*Features))
+ .buildGraph();
+}
+
+template <llvm::endianness Endianness>
+void link_ELF_ppc64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+ // Construct a JITLinker and run the link function.
+
+ // Add eh-frame passes.
+ Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
+ Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
+ ".eh_frame", G->getPointerSize(), ppc64::Pointer32, ppc64::Pointer64,
+ ppc64::Delta32, ppc64::Delta64, ppc64::NegDelta32));
+ Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));
+
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+ }
+
+ Config.PostPrunePasses.push_back(buildTables_ELF_ppc64<Endianness>);
+
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_ppc64<Endianness>::link(std::move(Ctx), std::move(G),
+ std::move(Config));
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_ppc64(MemoryBufferRef ObjectBuffer) {
+ return createLinkGraphFromELFObject_ppc64<llvm::endianness::big>(
+ std::move(ObjectBuffer));
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_ppc64le(MemoryBufferRef ObjectBuffer) {
+ return createLinkGraphFromELFObject_ppc64<llvm::endianness::little>(
+ std::move(ObjectBuffer));
+}
+
+/// jit-link the given object buffer, which must be a ELF ppc64 object file.
+void link_ELF_ppc64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ return link_ELF_ppc64<llvm::endianness::big>(std::move(G), std::move(Ctx));
+}
+
+/// jit-link the given object buffer, which must be a ELF ppc64le object file.
+void link_ELF_ppc64le(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ return link_ELF_ppc64<llvm::endianness::little>(std::move(G), std::move(Ctx));
+}
+
+} // end namespace llvm::jitlink
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
new file mode 100644
index 000000000000..0cf548ede938
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
@@ -0,0 +1,997 @@
+//===------- ELF_riscv.cpp - JIT linker implementation for ELF/riscv ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/riscv jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+#include "PerGraphGOTAndPLTStubsBuilder.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITLink/riscv.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "jitlink"
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::riscv;
+
+namespace {
+
+/// Per-graph GOT/PLT builder for ELF/riscv. Creates zero-initialized GOT
+/// entries for R_RISCV_GOT_HI20 edges and 16-byte indirect-jump stubs for
+/// external call edges, rewriting the edges to point at the new entries.
+class PerGraphGOTAndPLTStubsBuilder_ELF_riscv
+    : public PerGraphGOTAndPLTStubsBuilder<
+          PerGraphGOTAndPLTStubsBuilder_ELF_riscv> {
+public:
+  static constexpr size_t StubEntrySize = 16;
+  static const uint8_t NullGOTEntryContent[8];
+  static const uint8_t RV64StubContent[StubEntrySize];
+  static const uint8_t RV32StubContent[StubEntrySize];
+
+  using PerGraphGOTAndPLTStubsBuilder<
+      PerGraphGOTAndPLTStubsBuilder_ELF_riscv>::PerGraphGOTAndPLTStubsBuilder;
+
+  // RV64 vs RV32 is inferred from the graph's pointer size.
+  bool isRV64() const { return G.getPointerSize() == 8; }
+
+  bool isGOTEdgeToFix(Edge &E) const { return E.getKind() == R_RISCV_GOT_HI20; }
+
+  // Create a pointer-sized GOT entry for Target. The entry's content is
+  // zero-filled; an R_RISCV_64/32 edge writes Target's address at fixup time.
+  Symbol &createGOTEntry(Symbol &Target) {
+    Block &GOTBlock =
+        G.createContentBlock(getGOTSection(), getGOTEntryBlockContent(),
+                             orc::ExecutorAddr(), G.getPointerSize(), 0);
+    GOTBlock.addEdge(isRV64() ? R_RISCV_64 : R_RISCV_32, 0, Target, 0);
+    return G.addAnonymousSymbol(GOTBlock, 0, G.getPointerSize(), false, false);
+  }
+
+  // Create a stub (auipc/load/jr) that jumps to Target via its GOT entry.
+  Symbol &createPLTStub(Symbol &Target) {
+    Block &StubContentBlock = G.createContentBlock(
+        getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 4, 0);
+    auto &GOTEntrySymbol = getGOTEntry(Target);
+    StubContentBlock.addEdge(R_RISCV_CALL, 0, GOTEntrySymbol, 0);
+    return G.addAnonymousSymbol(StubContentBlock, 0, StubEntrySize, true,
+                                false);
+  }
+
+  void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+    // Replace the relocation pair (R_RISCV_GOT_HI20, R_RISCV_PCREL_LO12)
+    // with (R_RISCV_PCREL_HI20, R_RISCV_PCREL_LO12)
+    // Therefore, here just change the R_RISCV_GOT_HI20 to R_RISCV_PCREL_HI20
+    E.setKind(R_RISCV_PCREL_HI20);
+    E.setTarget(GOTEntry);
+  }
+
+  void fixPLTEdge(Edge &E, Symbol &PLTStubs) {
+    assert((E.getKind() == R_RISCV_CALL || E.getKind() == R_RISCV_CALL_PLT ||
+            E.getKind() == CallRelaxable) &&
+           "Not a PLT edge?");
+    E.setKind(R_RISCV_CALL);
+    E.setTarget(PLTStubs);
+  }
+
+  // A call edge needs a stub only when its target is undefined (external).
+  bool isExternalBranchEdge(Edge &E) const {
+    return (E.getKind() == R_RISCV_CALL || E.getKind() == R_RISCV_CALL_PLT ||
+            E.getKind() == CallRelaxable) &&
+           !E.getTarget().isDefined();
+  }
+
+private:
+  // Sections are created lazily (and cached) so that graphs with no GOT/PLT
+  // edges don't get empty $__GOT/$__STUBS sections.
+  Section &getGOTSection() const {
+    if (!GOTSection)
+      GOTSection = &G.createSection("$__GOT", orc::MemProt::Read);
+    return *GOTSection;
+  }
+
+  Section &getStubsSection() const {
+    if (!StubsSection)
+      StubsSection =
+          &G.createSection("$__STUBS", orc::MemProt::Read | orc::MemProt::Exec);
+    return *StubsSection;
+  }
+
+  ArrayRef<char> getGOTEntryBlockContent() {
+    return {reinterpret_cast<const char *>(NullGOTEntryContent),
+            G.getPointerSize()};
+  }
+
+  ArrayRef<char> getStubBlockContent() {
+    auto StubContent = isRV64() ? RV64StubContent : RV32StubContent;
+    return {reinterpret_cast<const char *>(StubContent), StubEntrySize};
+  }
+
+  // mutable: lazily initialized from the const getters above.
+  mutable Section *GOTSection = nullptr;
+  mutable Section *StubsSection = nullptr;
+};
+
+// Zero-filled GOT entry template; the real target address is written at
+// fixup time by the R_RISCV_64/32 edge added in createGOTEntry.
+const uint8_t PerGraphGOTAndPLTStubsBuilder_ELF_riscv::NullGOTEntryContent[8] =
+    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+// Stub templates: load the target address from the GOT entry (located via
+// the R_RISCV_CALL edge on the stub block) and jump to it through t3.
+const uint8_t
+    PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV64StubContent[StubEntrySize] = {
+        0x17, 0x0e, 0x00, 0x00, // auipc t3, literal
+        0x03, 0x3e, 0x0e, 0x00, // ld t3, literal(t3)
+        0x67, 0x00, 0x0e, 0x00, // jr t3
+        0x13, 0x00, 0x00, 0x00}; // nop
+
+const uint8_t
+    PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV32StubContent[StubEntrySize] = {
+        0x17, 0x0e, 0x00, 0x00, // auipc t3, literal
+        0x03, 0x2e, 0x0e, 0x00, // lw t3, literal(t3)
+        0x67, 0x00, 0x0e, 0x00, // jr t3
+        0x13, 0x00, 0x00, 0x00}; // nop
+} // namespace
+namespace llvm {
+namespace jitlink {
+
+// Extract the Size-bit field Num[Low + Size - 1 : Low], right-justified.
+static uint32_t extractBits(uint32_t Num, unsigned Low, unsigned Size) {
+  return (Num & (((1ULL << Size) - 1) << Low)) >> Low;
+}
+
+// True if Value is a multiple of N. N must be a power of two for the mask
+// trick below to be correct.
+static inline bool isAlignmentCorrect(uint64_t Value, int N) {
+  return (Value & (N - 1)) ? false : true;
+}
+
+// True if Value is representable as a signed N-bit immediate.
+// Requires 0 < N <= 64.
+static inline bool isInRangeForImm(int64_t Value, int N) {
+  return Value == llvm::SignExtend64(Value, N);
+}
+
+/// JITLinker specialization for ELF/riscv. Implements relocation fixups per
+/// the RISC-V ELF psABI and records PCREL_HI20 edges so that paired
+/// PCREL_LO12 relocations can be resolved.
+class ELFJITLinker_riscv : public JITLinker<ELFJITLinker_riscv> {
+  friend class JITLinker<ELFJITLinker_riscv>;
+
+public:
+  ELFJITLinker_riscv(std::unique_ptr<JITLinkContext> Ctx,
+                     std::unique_ptr<LinkGraph> G, PassConfiguration PassConfig)
+      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {
+    // Register the HI20-gathering pass after allocation so that it sees
+    // final edge offsets (post any relaxation/pruning passes).
+    JITLinkerBase::getPassConfig().PostAllocationPasses.push_back(
+        [this](LinkGraph &G) { return gatherRISCVPCRelHi20(G); });
+  }
+
+private:
+  // Map from (block, offset) of each R_RISCV_PCREL_HI20 edge to the edge
+  // itself, used by applyFixup to resolve paired *_PCREL_LO12 relocations.
+  DenseMap<std::pair<const Block *, orc::ExecutorAddrDiff>, const Edge *>
+      RelHi20;
+
+  // Post-allocation pass: index all R_RISCV_PCREL_HI20 edges by location.
+  Error gatherRISCVPCRelHi20(LinkGraph &G) {
+    for (Block *B : G.blocks())
+      for (Edge &E : B->edges())
+        if (E.getKind() == R_RISCV_PCREL_HI20)
+          RelHi20[{B, E.getOffset()}] = &E;
+
+    return Error::success();
+  }
+
+  // Find the R_RISCV_PCREL_HI20 edge paired with the given LO12 edge. The
+  // LO12 edge's target symbol points at the instruction carrying the HI20
+  // relocation, so we look it up by (block, offset).
+  Expected<const Edge &> getRISCVPCRelHi20(const Edge &E) const {
+    using namespace riscv;
+    assert((E.getKind() == R_RISCV_PCREL_LO12_I ||
+            E.getKind() == R_RISCV_PCREL_LO12_S) &&
+           "Can only have high relocation for R_RISCV_PCREL_LO12_I or "
+           "R_RISCV_PCREL_LO12_S");
+
+    const Symbol &Sym = E.getTarget();
+    const Block &B = Sym.getBlock();
+    orc::ExecutorAddrDiff Offset = Sym.getOffset();
+
+    auto It = RelHi20.find({&B, Offset});
+    if (It != RelHi20.end())
+      return *It->second;
+
+    return make_error<JITLinkError>("No HI20 PCREL relocation type be found "
+                                    "for LO12 PCREL relocation type");
+  }
+
+  // Apply the fixup for edge E within block B. Relocation semantics follow
+  // the RISC-V ELF psABI; out-of-range or misaligned values produce errors
+  // rather than silently truncated encodings.
+  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+    using namespace riscv;
+    using namespace llvm::support;
+
+    char *BlockWorkingMem = B.getAlreadyMutableContent().data();
+    char *FixupPtr = BlockWorkingMem + E.getOffset();
+    orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
+    switch (E.getKind()) {
+    case R_RISCV_32: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+      break;
+    }
+    case R_RISCV_64: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+      break;
+    }
+    case R_RISCV_BRANCH: {
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 12)))
+        return makeTargetOutOfRangeError(G, B, E);
+      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
+        return makeAlignmentError(FixupAddress, Value, 2, E);
+      // B-type immediate encoding: scattered bit fields per the ISA spec.
+      uint32_t Imm12 = extractBits(Value, 12, 1) << 31;
+      uint32_t Imm10_5 = extractBits(Value, 5, 6) << 25;
+      uint32_t Imm4_1 = extractBits(Value, 1, 4) << 8;
+      uint32_t Imm11 = extractBits(Value, 11, 1) << 7;
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr =
+          (RawInstr & 0x1FFF07F) | Imm12 | Imm10_5 | Imm4_1 | Imm11;
+      break;
+    }
+    case R_RISCV_JAL: {
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 20)))
+        return makeTargetOutOfRangeError(G, B, E);
+      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
+        return makeAlignmentError(FixupAddress, Value, 2, E);
+      // J-type immediate encoding.
+      uint32_t Imm20 = extractBits(Value, 20, 1) << 31;
+      uint32_t Imm10_1 = extractBits(Value, 1, 10) << 21;
+      uint32_t Imm11 = extractBits(Value, 11, 1) << 20;
+      uint32_t Imm19_12 = extractBits(Value, 12, 8) << 12;
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr =
+          (RawInstr & 0xFFF) | Imm20 | Imm10_1 | Imm11 | Imm19_12;
+      break;
+    }
+    case CallRelaxable:
+      // Treat as R_RISCV_CALL when the relaxation pass did not run
+    case R_RISCV_CALL_PLT:
+    case R_RISCV_CALL: {
+      // auipc + jalr pair: split the PC-relative offset into a rounded
+      // HI20 part (+0x800 so the signed LO12 rounds correctly) and LO12.
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      int64_t Hi = Value + 0x800;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
+        return makeTargetOutOfRangeError(G, B, E);
+      int32_t Lo = Value & 0xFFF;
+      uint32_t RawInstrAuipc = *(little32_t *)FixupPtr;
+      uint32_t RawInstrJalr = *(little32_t *)(FixupPtr + 4);
+      *(little32_t *)FixupPtr =
+          RawInstrAuipc | (static_cast<uint32_t>(Hi & 0xFFFFF000));
+      *(little32_t *)(FixupPtr + 4) =
+          RawInstrJalr | (static_cast<uint32_t>(Lo) << 20);
+      break;
+    }
+    // The relocations R_RISCV_CALL_PLT and R_RISCV_GOT_HI20 are handled by
+    // PerGraphGOTAndPLTStubsBuilder_ELF_riscv and are transformed into
+    // R_RISCV_CALL and R_RISCV_PCREL_HI20.
+    case R_RISCV_PCREL_HI20: {
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      int64_t Hi = Value + 0x800;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
+        return makeTargetOutOfRangeError(G, B, E);
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr =
+          (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
+      break;
+    }
+    case R_RISCV_PCREL_LO12_I: {
+      // FIXME: We assume that R_RISCV_PCREL_HI20 is present in object code and
+      // pairs with current relocation R_RISCV_PCREL_LO12_I. So here may need a
+      // check.
+      auto RelHI20 = getRISCVPCRelHi20(E);
+      if (!RelHI20)
+        return RelHI20.takeError();
+      // The LO12 value is relative to the paired AUIPC's PC, which is the
+      // LO12 edge's target address.
+      int64_t Value = RelHI20->getTarget().getAddress() +
+                      RelHI20->getAddend() - E.getTarget().getAddress();
+      int64_t Lo = Value & 0xFFF;
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr =
+          (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
+      break;
+    }
+    case R_RISCV_PCREL_LO12_S: {
+      // FIXME: We assume that R_RISCV_PCREL_HI20 is present in object code and
+      // pairs with current relocation R_RISCV_PCREL_LO12_S. So here may need a
+      // check.
+      auto RelHI20 = getRISCVPCRelHi20(E);
+      if (!RelHI20)
+        return RelHI20.takeError();
+      int64_t Value = RelHI20->getTarget().getAddress() +
+                      RelHI20->getAddend() - E.getTarget().getAddress();
+      int64_t Lo = Value & 0xFFF;
+      // S-type immediate encoding (store instructions).
+      uint32_t Imm11_5 = extractBits(Lo, 5, 7) << 25;
+      uint32_t Imm4_0 = extractBits(Lo, 0, 5) << 7;
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+
+      *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm11_5 | Imm4_0;
+      break;
+    }
+    case R_RISCV_HI20: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      int64_t Hi = Value + 0x800;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Hi, 32)))
+        return makeTargetOutOfRangeError(G, B, E);
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr =
+          (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
+      break;
+    }
+    case R_RISCV_LO12_I: {
+      // FIXME: We assume that R_RISCV_HI20 is present in object code and pairs
+      // with current relocation R_RISCV_LO12_I. So here may need a check.
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      int32_t Lo = Value & 0xFFF;
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr =
+          (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
+      break;
+    }
+    case R_RISCV_LO12_S: {
+      // FIXME: We assume that R_RISCV_HI20 is present in object code and pairs
+      // with current relocation R_RISCV_LO12_S. So here may need a check.
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      int64_t Lo = Value & 0xFFF;
+      uint32_t Imm11_5 = extractBits(Lo, 5, 7) << 25;
+      uint32_t Imm4_0 = extractBits(Lo, 0, 5) << 7;
+      uint32_t RawInstr = *(little32_t *)FixupPtr;
+      *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm11_5 | Imm4_0;
+      break;
+    }
+    // ADD*/SUB* are read-modify-write relocations: they accumulate into the
+    // existing value at the fixup location (used e.g. for label differences).
+    case R_RISCV_ADD8: {
+      int64_t Value =
+          (E.getTarget().getAddress() +
+           *(reinterpret_cast<const uint8_t *>(FixupPtr)) + E.getAddend())
+              .getValue();
+      *FixupPtr = static_cast<uint8_t>(Value);
+      break;
+    }
+    case R_RISCV_ADD16: {
+      int64_t Value = (E.getTarget().getAddress() +
+                       support::endian::read16le(FixupPtr) + E.getAddend())
+                          .getValue();
+      *(little16_t *)FixupPtr = static_cast<uint16_t>(Value);
+      break;
+    }
+    case R_RISCV_ADD32: {
+      int64_t Value = (E.getTarget().getAddress() +
+                       support::endian::read32le(FixupPtr) + E.getAddend())
+                          .getValue();
+      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+      break;
+    }
+    case R_RISCV_ADD64: {
+      int64_t Value = (E.getTarget().getAddress() +
+                       support::endian::read64le(FixupPtr) + E.getAddend())
+                          .getValue();
+      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+      break;
+    }
+    case R_RISCV_SUB8: {
+      int64_t Value = *(reinterpret_cast<const uint8_t *>(FixupPtr)) -
+                      E.getTarget().getAddress().getValue() - E.getAddend();
+      *FixupPtr = static_cast<uint8_t>(Value);
+      break;
+    }
+    case R_RISCV_SUB16: {
+      int64_t Value = support::endian::read16le(FixupPtr) -
+                      E.getTarget().getAddress().getValue() - E.getAddend();
+      // NOTE(review): the cast below is uint32_t while the SUB8/32/64 cases
+      // use the width-matched type; behavior is unchanged (little16_t
+      // truncates) but uint16_t would be consistent — confirm upstream.
+      *(little16_t *)FixupPtr = static_cast<uint32_t>(Value);
+      break;
+    }
+    case R_RISCV_SUB32: {
+      int64_t Value = support::endian::read32le(FixupPtr) -
+                      E.getTarget().getAddress().getValue() - E.getAddend();
+      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+      break;
+    }
+    case R_RISCV_SUB64: {
+      int64_t Value = support::endian::read64le(FixupPtr) -
+                      E.getTarget().getAddress().getValue() - E.getAddend();
+      *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+      break;
+    }
+    case R_RISCV_RVC_BRANCH: {
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 8)))
+        return makeTargetOutOfRangeError(G, B, E);
+      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
+        return makeAlignmentError(FixupAddress, Value, 2, E);
+      // CB-type immediate encoding (compressed branch).
+      uint16_t Imm8 = extractBits(Value, 8, 1) << 12;
+      uint16_t Imm4_3 = extractBits(Value, 3, 2) << 10;
+      uint16_t Imm7_6 = extractBits(Value, 6, 2) << 5;
+      uint16_t Imm2_1 = extractBits(Value, 1, 2) << 3;
+      uint16_t Imm5 = extractBits(Value, 5, 1) << 2;
+      uint16_t RawInstr = *(little16_t *)FixupPtr;
+      *(little16_t *)FixupPtr =
+          (RawInstr & 0xE383) | Imm8 | Imm4_3 | Imm7_6 | Imm2_1 | Imm5;
+      break;
+    }
+    case R_RISCV_RVC_JUMP: {
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      if (LLVM_UNLIKELY(!isInRangeForImm(Value >> 1, 11)))
+        return makeTargetOutOfRangeError(G, B, E);
+      if (LLVM_UNLIKELY(!isAlignmentCorrect(Value, 2)))
+        return makeAlignmentError(FixupAddress, Value, 2, E);
+      // CJ-type immediate encoding (compressed jump).
+      uint16_t Imm11 = extractBits(Value, 11, 1) << 12;
+      uint16_t Imm4 = extractBits(Value, 4, 1) << 11;
+      uint16_t Imm9_8 = extractBits(Value, 8, 2) << 9;
+      uint16_t Imm10 = extractBits(Value, 10, 1) << 8;
+      uint16_t Imm6 = extractBits(Value, 6, 1) << 7;
+      uint16_t Imm7 = extractBits(Value, 7, 1) << 6;
+      uint16_t Imm3_1 = extractBits(Value, 1, 3) << 3;
+      uint16_t Imm5 = extractBits(Value, 5, 1) << 2;
+      uint16_t RawInstr = *(little16_t *)FixupPtr;
+      *(little16_t *)FixupPtr = (RawInstr & 0xE003) | Imm11 | Imm4 | Imm9_8 |
+                                Imm10 | Imm6 | Imm7 | Imm3_1 | Imm5;
+      break;
+    }
+    case R_RISCV_SUB6: {
+      // Only the low 6 bits participate; the top 2 bits are preserved.
+      int64_t Value = *(reinterpret_cast<const uint8_t *>(FixupPtr)) & 0x3f;
+      Value -= E.getTarget().getAddress().getValue() - E.getAddend();
+      *FixupPtr = (*FixupPtr & 0xc0) | (static_cast<uint8_t>(Value) & 0x3f);
+      break;
+    }
+    case R_RISCV_SET6: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      uint32_t RawData = *(little32_t *)FixupPtr;
+      int64_t Word6 = Value & 0x3f;
+      *(little32_t *)FixupPtr = (RawData & 0xffffffc0) | Word6;
+      break;
+    }
+    case R_RISCV_SET8: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      uint32_t RawData = *(little32_t *)FixupPtr;
+      int64_t Word8 = Value & 0xff;
+      *(little32_t *)FixupPtr = (RawData & 0xffffff00) | Word8;
+      break;
+    }
+    case R_RISCV_SET16: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      uint32_t RawData = *(little32_t *)FixupPtr;
+      int64_t Word16 = Value & 0xffff;
+      *(little32_t *)FixupPtr = (RawData & 0xffff0000) | Word16;
+      break;
+    }
+    case R_RISCV_SET32: {
+      int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+      int64_t Word32 = Value & 0xffffffff;
+      *(little32_t *)FixupPtr = Word32;
+      break;
+    }
+    case R_RISCV_32_PCREL: {
+      int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+      int64_t Word32 = Value & 0xffffffff;
+      *(little32_t *)FixupPtr = Word32;
+      break;
+    }
+    case AlignRelaxable:
+      // Ignore when the relaxation pass did not run
+      break;
+    case NegDelta32: {
+      int64_t Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+      if (LLVM_UNLIKELY(!isInRangeForImm(Value, 32)))
+        return makeTargetOutOfRangeError(G, B, E);
+      *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+      break;
+    }
+    }
+    return Error::success();
+  }
+};
+
+namespace {
+
+// Bookkeeping for linker relaxation (modeled on LLD's RISC-V relaxation).
+
+struct SymbolAnchor {
+  uint64_t Offset;
+  Symbol *Sym;
+  bool End; // true for the anchor of getOffset() + getSize()
+};
+
+struct BlockRelaxAux {
+  // This records symbol start and end offsets which will be adjusted according
+  // to the nearest RelocDeltas element.
+  SmallVector<SymbolAnchor, 0> Anchors;
+  // All edges that either 1) are R_RISCV_ALIGN or 2) have a R_RISCV_RELAX edge
+  // at the same offset.
+  SmallVector<Edge *, 0> RelaxEdges;
+  // For RelaxEdges[I], the actual offset is RelaxEdges[I]->getOffset() - (I ?
+  // RelocDeltas[I - 1] : 0).
+  SmallVector<uint32_t, 0> RelocDeltas;
+  // For RelaxEdges[I], the actual type is EdgeKinds[I].
+  SmallVector<Edge::Kind, 0> EdgeKinds;
+  // List of rewritten instructions. Contains one raw encoded instruction per
+  // element in EdgeKinds that isn't Invalid or R_RISCV_ALIGN.
+  SmallVector<uint32_t, 0> Writes;
+};
+
+// Target properties that decide which relaxed encodings are legal.
+struct RelaxConfig {
+  bool IsRV32;
+  bool HasRVC;
+};
+
+// Whole-graph relaxation state: config plus per-block bookkeeping.
+struct RelaxAux {
+  RelaxConfig Config;
+  DenseMap<Block *, BlockRelaxAux> Blocks;
+};
+
+} // namespace
+
+// Relaxation only applies to executable sections (code).
+static bool shouldRelax(const Section &S) {
+  return (S.getMemProt() & orc::MemProt::Exec) != orc::MemProt::None;
+}
+
+// True for edge kinds the relaxation pass knows how to shrink.
+static bool isRelaxable(const Edge &E) {
+  switch (E.getKind()) {
+  default:
+    return false;
+  case CallRelaxable:
+  case AlignRelaxable:
+    return true;
+  }
+}
+
+// Collect relaxable edges and symbol anchors for every executable block,
+// and derive the target config (RV32 vs RV64, RVC availability) from the
+// graph's triple and features.
+static RelaxAux initRelaxAux(LinkGraph &G) {
+  RelaxAux Aux;
+  Aux.Config.IsRV32 = G.getTargetTriple().isRISCV32();
+  const auto &Features = G.getFeatures().getFeatures();
+  Aux.Config.HasRVC = llvm::is_contained(Features, "+c") ||
+                      llvm::is_contained(Features, "+zca");
+
+  for (auto &S : G.sections()) {
+    if (!shouldRelax(S))
+      continue;
+    for (auto *B : S.blocks()) {
+      auto BlockEmplaceResult = Aux.Blocks.try_emplace(B);
+      assert(BlockEmplaceResult.second && "Block encountered twice");
+      auto &BlockAux = BlockEmplaceResult.first->second;
+
+      for (auto &E : B->edges())
+        if (isRelaxable(E))
+          BlockAux.RelaxEdges.push_back(&E);
+
+      // Don't track blocks with nothing to relax.
+      if (BlockAux.RelaxEdges.empty()) {
+        Aux.Blocks.erase(BlockEmplaceResult.first);
+        continue;
+      }
+
+      const auto NumEdges = BlockAux.RelaxEdges.size();
+      BlockAux.RelocDeltas.resize(NumEdges, 0);
+      BlockAux.EdgeKinds.resize_for_overwrite(NumEdges);
+
+      // Store anchors (offset and offset+size) for symbols.
+      for (auto *Sym : S.symbols()) {
+        if (!Sym->isDefined() || &Sym->getBlock() != B)
+          continue;
+
+        BlockAux.Anchors.push_back({Sym->getOffset(), Sym, false});
+        BlockAux.Anchors.push_back(
+            {Sym->getOffset() + Sym->getSize(), Sym, true});
+      }
+    }
+  }
+
+  // Sort anchors by offset so that we can find the closest relocation
+  // efficiently. For a zero size symbol, ensure that its start anchor precedes
+  // its end anchor. For two symbols with anchors at the same offset, their
+  // order does not matter.
+  for (auto &BlockAuxIter : Aux.Blocks) {
+    llvm::sort(BlockAuxIter.second.Anchors, [](auto &A, auto &B) {
+      return std::make_pair(A.Offset, A.End) < std::make_pair(B.Offset, B.End);
+    });
+  }
+
+  return Aux;
+}
+
+// Compute how many padding bytes an R_RISCV_ALIGN edge allows us to remove
+// once the final (post-relaxation) location Loc is known.
+static void relaxAlign(orc::ExecutorAddr Loc, const Edge &E, uint32_t &Remove,
+                       Edge::Kind &NewEdgeKind) {
+  // E points to the start of the padding bytes.
+  // E + Addend points to the instruction to be aligned by removing padding.
+  // Alignment is the smallest power of 2 strictly greater than Addend.
+  const auto Align = NextPowerOf2(E.getAddend());
+  const auto DestLoc = alignTo(Loc.getValue(), Align);
+  const auto SrcLoc = Loc.getValue() + E.getAddend();
+  Remove = SrcLoc - DestLoc;
+  assert(static_cast<int32_t>(Remove) >= 0 &&
+         "R_RISCV_ALIGN needs expanding the content");
+  NewEdgeKind = AlignRelaxable;
+}
+
+// Try to shrink an auipc+jalr call sequence at Loc: to c.j/c.jal (saves 6
+// bytes, needs RVC and a ±2KiB displacement) or to jal (saves 4 bytes,
+// needs a ±1MiB displacement). Falls back to the full sequence otherwise.
+static void relaxCall(const Block &B, BlockRelaxAux &Aux,
+                      const RelaxConfig &Config, orc::ExecutorAddr Loc,
+                      const Edge &E, uint32_t &Remove,
+                      Edge::Kind &NewEdgeKind) {
+  const auto JALR =
+      support::endian::read32le(B.getContent().data() + E.getOffset() + 4);
+  const auto RD = extractBits(JALR, 7, 5);
+  const auto Dest = E.getTarget().getAddress() + E.getAddend();
+  const auto Displace = Dest - Loc;
+
+  if (Config.HasRVC && isInt<12>(Displace) && RD == 0) {
+    NewEdgeKind = R_RISCV_RVC_JUMP;
+    Aux.Writes.push_back(0xa001); // c.j
+    Remove = 6;
+  } else if (Config.HasRVC && Config.IsRV32 && isInt<12>(Displace) && RD == 1) {
+    // c.jal (link register) only exists on RV32C.
+    NewEdgeKind = R_RISCV_RVC_JUMP;
+    Aux.Writes.push_back(0x2001); // c.jal
+    Remove = 6;
+  } else if (isInt<21>(Displace)) {
+    NewEdgeKind = R_RISCV_JAL;
+    Aux.Writes.push_back(0x6f | RD << 7); // jal
+    Remove = 4;
+  } else {
+    // Not relaxable
+    NewEdgeKind = R_RISCV_CALL_PLT;
+    Remove = 0;
+  }
+}
+
+// Run one relaxation iteration over a single block, recomputing RelocDeltas
+// and adjusting symbol anchors. Returns true if any delta changed (i.e.
+// another iteration is needed to reach the fixed point).
+static bool relaxBlock(LinkGraph &G, Block &Block, BlockRelaxAux &Aux,
+                       const RelaxConfig &Config) {
+  const auto BlockAddr = Block.getAddress();
+  bool Changed = false;
+  ArrayRef<SymbolAnchor> SA = ArrayRef(Aux.Anchors);
+  uint32_t Delta = 0;
+
+  Aux.EdgeKinds.assign(Aux.EdgeKinds.size(), Edge::Invalid);
+  Aux.Writes.clear();
+
+  for (auto [I, E] : llvm::enumerate(Aux.RelaxEdges)) {
+    // Loc is where this edge's instruction lands after earlier removals.
+    const auto Loc = BlockAddr + E->getOffset() - Delta;
+    auto &Cur = Aux.RelocDeltas[I];
+    uint32_t Remove = 0;
+    switch (E->getKind()) {
+    case AlignRelaxable:
+      relaxAlign(Loc, *E, Remove, Aux.EdgeKinds[I]);
+      break;
+    case CallRelaxable:
+      relaxCall(Block, Aux, Config, Loc, *E, Remove, Aux.EdgeKinds[I]);
+      break;
+    default:
+      llvm_unreachable("Unexpected relaxable edge kind");
+    }
+
+    // For all anchors whose offsets are <= E->getOffset(), they are preceded by
+    // the previous relocation whose RelocDeltas value equals Delta.
+    // Decrease their offset and update their size.
+    for (; SA.size() && SA[0].Offset <= E->getOffset(); SA = SA.slice(1)) {
+      if (SA[0].End)
+        SA[0].Sym->setSize(SA[0].Offset - Delta - SA[0].Sym->getOffset());
+      else
+        SA[0].Sym->setOffset(SA[0].Offset - Delta);
+    }
+
+    Delta += Remove;
+    if (Delta != Cur) {
+      Cur = Delta;
+      Changed = true;
+    }
+  }
+
+  // Remaining anchors sit after the last relaxable edge; shift them all by
+  // the final accumulated delta.
+  for (const SymbolAnchor &A : SA) {
+    if (A.End)
+      A.Sym->setSize(A.Offset - Delta - A.Sym->getOffset());
+    else
+      A.Sym->setOffset(A.Offset - Delta);
+  }
+
+  return Changed;
+}
+
+// One whole-graph relaxation iteration; true if any block still changed.
+static bool relaxOnce(LinkGraph &G, RelaxAux &Aux) {
+  bool Changed = false;
+
+  for (auto &[B, BlockAux] : Aux.Blocks)
+    Changed |= relaxBlock(G, *B, BlockAux, Aux.Config);
+
+  return Changed;
+}
+
+// Apply the converged relaxation decisions to a block: compact the content
+// in place (removing NOPs / shortening calls), then rewrite edge offsets and
+// kinds, and drop the now-fully-handled AlignRelaxable edges.
+static void finalizeBlockRelax(LinkGraph &G, Block &Block, BlockRelaxAux &Aux) {
+  auto Contents = Block.getAlreadyMutableContent();
+  auto *Dest = Contents.data();
+  auto NextWrite = Aux.Writes.begin();
+  uint32_t Offset = 0;
+  uint32_t Delta = 0;
+
+  // Update section content: remove NOPs for R_RISCV_ALIGN and rewrite
+  // instructions for relaxed relocations.
+  for (auto [I, E] : llvm::enumerate(Aux.RelaxEdges)) {
+    uint32_t Remove = Aux.RelocDeltas[I] - Delta;
+    Delta = Aux.RelocDeltas[I];
+    if (Remove == 0 && Aux.EdgeKinds[I] == Edge::Invalid)
+      continue;
+
+    // Copy from last location to the current relocated location.
+    const auto Size = E->getOffset() - Offset;
+    std::memmove(Dest, Contents.data() + Offset, Size);
+    Dest += Size;
+
+    uint32_t Skip = 0;
+    switch (Aux.EdgeKinds[I]) {
+    case Edge::Invalid:
+      break;
+    case AlignRelaxable:
+      // For R_RISCV_ALIGN, we will place Offset in a location (among NOPs) to
+      // satisfy the alignment requirement. If both Remove and E->getAddend()
+      // are multiples of 4, it is as if we have skipped some NOPs. Otherwise we
+      // are in the middle of a 4-byte NOP, and we need to rewrite the NOP
+      // sequence.
+      if (Remove % 4 || E->getAddend() % 4) {
+        Skip = E->getAddend() - Remove;
+        uint32_t J = 0;
+        for (; J + 4 <= Skip; J += 4)
+          support::endian::write32le(Dest + J, 0x00000013); // nop
+        if (J != Skip) {
+          assert(J + 2 == Skip);
+          support::endian::write16le(Dest + J, 0x0001); // c.nop
+        }
+      }
+      break;
+    case R_RISCV_RVC_JUMP:
+      Skip = 2;
+      support::endian::write16le(Dest, *NextWrite++);
+      break;
+    case R_RISCV_JAL:
+      Skip = 4;
+      support::endian::write32le(Dest, *NextWrite++);
+      break;
+    }
+
+    Dest += Skip;
+    Offset = E->getOffset() + Skip + Remove;
+  }
+
+  // Copy the tail after the last relaxed edge.
+  std::memmove(Dest, Contents.data() + Offset, Contents.size() - Offset);
+
+  // Fixup edge offsets and kinds.
+  Delta = 0;
+  size_t I = 0;
+  for (auto &E : Block.edges()) {
+    E.setOffset(E.getOffset() - Delta);
+
+    if (I < Aux.RelaxEdges.size() && Aux.RelaxEdges[I] == &E) {
+      if (Aux.EdgeKinds[I] != Edge::Invalid)
+        E.setKind(Aux.EdgeKinds[I]);
+
+      Delta = Aux.RelocDeltas[I];
+      ++I;
+    }
+  }
+
+  // Remove AlignRelaxable edges: all other relaxable edges got modified and
+  // will be used later while linking. Alignment is entirely handled here so we
+  // don't need these edges anymore.
+  for (auto IE = Block.edges().begin(); IE != Block.edges().end();) {
+    if (IE->getKind() == AlignRelaxable)
+      IE = Block.removeEdge(IE);
+    else
+      ++IE;
+  }
+}
+
+// Apply converged relaxation results to every tracked block.
+static void finalizeRelax(LinkGraph &G, RelaxAux &Aux) {
+  for (auto &[B, BlockAux] : Aux.Blocks)
+    finalizeBlockRelax(G, *B, BlockAux);
+}
+
+// Linker-relaxation pass: iterate relaxOnce to a fixed point (each pass can
+// bring more targets into short-form range), then rewrite block contents.
+static Error relax(LinkGraph &G) {
+  auto Aux = initRelaxAux(G);
+  while (relaxOnce(G, Aux)) {
+  }
+  finalizeRelax(G, Aux);
+  return Error::success();
+}
+
+/// LinkGraph builder for ELF/riscv objects: maps ELF relocation types to
+/// JITLink edge kinds and attaches one edge per Rela relocation.
+template <typename ELFT>
+class ELFLinkGraphBuilder_riscv : public ELFLinkGraphBuilder<ELFT> {
+private:
+  // Translate an ELF relocation type to the corresponding edge kind, or
+  // error for unsupported types.
+  static Expected<riscv::EdgeKind_riscv>
+  getRelocationKind(const uint32_t Type) {
+    using namespace riscv;
+    switch (Type) {
+    case ELF::R_RISCV_32:
+      return EdgeKind_riscv::R_RISCV_32;
+    case ELF::R_RISCV_64:
+      return EdgeKind_riscv::R_RISCV_64;
+    case ELF::R_RISCV_BRANCH:
+      return EdgeKind_riscv::R_RISCV_BRANCH;
+    case ELF::R_RISCV_JAL:
+      return EdgeKind_riscv::R_RISCV_JAL;
+    case ELF::R_RISCV_CALL:
+      return EdgeKind_riscv::R_RISCV_CALL;
+    case ELF::R_RISCV_CALL_PLT:
+      return EdgeKind_riscv::R_RISCV_CALL_PLT;
+    case ELF::R_RISCV_GOT_HI20:
+      return EdgeKind_riscv::R_RISCV_GOT_HI20;
+    case ELF::R_RISCV_PCREL_HI20:
+      return EdgeKind_riscv::R_RISCV_PCREL_HI20;
+    case ELF::R_RISCV_PCREL_LO12_I:
+      return EdgeKind_riscv::R_RISCV_PCREL_LO12_I;
+    case ELF::R_RISCV_PCREL_LO12_S:
+      return EdgeKind_riscv::R_RISCV_PCREL_LO12_S;
+    case ELF::R_RISCV_HI20:
+      return EdgeKind_riscv::R_RISCV_HI20;
+    case ELF::R_RISCV_LO12_I:
+      return EdgeKind_riscv::R_RISCV_LO12_I;
+    case ELF::R_RISCV_LO12_S:
+      return EdgeKind_riscv::R_RISCV_LO12_S;
+    case ELF::R_RISCV_ADD8:
+      return EdgeKind_riscv::R_RISCV_ADD8;
+    case ELF::R_RISCV_ADD16:
+      return EdgeKind_riscv::R_RISCV_ADD16;
+    case ELF::R_RISCV_ADD32:
+      return EdgeKind_riscv::R_RISCV_ADD32;
+    case ELF::R_RISCV_ADD64:
+      return EdgeKind_riscv::R_RISCV_ADD64;
+    case ELF::R_RISCV_SUB8:
+      return EdgeKind_riscv::R_RISCV_SUB8;
+    case ELF::R_RISCV_SUB16:
+      return EdgeKind_riscv::R_RISCV_SUB16;
+    case ELF::R_RISCV_SUB32:
+      return EdgeKind_riscv::R_RISCV_SUB32;
+    case ELF::R_RISCV_SUB64:
+      return EdgeKind_riscv::R_RISCV_SUB64;
+    case ELF::R_RISCV_RVC_BRANCH:
+      return EdgeKind_riscv::R_RISCV_RVC_BRANCH;
+    case ELF::R_RISCV_RVC_JUMP:
+      return EdgeKind_riscv::R_RISCV_RVC_JUMP;
+    case ELF::R_RISCV_SUB6:
+      return EdgeKind_riscv::R_RISCV_SUB6;
+    case ELF::R_RISCV_SET6:
+      return EdgeKind_riscv::R_RISCV_SET6;
+    case ELF::R_RISCV_SET8:
+      return EdgeKind_riscv::R_RISCV_SET8;
+    case ELF::R_RISCV_SET16:
+      return EdgeKind_riscv::R_RISCV_SET16;
+    case ELF::R_RISCV_SET32:
+      return EdgeKind_riscv::R_RISCV_SET32;
+    case ELF::R_RISCV_32_PCREL:
+      return EdgeKind_riscv::R_RISCV_32_PCREL;
+    case ELF::R_RISCV_ALIGN:
+      return EdgeKind_riscv::AlignRelaxable;
+    }
+
+    return make_error<JITLinkError>(
+        "Unsupported riscv relocation:" + formatv("{0:d}: ", Type) +
+        object::getELFRelocationTypeName(ELF::EM_RISCV, Type));
+  }
+
+  // Map an edge kind to its relaxable variant (currently only calls);
+  // kinds without a relaxable form are returned unchanged.
+  EdgeKind_riscv getRelaxableRelocationKind(EdgeKind_riscv Kind) {
+    switch (Kind) {
+    default:
+      // Just ignore unsupported relaxations
+      return Kind;
+    case R_RISCV_CALL:
+    case R_RISCV_CALL_PLT:
+      return CallRelaxable;
+    }
+  }
+
+  Error addRelocations() override {
+    LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+    using Base = ELFLinkGraphBuilder<ELFT>;
+    using Self = ELFLinkGraphBuilder_riscv<ELFT>;
+    for (const auto &RelSect : Base::Sections)
+      if (Error Err = Base::forEachRelaRelocation(RelSect, this,
+                                                  &Self::addSingleRelocation))
+        return Err;
+
+    return Error::success();
+  }
+
+  // Convert one ELF Rela relocation into a graph edge on BlockToFix.
+  // R_RISCV_RELAX does not create an edge of its own: it marks the
+  // immediately preceding relocation (same offset) as relaxable.
+  Error addSingleRelocation(const typename ELFT::Rela &Rel,
+                            const typename ELFT::Shdr &FixupSect,
+                            Block &BlockToFix) {
+    using Base = ELFLinkGraphBuilder<ELFT>;
+
+    uint32_t Type = Rel.getType(false);
+    int64_t Addend = Rel.r_addend;
+
+    if (Type == ELF::R_RISCV_RELAX) {
+      if (BlockToFix.edges_empty())
+        return make_error<StringError>(
+            "R_RISCV_RELAX without preceding relocation",
+            inconvertibleErrorCode());
+
+      auto &PrevEdge = *std::prev(BlockToFix.edges().end());
+      auto Kind = static_cast<EdgeKind_riscv>(PrevEdge.getKind());
+      PrevEdge.setKind(getRelaxableRelocationKind(Kind));
+      return Error::success();
+    }
+
+    Expected<riscv::EdgeKind_riscv> Kind = getRelocationKind(Type);
+    if (!Kind)
+      return Kind.takeError();
+
+    uint32_t SymbolIndex = Rel.getSymbol(false);
+    auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+    if (!ObjSymbol)
+      return ObjSymbol.takeError();
+
+    Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+    if (!GraphSymbol)
+      return make_error<StringError>(
+          formatv("Could not find symbol at given index, did you add it to "
+                  "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+                  SymbolIndex, (*ObjSymbol)->st_shndx,
+                  Base::GraphSymbols.size()),
+          inconvertibleErrorCode());
+
+    auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+    Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+    Edge GE(*Kind, Offset, *GraphSymbol, Addend);
+    LLVM_DEBUG({
+      dbgs() << "    ";
+      printEdge(dbgs(), BlockToFix, GE, riscv::getEdgeKindName(*Kind));
+      dbgs() << "\n";
+    });
+
+    BlockToFix.addEdge(std::move(GE));
+    return Error::success();
+  }
+
+public:
+  ELFLinkGraphBuilder_riscv(StringRef FileName,
+                            const object::ELFFile<ELFT> &Obj, Triple TT,
+                            SubtargetFeatures Features)
+      : ELFLinkGraphBuilder<ELFT>(Obj, std::move(TT), std::move(Features),
+                                  FileName, riscv::getEdgeKindName) {}
+};
+
+/// Build a LinkGraph from an ELF/riscv (RV32 or RV64) relocatable object,
+/// dispatching on the object's architecture.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_riscv(MemoryBufferRef ObjectBuffer) {
+  LLVM_DEBUG({
+    dbgs() << "Building jitlink graph for new input "
+           << ObjectBuffer.getBufferIdentifier() << "...\n";
+  });
+
+  auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+  if (!ELFObj)
+    return ELFObj.takeError();
+
+  // Subtarget features are needed later to decide RVC availability during
+  // relaxation.
+  auto Features = (*ELFObj)->getFeatures();
+  if (!Features)
+    return Features.takeError();
+
+  if ((*ELFObj)->getArch() == Triple::riscv64) {
+    auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+    return ELFLinkGraphBuilder_riscv<object::ELF64LE>(
+               (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+               (*ELFObj)->makeTriple(), std::move(*Features))
+        .buildGraph();
+  } else {
+    assert((*ELFObj)->getArch() == Triple::riscv32 &&
+           "Invalid triple for RISCV ELF object file");
+    auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF32LE>>(**ELFObj);
+    return ELFLinkGraphBuilder_riscv<object::ELF32LE>(
+               (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+               (*ELFObj)->makeTriple(), std::move(*Features))
+        .buildGraph();
+  }
+}
+
+/// jit-link the given LinkGraph, which must have been built from an
+/// ELF/riscv object file. Sets up the default pass pipeline (eh-frame
+/// handling, mark-live, GOT/PLT building, relaxation) unless the context
+/// opts out, then runs the linker.
+void link_ELF_riscv(std::unique_ptr<LinkGraph> G,
+                    std::unique_ptr<JITLinkContext> Ctx) {
+  PassConfiguration Config;
+  const Triple &TT = G->getTargetTriple();
+  if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+
+    Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
+    Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
+        ".eh_frame", G->getPointerSize(), Edge::Invalid, Edge::Invalid,
+        Edge::Invalid, Edge::Invalid, NegDelta32));
+    Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));
+
+    // Add a mark-live pass, falling back to keeping everything alive.
+    if (auto MarkLive = Ctx->getMarkLivePass(TT))
+      Config.PrePrunePasses.push_back(std::move(MarkLive));
+    else
+      Config.PrePrunePasses.push_back(markAllSymbolsLive);
+    Config.PostPrunePasses.push_back(
+        PerGraphGOTAndPLTStubsBuilder_ELF_riscv::asPass);
+    Config.PostAllocationPasses.push_back(relax);
+  }
+  if (auto Err = Ctx->modifyPassConfig(*G, Config))
+    return Ctx->notifyFailed(std::move(Err));
+
+  ELFJITLinker_riscv::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+// Expose the relaxation pass so clients can schedule it explicitly.
+LinkGraphPassFunction createRelaxationPass_ELF_riscv() { return relax; }
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
new file mode 100644
index 000000000000..b27a1a19acae
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -0,0 +1,386 @@
+//===---- ELF_x86_64.cpp -JIT linker implementation for ELF/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITLink/TableManager.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/Object/ELFObjectFile.h"
+
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+
+constexpr StringRef ELFGOTSymbolName = "_GLOBAL_OFFSET_TABLE_";
+constexpr StringRef ELFTLSInfoSectionName = "$__TLSINFO";
+
/// Table manager for TLS-info entries on ELF/x86-64.
///
/// Each entry is a 16-byte record with two zeroed 64-bit slots (pthread key,
/// data address) placed in the $__TLSINFO section. Edges of kind
/// RequestTLSDescInGOTAndTransformToDelta32 are rewritten into Delta32 edges
/// that target the entry created for their original symbol.
class TLSInfoTableManager_ELF_x86_64
    : public TableManager<TLSInfoTableManager_ELF_x86_64> {
public:
  /// Initial (all-zero) content for a new TLS-info entry; real values are
  /// filled in later (see the note in createEntry).
  static const uint8_t TLSInfoEntryContent[16];

  static StringRef getSectionName() { return ELFTLSInfoSectionName; }

  /// Visitor hook for visitExistingEdges. Returns true if the edge was a
  /// TLS-info request and has been rewritten, false otherwise.
  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getKind() == x86_64::RequestTLSDescInGOTAndTransformToDelta32) {
      LLVM_DEBUG({
        dbgs() << " Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << formatv("{0:x}", B->getFixupAddress(E)) << " ("
               << formatv("{0:x}", B->getAddress()) << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      // Retarget the edge at this symbol's TLS-info entry.
      E.setKind(x86_64::Delta32);
      E.setTarget(getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  /// Create a TLS-info entry block for Target and return an anonymous symbol
  /// covering it.
  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    // the TLS Info entry's key value will be written by the fixTLVSectionByName
    // pass, so create mutable content.
    auto &TLSInfoEntry = G.createMutableContentBlock(
        getTLSInfoSection(G), G.allocateContent(getTLSInfoEntryContent()),
        orc::ExecutorAddr(), 8, 0);
    // Pointer64 at offset 8: the data-address slot points at the target.
    TLSInfoEntry.addEdge(x86_64::Pointer64, 8, Target, 0);
    return G.addAnonymousSymbol(TLSInfoEntry, 0, 16, false, false);
  }

private:
  /// Get, or lazily create, the $__TLSINFO section.
  Section &getTLSInfoSection(LinkGraph &G) {
    if (!TLSInfoTable)
      TLSInfoTable =
          &G.createSection(ELFTLSInfoSectionName, orc::MemProt::Read);
    return *TLSInfoTable;
  }

  /// View the entry template as bytes for G.allocateContent.
  ArrayRef<char> getTLSInfoEntryContent() const {
    return {reinterpret_cast<const char *>(TLSInfoEntryContent),
            sizeof(TLSInfoEntryContent)};
  }

  // Lazily-created section holding all TLS-info entries.
  Section *TLSInfoTable = nullptr;
};

const uint8_t TLSInfoTableManager_ELF_x86_64::TLSInfoEntryContent[16] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*pthread key */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /*data address*/
};
+
+Error buildTables_ELF_x86_64(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+ x86_64::GOTTableManager GOT;
+ x86_64::PLTTableManager PLT(GOT);
+ TLSInfoTableManager_ELF_x86_64 TLSInfo;
+ visitExistingEdges(G, GOT, PLT, TLSInfo);
+ return Error::success();
+}
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
/// Builds a LinkGraph from an ELF/x86-64 relocatable object, translating ELF
/// RELA relocation records into JITLink x86-64 edge kinds.
class ELFLinkGraphBuilder_x86_64 : public ELFLinkGraphBuilder<object::ELF64LE> {
private:
  using ELFT = object::ELF64LE;

  /// Visit every section header and decode relocation entries. x86-64 ELF
  /// objects use SHT_RELA exclusively, so any SHT_REL section is rejected.
  Error addRelocations() override {
    LLVM_DEBUG(dbgs() << "Processing relocations:\n");

    using Base = ELFLinkGraphBuilder<ELFT>;
    using Self = ELFLinkGraphBuilder_x86_64;
    for (const auto &RelSect : Base::Sections) {
      // Validate the section to read relocation entries from.
      if (RelSect.sh_type == ELF::SHT_REL)
        return make_error<StringError>(
            "No SHT_REL in valid x64 ELF object files",
            inconvertibleErrorCode());

      if (Error Err = Base::forEachRelaRelocation(RelSect, this,
                                                  &Self::addSingleRelocation))
        return Err;
    }

    return Error::success();
  }

  /// Translate one RELA record into an edge on BlockToFix.
  ///
  /// \param Rel          the relocation record to decode.
  /// \param FixupSection the section containing the fixup location.
  /// \param BlockToFix   the graph block covering the fixup address.
  Error addSingleRelocation(const typename ELFT::Rela &Rel,
                            const typename ELFT::Shdr &FixupSection,
                            Block &BlockToFix) {
    using Base = ELFLinkGraphBuilder<ELFT>;

    auto ELFReloc = Rel.getType(false);

    // R_X86_64_NONE is a no-op.
    if (LLVM_UNLIKELY(ELFReloc == ELF::R_X86_64_NONE))
      return Error::success();

    uint32_t SymbolIndex = Rel.getSymbol(false);
    auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
    if (!ObjSymbol)
      return ObjSymbol.takeError();

    Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
    if (!GraphSymbol)
      return make_error<StringError>(
          formatv("Could not find symbol at given index, did you add it to "
                  "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
                  SymbolIndex, (*ObjSymbol)->st_shndx,
                  Base::GraphSymbols.size()),
          inconvertibleErrorCode());

    // Validate the relocation kind.
    int64_t Addend = Rel.r_addend;
    Edge::Kind Kind = Edge::Invalid;

    // Map the ELF relocation type onto the corresponding generic x86-64 edge
    // kind.
    switch (ELFReloc) {
    case ELF::R_X86_64_PC8:
      Kind = x86_64::Delta8;
      break;
    case ELF::R_X86_64_PC32:
    case ELF::R_X86_64_GOTPC32:
      Kind = x86_64::Delta32;
      break;
    case ELF::R_X86_64_PC64:
    case ELF::R_X86_64_GOTPC64:
      Kind = x86_64::Delta64;
      break;
    case ELF::R_X86_64_32:
      Kind = x86_64::Pointer32;
      break;
    case ELF::R_X86_64_16:
      Kind = x86_64::Pointer16;
      break;
    case ELF::R_X86_64_8:
      Kind = x86_64::Pointer8;
      break;
    case ELF::R_X86_64_32S:
      Kind = x86_64::Pointer32Signed;
      break;
    case ELF::R_X86_64_64:
      Kind = x86_64::Pointer64;
      break;
    case ELF::R_X86_64_GOTPCREL:
      Kind = x86_64::RequestGOTAndTransformToDelta32;
      break;
    case ELF::R_X86_64_REX_GOTPCRELX:
      // Relaxable GOT load (REX-prefixed); addend is dropped here.
      Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable;
      Addend = 0;
      break;
    case ELF::R_X86_64_TLSGD:
      Kind = x86_64::RequestTLSDescInGOTAndTransformToDelta32;
      break;
    case ELF::R_X86_64_GOTPCRELX:
      // Relaxable GOT load; addend is dropped here.
      Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable;
      Addend = 0;
      break;
    case ELF::R_X86_64_GOTPCREL64:
      Kind = x86_64::RequestGOTAndTransformToDelta64;
      break;
    case ELF::R_X86_64_GOT64:
      Kind = x86_64::RequestGOTAndTransformToDelta64FromGOT;
      break;
    case ELF::R_X86_64_GOTOFF64:
      Kind = x86_64::Delta64FromGOT;
      break;
    case ELF::R_X86_64_PLT32:
      Kind = x86_64::BranchPCRel32;
      // BranchPCRel32 implicitly handles the '-4' PC adjustment, so we have to
      // adjust the addend by '+4' to compensate.
      Addend += 4;
      break;
    default:
      return make_error<JITLinkError>(
          "In " + G->getName() + ": Unsupported x86-64 relocation type " +
          object::getELFRelocationTypeName(ELF::EM_X86_64, ELFReloc));
    }

    // Attach the edge at the fixup's offset within the containing block.
    auto FixupAddress = orc::ExecutorAddr(FixupSection.sh_addr) + Rel.r_offset;
    Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
    Edge GE(Kind, Offset, *GraphSymbol, Addend);
    LLVM_DEBUG({
      dbgs() << " ";
      printEdge(dbgs(), BlockToFix, GE, x86_64::getEdgeKindName(Kind));
      dbgs() << "\n";
    });

    BlockToFix.addEdge(std::move(GE));
    return Error::success();
  }

public:
  /// Note: the graph's triple is fixed to x86_64-unknown-linux regardless of
  /// the input object's triple.
  ELFLinkGraphBuilder_x86_64(StringRef FileName,
                             const object::ELFFile<object::ELF64LE> &Obj,
                             SubtargetFeatures Features)
      : ELFLinkGraphBuilder(Obj, Triple("x86_64-unknown-linux"),
                            std::move(Features), FileName,
                            x86_64::getEdgeKindName) {}
};
+
/// JITLinker specialization for ELF/x86-64. Adds a post-allocation pass that
/// resolves the _GLOBAL_OFFSET_TABLE_ symbol so that GOT-relative fixups can
/// be applied.
class ELFJITLinker_x86_64 : public JITLinker<ELFJITLinker_x86_64> {
  friend class JITLinker<ELFJITLinker_x86_64>;

public:
  ELFJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
                      std::unique_ptr<LinkGraph> G,
                      PassConfiguration PassConfig)
      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {

    // Resolve _GLOBAL_OFFSET_TABLE_ after allocation, unless the context
    // opted out of default target passes.
    if (shouldAddDefaultTargetPasses(getGraph().getTargetTriple()))
      getPassConfig().PostAllocationPasses.push_back(
          [this](LinkGraph &G) { return getOrCreateGOTSymbol(G); });
  }

private:
  // Resolved GOT base symbol; may remain null if the graph contains neither a
  // GOT section nor a _GLOBAL_OFFSET_TABLE_ reference.
  Symbol *GOTSymbol = nullptr;

  /// Locate or synthesize _GLOBAL_OFFSET_TABLE_, trying in order:
  ///  1. an external _GLOBAL_OFFSET_TABLE_ bound to the GOT section start;
  ///  2. a symbol of that name already defined in the GOT section;
  ///  3. a new start-of-GOT symbol (absolute zero if the GOT is empty);
  ///  4. an external with no GOT section present, which is made absolute at
  ///     the address of the graph's first block.
  Error getOrCreateGOTSymbol(LinkGraph &G) {
    auto DefineExternalGOTSymbolIfPresent =
        createDefineExternalSectionStartAndEndSymbolsPass(
            [&](LinkGraph &LG, Symbol &Sym) -> SectionRangeSymbolDesc {
              if (Sym.getName() == ELFGOTSymbolName)
                if (auto *GOTSection = G.findSectionByName(
                        x86_64::GOTTableManager::getSectionName())) {
                  GOTSymbol = &Sym;
                  return {*GOTSection, true};
                }
              return {};
            });

    // Try to attach _GLOBAL_OFFSET_TABLE_ to the GOT if it's defined as an
    // external.
    if (auto Err = DefineExternalGOTSymbolIfPresent(G))
      return Err;

    // If we succeeded then we're done.
    if (GOTSymbol)
      return Error::success();

    // Otherwise look for a GOT section: If it already has a start symbol we'll
    // record it, otherwise we'll create our own.
    // If there's a GOT section but we didn't find an external GOT symbol...
    if (auto *GOTSection =
            G.findSectionByName(x86_64::GOTTableManager::getSectionName())) {

      // Check for an existing defined symbol.
      for (auto *Sym : GOTSection->symbols())
        if (Sym->getName() == ELFGOTSymbolName) {
          GOTSymbol = Sym;
          return Error::success();
        }

      // If there's no defined symbol then create one.
      SectionRange SR(*GOTSection);
      if (SR.empty())
        GOTSymbol =
            &G.addAbsoluteSymbol(ELFGOTSymbolName, orc::ExecutorAddr(), 0,
                                 Linkage::Strong, Scope::Local, true);
      else
        GOTSymbol =
            &G.addDefinedSymbol(*SR.getFirstBlock(), 0, ELFGOTSymbolName, 0,
                                Linkage::Strong, Scope::Local, false, true);
    }

    // If we still haven't found a GOT symbol then double check the externals.
    // We may have a GOT-relative reference but no GOT section, in which case
    // we just need to point the GOT symbol at some address in this graph.
    if (!GOTSymbol) {
      for (auto *Sym : G.external_symbols()) {
        if (Sym->getName() == ELFGOTSymbolName) {
          auto Blocks = G.blocks();
          if (!Blocks.empty()) {
            G.makeAbsolute(*Sym, (*Blocks.begin())->getAddress());
            GOTSymbol = Sym;
            break;
          }
        }
      }
    }

    return Error::success();
  }

  /// Apply a single edge fixup, forwarding the resolved GOT symbol for
  /// GOT-relative edge kinds.
  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
    return x86_64::applyFixup(G, B, E, GOTSymbol);
  }
};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_x86_64(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto Features = (*ELFObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_x86_64((*ELFObj)->getFileName(),
+ ELFObjFile.getELFFile(),
+ std::move(*Features))
+ .buildGraph();
+}
+
/// Link the given x86-64 ELF LinkGraph. Installs the default ELF/x86-64 pass
/// pipeline (unless the context opts out), lets the context amend it, then
/// constructs an ELFJITLinker_x86_64 and runs the link.
void link_ELF_x86_64(std::unique_ptr<LinkGraph> G,
                     std::unique_ptr<JITLinkContext> Ctx) {
  PassConfiguration Config;

  if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {

    // Split .eh_frame into per-record blocks and fix up its edges before
    // pruning.
    Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame"));
    Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
        ".eh_frame", x86_64::PointerSize, x86_64::Pointer32, x86_64::Pointer64,
        x86_64::Delta32, x86_64::Delta64, x86_64::NegDelta32));
    Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));

    // Add a mark-live pass: the context's own if provided, otherwise keep
    // every symbol alive.
    if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
      Config.PrePrunePasses.push_back(std::move(MarkLive));
    else
      Config.PrePrunePasses.push_back(markAllSymbolsLive);

    // Add an in-place GOT/Stubs/TLSInfoEntry build pass.
    Config.PostPrunePasses.push_back(buildTables_ELF_x86_64);

    // Resolve any external section start / end symbols.
    Config.PostAllocationPasses.push_back(
        createDefineExternalSectionStartAndEndSymbolsPass(
            identifyELFSectionStartAndEndSymbols));

    // Add GOT/Stubs optimizer pass.
    Config.PreFixupPasses.push_back(x86_64::optimizeGOTAndStubAccesses);
  }

  if (auto Err = Ctx->modifyPassConfig(*G, Config))
    return Ctx->notifyFailed(std::move(Err));

  ELFJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
}
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
new file mode 100644
index 000000000000..b103a9ca98e1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -0,0 +1,521 @@
+//===------------- JITLink.cpp - Core Run-time JIT linker APIs ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/ExecutionEngine/JITLink/COFF.h"
+#include "llvm/ExecutionEngine/JITLink/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+#include "llvm/ExecutionEngine/JITLink/i386.h"
+#include "llvm/ExecutionEngine/JITLink/loongarch.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace {
+
+enum JITLinkErrorCode { GenericJITLinkError = 1 };
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
/// error_category used when converting JITLink errors to std::error_code.
/// NOTE(review): name() returns "runtimedyld" — presumably carried over from
/// the RuntimeDyld implementation; confirm before changing, as the string may
/// be observable by clients.
class JITLinkerErrorCategory : public std::error_category {
public:
  const char *name() const noexcept override { return "runtimedyld"; }

  std::string message(int Condition) const override {
    switch (static_cast<JITLinkErrorCode>(Condition)) {
    case GenericJITLinkError:
      return "Generic JITLink error";
    }
    llvm_unreachable("Unrecognized JITLinkErrorCode");
  }
};
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
// ID anchor used by LLVM's ErrorInfo RTTI to identify JITLinkError.
char JITLinkError::ID = 0;

// Print the stored error message.
void JITLinkError::log(raw_ostream &OS) const { OS << ErrMsg; }

// All JITLink errors currently map to the single generic error code.
std::error_code JITLinkError::convertToErrorCode() const {
  static JITLinkerErrorCategory TheJITLinkerErrorCategory;
  return std::error_code(GenericJITLinkError, TheJITLinkerErrorCategory);
}
+
+const char *getGenericEdgeKindName(Edge::Kind K) {
+ switch (K) {
+ case Edge::Invalid:
+ return "INVALID RELOCATION";
+ case Edge::KeepAlive:
+ return "Keep-Alive";
+ default:
+ return "<Unrecognized edge kind>";
+ }
+}
+
+const char *getLinkageName(Linkage L) {
+ switch (L) {
+ case Linkage::Strong:
+ return "strong";
+ case Linkage::Weak:
+ return "weak";
+ }
+ llvm_unreachable("Unrecognized llvm.jitlink.Linkage enum");
+}
+
+const char *getScopeName(Scope S) {
+ switch (S) {
+ case Scope::Default:
+ return "default";
+ case Scope::Hidden:
+ return "hidden";
+ case Scope::Local:
+ return "local";
+ }
+ llvm_unreachable("Unrecognized llvm.jitlink.Scope enum");
+}
+
+bool isCStringBlock(Block &B) {
+ if (B.getSize() == 0) // Empty blocks are not valid C-strings.
+ return false;
+
+ // Zero-fill blocks of size one are valid empty strings.
+ if (B.isZeroFill())
+ return B.getSize() == 1;
+
+ for (size_t I = 0; I != B.getSize() - 1; ++I)
+ if (B.getContent()[I] == '\0')
+ return false;
+
+ return B.getContent()[B.getSize() - 1] == '\0';
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Block &B) {
+ return OS << B.getAddress() << " -- " << (B.getAddress() + B.getSize())
+ << ": "
+ << "size = " << formatv("{0:x8}", B.getSize()) << ", "
+ << (B.isZeroFill() ? "zero-fill" : "content")
+ << ", align = " << B.getAlignment()
+ << ", align-ofs = " << B.getAlignmentOffset()
+ << ", section = " << B.getSection().getName();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Symbol &Sym) {
+ OS << Sym.getAddress() << " (" << (Sym.isDefined() ? "block" : "addressable")
+ << " + " << formatv("{0:x8}", Sym.getOffset())
+ << "): size: " << formatv("{0:x8}", Sym.getSize())
+ << ", linkage: " << formatv("{0:6}", getLinkageName(Sym.getLinkage()))
+ << ", scope: " << formatv("{0:8}", getScopeName(Sym.getScope())) << ", "
+ << (Sym.isLive() ? "live" : "dead") << " - "
+ << (Sym.hasName() ? Sym.getName() : "<anonymous symbol>");
+ return OS;
+}
+
/// Print a description of edge E in block B: fixup address, source block and
/// offset, edge kind, and target. Named targets are printed by name;
/// anonymous targets are described by their section, block, and offset.
void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
               StringRef EdgeKindName) {
  OS << "edge@" << B.getAddress() + E.getOffset() << ": " << B.getAddress()
     << " + " << formatv("{0:x}", E.getOffset()) << " -- " << EdgeKindName
     << " -> ";

  auto &TargetSym = E.getTarget();
  if (TargetSym.hasName())
    OS << TargetSym.getName();
  else {
    auto &TargetBlock = TargetSym.getBlock();
    auto &TargetSec = TargetBlock.getSection();
    // Find the lowest block address in the target's section to describe the
    // target as section + delta.
    orc::ExecutorAddr SecAddress(~uint64_t(0));
    for (auto *B : TargetSec.blocks())
      if (B->getAddress() < SecAddress)
        SecAddress = B->getAddress();

    orc::ExecutorAddrDiff SecDelta = TargetSym.getAddress() - SecAddress;
    OS << TargetSym.getAddress() << " (section " << TargetSec.getName();
    if (SecDelta)
      OS << " + " << formatv("{0:x}", SecDelta);
    OS << " / block " << TargetBlock.getAddress();
    if (TargetSym.getOffset())
      OS << " + " << formatv("{0:x}", TargetSym.getOffset());
    OS << ")";
  }

  if (E.getAddend() != 0)
    OS << " + " << E.getAddend();
}
+
+Section::~Section() {
+ for (auto *Sym : Symbols)
+ Sym->~Symbol();
+ for (auto *B : Blocks)
+ B->~Block();
+}
+
+Block &LinkGraph::splitBlock(Block &B, size_t SplitIndex,
+ SplitBlockCache *Cache) {
+
+ assert(SplitIndex > 0 && "splitBlock can not be called with SplitIndex == 0");
+
+ // If the split point covers all of B then just return B.
+ if (SplitIndex == B.getSize())
+ return B;
+
+ assert(SplitIndex < B.getSize() && "SplitIndex out of range");
+
+ // Create the new block covering [ 0, SplitIndex ).
+ auto &NewBlock =
+ B.isZeroFill()
+ ? createZeroFillBlock(B.getSection(), SplitIndex, B.getAddress(),
+ B.getAlignment(), B.getAlignmentOffset())
+ : createContentBlock(
+ B.getSection(), B.getContent().slice(0, SplitIndex),
+ B.getAddress(), B.getAlignment(), B.getAlignmentOffset());
+
+ // Modify B to cover [ SplitIndex, B.size() ).
+ B.setAddress(B.getAddress() + SplitIndex);
+ B.setContent(B.getContent().slice(SplitIndex));
+ B.setAlignmentOffset((B.getAlignmentOffset() + SplitIndex) %
+ B.getAlignment());
+
+ // Handle edge transfer/update.
+ {
+ // Copy edges to NewBlock (recording their iterators so that we can remove
+ // them from B), and update of Edges remaining on B.
+ std::vector<Block::edge_iterator> EdgesToRemove;
+ for (auto I = B.edges().begin(); I != B.edges().end();) {
+ if (I->getOffset() < SplitIndex) {
+ NewBlock.addEdge(*I);
+ I = B.removeEdge(I);
+ } else {
+ I->setOffset(I->getOffset() - SplitIndex);
+ ++I;
+ }
+ }
+ }
+
+ // Handle symbol transfer/update.
+ {
+ // Initialize the symbols cache if necessary.
+ SplitBlockCache LocalBlockSymbolsCache;
+ if (!Cache)
+ Cache = &LocalBlockSymbolsCache;
+ if (*Cache == std::nullopt) {
+ *Cache = SplitBlockCache::value_type();
+ for (auto *Sym : B.getSection().symbols())
+ if (&Sym->getBlock() == &B)
+ (*Cache)->push_back(Sym);
+
+ llvm::sort(**Cache, [](const Symbol *LHS, const Symbol *RHS) {
+ return LHS->getOffset() > RHS->getOffset();
+ });
+ }
+ auto &BlockSymbols = **Cache;
+
+ // Transfer all symbols with offset less than SplitIndex to NewBlock.
+ while (!BlockSymbols.empty() &&
+ BlockSymbols.back()->getOffset() < SplitIndex) {
+ auto *Sym = BlockSymbols.back();
+ // If the symbol extends beyond the split, update the size to be within
+ // the new block.
+ if (Sym->getOffset() + Sym->getSize() > SplitIndex)
+ Sym->setSize(SplitIndex - Sym->getOffset());
+ Sym->setBlock(NewBlock);
+ BlockSymbols.pop_back();
+ }
+
+ // Update offsets for all remaining symbols in B.
+ for (auto *Sym : BlockSymbols)
+ Sym->setOffset(Sym->getOffset() - SplitIndex);
+ }
+
+ return NewBlock;
+}
+
/// Dump a human-readable description of the whole graph: for each section,
/// its blocks (sorted by address) with their symbols and edges, followed by
/// lists of absolute and external symbols.
void LinkGraph::dump(raw_ostream &OS) {
  DenseMap<Block *, std::vector<Symbol *>> BlockSymbols;

  // Map from blocks to the symbols pointing at them.
  for (auto *Sym : defined_symbols())
    BlockSymbols[&Sym->getBlock()].push_back(Sym);

  // For each block, sort its symbols by something approximating
  // relevance.
  for (auto &KV : BlockSymbols)
    llvm::sort(KV.second, [](const Symbol *LHS, const Symbol *RHS) {
      if (LHS->getOffset() != RHS->getOffset())
        return LHS->getOffset() < RHS->getOffset();
      if (LHS->getLinkage() != RHS->getLinkage())
        return LHS->getLinkage() < RHS->getLinkage();
      if (LHS->getScope() != RHS->getScope())
        return LHS->getScope() < RHS->getScope();
      if (LHS->hasName()) {
        if (!RHS->hasName())
          return true;
        return LHS->getName() < RHS->getName();
      }
      return false;
    });

  for (auto &Sec : sections()) {
    OS << "section " << Sec.getName() << ":\n\n";

    // Print blocks in address order for stable, readable output.
    std::vector<Block *> SortedBlocks;
    llvm::copy(Sec.blocks(), std::back_inserter(SortedBlocks));
    llvm::sort(SortedBlocks, [](const Block *LHS, const Block *RHS) {
      return LHS->getAddress() < RHS->getAddress();
    });

    for (auto *B : SortedBlocks) {
      OS << "  block " << B->getAddress()
         << " size = " << formatv("{0:x8}", B->getSize())
         << ", align = " << B->getAlignment()
         << ", alignment-offset = " << B->getAlignmentOffset();
      if (B->isZeroFill())
        OS << ", zero-fill";
      OS << "\n";

      auto BlockSymsI = BlockSymbols.find(B);
      if (BlockSymsI != BlockSymbols.end()) {
        OS << "    symbols:\n";
        auto &Syms = BlockSymsI->second;
        for (auto *Sym : Syms)
          OS << "      " << *Sym << "\n";
      } else
        OS << "    no symbols\n";

      if (!B->edges_empty()) {
        OS << "    edges:\n";
        // Print edges in offset order.
        std::vector<Edge> SortedEdges;
        llvm::copy(B->edges(), std::back_inserter(SortedEdges));
        llvm::sort(SortedEdges, [](const Edge &LHS, const Edge &RHS) {
          return LHS.getOffset() < RHS.getOffset();
        });
        for (auto &E : SortedEdges) {
          OS << "      " << B->getFixupAddress(E) << " (block + "
             << formatv("{0:x8}", E.getOffset()) << "), addend = ";
          if (E.getAddend() >= 0)
            OS << formatv("+{0:x8}", E.getAddend());
          else
            OS << formatv("-{0:x8}", -E.getAddend());
          OS << ", kind = " << getEdgeKindName(E.getKind()) << ", target = ";
          if (E.getTarget().hasName())
            OS << E.getTarget().getName();
          else
            OS << "addressable@"
               << formatv("{0:x16}", E.getTarget().getAddress()) << "+"
               << formatv("{0:x8}", E.getTarget().getOffset());
          OS << "\n";
        }
      } else
        OS << "    no edges\n";
      OS << "\n";
    }
  }

  OS << "Absolute symbols:\n";
  if (!absolute_symbols().empty()) {
    for (auto *Sym : absolute_symbols())
      OS << "  " << Sym->getAddress() << ": " << *Sym << "\n";
  } else
    OS << "  none\n";

  OS << "\nExternal symbols:\n";
  if (!external_symbols().empty()) {
    for (auto *Sym : external_symbols())
      OS << "  " << Sym->getAddress() << ": " << *Sym
         << (Sym->isWeaklyReferenced() ? " (weakly referenced)" : "") << "\n";
  } else
    OS << "  none\n";
}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LF) {
+ switch (LF) {
+ case SymbolLookupFlags::RequiredSymbol:
+ return OS << "RequiredSymbol";
+ case SymbolLookupFlags::WeaklyReferencedSymbol:
+ return OS << "WeaklyReferencedSymbol";
+ }
+ llvm_unreachable("Unrecognized lookup flags");
+}
+
// Out-of-line virtual method definition (anchor) for
// JITLinkAsyncLookupContinuation.
void JITLinkAsyncLookupContinuation::anchor() {}

JITLinkContext::~JITLinkContext() = default;

// Default: always install the target's standard pass pipeline.
bool JITLinkContext::shouldAddDefaultTargetPasses(const Triple &TT) const {
  return true;
}

// Default: no custom mark-live pass (an empty pass function; callers in this
// file fall back to markAllSymbolsLive).
LinkGraphPassFunction JITLinkContext::getMarkLivePass(const Triple &TT) const {
  return LinkGraphPassFunction();
}

// Default: leave the pass configuration unchanged.
Error JITLinkContext::modifyPassConfig(LinkGraph &G,
                                       PassConfiguration &Config) {
  return Error::success();
}
+
+Error markAllSymbolsLive(LinkGraph &G) {
+ for (auto *Sym : G.defined_symbols())
+ Sym->setLive(true);
+ return Error::success();
+}
+
/// Build a descriptive JITLinkError for a fixup whose target lies outside the
/// range representable by the edge kind. The message names the target (by
/// symbol name when available), the target and fixup addresses, and the
/// best-named symbol covering the fixup's block.
Error makeTargetOutOfRangeError(const LinkGraph &G, const Block &B,
                                const Edge &E) {
  std::string ErrMsg;
  {
    raw_string_ostream ErrStream(ErrMsg);
    Section &Sec = B.getSection();
    ErrStream << "In graph " << G.getName() << ", section " << Sec.getName()
              << ": relocation target ";
    if (E.getTarget().hasName()) {
      ErrStream << "\"" << E.getTarget().getName() << "\"";
    } else
      ErrStream << E.getTarget().getBlock().getSection().getName() << " + "
                << formatv("{0:x}", E.getOffset());
    ErrStream << " at address " << formatv("{0:x}", E.getTarget().getAddress())
              << " is out of range of " << G.getEdgeKindName(E.getKind())
              << " fixup at " << formatv("{0:x}", B.getFixupAddress(E)) << " (";

    // Pick a human-readable label for the fixup's block: a named symbol at
    // offset 0, preferring wider scope and then stronger linkage.
    Symbol *BestSymbolForBlock = nullptr;
    for (auto *Sym : Sec.symbols())
      if (&Sym->getBlock() == &B && Sym->hasName() && Sym->getOffset() == 0 &&
          (!BestSymbolForBlock ||
           Sym->getScope() < BestSymbolForBlock->getScope() ||
           Sym->getLinkage() < BestSymbolForBlock->getLinkage()))
        BestSymbolForBlock = Sym;

    if (BestSymbolForBlock)
      ErrStream << BestSymbolForBlock->getName() << ", ";
    else
      ErrStream << "<anonymous block> @ ";

    ErrStream << formatv("{0:x}", B.getAddress()) << " + "
              << formatv("{0:x}", E.getOffset()) << ")";
  }
  return make_error<JITLinkError>(std::move(ErrMsg));
}
+
/// Build a JITLinkError reporting that Value, written for relocation E at
/// address Loc, is not aligned to N bytes.
Error makeAlignmentError(llvm::orc::ExecutorAddr Loc, uint64_t Value, int N,
                         const Edge &E) {
  return make_error<JITLinkError>("0x" + llvm::utohexstr(Loc.getValue()) +
                                  " improper alignment for relocation " +
                                  formatv("{0:d}", E.getKind()) + ": 0x" +
                                  llvm::utohexstr(Value) +
                                  " is not aligned to " + Twine(N) + " bytes");
}
+
+AnonymousPointerCreator getAnonymousPointerCreator(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ return aarch64::createAnonymousPointer;
+ case Triple::x86_64:
+ return x86_64::createAnonymousPointer;
+ case Triple::x86:
+ return i386::createAnonymousPointer;
+ case Triple::loongarch32:
+ case Triple::loongarch64:
+ return loongarch::createAnonymousPointer;
+ default:
+ return nullptr;
+ }
+}
+
+PointerJumpStubCreator getPointerJumpStubCreator(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ return aarch64::createAnonymousPointerJumpStub;
+ case Triple::x86_64:
+ return x86_64::createAnonymousPointerJumpStub;
+ case Triple::x86:
+ return i386::createAnonymousPointerJumpStub;
+ case Triple::loongarch32:
+ case Triple::loongarch64:
+ return loongarch::createAnonymousPointerJumpStub;
+ default:
+ return nullptr;
+ }
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromObject(MemoryBufferRef ObjectBuffer) {
+ auto Magic = identify_magic(ObjectBuffer.getBuffer());
+ switch (Magic) {
+ case file_magic::macho_object:
+ return createLinkGraphFromMachOObject(ObjectBuffer);
+ case file_magic::elf_relocatable:
+ return createLinkGraphFromELFObject(ObjectBuffer);
+ case file_magic::coff_object:
+ return createLinkGraphFromCOFFObject(ObjectBuffer);
+ default:
+ return make_error<JITLinkError>("Unsupported file format");
+ };
+}
+
+std::unique_ptr<LinkGraph> absoluteSymbolsLinkGraph(const Triple &TT,
+ orc::SymbolMap Symbols) {
+ unsigned PointerSize;
+ endianness Endianness =
+ TT.isLittleEndian() ? endianness::little : endianness::big;
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ case llvm::Triple::riscv64:
+ case Triple::x86_64:
+ PointerSize = 8;
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::x86:
+ PointerSize = 4;
+ break;
+ default:
+ llvm::report_fatal_error("unhandled target architecture");
+ }
+
+ static std::atomic<uint64_t> Counter = {0};
+ auto Index = Counter.fetch_add(1, std::memory_order_relaxed);
+ auto G = std::make_unique<LinkGraph>(
+ "<Absolute Symbols " + std::to_string(Index) + ">", TT, PointerSize,
+ Endianness, /*GetEdgeKindName=*/nullptr);
+ for (auto &[Name, Def] : Symbols) {
+ auto &Sym =
+ G->addAbsoluteSymbol(*Name, Def.getAddress(), /*Size=*/0,
+ Linkage::Strong, Scope::Default, /*IsLive=*/true);
+ Sym.setCallable(Def.getFlags().isCallable());
+ }
+
+ return G;
+}
+
+void link(std::unique_ptr<LinkGraph> G, std::unique_ptr<JITLinkContext> Ctx) {
+ switch (G->getTargetTriple().getObjectFormat()) {
+ case Triple::MachO:
+ return link_MachO(std::move(G), std::move(Ctx));
+ case Triple::ELF:
+ return link_ELF(std::move(G), std::move(Ctx));
+ case Triple::COFF:
+ return link_COFF(std::move(G), std::move(Ctx));
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>("Unsupported object format"));
+ };
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
new file mode 100644
index 000000000000..01144763ac4c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
@@ -0,0 +1,354 @@
+//===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utility class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JITLinkGeneric.h"
+
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkerBase::~JITLinkerBase() = default;
+
+void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
+
+ LLVM_DEBUG({
+ dbgs() << "Starting link phase 1 for graph " << G->getName() << "\n";
+ });
+
+ // Prune and optimize the graph.
+ if (auto Err = runPasses(Passes.PrePrunePasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" pre-pruning:\n";
+ G->dump(dbgs());
+ });
+
+ prune(*G);
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" post-pruning:\n";
+ G->dump(dbgs());
+ });
+
+ // Run post-pruning passes.
+ if (auto Err = runPasses(Passes.PostPrunePasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Skip straight to phase 2 if the graph is empty with no associated actions.
+ if (G->allocActions().empty() && llvm::all_of(G->sections(), [](Section &S) {
+ return S.getMemLifetime() == orc::MemLifetime::NoAlloc;
+ })) {
+ linkPhase2(std::move(Self), nullptr);
+ return;
+ }
+
+ Ctx->getMemoryManager().allocate(
+ Ctx->getJITLinkDylib(), *G,
+ [S = std::move(Self)](AllocResult AR) mutable {
+ // FIXME: Once MSVC implements c++17 order of evaluation rules for calls
+ // this can be simplified to
+ // S->linkPhase2(std::move(S), std::move(AR));
+ auto *TmpSelf = S.get();
+ TmpSelf->linkPhase2(std::move(S), std::move(AR));
+ });
+}
+
+void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
+ AllocResult AR) {
+
+ if (AR)
+ Alloc = std::move(*AR);
+ else
+ return Ctx->notifyFailed(AR.takeError());
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName()
+ << "\" before post-allocation passes:\n";
+ G->dump(dbgs());
+ });
+
+ // Run post-allocation passes.
+ if (auto Err = runPasses(Passes.PostAllocationPasses))
+ return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+ // Notify client that the defined symbols have been assigned addresses.
+ LLVM_DEBUG(dbgs() << "Resolving symbols defined in " << G->getName() << "\n");
+
+ if (auto Err = Ctx->notifyResolved(*G))
+ return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+ auto ExternalSymbols = getExternalSymbolNames();
+
+ // If there are no external symbols then proceed immediately with phase 3.
+ if (ExternalSymbols.empty()) {
+ LLVM_DEBUG({
+ dbgs() << "No external symbols for " << G->getName()
+ << ". Proceeding immediately with link phase 3.\n";
+ });
+ // FIXME: Once MSVC implements c++17 order of evaluation rules for calls
+ // this can be simplified. See below.
+ auto &TmpSelf = *Self;
+ TmpSelf.linkPhase3(std::move(Self), AsyncLookupResult());
+ return;
+ }
+
+ // Otherwise look up the externals.
+ LLVM_DEBUG({
+ dbgs() << "Issuing lookup for external symbols for " << G->getName()
+ << " (may trigger materialization/linking of other graphs)...\n";
+ });
+
+ // We're about to hand off ownership of ourself to the continuation. Grab a
+ // pointer to the context so that we can call it to initiate the lookup.
+ //
+ // FIXME: Once MSVC implements c++17 order of evaluation rules for calls this
+ // can be simplified to:
+ //
+ // Ctx->lookup(std::move(UnresolvedExternals),
+ // [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
+ // Self->linkPhase3(std::move(Self), std::move(Result));
+ // });
+ Ctx->lookup(std::move(ExternalSymbols),
+ createLookupContinuation(
+ [S = std::move(Self)](
+ Expected<AsyncLookupResult> LookupResult) mutable {
+ auto &TmpSelf = *S;
+ TmpSelf.linkPhase3(std::move(S), std::move(LookupResult));
+ }));
+}
+
+void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
+ Expected<AsyncLookupResult> LR) {
+
+ LLVM_DEBUG({
+ dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n";
+ });
+
+ // If the lookup failed, bail out.
+ if (!LR)
+ return abandonAllocAndBailOut(std::move(Self), LR.takeError());
+
+ // Assign addresses to external addressables.
+ applyLookupResult(*LR);
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName()
+ << "\" before pre-fixup passes:\n";
+ G->dump(dbgs());
+ });
+
+ if (auto Err = runPasses(Passes.PreFixupPasses))
+ return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n";
+ G->dump(dbgs());
+ });
+
+ // Fix up block content.
+ if (auto Err = fixUpBlocks(*G))
+ return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n";
+ G->dump(dbgs());
+ });
+
+ if (auto Err = runPasses(Passes.PostFixupPasses))
+ return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+ // Skip straight to phase 4 if the graph has no allocation.
+ if (!Alloc) {
+ linkPhase4(std::move(Self), JITLinkMemoryManager::FinalizedAlloc{});
+ return;
+ }
+
+ Alloc->finalize([S = std::move(Self)](FinalizeResult FR) mutable {
+ // FIXME: Once MSVC implements c++17 order of evaluation rules for calls
+ // this can be simplified to
+    //   S->linkPhase4(std::move(S), std::move(FR));
+ auto *TmpSelf = S.get();
+ TmpSelf->linkPhase4(std::move(S), std::move(FR));
+ });
+}
+
+void JITLinkerBase::linkPhase4(std::unique_ptr<JITLinkerBase> Self,
+ FinalizeResult FR) {
+
+ LLVM_DEBUG({
+ dbgs() << "Starting link phase 4 for graph " << G->getName() << "\n";
+ });
+
+ if (!FR)
+ return Ctx->notifyFailed(FR.takeError());
+
+ Ctx->notifyFinalized(std::move(*FR));
+
+ LLVM_DEBUG({ dbgs() << "Link of graph " << G->getName() << " complete\n"; });
+}
+
+Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) {
+ for (auto &P : Passes)
+ if (auto Err = P(*G))
+ return Err;
+ return Error::success();
+}
+
+JITLinkContext::LookupMap JITLinkerBase::getExternalSymbolNames() const {
+ // Identify unresolved external symbols.
+ JITLinkContext::LookupMap UnresolvedExternals;
+ for (auto *Sym : G->external_symbols()) {
+ assert(!Sym->getAddress() &&
+ "External has already been assigned an address");
+ assert(Sym->getName() != StringRef() && Sym->getName() != "" &&
+ "Externals must be named");
+ SymbolLookupFlags LookupFlags =
+ Sym->isWeaklyReferenced() ? SymbolLookupFlags::WeaklyReferencedSymbol
+ : SymbolLookupFlags::RequiredSymbol;
+ UnresolvedExternals[Sym->getName()] = LookupFlags;
+ }
+ return UnresolvedExternals;
+}
+
+void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
+ for (auto *Sym : G->external_symbols()) {
+ assert(Sym->getOffset() == 0 &&
+ "External symbol is not at the start of its addressable block");
+ assert(!Sym->getAddress() && "Symbol already resolved");
+ assert(!Sym->isDefined() && "Symbol being resolved is already defined");
+ auto ResultI = Result.find(Sym->getName());
+ if (ResultI != Result.end()) {
+ Sym->getAddressable().setAddress(ResultI->second.getAddress());
+ Sym->setLinkage(ResultI->second.getFlags().isWeak() ? Linkage::Weak
+ : Linkage::Strong);
+ Sym->setScope(ResultI->second.getFlags().isExported() ? Scope::Default
+ : Scope::Hidden);
+ } else
+ assert(Sym->isWeaklyReferenced() &&
+ "Failed to resolve non-weak reference");
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Externals after applying lookup result:\n";
+ for (auto *Sym : G->external_symbols()) {
+ dbgs() << " " << Sym->getName() << ": "
+ << formatv("{0:x16}", Sym->getAddress().getValue());
+ switch (Sym->getLinkage()) {
+ case Linkage::Strong:
+ break;
+ case Linkage::Weak:
+ dbgs() << " (weak)";
+ break;
+ }
+ switch (Sym->getScope()) {
+ case Scope::Local:
+ llvm_unreachable("External symbol should not have local linkage");
+ case Scope::Hidden:
+ break;
+ case Scope::Default:
+ dbgs() << " (exported)";
+ break;
+ }
+ dbgs() << "\n";
+ }
+ });
+}
+
+void JITLinkerBase::abandonAllocAndBailOut(std::unique_ptr<JITLinkerBase> Self,
+ Error Err) {
+ assert(Err && "Should not be bailing out on success value");
+ assert(Alloc && "can not call abandonAllocAndBailOut before allocation");
+ Alloc->abandon([S = std::move(Self), E1 = std::move(Err)](Error E2) mutable {
+ S->Ctx->notifyFailed(joinErrors(std::move(E1), std::move(E2)));
+ });
+}
+
+void prune(LinkGraph &G) {
+ std::vector<Symbol *> Worklist;
+ DenseSet<Block *> VisitedBlocks;
+
+ // Build the initial worklist from all symbols initially live.
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->isLive())
+ Worklist.push_back(Sym);
+
+ // Propagate live flags to all symbols reachable from the initial live set.
+ while (!Worklist.empty()) {
+ auto *Sym = Worklist.back();
+ Worklist.pop_back();
+
+ auto &B = Sym->getBlock();
+
+ // Skip addressables that we've visited before.
+ if (VisitedBlocks.count(&B))
+ continue;
+
+ VisitedBlocks.insert(&B);
+
+ for (auto &E : Sym->getBlock().edges()) {
+ // If the edge target is a defined symbol that is being newly marked live
+ // then add it to the worklist.
+ if (E.getTarget().isDefined() && !E.getTarget().isLive())
+ Worklist.push_back(&E.getTarget());
+
+ // Mark the target live.
+ E.getTarget().setLive(true);
+ }
+ }
+
+ // Collect all defined symbols to remove, then remove them.
+ {
+ LLVM_DEBUG(dbgs() << "Dead-stripping defined symbols:\n");
+ std::vector<Symbol *> SymbolsToRemove;
+ for (auto *Sym : G.defined_symbols())
+ if (!Sym->isLive())
+ SymbolsToRemove.push_back(Sym);
+ for (auto *Sym : SymbolsToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *Sym << "...\n");
+ G.removeDefinedSymbol(*Sym);
+ }
+ }
+
+ // Delete any unused blocks.
+ {
+ LLVM_DEBUG(dbgs() << "Dead-stripping blocks:\n");
+ std::vector<Block *> BlocksToRemove;
+ for (auto *B : G.blocks())
+ if (!VisitedBlocks.count(B))
+ BlocksToRemove.push_back(B);
+ for (auto *B : BlocksToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *B << "...\n");
+ G.removeBlock(*B);
+ }
+ }
+
+ // Collect all external symbols to remove, then remove them.
+ {
+ LLVM_DEBUG(dbgs() << "Removing unused external symbols:\n");
+ std::vector<Symbol *> SymbolsToRemove;
+ for (auto *Sym : G.external_symbols())
+ if (!Sym->isLive())
+ SymbolsToRemove.push_back(Sym);
+ for (auto *Sym : SymbolsToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *Sym << "...\n");
+ G.removeExternalSymbol(*Sym);
+ }
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
new file mode 100644
index 000000000000..e5d05e6b1b7b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
@@ -0,0 +1,193 @@
+//===------ JITLinkGeneric.h - Generic JIT linker utilities -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utilities. E.g. graph pruning, eh-frame parsing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+#define LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+/// Base class for a JIT linker.
+///
+/// A JITLinkerBase instance links one object file into an ongoing JIT
+/// session. Symbol resolution and finalization operations are pluggable,
+/// and called using continuation passing (passing a continuation for the
+/// remaining linker work) to allow them to be performed asynchronously.
+class JITLinkerBase {
+public:
+ JITLinkerBase(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G, PassConfiguration Passes)
+ : Ctx(std::move(Ctx)), G(std::move(G)), Passes(std::move(Passes)) {
+ assert(this->Ctx && "Ctx can not be null");
+ assert(this->G && "G can not be null");
+ }
+
+ virtual ~JITLinkerBase();
+
+protected:
+ using InFlightAlloc = JITLinkMemoryManager::InFlightAlloc;
+ using AllocResult = Expected<std::unique_ptr<InFlightAlloc>>;
+ using FinalizeResult = Expected<JITLinkMemoryManager::FinalizedAlloc>;
+
+ // Returns a reference to the graph being linked.
+ LinkGraph &getGraph() { return *G; }
+
+ // Returns true if the context says that the linker should add default
+ // passes. This can be used by JITLinkerBase implementations when deciding
+ // whether they should add default passes.
+ bool shouldAddDefaultTargetPasses(const Triple &TT) {
+ return Ctx->shouldAddDefaultTargetPasses(TT);
+ }
+
+ // Returns the PassConfiguration for this instance. This can be used by
+ // JITLinkerBase implementations to add late passes that reference their
+ // own data structures (e.g. for ELF implementations to locate / construct
+ // a GOT start symbol prior to fixup).
+ PassConfiguration &getPassConfig() { return Passes; }
+
+ // Phase 1:
+ // 1.1: Run pre-prune passes
+ // 1.2: Prune graph
+ // 1.3: Run post-prune passes
+ // 1.4: Allocate memory.
+ void linkPhase1(std::unique_ptr<JITLinkerBase> Self);
+
+ // Phase 2:
+  //   2.1: Run post-allocation passes
+  //   2.2: Notify context of final assigned symbol addresses
+  //   2.3: Identify external symbols and make an async call to resolve
+ void linkPhase2(std::unique_ptr<JITLinkerBase> Self, AllocResult AR);
+
+ // Phase 3:
+ // 3.1: Apply resolution results
+ // 3.2: Run pre-fixup passes
+ // 3.3: Fix up block contents
+ // 3.4: Run post-fixup passes
+ // 3.5: Make an async call to transfer and finalize memory.
+ void linkPhase3(std::unique_ptr<JITLinkerBase> Self,
+ Expected<AsyncLookupResult> LookupResult);
+
+ // Phase 4:
+ // 4.1: Call OnFinalized callback, handing off allocation.
+ void linkPhase4(std::unique_ptr<JITLinkerBase> Self, FinalizeResult FR);
+
+private:
+ // Run all passes in the given pass list, bailing out immediately if any pass
+ // returns an error.
+ Error runPasses(LinkGraphPassList &Passes);
+
+ // Copy block contents and apply relocations.
+ // Implemented in JITLinker.
+ virtual Error fixUpBlocks(LinkGraph &G) const = 0;
+
+ JITLinkContext::LookupMap getExternalSymbolNames() const;
+ void applyLookupResult(AsyncLookupResult LR);
+ void abandonAllocAndBailOut(std::unique_ptr<JITLinkerBase> Self, Error Err);
+
+ std::unique_ptr<JITLinkContext> Ctx;
+ std::unique_ptr<LinkGraph> G;
+ PassConfiguration Passes;
+ std::unique_ptr<InFlightAlloc> Alloc;
+};
+
+template <typename LinkerImpl> class JITLinker : public JITLinkerBase {
+public:
+ using JITLinkerBase::JITLinkerBase;
+
+ /// Link constructs a LinkerImpl instance and calls linkPhase1.
+ /// Link should be called with the constructor arguments for LinkerImpl, which
+ /// will be forwarded to the constructor.
+ template <typename... ArgTs> static void link(ArgTs &&... Args) {
+ auto L = std::make_unique<LinkerImpl>(std::forward<ArgTs>(Args)...);
+
+ // Ownership of the linker is passed into the linker's doLink function to
+ // allow it to be passed on to async continuations.
+ //
+ // FIXME: Remove LTmp once we have c++17.
+ // C++17 sequencing rules guarantee that function name expressions are
+ // sequenced before arguments, so L->linkPhase1(std::move(L), ...) will be
+ // well formed.
+ auto &LTmp = *L;
+ LTmp.linkPhase1(std::move(L));
+ }
+
+private:
+ const LinkerImpl &impl() const {
+ return static_cast<const LinkerImpl &>(*this);
+ }
+
+ Error fixUpBlocks(LinkGraph &G) const override {
+ LLVM_DEBUG(dbgs() << "Fixing up blocks:\n");
+
+ for (auto &Sec : G.sections()) {
+ bool NoAllocSection = Sec.getMemLifetime() == orc::MemLifetime::NoAlloc;
+
+ for (auto *B : Sec.blocks()) {
+ LLVM_DEBUG(dbgs() << " " << *B << ":\n");
+
+ // Copy Block data and apply fixups.
+ LLVM_DEBUG(dbgs() << " Applying fixups.\n");
+ assert((!B->isZeroFill() || all_of(B->edges(),
+ [](const Edge &E) {
+ return E.getKind() ==
+ Edge::KeepAlive;
+ })) &&
+ "Non-KeepAlive edges in zero-fill block?");
+
+ // If this is a no-alloc section then copy the block content into
+ // memory allocated on the Graph's allocator (if it hasn't been
+ // already).
+ if (NoAllocSection)
+ (void)B->getMutableContent(G);
+
+ for (auto &E : B->edges()) {
+
+ // Skip non-relocation edges.
+ if (!E.isRelocation())
+ continue;
+
+ // If B is a block in a Standard or Finalize section then make sure
+ // that no edges point to symbols in NoAlloc sections.
+ assert((NoAllocSection || !E.getTarget().isDefined() ||
+ E.getTarget().getBlock().getSection().getMemLifetime() !=
+ orc::MemLifetime::NoAlloc) &&
+ "Block in allocated section has edge pointing to no-alloc "
+ "section");
+
+ // Dispatch to LinkerImpl for fixup.
+ if (auto Err = impl().applyFixup(G, *B, E))
+ return Err;
+ }
+ }
+ }
+
+ return Error::success();
+ }
+};
+
+/// Removes dead symbols/blocks/addressables.
+///
+/// Finds the set of symbols and addressables reachable from any symbol
+/// initially marked live. All symbols/addressables not marked live at the end
+/// of this process are removed.
+void prune(LinkGraph &G);
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "jitlink"
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
new file mode 100644
index 000000000000..dacf0e6c8aa4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
@@ -0,0 +1,494 @@
+//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkMemoryManager::~JITLinkMemoryManager() = default;
+JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
+
+BasicLayout::BasicLayout(LinkGraph &G) : G(G) {
+
+ for (auto &Sec : G.sections()) {
+ // Skip empty sections, and sections with NoAlloc lifetime policies.
+ if (Sec.blocks().empty() ||
+ Sec.getMemLifetime() == orc::MemLifetime::NoAlloc)
+ continue;
+
+ auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemLifetime()}];
+ for (auto *B : Sec.blocks())
+ if (LLVM_LIKELY(!B->isZeroFill()))
+ Seg.ContentBlocks.push_back(B);
+ else
+ Seg.ZeroFillBlocks.push_back(B);
+ }
+
+ // Build Segments map.
+ auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
+ // Sort by section, address and size
+ if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
+ return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
+ if (LHS->getAddress() != RHS->getAddress())
+ return LHS->getAddress() < RHS->getAddress();
+ return LHS->getSize() < RHS->getSize();
+ };
+
+ LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
+ for (auto &KV : Segments) {
+ auto &Seg = KV.second;
+
+ llvm::sort(Seg.ContentBlocks, CompareBlocks);
+ llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);
+
+ for (auto *B : Seg.ContentBlocks) {
+ Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
+ Seg.ContentSize += B->getSize();
+ Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
+ }
+
+ uint64_t SegEndOffset = Seg.ContentSize;
+ for (auto *B : Seg.ZeroFillBlocks) {
+ SegEndOffset = alignToBlock(SegEndOffset, *B);
+ SegEndOffset += B->getSize();
+ Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
+ }
+ Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;
+
+ LLVM_DEBUG({
+ dbgs() << " Seg " << KV.first
+ << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
+ << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
+ << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
+ });
+ }
+}
+
+Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
+BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
+ ContiguousPageBasedLayoutSizes SegsSizes;
+
+ for (auto &KV : segments()) {
+ auto &AG = KV.first;
+ auto &Seg = KV.second;
+
+ if (Seg.Alignment > PageSize)
+ return make_error<StringError>("Segment alignment greater than page size",
+ inconvertibleErrorCode());
+
+ uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
+ if (AG.getMemLifetime() == orc::MemLifetime::Standard)
+ SegsSizes.StandardSegs += SegSize;
+ else
+ SegsSizes.FinalizeSegs += SegSize;
+ }
+
+ return SegsSizes;
+}
+
+Error BasicLayout::apply() {
+ for (auto &KV : Segments) {
+ auto &Seg = KV.second;
+
+ assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
+ "Empty section recorded?");
+
+ for (auto *B : Seg.ContentBlocks) {
+ // Align addr and working-mem-offset.
+ Seg.Addr = alignToBlock(Seg.Addr, *B);
+ Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);
+
+ // Update block addr.
+ B->setAddress(Seg.Addr);
+ Seg.Addr += B->getSize();
+
+ // Copy content to working memory, then update content to point at working
+ // memory.
+ memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
+ B->getSize());
+ B->setMutableContent(
+ {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
+ Seg.NextWorkingMemOffset += B->getSize();
+ }
+
+ for (auto *B : Seg.ZeroFillBlocks) {
+ // Align addr.
+ Seg.Addr = alignToBlock(Seg.Addr, *B);
+ // Update block addr.
+ B->setAddress(Seg.Addr);
+ Seg.Addr += B->getSize();
+ }
+
+ Seg.ContentBlocks.clear();
+ Seg.ZeroFillBlocks.clear();
+ }
+
+ return Error::success();
+}
+
+orc::shared::AllocActions &BasicLayout::graphAllocActions() {
+ return G.allocActions();
+}
+
+void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
+ const JITLinkDylib *JD, SegmentMap Segments,
+ OnCreatedFunction OnCreated) {
+
+ static_assert(orc::AllocGroup::NumGroups == 32,
+ "AllocGroup has changed. Section names below must be updated");
+ StringRef AGSectionNames[] = {
+ "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
+ "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
+ "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
+ "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};
+
+ auto G = std::make_unique<LinkGraph>("", Triple(), 0,
+ llvm::endianness::native, nullptr);
+ orc::AllocGroupSmallMap<Block *> ContentBlocks;
+
+ orc::ExecutorAddr NextAddr(0x100000);
+ for (auto &KV : Segments) {
+ auto &AG = KV.first;
+ auto &Seg = KV.second;
+
+ assert(AG.getMemLifetime() != orc::MemLifetime::NoAlloc &&
+ "NoAlloc segments are not supported by SimpleSegmentAlloc");
+
+ auto AGSectionName =
+ AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
+ static_cast<bool>(AG.getMemLifetime()) << 3];
+
+ auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
+ Sec.setMemLifetime(AG.getMemLifetime());
+
+ if (Seg.ContentSize != 0) {
+ NextAddr =
+ orc::ExecutorAddr(alignTo(NextAddr.getValue(), Seg.ContentAlign));
+ auto &B =
+ G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
+ NextAddr, Seg.ContentAlign.value(), 0);
+ ContentBlocks[AG] = &B;
+ NextAddr += Seg.ContentSize;
+ }
+ }
+
+ // GRef declared separately since order-of-argument-eval isn't specified.
+ auto &GRef = *G;
+ MemMgr.allocate(JD, GRef,
+ [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
+ OnCreated = std::move(OnCreated)](
+ JITLinkMemoryManager::AllocResult Alloc) mutable {
+ if (!Alloc)
+ OnCreated(Alloc.takeError());
+ else
+ OnCreated(SimpleSegmentAlloc(std::move(G),
+ std::move(ContentBlocks),
+ std::move(*Alloc)));
+ });
+}
+
+Expected<SimpleSegmentAlloc>
+SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
+ SegmentMap Segments) {
+ std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
+ auto AllocF = AllocP.get_future();
+ Create(MemMgr, JD, std::move(Segments),
+ [&](Expected<SimpleSegmentAlloc> Result) {
+ AllocP.set_value(std::move(Result));
+ });
+ return AllocF.get();
+}
+
+SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
+SimpleSegmentAlloc &
+SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
+SimpleSegmentAlloc::~SimpleSegmentAlloc() = default;
+
+SimpleSegmentAlloc::SegmentInfo
+SimpleSegmentAlloc::getSegInfo(orc::AllocGroup AG) {
+ auto I = ContentBlocks.find(AG);
+ if (I != ContentBlocks.end()) {
+ auto &B = *I->second;
+ return {B.getAddress(), B.getAlreadyMutableContent()};
+ }
+ return {};
+}
+
+SimpleSegmentAlloc::SimpleSegmentAlloc(
+ std::unique_ptr<LinkGraph> G,
+ orc::AllocGroupSmallMap<Block *> ContentBlocks,
+ std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
+ : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
+ Alloc(std::move(Alloc)) {}
+
+class InProcessMemoryManager::IPInFlightAlloc
+ : public JITLinkMemoryManager::InFlightAlloc {
+public:
+ IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
+ sys::MemoryBlock StandardSegments,
+ sys::MemoryBlock FinalizationSegments)
+ : MemMgr(MemMgr), G(&G), BL(std::move(BL)),
+ StandardSegments(std::move(StandardSegments)),
+ FinalizationSegments(std::move(FinalizationSegments)) {}
+
+ ~IPInFlightAlloc() {
+ assert(!G && "InFlight alloc neither abandoned nor finalized");
+ }
+
+ void finalize(OnFinalizedFunction OnFinalized) override {
+
+ // Apply memory protections to all segments.
+ if (auto Err = applyProtections()) {
+ OnFinalized(std::move(Err));
+ return;
+ }
+
+ // Run finalization actions.
+ auto DeallocActions = runFinalizeActions(G->allocActions());
+ if (!DeallocActions) {
+ OnFinalized(DeallocActions.takeError());
+ return;
+ }
+
+ // Release the finalize segments slab.
+ if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
+ OnFinalized(errorCodeToError(EC));
+ return;
+ }
+
+#ifndef NDEBUG
+ // Set 'G' to null to flag that we've been successfully finalized.
+ // This allows us to assert at destruction time that a call has been made
+ // to either finalize or abandon.
+ G = nullptr;
+#endif
+
+ // Continue with finalized allocation.
+ OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
+ std::move(*DeallocActions)));
+ }
+
+ void abandon(OnAbandonedFunction OnAbandoned) override {
+ Error Err = Error::success();
+ if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
+ Err = joinErrors(std::move(Err), errorCodeToError(EC));
+ if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
+ Err = joinErrors(std::move(Err), errorCodeToError(EC));
+
+#ifndef NDEBUG
+    // Set 'G' to null to flag that this allocation has been abandoned.
+ // This allows us to assert at destruction time that a call has been made
+ // to either finalize or abandon.
+ G = nullptr;
+#endif
+
+ OnAbandoned(std::move(Err));
+ }
+
+private:
+ Error applyProtections() {
+ for (auto &KV : BL.segments()) {
+ const auto &AG = KV.first;
+ auto &Seg = KV.second;
+
+ auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());
+
+ uint64_t SegSize =
+ alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
+ sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
+ if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
+ return errorCodeToError(EC);
+ if (Prot & sys::Memory::MF_EXEC)
+ sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
+ }
+ return Error::success();
+ }
+
+ InProcessMemoryManager &MemMgr;
+ LinkGraph *G;
+ BasicLayout BL;
+ sys::MemoryBlock StandardSegments;
+ sys::MemoryBlock FinalizationSegments;
+};
+
+Expected<std::unique_ptr<InProcessMemoryManager>>
+InProcessMemoryManager::Create() {
+ if (auto PageSize = sys::Process::getPageSize())
+ return std::make_unique<InProcessMemoryManager>(*PageSize);
+ else
+ return PageSize.takeError();
+}
+
+void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
+ OnAllocatedFunction OnAllocated) {
+
+ // FIXME: Just check this once on startup.
+ if (!isPowerOf2_64((uint64_t)PageSize)) {
+ OnAllocated(make_error<StringError>("Page size is not a power of 2",
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ BasicLayout BL(G);
+
+ /// Scan the request and calculate the group and total sizes.
+ /// Check that segment size is no larger than a page.
+ auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
+ if (!SegsSizes) {
+ OnAllocated(SegsSizes.takeError());
+ return;
+ }
+
+ /// Check that the total size requested (including zero fill) is not larger
+ /// than a size_t.
+ if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
+ OnAllocated(make_error<JITLinkError>(
+ "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
+ " for graph " + G.getName() + " exceeds address space"));
+ return;
+ }
+
+ // Allocate one slab for the whole thing (to make sure everything is
+ // in-range), then partition into standard and finalization blocks.
+ //
+ // FIXME: Make two separate allocations in the future to reduce
+ // fragmentation: finalization segments will usually be a single page, and
+ // standard segments are likely to be more than one page. Where multiple
+ // allocations are in-flight at once (likely) the current approach will leave
+ // a lot of single-page holes.
+ sys::MemoryBlock Slab;
+ sys::MemoryBlock StandardSegsMem;
+ sys::MemoryBlock FinalizeSegsMem;
+ {
+ const sys::Memory::ProtectionFlags ReadWrite =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+
+ std::error_code EC;
+ Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
+ ReadWrite, EC);
+
+ if (EC) {
+ OnAllocated(errorCodeToError(EC));
+ return;
+ }
+
+ // Zero-fill the whole slab up-front.
+ memset(Slab.base(), 0, Slab.allocatedSize());
+
+ StandardSegsMem = {Slab.base(),
+ static_cast<size_t>(SegsSizes->StandardSegs)};
+ FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
+ static_cast<size_t>(SegsSizes->FinalizeSegs)};
+ }
+
+ auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(StandardSegsMem.base());
+ auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(FinalizeSegsMem.base());
+
+ LLVM_DEBUG({
+ dbgs() << "InProcessMemoryManager allocated:\n";
+ if (SegsSizes->StandardSegs)
+ dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
+ NextStandardSegAddr + StandardSegsMem.allocatedSize())
+ << " to stardard segs\n";
+ else
+ dbgs() << " no standard segs\n";
+ if (SegsSizes->FinalizeSegs)
+ dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
+ NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
+ << " to finalize segs\n";
+ else
+ dbgs() << " no finalize segs\n";
+ });
+
+ // Build ProtMap, assign addresses.
+ for (auto &KV : BL.segments()) {
+ auto &AG = KV.first;
+ auto &Seg = KV.second;
+
+ auto &SegAddr = (AG.getMemLifetime() == orc::MemLifetime::Standard)
+ ? NextStandardSegAddr
+ : NextFinalizeSegAddr;
+
+ Seg.WorkingMem = SegAddr.toPtr<char *>();
+ Seg.Addr = SegAddr;
+
+ SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
+ }
+
+ if (auto Err = BL.apply()) {
+ OnAllocated(std::move(Err));
+ return;
+ }
+
+ OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
+ std::move(StandardSegsMem),
+ std::move(FinalizeSegsMem)));
+}
+
+void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
+ OnDeallocatedFunction OnDeallocated) {
+ std::vector<sys::MemoryBlock> StandardSegmentsList;
+ std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;
+
+ {
+ std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
+ for (auto &Alloc : Allocs) {
+ auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
+ StandardSegmentsList.push_back(std::move(FA->StandardSegments));
+ DeallocActionsList.push_back(std::move(FA->DeallocActions));
+ FA->~FinalizedAllocInfo();
+ FinalizedAllocInfos.Deallocate(FA);
+ }
+ }
+
+ Error DeallocErr = Error::success();
+
+ while (!DeallocActionsList.empty()) {
+ auto &DeallocActions = DeallocActionsList.back();
+ auto &StandardSegments = StandardSegmentsList.back();
+
+ /// Run any deallocate calls.
+ while (!DeallocActions.empty()) {
+ if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
+ DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
+ DeallocActions.pop_back();
+ }
+
+ /// Release the standard segments slab.
+ if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
+ DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));
+
+ DeallocActionsList.pop_back();
+ StandardSegmentsList.pop_back();
+ }
+
+ OnDeallocated(std::move(DeallocErr));
+}
+
+JITLinkMemoryManager::FinalizedAlloc
+InProcessMemoryManager::createFinalizedAlloc(
+ sys::MemoryBlock StandardSegments,
+ std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
+ std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
+ auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
+ new (FA) FinalizedAllocInfo(
+ {std::move(StandardSegments), std::move(DeallocActions)});
+ return FinalizedAlloc(orc::ExecutorAddr::fromPtr(FA));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO.cpp
new file mode 100644
index 000000000000..40086ccf2b66
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO.cpp
@@ -0,0 +1,90 @@
+//===-------------- MachO.cpp - JIT linker function for MachO -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SwapByteOrder.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject(MemoryBufferRef ObjectBuffer) {
+ StringRef Data = ObjectBuffer.getBuffer();
+ if (Data.size() < 4)
+ return make_error<JITLinkError>("Truncated MachO buffer \"" +
+ ObjectBuffer.getBufferIdentifier() + "\"");
+
+ uint32_t Magic;
+ memcpy(&Magic, Data.data(), sizeof(uint32_t));
+ LLVM_DEBUG({
+ dbgs() << "jitLink_MachO: magic = " << format("0x%08" PRIx32, Magic)
+ << ", identifier = \"" << ObjectBuffer.getBufferIdentifier()
+ << "\"\n";
+ });
+
+ if (Magic == MachO::MH_MAGIC || Magic == MachO::MH_CIGAM)
+ return make_error<JITLinkError>("MachO 32-bit platforms not supported");
+ else if (Magic == MachO::MH_MAGIC_64 || Magic == MachO::MH_CIGAM_64) {
+
+ if (Data.size() < sizeof(MachO::mach_header_64))
+ return make_error<JITLinkError>("Truncated MachO buffer \"" +
+ ObjectBuffer.getBufferIdentifier() +
+ "\"");
+
+ // Read the CPU type from the header.
+ uint32_t CPUType;
+ memcpy(&CPUType, Data.data() + 4, sizeof(uint32_t));
+ if (Magic == MachO::MH_CIGAM_64)
+ CPUType = llvm::byteswap<uint32_t>(CPUType);
+
+ LLVM_DEBUG({
+ dbgs() << "jitLink_MachO: cputype = " << format("0x%08" PRIx32, CPUType)
+ << "\n";
+ });
+
+ switch (CPUType) {
+ case MachO::CPU_TYPE_ARM64:
+ return createLinkGraphFromMachOObject_arm64(ObjectBuffer);
+ case MachO::CPU_TYPE_X86_64:
+ return createLinkGraphFromMachOObject_x86_64(ObjectBuffer);
+ }
+ return make_error<JITLinkError>("MachO-64 CPU type not valid");
+ } else
+ return make_error<JITLinkError>("Unrecognized MachO magic value");
+}
+
+void link_MachO(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ switch (G->getTargetTriple().getArch()) {
+ case Triple::aarch64:
+ return link_MachO_arm64(std::move(G), std::move(Ctx));
+ case Triple::x86_64:
+ return link_MachO_x86_64(std::move(G), std::move(Ctx));
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>("MachO-64 CPU type not valid"));
+ return;
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
new file mode 100644
index 000000000000..bb21f633d982
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
@@ -0,0 +1,860 @@
+//=--------- MachOLinkGraphBuilder.cpp - MachO LinkGraph builder ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachOLinkGraphBuilder.h"
+#include <optional>
+
+#define DEBUG_TYPE "jitlink"
+
+static const char *CommonSectionName = "__common";
+
+namespace llvm {
+namespace jitlink {
+
+MachOLinkGraphBuilder::~MachOLinkGraphBuilder() = default;
+
+Expected<std::unique_ptr<LinkGraph>> MachOLinkGraphBuilder::buildGraph() {
+
+ // We only operate on relocatable objects.
+ if (!Obj.isRelocatableObject())
+ return make_error<JITLinkError>("Object is not a relocatable MachO");
+
+ if (auto Err = createNormalizedSections())
+ return std::move(Err);
+
+ if (auto Err = createNormalizedSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifyRegularSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifySectionsWithCustomParsers())
+ return std::move(Err);
+
+ if (auto Err = addRelocations())
+ return std::move(Err);
+
+ return std::move(G);
+}
+
+MachOLinkGraphBuilder::MachOLinkGraphBuilder(
+ const object::MachOObjectFile &Obj, Triple TT, SubtargetFeatures Features,
+ LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
+ : Obj(Obj),
+ G(std::make_unique<LinkGraph>(std::string(Obj.getFileName()),
+ std::move(TT), std::move(Features),
+ getPointerSize(Obj), getEndianness(Obj),
+ std::move(GetEdgeKindName))) {
+ auto &MachHeader = Obj.getHeader64();
+ SubsectionsViaSymbols = MachHeader.flags & MachO::MH_SUBSECTIONS_VIA_SYMBOLS;
+}
+
+void MachOLinkGraphBuilder::addCustomSectionParser(
+ StringRef SectionName, SectionParserFunction Parser) {
+ assert(!CustomSectionParserFunctions.count(SectionName) &&
+ "Custom parser for this section already exists");
+ CustomSectionParserFunctions[SectionName] = std::move(Parser);
+}
+
+Linkage MachOLinkGraphBuilder::getLinkage(uint16_t Desc) {
+ if ((Desc & MachO::N_WEAK_DEF) || (Desc & MachO::N_WEAK_REF))
+ return Linkage::Weak;
+ return Linkage::Strong;
+}
+
+Scope MachOLinkGraphBuilder::getScope(StringRef Name, uint8_t Type) {
+ if (Type & MachO::N_EXT) {
+ if ((Type & MachO::N_PEXT) || Name.starts_with("l"))
+ return Scope::Hidden;
+ else
+ return Scope::Default;
+ }
+ return Scope::Local;
+}
+
+bool MachOLinkGraphBuilder::isAltEntry(const NormalizedSymbol &NSym) {
+ return NSym.Desc & MachO::N_ALT_ENTRY;
+}
+
+bool MachOLinkGraphBuilder::isDebugSection(const NormalizedSection &NSec) {
+ return (NSec.Flags & MachO::S_ATTR_DEBUG &&
+ strcmp(NSec.SegName, "__DWARF") == 0);
+}
+
+bool MachOLinkGraphBuilder::isZeroFillSection(const NormalizedSection &NSec) {
+ switch (NSec.Flags & MachO::SECTION_TYPE) {
+ case MachO::S_ZEROFILL:
+ case MachO::S_GB_ZEROFILL:
+ case MachO::S_THREAD_LOCAL_ZEROFILL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+unsigned
+MachOLinkGraphBuilder::getPointerSize(const object::MachOObjectFile &Obj) {
+ return Obj.is64Bit() ? 8 : 4;
+}
+
+llvm::endianness
+MachOLinkGraphBuilder::getEndianness(const object::MachOObjectFile &Obj) {
+ return Obj.isLittleEndian() ? llvm::endianness::little
+ : llvm::endianness::big;
+}
+
+Section &MachOLinkGraphBuilder::getCommonSection() {
+ if (!CommonSection)
+ CommonSection = &G->createSection(CommonSectionName,
+ orc::MemProt::Read | orc::MemProt::Write);
+ return *CommonSection;
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSections() {
+ // Build normalized sections. Verifies that section data is in-range (for
+ // sections with content) and that address ranges are non-overlapping.
+
+ LLVM_DEBUG(dbgs() << "Creating normalized sections...\n");
+
+ for (auto &SecRef : Obj.sections()) {
+ NormalizedSection NSec;
+ uint32_t DataOffset = 0;
+
+ auto SecIndex = Obj.getSectionIndex(SecRef.getRawDataRefImpl());
+
+ if (Obj.is64Bit()) {
+ const MachO::section_64 &Sec64 =
+ Obj.getSection64(SecRef.getRawDataRefImpl());
+
+ memcpy(&NSec.SectName, &Sec64.sectname, 16);
+ NSec.SectName[16] = '\0';
+ memcpy(&NSec.SegName, Sec64.segname, 16);
+ NSec.SegName[16] = '\0';
+
+ NSec.Address = orc::ExecutorAddr(Sec64.addr);
+ NSec.Size = Sec64.size;
+ NSec.Alignment = 1ULL << Sec64.align;
+ NSec.Flags = Sec64.flags;
+ DataOffset = Sec64.offset;
+ } else {
+ const MachO::section &Sec32 = Obj.getSection(SecRef.getRawDataRefImpl());
+
+ memcpy(&NSec.SectName, &Sec32.sectname, 16);
+ NSec.SectName[16] = '\0';
+ memcpy(&NSec.SegName, Sec32.segname, 16);
+ NSec.SegName[16] = '\0';
+
+ NSec.Address = orc::ExecutorAddr(Sec32.addr);
+ NSec.Size = Sec32.size;
+ NSec.Alignment = 1ULL << Sec32.align;
+ NSec.Flags = Sec32.flags;
+ DataOffset = Sec32.offset;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " " << NSec.SegName << "," << NSec.SectName << ": "
+ << formatv("{0:x16}", NSec.Address) << " -- "
+ << formatv("{0:x16}", NSec.Address + NSec.Size)
+ << ", align: " << NSec.Alignment << ", index: " << SecIndex
+ << "\n";
+ });
+
+ // Get the section data if any.
+ if (!isZeroFillSection(NSec)) {
+ if (DataOffset + NSec.Size > Obj.getData().size())
+ return make_error<JITLinkError>(
+ "Section data extends past end of file");
+
+ NSec.Data = Obj.getData().data() + DataOffset;
+ }
+
+ // Get prot flags.
+ // FIXME: Make sure this test is correct (it's probably missing cases
+ // as-is).
+ orc::MemProt Prot;
+ if (NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS)
+ Prot = orc::MemProt::Read | orc::MemProt::Exec;
+ else
+ Prot = orc::MemProt::Read | orc::MemProt::Write;
+
+ auto FullyQualifiedName =
+ G->allocateContent(StringRef(NSec.SegName) + "," + NSec.SectName);
+ NSec.GraphSection = &G->createSection(
+ StringRef(FullyQualifiedName.data(), FullyQualifiedName.size()), Prot);
+
+ // TODO: Are there any other criteria for NoAlloc lifetime?
+ if (NSec.Flags & MachO::S_ATTR_DEBUG)
+ NSec.GraphSection->setMemLifetime(orc::MemLifetime::NoAlloc);
+
+ IndexToSection.insert(std::make_pair(SecIndex, std::move(NSec)));
+ }
+
+ std::vector<NormalizedSection *> Sections;
+ Sections.reserve(IndexToSection.size());
+ for (auto &KV : IndexToSection)
+ Sections.push_back(&KV.second);
+
+ // If we didn't end up creating any sections then bail out. The code below
+ // assumes that we have at least one section.
+ if (Sections.empty())
+ return Error::success();
+
+ llvm::sort(Sections,
+ [](const NormalizedSection *LHS, const NormalizedSection *RHS) {
+ assert(LHS && RHS && "Null section?");
+ if (LHS->Address != RHS->Address)
+ return LHS->Address < RHS->Address;
+ return LHS->Size < RHS->Size;
+ });
+
+ for (unsigned I = 0, E = Sections.size() - 1; I != E; ++I) {
+ auto &Cur = *Sections[I];
+ auto &Next = *Sections[I + 1];
+ if (Next.Address < Cur.Address + Cur.Size)
+ return make_error<JITLinkError>(
+ "Address range for section " +
+ formatv("\"{0}/{1}\" [ {2:x16} -- {3:x16} ] ", Cur.SegName,
+ Cur.SectName, Cur.Address, Cur.Address + Cur.Size) +
+ "overlaps section \"" + Next.SegName + "/" + Next.SectName + "\"" +
+ formatv("\"{0}/{1}\" [ {2:x16} -- {3:x16} ] ", Next.SegName,
+ Next.SectName, Next.Address, Next.Address + Next.Size));
+ }
+
+ return Error::success();
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSymbols() {
+ LLVM_DEBUG(dbgs() << "Creating normalized symbols...\n");
+
+ for (auto &SymRef : Obj.symbols()) {
+
+ unsigned SymbolIndex = Obj.getSymbolIndex(SymRef.getRawDataRefImpl());
+ uint64_t Value;
+ uint32_t NStrX;
+ uint8_t Type;
+ uint8_t Sect;
+ uint16_t Desc;
+
+ if (Obj.is64Bit()) {
+ const MachO::nlist_64 &NL64 =
+ Obj.getSymbol64TableEntry(SymRef.getRawDataRefImpl());
+ Value = NL64.n_value;
+ NStrX = NL64.n_strx;
+ Type = NL64.n_type;
+ Sect = NL64.n_sect;
+ Desc = NL64.n_desc;
+ } else {
+ const MachO::nlist &NL32 =
+ Obj.getSymbolTableEntry(SymRef.getRawDataRefImpl());
+ Value = NL32.n_value;
+ NStrX = NL32.n_strx;
+ Type = NL32.n_type;
+ Sect = NL32.n_sect;
+ Desc = NL32.n_desc;
+ }
+
+ // Skip stabs.
+ // FIXME: Are there other symbols we should be skipping?
+ if (Type & MachO::N_STAB)
+ continue;
+
+ std::optional<StringRef> Name;
+ if (NStrX) {
+ if (auto NameOrErr = SymRef.getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+ } else if (Type & MachO::N_EXT)
+ return make_error<JITLinkError>("Symbol at index " +
+ formatv("{0}", SymbolIndex) +
+ " has no name (string table index 0), "
+ "but N_EXT bit is set");
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ if (!Name)
+ dbgs() << "<anonymous symbol>";
+ else
+ dbgs() << *Name;
+ dbgs() << ": value = " << formatv("{0:x16}", Value)
+ << ", type = " << formatv("{0:x2}", Type)
+ << ", desc = " << formatv("{0:x4}", Desc) << ", sect = ";
+ if (Sect)
+ dbgs() << static_cast<unsigned>(Sect - 1);
+ else
+ dbgs() << "none";
+ dbgs() << "\n";
+ });
+
+ // If this symbol has a section, verify that the addresses line up.
+ if (Sect != 0) {
+ auto NSec = findSectionByIndex(Sect - 1);
+ if (!NSec)
+ return NSec.takeError();
+
+ if (orc::ExecutorAddr(Value) < NSec->Address ||
+ orc::ExecutorAddr(Value) > NSec->Address + NSec->Size)
+ return make_error<JITLinkError>("Address " + formatv("{0:x}", Value) +
+ " for symbol " + *Name +
+ " does not fall within section");
+
+ if (!NSec->GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping: Symbol is in section " << NSec->SegName << "/"
+ << NSec->SectName
+ << " which has no associated graph section.\n";
+ });
+ continue;
+ }
+ }
+
+ IndexToSymbol[SymbolIndex] =
+ &createNormalizedSymbol(*Name, Value, Type, Sect, Desc,
+ getLinkage(Desc), getScope(*Name, Type));
+ }
+
+ return Error::success();
+}
+
+void MachOLinkGraphBuilder::addSectionStartSymAndBlock(
+ unsigned SecIndex, Section &GraphSec, orc::ExecutorAddr Address,
+ const char *Data, orc::ExecutorAddrDiff Size, uint32_t Alignment,
+ bool IsLive) {
+ Block &B =
+ Data ? G->createContentBlock(GraphSec, ArrayRef<char>(Data, Size),
+ Address, Alignment, 0)
+ : G->createZeroFillBlock(GraphSec, Size, Address, Alignment, 0);
+ auto &Sym = G->addAnonymousSymbol(B, 0, Size, false, IsLive);
+ auto SecI = IndexToSection.find(SecIndex);
+ assert(SecI != IndexToSection.end() && "SecIndex invalid");
+ auto &NSec = SecI->second;
+ assert(!NSec.CanonicalSymbols.count(Sym.getAddress()) &&
+ "Anonymous block start symbol clashes with existing symbol address");
+ NSec.CanonicalSymbols[Sym.getAddress()] = &Sym;
+}
+
+Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
+
+ LLVM_DEBUG(dbgs() << "Creating graph symbols...\n");
+
+ // We only have 256 section indexes: Use a vector rather than a map.
+ std::vector<std::vector<NormalizedSymbol *>> SecIndexToSymbols;
+ SecIndexToSymbols.resize(256);
+
+ // Create commons, externs, and absolutes, and partition all other symbols by
+ // section.
+ for (auto &KV : IndexToSymbol) {
+ auto &NSym = *KV.second;
+
+ switch (NSym.Type & MachO::N_TYPE) {
+ case MachO::N_UNDF:
+ if (NSym.Value) {
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous common symbol at index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addDefinedSymbol(
+ G->createZeroFillBlock(getCommonSection(),
+ orc::ExecutorAddrDiff(NSym.Value),
+ orc::ExecutorAddr(),
+ 1ull << MachO::GET_COMM_ALIGN(NSym.Desc), 0),
+ 0, *NSym.Name, orc::ExecutorAddrDiff(NSym.Value), Linkage::Strong,
+ NSym.S, false, NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ } else {
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous external symbol at "
+ "index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addExternalSymbol(
+ *NSym.Name, 0, (NSym.Desc & MachO::N_WEAK_REF) != 0);
+ }
+ break;
+ case MachO::N_ABS:
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous absolute symbol at index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addAbsoluteSymbol(
+ *NSym.Name, orc::ExecutorAddr(NSym.Value), 0, Linkage::Strong,
+ getScope(*NSym.Name, NSym.Type), NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ break;
+ case MachO::N_SECT:
+ SecIndexToSymbols[NSym.Sect - 1].push_back(&NSym);
+ break;
+ case MachO::N_PBUD:
+ return make_error<JITLinkError>(
+ "Unupported N_PBUD symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ case MachO::N_INDR:
+ return make_error<JITLinkError>(
+ "Unupported N_INDR symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ default:
+ return make_error<JITLinkError>(
+ "Unrecognized symbol type " + Twine(NSym.Type & MachO::N_TYPE) +
+ " for symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ }
+ }
+
+ // Loop over sections performing regular graphification for those that
+ // don't have custom parsers.
+ for (auto &KV : IndexToSection) {
+ auto SecIndex = KV.first;
+ auto &NSec = KV.second;
+
+ if (!NSec.GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " " << NSec.SegName << "/" << NSec.SectName
+ << " has no graph section. Skipping.\n";
+ });
+ continue;
+ }
+
+ // Skip sections with custom parsers.
+ if (CustomSectionParserFunctions.count(NSec.GraphSection->getName())) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping section " << NSec.GraphSection->getName()
+ << " as it has a custom parser.\n";
+ });
+ continue;
+ } else if ((NSec.Flags & MachO::SECTION_TYPE) ==
+ MachO::S_CSTRING_LITERALS) {
+ if (auto Err = graphifyCStringSection(
+ NSec, std::move(SecIndexToSymbols[SecIndex])))
+ return Err;
+ continue;
+ } else
+ LLVM_DEBUG({
+ dbgs() << " Graphifying regular section "
+ << NSec.GraphSection->getName() << "...\n";
+ });
+
+ bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
+ bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
+
+ auto &SecNSymStack = SecIndexToSymbols[SecIndex];
+
+ // If this section is non-empty but there are no symbols covering it then
+ // create one block and anonymous symbol to cover the entire section.
+ if (SecNSymStack.empty()) {
+ if (NSec.Size > 0) {
+ LLVM_DEBUG({
+ dbgs() << " Section non-empty, but contains no symbols. "
+ "Creating anonymous block to cover "
+ << formatv("{0:x16}", NSec.Address) << " -- "
+ << formatv("{0:x16}", NSec.Address + NSec.Size) << "\n";
+ });
+ addSectionStartSymAndBlock(SecIndex, *NSec.GraphSection, NSec.Address,
+ NSec.Data, NSec.Size, NSec.Alignment,
+ SectionIsNoDeadStrip);
+ } else
+ LLVM_DEBUG({
+ dbgs() << " Section empty and contains no symbols. Skipping.\n";
+ });
+ continue;
+ }
+
+ // Sort the symbol stack in by address, alt-entry status, scope, and name.
+ // We sort in reverse order so that symbols will be visited in the right
+ // order when we pop off the stack below.
+ llvm::sort(SecNSymStack, [](const NormalizedSymbol *LHS,
+ const NormalizedSymbol *RHS) {
+ if (LHS->Value != RHS->Value)
+ return LHS->Value > RHS->Value;
+ if (isAltEntry(*LHS) != isAltEntry(*RHS))
+ return isAltEntry(*RHS);
+ if (LHS->S != RHS->S)
+ return static_cast<uint8_t>(LHS->S) < static_cast<uint8_t>(RHS->S);
+ return LHS->Name < RHS->Name;
+ });
+
+ // The first symbol in a section cannot be an alt-entry symbol.
+ if (!SecNSymStack.empty() && isAltEntry(*SecNSymStack.back()))
+ return make_error<JITLinkError>(
+ "First symbol in " + NSec.GraphSection->getName() + " is alt-entry");
+
+ // If the section is non-empty but there is no symbol covering the start
+ // address then add an anonymous one.
+ if (orc::ExecutorAddr(SecNSymStack.back()->Value) != NSec.Address) {
+ auto AnonBlockSize =
+ orc::ExecutorAddr(SecNSymStack.back()->Value) - NSec.Address;
+ LLVM_DEBUG({
+ dbgs() << " Section start not covered by symbol. "
+ << "Creating anonymous block to cover [ " << NSec.Address
+ << " -- " << (NSec.Address + AnonBlockSize) << " ]\n";
+ });
+ addSectionStartSymAndBlock(SecIndex, *NSec.GraphSection, NSec.Address,
+ NSec.Data, AnonBlockSize, NSec.Alignment,
+ SectionIsNoDeadStrip);
+ }
+
+ // Visit section symbols in order by popping off the reverse-sorted stack,
+ // building graph symbols as we go.
+ //
+ // If MH_SUBSECTIONS_VIA_SYMBOLS is set we'll build a block for each
+ // alt-entry chain.
+ //
+ // If MH_SUBSECTIONS_VIA_SYMBOLS is not set then we'll just build one block
+ // for the whole section.
+ while (!SecNSymStack.empty()) {
+ SmallVector<NormalizedSymbol *, 8> BlockSyms;
+
+ // Get the symbols in this alt-entry chain, or the whole section (if
+ // !SubsectionsViaSymbols).
+ BlockSyms.push_back(SecNSymStack.back());
+ SecNSymStack.pop_back();
+ while (!SecNSymStack.empty() &&
+ (isAltEntry(*SecNSymStack.back()) ||
+ SecNSymStack.back()->Value == BlockSyms.back()->Value ||
+ !SubsectionsViaSymbols)) {
+ BlockSyms.push_back(SecNSymStack.back());
+ SecNSymStack.pop_back();
+ }
+
+ // BlockSyms now contains the block symbols in reverse canonical order.
+ auto BlockStart = orc::ExecutorAddr(BlockSyms.front()->Value);
+ orc::ExecutorAddr BlockEnd =
+ SecNSymStack.empty() ? NSec.Address + NSec.Size
+ : orc::ExecutorAddr(SecNSymStack.back()->Value);
+ orc::ExecutorAddrDiff BlockOffset = BlockStart - NSec.Address;
+ orc::ExecutorAddrDiff BlockSize = BlockEnd - BlockStart;
+
+ LLVM_DEBUG({
+ dbgs() << " Creating block for " << formatv("{0:x16}", BlockStart)
+ << " -- " << formatv("{0:x16}", BlockEnd) << ": "
+ << NSec.GraphSection->getName() << " + "
+ << formatv("{0:x16}", BlockOffset) << " with "
+ << BlockSyms.size() << " symbol(s)...\n";
+ });
+
+ Block &B =
+ NSec.Data
+ ? G->createContentBlock(
+ *NSec.GraphSection,
+ ArrayRef<char>(NSec.Data + BlockOffset, BlockSize),
+ BlockStart, NSec.Alignment, BlockStart % NSec.Alignment)
+ : G->createZeroFillBlock(*NSec.GraphSection, BlockSize,
+ BlockStart, NSec.Alignment,
+ BlockStart % NSec.Alignment);
+
+ std::optional<orc::ExecutorAddr> LastCanonicalAddr;
+ auto SymEnd = BlockEnd;
+ while (!BlockSyms.empty()) {
+ auto &NSym = *BlockSyms.back();
+ BlockSyms.pop_back();
+
+ bool SymLive =
+ (NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
+
+ auto &Sym = createStandardGraphSymbol(
+ NSym, B, SymEnd - orc::ExecutorAddr(NSym.Value), SectionIsText,
+ SymLive, LastCanonicalAddr != orc::ExecutorAddr(NSym.Value));
+
+ if (LastCanonicalAddr != Sym.getAddress()) {
+ if (LastCanonicalAddr)
+ SymEnd = *LastCanonicalAddr;
+ LastCanonicalAddr = Sym.getAddress();
+ }
+ }
+ }
+ }
+
+ return Error::success();
+}
+
+Symbol &MachOLinkGraphBuilder::createStandardGraphSymbol(NormalizedSymbol &NSym,
+ Block &B, size_t Size,
+ bool IsText,
+ bool IsNoDeadStrip,
+ bool IsCanonical) {
+
+ LLVM_DEBUG({
+ dbgs() << " " << formatv("{0:x16}", NSym.Value) << " -- "
+ << formatv("{0:x16}", NSym.Value + Size) << ": ";
+ if (!NSym.Name)
+ dbgs() << "<anonymous symbol>";
+ else
+ dbgs() << NSym.Name;
+ if (IsText)
+ dbgs() << " [text]";
+ if (IsNoDeadStrip)
+ dbgs() << " [no-dead-strip]";
+ if (!IsCanonical)
+ dbgs() << " [non-canonical]";
+ dbgs() << "\n";
+ });
+
+ auto SymOffset = orc::ExecutorAddr(NSym.Value) - B.getAddress();
+ auto &Sym =
+ NSym.Name
+ ? G->addDefinedSymbol(B, SymOffset, *NSym.Name, Size, NSym.L, NSym.S,
+ IsText, IsNoDeadStrip)
+ : G->addAnonymousSymbol(B, SymOffset, Size, IsText, IsNoDeadStrip);
+ NSym.GraphSymbol = &Sym;
+
+ if (IsCanonical)
+ setCanonicalSymbol(getSectionByIndex(NSym.Sect - 1), Sym);
+
+ return Sym;
+}
+
+Error MachOLinkGraphBuilder::graphifySectionsWithCustomParsers() {
+ // Graphify special sections.
+ for (auto &KV : IndexToSection) {
+ auto &NSec = KV.second;
+
+ // Skip non-graph sections.
+ if (!NSec.GraphSection)
+ continue;
+
+ auto HI = CustomSectionParserFunctions.find(NSec.GraphSection->getName());
+ if (HI != CustomSectionParserFunctions.end()) {
+ auto &Parse = HI->second;
+ if (auto Err = Parse(NSec))
+ return Err;
+ }
+ }
+
+ return Error::success();
+}
+
+Error MachOLinkGraphBuilder::graphifyCStringSection(
+ NormalizedSection &NSec, std::vector<NormalizedSymbol *> NSyms) {
+ assert(NSec.GraphSection && "C string literal section missing graph section");
+ assert(NSec.Data && "C string literal section has no data");
+
+ LLVM_DEBUG({
+ dbgs() << " Graphifying C-string literal section "
+ << NSec.GraphSection->getName() << "\n";
+ });
+
+ if (NSec.Data[NSec.Size - 1] != '\0')
+ return make_error<JITLinkError>("C string literal section " +
+ NSec.GraphSection->getName() +
+ " does not end with null terminator");
+
+ // Sort into reverse order to use as a stack.
+ llvm::sort(NSyms,
+ [](const NormalizedSymbol *LHS, const NormalizedSymbol *RHS) {
+ if (LHS->Value != RHS->Value)
+ return LHS->Value > RHS->Value;
+ if (LHS->L != RHS->L)
+ return LHS->L > RHS->L;
+ if (LHS->S != RHS->S)
+ return LHS->S > RHS->S;
+ if (RHS->Name) {
+ if (!LHS->Name)
+ return true;
+ return *LHS->Name > *RHS->Name;
+ }
+ return false;
+ });
+
+ bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
+ bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
+ orc::ExecutorAddrDiff BlockStart = 0;
+
+ // Scan section for null characters.
+ for (size_t I = 0; I != NSec.Size; ++I) {
+ if (NSec.Data[I] == '\0') {
+ size_t BlockSize = I + 1 - BlockStart;
+ // Create a block for this null terminated string.
+ auto &B = G->createContentBlock(*NSec.GraphSection,
+ {NSec.Data + BlockStart, BlockSize},
+ NSec.Address + BlockStart, NSec.Alignment,
+ BlockStart % NSec.Alignment);
+
+ LLVM_DEBUG({
+ dbgs() << " Created block " << B.getRange()
+ << ", align = " << B.getAlignment()
+ << ", align-ofs = " << B.getAlignmentOffset() << " for \"";
+ for (size_t J = 0; J != std::min(B.getSize(), size_t(16)); ++J)
+ switch (B.getContent()[J]) {
+ case '\0': break;
+ case '\n': dbgs() << "\\n"; break;
+ case '\t': dbgs() << "\\t"; break;
+ default: dbgs() << B.getContent()[J]; break;
+ }
+ if (B.getSize() > 16)
+ dbgs() << "...";
+ dbgs() << "\"\n";
+ });
+
+ // If there's no symbol at the start of this block then create one.
+ if (NSyms.empty() ||
+ orc::ExecutorAddr(NSyms.back()->Value) != B.getAddress()) {
+ auto &S = G->addAnonymousSymbol(B, 0, BlockSize, false, false);
+ setCanonicalSymbol(NSec, S);
+ LLVM_DEBUG({
+ dbgs() << " Adding symbol for c-string block " << B.getRange()
+ << ": <anonymous symbol> at offset 0\n";
+ });
+ }
+
+ // Process any remaining symbols that point into this block.
+ auto LastCanonicalAddr = B.getAddress() + BlockSize;
+ while (!NSyms.empty() && orc::ExecutorAddr(NSyms.back()->Value) <
+ B.getAddress() + BlockSize) {
+ auto &NSym = *NSyms.back();
+ size_t SymSize = (B.getAddress() + BlockSize) -
+ orc::ExecutorAddr(NSyms.back()->Value);
+ bool SymLive =
+ (NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
+
+ bool IsCanonical = false;
+ if (LastCanonicalAddr != orc::ExecutorAddr(NSym.Value)) {
+ IsCanonical = true;
+ LastCanonicalAddr = orc::ExecutorAddr(NSym.Value);
+ }
+
+ auto &Sym = createStandardGraphSymbol(NSym, B, SymSize, SectionIsText,
+ SymLive, IsCanonical);
+ (void)Sym;
+ LLVM_DEBUG({
+ dbgs() << " Adding symbol for c-string block " << B.getRange()
+ << ": "
+ << (Sym.hasName() ? Sym.getName() : "<anonymous symbol>")
+ << " at offset " << formatv("{0:x}", Sym.getOffset()) << "\n";
+ });
+
+ NSyms.pop_back();
+ }
+
+ BlockStart += BlockSize;
+ }
+ }
+
+ assert(llvm::all_of(NSec.GraphSection->blocks(),
+ [](Block *B) { return isCStringBlock(*B); }) &&
+ "All blocks in section should hold single c-strings");
+
+ return Error::success();
+}
+
+Error CompactUnwindSplitter::operator()(LinkGraph &G) {
+ auto *CUSec = G.findSectionByName(CompactUnwindSectionName);
+ if (!CUSec)
+ return Error::success();
+
+ if (!G.getTargetTriple().isOSBinFormatMachO())
+ return make_error<JITLinkError>(
+ "Error linking " + G.getName() +
+ ": compact unwind splitting not supported on non-macho target " +
+ G.getTargetTriple().str());
+
+ unsigned CURecordSize = 0;
+ unsigned PersonalityEdgeOffset = 0;
+ unsigned LSDAEdgeOffset = 0;
+ switch (G.getTargetTriple().getArch()) {
+ case Triple::aarch64:
+ case Triple::x86_64:
+ // 64-bit compact-unwind record format:
+ // Range start: 8 bytes.
+ // Range size: 4 bytes.
+ // CU encoding: 4 bytes.
+ // Personality: 8 bytes.
+ // LSDA: 8 bytes.
+ CURecordSize = 32;
+ PersonalityEdgeOffset = 16;
+ LSDAEdgeOffset = 24;
+ break;
+ default:
+ return make_error<JITLinkError>(
+ "Error linking " + G.getName() +
+ ": compact unwind splitting not supported on " +
+ G.getTargetTriple().getArchName());
+ }
+
+ std::vector<Block *> OriginalBlocks(CUSec->blocks().begin(),
+ CUSec->blocks().end());
+ LLVM_DEBUG({
+ dbgs() << "In " << G.getName() << " splitting compact unwind section "
+ << CompactUnwindSectionName << " containing "
+ << OriginalBlocks.size() << " initial blocks...\n";
+ });
+
+ while (!OriginalBlocks.empty()) {
+ auto *B = OriginalBlocks.back();
+ OriginalBlocks.pop_back();
+
+ if (B->getSize() == 0) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping empty block at "
+ << formatv("{0:x16}", B->getAddress()) << "\n";
+ });
+ continue;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " Splitting block at " << formatv("{0:x16}", B->getAddress())
+ << " into " << (B->getSize() / CURecordSize)
+ << " compact unwind record(s)\n";
+ });
+
+ if (B->getSize() % CURecordSize)
+ return make_error<JITLinkError>(
+ "Error splitting compact unwind record in " + G.getName() +
+ ": block at " + formatv("{0:x}", B->getAddress()) + " has size " +
+ formatv("{0:x}", B->getSize()) +
+ " (not a multiple of CU record size of " +
+ formatv("{0:x}", CURecordSize) + ")");
+
+ unsigned NumBlocks = B->getSize() / CURecordSize;
+ LinkGraph::SplitBlockCache C;
+
+ for (unsigned I = 0; I != NumBlocks; ++I) {
+ auto &CURec = G.splitBlock(*B, CURecordSize, &C);
+ bool AddedKeepAlive = false;
+
+ for (auto &E : CURec.edges()) {
+ if (E.getOffset() == 0) {
+ LLVM_DEBUG({
+ dbgs() << " Updating compact unwind record at "
+ << formatv("{0:x16}", CURec.getAddress()) << " to point to "
+ << (E.getTarget().hasName() ? E.getTarget().getName()
+ : StringRef())
+ << " (at " << formatv("{0:x16}", E.getTarget().getAddress())
+ << ")\n";
+ });
+
+ if (E.getTarget().isExternal())
+ return make_error<JITLinkError>(
+ "Error adding keep-alive edge for compact unwind record at " +
+ formatv("{0:x}", CURec.getAddress()) + ": target " +
+ E.getTarget().getName() + " is an external symbol");
+ auto &TgtBlock = E.getTarget().getBlock();
+ auto &CURecSym =
+ G.addAnonymousSymbol(CURec, 0, CURecordSize, false, false);
+ TgtBlock.addEdge(Edge::KeepAlive, 0, CURecSym, 0);
+ AddedKeepAlive = true;
+ } else if (E.getOffset() != PersonalityEdgeOffset &&
+ E.getOffset() != LSDAEdgeOffset)
+ return make_error<JITLinkError>("Unexpected edge at offset " +
+ formatv("{0:x}", E.getOffset()) +
+ " in compact unwind record at " +
+ formatv("{0:x}", CURec.getAddress()));
+ }
+
+ if (!AddedKeepAlive)
+ return make_error<JITLinkError>(
+ "Error adding keep-alive edge for compact unwind record at " +
+ formatv("{0:x}", CURec.getAddress()) +
+ ": no outgoing target edge at offset 0");
+ }
+ }
+ return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
new file mode 100644
index 000000000000..a4ae0ac1ecfc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
@@ -0,0 +1,253 @@
+//===----- MachOLinkGraphBuilder.h - MachO LinkGraph builder ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/MachO.h"
+
+#include "EHFrameSupportImpl.h"
+#include "JITLinkGeneric.h"
+
+#include <list>
+
+namespace llvm {
+namespace jitlink {
+
+class MachOLinkGraphBuilder {
+public:
+ virtual ~MachOLinkGraphBuilder();
+ Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+protected:
+
+ struct NormalizedSymbol {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSymbol(std::optional<StringRef> Name, uint64_t Value,
+ uint8_t Type, uint8_t Sect, uint16_t Desc, Linkage L,
+ Scope S)
+ : Name(Name), Value(Value), Type(Type), Sect(Sect), Desc(Desc), L(L),
+ S(S) {
+ assert((!Name || !Name->empty()) && "Name must be none or non-empty");
+ }
+
+ public:
+ NormalizedSymbol(const NormalizedSymbol &) = delete;
+ NormalizedSymbol &operator=(const NormalizedSymbol &) = delete;
+ NormalizedSymbol(NormalizedSymbol &&) = delete;
+ NormalizedSymbol &operator=(NormalizedSymbol &&) = delete;
+
+ std::optional<StringRef> Name;
+ uint64_t Value = 0;
+ uint8_t Type = 0;
+ uint8_t Sect = 0;
+ uint16_t Desc = 0;
+ Linkage L = Linkage::Strong;
+ Scope S = Scope::Default;
+ Symbol *GraphSymbol = nullptr;
+ };
+
+ // Normalized section representation. Section and segment names are guaranteed
+ // to be null-terminated, hence the extra bytes on SegName and SectName.
+ class NormalizedSection {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSection() = default;
+
+ public:
+ char SectName[17];
+ char SegName[17];
+ orc::ExecutorAddr Address;
+ uint64_t Size = 0;
+ uint64_t Alignment = 0;
+ uint32_t Flags = 0;
+ const char *Data = nullptr;
+ Section *GraphSection = nullptr;
+ std::map<orc::ExecutorAddr, Symbol *> CanonicalSymbols;
+ };
+
+ using SectionParserFunction = std::function<Error(NormalizedSection &S)>;
+
+ MachOLinkGraphBuilder(const object::MachOObjectFile &Obj, Triple TT,
+ SubtargetFeatures Features,
+ LinkGraph::GetEdgeKindNameFunction GetEdgeKindName);
+
+ LinkGraph &getGraph() const { return *G; }
+
+ const object::MachOObjectFile &getObject() const { return Obj; }
+
+ void addCustomSectionParser(StringRef SectionName,
+ SectionParserFunction Parse);
+
+ virtual Error addRelocations() = 0;
+
+ /// Create a symbol.
+ template <typename... ArgTs>
+ NormalizedSymbol &createNormalizedSymbol(ArgTs &&... Args) {
+ NormalizedSymbol *Sym = reinterpret_cast<NormalizedSymbol *>(
+ Allocator.Allocate<NormalizedSymbol>());
+ new (Sym) NormalizedSymbol(std::forward<ArgTs>(Args)...);
+ return *Sym;
+ }
+
+ /// Index is zero-based (MachO section indexes are usually one-based) and
+ /// assumed to be in-range. Client is responsible for checking.
+ NormalizedSection &getSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ assert(I != IndexToSection.end() && "No section recorded at index");
+ return I->second;
+ }
+
+ /// Try to get the section at the given index. Will return an error if the
+ /// given index is out of range, or if no section has been added for the given
+ /// index.
+ Expected<NormalizedSection &> findSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ if (I == IndexToSection.end())
+ return make_error<JITLinkError>("No section recorded for index " +
+ formatv("{0:d}", Index));
+ return I->second;
+ }
+
+ /// Try to get the symbol at the given index. Will return an error if the
+ /// given index is out of range, or if no symbol has been added for the given
+ /// index.
+ Expected<NormalizedSymbol &> findSymbolByIndex(uint64_t Index) {
+ auto I = IndexToSymbol.find(Index);
+ if (I == IndexToSymbol.end())
+ return make_error<JITLinkError>("No symbol at index " +
+ formatv("{0:d}", Index));
+ assert(I->second && "Null symbol at index");
+ return *I->second;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or null if no such symbol exists.
+ Symbol *getSymbolByAddress(NormalizedSection &NSec,
+ orc::ExecutorAddr Address) {
+ auto I = NSec.CanonicalSymbols.upper_bound(Address);
+ if (I == NSec.CanonicalSymbols.begin())
+ return nullptr;
+ return std::prev(I)->second;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or an error if no such symbol exists.
+ Expected<Symbol &> findSymbolByAddress(NormalizedSection &NSec,
+ orc::ExecutorAddr Address) {
+ auto *Sym = getSymbolByAddress(NSec, Address);
+ if (Sym)
+ if (Address <= Sym->getAddress() + Sym->getSize())
+ return *Sym;
+ return make_error<JITLinkError>("No symbol covering address " +
+ formatv("{0:x16}", Address));
+ }
+
+ static Linkage getLinkage(uint16_t Desc);
+ static Scope getScope(StringRef Name, uint8_t Type);
+ static bool isAltEntry(const NormalizedSymbol &NSym);
+
+ static bool isDebugSection(const NormalizedSection &NSec);
+ static bool isZeroFillSection(const NormalizedSection &NSec);
+
+ MachO::relocation_info
+ getRelocationInfo(const object::relocation_iterator RelItr) {
+ MachO::any_relocation_info ARI =
+ getObject().getRelocation(RelItr->getRawDataRefImpl());
+ MachO::relocation_info RI;
+ RI.r_address = ARI.r_word0;
+ RI.r_symbolnum = ARI.r_word1 & 0xffffff;
+ RI.r_pcrel = (ARI.r_word1 >> 24) & 1;
+ RI.r_length = (ARI.r_word1 >> 25) & 3;
+ RI.r_extern = (ARI.r_word1 >> 27) & 1;
+ RI.r_type = (ARI.r_word1 >> 28);
+ return RI;
+ }
+
+private:
+ static unsigned getPointerSize(const object::MachOObjectFile &Obj);
+ static llvm::endianness getEndianness(const object::MachOObjectFile &Obj);
+
+ void setCanonicalSymbol(NormalizedSection &NSec, Symbol &Sym) {
+ auto *&CanonicalSymEntry = NSec.CanonicalSymbols[Sym.getAddress()];
+ // There should be no symbol at this address, or, if there is,
+ // it should be a zero-sized symbol from an empty section (which
+ // we can safely override).
+ assert((!CanonicalSymEntry || CanonicalSymEntry->getSize() == 0) &&
+ "Duplicate canonical symbol at address");
+ CanonicalSymEntry = &Sym;
+ }
+
+ Section &getCommonSection();
+ void addSectionStartSymAndBlock(unsigned SecIndex, Section &GraphSec,
+ orc::ExecutorAddr Address, const char *Data,
+ orc::ExecutorAddrDiff Size,
+ uint32_t Alignment, bool IsLive);
+
+ Error createNormalizedSections();
+ Error createNormalizedSymbols();
+
+ /// Create graph blocks and symbols for externals, absolutes, commons and
+ /// all defined symbols in sections without custom parsers.
+ Error graphifyRegularSymbols();
+
+ /// Create and return a graph symbol for the given normalized symbol.
+ ///
+ /// NSym's GraphSymbol member will be updated to point at the newly created
+ /// symbol.
+ Symbol &createStandardGraphSymbol(NormalizedSymbol &Sym, Block &B,
+ size_t Size, bool IsText,
+ bool IsNoDeadStrip, bool IsCanonical);
+
+ /// Create graph blocks and symbols for all sections.
+ Error graphifySectionsWithCustomParsers();
+
+ /// Graphify cstring section.
+ Error graphifyCStringSection(NormalizedSection &NSec,
+ std::vector<NormalizedSymbol *> NSyms);
+
+ // Put the BumpPtrAllocator first so that we don't free any of the underlying
+ // memory until the Symbol/Addressable destructors have been run.
+ BumpPtrAllocator Allocator;
+
+ const object::MachOObjectFile &Obj;
+ std::unique_ptr<LinkGraph> G;
+
+ bool SubsectionsViaSymbols = false;
+ DenseMap<unsigned, NormalizedSection> IndexToSection;
+ Section *CommonSection = nullptr;
+
+ DenseMap<uint32_t, NormalizedSymbol *> IndexToSymbol;
+ StringMap<SectionParserFunction> CustomSectionParserFunctions;
+};
+
+/// A pass to split up __LD,__compact_unwind sections.
+class CompactUnwindSplitter {
+public:
+ CompactUnwindSplitter(StringRef CompactUnwindSectionName)
+ : CompactUnwindSectionName(CompactUnwindSectionName) {}
+ Error operator()(LinkGraph &G);
+
+private:
+ StringRef CompactUnwindSectionName;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
new file mode 100644
index 000000000000..8733306bab6b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -0,0 +1,625 @@
+//===---- MachO_arm64.cpp - JIT linker implementation for MachO/arm64 -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/arm64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+
+class MachOLinkGraphBuilder_arm64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_arm64(const object::MachOObjectFile &Obj,
+ SubtargetFeatures Features)
+ : MachOLinkGraphBuilder(Obj, Triple("arm64-apple-darwin"),
+ std::move(Features), aarch64::getEdgeKindName),
+ NumSymbols(Obj.getSymtabLoadCommand().nsyms) {}
+
+private:
+ enum MachOARM64RelocationKind : Edge::Kind {
+ MachOBranch26 = Edge::FirstRelocation,
+ MachOPointer32,
+ MachOPointer64,
+ MachOPointer64Anon,
+ MachOPage21,
+ MachOPageOffset12,
+ MachOGOTPage21,
+ MachOGOTPageOffset12,
+ MachOTLVPage21,
+ MachOTLVPageOffset12,
+ MachOPointerToGOT,
+ MachOPairedAddend,
+ MachOLDRLiteral19,
+ MachODelta32,
+ MachODelta64,
+ MachONegDelta32,
+ MachONegDelta64,
+ };
+
+ static Expected<MachOARM64RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::ARM64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? MachOPointer64 : MachOPointer64Anon;
+ else if (RI.r_length == 2)
+ return MachOPointer32;
+ }
+ break;
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'.
+ // They may be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return MachODelta32;
+ else if (RI.r_length == 3)
+ return MachODelta64;
+ }
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOBranch26;
+ break;
+ case MachO::ARM64_RELOC_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOPage21;
+ break;
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOPageOffset12;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOGOTPage21;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOGOTPageOffset12;
+ break;
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOPointerToGOT;
+ break;
+ case MachO::ARM64_RELOC_ADDEND:
+ if (!RI.r_pcrel && !RI.r_extern && RI.r_length == 2)
+ return MachOPairedAddend;
+ break;
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOTLVPage21;
+ break;
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOTLVPageOffset12;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported arm64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ using PairRelocInfo = std::tuple<Edge::Kind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ orc::ExecutorAddr FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == MachODelta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == MachODelta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of arm64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
+ if (!ToSymbolSec)
+ return ToSymbolSec.takeError();
+ ToSymbol = getSymbolByAddress(*ToSymbolSec, ToSymbolSec->Address);
+ assert(ToSymbol && "No symbol for section");
+ FixupValue -= ToSymbol->getAddress().getValue();
+ }
+
+ Edge::Kind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+
+ bool FixingFromSymbol = true;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ if (LLVM_UNLIKELY(&BlockToFix == &ToSymbol->getAddressable())) {
+ // From and To are symbols in the same block. Decide direction by offset
+ // instead.
+ if (ToSymbol->getAddress() > FixupAddress)
+ FixingFromSymbol = true;
+ else if (FromSymbol->getAddress() > FixupAddress)
+ FixingFromSymbol = false;
+ else
+ FixingFromSymbol = FromSymbol->getAddress() >= ToSymbol->getAddress();
+ } else
+ FixingFromSymbol = true;
+ } else {
+ if (&BlockToFix == &ToSymbol->getAddressable())
+ FixingFromSymbol = false;
+ else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry groups)");
+ }
+ }
+
+ if (FixingFromSymbol) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? aarch64::Delta64 : aarch64::Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else {
+ TargetSymbol = &*FromSymbol;
+ DeltaKind =
+ (SubRI.r_length == 3) ? aarch64::NegDelta64 : aarch64::NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ for (auto &S : Obj.sections()) {
+
+ orc::ExecutorAddr SectionAddress(S.getAddress());
+
+ // Skip relocations virtual sections.
+ if (S.isVirtual()) {
+ if (S.relocation_begin() != S.relocation_end())
+ return make_error<JITLinkError>("Virtual section contains "
+ "relocations");
+ continue;
+ }
+
+ auto NSec =
+ findSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ if (!NSec)
+ return NSec.takeError();
+
+ // Skip relocations for MachO sections without corresponding graph
+ // sections.
+ {
+ if (!NSec->GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping relocations for MachO section "
+ << NSec->SegName << "/" << NSec->SectName
+ << " which has no associated graph section\n";
+ });
+ continue;
+ }
+ }
+
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Validate the relocation kind.
+ auto MachORelocKind = getRelocationKind(RI);
+ if (!MachORelocKind)
+ return MachORelocKind.takeError();
+
+ // Find the address of the value to fix up.
+ orc::ExecutorAddr FixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ LLVM_DEBUG({
+ dbgs() << " " << NSec->SectName << " + "
+ << formatv("{0:x8}", RI.r_address) << ":\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(*NSec, FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + orc::ExecutorAddrDiff(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation content extends past end of fixup block");
+
+ Edge::Kind Kind = Edge::Invalid;
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ if (*MachORelocKind == MachOPairedAddend) {
+ // If this is an Addend relocation then process it and move to the
+ // paired reloc.
+
+ Addend = SignExtend64(RI.r_symbolnum, 24);
+
+ ++RelItr;
+ if (RelItr == RelEnd)
+ return make_error<JITLinkError>("Unpaired Addend reloc at " +
+ formatv("{0:x16}", FixupAddress));
+ RI = getRelocationInfo(RelItr);
+
+ MachORelocKind = getRelocationKind(RI);
+ if (!MachORelocKind)
+ return MachORelocKind.takeError();
+
+ if (*MachORelocKind != MachOBranch26 &&
+ *MachORelocKind != MachOPage21 &&
+ *MachORelocKind != MachOPageOffset12)
+ return make_error<JITLinkError>(
+ "Invalid relocation pair: Addend + " +
+ StringRef(getMachOARM64RelocationKindName(*MachORelocKind)));
+
+ LLVM_DEBUG({
+ dbgs() << " Addend: value = " << formatv("{0:x6}", Addend)
+ << ", pair is "
+ << getMachOARM64RelocationKindName(*MachORelocKind) << "\n";
+ });
+
+ // Find the address of the value to fix up.
+ orc::ExecutorAddr PairedFixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ if (PairedFixupAddress != FixupAddress)
+ return make_error<JITLinkError>("Paired relocation points at "
+ "different target");
+ }
+
+ switch (*MachORelocKind) {
+ case MachOBranch26: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0x7fffffff) != 0x14000000)
+ return make_error<JITLinkError>("BRANCH26 target is not a B or BL "
+ "instruction with a zero addend");
+ Kind = aarch64::Branch26PCRel;
+ break;
+ }
+ case MachOPointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ Kind = aarch64::Pointer32;
+ break;
+ case MachOPointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ Kind = aarch64::Pointer64;
+ break;
+ case MachOPointer64Anon: {
+ orc::ExecutorAddr TargetAddress(*(const ulittle64_t *)FixupContent);
+ auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+ if (!TargetNSec)
+ return TargetNSec.takeError();
+ if (auto TargetSymbolOrErr =
+ findSymbolByAddress(*TargetNSec, TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ Kind = aarch64::Pointer64;
+ break;
+ }
+ case MachOPage21:
+ case MachOGOTPage21:
+ case MachOTLVPage21: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xffffffe0) != 0x90000000)
+ return make_error<JITLinkError>("PAGE21/GOTPAGE21 target is not an "
+ "ADRP instruction with a zero "
+ "addend");
+
+ if (*MachORelocKind == MachOPage21) {
+ Kind = aarch64::Page21;
+ } else if (*MachORelocKind == MachOGOTPage21) {
+ Kind = aarch64::RequestGOTAndTransformToPage21;
+ } else if (*MachORelocKind == MachOTLVPage21) {
+ Kind = aarch64::RequestTLVPAndTransformToPage21;
+ }
+ break;
+ }
+ case MachOPageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ uint32_t EncodedAddend = (Instr & 0x003FFC00) >> 10;
+ if (EncodedAddend != 0)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target has non-zero "
+ "encoded addend");
+ Kind = aarch64::PageOffset12;
+ break;
+ }
+ case MachOGOTPageOffset12:
+ case MachOTLVPageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xfffffc00) != 0xf9400000)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target is not an LDR "
+ "immediate instruction with a zero "
+ "addend");
+
+ if (*MachORelocKind == MachOGOTPageOffset12) {
+ Kind = aarch64::RequestGOTAndTransformToPageOffset12;
+ } else if (*MachORelocKind == MachOTLVPageOffset12) {
+ Kind = aarch64::RequestTLVPAndTransformToPageOffset12;
+ }
+ break;
+ }
+ case MachOPointerToGOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+
+ Kind = aarch64::RequestGOTAndTransformToDelta32;
+ break;
+ case MachODelta32:
+ case MachODelta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *MachORelocKind, RI,
+ FixupAddress, FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ Edge GE(Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE, aarch64::getEdgeKindName(Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+
+ /// Return the string name of the given MachO arm64 edge kind.
+ const char *getMachOARM64RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case MachOBranch26:
+ return "MachOBranch26";
+ case MachOPointer64:
+ return "MachOPointer64";
+ case MachOPointer64Anon:
+ return "MachOPointer64Anon";
+ case MachOPage21:
+ return "MachOPage21";
+ case MachOPageOffset12:
+ return "MachOPageOffset12";
+ case MachOGOTPage21:
+ return "MachOGOTPage21";
+ case MachOGOTPageOffset12:
+ return "MachOGOTPageOffset12";
+ case MachOTLVPage21:
+ return "MachOTLVPage21";
+ case MachOTLVPageOffset12:
+ return "MachOTLVPageOffset12";
+ case MachOPointerToGOT:
+ return "MachOPointerToGOT";
+ case MachOPairedAddend:
+ return "MachOPairedAddend";
+ case MachOLDRLiteral19:
+ return "MachOLDRLiteral19";
+ case MachODelta32:
+ return "MachODelta32";
+ case MachODelta64:
+ return "MachODelta64";
+ case MachONegDelta32:
+ return "MachONegDelta32";
+ case MachONegDelta64:
+ return "MachONegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+ }
+
+ unsigned NumSymbols = 0;
+};
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+Error buildTables_MachO_arm64(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+ aarch64::GOTTableManager GOT;
+ aarch64::PLTTableManager PLT(GOT);
+ visitExistingEdges(G, GOT, PLT);
+ return Error::success();
+}
+
+class MachOJITLinker_arm64 : public JITLinker<MachOJITLinker_arm64> {
+ friend class JITLinker<MachOJITLinker_arm64>;
+
+public:
+ MachOJITLinker_arm64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return aarch64::applyFixup(G, B, E);
+ }
+
+ uint64_t NullValue = 0;
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject_arm64(MemoryBufferRef ObjectBuffer) {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+
+ auto Features = (*MachOObj)->getFeatures();
+ if (!Features)
+ return Features.takeError();
+
+ return MachOLinkGraphBuilder_arm64(**MachOObj, std::move(*Features))
+ .buildGraph();
+}
+
+void link_MachO_arm64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add compact unwind splitter pass.
+ Config.PrePrunePasses.push_back(
+ CompactUnwindSplitter("__LD,__compact_unwind"));
+
+ // Add eh-frame passes.
+ // FIXME: Prune eh-frames for which compact-unwind is available once
+ // we support compact-unwind registration with libunwind.
+ Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_arm64());
+ Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_arm64());
+
+ // Resolve any external section start / end symbols.
+ Config.PostAllocationPasses.push_back(
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ identifyMachOSectionStartAndEndSymbols));
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back(buildTables_MachO_arm64);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_arm64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+LinkGraphPassFunction createEHFrameSplitterPass_MachO_arm64() {
+ return DWARFRecordSectionSplitter("__TEXT,__eh_frame");
+}
+
+LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_arm64() {
+ return EHFrameEdgeFixer("__TEXT,__eh_frame", aarch64::PointerSize,
+ aarch64::Pointer32, aarch64::Pointer64,
+ aarch64::Delta32, aarch64::Delta64,
+ aarch64::NegDelta32);
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
new file mode 100644
index 000000000000..2c69d61316a8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
@@ -0,0 +1,550 @@
+//===---- MachO_x86_64.cpp -JIT linker implementation for MachO/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+
+// Builds a jitlink::LinkGraph from a MachO/x86-64 relocatable object.
+// Relocation parsing (addRelocations) is the only x86-64-specific step;
+// symbol/section graph construction is inherited from MachOLinkGraphBuilder.
+class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder {
+public:
+  MachOLinkGraphBuilder_x86_64(const object::MachOObjectFile &Obj,
+                               SubtargetFeatures Features)
+      : MachOLinkGraphBuilder(Obj, Triple("x86_64-apple-darwin"),
+                              std::move(Features), x86_64::getEdgeKindName) {}
+
+private:
+  // Normalized view of the supported MachO/x86-64 relocation types. The
+  // "Anon" variants address their target by section number + address rather
+  // than by symbol index.
+  //
+  // NOTE: MachOPCRel32Minus{1,2,4}Anon must stay contiguous and in this
+  // order: addRelocations derives the PC bias from the enumerator's
+  // distance to MachOPCRel32Minus1Anon.
+  enum MachONormalizedRelocationType : unsigned {
+    MachOBranch32,
+    MachOPointer32,
+    MachOPointer64,
+    MachOPointer64Anon,
+    MachOPCRel32,
+    MachOPCRel32Minus1,
+    MachOPCRel32Minus2,
+    MachOPCRel32Minus4,
+    MachOPCRel32Anon,
+    MachOPCRel32Minus1Anon,
+    MachOPCRel32Minus2Anon,
+    MachOPCRel32Minus4Anon,
+    MachOPCRel32GOTLoad,
+    MachOPCRel32GOT,
+    MachOPCRel32TLV,
+    MachOSubtractor32,
+    MachOSubtractor64,
+  };
+
+  // Map a raw MachO relocation_info to a normalized relocation type,
+  // rejecting any unsupported r_type / r_pcrel / r_extern / r_length
+  // combination with a descriptive error.
+  static Expected<MachONormalizedRelocationType>
+  getRelocKind(const MachO::relocation_info &RI) {
+    switch (RI.r_type) {
+    case MachO::X86_64_RELOC_UNSIGNED:
+      if (!RI.r_pcrel) {
+        if (RI.r_length == 3)
+          return RI.r_extern ? MachOPointer64 : MachOPointer64Anon;
+        else if (RI.r_extern && RI.r_length == 2)
+          return MachOPointer32;
+      }
+      break;
+    case MachO::X86_64_RELOC_SIGNED:
+      if (RI.r_pcrel && RI.r_length == 2)
+        return RI.r_extern ? MachOPCRel32 : MachOPCRel32Anon;
+      break;
+    case MachO::X86_64_RELOC_BRANCH:
+      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+        return MachOBranch32;
+      break;
+    case MachO::X86_64_RELOC_GOT_LOAD:
+      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+        return MachOPCRel32GOTLoad;
+      break;
+    case MachO::X86_64_RELOC_GOT:
+      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+        return MachOPCRel32GOT;
+      break;
+    case MachO::X86_64_RELOC_SUBTRACTOR:
+      // SUBTRACTOR must be followed by a paired UNSIGNED relocation;
+      // that pairing is validated later in parsePairRelocation.
+      if (!RI.r_pcrel && RI.r_extern) {
+        if (RI.r_length == 2)
+          return MachOSubtractor32;
+        else if (RI.r_length == 3)
+          return MachOSubtractor64;
+      }
+      break;
+    case MachO::X86_64_RELOC_SIGNED_1:
+      if (RI.r_pcrel && RI.r_length == 2)
+        return RI.r_extern ? MachOPCRel32Minus1 : MachOPCRel32Minus1Anon;
+      break;
+    case MachO::X86_64_RELOC_SIGNED_2:
+      if (RI.r_pcrel && RI.r_length == 2)
+        return RI.r_extern ? MachOPCRel32Minus2 : MachOPCRel32Minus2Anon;
+      break;
+    case MachO::X86_64_RELOC_SIGNED_4:
+      if (RI.r_pcrel && RI.r_length == 2)
+        return RI.r_extern ? MachOPCRel32Minus4 : MachOPCRel32Minus4Anon;
+      break;
+    case MachO::X86_64_RELOC_TLV:
+      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+        return MachOPCRel32TLV;
+      break;
+    }
+
+    return make_error<JITLinkError>(
+        "Unsupported x86-64 relocation: address=" +
+        formatv("{0:x8}", RI.r_address) +
+        ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+        ", kind=" + formatv("{0:x1}", RI.r_type) +
+        ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+        ", extern=" + (RI.r_extern ? "true" : "false") +
+        ", length=" + formatv("{0:d}", RI.r_length));
+  }
+
+  // (edge kind, target symbol, addend) triple describing a parsed pair.
+  using PairRelocInfo = std::tuple<Edge::Kind, Symbol *, uint64_t>;
+
+  // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+  // returns the edge kind and addend to be used. UnsignedRelItr must
+  // already point at the UNSIGNED half of the pair.
+  Expected<PairRelocInfo> parsePairRelocation(
+      Block &BlockToFix, MachONormalizedRelocationType SubtractorKind,
+      const MachO::relocation_info &SubRI, orc::ExecutorAddr FixupAddress,
+      const char *FixupContent, object::relocation_iterator &UnsignedRelItr,
+      object::relocation_iterator &RelEnd) {
+    using namespace support;
+
+    assert(((SubtractorKind == MachOSubtractor32 && SubRI.r_length == 2) ||
+            (SubtractorKind == MachOSubtractor64 && SubRI.r_length == 3)) &&
+           "Subtractor kind should match length");
+    assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+    assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+    if (UnsignedRelItr == RelEnd)
+      return make_error<JITLinkError>("x86_64 SUBTRACTOR without paired "
+                                      "UNSIGNED relocation");
+
+    auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+    // Both halves of the pair must describe the same fixup location/size.
+    if (SubRI.r_address != UnsignedRI.r_address)
+      return make_error<JITLinkError>("x86_64 SUBTRACTOR and paired UNSIGNED "
+                                      "point to different addresses");
+
+    if (SubRI.r_length != UnsignedRI.r_length)
+      return make_error<JITLinkError>("length of x86_64 SUBTRACTOR and paired "
+                                      "UNSIGNED reloc must match");
+
+    // 'FromSymbol' is the subtrahend ('B' in A - B).
+    Symbol *FromSymbol;
+    if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+      FromSymbol = FromSymbolOrErr->GraphSymbol;
+    else
+      return FromSymbolOrErr.takeError();
+
+    // Read the current fixup value.
+    uint64_t FixupValue = 0;
+    if (SubRI.r_length == 3)
+      FixupValue = *(const little64_t *)FixupContent;
+    else
+      FixupValue = *(const little32_t *)FixupContent;
+
+    // Find 'ToSymbol' using symbol number or address, depending on whether the
+    // paired UNSIGNED relocation is extern.
+    Symbol *ToSymbol = nullptr;
+    if (UnsignedRI.r_extern) {
+      // Find target symbol by symbol index.
+      if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+        ToSymbol = ToSymbolOrErr->GraphSymbol;
+      else
+        return ToSymbolOrErr.takeError();
+    } else {
+      // Non-extern: r_symbolnum holds a 1-based section number.
+      auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
+      if (!ToSymbolSec)
+        return ToSymbolSec.takeError();
+      ToSymbol = getSymbolByAddress(*ToSymbolSec, ToSymbolSec->Address);
+      assert(ToSymbol && "No symbol for section");
+      // The stored value encodes the absolute target address; rebase it to
+      // an offset from the section's first symbol.
+      FixupValue -= ToSymbol->getAddress().getValue();
+    }
+
+    Edge::Kind DeltaKind;
+    Symbol *TargetSymbol;
+    uint64_t Addend;
+
+    // Decide which side of the subtraction the edge should be anchored on:
+    // the fixup must live in the block containing either From or To.
+    bool FixingFromSymbol = true;
+    if (&BlockToFix == &FromSymbol->getAddressable()) {
+      if (LLVM_UNLIKELY(&BlockToFix == &ToSymbol->getAddressable())) {
+        // From and To are symbols in the same block. Decide direction by offset
+        // instead.
+        if (ToSymbol->getAddress() > FixupAddress)
+          FixingFromSymbol = true;
+        else if (FromSymbol->getAddress() > FixupAddress)
+          FixingFromSymbol = false;
+        else
+          FixingFromSymbol = FromSymbol->getAddress() >= ToSymbol->getAddress();
+      } else
+        FixingFromSymbol = true;
+    } else {
+      if (&BlockToFix == &ToSymbol->getAddressable())
+        FixingFromSymbol = false;
+      else {
+        // BlockToFix was neither FromSymbol nor ToSymbol.
+        return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+                                        "either 'A' or 'B' (or a symbol in one "
+                                        "of their alt-entry groups)");
+      }
+    }
+
+    if (FixingFromSymbol) {
+      TargetSymbol = ToSymbol;
+      DeltaKind = (SubRI.r_length == 3) ? x86_64::Delta64 : x86_64::Delta32;
+      Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+      // FIXME: handle extern 'from'.
+    } else {
+      TargetSymbol = FromSymbol;
+      DeltaKind =
+          (SubRI.r_length == 3) ? x86_64::NegDelta64 : x86_64::NegDelta32;
+      Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+    }
+
+    return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+  }
+
+  // Translate every MachO relocation in the object into a graph edge on the
+  // block containing the fixup location.
+  Error addRelocations() override {
+    using namespace support;
+    auto &Obj = getObject();
+
+    LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+    for (const auto &S : Obj.sections()) {
+
+      orc::ExecutorAddr SectionAddress(S.getAddress());
+
+      // Skip relocations for virtual sections.
+      if (S.isVirtual()) {
+        if (S.relocation_begin() != S.relocation_end())
+          return make_error<JITLinkError>("Virtual section contains "
+                                          "relocations");
+        continue;
+      }
+
+      auto NSec =
+          findSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+      if (!NSec)
+        return NSec.takeError();
+
+      // Skip relocations for MachO sections without corresponding graph
+      // sections.
+      {
+        if (!NSec->GraphSection) {
+          LLVM_DEBUG({
+            dbgs() << "  Skipping relocations for MachO section "
+                   << NSec->SegName << "/" << NSec->SectName
+                   << " which has no associated graph section\n";
+          });
+          continue;
+        }
+      }
+
+      // Add relocations for section.
+      for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+           RelItr != RelEnd; ++RelItr) {
+
+        MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+        // Find the address of the value to fix up.
+        auto FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+        LLVM_DEBUG({
+          dbgs() << "  " << NSec->SectName << " + "
+                 << formatv("{0:x8}", RI.r_address) << ":\n";
+        });
+
+        // Find the block that the fixup points to.
+        Block *BlockToFix = nullptr;
+        {
+          auto SymbolToFixOrErr = findSymbolByAddress(*NSec, FixupAddress);
+          if (!SymbolToFixOrErr)
+            return SymbolToFixOrErr.takeError();
+          BlockToFix = &SymbolToFixOrErr->getBlock();
+        }
+
+        // The fixup (2^r_length bytes wide) must lie entirely within the
+        // block's content.
+        if (FixupAddress + orc::ExecutorAddrDiff(1ULL << RI.r_length) >
+            BlockToFix->getAddress() + BlockToFix->getContent().size())
+          return make_error<JITLinkError>(
+              "Relocation extends past end of fixup block");
+
+        // Get a pointer to the fixup content.
+        const char *FixupContent = BlockToFix->getContent().data() +
+                                   (FixupAddress - BlockToFix->getAddress());
+
+        size_t FixupOffset = FixupAddress - BlockToFix->getAddress();
+
+        // The target symbol and addend will be populated by the switch below.
+        Symbol *TargetSymbol = nullptr;
+        uint64_t Addend = 0;
+
+        // Validate the relocation kind.
+        auto MachORelocKind = getRelocKind(RI);
+        if (!MachORelocKind)
+          return MachORelocKind.takeError();
+
+        Edge::Kind Kind = Edge::Invalid;
+
+        switch (*MachORelocKind) {
+        case MachOBranch32:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const little32_t *)FixupContent;
+          Kind = x86_64::BranchPCRel32;
+          break;
+        case MachOPCRel32:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          // -4: Delta32 is relative to the fixup address, but the CPU
+          // computes PC-relative operands from the end of the instruction.
+          Addend = *(const little32_t *)FixupContent - 4;
+          Kind = x86_64::Delta32;
+          break;
+        case MachOPCRel32GOTLoad:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const little32_t *)FixupContent;
+          Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable;
+          // NOTE(review): offset >= 3 leaves room for the REX-prefixed load
+          // opcode bytes that the relaxation optimization rewrites — confirm
+          // against x86_64::optimizeGOTAndStubAccesses.
+          if (FixupOffset < 3)
+            return make_error<JITLinkError>("GOTLD at invalid offset " +
+                                            formatv("{0}", FixupOffset));
+          break;
+        case MachOPCRel32GOT:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const little32_t *)FixupContent - 4;
+          Kind = x86_64::RequestGOTAndTransformToDelta32;
+          break;
+        case MachOPCRel32TLV:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const little32_t *)FixupContent;
+          Kind = x86_64::RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable;
+          if (FixupOffset < 3)
+            return make_error<JITLinkError>("TLV at invalid offset " +
+                                            formatv("{0}", FixupOffset));
+          break;
+        case MachOPointer32:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const ulittle32_t *)FixupContent;
+          Kind = x86_64::Pointer32;
+          break;
+        case MachOPointer64:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const ulittle64_t *)FixupContent;
+          Kind = x86_64::Pointer64;
+          break;
+        case MachOPointer64Anon: {
+          // Anonymous pointer: the stored value is the absolute target
+          // address; convert it to symbol + addend.
+          orc::ExecutorAddr TargetAddress(*(const ulittle64_t *)FixupContent);
+          auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+          if (!TargetNSec)
+            return TargetNSec.takeError();
+          if (auto TargetSymbolOrErr =
+                  findSymbolByAddress(*TargetNSec, TargetAddress))
+            TargetSymbol = &*TargetSymbolOrErr;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = TargetAddress - TargetSymbol->getAddress();
+          Kind = x86_64::Pointer64;
+          break;
+        }
+        case MachOPCRel32Minus1:
+        case MachOPCRel32Minus2:
+        case MachOPCRel32Minus4:
+          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = *(const little32_t *)FixupContent - 4;
+          Kind = x86_64::Delta32;
+          break;
+        case MachOPCRel32Anon: {
+          orc::ExecutorAddr TargetAddress(FixupAddress + 4 +
+                                          *(const little32_t *)FixupContent);
+          auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+          if (!TargetNSec)
+            return TargetNSec.takeError();
+          if (auto TargetSymbolOrErr =
+                  findSymbolByAddress(*TargetNSec, TargetAddress))
+            TargetSymbol = &*TargetSymbolOrErr;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = TargetAddress - TargetSymbol->getAddress() - 4;
+          Kind = x86_64::Delta32;
+          break;
+        }
+        case MachOPCRel32Minus1Anon:
+        case MachOPCRel32Minus2Anon:
+        case MachOPCRel32Minus4Anon: {
+          // PC bias is 4 (instruction end) plus 1, 2 or 4 depending on the
+          // SIGNED_N variant — recovered from the enumerator distance.
+          orc::ExecutorAddrDiff Delta =
+              4 + orc::ExecutorAddrDiff(
+                      1ULL << (*MachORelocKind - MachOPCRel32Minus1Anon));
+          orc::ExecutorAddr TargetAddress =
+              FixupAddress + Delta + *(const little32_t *)FixupContent;
+          auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+          if (!TargetNSec)
+            return TargetNSec.takeError();
+          if (auto TargetSymbolOrErr =
+                  findSymbolByAddress(*TargetNSec, TargetAddress))
+            TargetSymbol = &*TargetSymbolOrErr;
+          else
+            return TargetSymbolOrErr.takeError();
+          Addend = TargetAddress - TargetSymbol->getAddress() - Delta;
+          Kind = x86_64::Delta32;
+          break;
+        }
+        case MachOSubtractor32:
+        case MachOSubtractor64: {
+          // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+          // parsePairRelocation handles the paired reloc, and returns the
+          // edge kind to be used (either Delta32/Delta64, or
+          // NegDelta32/NegDelta64, depending on the direction of the
+          // subtraction) along with the addend.
+          // Note: ++RelItr consumes the paired UNSIGNED relocation here.
+          auto PairInfo =
+              parsePairRelocation(*BlockToFix, *MachORelocKind, RI,
+                                  FixupAddress, FixupContent, ++RelItr, RelEnd);
+          if (!PairInfo)
+            return PairInfo.takeError();
+          std::tie(Kind, TargetSymbol, Addend) = *PairInfo;
+          assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+          break;
+        }
+        }
+
+        LLVM_DEBUG({
+          dbgs() << "    ";
+          Edge GE(Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+                  Addend);
+          printEdge(dbgs(), *BlockToFix, GE, x86_64::getEdgeKindName(Kind));
+          dbgs() << "\n";
+        });
+        BlockToFix->addEdge(Kind, FixupAddress - BlockToFix->getAddress(),
+                            *TargetSymbol, Addend);
+      }
+    }
+    return Error::success();
+  }
+};
+
+// Post-prune pass: visit all existing edges and synthesize in-graph GOT
+// entries and PLT stubs for any edge that requests them.
+Error buildGOTAndStubs_MachO_x86_64(LinkGraph &G) {
+  x86_64::GOTTableManager GOT;
+  x86_64::PLTTableManager PLT(GOT);
+  visitExistingEdges(G, GOT, PLT);
+  return Error::success();
+}
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+// Concrete JITLinker for MachO/x86-64. The JITLinker CRTP base drives the
+// link; this class only supplies the per-edge fixup application step.
+class MachOJITLinker_x86_64 : public JITLinker<MachOJITLinker_x86_64> {
+  friend class JITLinker<MachOJITLinker_x86_64>;
+
+public:
+  MachOJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+                        std::unique_ptr<LinkGraph> G,
+                        PassConfiguration PassConfig)
+      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+  // Apply the fixup for edge E in block B. nullptr: no GOT-base symbol is
+  // supplied — NOTE(review): confirm no edge kind produced by this backend
+  // requires one in x86_64::applyFixup.
+  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+    return x86_64::applyFixup(G, B, E, nullptr);
+  }
+};
+
+// Parse ObjectBuffer as a MachO/x86-64 relocatable object and build a
+// LinkGraph from it. Fails if the buffer is not valid MachO or if its
+// subtarget features cannot be read.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject_x86_64(MemoryBufferRef ObjectBuffer) {
+  auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
+  if (!MachOObj)
+    return MachOObj.takeError();
+
+  auto Features = (*MachOObj)->getFeatures();
+  if (!Features)
+    return Features.takeError();
+
+  return MachOLinkGraphBuilder_x86_64(**MachOObj, std::move(*Features))
+      .buildGraph();
+}
+
+// Link the given MachO/x86-64 LinkGraph using Ctx: installs the default
+// pass pipeline (unless the context opts out), lets the context modify it,
+// then hands off to MachOJITLinker_x86_64::link.
+void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
+                       std::unique_ptr<JITLinkContext> Ctx) {
+
+  PassConfiguration Config;
+
+  if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+    // Add eh-frame passes.
+    Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_x86_64());
+    Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_x86_64());
+
+    // Add compact unwind splitter pass.
+    Config.PrePrunePasses.push_back(
+        CompactUnwindSplitter("__LD,__compact_unwind"));
+
+    // Add a mark-live pass.
+    if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+      Config.PrePrunePasses.push_back(std::move(MarkLive));
+    else
+      Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+    // Resolve any external section start / end symbols.
+    Config.PostAllocationPasses.push_back(
+        createDefineExternalSectionStartAndEndSymbolsPass(
+            identifyMachOSectionStartAndEndSymbols));
+
+    // Add an in-place GOT/Stubs pass.
+    Config.PostPrunePasses.push_back(buildGOTAndStubs_MachO_x86_64);
+
+    // Add GOT/Stubs optimizer pass.
+    Config.PreFixupPasses.push_back(x86_64::optimizeGOTAndStubAccesses);
+  }
+
+  // Give the context a chance to add/remove/reorder passes.
+  if (auto Err = Ctx->modifyPassConfig(*G, Config))
+    return Ctx->notifyFailed(std::move(Err));
+
+  // Construct a JITLinker and run the link function.
+  MachOJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+// Returns a pre-prune pass that splits the __TEXT,__eh_frame section into
+// one graph block per CFI record, so unused records can be pruned.
+LinkGraphPassFunction createEHFrameSplitterPass_MachO_x86_64() {
+  return DWARFRecordSectionSplitter("__TEXT,__eh_frame");
+}
+
+// Returns a pre-prune pass that walks __TEXT,__eh_frame records and adds
+// edges for the pointers they contain, using the x86-64 edge kinds for
+// 32/64-bit absolute and delta references.
+LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_x86_64() {
+  return EHFrameEdgeFixer("__TEXT,__eh_frame", x86_64::PointerSize,
+                          x86_64::Pointer32, x86_64::Pointer64, x86_64::Delta32,
+                          x86_64::Delta64, x86_64::NegDelta32);
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
new file mode 100644
index 000000000000..6e325f92bafb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
@@ -0,0 +1,126 @@
+//===--------------- PerGraphGOTAndPLTStubsBuilder.h ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Construct GOT and PLT entries for each graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_PERGRAPHGOTANDPLTSTUBSBUILDER_H
+#define LLVM_EXECUTIONENGINE_JITLINK_PERGRAPHGOTANDPLTSTUBSBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+/// Per-object GOT and PLT Stub builder.
+///
+/// Constructs GOT entries and PLT stubs in every graph for referenced symbols.
+/// Building these blocks in every graph is likely to lead to duplicate entries
+/// in the JITLinkDylib, but allows graphs to be trivially removed independently
+/// without affecting other graphs (since those other graphs will have their own
+/// copies of any required entries).
+///
+/// BuilderImplT is a CRTP subclass providing: isGOTEdgeToFix, fixGOTEdge,
+/// isExternalBranchEdge, fixPLTEdge, createGOTEntry and createPLTStub.
+template <typename BuilderImplT>
+class PerGraphGOTAndPLTStubsBuilder {
+public:
+  PerGraphGOTAndPLTStubsBuilder(LinkGraph &G) : G(G) {}
+
+  /// Adapter so the builder can be registered directly as a LinkGraph pass.
+  static Error asPass(LinkGraph &G) { return BuilderImplT(G).run(); }
+
+  /// Visit every edge in the graph, retargeting GOT edges at (possibly
+  /// newly-created) GOT entries and external branch edges at PLT stubs.
+  Error run() {
+    LLVM_DEBUG(dbgs() << "Running Per-Graph GOT and Stubs builder:\n");
+
+    // We're going to be adding new blocks, but we don't want to iterate over
+    // the new ones, so build a worklist.
+    std::vector<Block *> Worklist(G.blocks().begin(), G.blocks().end());
+
+    for (auto *B : Worklist)
+      for (auto &E : B->edges()) {
+        if (impl().isGOTEdgeToFix(E)) {
+          LLVM_DEBUG({
+            dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind())
+                   << " edge at " << B->getFixupAddress(E) << " ("
+                   << B->getAddress() << " + "
+                   << formatv("{0:x}", E.getOffset()) << ")\n";
+          });
+          impl().fixGOTEdge(E, getGOTEntry(E.getTarget()));
+        } else if (impl().isExternalBranchEdge(E)) {
+          LLVM_DEBUG({
+            dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind())
+                   << " edge at " << B->getFixupAddress(E) << " ("
+                   << B->getAddress() << " + "
+                   << formatv("{0:x}", E.getOffset()) << ")\n";
+          });
+          impl().fixPLTEdge(E, getPLTStub(E.getTarget()));
+        }
+      }
+
+    return Error::success();
+  }
+
+protected:
+  /// Return the GOT entry symbol for Target, creating it on first use.
+  Symbol &getGOTEntry(Symbol &Target) {
+    assert(Target.hasName() && "GOT edge cannot point to anonymous target");
+
+    auto GOTEntryI = GOTEntries.find(Target.getName());
+
+    // Build the entry if it doesn't exist.
+    if (GOTEntryI == GOTEntries.end()) {
+      auto &GOTEntry = impl().createGOTEntry(Target);
+      LLVM_DEBUG({
+        dbgs() << "    Created GOT entry for " << Target.getName() << ": "
+               << GOTEntry << "\n";
+      });
+      GOTEntryI =
+          GOTEntries.insert(std::make_pair(Target.getName(), &GOTEntry)).first;
+    }
+
+    assert(GOTEntryI != GOTEntries.end() && "Could not get GOT entry symbol");
+    LLVM_DEBUG(
+        { dbgs() << "    Using GOT entry " << *GOTEntryI->second << "\n"; });
+    return *GOTEntryI->second;
+  }
+
+  /// Return the PLT stub symbol for Target, creating it on first use.
+  Symbol &getPLTStub(Symbol &Target) {
+    assert(Target.hasName() &&
+           "External branch edge can not point to an anonymous target");
+    auto StubI = PLTStubs.find(Target.getName());
+
+    if (StubI == PLTStubs.end()) {
+      auto &StubSymbol = impl().createPLTStub(Target);
+      LLVM_DEBUG({
+        dbgs() << "    Created PLT stub for " << Target.getName() << ": "
+               << StubSymbol << "\n";
+      });
+      StubI =
+          PLTStubs.insert(std::make_pair(Target.getName(), &StubSymbol)).first;
+    }
+
+    // Fixed assert message typo: was "Count not get stub symbol".
+    assert(StubI != PLTStubs.end() && "Could not get stub symbol");
+    LLVM_DEBUG({ dbgs() << "    Using PLT stub " << *StubI->second << "\n"; });
+    return *StubI->second;
+  }
+
+  LinkGraph &G;
+
+private:
+  BuilderImplT &impl() { return static_cast<BuilderImplT &>(*this); }
+
+  // Caches keyed by target symbol name; one entry/stub per unique name.
+  DenseMap<StringRef, Symbol *> GOTEntries;
+  DenseMap<StringRef, Symbol *> PLTStubs;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_PERGRAPHGOTANDPLTSTUBSBUILDER_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h
new file mode 100644
index 000000000000..f17dfe98ba4f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h
@@ -0,0 +1,62 @@
+//===------- SEHFrameSupport.h - JITLink seh-frame utils --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// SEHFrame utils for JITLink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_SEHFRAMESUPPORT_H
+#define LLVM_EXECUTIONENGINE_JITLINK_SEHFRAMESUPPORT_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Triple.h"
+
+namespace llvm {
+namespace jitlink {
+/// This pass adds keep-alive edges targeting blocks in the SEH frame
+/// section, anchored on the function content blocks those frames refer to,
+/// so that a live function keeps its unwind records alive.
+class SEHFrameKeepAlivePass {
+public:
+  SEHFrameKeepAlivePass(StringRef SEHFrameSectionName)
+      : SEHFrameSectionName(SEHFrameSectionName) {}
+
+  Error operator()(LinkGraph &G) {
+    // Nothing to do if the object has no SEH frame section.
+    auto *S = G.findSectionByName(SEHFrameSectionName);
+    if (!S)
+      return Error::success();
+
+    // Simply consider every block pointed to by an SEH frame block as a
+    // parent. This adds some unnecessary keep-alive edges to unwind info
+    // (xdata) blocks, but those blocks are usually dead by default, so they
+    // wouldn't count for the fate of the SEH frame block.
+    for (auto *B : S->blocks()) {
+      // Anonymous anchor symbol that the keep-alive edges can target.
+      auto &DummySymbol = G.addAnonymousSymbol(*B, 0, 0, false, false);
+      SetVector<Block *> Children;
+      for (auto &E : B->edges()) {
+        auto &Sym = E.getTarget();
+        if (!Sym.isDefined())
+          continue;
+        Children.insert(&Sym.getBlock());
+      }
+      for (auto *Child : Children)
+        Child->addEdge(Edge(Edge::KeepAlive, 0, DummySymbol, 0));
+    }
+    return Error::success();
+  }
+
+private:
+  StringRef SEHFrameSectionName;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_SEHFRAMESUPPORT_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch32.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch32.cpp
new file mode 100644
index 000000000000..00be2f57d066
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch32.cpp
@@ -0,0 +1,993 @@
+//===--------- aarch32.cpp - Generic JITLink arm/thumb utilities ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing arm/thumb objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/aarch32.h"
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace aarch32 {
+
+/// Check whether the given target flags are set for this Symbol.
+/// Returns true if any of the bits in \p Flags are set.
+bool hasTargetFlags(Symbol &Sym, TargetFlagsType Flags) {
+  return static_cast<TargetFlagsType>(Sym.getTargetFlags()) & Flags;
+}
+
+/// Encode 22-bit immediate value for branch instructions without J1J2 range
+/// extension (formats B T4, BL T1 and BLX T2).
+///
+/// 00000:Imm11H:Imm11L:0 -> [ 00000:Imm11H, 00000:Imm11L ]
+///                            J1^ ^J2 will always be 1
+///
+/// Value is a signed, halfword-aligned byte offset; bit 0 is dropped.
+HalfWords encodeImmBT4BlT1BlxT2(int64_t Value) {
+  // Force J1 = J2 = 1 in the second halfword.
+  constexpr uint32_t J1J2 = 0x2800;
+  uint32_t Imm11H = (Value >> 12) & 0x07ff;
+  uint32_t Imm11L = (Value >> 1) & 0x07ff;
+  return HalfWords{Imm11H, Imm11L | J1J2};
+}
+
+/// Decode 22-bit immediate value for branch instructions without J1J2 range
+/// extension (formats B T4, BL T1 and BLX T2).
+///
+/// [ 00000:Imm11H, 00000:Imm11L ] -> 00000:Imm11H:Imm11L:0
+///   J1^ ^J2 will always be 1
+///
+/// Returns the sign-extended byte offset (inverse of encodeImmBT4BlT1BlxT2).
+int64_t decodeImmBT4BlT1BlxT2(uint32_t Hi, uint32_t Lo) {
+  uint32_t Imm11H = Hi & 0x07ff;
+  uint32_t Imm11L = Lo & 0x07ff;
+  return SignExtend64<22>(Imm11H << 12 | Imm11L << 1);
+}
+
+/// Encode 25-bit immediate value for branch instructions with J1J2 range
+/// extension (formats B T4, BL T1 and BLX T2).
+///
+/// S:I1:I2:Imm10:Imm11:0 -> [ 00000:S:Imm10, 00:J1:0:J2:Imm11 ]
+///
+/// Per the encoding rules, J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S;
+/// the XOR/NOT expressions below compute exactly that bit-in-place.
+HalfWords encodeImmBT4BlT1BlxT2_J1J2(int64_t Value) {
+  uint32_t S = (Value >> 14) & 0x0400;
+  uint32_t J1 = (((~(Value >> 10)) ^ (Value >> 11)) & 0x2000);
+  uint32_t J2 = (((~(Value >> 11)) ^ (Value >> 13)) & 0x0800);
+  uint32_t Imm10 = (Value >> 12) & 0x03ff;
+  uint32_t Imm11 = (Value >> 1) & 0x07ff;
+  return HalfWords{S | Imm10, J1 | J2 | Imm11};
+}
+
+/// Decode 25-bit immediate value for branch instructions with J1J2 range
+/// extension (formats B T4, BL T1 and BLX T2).
+///
+/// [ 00000:S:Imm10, 00:J1:0:J2:Imm11] -> S:I1:I2:Imm10:Imm11:0
+///
+/// Recovers I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S), the inverse of
+/// encodeImmBT4BlT1BlxT2_J1J2.
+int64_t decodeImmBT4BlT1BlxT2_J1J2(uint32_t Hi, uint32_t Lo) {
+  uint32_t S = Hi & 0x0400;
+  uint32_t I1 = ~((Lo ^ (Hi << 3)) << 10) & 0x00800000;
+  uint32_t I2 = ~((Lo ^ (Hi << 1)) << 11) & 0x00400000;
+  uint32_t Imm10 = Hi & 0x03ff;
+  uint32_t Imm11 = Lo & 0x07ff;
+  return SignExtend64<25>(S << 14 | I1 | I2 | Imm10 << 12 | Imm11 << 1);
+}
+
+/// Encode 26-bit immediate value for branch instructions
+/// (formats B A1, BL A1 and BLX A2).
+///
+/// Imm24:00 -> 00000000:Imm24
+///
+/// The low two bits of Value are discarded (A32 branch targets are
+/// word-aligned).
+uint32_t encodeImmBA1BlA1BlxA2(int64_t Value) {
+  return (Value >> 2) & 0x00ffffff;
+}
+
+/// Decode 26-bit immediate value for branch instructions
+/// (formats B A1, BL A1 and BLX A2).
+///
+/// 00000000:Imm24 -> Imm24:00
+///
+/// Returns the sign-extended byte offset (inverse of encodeImmBA1BlA1BlxA2).
+int64_t decodeImmBA1BlA1BlxA2(int64_t Value) {
+  return SignExtend64<26>((Value & 0x00ffffff) << 2);
+}
+
+/// Encode 16-bit immediate value for move instruction formats MOVT T1 and
+/// MOVW T3.
+///
+/// Imm4:Imm1:Imm3:Imm8 -> [ 00000:i:000000:Imm4, 0:Imm3:0000:Imm8 ]
+///
+/// The 16-bit value is scattered across the two halfwords of the T32
+/// instruction; exact inverse of decodeImmMovtT1MovwT3.
+HalfWords encodeImmMovtT1MovwT3(uint16_t Value) {
+  uint32_t Imm4 = (Value >> 12) & 0x0f;
+  uint32_t Imm1 = (Value >> 11) & 0x01;
+  uint32_t Imm3 = (Value >> 8) & 0x07;
+  uint32_t Imm8 = Value & 0xff;
+  return HalfWords{Imm1 << 10 | Imm4, Imm3 << 12 | Imm8};
+}
+
+/// Decode 16-bit immediate value from move instruction formats MOVT T1 and
+/// MOVW T3.
+///
+/// [ 00000:i:000000:Imm4, 0:Imm3:0000:Imm8 ] -> Imm4:Imm1:Imm3:Imm8
+///
+/// Exact inverse of encodeImmMovtT1MovwT3.
+uint16_t decodeImmMovtT1MovwT3(uint32_t Hi, uint32_t Lo) {
+  uint32_t Imm4 = Hi & 0x0f;
+  uint32_t Imm1 = (Hi >> 10) & 0x01;
+  uint32_t Imm3 = (Lo >> 12) & 0x07;
+  uint32_t Imm8 = Lo & 0xff;
+  uint32_t Imm16 = Imm4 << 12 | Imm1 << 11 | Imm3 << 8 | Imm8;
+  assert(Imm16 <= 0xffff && "Decoded value out-of-range");
+  return Imm16;
+}
+
+/// Encode register ID for instruction formats MOVT T1 and MOVW T3.
+///
+/// Rd4 -> [0000000000000000, 0000:Rd4:00000000]
+///
+/// The destination register lives entirely in the second (Lo) halfword.
+HalfWords encodeRegMovtT1MovwT3(int64_t Value) {
+  uint32_t Rd4 = (Value & 0x0f) << 8;
+  return HalfWords{0, Rd4};
+}
+
+/// Decode register ID from instruction formats MOVT T1 and MOVW T3.
+///
+/// [0000000000000000, 0000:Rd4:00000000] -> Rd4
+///
+/// Hi is unused: the destination register is encoded entirely in the
+/// second (Lo) halfword.
+int64_t decodeRegMovtT1MovwT3(uint32_t Hi, uint32_t Lo) {
+  uint32_t Rd4 = (Lo >> 8) & 0x0f;
+  return Rd4;
+}
+
+/// Encode 16-bit immediate value for move instruction formats MOVT A1 and
+/// MOVW A2.
+///
+/// Imm4:Imm12 -> 000000000000:Imm4:0000:Imm12
+///
+/// Exact inverse of decodeImmMovtA1MovwA2.
+uint32_t encodeImmMovtA1MovwA2(uint16_t Value) {
+  uint32_t Imm4 = (Value >> 12) & 0x0f;
+  uint32_t Imm12 = Value & 0x0fff;
+  return (Imm4 << 16) | Imm12;
+}
+
+/// Decode 16-bit immediate value for move instruction formats MOVT A1 and
+/// MOVW A2.
+///
+/// 000000000000:Imm4:0000:Imm12 -> Imm4:Imm12
+///
+/// Exact inverse of encodeImmMovtA1MovwA2.
+uint16_t decodeImmMovtA1MovwA2(uint64_t Value) {
+  uint32_t Imm4 = (Value >> 16) & 0x0f;
+  uint32_t Imm12 = Value & 0x0fff;
+  return (Imm4 << 12) | Imm12;
+}
+
+/// Encode register ID for instruction formats MOVT A1 and
+/// MOVW A2.
+///
+/// Rd4 -> 0000000000000000:Rd4:000000000000
+///
+/// Only the low 4 bits of Value are used.
+uint32_t encodeRegMovtA1MovwA2(int64_t Value) {
+  uint32_t Rd4 = (Value & 0x00000f) << 12;
+  return Rd4;
+}
+
+/// Decode register ID for instruction formats MOVT A1 and
+/// MOVW A2.
+///
+/// 0000000000000000:Rd4:000000000000 -> Rd4
+///
+/// Exact inverse of encodeRegMovtA1MovwA2.
+int64_t decodeRegMovtA1MovwA2(uint64_t Value) {
+  uint32_t Rd4 = (Value >> 12) & 0x00000f;
+  return Rd4;
+}
+
+namespace {
+
+/// 32-bit Thumb instructions are stored as two little-endian halfwords.
+/// An instruction at address A encodes bytes A+1, A in the first halfword (Hi),
+/// followed by bytes A+3, A+2 in the second halfword (Lo).
+struct WritableThumbRelocation {
+  /// Create a writable reference to a Thumb32 fixup.
+  /// FixupPtr must point at the first byte of the instruction and remain
+  /// valid for the lifetime of this view (references alias the buffer).
+  WritableThumbRelocation(char *FixupPtr)
+      : Hi{*reinterpret_cast<support::ulittle16_t *>(FixupPtr)},
+        Lo{*reinterpret_cast<support::ulittle16_t *>(FixupPtr + 2)} {}
+
+  support::ulittle16_t &Hi; // First halfword
+  support::ulittle16_t &Lo; // Second halfword
+};
+
+/// Read-only view of a 32-bit Thumb instruction (two little-endian
+/// halfwords); see WritableThumbRelocation for the byte layout.
+struct ThumbRelocation {
+  /// Create a read-only reference to a Thumb32 fixup.
+  ThumbRelocation(const char *FixupPtr)
+      : Hi{*reinterpret_cast<const support::ulittle16_t *>(FixupPtr)},
+        Lo{*reinterpret_cast<const support::ulittle16_t *>(FixupPtr + 2)} {}
+
+  /// Create a read-only Thumb32 fixup from a writeable one.
+  ThumbRelocation(WritableThumbRelocation &Writable)
+      : Hi{Writable.Hi}, Lo(Writable.Lo) {}
+
+  const support::ulittle16_t &Hi; // First halfword
+  const support::ulittle16_t &Lo; // Second halfword
+};
+
/// Writable view of an Arm fixup, stored as a single little-endian 32-bit
/// word at the fixup address.
struct WritableArmRelocation {
  WritableArmRelocation(char *FixupPtr)
      : Wd{*reinterpret_cast<support::ulittle32_t *>(FixupPtr)} {}

  support::ulittle32_t &Wd;
};

/// Read-only view of an Arm fixup; same layout as WritableArmRelocation.
struct ArmRelocation {
  ArmRelocation(const char *FixupPtr)
      : Wd{*reinterpret_cast<const support::ulittle32_t *>(FixupPtr)} {}

  ArmRelocation(WritableArmRelocation &Writable) : Wd{Writable.Wd} {}

  const support::ulittle32_t &Wd;
};
+
/// Build a JITLinkError reporting that the two halfwords at a Thumb fixup
/// site do not match the opcode expected for the given edge kind.
Error makeUnexpectedOpcodeError(const LinkGraph &G, const ThumbRelocation &R,
                                Edge::Kind Kind) {
  return make_error<JITLinkError>(
      formatv("Invalid opcode [ {0:x4}, {1:x4} ] for relocation: {2}",
              static_cast<uint16_t>(R.Hi), static_cast<uint16_t>(R.Lo),
              G.getEdgeKindName(Kind)));
}

/// Build a JITLinkError reporting that the word at an Arm fixup site does
/// not match the opcode expected for the given edge kind.
Error makeUnexpectedOpcodeError(const LinkGraph &G, const ArmRelocation &R,
                                Edge::Kind Kind) {
  return make_error<JITLinkError>(
      formatv("Invalid opcode {0:x8} for relocation: {1}",
              static_cast<uint32_t>(R.Wd), G.getEdgeKindName(Kind)));
}
+
/// Compile-time check: is K within the Arm relocation kind range?
template <EdgeKind_aarch32 K> constexpr bool isArm() {
  return FirstArmRelocation <= K && K <= LastArmRelocation;
}
/// Compile-time check: is K within the Thumb relocation kind range?
template <EdgeKind_aarch32 K> constexpr bool isThumb() {
  return FirstThumbRelocation <= K && K <= LastThumbRelocation;
}

/// Verify that the masked instruction word matches the opcode defined in
/// FixupInfo for kind K.
template <EdgeKind_aarch32 K> static bool checkOpcodeArm(uint32_t Wd) {
  return (Wd & FixupInfo<K>::OpcodeMask) == FixupInfo<K>::Opcode;
}

/// Verify that both masked halfwords match the opcode defined in FixupInfo
/// for kind K.
template <EdgeKind_aarch32 K>
static bool checkOpcodeThumb(uint16_t Hi, uint16_t Lo) {
  return (Hi & FixupInfo<K>::OpcodeMask.Hi) == FixupInfo<K>::Opcode.Hi &&
         (Lo & FixupInfo<K>::OpcodeMask.Lo) == FixupInfo<K>::Opcode.Lo;
}
+
/// Registry mapping each aarch32 edge kind to its FixupInfo descriptor.
/// Entries are created eagerly in the constructor by compile-time recursion
/// over the Arm and Thumb kind ranges.
class FixupInfoTable {
  static constexpr size_t Items = LastRelocation + 1;

public:
  FixupInfoTable() {
    populateEntries<FirstArmRelocation, LastArmRelocation>();
    populateEntries<FirstThumbRelocation, LastThumbRelocation>();
  }

  /// Look up the descriptor for edge kind K. Kinds outside the populated
  /// ranges yield a null entry.
  const FixupInfoBase *getEntry(Edge::Kind K) {
    assert(K < Data.size() && "Index out of bounds");
    return Data.at(K).get();
  }

private:
  // Fill Data[K] and recurse for the next kind until LastK is reached.
  template <EdgeKind_aarch32 K, EdgeKind_aarch32 LastK> void populateEntries() {
    assert(K < Data.size() && "Index out of range");
    assert(Data.at(K) == nullptr && "Initialized entries are immutable");
    Data[K] = initEntry<K>();
    if constexpr (K < LastK) {
      constexpr auto Next = static_cast<EdgeKind_aarch32>(K + 1);
      populateEntries<Next, LastK>();
    }
  }

  // Create the descriptor for kind K and attach the matching opcode checker.
  template <EdgeKind_aarch32 K>
  static std::unique_ptr<FixupInfoBase> initEntry() {
    auto Entry = std::make_unique<FixupInfo<K>>();
    static_assert(isArm<K>() != isThumb<K>(), "Classes are mutually exclusive");
    if constexpr (isArm<K>())
      Entry->checkOpcode = checkOpcodeArm<K>;
    if constexpr (isThumb<K>())
      Entry->checkOpcode = checkOpcodeThumb<K>;
    return Entry;
  }

private:
  std::array<std::unique_ptr<FixupInfoBase>, Items> Data;
};

/// Singleton table instance, constructed on first access.
ManagedStatic<FixupInfoTable> DynFixupInfos;
+
+} // namespace
+
/// Check that the instruction at an Arm fixup site carries the opcode
/// expected for the given edge kind; returns the descriptive error otherwise.
static Error checkOpcode(LinkGraph &G, const ArmRelocation &R,
                         Edge::Kind Kind) {
  assert(Kind >= FirstArmRelocation && Kind <= LastArmRelocation &&
         "Edge kind must be Arm relocation");
  const FixupInfoBase *Entry = DynFixupInfos->getEntry(Kind);
  const FixupInfoArm &Info = *static_cast<const FixupInfoArm *>(Entry);
  assert(Info.checkOpcode && "Opcode check is mandatory for Arm edges");
  if (!Info.checkOpcode(R.Wd))
    return makeUnexpectedOpcodeError(G, R, Kind);

  return Error::success();
}

/// Check that the instruction at a Thumb fixup site carries the opcode
/// expected for the given edge kind; returns the descriptive error otherwise.
static Error checkOpcode(LinkGraph &G, const ThumbRelocation &R,
                         Edge::Kind Kind) {
  assert(Kind >= FirstThumbRelocation && Kind <= LastThumbRelocation &&
         "Edge kind must be Thumb relocation");
  const FixupInfoBase *Entry = DynFixupInfos->getEntry(Kind);
  const FixupInfoThumb &Info = *static_cast<const FixupInfoThumb *>(Entry);
  assert(Info.checkOpcode && "Opcode check is mandatory for Thumb edges");
  if (!Info.checkOpcode(R.Hi, R.Lo))
    return makeUnexpectedOpcodeError(G, R, Kind);

  return Error::success();
}
+
/// Public accessor for the per-kind fixup descriptor held in the singleton
/// FixupInfoTable.
const FixupInfoBase *FixupInfoBase::getDynFixupInfo(Edge::Kind K) {
  return DynFixupInfos->getEntry(K);
}
+
/// Check whether the register bits (per FixupInfo<Kind>::RegMask) of a Thumb
/// fixup site equal the given encoded register value.
template <EdgeKind_aarch32 Kind>
bool checkRegister(const ThumbRelocation &R, HalfWords Reg) {
  uint16_t Hi = R.Hi & FixupInfo<Kind>::RegMask.Hi;
  uint16_t Lo = R.Lo & FixupInfo<Kind>::RegMask.Lo;
  return Hi == Reg.Hi && Lo == Reg.Lo;
}

/// Check whether the register bits (per FixupInfo<Kind>::RegMask) of an Arm
/// fixup site equal the given encoded register value.
template <EdgeKind_aarch32 Kind>
bool checkRegister(const ArmRelocation &R, uint32_t Reg) {
  uint32_t Wd = R.Wd & FixupInfo<Kind>::RegMask;
  return Wd == Reg;
}
+
/// Write an encoded register value into the register bits of a Thumb fixup
/// site, leaving all other instruction bits untouched.
template <EdgeKind_aarch32 Kind>
void writeRegister(WritableThumbRelocation &R, HalfWords Reg) {
  static constexpr HalfWords Mask = FixupInfo<Kind>::RegMask;
  assert((Mask.Hi & Reg.Hi) == Reg.Hi && (Mask.Lo & Reg.Lo) == Reg.Lo &&
         "Value bits exceed bit range of given mask");
  R.Hi = (R.Hi & ~Mask.Hi) | Reg.Hi;
  R.Lo = (R.Lo & ~Mask.Lo) | Reg.Lo;
}

/// Write an encoded register value into the register bits of an Arm fixup
/// site, leaving all other instruction bits untouched.
template <EdgeKind_aarch32 Kind>
void writeRegister(WritableArmRelocation &R, uint32_t Reg) {
  static constexpr uint32_t Mask = FixupInfo<Kind>::RegMask;
  assert((Mask & Reg) == Reg && "Value bits exceed bit range of given mask");
  R.Wd = (R.Wd & ~Mask) | Reg;
}
+
/// Write an encoded immediate into the immediate bits of a Thumb fixup site,
/// leaving all other instruction bits untouched.
template <EdgeKind_aarch32 Kind>
void writeImmediate(WritableThumbRelocation &R, HalfWords Imm) {
  static constexpr HalfWords Mask = FixupInfo<Kind>::ImmMask;
  assert((Mask.Hi & Imm.Hi) == Imm.Hi && (Mask.Lo & Imm.Lo) == Imm.Lo &&
         "Value bits exceed bit range of given mask");
  R.Hi = (R.Hi & ~Mask.Hi) | Imm.Hi;
  R.Lo = (R.Lo & ~Mask.Lo) | Imm.Lo;
}

/// Write an encoded immediate into the immediate bits of an Arm fixup site,
/// leaving all other instruction bits untouched.
template <EdgeKind_aarch32 Kind>
void writeImmediate(WritableArmRelocation &R, uint32_t Imm) {
  static constexpr uint32_t Mask = FixupInfo<Kind>::ImmMask;
  assert((Mask & Imm) == Imm && "Value bits exceed bit range of given mask");
  R.Wd = (R.Wd & ~Mask) | Imm;
}
+
/// Read the implicit addend stored at a data relocation site, honoring the
/// graph's endianness. Data_PRel31 keeps only the low 31 bits; the other
/// supported kinds read a full sign-extended 32-bit value.
Expected<int64_t> readAddendData(LinkGraph &G, Block &B, Edge::OffsetT Offset,
                                 Edge::Kind Kind) {
  endianness Endian = G.getEndianness();
  const char *BlockWorkingMem = B.getContent().data();
  const char *FixupPtr = BlockWorkingMem + Offset;

  switch (Kind) {
  case Data_Delta32:
  case Data_Pointer32:
  case Data_RequestGOTAndTransformToDelta32:
    return SignExtend64<32>(support::endian::read32(FixupPtr, Endian));
  case Data_PRel31:
    return SignExtend64<31>(support::endian::read32(FixupPtr, Endian));
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " can not read implicit addend for aarch32 edge kind " +
        G.getEdgeKindName(Kind));
  }
}
+
/// Read the implicit addend encoded in the instruction bits at an Arm
/// relocation site. Fails if the opcode does not match the edge kind, or the
/// kind is unsupported here.
Expected<int64_t> readAddendArm(LinkGraph &G, Block &B, Edge::OffsetT Offset,
                                Edge::Kind Kind) {
  ArmRelocation R(B.getContent().data() + Offset);
  if (Error Err = checkOpcode(G, R, Kind))
    return std::move(Err);

  switch (Kind) {
  case Arm_Call:
  case Arm_Jump24:
    return decodeImmBA1BlA1BlxA2(R.Wd);

  case Arm_MovtAbs:
  case Arm_MovwAbsNC:
    return decodeImmMovtA1MovwA2(R.Wd);

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " can not read implicit addend for aarch32 edge kind " +
        G.getEdgeKindName(Kind));
  }
}
+
+Expected<int64_t> readAddendThumb(LinkGraph &G, Block &B, Edge::OffsetT Offset,
+ Edge::Kind Kind, const ArmConfig &ArmCfg) {
+ ThumbRelocation R(B.getContent().data() + Offset);
+ if (Error Err = checkOpcode(G, R, Kind))
+ return std::move(Err);
+
+ switch (Kind) {
+ case Thumb_Call:
+ case Thumb_Jump24:
+ return LLVM_LIKELY(ArmCfg.J1J2BranchEncoding)
+ ? decodeImmBT4BlT1BlxT2_J1J2(R.Hi, R.Lo)
+ : decodeImmBT4BlT1BlxT2(R.Hi, R.Lo);
+
+ case Thumb_MovwAbsNC:
+ case Thumb_MovwPrelNC:
+ // Initial addend is interpreted as a signed value
+ return SignExtend64<16>(decodeImmMovtT1MovwT3(R.Hi, R.Lo));
+
+ case Thumb_MovtAbs:
+ case Thumb_MovtPrel:
+ // Initial addend is interpreted as a signed value
+ return SignExtend64<16>(decodeImmMovtT1MovwT3(R.Hi, R.Lo));
+
+ default:
+ return make_error<JITLinkError>(
+ "In graph " + G.getName() + ", section " + B.getSection().getName() +
+ " can not read implicit addend for aarch32 edge kind " +
+ G.getEdgeKindName(Kind));
+ }
+}
+
/// Apply the fixup described by edge E at a data relocation site in block B,
/// writing the resolved value with the graph's endianness.
Error applyFixupData(LinkGraph &G, Block &B, const Edge &E) {
  using namespace support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();

  Edge::Kind Kind = E.getKind();
  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  int64_t Addend = E.getAddend();
  Symbol &TargetSymbol = E.getTarget();
  uint64_t TargetAddress = TargetSymbol.getAddress().getValue();

  // Data relocations have alignment 1, size 4 (except R_ARM_ABS8 and
  // R_ARM_ABS16) and write the full 32-bit result (except R_ARM_PREL31).
  switch (Kind) {
  case Data_Delta32: {
    // PC-relative delta; must fit a signed 32-bit value.
    int64_t Value = TargetAddress - FixupAddress + Addend;
    if (!isInt<32>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    if (LLVM_LIKELY(G.getEndianness() == endianness::little))
      endian::write32le(FixupPtr, Value);
    else
      endian::write32be(FixupPtr, Value);
    return Error::success();
  }
  case Data_Pointer32: {
    // Absolute address; must fit an unsigned 32-bit value.
    int64_t Value = TargetAddress + Addend;
    if (!isUInt<32>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    if (LLVM_LIKELY(G.getEndianness() == endianness::little))
      endian::write32le(FixupPtr, Value);
    else
      endian::write32be(FixupPtr, Value);
    return Error::success();
  }
  case Data_PRel31: {
    // 31-bit PC-relative delta; the stored MSB is preserved.
    int64_t Value = TargetAddress - FixupAddress + Addend;
    if (!isInt<31>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    if (LLVM_LIKELY(G.getEndianness() == endianness::little)) {
      uint32_t MSB = endian::read32le(FixupPtr) & 0x80000000;
      endian::write32le(FixupPtr, MSB | (Value & ~0x80000000));
    } else {
      uint32_t MSB = endian::read32be(FixupPtr) & 0x80000000;
      endian::write32be(FixupPtr, MSB | (Value & ~0x80000000));
    }
    return Error::success();
  }
  case Data_RequestGOTAndTransformToDelta32:
    // GOT edges are rewritten to Data_Delta32 before fixup application.
    llvm_unreachable("Should be transformed");
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " encountered unfixable aarch32 edge kind " +
        G.getEdgeKindName(E.getKind()));
  }
}
+
/// Apply the fixup described by edge E at an Arm relocation site in block B.
/// Verifies the opcode first, then patches the immediate (and for calls,
/// possibly the opcode itself for BL<->BLX interworking).
Error applyFixupArm(LinkGraph &G, Block &B, const Edge &E) {
  WritableArmRelocation R(B.getAlreadyMutableContent().data() + E.getOffset());
  Edge::Kind Kind = E.getKind();
  if (Error Err = checkOpcode(G, R, Kind))
    return Err;

  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  int64_t Addend = E.getAddend();
  Symbol &TargetSymbol = E.getTarget();
  uint64_t TargetAddress = TargetSymbol.getAddress().getValue();

  switch (Kind) {
  case Arm_Jump24: {
    // Plain branches cannot switch instruction sets; a Thumb target needs a
    // separate interworking stub.
    if (hasTargetFlags(TargetSymbol, ThumbSymbol))
      return make_error<JITLinkError>("Branch relocation needs interworking "
                                      "stub when bridging to Thumb: " +
                                      StringRef(G.getEdgeKindName(Kind)));

    int64_t Value = TargetAddress - FixupAddress + Addend;

    if (!isInt<26>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    writeImmediate<Arm_Jump24>(R, encodeImmBA1BlA1BlxA2(Value));

    return Error::success();
  }
  case Arm_Call: {
    if ((R.Wd & FixupInfo<Arm_Call>::CondMask) !=
        FixupInfo<Arm_Call>::Unconditional)
      return make_error<JITLinkError>("Relocation expects an unconditional "
                                      "BL/BLX branch instruction: " +
                                      StringRef(G.getEdgeKindName(Kind)));

    int64_t Value = TargetAddress - FixupAddress + Addend;

    // The call instruction itself is Arm. The call destination can either be
    // Thumb or Arm. We use BL to stay in Arm and BLX to change to Thumb.
    bool TargetIsThumb = hasTargetFlags(TargetSymbol, ThumbSymbol);
    bool InstrIsBlx = (~R.Wd & FixupInfo<Arm_Call>::BitBlx) == 0;
    if (TargetIsThumb != InstrIsBlx) {
      if (LLVM_LIKELY(TargetIsThumb)) {
        // Change opcode BL -> BLX
        R.Wd = R.Wd | FixupInfo<Arm_Call>::BitBlx;
        R.Wd = R.Wd & ~FixupInfo<Arm_Call>::BitH;
      } else {
        // Change opcode BLX -> BL
        R.Wd = R.Wd & ~FixupInfo<Arm_Call>::BitBlx;
      }
    }

    if (!isInt<26>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    writeImmediate<Arm_Call>(R, encodeImmBA1BlA1BlxA2(Value));

    return Error::success();
  }
  case Arm_MovwAbsNC: {
    // Write the lower 16 bits of the absolute address.
    uint16_t Value = (TargetAddress + Addend) & 0xffff;
    writeImmediate<Arm_MovwAbsNC>(R, encodeImmMovtA1MovwA2(Value));
    return Error::success();
  }
  case Arm_MovtAbs: {
    // Write the upper 16 bits of the absolute address.
    uint16_t Value = ((TargetAddress + Addend) >> 16) & 0xffff;
    writeImmediate<Arm_MovtAbs>(R, encodeImmMovtA1MovwA2(Value));
    return Error::success();
  }
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " encountered unfixable aarch32 edge kind " +
        G.getEdgeKindName(E.getKind()));
  }
}
+
/// Apply the fixup described by edge E at a Thumb relocation site in block B.
/// Verifies the opcode first, then patches the immediate (and for calls,
/// possibly the opcode itself for BL<->BLX interworking).
Error applyFixupThumb(LinkGraph &G, Block &B, const Edge &E,
                      const ArmConfig &ArmCfg) {
  WritableThumbRelocation R(B.getAlreadyMutableContent().data() +
                            E.getOffset());
  Edge::Kind Kind = E.getKind();
  if (Error Err = checkOpcode(G, R, Kind))
    return Err;

  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  int64_t Addend = E.getAddend();
  Symbol &TargetSymbol = E.getTarget();
  uint64_t TargetAddress = TargetSymbol.getAddress().getValue();

  switch (Kind) {
  case Thumb_Jump24: {
    // Plain branches cannot switch instruction sets; an Arm target needs a
    // separate interworking stub.
    if (!hasTargetFlags(TargetSymbol, ThumbSymbol))
      return make_error<JITLinkError>("Branch relocation needs interworking "
                                      "stub when bridging to ARM: " +
                                      StringRef(G.getEdgeKindName(Kind)));

    int64_t Value = TargetAddress - FixupAddress + Addend;
    if (LLVM_LIKELY(ArmCfg.J1J2BranchEncoding)) {
      if (!isInt<25>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Jump24>(R, encodeImmBT4BlT1BlxT2_J1J2(Value));
    } else {
      if (!isInt<22>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Jump24>(R, encodeImmBT4BlT1BlxT2(Value));
    }

    return Error::success();
  }

  case Thumb_Call: {
    int64_t Value = TargetAddress - FixupAddress + Addend;

    // The call instruction itself is Thumb. The call destination can either be
    // Thumb or Arm. We use BL to stay in Thumb and BLX to change to Arm.
    bool TargetIsArm = !hasTargetFlags(TargetSymbol, ThumbSymbol);
    bool InstrIsBlx = (R.Lo & FixupInfo<Thumb_Call>::LoBitNoBlx) == 0;
    if (TargetIsArm != InstrIsBlx) {
      if (LLVM_LIKELY(TargetIsArm)) {
        // Change opcode BL -> BLX and fix range value: account for 4-byte
        // aligned destination while instruction may only be 2-byte aligned
        R.Lo = R.Lo & ~FixupInfo<Thumb_Call>::LoBitNoBlx;
        R.Lo = R.Lo & ~FixupInfo<Thumb_Call>::LoBitH;
        Value = alignTo(Value, 4);
      } else {
        // Change opcode BLX -> BL
        R.Lo = R.Lo & ~FixupInfo<Thumb_Call>::LoBitNoBlx;
      }
    }

    if (LLVM_LIKELY(ArmCfg.J1J2BranchEncoding)) {
      if (!isInt<25>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Call>(R, encodeImmBT4BlT1BlxT2_J1J2(Value));
    } else {
      if (!isInt<22>(Value))
        return makeTargetOutOfRangeError(G, B, E);
      writeImmediate<Thumb_Call>(R, encodeImmBT4BlT1BlxT2(Value));
    }

    assert(((R.Lo & FixupInfo<Thumb_Call>::LoBitNoBlx) ||
            (R.Lo & FixupInfo<Thumb_Call>::LoBitH) == 0) &&
           "Opcode BLX implies H bit is clear (avoid UB in BLX T2)");
    return Error::success();
  }

  case Thumb_MovwAbsNC: {
    // Write the lower 16 bits of the absolute address.
    uint16_t Value = (TargetAddress + Addend) & 0xffff;
    writeImmediate<Thumb_MovwAbsNC>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }
  case Thumb_MovtAbs: {
    // Write the upper 16 bits of the absolute address.
    uint16_t Value = ((TargetAddress + Addend) >> 16) & 0xffff;
    writeImmediate<Thumb_MovtAbs>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }
  case Thumb_MovwPrelNC: {
    // Write the lower 16 bits of the PC-relative delta.
    uint16_t Value = ((TargetAddress + Addend - FixupAddress) & 0xffff);
    writeImmediate<Thumb_MovwPrelNC>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }
  case Thumb_MovtPrel: {
    // Write the upper 16 bits of the PC-relative delta.
    uint16_t Value = (((TargetAddress + Addend - FixupAddress) >> 16) & 0xffff);
    writeImmediate<Thumb_MovtPrel>(R, encodeImmMovtT1MovwT3(Value));
    return Error::success();
  }

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " encountered unfixable aarch32 edge kind " +
        G.getEdgeKindName(E.getKind()));
  }
}
+
/// Zero-initialized 4-byte template for a single GOT slot.
const uint8_t GOTEntryInit[] = {
    0x00,
    0x00,
    0x00,
    0x00,
};

/// Create a new node in the link-graph for the given pointer value.
template <size_t Size>
static Block &allocPointer(LinkGraph &G, Section &S,
                           const uint8_t (&Content)[Size]) {
  static_assert(Size == 4, "Pointers are 32-bit");
  constexpr uint64_t Alignment = 4;
  ArrayRef<char> Init(reinterpret_cast<const char *>(Content), Size);
  return G.createContentBlock(S, Init, orc::ExecutorAddr(), Alignment, 0);
}

/// Create a GOT entry block for Target: a 4-byte pointer slot with a
/// Data_Pointer32 edge that resolves to the target's address.
Symbol &GOTBuilder::createEntry(LinkGraph &G, Symbol &Target) {
  if (!GOTSection)
    GOTSection = &G.createSection(getSectionName(), orc::MemProt::Read);
  Block &B = allocPointer(G, *GOTSection, GOTEntryInit);
  constexpr int64_t GOTEntryAddend = 0;
  B.addEdge(Data_Pointer32, 0, Target, GOTEntryAddend);
  return G.addAnonymousSymbol(B, 0, B.getSize(), false, false);
}
+
/// Rewrite GOT-requesting edges: Data_RequestGOTAndTransformToDelta32 becomes
/// a Data_Delta32 edge retargeted at the (possibly newly created) GOT entry.
/// Returns true if the edge was transformed.
bool GOTBuilder::visitEdge(LinkGraph &G, Block *B, Edge &E) {
  Edge::Kind KindToSet = Edge::Invalid;
  switch (E.getKind()) {
  case aarch32::Data_RequestGOTAndTransformToDelta32: {
    KindToSet = aarch32::Data_Delta32;
    break;
  }
  default:
    return false;
  }
  LLVM_DEBUG(dbgs() << "  Transforming " << G.getEdgeKindName(E.getKind())
                    << " edge at " << B->getFixupAddress(E) << " ("
                    << B->getAddress() << " + "
                    << formatv("{0:x}", E.getOffset()) << ") into "
                    << G.getEdgeKindName(KindToSet) << "\n");
  E.setKind(KindToSet);
  E.setTarget(getEntryForTarget(G, E.getTarget()));
  return true;
}
+
/// Interworking stub template for pre-v7 targets: a Thumb entry (bx pc) that
/// falls through to an Arm ldr-pc sequence loading the absolute destination
/// from the trailing literal word.
const uint8_t ArmThumbv5LdrPc[] = {
    0x78, 0x47,             // bx pc
    0xfd, 0xe7,             // b #-6 ; Arm recommended sequence to follow bx pc
    0x04, 0xf0, 0x1f, 0xe5, // ldr pc, [pc,#-4] ; L1
    0x00, 0x00, 0x00, 0x00, // L1: .word S
};

/// Arm v7 absolute-address stub: materialize the target in r12 and branch.
const uint8_t Armv7ABS[] = {
    0x00, 0xc0, 0x00, 0xe3, // movw r12, #0x0000    ; lower 16-bit
    0x00, 0xc0, 0x40, 0xe3, // movt r12, #0x0000    ; upper 16-bit
    0x1c, 0xff, 0x2f, 0xe1  // bx   r12
};

/// Thumb v7 absolute-address stub: materialize the target in r12 and branch.
const uint8_t Thumbv7ABS[] = {
    0x40, 0xf2, 0x00, 0x0c, // movw r12, #0x0000    ; lower 16-bit
    0xc0, 0xf2, 0x00, 0x0c, // movt r12, #0x0000    ; upper 16-bit
    0x60, 0x47              // bx   r12
};

/// Create a new node in the link-graph for the given stub template.
template <size_t Size>
static Block &allocStub(LinkGraph &G, Section &S, const uint8_t (&Code)[Size]) {
  constexpr uint64_t Alignment = 4;
  ArrayRef<char> Template(reinterpret_cast<const char *>(Code), Size);
  return G.createContentBlock(S, Template, orc::ExecutorAddr(), Alignment, 0);
}
+
/// Create a pre-v7 interworking stub block: the target address is stored as a
/// Data_Pointer32 literal at offset 8 of the ArmThumbv5LdrPc template.
static Block &createStubPrev7(LinkGraph &G, Section &S, Symbol &Target) {
  Block &B = allocStub(G, S, ArmThumbv5LdrPc);
  B.addEdge(Data_Pointer32, 8, Target, 0);
  return B;
}

/// Create a Thumb v7 stub block: movw/movt edges fill in the target address.
static Block &createStubThumbv7(LinkGraph &G, Section &S, Symbol &Target) {
  Block &B = allocStub(G, S, Thumbv7ABS);
  B.addEdge(Thumb_MovwAbsNC, 0, Target, 0);
  B.addEdge(Thumb_MovtAbs, 4, Target, 0);

  [[maybe_unused]] const char *StubPtr = B.getContent().data();
  [[maybe_unused]] HalfWords Reg12 = encodeRegMovtT1MovwT3(12);
  assert(checkRegister<Thumb_MovwAbsNC>(StubPtr, Reg12) &&
         checkRegister<Thumb_MovtAbs>(StubPtr + 4, Reg12) &&
         "Linker generated stubs may only corrupt register r12 (IP)");
  return B;
}

/// Create an Arm v7 stub block: movw/movt edges fill in the target address.
static Block &createStubArmv7(LinkGraph &G, Section &S, Symbol &Target) {
  Block &B = allocStub(G, S, Armv7ABS);
  B.addEdge(Arm_MovwAbsNC, 0, Target, 0);
  B.addEdge(Arm_MovtAbs, 4, Target, 0);

  [[maybe_unused]] const char *StubPtr = B.getContent().data();
  [[maybe_unused]] uint32_t Reg12 = encodeRegMovtA1MovwA2(12);
  assert(checkRegister<Arm_MovwAbsNC>(StubPtr, Reg12) &&
         checkRegister<Arm_MovtAbs>(StubPtr + 4, Reg12) &&
         "Linker generated stubs may only corrupt register r12 (IP)");
  return B;
}
+
/// Decide whether edge E requires a stub: always for external branch targets,
/// and for local targets only when the branch would have to switch between
/// Arm and Thumb without an instruction that supports it.
static bool needsStub(const Edge &E) {
  Symbol &Target = E.getTarget();

  // Create stubs for external branch targets.
  if (!Target.isDefined()) {
    switch (E.getKind()) {
    case Arm_Call:
    case Arm_Jump24:
    case Thumb_Call:
    case Thumb_Jump24:
      return true;
    default:
      return false;
    }
  }

  // For local targets, create interworking stubs if we switch Arm/Thumb with an
  // instruction that cannot switch the instruction set state natively.
  bool TargetIsThumb = Target.getTargetFlags() & ThumbSymbol;
  switch (E.getKind()) {
  case Arm_Jump24:
    return TargetIsThumb; // Branch to Thumb needs interworking stub
  case Thumb_Jump24:
    return !TargetIsThumb; // Branch to Arm needs interworking stub
  default:
    break;
  }

  return false;
}
+
// The ArmThumbv5LdrPc stub has 2 entrypoints: Thumb at offset 0 is taken only
// for Thumb B instructions. Thumb BL is rewritten to BLX and takes the Arm
// entrypoint at offset 4. Arm branches always use that one.
Symbol *StubsManager_prev7::getOrCreateSlotEntrypoint(LinkGraph &G,
                                                      StubMapEntry &Slot,
                                                      bool Thumb) {
  constexpr orc::ExecutorAddrDiff ThumbEntrypointOffset = 0;
  constexpr orc::ExecutorAddrDiff ArmEntrypointOffset = 4;
  if (Thumb && !Slot.ThumbEntry) {
    // Lazily materialize the Thumb entry symbol and tag it as Thumb code.
    Slot.ThumbEntry =
        &G.addAnonymousSymbol(*Slot.B, ThumbEntrypointOffset, 4, true, false);
    Slot.ThumbEntry->setTargetFlags(ThumbSymbol);
  }
  if (!Thumb && !Slot.ArmEntry)
    Slot.ArmEntry =
        &G.addAnonymousSymbol(*Slot.B, ArmEntrypointOffset, 8, true, false);
  return Thumb ? Slot.ThumbEntry : Slot.ArmEntry;
}
+
/// Route edge E through a pre-v7 stub if needsStub() says so. Stubs are
/// deduplicated per target name; the entrypoint (Thumb or Arm) is chosen by
/// the edge kind. Returns true if the edge was retargeted.
bool StubsManager_prev7::visitEdge(LinkGraph &G, Block *B, Edge &E) {
  if (!needsStub(E))
    return false;

  Symbol &Target = E.getTarget();
  assert(Target.hasName() && "Edge cannot point to anonymous target");
  auto [Slot, NewStub] = getStubMapSlot(Target.getName());

  if (NewStub) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    LLVM_DEBUG({
      dbgs() << "  Created stub entry for " << Target.getName() << " in "
             << StubsSection->getName() << "\n";
    });
    Slot->B = &createStubPrev7(G, *StubsSection, Target);
  }

  // The ArmThumbv5LdrPc stub has 2 entrypoints: Thumb at offset 0 is taken only
  // for Thumb B instructions. Thumb BL is rewritten to BLX and takes the Arm
  // entrypoint at offset 4. Arm branches always use that one.
  bool UseThumb = E.getKind() == Thumb_Jump24;
  Symbol *StubEntrypoint = getOrCreateSlotEntrypoint(G, *Slot, UseThumb);

  LLVM_DEBUG({
    dbgs() << "    Using " << (UseThumb ? "Thumb" : "Arm") << " entrypoint "
           << *StubEntrypoint << " in "
           << StubEntrypoint->getBlock().getSection().getName() << "\n";
  });

  E.setTarget(*StubEntrypoint);
  return true;
}
+
/// Route edge E through a v7 stub if needsStub() says so. A separate Arm or
/// Thumb stub is created (and cached per target name) to match the
/// instruction set state at the relocation site. Returns true if retargeted.
bool StubsManager_v7::visitEdge(LinkGraph &G, Block *B, Edge &E) {
  if (!needsStub(E))
    return false;

  // Stub Arm/Thumb follows instruction set state at relocation site.
  // TODO: We may reduce them at relaxation time and reuse freed slots.
  bool MakeThumb = (E.getKind() > LastArmRelocation);
  LLVM_DEBUG(dbgs() << "  Preparing " << (MakeThumb ? "Thumb" : "Arm")
                    << " stub for " << G.getEdgeKindName(E.getKind())
                    << " edge at " << B->getFixupAddress(E) << " ("
                    << B->getAddress() << " + "
                    << formatv("{0:x}", E.getOffset()) << ")\n");

  Symbol &Target = E.getTarget();
  assert(Target.hasName() && "Edge cannot point to anonymous target");
  Symbol *&StubSymbol = getStubSymbolSlot(Target.getName(), MakeThumb);

  if (!StubSymbol) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    Block &B = MakeThumb ? createStubThumbv7(G, *StubsSection, Target)
                         : createStubArmv7(G, *StubsSection, Target);
    StubSymbol = &G.addAnonymousSymbol(B, 0, B.getSize(), true, false);
    if (MakeThumb)
      StubSymbol->setTargetFlags(ThumbSymbol);

    LLVM_DEBUG({
      dbgs() << "    Created " << (MakeThumb ? "Thumb" : "Arm") << " entry for "
             << Target.getName() << " in " << StubsSection->getName() << ": "
             << *StubSymbol << "\n";
    });
  }

  assert(MakeThumb == (StubSymbol->getTargetFlags() & ThumbSymbol) &&
         "Instruction set states of stub and relocation site should be equal");
  LLVM_DEBUG({
    dbgs() << "    Using " << (MakeThumb ? "Thumb" : "Arm") << " entry "
           << *StubSymbol << " in "
           << StubSymbol->getBlock().getSection().getName() << "\n";
  });

  E.setTarget(*StubSymbol);
  return true;
}
+
/// Return a human-readable name for the given aarch32 edge kind; unknown
/// kinds fall back to the generic JITLink names.
const char *getEdgeKindName(Edge::Kind K) {
#define KIND_NAME_CASE(K)                                                      \
  case K:                                                                      \
    return #K;

  switch (K) {
    KIND_NAME_CASE(Data_Delta32)
    KIND_NAME_CASE(Data_Pointer32)
    KIND_NAME_CASE(Data_PRel31)
    KIND_NAME_CASE(Data_RequestGOTAndTransformToDelta32)
    KIND_NAME_CASE(Arm_Call)
    KIND_NAME_CASE(Arm_Jump24)
    KIND_NAME_CASE(Arm_MovwAbsNC)
    KIND_NAME_CASE(Arm_MovtAbs)
    KIND_NAME_CASE(Thumb_Call)
    KIND_NAME_CASE(Thumb_Jump24)
    KIND_NAME_CASE(Thumb_MovwAbsNC)
    KIND_NAME_CASE(Thumb_MovtAbs)
    KIND_NAME_CASE(Thumb_MovwPrelNC)
    KIND_NAME_CASE(Thumb_MovtPrel)
    KIND_NAME_CASE(None)
  default:
    return getGenericEdgeKindName(K);
  }
#undef KIND_NAME_CASE
}
+
/// Return the stringified name of an ARMBuildAttrs CPUArch value. The switch
/// is exhaustive over the enum; a missing enumerator is a programmer error.
const char *getCPUArchName(ARMBuildAttrs::CPUArch K) {
#define CPUARCH_NAME_CASE(K)                                                   \
  case K:                                                                      \
    return #K;

  using namespace ARMBuildAttrs;
  switch (K) {
    CPUARCH_NAME_CASE(Pre_v4)
    CPUARCH_NAME_CASE(v4)
    CPUARCH_NAME_CASE(v4T)
    CPUARCH_NAME_CASE(v5T)
    CPUARCH_NAME_CASE(v5TE)
    CPUARCH_NAME_CASE(v5TEJ)
    CPUARCH_NAME_CASE(v6)
    CPUARCH_NAME_CASE(v6KZ)
    CPUARCH_NAME_CASE(v6T2)
    CPUARCH_NAME_CASE(v6K)
    CPUARCH_NAME_CASE(v7)
    CPUARCH_NAME_CASE(v6_M)
    CPUARCH_NAME_CASE(v6S_M)
    CPUARCH_NAME_CASE(v7E_M)
    CPUARCH_NAME_CASE(v8_A)
    CPUARCH_NAME_CASE(v8_R)
    CPUARCH_NAME_CASE(v8_M_Base)
    CPUARCH_NAME_CASE(v8_M_Main)
    CPUARCH_NAME_CASE(v8_1_M_Main)
    CPUARCH_NAME_CASE(v9_A)
  }
  llvm_unreachable("Missing CPUArch in switch?");
#undef CPUARCH_NAME_CASE
}
+
+} // namespace aarch32
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp
new file mode 100644
index 000000000000..cc58255a338d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/aarch64.cpp
@@ -0,0 +1,81 @@
+//===---- aarch64.cpp - Generic JITLink aarch64 edge kinds, utilities -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing aarch64 objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace aarch64 {
+
/// Zero-initialized 8-byte template for a null pointer slot.
const char NullPointerContent[8] = {0x00, 0x00, 0x00, 0x00,
                                    0x00, 0x00, 0x00, 0x00};

/// Jump stub template: load the pointer adjacent to the ADRP page and branch
/// through x16 (immediates are filled in by relocation edges).
const char PointerJumpStubContent[12] = {
    0x10, 0x00, 0x00, (char)0x90u, // ADRP x16, <imm>@page21
    0x10, 0x02, 0x40, (char)0xf9u, // LDR  x16, [x16, <imm>@pageoff12]
    0x00, 0x02, 0x1f, (char)0xd6u  // BR   x16
};
+
/// Return a human-readable name for the given aarch64 edge kind; unknown
/// kinds fall back to the generic JITLink names.
const char *getEdgeKindName(Edge::Kind R) {
  switch (R) {
  case Pointer64:
    return "Pointer64";
  case Pointer32:
    return "Pointer32";
  case Delta64:
    return "Delta64";
  case Delta32:
    return "Delta32";
  case NegDelta64:
    return "NegDelta64";
  case NegDelta32:
    return "NegDelta32";
  case Branch26PCRel:
    return "Branch26PCRel";
  case MoveWide16:
    return "MoveWide16";
  case LDRLiteral19:
    return "LDRLiteral19";
  case TestAndBranch14PCRel:
    return "TestAndBranch14PCRel";
  case CondBranch19PCRel:
    return "CondBranch19PCRel";
  case ADRLiteral21:
    return "ADRLiteral21";
  case Page21:
    return "Page21";
  case PageOffset12:
    return "PageOffset12";
  case RequestGOTAndTransformToPage21:
    return "RequestGOTAndTransformToPage21";
  case RequestGOTAndTransformToPageOffset12:
    return "RequestGOTAndTransformToPageOffset12";
  case RequestGOTAndTransformToDelta32:
    return "RequestGOTAndTransformToDelta32";
  case RequestTLVPAndTransformToPage21:
    return "RequestTLVPAndTransformToPage21";
  case RequestTLVPAndTransformToPageOffset12:
    return "RequestTLVPAndTransformToPageOffset12";
  case RequestTLSDescEntryAndTransformToPage21:
    return "RequestTLSDescEntryAndTransformToPage21";
  case RequestTLSDescEntryAndTransformToPageOffset12:
    return "RequestTLSDescEntryAndTransformToPageOffset12";
  default:
    return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
  }
}
+
+} // namespace aarch64
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/i386.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/i386.cpp
new file mode 100644
index 000000000000..e984bb10983d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/i386.cpp
@@ -0,0 +1,91 @@
+//===---- i386.cpp - Generic JITLink i386 edge kinds, utilities -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing i386 objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/i386.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm::jitlink::i386 {
+
/// Return a human-readable name for the given i386 edge kind; unknown kinds
/// fall past the switch to the generic JITLink names.
const char *getEdgeKindName(Edge::Kind K) {
  switch (K) {
  case None:
    return "None";
  case Pointer32:
    return "Pointer32";
  case PCRel32:
    return "PCRel32";
  case Pointer16:
    return "Pointer16";
  case PCRel16:
    return "PCRel16";
  case Delta32:
    return "Delta32";
  case Delta32FromGOT:
    return "Delta32FromGOT";
  case RequestGOTAndTransformToDelta32FromGOT:
    return "RequestGOTAndTransformToDelta32FromGOT";
  case BranchPCRel32:
    return "BranchPCRel32";
  case BranchPCRel32ToPtrJumpStub:
    return "BranchPCRel32ToPtrJumpStub";
  case BranchPCRel32ToPtrJumpStubBypassable:
    return "BranchPCRel32ToPtrJumpStubBypassable";
  }

  return getGenericEdgeKindName(K);
}

/// Zero-initialized template for a null pointer slot.
const char NullPointerContent[PointerSize] = {0x00, 0x00, 0x00, 0x00};

/// Jump stub template: indirect jmp through an absolute 32-bit pointer slot
/// (FF 25 <abs32>); the slot address is filled in by a relocation edge.
const char PointerJumpStubContent[6] = {
    static_cast<char>(0xFFu), 0x25, 0x00, 0x00, 0x00, 0x00};
+
/// Bypass jump stubs where possible: rewrite each bypassable stub branch as a
/// direct branch to the stub's ultimate target when the 32-bit displacement
/// fits. The stub and GOT blocks themselves are left for dead-block pruning.
Error optimizeGOTAndStubAccesses(LinkGraph &G) {
  LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");

  for (auto *B : G.blocks())
    for (auto &E : B->edges()) {
      if (E.getKind() == i386::BranchPCRel32ToPtrJumpStubBypassable) {
        auto &StubBlock = E.getTarget().getBlock();
        assert(StubBlock.getSize() == sizeof(PointerJumpStubContent) &&
               "Stub block should be stub sized");
        assert(StubBlock.edges_size() == 1 &&
               "Stub block should only have one outgoing edge");

        // Follow stub -> GOT slot -> real target.
        auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT block should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        orc::ExecutorAddr EdgeAddr = B->getAddress() + E.getOffset();
        orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();

        // +4 adjusts for the 4-byte immediate field at the edge offset;
        // presumably the displacement is taken from the end of that field —
        // confirm against the i386 BranchPCRel32 fixup logic.
        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (isInt<32>(Displacement)) {
          E.setKind(i386::BranchPCRel32);
          E.setTarget(GOTTarget);
          LLVM_DEBUG({
            dbgs() << "  Replaced stub branch with direct branch:\n    ";
            printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      }
    }

  return Error::success();
}
+
+} // namespace llvm::jitlink::i386
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/loongarch.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/loongarch.cpp
new file mode 100644
index 000000000000..d1e44ec187cc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/loongarch.cpp
@@ -0,0 +1,60 @@
+//===--- loongarch.cpp - Generic JITLink loongarch edge kinds, utilities --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing loongarch objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/loongarch.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace loongarch {
+
+const char NullPointerContent[8] = {0x00, 0x00, 0x00, 0x00, // Zero-filled 8-byte content for a null pointer / empty GOT slot.
+ 0x00, 0x00, 0x00, 0x00};
+
+const uint8_t LA64StubContent[StubEntrySize] = { // LA64 jump stub: load 64-bit target address, then jump through $t8.
+ 0x14, 0x00, 0x00, 0x1a, // pcalau12i $t8, %page20(imm)
+ 0x94, 0x02, 0xc0, 0x28, // ld.d $t8, $t8, %pageoff12(imm)
+ 0x80, 0x02, 0x00, 0x4c // jr $t8
+};
+
+const uint8_t LA32StubContent[StubEntrySize] = { // LA32 variant: ld.w loads a 32-bit pointer instead of ld.d.
+ 0x14, 0x00, 0x00, 0x1a, // pcalau12i $t8, %page20(imm)
+ 0x94, 0x02, 0x80, 0x28, // ld.w $t8, $t8, %pageoff12(imm)
+ 0x80, 0x02, 0x00, 0x4c // jr $t8
+};
+
+const char *getEdgeKindName(Edge::Kind K) { // Human-readable name for a loongarch edge kind (debug/diagnostic output).
+#define KIND_NAME_CASE(K) \
+ case K: \
+ return #K;
+
+ switch (K) {
+ KIND_NAME_CASE(Pointer64)
+ KIND_NAME_CASE(Pointer32)
+ KIND_NAME_CASE(Delta32)
+ KIND_NAME_CASE(NegDelta32)
+ KIND_NAME_CASE(Delta64)
+ KIND_NAME_CASE(Branch26PCRel)
+ KIND_NAME_CASE(Page20)
+ KIND_NAME_CASE(PageOffset12)
+ KIND_NAME_CASE(RequestGOTAndTransformToPage20)
+ KIND_NAME_CASE(RequestGOTAndTransformToPageOffset12)
+ default:
+ return getGenericEdgeKindName(K); // Not loongarch-specific: fall back to generic JITLink edge names.
+ }
+#undef KIND_NAME_CASE
+}
+
+} // namespace loongarch
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ppc64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ppc64.cpp
new file mode 100644
index 000000000000..27484aaf2059
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ppc64.cpp
@@ -0,0 +1,144 @@
+//===----- ppc64.cpp - Generic JITLink ppc64 edge kinds, utilities ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing 64-bit PowerPC objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ppc64.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm::jitlink::ppc64 {
+
+const char NullPointerContent[8] = {0x00, 0x00, 0x00, 0x00, // Zero-filled 8-byte content for a null pointer / empty GOT slot.
+ 0x00, 0x00, 0x00, 0x00};
+
+const char PointerJumpStubContent_little[20] = { // Little-endian TOC-based stub: save caller TOC, load target via r2, jump.
+ 0x18, 0x00, 0x41, (char)0xf8, // std r2, 24(r1)
+ 0x00, 0x00, (char)0x82, 0x3d, // addis r12, r2, OffHa
+ 0x00, 0x00, (char)0x8c, (char)0xe9, // ld r12, OffLo(r12)
+ (char)0xa6, 0x03, (char)0x89, 0x7d, // mtctr r12
+ 0x20, 0x04, (char)0x80, 0x4e, // bctr
+};
+
+const char PointerJumpStubContent_big[20] = { // Big-endian encoding of the same TOC-based stub.
+ (char)0xf8, 0x41, 0x00, 0x18, // std r2, 24(r1)
+ 0x3d, (char)0x82, 0x00, 0x00, // addis r12, r2, OffHa
+ (char)0xe9, (char)0x8c, 0x00, 0x00, // ld r12, OffLo(r12)
+ 0x7d, (char)0x89, 0x03, (char)0xa6, // mtctr r12
+ 0x4e, (char)0x80, 0x04, 0x20, // bctr
+};
+
+// TODO: We can use prefixed instructions if LLJIT is running on power10.
+const char PointerJumpStubNoTOCContent_little[32] = { // No-TOC stub (LE): derive PC via bcl/mflr, then load target and jump.
+ (char)0xa6, 0x02, (char)0x88, 0x7d, // mflr 12
+ 0x05, (char)0x00, (char)0x9f, 0x42, // bcl 20,31,.+4
+ (char)0xa6, 0x02, 0x68, 0x7d, // mflr 11
+ (char)0xa6, 0x03, (char)0x88, 0x7d, // mtlr 12
+ 0x00, 0x00, (char)0x8b, 0x3d, // addis 12,11,OffHa
+ 0x00, 0x00, (char)0x8c, (char)0xe9, // ld 12, OffLo(12)
+ (char)0xa6, 0x03, (char)0x89, 0x7d, // mtctr 12
+ 0x20, 0x04, (char)0x80, 0x4e, // bctr
+};
+
+const char PointerJumpStubNoTOCContent_big[32] = { // Big-endian encoding of the no-TOC stub.
+ 0x7d, (char)0x88, 0x02, (char)0xa6, // mflr 12
+ 0x42, (char)0x9f, 0x00, 0x05, // bcl 20,31,.+4
+ 0x7d, 0x68, 0x02, (char)0xa6, // mflr 11
+ 0x7d, (char)0x88, 0x03, (char)0xa6, // mtlr 12
+ 0x3d, (char)0x8b, 0x00, 0x00, // addis 12,11,OffHa
+ (char)0xe9, (char)0x8c, 0x00, 0x00, // ld 12, OffLo(12)
+ 0x7d, (char)0x89, 0x03, (char)0xa6, // mtctr 12
+ 0x4e, (char)0x80, 0x04, 0x20, // bctr
+};
+
+const char *getEdgeKindName(Edge::Kind K) { // Human-readable name for a ppc64 edge kind (debug/diagnostic output).
+ switch (K) {
+ case Pointer64:
+ return "Pointer64";
+ case Pointer32:
+ return "Pointer32";
+ case Pointer16:
+ return "Pointer16";
+ case Pointer16DS:
+ return "Pointer16DS";
+ case Pointer16HA:
+ return "Pointer16HA";
+ case Pointer16HI:
+ return "Pointer16HI";
+ case Pointer16HIGH:
+ return "Pointer16HIGH";
+ case Pointer16HIGHA:
+ return "Pointer16HIGHA";
+ case Pointer16HIGHER:
+ return "Pointer16HIGHER";
+ case Pointer16HIGHERA:
+ return "Pointer16HIGHERA";
+ case Pointer16HIGHEST:
+ return "Pointer16HIGHEST";
+ case Pointer16HIGHESTA:
+ return "Pointer16HIGHESTA";
+ case Pointer16LO:
+ return "Pointer16LO";
+ case Pointer16LODS:
+ return "Pointer16LODS";
+ case Pointer14:
+ return "Pointer14";
+ case Delta64:
+ return "Delta64";
+ case Delta34:
+ return "Delta34";
+ case Delta32:
+ return "Delta32";
+ case NegDelta32:
+ return "NegDelta32";
+ case Delta16:
+ return "Delta16";
+ case Delta16HA:
+ return "Delta16HA";
+ case Delta16HI:
+ return "Delta16HI";
+ case Delta16LO:
+ return "Delta16LO";
+ case TOC:
+ return "TOC";
+ case TOCDelta16:
+ return "TOCDelta16";
+ case TOCDelta16DS:
+ return "TOCDelta16DS";
+ case TOCDelta16HA:
+ return "TOCDelta16HA";
+ case TOCDelta16HI:
+ return "TOCDelta16HI";
+ case TOCDelta16LO:
+ return "TOCDelta16LO";
+ case TOCDelta16LODS:
+ return "TOCDelta16LODS";
+ case RequestGOTAndTransformToDelta34:
+ return "RequestGOTAndTransformToDelta34";
+ case CallBranchDelta:
+ return "CallBranchDelta";
+ case CallBranchDeltaRestoreTOC:
+ return "CallBranchDeltaRestoreTOC";
+ case RequestCall:
+ return "RequestCall";
+ case RequestCallNoTOC:
+ return "RequestCallNoTOC";
+ case RequestTLSDescInGOTAndTransformToTOCDelta16HA:
+ return "RequestTLSDescInGOTAndTransformToTOCDelta16HA";
+ case RequestTLSDescInGOTAndTransformToTOCDelta16LO:
+ return "RequestTLSDescInGOTAndTransformToTOCDelta16LO";
+ case RequestTLSDescInGOTAndTransformToDelta34:
+ return "RequestTLSDescInGOTAndTransformToDelta34";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(K)); // Not ppc64-specific: fall back to generic JITLink edge names.
+ }
+}
+
+} // end namespace llvm::jitlink::ppc64
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp
new file mode 100644
index 000000000000..a4e4daef97fb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/riscv.cpp
@@ -0,0 +1,92 @@
+//===------ riscv.cpp - Generic JITLink riscv edge kinds, utilities -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing riscv objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/riscv.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace riscv {
+
+const char *getEdgeKindName(Edge::Kind K) { // Human-readable name for a riscv edge kind (debug/diagnostic output).
+ switch (K) {
+ case R_RISCV_32:
+ return "R_RISCV_32";
+ case R_RISCV_64:
+ return "R_RISCV_64";
+ case R_RISCV_BRANCH:
+ return "R_RISCV_BRANCH";
+ case R_RISCV_JAL:
+ return "R_RISCV_JAL";
+ case R_RISCV_CALL:
+ return "R_RISCV_CALL";
+ case R_RISCV_CALL_PLT:
+ return "R_RISCV_CALL_PLT";
+ case R_RISCV_GOT_HI20:
+ return "R_RISCV_GOT_HI20";
+ case R_RISCV_PCREL_HI20:
+ return "R_RISCV_PCREL_HI20";
+ case R_RISCV_PCREL_LO12_I:
+ return "R_RISCV_PCREL_LO12_I";
+ case R_RISCV_PCREL_LO12_S:
+ return "R_RISCV_PCREL_LO12_S";
+ case R_RISCV_HI20:
+ return "R_RISCV_HI20";
+ case R_RISCV_LO12_I:
+ return "R_RISCV_LO12_I";
+ case R_RISCV_LO12_S:
+ return "R_RISCV_LO12_S";
+ case R_RISCV_ADD8:
+ return "R_RISCV_ADD8";
+ case R_RISCV_ADD16:
+ return "R_RISCV_ADD16";
+ case R_RISCV_ADD32:
+ return "R_RISCV_ADD32";
+ case R_RISCV_ADD64:
+ return "R_RISCV_ADD64";
+ case R_RISCV_SUB8:
+ return "R_RISCV_SUB8";
+ case R_RISCV_SUB16:
+ return "R_RISCV_SUB16";
+ case R_RISCV_SUB32:
+ return "R_RISCV_SUB32";
+ case R_RISCV_SUB64:
+ return "R_RISCV_SUB64";
+ case R_RISCV_RVC_BRANCH:
+ return "R_RISCV_RVC_BRANCH";
+ case R_RISCV_RVC_JUMP:
+ return "R_RISCV_RVC_JUMP";
+ case R_RISCV_SUB6:
+ return "R_RISCV_SUB6";
+ case R_RISCV_SET6:
+ return "R_RISCV_SET6";
+ case R_RISCV_SET8:
+ return "R_RISCV_SET8";
+ case R_RISCV_SET16:
+ return "R_RISCV_SET16";
+ case R_RISCV_SET32:
+ return "R_RISCV_SET32";
+ case R_RISCV_32_PCREL:
+ return "R_RISCV_32_PCREL";
+ case CallRelaxable:
+ return "CallRelaxable";
+ case AlignRelaxable:
+ return "AlignRelaxable";
+ case NegDelta32:
+ return "NegDelta32";
+ }
+ return getGenericEdgeKindName(K); // Reached for kinds outside the riscv-specific set.
+}
+} // namespace riscv
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
new file mode 100644
index 000000000000..9f7ece8ffbbb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
@@ -0,0 +1,197 @@
+//===----- x86_64.cpp - Generic JITLink x86-64 edge kinds, utilities ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing x86-64 objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace x86_64 {
+
+const char *getEdgeKindName(Edge::Kind K) { // Human-readable name for an x86-64 edge kind (debug/diagnostic output).
+ switch (K) {
+ case Pointer64:
+ return "Pointer64";
+ case Pointer32:
+ return "Pointer32";
+ case Pointer32Signed:
+ return "Pointer32Signed";
+ case Pointer16:
+ return "Pointer16";
+ case Pointer8:
+ return "Pointer8";
+ case Delta64:
+ return "Delta64";
+ case Delta32:
+ return "Delta32";
+ case Delta8:
+ return "Delta8";
+ case NegDelta64:
+ return "NegDelta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case Delta64FromGOT:
+ return "Delta64FromGOT";
+ case PCRel32:
+ return "PCRel32";
+ case BranchPCRel32:
+ return "BranchPCRel32";
+ case BranchPCRel32ToPtrJumpStub:
+ return "BranchPCRel32ToPtrJumpStub";
+ case BranchPCRel32ToPtrJumpStubBypassable:
+ return "BranchPCRel32ToPtrJumpStubBypassable";
+ case RequestGOTAndTransformToDelta32:
+ return "RequestGOTAndTransformToDelta32";
+ case RequestGOTAndTransformToDelta64:
+ return "RequestGOTAndTransformToDelta64";
+ case RequestGOTAndTransformToDelta64FromGOT:
+ return "RequestGOTAndTransformToDelta64FromGOT";
+ case PCRel32GOTLoadREXRelaxable:
+ return "PCRel32GOTLoadREXRelaxable";
+ case RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable:
+ return "RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable";
+ case PCRel32GOTLoadRelaxable:
+ return "PCRel32GOTLoadRelaxable";
+ case RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
+ return "RequestGOTAndTransformToPCRel32GOTLoadRelaxable";
+ case PCRel32TLVPLoadREXRelaxable:
+ return "PCRel32TLVPLoadREXRelaxable";
+ case RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable:
+ return "RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(K)); // Not x86-64-specific: fall back to generic JITLink edge names.
+ }
+}
+
+const char NullPointerContent[PointerSize] = {0x00, 0x00, 0x00, 0x00, // Zero-filled 8-byte content for a null pointer / empty GOT slot.
+ 0x00, 0x00, 0x00, 0x00};
+
+const char PointerJumpStubContent[6] = { // Jump stub that branches through a pointer slot.
+ static_cast<char>(0xFFu), 0x25, 0x00, 0x00, 0x00, 0x00}; // FF 25 <disp32>: jmp through memory; displacement patched via the stub's edge.
+
+Error optimizeGOTAndStubAccesses(LinkGraph &G) { // Post-layout pass: relax GOT loads to direct forms and bypass stubs when targets are in range.
+ LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");
+
+ for (auto *B : G.blocks())
+ for (auto &E : B->edges()) {
+ if (E.getKind() == x86_64::PCRel32GOTLoadRelaxable ||
+ E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable) { // GOT-load relaxation: rewrite the instruction bytes preceding the fixup.
+#ifndef NDEBUG
+ bool REXPrefix = E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable;
+ assert(E.getOffset() >= (REXPrefix ? 3u : 2u) &&
+ "GOT edge occurs too early in block");
+#endif
+ auto *FixupData = reinterpret_cast<uint8_t *>(
+ const_cast<char *>(B->getContent().data())) +
+ E.getOffset();
+ const uint8_t Op = FixupData[-2]; // Opcode byte immediately before the ModRM byte.
+ const uint8_t ModRM = FixupData[-1]; // ModRM byte immediately before the 32-bit fixup field.
+
+ auto &GOTEntryBlock = E.getTarget().getBlock();
+ assert(GOTEntryBlock.getSize() == G.getPointerSize() &&
+ "GOT entry block should be pointer sized");
+ assert(GOTEntryBlock.edges_size() == 1 &&
+ "GOT entry should only have one outgoing edge");
+ auto &GOTTarget = GOTEntryBlock.edges().begin()->getTarget(); // The real symbol behind the GOT slot.
+ orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
+ orc::ExecutorAddr EdgeAddr = B->getFixupAddress(E);
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ bool TargetInRangeForImmU32 = isUInt<32>(TargetAddr.getValue()); // Needed for the absolute imm32 forms below.
+ bool DisplacementInRangeForImmS32 = isInt<32>(Displacement); // Needed for the RIP-relative forms below.
+
+ // If both of the Target and displacement is out of range, then
+ // there isn't optimization chance.
+ if (!(TargetInRangeForImmU32 || DisplacementInRangeForImmS32))
+ continue;
+
+ // Transform "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
+ if (Op == 0x8b && DisplacementInRangeForImmS32) {
+ FixupData[-2] = 0x8d; // 0x8b (mov r,m) -> 0x8d (lea).
+ E.setKind(x86_64::Delta32);
+ E.setTarget(GOTTarget);
+ E.setAddend(E.getAddend() - 4); // Delta32 is not end-of-field relative; compensate.
+ LLVM_DEBUG({
+ dbgs() << " Replaced GOT load wih LEA:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ continue;
+ }
+
+ // Transform call/jmp instructions
+ if (Op == 0xff && TargetInRangeForImmU32) {
+ if (ModRM == 0x15) { // call *foo@GOTPCREL(%rip)
+ // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call
+ // foo" But lld convert it to "addr32 call foo, because that makes
+ // result expression to be a single instruction.
+ FixupData[-2] = 0x67; // addr32 prefix.
+ FixupData[-1] = 0xe8; // direct call rel32.
+ LLVM_DEBUG({
+ dbgs() << " replaced call instruction's memory operand wih imm "
+ "operand:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ } else {
+ // Transform "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop"
+ assert(ModRM == 0x25 && "Invalid ModRm for call/jmp instructions");
+ FixupData[-2] = 0xe9; // direct jmp rel32.
+ FixupData[3] = 0x90; // trailing nop fills the freed byte.
+ E.setOffset(E.getOffset() - 1); // Fixup field moved back one byte with the shorter encoding.
+ LLVM_DEBUG({
+ dbgs() << " replaced jmp instruction's memory operand wih imm "
+ "operand:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ E.setKind(x86_64::Pointer32);
+ E.setTarget(GOTTarget);
+ continue;
+ }
+ } else if (E.getKind() == x86_64::BranchPCRel32ToPtrJumpStubBypassable) { // Stub bypass: branch straight to the target if in range.
+ auto &StubBlock = E.getTarget().getBlock();
+ assert(StubBlock.getSize() == sizeof(PointerJumpStubContent) &&
+ "Stub block should be stub sized");
+ assert(StubBlock.edges_size() == 1 &&
+ "Stub block should only have one outgoing edge");
+
+ auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock(); // The stub's single edge points at its pointer slot.
+ assert(GOTBlock.getSize() == G.getPointerSize() &&
+ "GOT block should be pointer sized");
+ assert(GOTBlock.edges_size() == 1 &&
+ "GOT block should only have one outgoing edge");
+
+ auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
+ orc::ExecutorAddr EdgeAddr = B->getAddress() + E.getOffset();
+ orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
+
+ int64_t Displacement = TargetAddr - EdgeAddr + 4; // Range check for a 32-bit PC-relative branch field.
+ if (isInt<32>(Displacement)) {
+ E.setKind(x86_64::BranchPCRel32);
+ E.setTarget(GOTTarget);
+ LLVM_DEBUG({
+ dbgs() << " Replaced stub branch with direct branch:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ }
+ }
+
+ return Error::success();
+}
+
+} // end namespace x86_64
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
new file mode 100644
index 000000000000..869b383dd064
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -0,0 +1,684 @@
+//===-- MCJIT.cpp - MC-based Just-in-Time Compiler ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCJIT.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include <mutex>
+
+using namespace llvm;
+
+namespace {
+
+static struct RegisterJIT { // File-local static registrar: registers MCJIT with the EE factory at load time.
+ RegisterJIT() { MCJIT::Register(); }
+} JITRegistrator;
+
+}
+
+extern "C" void LLVMLinkInMCJIT() { // Empty anchor referenced by clients to force this object file to link in.
+}
+
+ExecutionEngine *
+MCJIT::createJIT(std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) { // Factory: builds an MCJIT, defaulting memory manager and resolver when absent.
+ // Try to register the program as a source of symbols to resolve against.
+ //
+ // FIXME: Don't do this here.
+ sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
+
+ if (!MemMgr || !Resolver) { // One SectionMemoryManager can serve both roles.
+ auto RTDyldMM = std::make_shared<SectionMemoryManager>();
+ if (!MemMgr)
+ MemMgr = RTDyldMM;
+ if (!Resolver)
+ Resolver = RTDyldMM;
+ }
+
+ return new MCJIT(std::move(M), std::move(TM), std::move(MemMgr),
+ std::move(Resolver)); // NOTE(review): ErrorStr is accepted but unused here — confirm against callers.
+}
+
+MCJIT::MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> TM,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver)
+ : ExecutionEngine(TM->createDataLayout(), std::move(M)), TM(std::move(TM)),
+ Ctx(nullptr), MemMgr(std::move(MemMgr)),
+ Resolver(*this, std::move(Resolver)), Dyld(*this->MemMgr, this->Resolver),
+ ObjCache(nullptr) {
+ // FIXME: We are managing our modules, so we do not want the base class
+ // ExecutionEngine to manage them as well. To avoid double destruction
+ // of the first (and only) module added in ExecutionEngine constructor
+ // we remove it from EE and will destruct it ourselves.
+ //
+ // It may make sense to move our module manager (based on SmallStPtr) back
+ // into EE if the JIT and Interpreter can live with it.
+ // If so, additional functions: addModule, removeModule, FindFunctionNamed,
+ // runStaticConstructorsDestructors could be moved back to EE as well.
+ //
+ std::unique_ptr<Module> First = std::move(Modules[0]); // Reclaim the module the base class registered.
+ Modules.clear();
+
+ if (First->getDataLayout().isDefault())
+ First->setDataLayout(getDataLayout()); // Adopt the target's layout if the module has none.
+
+ OwnedModules.addModule(std::move(First));
+ RegisterJITEventListener(JITEventListener::createGDBRegistrationListener()); // Make JITed code visible to debuggers.
+}
+
+MCJIT::~MCJIT() { // Tear down: notify listeners about every still-loaded object before freeing.
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ Dyld.deregisterEHFrames();
+
+ for (auto &Obj : LoadedObjects)
+ if (Obj)
+ notifyFreeingObject(*Obj);
+
+ Archives.clear();
+}
+
+void MCJIT::addModule(std::unique_ptr<Module> M) { // Queue a module for lazy compilation (no codegen here).
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ if (M->getDataLayout().isDefault())
+ M->setDataLayout(getDataLayout()); // Adopt the target's layout if the module has none.
+
+ OwnedModules.addModule(std::move(M));
+}
+
+bool MCJIT::removeModule(Module *M) { // Returns whether M was found in (and removed from) the owned set.
+ std::lock_guard<sys::Mutex> locked(lock);
+ return OwnedModules.removeModule(M);
+}
+
+void MCJIT::addObjectFile(std::unique_ptr<object::ObjectFile> Obj) { // Load a pre-compiled object directly into the dynamic linker.
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L = Dyld.loadObject(*Obj);
+ if (Dyld.hasError())
+ report_fatal_error(Dyld.getErrorString());
+
+ notifyObjectLoaded(*Obj, *L); // Inform event listeners (e.g. GDB registration).
+
+ LoadedObjects.push_back(std::move(Obj)); // MCJIT retains ownership of the loaded object.
+}
+
+void MCJIT::addObjectFile(object::OwningBinary<object::ObjectFile> Obj) { // Overload keeping the backing buffer alive alongside the object.
+ std::unique_ptr<object::ObjectFile> ObjFile;
+ std::unique_ptr<MemoryBuffer> MemBuf;
+ std::tie(ObjFile, MemBuf) = Obj.takeBinary();
+ addObjectFile(std::move(ObjFile));
+ Buffers.push_back(std::move(MemBuf));
+}
+
+void MCJIT::addArchive(object::OwningBinary<object::Archive> A) { // Archives are searched lazily during symbol resolution.
+ Archives.push_back(std::move(A));
+}
+
+void MCJIT::setObjectCache(ObjectCache* NewCache) { // Non-owning; nullptr disables caching.
+ std::lock_guard<sys::Mutex> locked(lock);
+ ObjCache = NewCache;
+}
+
+std::unique_ptr<MemoryBuffer> MCJIT::emitObject(Module *M) { // Compile M to an in-memory object file (and offer it to the cache).
+ assert(M && "Can not emit a null module");
+
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Materialize all globals in the module if they have not been
+ // materialized already.
+ cantFail(M->materializeAll());
+
+ // This must be a module which has already been added but not loaded to this
+ // MCJIT instance, since these conditions are tested by our caller,
+ // generateCodeForModule.
+
+ legacy::PassManager PM;
+
+ // The RuntimeDyld will take ownership of this shortly
+ SmallVector<char, 4096> ObjBufferSV;
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM->addPassesToEmitMC(PM, Ctx, ObjStream, !getVerifyModules()))
+ report_fatal_error("Target does not support MC emission!");
+
+ // Initialize passes.
+ PM.run(*M); // Runs codegen; the object bytes land in ObjBufferSV.
+ // Flush the output buffer to get the generated code into memory
+
+ auto CompiledObjBuffer = std::make_unique<SmallVectorMemoryBuffer>(
+ std::move(ObjBufferSV), /*RequiresNullTerminator=*/false);
+
+ // If we have an object cache, tell it about the new object.
+ // Note that we're using the compiled image, not the loaded image (as below).
+ if (ObjCache) {
+ // MemoryBuffer is a thin wrapper around the actual memory, so it's OK
+ // to create a temporary object here and delete it after the call.
+ MemoryBufferRef MB = CompiledObjBuffer->getMemBufferRef();
+ ObjCache->notifyObjectCompiled(M, MB);
+ }
+
+ return CompiledObjBuffer;
+}
+
+void MCJIT::generateCodeForModule(Module *M) { // Compile (or fetch from cache) and load M into the dynamic linker; idempotent.
+ // Get a thread lock to make sure we aren't trying to load multiple times
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // This must be a module which has already been added to this MCJIT instance.
+ assert(OwnedModules.ownsModule(M) &&
+ "MCJIT::generateCodeForModule: Unknown module.");
+
+ // Re-compilation is not supported
+ if (OwnedModules.hasModuleBeenLoaded(M))
+ return;
+
+ std::unique_ptr<MemoryBuffer> ObjectToLoad;
+ // Try to load the pre-compiled object from cache if possible
+ if (ObjCache)
+ ObjectToLoad = ObjCache->getObject(M);
+
+ assert(M->getDataLayout() == getDataLayout() && "DataLayout Mismatch");
+
+ // If the cache did not contain a suitable object, compile the object
+ if (!ObjectToLoad) {
+ ObjectToLoad = emitObject(M);
+ assert(ObjectToLoad && "Compilation did not produce an object.");
+ }
+
+ // Load the object into the dynamic linker.
+ // MCJIT now owns the ObjectImage pointer (via its LoadedObjects list).
+ Expected<std::unique_ptr<object::ObjectFile>> LoadedObject =
+ object::ObjectFile::createObjectFile(ObjectToLoad->getMemBufferRef());
+ if (!LoadedObject) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(LoadedObject.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L =
+ Dyld.loadObject(*LoadedObject.get());
+
+ if (Dyld.hasError())
+ report_fatal_error(Dyld.getErrorString());
+
+ notifyObjectLoaded(*LoadedObject.get(), *L); // Inform event listeners (e.g. GDB registration).
+
+ Buffers.push_back(std::move(ObjectToLoad)); // Keep the raw bytes alive for the loaded object's lifetime.
+ LoadedObjects.push_back(std::move(*LoadedObject));
+
+ OwnedModules.markModuleAsLoaded(M); // Guards against re-compilation on re-entry.
+}
+
+void MCJIT::finalizeLoadedModules() { // Resolve relocations, register EH frames, and make memory executable.
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Resolve any outstanding relocations.
+ Dyld.resolveRelocations();
+
+ // Check for Dyld error.
+ if (Dyld.hasError())
+ ErrMsg = Dyld.getErrorString().str(); // Recorded, not fatal — callers can query the error message.
+
+ OwnedModules.markAllLoadedModulesAsFinalized();
+
+ // Register EH frame data for any module we own which has been loaded
+ Dyld.registerEHFrames();
+
+ // Set page permissions.
+ MemMgr->finalizeMemory();
+}
+
+// FIXME: Rename this.
+void MCJIT::finalizeObject() { // Compile every still-pending module, then finalize everything.
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Generate code for module is going to move objects out of the 'added' list,
+ // so we need to copy that out before using it:
+ SmallVector<Module*, 16> ModsToAdd;
+ for (auto *M : OwnedModules.added())
+ ModsToAdd.push_back(M);
+
+ for (auto *M : ModsToAdd)
+ generateCodeForModule(M);
+
+ finalizeLoadedModules();
+}
+
+void MCJIT::finalizeModule(Module *M) { // Ensure one specific module is compiled, then finalize loaded modules.
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // This must be a module which has already been added to this MCJIT instance.
+ assert(OwnedModules.ownsModule(M) && "MCJIT::finalizeModule: Unknown module.");
+
+ // If the module hasn't been compiled, just do that.
+ if (!OwnedModules.hasModuleBeenLoaded(M))
+ generateCodeForModule(M);
+
+ finalizeLoadedModules();
+}
+
+JITSymbol MCJIT::findExistingSymbol(const std::string &Name) { // Check the EE global map first, then the dynamic linker's table.
+ if (void *Addr = getPointerToGlobalIfAvailable(Name))
+ return JITSymbol(static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(Addr)),
+ JITSymbolFlags::Exported);
+
+ return Dyld.getSymbol(Name);
+}
+
+Module *MCJIT::findModuleForSymbol(const std::string &Name,
+ bool CheckFunctionsOnly) { // Scan not-yet-compiled modules for a definition of Name.
+ StringRef DemangledName = Name;
+ if (DemangledName[0] == getDataLayout().getGlobalPrefix())
+ DemangledName = DemangledName.substr(1); // Strip the target's global prefix (e.g. leading '_').
+
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // If it hasn't already been generated, see if it's in one of our modules.
+ for (ModulePtrSet::iterator I = OwnedModules.begin_added(),
+ E = OwnedModules.end_added();
+ I != E; ++I) {
+ Module *M = *I;
+ Function *F = M->getFunction(DemangledName);
+ if (F && !F->isDeclaration())
+ return M;
+ if (!CheckFunctionsOnly) {
+ GlobalVariable *G = M->getGlobalVariable(DemangledName);
+ if (G && !G->isDeclaration())
+ return M;
+ // FIXME: Do we need to worry about global aliases?
+ }
+ }
+ // We didn't find the symbol in any of our modules.
+ return nullptr;
+}
+
+uint64_t MCJIT::getSymbolAddress(const std::string &Name,
+ bool CheckFunctionsOnly) { // Mangle Name for the target, then resolve it; 0 when not found.
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, getDataLayout());
+ }
+ if (auto Sym = findSymbol(MangledName, CheckFunctionsOnly)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ return *AddrOrErr;
+ else
+ report_fatal_error(AddrOrErr.takeError());
+ } else if (auto Err = Sym.takeError())
+ report_fatal_error(Sym.takeError()); // NOTE(review): takeError() is called twice here (Err then again) — verify against upstream.
+ return 0;
+}
+
+JITSymbol MCJIT::findSymbol(const std::string &Name,
+ bool CheckFunctionsOnly) { // Resolution order: existing symbols, archives, unloaded modules, lazy creator.
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // First, check to see if we already have this symbol.
+ if (auto Sym = findExistingSymbol(Name))
+ return Sym;
+
+ for (object::OwningBinary<object::Archive> &OB : Archives) {
+ object::Archive *A = OB.getBinary();
+ // Look for our symbols in each Archive
+ auto OptionalChildOrErr = A->findSym(Name);
+ if (!OptionalChildOrErr)
+ report_fatal_error(OptionalChildOrErr.takeError());
+ auto &OptionalChild = *OptionalChildOrErr;
+ if (OptionalChild) {
+ // FIXME: Support nested archives?
+ Expected<std::unique_ptr<object::Binary>> ChildBinOrErr =
+ OptionalChild->getAsBinary();
+ if (!ChildBinOrErr) {
+ // TODO: Actually report errors helpfully.
+ consumeError(ChildBinOrErr.takeError());
+ continue;
+ }
+ std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
+ if (ChildBin->isObject()) {
+ std::unique_ptr<object::ObjectFile> OF(
+ static_cast<object::ObjectFile *>(ChildBin.release()));
+ // This causes the object file to be loaded.
+ addObjectFile(std::move(OF));
+ // The address should be here now.
+ if (auto Sym = findExistingSymbol(Name))
+ return Sym;
+ }
+ }
+ }
+
+ // If it hasn't already been generated, see if it's in one of our modules.
+ Module *M = findModuleForSymbol(Name, CheckFunctionsOnly);
+ if (M) {
+ generateCodeForModule(M);
+
+ // Check the RuntimeDyld table again, it should be there now.
+ return findExistingSymbol(Name);
+ }
+
+ // If a LazyFunctionCreator is installed, use it to get/create the function.
+ // FIXME: Should we instead have a LazySymbolCreator callback?
+ if (LazyFunctionCreator) {
+ auto Addr = static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(LazyFunctionCreator(Name)));
+ return JITSymbol(Addr, JITSymbolFlags::Exported);
+ }
+
+ return nullptr; // Not found anywhere.
+}
+
+uint64_t MCJIT::getGlobalValueAddress(const std::string &Name) { // Resolve a global's address; finalizes on success so it is executable-ready.
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Result = getSymbolAddress(Name, false); // false: globals as well as functions.
+ if (Result != 0)
+ finalizeLoadedModules();
+ return Result;
+}
+
+uint64_t MCJIT::getFunctionAddress(const std::string &Name) { // Function-only counterpart of getGlobalValueAddress.
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Result = getSymbolAddress(Name, true); // true: restrict the module scan to functions.
+ if (Result != 0)
+ finalizeLoadedModules();
+ return Result;
+}
+
+// Deprecated. Use getFunctionAddress instead.
+void *MCJIT::getPointerToFunction(Function *F) { // Legacy lookup by Function*; may trigger compilation of F's module.
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ Mangler Mang;
+ SmallString<128> Name;
+ TM->getNameWithPrefix(Name, F, Mang); // Target-mangled symbol name for F.
+
+ if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
+ bool AbortOnFailure = !F->hasExternalWeakLinkage(); // extern_weak may legitimately resolve to null.
+ void *Addr = getPointerToNamedFunction(Name, AbortOnFailure);
+ updateGlobalMapping(F, Addr);
+ return Addr;
+ }
+
+ Module *M = F->getParent();
+ bool HasBeenAddedButNotLoaded = OwnedModules.hasModuleBeenAddedButNotLoaded(M);
+
+ // Make sure the relevant module has been compiled and loaded.
+ if (HasBeenAddedButNotLoaded)
+ generateCodeForModule(M);
+ else if (!OwnedModules.hasModuleBeenLoaded(M)) {
+ // If this function doesn't belong to one of our modules, we're done.
+ // FIXME: Asking for the pointer to a function that hasn't been registered,
+ // and isn't a declaration (which is handled above) should probably
+ // be an assertion.
+ return nullptr;
+ }
+
+ // FIXME: Should the Dyld be retaining module information? Probably not.
+ //
+ // This is the accessor for the target address, so make sure to check the
+ // load address of the symbol, not the local address.
+ return (void*)Dyld.getSymbol(Name).getAddress();
+}
+
+void MCJIT::runStaticConstructorsDestructorsInModulePtrSet(
+ bool isDtors, ModulePtrSet::iterator I, ModulePtrSet::iterator E) { // Helper: run ctors (or dtors) for each module in [I, E).
+ for (; I != E; ++I) {
+ ExecutionEngine::runStaticConstructorsDestructors(**I, isDtors);
+ }
+}
+
+void MCJIT::runStaticConstructorsDestructors(bool isDtors) { // Covers all three ownership states: added, loaded, finalized.
+ // Execute global ctors/dtors for each module in the program.
+ runStaticConstructorsDestructorsInModulePtrSet(
+ isDtors, OwnedModules.begin_added(), OwnedModules.end_added());
+ runStaticConstructorsDestructorsInModulePtrSet(
+ isDtors, OwnedModules.begin_loaded(), OwnedModules.end_loaded());
+ runStaticConstructorsDestructorsInModulePtrSet(
+ isDtors, OwnedModules.begin_finalized(), OwnedModules.end_finalized());
+}
+
+Function *MCJIT::FindFunctionNamedInModulePtrSet(StringRef FnName,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E) { // Helper: first module in [I, E) defining FnName (declarations don't count).
+ for (; I != E; ++I) {
+ Function *F = (*I)->getFunction(FnName);
+ if (F && !F->isDeclaration())
+ return F;
+ }
+ return nullptr;
+}
+
+GlobalVariable *MCJIT::FindGlobalVariableNamedInModulePtrSet(StringRef Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E) { // Helper: first module in [I, E) defining the variable (declarations don't count).
+ for (; I != E; ++I) {
+ GlobalVariable *GV = (*I)->getGlobalVariable(Name, AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
+
+
+Function *MCJIT::FindFunctionNamed(StringRef FnName) { // Search added, then loaded, then finalized modules.
+ Function *F = FindFunctionNamedInModulePtrSet(
+ FnName, OwnedModules.begin_added(), OwnedModules.end_added());
+ if (!F)
+ F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_loaded(),
+ OwnedModules.end_loaded());
+ if (!F)
+ F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_finalized(),
+ OwnedModules.end_finalized());
+ return F;
+}
+
+GlobalVariable *MCJIT::FindGlobalVariableNamed(StringRef Name, bool AllowInternal) { // Search added, then loaded, then finalized modules.
+ GlobalVariable *GV = FindGlobalVariableNamedInModulePtrSet(
+ Name, AllowInternal, OwnedModules.begin_added(), OwnedModules.end_added());
+ if (!GV)
+ GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_loaded(),
+ OwnedModules.end_loaded());
+ if (!GV)
+ GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_finalized(),
+ OwnedModules.end_finalized());
+ return GV;
+}
+
+GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
+ assert(F && "Function *F was null at entry to run()");
+
+ void *FPtr = getPointerToFunction(F);
+ finalizeModule(F->getParent());
+ assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+ FunctionType *FTy = F->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+
+ assert((FTy->getNumParams() == ArgValues.size() ||
+ (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+ "Wrong number of arguments passed into function!");
+ assert(FTy->getNumParams() == ArgValues.size() &&
+ "This doesn't support passing arguments through varargs (yet)!");
+
+ // Handle some common cases first. These cases correspond to common `main'
+ // prototypes.
+ if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
+ switch (ArgValues.size()) {
+ case 3:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy() &&
+ FTy->getParamType(2)->isPointerTy()) {
+ int (*PF)(int, char **, const char **) =
+ (int(*)(int, char **, const char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1]),
+ (const char **)GVTOP(ArgValues[2])));
+ return rv;
+ }
+ break;
+ case 2:
+ if (FTy->getParamType(0)->isIntegerTy(32) &&
+ FTy->getParamType(1)->isPointerTy()) {
+ int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1])));
+ return rv;
+ }
+ break;
+ case 1:
+ if (FTy->getNumParams() == 1 &&
+ FTy->getParamType(0)->isIntegerTy(32)) {
+ GenericValue rv;
+ int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+ return rv;
+ }
+ break;
+ }
+ }
+
+ // Handle cases where no arguments are passed first.
+ if (ArgValues.empty()) {
+ GenericValue rv;
+ switch (RetTy->getTypeID()) {
+ default: llvm_unreachable("Unknown return type for function call!");
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+ if (BitWidth == 1)
+ rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 8)
+ rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 16)
+ rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 32)
+ rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 64)
+ rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
+ else
+ llvm_unreachable("Integer types > 64 bits not supported");
+ return rv;
+ }
+ case Type::VoidTyID:
+ rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+ return rv;
+ case Type::FloatTyID:
+ rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::DoubleTyID:
+ rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ llvm_unreachable("long double not supported yet");
+ case Type::PointerTyID:
+ return PTOGV(((void*(*)())(intptr_t)FPtr)());
+ }
+ }
+
+ report_fatal_error("MCJIT::runFunction does not support full-featured "
+ "argument passing. Please use "
+ "ExecutionEngine::getFunctionAddress and cast the result "
+ "to the desired function pointer type.");
+}
+
+void *MCJIT::getPointerToNamedFunction(StringRef Name, bool AbortOnFailure) {
+ if (!isSymbolSearchingDisabled()) {
+ if (auto Sym = Resolver.findSymbol(std::string(Name))) {
+ if (auto AddrOrErr = Sym.getAddress())
+ return reinterpret_cast<void*>(
+ static_cast<uintptr_t>(*AddrOrErr));
+ } else if (auto Err = Sym.takeError())
+ report_fatal_error(std::move(Err));
+ }
+
+ /// If a LazyFunctionCreator is installed, use it to get/create the function.
+ if (LazyFunctionCreator)
+ if (void *RP = LazyFunctionCreator(std::string(Name)))
+ return RP;
+
+ if (AbortOnFailure) {
+ report_fatal_error("Program used external function '"+Name+
+ "' which could not be resolved!");
+ }
+ return nullptr;
+}
+
+void MCJIT::RegisterJITEventListener(JITEventListener *L) {
+ if (!L)
+ return;
+ std::lock_guard<sys::Mutex> locked(lock);
+ EventListeners.push_back(L);
+}
+
+void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
+ if (!L)
+ return;
+ std::lock_guard<sys::Mutex> locked(lock);
+ auto I = find(reverse(EventListeners), L);
+ if (I != EventListeners.rend()) {
+ std::swap(*I, EventListeners.back());
+ EventListeners.pop_back();
+ }
+}
+
+void MCJIT::notifyObjectLoaded(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+ uint64_t Key =
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Obj.getData().data()));
+ std::lock_guard<sys::Mutex> locked(lock);
+ MemMgr->notifyObjectLoaded(this, Obj);
+ for (JITEventListener *EL : EventListeners)
+ EL->notifyObjectLoaded(Key, Obj, L);
+}
+
+void MCJIT::notifyFreeingObject(const object::ObjectFile &Obj) {
+ uint64_t Key =
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Obj.getData().data()));
+ std::lock_guard<sys::Mutex> locked(lock);
+ for (JITEventListener *L : EventListeners)
+ L->notifyFreeingObject(Key);
+}
+
+JITSymbol
+LinkingSymbolResolver::findSymbol(const std::string &Name) {
+ auto Result = ParentEngine.findSymbol(Name, false);
+ if (Result)
+ return Result;
+ if (ParentEngine.isSymbolSearchingDisabled())
+ return nullptr;
+ return ClientResolver->findSymbol(Name);
+}
+
+void LinkingSymbolResolver::anchor() {}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
new file mode 100644
index 000000000000..f6c4cdbb8c91
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -0,0 +1,335 @@
+//===-- MCJIT.h - Class definition for the MCJIT ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
+#define LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+
+namespace llvm {
+class MCJIT;
+class Module;
+class ObjectCache;
+
+// This is a helper class that the MCJIT execution engine uses for linking
+// functions across modules that it owns. It aggregates the memory manager
+// that is passed in to the MCJIT constructor and defers most functionality
+// to that object.
+class LinkingSymbolResolver : public LegacyJITSymbolResolver {
+public:
+ LinkingSymbolResolver(MCJIT &Parent,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver)
+ : ParentEngine(Parent), ClientResolver(std::move(Resolver)) {}
+
+ JITSymbol findSymbol(const std::string &Name) override;
+
+ // MCJIT doesn't support logical dylibs.
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) override {
+ return nullptr;
+ }
+
+private:
+ MCJIT &ParentEngine;
+ std::shared_ptr<LegacyJITSymbolResolver> ClientResolver;
+ void anchor() override;
+};
+
+// About Module states: added->loaded->finalized.
+//
+// The purpose of the "added" state is having modules in standby. (added=known
+// but not compiled). The idea is that you can add a module to provide function
+// definitions but if nothing in that module is referenced by a module in which
+// a function is executed (note the wording here because it's not exactly the
+// ideal case) then the module never gets compiled. This is sort of lazy
+// compilation.
+//
+// The purpose of the "loaded" state (loaded=compiled and required sections
+// copied into local memory but not yet ready for execution) is to have an
+// intermediate state wherein clients can remap the addresses of sections, using
+// MCJIT::mapSectionAddress, (in preparation for later copying to a new location
+// or an external process) before relocations and page permissions are applied.
+//
+// It might not be obvious at first glance, but the "remote-mcjit" case in the
+// lli tool does this. In that case, the intermediate action is taken by the
+// RemoteMemoryManager in response to the notifyObjectLoaded function being
+// called.
+
+class MCJIT : public ExecutionEngine {
+ MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> tm,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver);
+
+ typedef llvm::SmallPtrSet<Module *, 4> ModulePtrSet;
+
+ class OwningModuleContainer {
+ public:
+ OwningModuleContainer() = default;
+ ~OwningModuleContainer() {
+ freeModulePtrSet(AddedModules);
+ freeModulePtrSet(LoadedModules);
+ freeModulePtrSet(FinalizedModules);
+ }
+
+ ModulePtrSet::iterator begin_added() { return AddedModules.begin(); }
+ ModulePtrSet::iterator end_added() { return AddedModules.end(); }
+ iterator_range<ModulePtrSet::iterator> added() {
+ return make_range(begin_added(), end_added());
+ }
+
+ ModulePtrSet::iterator begin_loaded() { return LoadedModules.begin(); }
+ ModulePtrSet::iterator end_loaded() { return LoadedModules.end(); }
+
+ ModulePtrSet::iterator begin_finalized() { return FinalizedModules.begin(); }
+ ModulePtrSet::iterator end_finalized() { return FinalizedModules.end(); }
+
+ void addModule(std::unique_ptr<Module> M) {
+ AddedModules.insert(M.release());
+ }
+
+ bool removeModule(Module *M) {
+ return AddedModules.erase(M) || LoadedModules.erase(M) ||
+ FinalizedModules.erase(M);
+ }
+
+ bool hasModuleBeenAddedButNotLoaded(Module *M) {
+ return AddedModules.contains(M);
+ }
+
+ bool hasModuleBeenLoaded(Module *M) {
+ // If the module is in either the "loaded" or "finalized" sections it
+ // has been loaded.
+ return LoadedModules.contains(M) || FinalizedModules.contains(M);
+ }
+
+ bool hasModuleBeenFinalized(Module *M) {
+ return FinalizedModules.contains(M);
+ }
+
+ bool ownsModule(Module* M) {
+ return AddedModules.contains(M) || LoadedModules.contains(M) ||
+ FinalizedModules.contains(M);
+ }
+
+ void markModuleAsLoaded(Module *M) {
+ // This checks against logic errors in the MCJIT implementation.
+ // This function should never be called with either a Module that MCJIT
+ // does not own or a Module that has already been loaded and/or finalized.
+ assert(AddedModules.count(M) &&
+ "markModuleAsLoaded: Module not found in AddedModules");
+
+ // Remove the module from the "Added" set.
+ AddedModules.erase(M);
+
+ // Add the Module to the "Loaded" set.
+ LoadedModules.insert(M);
+ }
+
+ void markModuleAsFinalized(Module *M) {
+ // This checks against logic errors in the MCJIT implementation.
+ // This function should never be called with either a Module that MCJIT
+ // does not own, a Module that has not been loaded or a Module that has
+ // already been finalized.
+ assert(LoadedModules.count(M) &&
+ "markModuleAsFinalized: Module not found in LoadedModules");
+
+ // Remove the module from the "Loaded" section of the list.
+ LoadedModules.erase(M);
+
+ // Add the Module to the "Finalized" section of the list by inserting it
+ // before the 'end' iterator.
+ FinalizedModules.insert(M);
+ }
+
+ void markAllLoadedModulesAsFinalized() {
+ for (Module *M : LoadedModules)
+ FinalizedModules.insert(M);
+ LoadedModules.clear();
+ }
+
+ private:
+ ModulePtrSet AddedModules;
+ ModulePtrSet LoadedModules;
+ ModulePtrSet FinalizedModules;
+
+ void freeModulePtrSet(ModulePtrSet& MPS) {
+ // Go through the module set and delete everything.
+ for (Module *M : MPS)
+ delete M;
+ MPS.clear();
+ }
+ };
+
+ std::unique_ptr<TargetMachine> TM;
+ MCContext *Ctx;
+ std::shared_ptr<MCJITMemoryManager> MemMgr;
+ LinkingSymbolResolver Resolver;
+ RuntimeDyld Dyld;
+ std::vector<JITEventListener*> EventListeners;
+
+ OwningModuleContainer OwnedModules;
+
+ SmallVector<object::OwningBinary<object::Archive>, 2> Archives;
+ SmallVector<std::unique_ptr<MemoryBuffer>, 2> Buffers;
+
+ SmallVector<std::unique_ptr<object::ObjectFile>, 2> LoadedObjects;
+
+ // An optional ObjectCache to be notified of compiled objects and used to
+ // perform lookup of pre-compiled code to avoid re-compilation.
+ ObjectCache *ObjCache;
+
+ Function *FindFunctionNamedInModulePtrSet(StringRef FnName,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+ GlobalVariable *FindGlobalVariableNamedInModulePtrSet(StringRef Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+ void runStaticConstructorsDestructorsInModulePtrSet(bool isDtors,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+public:
+ ~MCJIT() override;
+
+ /// @name ExecutionEngine interface implementation
+ /// @{
+ void addModule(std::unique_ptr<Module> M) override;
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override;
+ void addObjectFile(object::OwningBinary<object::ObjectFile> O) override;
+ void addArchive(object::OwningBinary<object::Archive> O) override;
+ bool removeModule(Module *M) override;
+
+ /// FindFunctionNamed - Search all of the active modules to find the function that
+  /// defines FnName. This is a very slow operation and shouldn't be used for
+ /// general code.
+ Function *FindFunctionNamed(StringRef FnName) override;
+
+ /// FindGlobalVariableNamed - Search all of the active modules to find the
+  /// global variable that defines Name. This is a very slow operation and
+ /// shouldn't be used for general code.
+ GlobalVariable *FindGlobalVariableNamed(StringRef Name,
+ bool AllowInternal = false) override;
+
+ /// Sets the object manager that MCJIT should use to avoid compilation.
+ void setObjectCache(ObjectCache *manager) override;
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ Dyld.setProcessAllSections(ProcessAllSections);
+ }
+
+ void generateCodeForModule(Module *M) override;
+
+ /// finalizeObject - ensure the module is fully processed and is usable.
+ ///
+ /// It is the user-level function for completing the process of making the
+ /// object usable for execution. It should be called after sections within an
+ /// object have been relocated using mapSectionAddress. When this method is
+ /// called the MCJIT execution engine will reapply relocations for a loaded
+ /// object.
+  /// It is OK to finalize a set of modules, add modules, and finalize again.
+ // FIXME: Do we really need both of these?
+ void finalizeObject() override;
+ virtual void finalizeModule(Module *);
+ void finalizeLoadedModules();
+
+ /// runStaticConstructorsDestructors - This method is used to execute all of
+ /// the static constructors or destructors for a program.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ void runStaticConstructorsDestructors(bool isDtors) override;
+
+ void *getPointerToFunction(Function *F) override;
+
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override;
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
+ Dyld.mapSectionAddress(LocalAddress, TargetAddress);
+ }
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
+
+  // If successful, these functions will implicitly finalize all loaded objects.
+ // To get a function address within MCJIT without causing a finalize, use
+ // getSymbolAddress.
+ uint64_t getGlobalValueAddress(const std::string &Name) override;
+ uint64_t getFunctionAddress(const std::string &Name) override;
+
+ TargetMachine *getTargetMachine() override { return TM.get(); }
+
+ /// @}
+ /// @name (Private) Registration Interfaces
+ /// @{
+
+ static void Register() {
+ MCJITCtor = createJIT;
+ }
+
+ static ExecutionEngine *
+ createJIT(std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM);
+
+ // @}
+
+ // Takes a mangled name and returns the corresponding JITSymbol (if a
+ // definition of that mangled name has been added to the JIT).
+ JITSymbol findSymbol(const std::string &Name, bool CheckFunctionsOnly);
+
+ // DEPRECATED - Please use findSymbol instead.
+ //
+ // This is not directly exposed via the ExecutionEngine API, but it is
+ // used by the LinkingMemoryManager.
+ //
+ // getSymbolAddress takes an unmangled name and returns the corresponding
+ // JITSymbol if a definition of the name has been added to the JIT.
+ uint64_t getSymbolAddress(const std::string &Name,
+ bool CheckFunctionsOnly);
+
+protected:
+ /// emitObject -- Generate a JITed object in memory from the specified module
+ /// Currently, MCJIT only supports a single module and the module passed to
+ /// this function call is expected to be the contained module. The module
+ /// is passed as a parameter here to prepare for multiple module support in
+ /// the future.
+ std::unique_ptr<MemoryBuffer> emitObject(Module *M);
+
+ void notifyObjectLoaded(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L);
+ void notifyFreeingObject(const object::ObjectFile &Obj);
+
+ JITSymbol findExistingSymbol(const std::string &Name);
+ Module *findModuleForSymbol(const std::string &Name, bool CheckFunctionsOnly);
+};
+
+} // end llvm namespace
+
+#endif // LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
new file mode 100644
index 000000000000..bb5d96051da9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -0,0 +1,188 @@
+//===-- OProfileJITEventListener.cpp - Tell OProfile about JITted code ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object that uses OProfileWrapper to tell
+// oprofile about JITted functions, including source line information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/OProfileWrapper.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/raw_ostream.h"
+#include <dirent.h>
+#include <fcntl.h>
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "oprofile-jit-event-listener"
+
+namespace {
+
+class OProfileJITEventListener : public JITEventListener {
+ std::unique_ptr<OProfileWrapper> Wrapper;
+
+ void initialize();
+ std::map<ObjectKey, OwningBinary<ObjectFile>> DebugObjects;
+
+public:
+ OProfileJITEventListener(std::unique_ptr<OProfileWrapper> LibraryWrapper)
+ : Wrapper(std::move(LibraryWrapper)) {
+ initialize();
+ }
+
+ ~OProfileJITEventListener();
+
+ void notifyObjectLoaded(ObjectKey Key, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+
+ void notifyFreeingObject(ObjectKey Key) override;
+};
+
+void OProfileJITEventListener::initialize() {
+ if (!Wrapper->op_open_agent()) {
+ const std::string err_str = sys::StrError();
+ LLVM_DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str
+ << "\n");
+ } else {
+ LLVM_DEBUG(dbgs() << "Connected to OProfile agent.\n");
+ }
+}
+
+OProfileJITEventListener::~OProfileJITEventListener() {
+ if (Wrapper->isAgentAvailable()) {
+ if (Wrapper->op_close_agent() == -1) {
+ const std::string err_str = sys::StrError();
+ LLVM_DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
+ << err_str << "\n");
+ } else {
+ LLVM_DEBUG(dbgs() << "Disconnected from OProfile agent.\n");
+ }
+ }
+}
+
+void OProfileJITEventListener::notifyObjectLoaded(
+ ObjectKey Key, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+ if (!Wrapper->isAgentAvailable()) {
+ return;
+ }
+
+ OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+ const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
+ std::unique_ptr<DIContext> Context = DWARFContext::create(DebugObj);
+
+ // Use symbol info to iterate functions in the object.
+ for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
+ SymbolRef Sym = P.first;
+ if (!Sym.getType() || *Sym.getType() != SymbolRef::ST_Function)
+ continue;
+
+ Expected<StringRef> NameOrErr = Sym.getName();
+ if (!NameOrErr)
+ continue;
+ StringRef Name = *NameOrErr;
+ Expected<uint64_t> AddrOrErr = Sym.getAddress();
+ if (!AddrOrErr)
+ continue;
+ uint64_t Addr = *AddrOrErr;
+ uint64_t Size = P.second;
+
+ if (Wrapper->op_write_native_code(Name.data(), Addr, (void *)Addr, Size) ==
+ -1) {
+ LLVM_DEBUG(dbgs() << "Failed to tell OProfile about native function "
+ << Name << " at [" << (void *)Addr << "-"
+ << ((char *)Addr + Size) << "]\n");
+ continue;
+ }
+
+ DILineInfoTable Lines = Context->getLineInfoForAddressRange(Addr, Size);
+ size_t i = 0;
+ size_t num_entries = Lines.size();
+ struct debug_line_info *debug_line;
+ debug_line = (struct debug_line_info *)calloc(
+ num_entries, sizeof(struct debug_line_info));
+
+ for (auto& It : Lines) {
+ debug_line[i].vma = (unsigned long)It.first;
+ debug_line[i].lineno = It.second.Line;
+ debug_line[i].filename =
+ const_cast<char *>(Lines.front().second.FileName.c_str());
+ ++i;
+ }
+
+ if (Wrapper->op_write_debug_line_info((void *)Addr, num_entries,
+ debug_line) == -1) {
+ LLVM_DEBUG(dbgs() << "Failed to tell OProfiler about debug object at ["
+ << (void *)Addr << "-" << ((char *)Addr + Size)
+ << "]\n");
+ continue;
+ }
+ }
+
+ DebugObjects[Key] = std::move(DebugObjOwner);
+}
+
+void OProfileJITEventListener::notifyFreeingObject(ObjectKey Key) {
+ if (Wrapper->isAgentAvailable()) {
+
+ // If there was no agent registered when the original object was loaded then
+ // we won't have created a debug object for it, so bail out.
+ if (DebugObjects.find(Key) == DebugObjects.end())
+ return;
+
+ const ObjectFile &DebugObj = *DebugObjects[Key].getBinary();
+
+ // Use symbol info to iterate functions in the object.
+ for (symbol_iterator I = DebugObj.symbol_begin(),
+ E = DebugObj.symbol_end();
+ I != E; ++I) {
+ if (I->getType() && *I->getType() == SymbolRef::ST_Function) {
+ Expected<uint64_t> AddrOrErr = I->getAddress();
+ if (!AddrOrErr)
+ continue;
+ uint64_t Addr = *AddrOrErr;
+
+ if (Wrapper->op_unload_native_code(Addr) == -1) {
+ LLVM_DEBUG(
+ dbgs()
+ << "Failed to tell OProfile about unload of native function at "
+ << (void *)Addr << "\n");
+ continue;
+ }
+ }
+ }
+ }
+
+ DebugObjects.erase(Key);
+}
+
+} // anonymous namespace.
+
+namespace llvm {
+JITEventListener *JITEventListener::createOProfileJITEventListener() {
+ return new OProfileJITEventListener(std::make_unique<OProfileWrapper>());
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreateOProfileJITEventListener(void)
+{
+ return wrap(JITEventListener::createOProfileJITEventListener());
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
new file mode 100644
index 000000000000..b78d2531382d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
@@ -0,0 +1,267 @@
+//===-- OProfileWrapper.cpp - OProfile JIT API Wrapper implementation -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interface in OProfileWrapper.h. It is responsible
+// for loading the opagent dynamic library when the first call to an op_
+// function occurs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/OProfileWrapper.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+#include <dirent.h>
+#include <fcntl.h>
+#include <mutex>
+#include <stddef.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#define DEBUG_TYPE "oprofile-wrapper"
+
+namespace {
+
+// Global mutex to ensure a single thread initializes oprofile agent.
+llvm::sys::Mutex OProfileInitializationMutex;
+
+} // anonymous namespace
+
+namespace llvm {
+
+OProfileWrapper::OProfileWrapper()
+: Agent(0),
+ OpenAgentFunc(0),
+ CloseAgentFunc(0),
+ WriteNativeCodeFunc(0),
+ WriteDebugLineInfoFunc(0),
+ UnloadNativeCodeFunc(0),
+ MajorVersionFunc(0),
+ MinorVersionFunc(0),
+ IsOProfileRunningFunc(0),
+ Initialized(false) {
+}
+
+bool OProfileWrapper::initialize() {
+ using namespace llvm;
+ using namespace llvm::sys;
+
+ std::lock_guard<sys::Mutex> Guard(OProfileInitializationMutex);
+
+ if (Initialized)
+ return OpenAgentFunc != 0;
+
+ Initialized = true;
+
+ // If the oprofile daemon is not running, don't load the opagent library
+ if (!isOProfileRunning()) {
+ LLVM_DEBUG(dbgs() << "OProfile daemon is not detected.\n");
+ return false;
+ }
+
+ std::string error;
+ if(!DynamicLibrary::LoadLibraryPermanently("libopagent.so", &error)) {
+ LLVM_DEBUG(
+ dbgs()
+ << "OProfile connector library libopagent.so could not be loaded: "
+ << error << "\n");
+ }
+
+ // Get the addresses of the opagent functions
+ OpenAgentFunc = (op_open_agent_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_open_agent");
+ CloseAgentFunc = (op_close_agent_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_close_agent");
+ WriteNativeCodeFunc = (op_write_native_code_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_write_native_code");
+ WriteDebugLineInfoFunc = (op_write_debug_line_info_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_write_debug_line_info");
+ UnloadNativeCodeFunc = (op_unload_native_code_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_unload_native_code");
+ MajorVersionFunc = (op_major_version_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_major_version");
+ MinorVersionFunc = (op_major_version_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_minor_version");
+
+ // With missing functions, we can do nothing
+ if (!OpenAgentFunc
+ || !CloseAgentFunc
+ || !WriteNativeCodeFunc
+ || !WriteDebugLineInfoFunc
+ || !UnloadNativeCodeFunc) {
+ OpenAgentFunc = 0;
+ CloseAgentFunc = 0;
+ WriteNativeCodeFunc = 0;
+ WriteDebugLineInfoFunc = 0;
+ UnloadNativeCodeFunc = 0;
+ return false;
+ }
+
+ return true;
+}
+
+bool OProfileWrapper::isOProfileRunning() {
+ if (IsOProfileRunningFunc != 0)
+ return IsOProfileRunningFunc();
+ return checkForOProfileProcEntry();
+}
+
+bool OProfileWrapper::checkForOProfileProcEntry() {
+ DIR* ProcDir;
+
+ ProcDir = opendir("/proc");
+ if (!ProcDir)
+ return false;
+
+ // Walk the /proc tree looking for the oprofile daemon
+ struct dirent* Entry;
+ while (0 != (Entry = readdir(ProcDir))) {
+ if (Entry->d_type == DT_DIR) {
+ // Build a path from the current entry name
+ SmallString<256> CmdLineFName;
+ raw_svector_ostream(CmdLineFName) << "/proc/" << Entry->d_name
+ << "/cmdline";
+
+ // Open the cmdline file
+ int CmdLineFD = open(CmdLineFName.c_str(), S_IRUSR);
+ if (CmdLineFD != -1) {
+ char ExeName[PATH_MAX+1];
+ char* BaseName = 0;
+
+ // Read the cmdline file
+ ssize_t NumRead = read(CmdLineFD, ExeName, PATH_MAX+1);
+ close(CmdLineFD);
+ ssize_t Idx = 0;
+
+ if (ExeName[0] != '/') {
+ BaseName = ExeName;
+ }
+
+ // Find the terminator for the first string
+ while (Idx < NumRead-1 && ExeName[Idx] != 0) {
+ Idx++;
+ }
+
+ // Go back to the last non-null character
+ Idx--;
+
+ // Find the last path separator in the first string
+ while (Idx > 0) {
+ if (ExeName[Idx] == '/') {
+ BaseName = ExeName + Idx + 1;
+ break;
+ }
+ Idx--;
+ }
+
+ // Test this to see if it is the oprofile daemon
+ if (BaseName != 0 && (!strcmp("oprofiled", BaseName) ||
+ !strcmp("operf", BaseName))) {
+ // If it is, we're done
+ closedir(ProcDir);
+ return true;
+ }
+ }
+ }
+ }
+
+ // We've looked through all the files and didn't find the daemon
+ closedir(ProcDir);
+ return false;
+}
+
+bool OProfileWrapper::op_open_agent() {
+ if (!Initialized)
+ initialize();
+
+ if (OpenAgentFunc != 0) {
+ Agent = OpenAgentFunc();
+ return Agent != 0;
+ }
+
+ return false;
+}
+
+int OProfileWrapper::op_close_agent() {
+ if (!Initialized)
+ initialize();
+
+ int ret = -1;
+ if (Agent && CloseAgentFunc) {
+ ret = CloseAgentFunc(Agent);
+ if (ret == 0) {
+ Agent = 0;
+ }
+ }
+ return ret;
+}
+
+bool OProfileWrapper::isAgentAvailable() {
+ return Agent != 0;
+}
+
+int OProfileWrapper::op_write_native_code(const char* Name,
+ uint64_t Addr,
+ void const* Code,
+ const unsigned int Size) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && WriteNativeCodeFunc)
+ return WriteNativeCodeFunc(Agent, Name, Addr, Code, Size);
+
+ return -1;
+}
+
+int OProfileWrapper::op_write_debug_line_info(
+ void const* Code,
+ size_t NumEntries,
+ struct debug_line_info const* Info) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && WriteDebugLineInfoFunc)
+ return WriteDebugLineInfoFunc(Agent, Code, NumEntries, Info);
+
+ return -1;
+}
+
+int OProfileWrapper::op_major_version() {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && MajorVersionFunc)
+ return MajorVersionFunc();
+
+ return -1;
+}
+
+int OProfileWrapper::op_minor_version() {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && MinorVersionFunc)
+ return MinorVersionFunc();
+
+ return -1;
+}
+
+int OProfileWrapper::op_unload_native_code(uint64_t Addr) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && UnloadNativeCodeFunc)
+ return UnloadNativeCodeFunc(Agent, Addr);
+
+ return -1;
+}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp
new file mode 100644
index 000000000000..c8f5a99099ea
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp
@@ -0,0 +1,912 @@
+//===------- COFFPlatform.cpp - Utilities for executing COFF in Orc -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/COFFPlatform.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ObjectFormats.h"
+
+#include "llvm/Object/COFF.h"
+
+#include "llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h"
+
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+using SPSCOFFJITDylibDepInfo = SPSSequence<SPSExecutorAddr>;
+using SPSCOFFJITDylibDepInfoMap =
+ SPSSequence<SPSTuple<SPSExecutorAddr, SPSCOFFJITDylibDepInfo>>;
+using SPSCOFFObjectSectionsMap =
+ SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRange>>;
+using SPSCOFFRegisterObjectSectionsArgs =
+ SPSArgList<SPSExecutorAddr, SPSCOFFObjectSectionsMap, bool>;
+using SPSCOFFDeregisterObjectSectionsArgs =
+ SPSArgList<SPSExecutorAddr, SPSCOFFObjectSectionsMap>;
+
+} // namespace shared
+} // namespace orc
+} // namespace llvm
+namespace {
+
+class COFFHeaderMaterializationUnit : public MaterializationUnit {
+public:
+ COFFHeaderMaterializationUnit(COFFPlatform &CP,
+ const SymbolStringPtr &HeaderStartSymbol)
+ : MaterializationUnit(createHeaderInterface(CP, HeaderStartSymbol)),
+ CP(CP) {}
+
+ StringRef getName() const override { return "COFFHeaderMU"; }
+
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ unsigned PointerSize;
+ llvm::endianness Endianness;
+ const auto &TT = CP.getExecutionSession().getTargetTriple();
+
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ PointerSize = 8;
+ Endianness = llvm::endianness::little;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+
+ auto G = std::make_unique<jitlink::LinkGraph>(
+ "<COFFHeaderMU>", TT, PointerSize, Endianness,
+ jitlink::getGenericEdgeKindName);
+ auto &HeaderSection = G->createSection("__header", MemProt::Read);
+ auto &HeaderBlock = createHeaderBlock(*G, HeaderSection);
+
+ // The init symbol is the __ImageBase symbol.
+ auto &ImageBaseSymbol = G->addDefinedSymbol(
+ HeaderBlock, 0, *R->getInitializerSymbol(), HeaderBlock.getSize(),
+ jitlink::Linkage::Strong, jitlink::Scope::Default, false, true);
+
+ addImageBaseRelocationEdge(HeaderBlock, ImageBaseSymbol);
+
+ CP.getObjectLinkingLayer().emit(std::move(R), std::move(G));
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Sym) override {}
+
+private:
+ struct HeaderSymbol {
+ const char *Name;
+ uint64_t Offset;
+ };
+
+ struct NTHeader {
+ support::ulittle32_t PEMagic;
+ object::coff_file_header FileHeader;
+ struct PEHeader {
+ object::pe32plus_header Header;
+ object::data_directory DataDirectory[COFF::NUM_DATA_DIRECTORIES + 1];
+ } OptionalHeader;
+ };
+
+ struct HeaderBlockContent {
+ object::dos_header DOSHeader;
+ COFFHeaderMaterializationUnit::NTHeader NTHeader;
+ };
+
+ static jitlink::Block &createHeaderBlock(jitlink::LinkGraph &G,
+ jitlink::Section &HeaderSection) {
+ HeaderBlockContent Hdr = {};
+
+ // Set up magic
+ Hdr.DOSHeader.Magic[0] = 'M';
+ Hdr.DOSHeader.Magic[1] = 'Z';
+ Hdr.DOSHeader.AddressOfNewExeHeader =
+ offsetof(HeaderBlockContent, NTHeader);
+ uint32_t PEMagic = *reinterpret_cast<const uint32_t *>(COFF::PEMagic);
+ Hdr.NTHeader.PEMagic = PEMagic;
+ Hdr.NTHeader.OptionalHeader.Header.Magic = COFF::PE32Header::PE32_PLUS;
+
+ switch (G.getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ Hdr.NTHeader.FileHeader.Machine = COFF::IMAGE_FILE_MACHINE_AMD64;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+
+ auto HeaderContent = G.allocateContent(
+ ArrayRef<char>(reinterpret_cast<const char *>(&Hdr), sizeof(Hdr)));
+
+ return G.createContentBlock(HeaderSection, HeaderContent, ExecutorAddr(), 8,
+ 0);
+ }
+
+ static void addImageBaseRelocationEdge(jitlink::Block &B,
+ jitlink::Symbol &ImageBase) {
+ auto ImageBaseOffset = offsetof(HeaderBlockContent, NTHeader) +
+ offsetof(NTHeader, OptionalHeader) +
+ offsetof(object::pe32plus_header, ImageBase);
+ B.addEdge(jitlink::x86_64::Pointer64, ImageBaseOffset, ImageBase, 0);
+ }
+
+ static MaterializationUnit::Interface
+ createHeaderInterface(COFFPlatform &MOP,
+ const SymbolStringPtr &HeaderStartSymbol) {
+ SymbolFlagsMap HeaderSymbolFlags;
+
+ HeaderSymbolFlags[HeaderStartSymbol] = JITSymbolFlags::Exported;
+
+ return MaterializationUnit::Interface(std::move(HeaderSymbolFlags),
+ HeaderStartSymbol);
+ }
+
+ COFFPlatform &CP;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<COFFPlatform>> COFFPlatform::Create(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, std::unique_ptr<MemoryBuffer> OrcRuntimeArchiveBuffer,
+ LoadDynamicLibrary LoadDynLibrary, bool StaticVCRuntime,
+ const char *VCRuntimePath, std::optional<SymbolAliasMap> RuntimeAliases) {
+
+ // If the target is not supported then bail out immediately.
+ if (!supportedTarget(ES.getTargetTriple()))
+ return make_error<StringError>("Unsupported COFFPlatform triple: " +
+ ES.getTargetTriple().str(),
+ inconvertibleErrorCode());
+
+ auto &EPC = ES.getExecutorProcessControl();
+
+ auto GeneratorArchive =
+ object::Archive::create(OrcRuntimeArchiveBuffer->getMemBufferRef());
+ if (!GeneratorArchive)
+ return GeneratorArchive.takeError();
+
+ auto OrcRuntimeArchiveGenerator = StaticLibraryDefinitionGenerator::Create(
+ ObjLinkingLayer, nullptr, std::move(*GeneratorArchive));
+ if (!OrcRuntimeArchiveGenerator)
+ return OrcRuntimeArchiveGenerator.takeError();
+
+ // We need a second instance of the archive (for now) for the Platform. We
+ // can `cantFail` this call, since if it were going to fail it would have
+ // failed above.
+ auto RuntimeArchive = cantFail(
+ object::Archive::create(OrcRuntimeArchiveBuffer->getMemBufferRef()));
+
+ // Create default aliases if the caller didn't supply any.
+ if (!RuntimeAliases)
+ RuntimeAliases = standardPlatformAliases(ES);
+
+ // Define the aliases.
+ if (auto Err = PlatformJD.define(symbolAliases(std::move(*RuntimeAliases))))
+ return std::move(Err);
+
+ auto &HostFuncJD = ES.createBareJITDylib("$<PlatformRuntimeHostFuncJD>");
+
+ // Add JIT-dispatch function support symbols.
+ if (auto Err = HostFuncJD.define(
+ absoluteSymbols({{ES.intern("__orc_rt_jit_dispatch"),
+ {EPC.getJITDispatchInfo().JITDispatchFunction,
+ JITSymbolFlags::Exported}},
+ {ES.intern("__orc_rt_jit_dispatch_ctx"),
+ {EPC.getJITDispatchInfo().JITDispatchContext,
+ JITSymbolFlags::Exported}}})))
+ return std::move(Err);
+
+ PlatformJD.addToLinkOrder(HostFuncJD);
+
+ // Create the instance.
+ Error Err = Error::success();
+ auto P = std::unique_ptr<COFFPlatform>(new COFFPlatform(
+ ES, ObjLinkingLayer, PlatformJD, std::move(*OrcRuntimeArchiveGenerator),
+ std::move(OrcRuntimeArchiveBuffer), std::move(RuntimeArchive),
+ std::move(LoadDynLibrary), StaticVCRuntime, VCRuntimePath, Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(P);
+}
+
+Expected<std::unique_ptr<COFFPlatform>>
+COFFPlatform::Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, const char *OrcRuntimePath,
+ LoadDynamicLibrary LoadDynLibrary, bool StaticVCRuntime,
+ const char *VCRuntimePath,
+ std::optional<SymbolAliasMap> RuntimeAliases) {
+
+ auto ArchiveBuffer = MemoryBuffer::getFile(OrcRuntimePath);
+ if (!ArchiveBuffer)
+ return createFileError(OrcRuntimePath, ArchiveBuffer.getError());
+
+ return Create(ES, ObjLinkingLayer, PlatformJD, std::move(*ArchiveBuffer),
+ std::move(LoadDynLibrary), StaticVCRuntime, VCRuntimePath,
+ std::move(RuntimeAliases));
+}
+
+Expected<MemoryBufferRef> COFFPlatform::getPerJDObjectFile() {
+ auto PerJDObj = OrcRuntimeArchive->findSym("__orc_rt_coff_per_jd_marker");
+ if (!PerJDObj)
+ return PerJDObj.takeError();
+
+ if (!*PerJDObj)
+ return make_error<StringError>("Could not find per jd object file",
+ inconvertibleErrorCode());
+
+ auto Buffer = (*PerJDObj)->getAsBinary();
+ if (!Buffer)
+ return Buffer.takeError();
+
+ return (*Buffer)->getMemoryBufferRef();
+}
+
+static void addAliases(ExecutionSession &ES, SymbolAliasMap &Aliases,
+ ArrayRef<std::pair<const char *, const char *>> AL) {
+ for (auto &KV : AL) {
+ auto AliasName = ES.intern(KV.first);
+ assert(!Aliases.count(AliasName) && "Duplicate symbol name in alias map");
+ Aliases[std::move(AliasName)] = {ES.intern(KV.second),
+ JITSymbolFlags::Exported};
+ }
+}
+
+Error COFFPlatform::setupJITDylib(JITDylib &JD) {
+ if (auto Err = JD.define(std::make_unique<COFFHeaderMaterializationUnit>(
+ *this, COFFHeaderStartSymbol)))
+ return Err;
+
+ if (auto Err = ES.lookup({&JD}, COFFHeaderStartSymbol).takeError())
+ return Err;
+
+ // Define the CXX aliases.
+ SymbolAliasMap CXXAliases;
+ addAliases(ES, CXXAliases, requiredCXXAliases());
+ if (auto Err = JD.define(symbolAliases(std::move(CXXAliases))))
+ return Err;
+
+ auto PerJDObj = getPerJDObjectFile();
+ if (!PerJDObj)
+ return PerJDObj.takeError();
+
+ auto I = getObjectFileInterface(ES, *PerJDObj);
+ if (!I)
+ return I.takeError();
+
+ if (auto Err = ObjLinkingLayer.add(
+ JD, MemoryBuffer::getMemBuffer(*PerJDObj, false), std::move(*I)))
+ return Err;
+
+ if (!Bootstrapping) {
+ auto ImportedLibs = StaticVCRuntime
+ ? VCRuntimeBootstrap->loadStaticVCRuntime(JD)
+ : VCRuntimeBootstrap->loadDynamicVCRuntime(JD);
+ if (!ImportedLibs)
+ return ImportedLibs.takeError();
+ for (auto &Lib : *ImportedLibs)
+ if (auto Err = LoadDynLibrary(JD, Lib))
+ return Err;
+ if (StaticVCRuntime)
+ if (auto Err = VCRuntimeBootstrap->initializeStaticVCRuntime(JD))
+ return Err;
+ }
+
+ JD.addGenerator(DLLImportDefinitionGenerator::Create(ES, ObjLinkingLayer));
+ return Error::success();
+}
+
+Error COFFPlatform::teardownJITDylib(JITDylib &JD) {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = JITDylibToHeaderAddr.find(&JD);
+ if (I != JITDylibToHeaderAddr.end()) {
+ assert(HeaderAddrToJITDylib.count(I->second) &&
+ "HeaderAddrToJITDylib missing entry");
+ HeaderAddrToJITDylib.erase(I->second);
+ JITDylibToHeaderAddr.erase(I);
+ }
+ return Error::success();
+}
+
+Error COFFPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ const auto &InitSym = MU.getInitializerSymbol();
+ if (!InitSym)
+ return Error::success();
+
+ RegisteredInitSymbols[&JD].add(InitSym,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+
+ LLVM_DEBUG({
+ dbgs() << "COFFPlatform: Registered init symbol " << *InitSym << " for MU "
+ << MU.getName() << "\n";
+ });
+ return Error::success();
+}
+
+Error COFFPlatform::notifyRemoving(ResourceTracker &RT) {
+ llvm_unreachable("Not supported yet");
+}
+
+SymbolAliasMap COFFPlatform::standardPlatformAliases(ExecutionSession &ES) {
+ SymbolAliasMap Aliases;
+ addAliases(ES, Aliases, standardRuntimeUtilityAliases());
+ return Aliases;
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+COFFPlatform::requiredCXXAliases() {
+ static const std::pair<const char *, const char *> RequiredCXXAliases[] = {
+ {"_CxxThrowException", "__orc_rt_coff_cxx_throw_exception"},
+ {"_onexit", "__orc_rt_coff_onexit_per_jd"},
+ {"atexit", "__orc_rt_coff_atexit_per_jd"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(RequiredCXXAliases);
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+COFFPlatform::standardRuntimeUtilityAliases() {
+ static const std::pair<const char *, const char *>
+ StandardRuntimeUtilityAliases[] = {
+ {"__orc_rt_run_program", "__orc_rt_coff_run_program"},
+ {"__orc_rt_jit_dlerror", "__orc_rt_coff_jit_dlerror"},
+ {"__orc_rt_jit_dlopen", "__orc_rt_coff_jit_dlopen"},
+ {"__orc_rt_jit_dlclose", "__orc_rt_coff_jit_dlclose"},
+ {"__orc_rt_jit_dlsym", "__orc_rt_coff_jit_dlsym"},
+ {"__orc_rt_log_error", "__orc_rt_log_error_to_stderr"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(
+ StandardRuntimeUtilityAliases);
+}
+
+bool COFFPlatform::supportedTarget(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+COFFPlatform::COFFPlatform(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD,
+ std::unique_ptr<StaticLibraryDefinitionGenerator> OrcRuntimeGenerator,
+ std::unique_ptr<MemoryBuffer> OrcRuntimeArchiveBuffer,
+ std::unique_ptr<object::Archive> OrcRuntimeArchive,
+ LoadDynamicLibrary LoadDynLibrary, bool StaticVCRuntime,
+ const char *VCRuntimePath, Error &Err)
+ : ES(ES), ObjLinkingLayer(ObjLinkingLayer),
+ LoadDynLibrary(std::move(LoadDynLibrary)),
+ OrcRuntimeArchiveBuffer(std::move(OrcRuntimeArchiveBuffer)),
+ OrcRuntimeArchive(std::move(OrcRuntimeArchive)),
+ StaticVCRuntime(StaticVCRuntime),
+ COFFHeaderStartSymbol(ES.intern("__ImageBase")) {
+ ErrorAsOutParameter _(&Err);
+
+ Bootstrapping.store(true);
+ ObjLinkingLayer.addPlugin(std::make_unique<COFFPlatformPlugin>(*this));
+
+ // Load the VC runtime.
+ auto VCRT =
+ COFFVCRuntimeBootstrapper::Create(ES, ObjLinkingLayer, VCRuntimePath);
+ if (!VCRT) {
+ Err = VCRT.takeError();
+ return;
+ }
+ VCRuntimeBootstrap = std::move(*VCRT);
+
+ for (auto &Lib : OrcRuntimeGenerator->getImportedDynamicLibraries())
+ DylibsToPreload.insert(Lib);
+
+ auto ImportedLibs =
+ StaticVCRuntime ? VCRuntimeBootstrap->loadStaticVCRuntime(PlatformJD)
+ : VCRuntimeBootstrap->loadDynamicVCRuntime(PlatformJD);
+ if (!ImportedLibs) {
+ Err = ImportedLibs.takeError();
+ return;
+ }
+
+ for (auto &Lib : *ImportedLibs)
+ DylibsToPreload.insert(Lib);
+
+ PlatformJD.addGenerator(std::move(OrcRuntimeGenerator));
+
+ // PlatformJD hasn't been set up by the platform yet (since we're creating
+ // the platform now), so set it up.
+ if (auto E2 = setupJITDylib(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ for (auto& Lib : DylibsToPreload)
+ if (auto E2 = this->LoadDynLibrary(PlatformJD, Lib)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ if (StaticVCRuntime)
+ if (auto E2 = VCRuntimeBootstrap->initializeStaticVCRuntime(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ // Associate wrapper function tags with JIT-side function implementations.
+ if (auto E2 = associateRuntimeSupportFunctions(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ // Lookup addresses of runtime functions callable by the platform,
+ // call the platform bootstrap function to initialize the platform-state
+ // object in the executor.
+ if (auto E2 = bootstrapCOFFRuntime(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ Bootstrapping.store(false);
+ JDBootstrapStates.clear();
+}
+
+Expected<COFFPlatform::JITDylibDepMap>
+COFFPlatform::buildJDDepMap(JITDylib &JD) {
+ return ES.runSessionLocked([&]() -> Expected<JITDylibDepMap> {
+ JITDylibDepMap JDDepMap;
+
+ SmallVector<JITDylib *, 16> Worklist({&JD});
+ while (!Worklist.empty()) {
+ auto CurJD = Worklist.back();
+ Worklist.pop_back();
+
+ auto &DM = JDDepMap[CurJD];
+ CurJD->withLinkOrderDo([&](const JITDylibSearchOrder &O) {
+ DM.reserve(O.size());
+ for (auto &KV : O) {
+ if (KV.first == CurJD)
+ continue;
+ {
+ // Skip bare JITDylibs that are not known to the platform.
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ if (!JITDylibToHeaderAddr.count(KV.first)) {
+ LLVM_DEBUG({
+ dbgs() << "JITDylib unregistered to COFFPlatform detected in "
+ "LinkOrder: "
+ << CurJD->getName() << "\n";
+ });
+ continue;
+ }
+ }
+ DM.push_back(KV.first);
+ // Push unvisited entry.
+ if (!JDDepMap.count(KV.first)) {
+ Worklist.push_back(KV.first);
+ JDDepMap[KV.first] = {};
+ }
+ }
+ });
+ }
+ return std::move(JDDepMap);
+ });
+}
+
+void COFFPlatform::pushInitializersLoop(PushInitializersSendResultFn SendResult,
+ JITDylibSP JD,
+ JITDylibDepMap &JDDepMap) {
+ SmallVector<JITDylib *, 16> Worklist({JD.get()});
+ DenseSet<JITDylib *> Visited({JD.get()});
+ DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
+ ES.runSessionLocked([&]() {
+ while (!Worklist.empty()) {
+ auto CurJD = Worklist.back();
+ Worklist.pop_back();
+
+ auto RISItr = RegisteredInitSymbols.find(CurJD);
+ if (RISItr != RegisteredInitSymbols.end()) {
+ NewInitSymbols[CurJD] = std::move(RISItr->second);
+ RegisteredInitSymbols.erase(RISItr);
+ }
+
+ for (auto *DepJD : JDDepMap[CurJD])
+ if (!Visited.count(DepJD)) {
+ Worklist.push_back(DepJD);
+ Visited.insert(DepJD);
+ }
+ }
+ });
+
+ // If there are no further init symbols to look up then send the link order
+ // (as a list of header addresses) to the caller.
+ if (NewInitSymbols.empty()) {
+ // Build the dep info map to return.
+ COFFJITDylibDepInfoMap DIM;
+ DIM.reserve(JDDepMap.size());
+ for (auto &KV : JDDepMap) {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ COFFJITDylibDepInfo DepInfo;
+ DepInfo.reserve(KV.second.size());
+ for (auto &Dep : KV.second) {
+ DepInfo.push_back(JITDylibToHeaderAddr[Dep]);
+ }
+ auto H = JITDylibToHeaderAddr[KV.first];
+ DIM.push_back(std::make_pair(H, std::move(DepInfo)));
+ }
+ SendResult(DIM);
+ return;
+ }
+
+ // Otherwise issue a lookup and re-run this phase when it completes.
+ lookupInitSymbolsAsync(
+ [this, SendResult = std::move(SendResult), &JD,
+ JDDepMap = std::move(JDDepMap)](Error Err) mutable {
+ if (Err)
+ SendResult(std::move(Err));
+ else
+ pushInitializersLoop(std::move(SendResult), JD, JDDepMap);
+ },
+ ES, std::move(NewInitSymbols));
+}
+
+void COFFPlatform::rt_pushInitializers(PushInitializersSendResultFn SendResult,
+ ExecutorAddr JDHeaderAddr) {
+ JITDylibSP JD;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HeaderAddrToJITDylib.find(JDHeaderAddr);
+ if (I != HeaderAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "COFFPlatform::rt_pushInitializers(" << JDHeaderAddr << ") ";
+ if (JD)
+ dbgs() << "pushing initializers for " << JD->getName() << "\n";
+ else
+ dbgs() << "No JITDylib for header address.\n";
+ });
+
+ if (!JD) {
+ SendResult(make_error<StringError>("No JITDylib with header addr " +
+ formatv("{0:x}", JDHeaderAddr),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ auto JDDepMap = buildJDDepMap(*JD);
+ if (!JDDepMap) {
+ SendResult(JDDepMap.takeError());
+ return;
+ }
+
+ pushInitializersLoop(std::move(SendResult), JD, *JDDepMap);
+}
+
+void COFFPlatform::rt_lookupSymbol(SendSymbolAddressFn SendResult,
+ ExecutorAddr Handle, StringRef SymbolName) {
+ LLVM_DEBUG(dbgs() << "COFFPlatform::rt_lookupSymbol(\"" << Handle << "\")\n");
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HeaderAddrToJITDylib.find(Handle);
+ if (I != HeaderAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG(dbgs() << " No JITDylib for handle " << Handle << "\n");
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ // Use functor class to work around XL build compiler issue on AIX.
+ class RtLookupNotifyComplete {
+ public:
+ RtLookupNotifyComplete(SendSymbolAddressFn &&SendResult)
+ : SendResult(std::move(SendResult)) {}
+ void operator()(Expected<SymbolMap> Result) {
+ if (Result) {
+ assert(Result->size() == 1 && "Unexpected result map count");
+ SendResult(Result->begin()->second.getAddress());
+ } else {
+ SendResult(Result.takeError());
+ }
+ }
+
+ private:
+ SendSymbolAddressFn SendResult;
+ };
+
+ ES.lookup(
+ LookupKind::DLSym, {{JD, JITDylibLookupFlags::MatchExportedSymbolsOnly}},
+ SymbolLookupSet(ES.intern(SymbolName)), SymbolState::Ready,
+ RtLookupNotifyComplete(std::move(SendResult)), NoDependenciesToRegister);
+}
+
+Error COFFPlatform::associateRuntimeSupportFunctions(JITDylib &PlatformJD) {
+ ExecutionSession::JITDispatchHandlerAssociationMap WFs;
+
+ using LookupSymbolSPSSig =
+ SPSExpected<SPSExecutorAddr>(SPSExecutorAddr, SPSString);
+ WFs[ES.intern("__orc_rt_coff_symbol_lookup_tag")] =
+ ES.wrapAsyncWithSPS<LookupSymbolSPSSig>(this,
+ &COFFPlatform::rt_lookupSymbol);
+ using PushInitializersSPSSig =
+ SPSExpected<SPSCOFFJITDylibDepInfoMap>(SPSExecutorAddr);
+ WFs[ES.intern("__orc_rt_coff_push_initializers_tag")] =
+ ES.wrapAsyncWithSPS<PushInitializersSPSSig>(
+ this, &COFFPlatform::rt_pushInitializers);
+
+ return ES.registerJITDispatchHandlers(PlatformJD, std::move(WFs));
+}
+
+Error COFFPlatform::runBootstrapInitializers(JDBootstrapState &BState) {
+ llvm::sort(BState.Initializers);
+ if (auto Err =
+ runBootstrapSubsectionInitializers(BState, ".CRT$XIA", ".CRT$XIZ"))
+ return Err;
+
+ if (auto Err = runSymbolIfExists(*BState.JD, "__run_after_c_init"))
+ return Err;
+
+ if (auto Err =
+ runBootstrapSubsectionInitializers(BState, ".CRT$XCA", ".CRT$XCZ"))
+ return Err;
+ return Error::success();
+}
+
+Error COFFPlatform::runBootstrapSubsectionInitializers(JDBootstrapState &BState,
+ StringRef Start,
+ StringRef End) {
+ for (auto &Initializer : BState.Initializers)
+ if (Initializer.first >= Start && Initializer.first <= End &&
+ Initializer.second) {
+ auto Res =
+ ES.getExecutorProcessControl().runAsVoidFunction(Initializer.second);
+ if (!Res)
+ return Res.takeError();
+ }
+ return Error::success();
+}
+
+Error COFFPlatform::bootstrapCOFFRuntime(JITDylib &PlatformJD) {
+ // Looking up the runtime symbols also triggers collection of the static
+ // initializers when static linking is in use.
+ if (auto Err = lookupAndRecordAddrs(
+ ES, LookupKind::Static, makeJITDylibSearchOrder(&PlatformJD),
+ {
+ {ES.intern("__orc_rt_coff_platform_bootstrap"),
+ &orc_rt_coff_platform_bootstrap},
+ {ES.intern("__orc_rt_coff_platform_shutdown"),
+ &orc_rt_coff_platform_shutdown},
+ {ES.intern("__orc_rt_coff_register_jitdylib"),
+ &orc_rt_coff_register_jitdylib},
+ {ES.intern("__orc_rt_coff_deregister_jitdylib"),
+ &orc_rt_coff_deregister_jitdylib},
+ {ES.intern("__orc_rt_coff_register_object_sections"),
+ &orc_rt_coff_register_object_sections},
+ {ES.intern("__orc_rt_coff_deregister_object_sections"),
+ &orc_rt_coff_deregister_object_sections},
+ }))
+ return Err;
+
+ // Call bootstrap functions
+ if (auto Err = ES.callSPSWrapper<void()>(orc_rt_coff_platform_bootstrap))
+ return Err;
+
+ // Perform the pending JITDylib registration actions that we couldn't do
+ // earlier because the ORC runtime was not yet fully linked.
+ for (auto KV : JDBootstrapStates) {
+ auto &JDBState = KV.second;
+ if (auto Err = ES.callSPSWrapper<void(SPSString, SPSExecutorAddr)>(
+ orc_rt_coff_register_jitdylib, JDBState.JDName,
+ JDBState.HeaderAddr))
+ return Err;
+
+ for (auto &ObjSectionMap : JDBState.ObjectSectionsMaps)
+ if (auto Err = ES.callSPSWrapper<void(SPSExecutorAddr,
+ SPSCOFFObjectSectionsMap, bool)>(
+ orc_rt_coff_register_object_sections, JDBState.HeaderAddr,
+ ObjSectionMap, false))
+ return Err;
+ }
+
+ // Run static initializers collected in bootstrap stage.
+ for (auto KV : JDBootstrapStates) {
+ auto &JDBState = KV.second;
+ if (auto Err = runBootstrapInitializers(JDBState))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Error COFFPlatform::runSymbolIfExists(JITDylib &PlatformJD,
+ StringRef SymbolName) {
+ ExecutorAddr jit_function;
+ auto AfterCLookupErr = lookupAndRecordAddrs(
+ ES, LookupKind::Static, makeJITDylibSearchOrder(&PlatformJD),
+ {{ES.intern(SymbolName), &jit_function}});
+ if (!AfterCLookupErr) {
+ auto Res = ES.getExecutorProcessControl().runAsVoidFunction(jit_function);
+ if (!Res)
+ return Res.takeError();
+ return Error::success();
+ }
+ if (!AfterCLookupErr.isA<SymbolsNotFound>())
+ return AfterCLookupErr;
+ consumeError(std::move(AfterCLookupErr));
+ return Error::success();
+}
+
+void COFFPlatform::COFFPlatformPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+ jitlink::PassConfiguration &Config) {
+
+ bool IsBootstrapping = CP.Bootstrapping.load();
+
+ if (auto InitSymbol = MR.getInitializerSymbol()) {
+ if (InitSymbol == CP.COFFHeaderStartSymbol) {
+ Config.PostAllocationPasses.push_back(
+ [this, &MR, IsBootstrapping](jitlink::LinkGraph &G) {
+ return associateJITDylibHeaderSymbol(G, MR, IsBootstrapping);
+ });
+ return;
+ }
+ Config.PrePrunePasses.push_back([this, &MR](jitlink::LinkGraph &G) {
+ return preserveInitializerSections(G, MR);
+ });
+ }
+
+ if (!IsBootstrapping)
+ Config.PostFixupPasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return registerObjectPlatformSections(G, JD);
+ });
+ else
+ Config.PostFixupPasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return registerObjectPlatformSectionsInBootstrap(G, JD);
+ });
+}
+
+ObjectLinkingLayer::Plugin::SyntheticSymbolDependenciesMap
+COFFPlatform::COFFPlatformPlugin::getSyntheticSymbolDependencies(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = InitSymbolDeps.find(&MR);
+ if (I != InitSymbolDeps.end()) {
+ SyntheticSymbolDependenciesMap Result;
+ Result[MR.getInitializerSymbol()] = std::move(I->second);
+ InitSymbolDeps.erase(&MR);
+ return Result;
+ }
+ return SyntheticSymbolDependenciesMap();
+}
+
+Error COFFPlatform::COFFPlatformPlugin::associateJITDylibHeaderSymbol(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR,
+ bool IsBootstraping) {
+ auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) {
+ return Sym->getName() == *CP.COFFHeaderStartSymbol;
+ });
+ assert(I != G.defined_symbols().end() && "Missing COFF header start symbol");
+
+ auto &JD = MR.getTargetJITDylib();
+ std::lock_guard<std::mutex> Lock(CP.PlatformMutex);
+ auto HeaderAddr = (*I)->getAddress();
+ CP.JITDylibToHeaderAddr[&JD] = HeaderAddr;
+ CP.HeaderAddrToJITDylib[HeaderAddr] = &JD;
+ if (!IsBootstraping) {
+ G.allocActions().push_back(
+ {cantFail(WrapperFunctionCall::Create<
+ SPSArgList<SPSString, SPSExecutorAddr>>(
+ CP.orc_rt_coff_register_jitdylib, JD.getName(), HeaderAddr)),
+ cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddr>>(
+ CP.orc_rt_coff_deregister_jitdylib, HeaderAddr))});
+ } else {
+ G.allocActions().push_back(
+ {{},
+ cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddr>>(
+ CP.orc_rt_coff_deregister_jitdylib, HeaderAddr))});
+ JDBootstrapState BState;
+ BState.JD = &JD;
+ BState.JDName = JD.getName();
+ BState.HeaderAddr = HeaderAddr;
+ CP.JDBootstrapStates.emplace(&JD, BState);
+ }
+
+ return Error::success();
+}
+
+Error COFFPlatform::COFFPlatformPlugin::registerObjectPlatformSections(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+ COFFObjectSectionsMap ObjSecs;
+ auto HeaderAddr = CP.JITDylibToHeaderAddr[&JD];
+ assert(HeaderAddr && "Must be registered jitdylib");
+ for (auto &S : G.sections()) {
+ jitlink::SectionRange Range(S);
+ if (Range.getSize())
+ ObjSecs.push_back(std::make_pair(S.getName().str(), Range.getRange()));
+ }
+
+ G.allocActions().push_back(
+ {cantFail(WrapperFunctionCall::Create<SPSCOFFRegisterObjectSectionsArgs>(
+ CP.orc_rt_coff_register_object_sections, HeaderAddr, ObjSecs, true)),
+ cantFail(
+ WrapperFunctionCall::Create<SPSCOFFDeregisterObjectSectionsArgs>(
+ CP.orc_rt_coff_deregister_object_sections, HeaderAddr,
+ ObjSecs))});
+
+ return Error::success();
+}
+
+Error COFFPlatform::COFFPlatformPlugin::preserveInitializerSections(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+ JITLinkSymbolSet InitSectionSymbols;
+ for (auto &Sec : G.sections())
+ if (isCOFFInitializerSection(Sec.getName()))
+ for (auto *B : Sec.blocks())
+ if (!B->edges_empty())
+ InitSectionSymbols.insert(
+ &G.addAnonymousSymbol(*B, 0, 0, false, true));
+
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ InitSymbolDeps[&MR] = InitSectionSymbols;
+ return Error::success();
+}
+
+Error COFFPlatform::COFFPlatformPlugin::
+ registerObjectPlatformSectionsInBootstrap(jitlink::LinkGraph &G,
+ JITDylib &JD) {
+ std::lock_guard<std::mutex> Lock(CP.PlatformMutex);
+ auto HeaderAddr = CP.JITDylibToHeaderAddr[&JD];
+ COFFObjectSectionsMap ObjSecs;
+ for (auto &S : G.sections()) {
+ jitlink::SectionRange Range(S);
+ if (Range.getSize())
+ ObjSecs.push_back(std::make_pair(S.getName().str(), Range.getRange()));
+ }
+
+ G.allocActions().push_back(
+ {{},
+ cantFail(
+ WrapperFunctionCall::Create<SPSCOFFDeregisterObjectSectionsArgs>(
+ CP.orc_rt_coff_deregister_object_sections, HeaderAddr,
+ ObjSecs))});
+
+ auto &BState = CP.JDBootstrapStates[&JD];
+ BState.ObjectSectionsMaps.push_back(std::move(ObjSecs));
+
+ // Collect static initializers
+ for (auto &S : G.sections())
+ if (isCOFFInitializerSection(S.getName()))
+ for (auto *B : S.blocks()) {
+ if (B->edges_empty())
+ continue;
+ for (auto &E : B->edges())
+ BState.Initializers.push_back(std::make_pair(
+ S.getName().str(), E.getTarget().getAddress() + E.getAddend()));
+ }
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp
new file mode 100644
index 000000000000..94f696fa2086
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/COFFVCRuntimeSupport.cpp
@@ -0,0 +1,184 @@
+//===------- COFFVCRuntimeSupport.cpp - VC runtime support in ORC ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/COFFVCRuntimeSupport.h"
+
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/WindowsDriver/MSVCPaths.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+Expected<std::unique_ptr<COFFVCRuntimeBootstrapper>>
+COFFVCRuntimeBootstrapper::Create(ExecutionSession &ES,
+ ObjectLinkingLayer &ObjLinkingLayer,
+ const char *RuntimePath) {
+ return std::unique_ptr<COFFVCRuntimeBootstrapper>(
+ new COFFVCRuntimeBootstrapper(ES, ObjLinkingLayer, RuntimePath));
+}
+
+COFFVCRuntimeBootstrapper::COFFVCRuntimeBootstrapper(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ const char *RuntimePath)
+ : ES(ES), ObjLinkingLayer(ObjLinkingLayer) {
+ if (RuntimePath)
+ this->RuntimePath = RuntimePath;
+}
+
+Expected<std::vector<std::string>>
+COFFVCRuntimeBootstrapper::loadStaticVCRuntime(JITDylib &JD,
+ bool DebugVersion) {
+ StringRef VCLibs[] = {"libvcruntime.lib", "libcmt.lib", "libcpmt.lib"};
+ StringRef UCRTLibs[] = {"libucrt.lib"};
+ std::vector<std::string> ImportedLibraries;
+ if (auto Err = loadVCRuntime(JD, ImportedLibraries, ArrayRef(VCLibs),
+ ArrayRef(UCRTLibs)))
+ return std::move(Err);
+ return ImportedLibraries;
+}
+
+Expected<std::vector<std::string>>
+COFFVCRuntimeBootstrapper::loadDynamicVCRuntime(JITDylib &JD,
+ bool DebugVersion) {
+ StringRef VCLibs[] = {"vcruntime.lib", "msvcrt.lib", "msvcprt.lib"};
+ StringRef UCRTLibs[] = {"ucrt.lib"};
+ std::vector<std::string> ImportedLibraries;
+ if (auto Err = loadVCRuntime(JD, ImportedLibraries, ArrayRef(VCLibs),
+ ArrayRef(UCRTLibs)))
+ return std::move(Err);
+ return ImportedLibraries;
+}
+
+Error COFFVCRuntimeBootstrapper::loadVCRuntime(
+ JITDylib &JD, std::vector<std::string> &ImportedLibraries,
+ ArrayRef<StringRef> VCLibs, ArrayRef<StringRef> UCRTLibs) {
+ MSVCToolchainPath Path;
+ if (!RuntimePath.empty()) {
+ Path.UCRTSdkLib = RuntimePath;
+ Path.VCToolchainLib = RuntimePath;
+ } else {
+ auto ToolchainPath = getMSVCToolchainPath();
+ if (!ToolchainPath)
+ return ToolchainPath.takeError();
+ Path = *ToolchainPath;
+ }
+ LLVM_DEBUG({
+ dbgs() << "Using VC toolchain pathes\n";
+ dbgs() << " VC toolchain path: " << Path.VCToolchainLib << "\n";
+ dbgs() << " UCRT path: " << Path.UCRTSdkLib << "\n";
+ });
+
+ auto LoadLibrary = [&](SmallString<256> LibPath, StringRef LibName) -> Error {
+ sys::path::append(LibPath, LibName);
+
+ auto G = StaticLibraryDefinitionGenerator::Load(ObjLinkingLayer,
+ LibPath.c_str());
+ if (!G)
+ return G.takeError();
+
+ for (auto &Lib : (*G)->getImportedDynamicLibraries())
+ ImportedLibraries.push_back(Lib);
+
+ JD.addGenerator(std::move(*G));
+
+ return Error::success();
+ };
+ for (auto &Lib : UCRTLibs)
+ if (auto Err = LoadLibrary(Path.UCRTSdkLib, Lib))
+ return Err;
+
+ for (auto &Lib : VCLibs)
+ if (auto Err = LoadLibrary(Path.VCToolchainLib, Lib))
+ return Err;
+ ImportedLibraries.push_back("ntdll.dll");
+ ImportedLibraries.push_back("Kernel32.dll");
+
+ return Error::success();
+}
+
+Error COFFVCRuntimeBootstrapper::initializeStaticVCRuntime(JITDylib &JD) {
+ ExecutorAddr jit_scrt_initialize, jit_scrt_dllmain_before_initialize_c,
+ jit_scrt_initialize_type_info,
+ jit_scrt_initialize_default_local_stdio_options;
+ if (auto Err = lookupAndRecordAddrs(
+ ES, LookupKind::Static, makeJITDylibSearchOrder(&JD),
+ {{ES.intern("__scrt_initialize_crt"), &jit_scrt_initialize},
+ {ES.intern("__scrt_dllmain_before_initialize_c"),
+ &jit_scrt_dllmain_before_initialize_c},
+ {ES.intern("?__scrt_initialize_type_info@@YAXXZ"),
+ &jit_scrt_initialize_type_info},
+ {ES.intern("__scrt_initialize_default_local_stdio_options"),
+ &jit_scrt_initialize_default_local_stdio_options}}))
+ return Err;
+
+ auto RunVoidInitFunc = [&](ExecutorAddr Addr) -> Error {
+ if (auto Res = ES.getExecutorProcessControl().runAsVoidFunction(Addr))
+ return Error::success();
+ else
+ return Res.takeError();
+ };
+
+ auto R =
+ ES.getExecutorProcessControl().runAsIntFunction(jit_scrt_initialize, 0);
+ if (!R)
+ return R.takeError();
+
+ if (auto Err = RunVoidInitFunc(jit_scrt_dllmain_before_initialize_c))
+ return Err;
+
+ if (auto Err = RunVoidInitFunc(jit_scrt_initialize_type_info))
+ return Err;
+
+ if (auto Err =
+ RunVoidInitFunc(jit_scrt_initialize_default_local_stdio_options))
+ return Err;
+
+ SymbolAliasMap Alias;
+ Alias[ES.intern("__run_after_c_init")] = {
+ ES.intern("__scrt_dllmain_after_initialize_c"), JITSymbolFlags::Exported};
+ if (auto Err = JD.define(symbolAliases(Alias)))
+ return Err;
+
+ return Error::success();
+}
+
+Expected<COFFVCRuntimeBootstrapper::MSVCToolchainPath>
+COFFVCRuntimeBootstrapper::getMSVCToolchainPath() {
+ std::string VCToolChainPath;
+ ToolsetLayout VSLayout;
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS = vfs::getRealFileSystem();
+ if (!findVCToolChainViaCommandLine(*VFS, std::nullopt, std::nullopt,
+ std::nullopt, VCToolChainPath, VSLayout) &&
+ !findVCToolChainViaEnvironment(*VFS, VCToolChainPath, VSLayout) &&
+ !findVCToolChainViaSetupConfig(*VFS, {}, VCToolChainPath, VSLayout) &&
+ !findVCToolChainViaRegistry(VCToolChainPath, VSLayout))
+ return make_error<StringError>("Couldn't find msvc toolchain.",
+ inconvertibleErrorCode());
+
+ std::string UniversalCRTSdkPath;
+ std::string UCRTVersion;
+ if (!getUniversalCRTSdkDir(*VFS, std::nullopt, std::nullopt, std::nullopt,
+ UniversalCRTSdkPath, UCRTVersion))
+ return make_error<StringError>("Couldn't find universal sdk.",
+ inconvertibleErrorCode());
+
+ MSVCToolchainPath ToolchainPath;
+ SmallString<256> VCToolchainLib(VCToolChainPath);
+ sys::path::append(VCToolchainLib, "lib", "x64");
+ ToolchainPath.VCToolchainLib = VCToolchainLib;
+
+ SmallString<256> UCRTSdkLib(UniversalCRTSdkPath);
+ sys::path::append(UCRTSdkLib, "Lib", UCRTVersion, "ucrt", "x64");
+ ToolchainPath.UCRTSdkLib = UCRTSdkLib;
+ return ToolchainPath;
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
new file mode 100644
index 000000000000..6448adaa0ceb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
@@ -0,0 +1,382 @@
+//===----- CompileOnDemandLayer.cpp - Lazily emit IR on first call --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <string>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+static ThreadSafeModule extractSubModule(ThreadSafeModule &TSM,
+ StringRef Suffix,
+ GVPredicate ShouldExtract) {
+
+ auto DeleteExtractedDefs = [](GlobalValue &GV) {
+ // Bump the linkage: this global will be provided by the external module.
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+
+ // Delete the definition in the source module.
+ if (isa<Function>(GV)) {
+ auto &F = cast<Function>(GV);
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ } else if (isa<GlobalVariable>(GV)) {
+ cast<GlobalVariable>(GV).setInitializer(nullptr);
+ } else if (isa<GlobalAlias>(GV)) {
+ // We need to turn deleted aliases into function or variable decls based
+ // on the type of their aliasee.
+ auto &A = cast<GlobalAlias>(GV);
+ Constant *Aliasee = A.getAliasee();
+ assert(A.hasName() && "Anonymous alias?");
+ assert(Aliasee->hasName() && "Anonymous aliasee");
+ std::string AliasName = std::string(A.getName());
+
+ if (isa<Function>(Aliasee)) {
+ auto *F = cloneFunctionDecl(*A.getParent(), *cast<Function>(Aliasee));
+ A.replaceAllUsesWith(F);
+ A.eraseFromParent();
+ F->setName(AliasName);
+ } else if (isa<GlobalVariable>(Aliasee)) {
+ auto *G = cloneGlobalVariableDecl(*A.getParent(),
+ *cast<GlobalVariable>(Aliasee));
+ A.replaceAllUsesWith(G);
+ A.eraseFromParent();
+ G->setName(AliasName);
+ } else
+ llvm_unreachable("Alias to unsupported type");
+ } else
+ llvm_unreachable("Unsupported global type");
+ };
+
+ auto NewTSM = cloneToNewContext(TSM, ShouldExtract, DeleteExtractedDefs);
+ NewTSM.withModuleDo([&](Module &M) {
+ M.setModuleIdentifier((M.getModuleIdentifier() + Suffix).str());
+ });
+
+ return NewTSM;
+}
+
+namespace llvm {
+namespace orc {
+
+class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
+public:
+ PartitioningIRMaterializationUnit(ExecutionSession &ES,
+ const IRSymbolMapper::ManglingOptions &MO,
+ ThreadSafeModule TSM,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(ES, MO, std::move(TSM)), Parent(Parent) {}
+
+ PartitioningIRMaterializationUnit(
+ ThreadSafeModule TSM, Interface I,
+ SymbolNameToDefinitionMap SymbolToDefinition,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(std::move(TSM), std::move(I),
+ std::move(SymbolToDefinition)),
+ Parent(Parent) {}
+
+private:
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ Parent.emitPartition(std::move(R), std::move(TSM),
+ std::move(SymbolToDefinition));
+ }
+
+ void discard(const JITDylib &V, const SymbolStringPtr &Name) override {
+ // All original symbols were materialized by the CODLayer and should be
+ // final. The function bodies provided by M should never be overridden.
+ llvm_unreachable("Discard should never be called on an "
+ "ExtractingIRMaterializationUnit");
+ }
+
+ mutable std::mutex SourceModuleMutex;
+ CompileOnDemandLayer &Parent;
+};
+
+std::optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileRequested(GlobalValueSet Requested) {
+ return std::move(Requested);
+}
+
+std::optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileWholeModule(GlobalValueSet Requested) {
+ return std::nullopt;
+}
+
+CompileOnDemandLayer::CompileOnDemandLayer(
+ ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
+ IndirectStubsManagerBuilder BuildIndirectStubsManager)
+ : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+ LCTMgr(LCTMgr),
+ BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}
+
+void CompileOnDemandLayer::setPartitionFunction(PartitionFunction Partition) {
+ this->Partition = std::move(Partition);
+}
+
+void CompileOnDemandLayer::setImplMap(ImplSymbolMap *Imp) {
+ this->AliaseeImpls = Imp;
+}
+void CompileOnDemandLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R, ThreadSafeModule TSM) {
+ assert(TSM && "Null module");
+
+ auto &ES = getExecutionSession();
+
+ // Sort the callables and non-callables, build re-exports and lodge the
+ // actual module with the implementation dylib.
+ auto &PDR = getPerDylibResources(R->getTargetJITDylib());
+
+ SymbolAliasMap NonCallables;
+ SymbolAliasMap Callables;
+ TSM.withModuleDo([&](Module &M) {
+ // First, do some cleanup on the module:
+ cleanUpModule(M);
+ });
+
+ for (auto &KV : R->getSymbols()) {
+ auto &Name = KV.first;
+ auto &Flags = KV.second;
+ if (Flags.isCallable())
+ Callables[Name] = SymbolAliasMapEntry(Name, Flags);
+ else
+ NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
+ }
+
+ // Create a partitioning materialization unit and lodge it with the
+ // implementation dylib.
+ if (auto Err = PDR.getImplDylib().define(
+ std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, *getManglingOptions(), std::move(TSM), *this))) {
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ if (!NonCallables.empty())
+ if (auto Err =
+ R->replace(reexports(PDR.getImplDylib(), std::move(NonCallables),
+ JITDylibLookupFlags::MatchAllSymbols))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ if (!Callables.empty()) {
+ if (auto Err = R->replace(
+ lazyReexports(LCTMgr, PDR.getISManager(), PDR.getImplDylib(),
+ std::move(Callables), AliaseeImpls))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ }
+}
+
+CompileOnDemandLayer::PerDylibResources &
+CompileOnDemandLayer::getPerDylibResources(JITDylib &TargetD) {
+ std::lock_guard<std::mutex> Lock(CODLayerMutex);
+
+ auto I = DylibResources.find(&TargetD);
+ if (I == DylibResources.end()) {
+ auto &ImplD =
+ getExecutionSession().createBareJITDylib(TargetD.getName() + ".impl");
+ JITDylibSearchOrder NewLinkOrder;
+ TargetD.withLinkOrderDo([&](const JITDylibSearchOrder &TargetLinkOrder) {
+ NewLinkOrder = TargetLinkOrder;
+ });
+
+ assert(!NewLinkOrder.empty() && NewLinkOrder.front().first == &TargetD &&
+ NewLinkOrder.front().second ==
+ JITDylibLookupFlags::MatchAllSymbols &&
+ "TargetD must be at the front of its own search order and match "
+ "non-exported symbol");
+ NewLinkOrder.insert(std::next(NewLinkOrder.begin()),
+ {&ImplD, JITDylibLookupFlags::MatchAllSymbols});
+ ImplD.setLinkOrder(NewLinkOrder, false);
+ TargetD.setLinkOrder(std::move(NewLinkOrder), false);
+
+ PerDylibResources PDR(ImplD, BuildIndirectStubsManager());
+ I = DylibResources.insert(std::make_pair(&TargetD, std::move(PDR))).first;
+ }
+
+ return I->second;
+}
+
+void CompileOnDemandLayer::cleanUpModule(Module &M) {
+ for (auto &F : M.functions()) {
+ if (F.isDeclaration())
+ continue;
+
+ if (F.hasAvailableExternallyLinkage()) {
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ continue;
+ }
+ }
+}
+
+void CompileOnDemandLayer::expandPartition(GlobalValueSet &Partition) {
+ // Expands the partition to ensure the following rules hold:
+ // (1) If any alias is in the partition, its aliasee is also in the partition.
+ // (2) If any aliasee is in the partition, its aliases are also in the
+  //     partition.
+ // (3) If any global variable is in the partition then all global variables
+ // are in the partition.
+ assert(!Partition.empty() && "Unexpected empty partition");
+
+ const Module &M = *(*Partition.begin())->getParent();
+ bool ContainsGlobalVariables = false;
+ std::vector<const GlobalValue *> GVsToAdd;
+
+ for (const auto *GV : Partition)
+ if (isa<GlobalAlias>(GV))
+ GVsToAdd.push_back(
+ cast<GlobalValue>(cast<GlobalAlias>(GV)->getAliasee()));
+ else if (isa<GlobalVariable>(GV))
+ ContainsGlobalVariables = true;
+
+ for (auto &A : M.aliases())
+ if (Partition.count(cast<GlobalValue>(A.getAliasee())))
+ GVsToAdd.push_back(&A);
+
+ if (ContainsGlobalVariables)
+ for (auto &G : M.globals())
+ GVsToAdd.push_back(&G);
+
+ for (const auto *GV : GVsToAdd)
+ Partition.insert(GV);
+}
+
+void CompileOnDemandLayer::emitPartition(
+ std::unique_ptr<MaterializationResponsibility> R, ThreadSafeModule TSM,
+ IRMaterializationUnit::SymbolNameToDefinitionMap Defs) {
+
+ // FIXME: Need a 'notify lazy-extracting/emitting' callback to tie the
+ // extracted module key, extracted module, and source module key
+ // together. This could be used, for example, to provide a specific
+ // memory manager instance to the linking layer.
+
+ auto &ES = getExecutionSession();
+ GlobalValueSet RequestedGVs;
+ for (auto &Name : R->getRequestedSymbols()) {
+ if (Name == R->getInitializerSymbol())
+ TSM.withModuleDo([&](Module &M) {
+ for (auto &GV : getStaticInitGVs(M))
+ RequestedGVs.insert(&GV);
+ });
+ else {
+ assert(Defs.count(Name) && "No definition for symbol");
+ RequestedGVs.insert(Defs[Name]);
+ }
+ }
+
+ /// Perform partitioning with the context lock held, since the partition
+ /// function is allowed to access the globals to compute the partition.
+ auto GVsToExtract =
+ TSM.withModuleDo([&](Module &M) { return Partition(RequestedGVs); });
+
+ // Take a 'None' partition to mean the whole module (as opposed to an empty
+ // partition, which means "materialize nothing"). Emit the whole module
+ // unmodified to the base layer.
+ if (GVsToExtract == std::nullopt) {
+ Defs.clear();
+ BaseLayer.emit(std::move(R), std::move(TSM));
+ return;
+ }
+
+ // If the partition is empty, return the whole module to the symbol table.
+ if (GVsToExtract->empty()) {
+ if (auto Err =
+ R->replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ std::move(TSM),
+ MaterializationUnit::Interface(R->getSymbols(),
+ R->getInitializerSymbol()),
+ std::move(Defs), *this))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ return;
+ }
+
+ // Ok -- we actually need to partition the symbols. Promote the symbol
+ // linkages/names, expand the partition to include any required symbols
+ // (i.e. symbols that can't be separated from our partition), and
+ // then extract the partition.
+ //
+ // FIXME: We apply this promotion once per partitioning. It's safe, but
+ // overkill.
+ auto ExtractedTSM =
+ TSM.withModuleDo([&](Module &M) -> Expected<ThreadSafeModule> {
+ auto PromotedGlobals = PromoteSymbols(M);
+ if (!PromotedGlobals.empty()) {
+
+ MangleAndInterner Mangle(ES, M.getDataLayout());
+ SymbolFlagsMap SymbolFlags;
+ IRSymbolMapper::add(ES, *getManglingOptions(),
+ PromotedGlobals, SymbolFlags);
+
+ if (auto Err = R->defineMaterializing(SymbolFlags))
+ return std::move(Err);
+ }
+
+ expandPartition(*GVsToExtract);
+
+ // Submodule name is given by hashing the names of the globals.
+ std::string SubModuleName;
+ {
+ std::vector<const GlobalValue*> HashGVs;
+ HashGVs.reserve(GVsToExtract->size());
+ for (const auto *GV : *GVsToExtract)
+ HashGVs.push_back(GV);
+ llvm::sort(HashGVs, [](const GlobalValue *LHS, const GlobalValue *RHS) {
+ return LHS->getName() < RHS->getName();
+ });
+ hash_code HC(0);
+ for (const auto *GV : HashGVs) {
+ assert(GV->hasName() && "All GVs to extract should be named by now");
+ auto GVName = GV->getName();
+ HC = hash_combine(HC, hash_combine_range(GVName.begin(), GVName.end()));
+ }
+ raw_string_ostream(SubModuleName)
+ << ".submodule."
+ << formatv(sizeof(size_t) == 8 ? "{0:x16}" : "{0:x8}",
+ static_cast<size_t>(HC))
+ << ".ll";
+ }
+
+        // Extract the requested partition (plus any necessary aliases) and
+ // put the rest back into the impl dylib.
+ auto ShouldExtract = [&](const GlobalValue &GV) -> bool {
+ return GVsToExtract->count(&GV);
+ };
+
+ return extractSubModule(TSM, SubModuleName , ShouldExtract);
+ });
+
+ if (!ExtractedTSM) {
+ ES.reportError(ExtractedTSM.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ if (auto Err = R->replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, *getManglingOptions(), std::move(TSM), *this))) {
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ BaseLayer.emit(std::move(R), std::move(*ExtractedTSM));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp
new file mode 100644
index 000000000000..fad7428e1f90
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/CompileUtils.cpp
@@ -0,0 +1,96 @@
+//===------ CompileUtils.cpp - Utilities for compiling IR in the JIT ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <algorithm>
+
+namespace llvm {
+namespace orc {
+
+IRSymbolMapper::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts) {
+ IRSymbolMapper::ManglingOptions MO;
+
+ MO.EmulatedTLS = Opts.EmulatedTLS;
+
+ return MO;
+}
+
+/// Compile a Module to an ObjectFile.
+Expected<SimpleCompiler::CompileResult> SimpleCompiler::operator()(Module &M) {
+ CompileResult CachedObject = tryToLoadFromObjectCache(M);
+ if (CachedObject)
+ return std::move(CachedObject);
+
+ SmallVector<char, 0> ObjBufferSV;
+
+ {
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ return make_error<StringError>("Target does not support MC emission",
+ inconvertibleErrorCode());
+ PM.run(M);
+ }
+
+ auto ObjBuffer = std::make_unique<SmallVectorMemoryBuffer>(
+ std::move(ObjBufferSV), M.getModuleIdentifier() + "-jitted-objectbuffer",
+ /*RequiresNullTerminator=*/false);
+
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+
+ if (!Obj)
+ return Obj.takeError();
+
+ notifyObjectCompiled(M, *ObjBuffer);
+ return std::move(ObjBuffer);
+}
+
+SimpleCompiler::CompileResult
+SimpleCompiler::tryToLoadFromObjectCache(const Module &M) {
+ if (!ObjCache)
+ return CompileResult();
+
+ return ObjCache->getObject(&M);
+}
+
+void SimpleCompiler::notifyObjectCompiled(const Module &M,
+ const MemoryBuffer &ObjBuffer) {
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&M, ObjBuffer.getMemBufferRef());
+}
+
+ConcurrentIRCompiler::ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
+ ObjectCache *ObjCache)
+ : IRCompiler(irManglingOptionsFromTargetOptions(JTMB.getOptions())),
+ JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
+
+Expected<std::unique_ptr<MemoryBuffer>>
+ConcurrentIRCompiler::operator()(Module &M) {
+ auto TM = cantFail(JTMB.createTargetMachine());
+ SimpleCompiler C(*TM, ObjCache);
+ return C(M);
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp
new file mode 100644
index 000000000000..f70c2890521d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp
@@ -0,0 +1,3777 @@
+//===--- Core.cpp - Core ORC APIs (MaterializationUnit, JITDylib, etc.) ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+
+#include <condition_variable>
+#include <future>
+#include <optional>
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+char ResourceTrackerDefunct::ID = 0;
+char FailedToMaterialize::ID = 0;
+char SymbolsNotFound::ID = 0;
+char SymbolsCouldNotBeRemoved::ID = 0;
+char MissingSymbolDefinitions::ID = 0;
+char UnexpectedSymbolDefinitions::ID = 0;
+char UnsatisfiedSymbolDependencies::ID = 0;
+char MaterializationTask::ID = 0;
+char LookupTask::ID = 0;
+
+RegisterDependenciesFunction NoDependenciesToRegister =
+ RegisterDependenciesFunction();
+
+void MaterializationUnit::anchor() {}
+
+ResourceTracker::ResourceTracker(JITDylibSP JD) {
+ assert((reinterpret_cast<uintptr_t>(JD.get()) & 0x1) == 0 &&
+ "JITDylib must be two byte aligned");
+ JD->Retain();
+ JDAndFlag.store(reinterpret_cast<uintptr_t>(JD.get()));
+}
+
+ResourceTracker::~ResourceTracker() {
+ getJITDylib().getExecutionSession().destroyResourceTracker(*this);
+ getJITDylib().Release();
+}
+
+Error ResourceTracker::remove() {
+ return getJITDylib().getExecutionSession().removeResourceTracker(*this);
+}
+
+void ResourceTracker::transferTo(ResourceTracker &DstRT) {
+ getJITDylib().getExecutionSession().transferResourceTracker(DstRT, *this);
+}
+
+void ResourceTracker::makeDefunct() {
+ uintptr_t Val = JDAndFlag.load();
+ Val |= 0x1U;
+ JDAndFlag.store(Val);
+}
+
+ResourceManager::~ResourceManager() = default;
+
+ResourceTrackerDefunct::ResourceTrackerDefunct(ResourceTrackerSP RT)
+ : RT(std::move(RT)) {}
+
+std::error_code ResourceTrackerDefunct::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void ResourceTrackerDefunct::log(raw_ostream &OS) const {
+ OS << "Resource tracker " << (void *)RT.get() << " became defunct";
+}
+
+FailedToMaterialize::FailedToMaterialize(
+ std::shared_ptr<SymbolStringPool> SSP,
+ std::shared_ptr<SymbolDependenceMap> Symbols)
+ : SSP(std::move(SSP)), Symbols(std::move(Symbols)) {
+ assert(this->SSP && "String pool cannot be null");
+ assert(!this->Symbols->empty() && "Can not fail to resolve an empty set");
+
+ // FIXME: Use a new dep-map type for FailedToMaterialize errors so that we
+ // don't have to manually retain/release.
+ for (auto &[JD, Syms] : *this->Symbols)
+ JD->Retain();
+}
+
+FailedToMaterialize::~FailedToMaterialize() {
+ for (auto &[JD, Syms] : *Symbols)
+ JD->Release();
+}
+
+std::error_code FailedToMaterialize::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void FailedToMaterialize::log(raw_ostream &OS) const {
+ OS << "Failed to materialize symbols: " << *Symbols;
+}
+
+UnsatisfiedSymbolDependencies::UnsatisfiedSymbolDependencies(
+ std::shared_ptr<SymbolStringPool> SSP, JITDylibSP JD,
+ SymbolNameSet FailedSymbols, SymbolDependenceMap BadDeps,
+ std::string Explanation)
+ : SSP(std::move(SSP)), JD(std::move(JD)),
+ FailedSymbols(std::move(FailedSymbols)), BadDeps(std::move(BadDeps)),
+ Explanation(std::move(Explanation)) {}
+
+std::error_code UnsatisfiedSymbolDependencies::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void UnsatisfiedSymbolDependencies::log(raw_ostream &OS) const {
+ OS << "In " << JD->getName() << ", failed to materialize " << FailedSymbols
+ << ", due to unsatisfied dependencies " << BadDeps;
+ if (!Explanation.empty())
+ OS << " (" << Explanation << ")";
+}
+
+SymbolsNotFound::SymbolsNotFound(std::shared_ptr<SymbolStringPool> SSP,
+ SymbolNameSet Symbols)
+ : SSP(std::move(SSP)) {
+ for (auto &Sym : Symbols)
+ this->Symbols.push_back(Sym);
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+SymbolsNotFound::SymbolsNotFound(std::shared_ptr<SymbolStringPool> SSP,
+ SymbolNameVector Symbols)
+ : SSP(std::move(SSP)), Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code SymbolsNotFound::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsNotFound::log(raw_ostream &OS) const {
+ OS << "Symbols not found: " << Symbols;
+}
+
+SymbolsCouldNotBeRemoved::SymbolsCouldNotBeRemoved(
+ std::shared_ptr<SymbolStringPool> SSP, SymbolNameSet Symbols)
+ : SSP(std::move(SSP)), Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code SymbolsCouldNotBeRemoved::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsCouldNotBeRemoved::log(raw_ostream &OS) const {
+ OS << "Symbols could not be removed: " << Symbols;
+}
+
+std::error_code MissingSymbolDefinitions::convertToErrorCode() const {
+ return orcError(OrcErrorCode::MissingSymbolDefinitions);
+}
+
+void MissingSymbolDefinitions::log(raw_ostream &OS) const {
+ OS << "Missing definitions in module " << ModuleName
+ << ": " << Symbols;
+}
+
+std::error_code UnexpectedSymbolDefinitions::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnexpectedSymbolDefinitions);
+}
+
+void UnexpectedSymbolDefinitions::log(raw_ostream &OS) const {
+ OS << "Unexpected definitions in module " << ModuleName
+ << ": " << Symbols;
+}
+
+AsynchronousSymbolQuery::AsynchronousSymbolQuery(
+ const SymbolLookupSet &Symbols, SymbolState RequiredState,
+ SymbolsResolvedCallback NotifyComplete)
+ : NotifyComplete(std::move(NotifyComplete)), RequiredState(RequiredState) {
+ assert(RequiredState >= SymbolState::Resolved &&
+ "Cannot query for a symbols that have not reached the resolve state "
+ "yet");
+
+ OutstandingSymbolsCount = Symbols.size();
+
+ for (auto &[Name, Flags] : Symbols)
+ ResolvedSymbols[Name] = ExecutorSymbolDef();
+}
+
+void AsynchronousSymbolQuery::notifySymbolMetRequiredState(
+ const SymbolStringPtr &Name, ExecutorSymbolDef Sym) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Resolving symbol outside the requested set");
+ assert(I->second == ExecutorSymbolDef() &&
+ "Redundantly resolving symbol Name");
+
+ // If this is a materialization-side-effects-only symbol then drop it,
+ // otherwise update its map entry with its resolved address.
+ if (Sym.getFlags().hasMaterializationSideEffectsOnly())
+ ResolvedSymbols.erase(I);
+ else
+ I->second = std::move(Sym);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::handleComplete(ExecutionSession &ES) {
+ assert(OutstandingSymbolsCount == 0 &&
+ "Symbols remain, handleComplete called prematurely");
+
+ class RunQueryCompleteTask : public Task {
+ public:
+ RunQueryCompleteTask(SymbolMap ResolvedSymbols,
+ SymbolsResolvedCallback NotifyComplete)
+ : ResolvedSymbols(std::move(ResolvedSymbols)),
+ NotifyComplete(std::move(NotifyComplete)) {}
+ void printDescription(raw_ostream &OS) override {
+ OS << "Execute query complete callback for " << ResolvedSymbols;
+ }
+ void run() override { NotifyComplete(std::move(ResolvedSymbols)); }
+
+ private:
+ SymbolMap ResolvedSymbols;
+ SymbolsResolvedCallback NotifyComplete;
+ };
+
+ auto T = std::make_unique<RunQueryCompleteTask>(std::move(ResolvedSymbols),
+ std::move(NotifyComplete));
+ NotifyComplete = SymbolsResolvedCallback();
+ ES.dispatchTask(std::move(T));
+}
+
+void AsynchronousSymbolQuery::handleFailed(Error Err) {
+ assert(QueryRegistrations.empty() && ResolvedSymbols.empty() &&
+ OutstandingSymbolsCount == 0 &&
+ "Query should already have been abandoned");
+ NotifyComplete(std::move(Err));
+ NotifyComplete = SymbolsResolvedCallback();
+}
+
+void AsynchronousSymbolQuery::addQueryDependence(JITDylib &JD,
+ SymbolStringPtr Name) {
+ bool Added = QueryRegistrations[&JD].insert(std::move(Name)).second;
+ (void)Added;
+ assert(Added && "Duplicate dependence notification?");
+}
+
+void AsynchronousSymbolQuery::removeQueryDependence(
+ JITDylib &JD, const SymbolStringPtr &Name) {
+ auto QRI = QueryRegistrations.find(&JD);
+ assert(QRI != QueryRegistrations.end() &&
+ "No dependencies registered for JD");
+ assert(QRI->second.count(Name) && "No dependency on Name in JD");
+ QRI->second.erase(Name);
+ if (QRI->second.empty())
+ QueryRegistrations.erase(QRI);
+}
+
+void AsynchronousSymbolQuery::dropSymbol(const SymbolStringPtr &Name) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Redundant removal of weakly-referenced symbol");
+ ResolvedSymbols.erase(I);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::detach() {
+ ResolvedSymbols.clear();
+ OutstandingSymbolsCount = 0;
+ for (auto &[JD, Syms] : QueryRegistrations)
+ JD->detachQueryHelper(*this, Syms);
+ QueryRegistrations.clear();
+}
+
// Builds the unit's interface (per-symbol flags) from the definitions before
// taking ownership of the symbol map itself.
AbsoluteSymbolsMaterializationUnit::AbsoluteSymbolsMaterializationUnit(
    SymbolMap Symbols)
    : MaterializationUnit(extractFlags(Symbols)), Symbols(std::move(Symbols)) {}
+
+StringRef AbsoluteSymbolsMaterializationUnit::getName() const {
+ return "<Absolute Symbols>";
+}
+
+void AbsoluteSymbolsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ // Even though these are just absolute symbols we need to check for failure
+ // to resolve/emit: the tracker for these symbols may have been removed while
+ // the materialization was in flight (e.g. due to a failure in some action
+ // triggered by the queries attached to the resolution/emission of these
+ // symbols).
+ if (auto Err = R->notifyResolved(Symbols)) {
+ R->getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ if (auto Err = R->notifyEmitted({})) {
+ R->getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+}
+
+void AbsoluteSymbolsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Symbols.count(Name) && "Symbol is not part of this MU");
+ Symbols.erase(Name);
+}
+
+MaterializationUnit::Interface
+AbsoluteSymbolsMaterializationUnit::extractFlags(const SymbolMap &Symbols) {
+ SymbolFlagsMap Flags;
+ for (const auto &[Name, Def] : Symbols)
+ Flags[Name] = Def.getFlags();
+ return MaterializationUnit::Interface(std::move(Flags), nullptr);
+}
+
// SourceJD may be null, in which case aliases are resolved within the target
// JITDylib itself (plain symbol aliases rather than cross-dylib re-exports).
ReExportsMaterializationUnit::ReExportsMaterializationUnit(
    JITDylib *SourceJD, JITDylibLookupFlags SourceJDLookupFlags,
    SymbolAliasMap Aliases)
    : MaterializationUnit(extractFlags(Aliases)), SourceJD(SourceJD),
      SourceJDLookupFlags(SourceJDLookupFlags), Aliases(std::move(Aliases)) {}
+
+StringRef ReExportsMaterializationUnit::getName() const {
+ return "<Reexports>";
+}
+
// Materializes the requested aliases by looking up their aliasees in the
// source JITDylib and forwarding the resolved addresses. Unrequested aliases
// are handed back to the JITDylib as a new materialization unit.
void ReExportsMaterializationUnit::materialize(
    std::unique_ptr<MaterializationResponsibility> R) {

  auto &ES = R->getTargetJITDylib().getExecutionSession();
  JITDylib &TgtJD = R->getTargetJITDylib();
  JITDylib &SrcJD = SourceJD ? *SourceJD : TgtJD;

  // Find the set of requested aliases and aliasees. Return any unrequested
  // aliases back to the JITDylib so as to not prematurely materialize any
  // aliasees.
  auto RequestedSymbols = R->getRequestedSymbols();
  SymbolAliasMap RequestedAliases;

  for (auto &Name : RequestedSymbols) {
    auto I = Aliases.find(Name);
    assert(I != Aliases.end() && "Symbol not found in aliases map?");
    RequestedAliases[Name] = std::move(I->second);
    Aliases.erase(I);
  }

  LLVM_DEBUG({
    ES.runSessionLocked([&]() {
      dbgs() << "materializing reexports: target = " << TgtJD.getName()
             << ", source = " << SrcJD.getName() << " " << RequestedAliases
             << "\n";
    });
  });

  if (!Aliases.empty()) {
    // Hand back the aliases nobody asked for yet as a replacement MU.
    auto Err = SourceJD ? R->replace(reexports(*SourceJD, std::move(Aliases),
                                               SourceJDLookupFlags))
                        : R->replace(symbolAliases(std::move(Aliases)));

    if (Err) {
      // FIXME: Should this be reported / treated as failure to materialize?
      // Or should this be treated as a sanctioned bailing-out?
      ES.reportError(std::move(Err));
      R->failMaterialization();
      return;
    }
  }

  // The OnResolveInfo struct will hold the aliases and responsibility for each
  // query in the list.
  struct OnResolveInfo {
    OnResolveInfo(std::unique_ptr<MaterializationResponsibility> R,
                  SymbolAliasMap Aliases)
        : R(std::move(R)), Aliases(std::move(Aliases)) {}

    std::unique_ptr<MaterializationResponsibility> R;
    SymbolAliasMap Aliases;
    std::vector<SymbolDependenceGroup> SDGs;
  };

  // Build a list of queries to issue. In each round we build a query for the
  // largest set of aliases that we can resolve without encountering a chain of
  // aliases (e.g. Foo -> Bar, Bar -> Baz). Such a chain would deadlock as the
  // query would be waiting on a symbol that it itself had to resolve. Creating
  // a new query for each link in such a chain eliminates the possibility of
  // deadlock. In practice chains are likely to be rare, and this algorithm will
  // usually result in a single query to issue.

  std::vector<std::pair<SymbolLookupSet, std::shared_ptr<OnResolveInfo>>>
      QueryInfos;
  while (!RequestedAliases.empty()) {
    SymbolNameSet ResponsibilitySymbols;
    SymbolLookupSet QuerySymbols;
    SymbolAliasMap QueryAliases;

    // Collect as many aliases as we can without including a chain.
    for (auto &KV : RequestedAliases) {
      // Chain detected. Skip this symbol for this round.
      if (&SrcJD == &TgtJD && (QueryAliases.count(KV.second.Aliasee) ||
                               RequestedAliases.count(KV.second.Aliasee)))
        continue;

      ResponsibilitySymbols.insert(KV.first);
      QuerySymbols.add(KV.second.Aliasee,
                       KV.second.AliasFlags.hasMaterializationSideEffectsOnly()
                           ? SymbolLookupFlags::WeaklyReferencedSymbol
                           : SymbolLookupFlags::RequiredSymbol);
      QueryAliases[KV.first] = std::move(KV.second);
    }

    // Remove the aliases collected this round from the RequestedAliases map.
    for (auto &KV : QueryAliases)
      RequestedAliases.erase(KV.first);

    assert(!QuerySymbols.empty() && "Alias cycle detected!");

    // Split responsibility for this round's aliases off into a fresh MR that
    // travels with the query.
    auto NewR = R->delegate(ResponsibilitySymbols);
    if (!NewR) {
      ES.reportError(NewR.takeError());
      R->failMaterialization();
      return;
    }

    auto QueryInfo = std::make_shared<OnResolveInfo>(std::move(*NewR),
                                                     std::move(QueryAliases));
    QueryInfos.push_back(
        make_pair(std::move(QuerySymbols), std::move(QueryInfo)));
  }

  // Issue the queries.
  while (!QueryInfos.empty()) {
    auto QuerySymbols = std::move(QueryInfos.back().first);
    auto QueryInfo = std::move(QueryInfos.back().second);

    QueryInfos.pop_back();

    auto RegisterDependencies = [QueryInfo,
                                 &SrcJD](const SymbolDependenceMap &Deps) {
      // If there were no materializing symbols, just bail out.
      if (Deps.empty())
        return;

      // Otherwise the only deps should be on SrcJD.
      assert(Deps.size() == 1 && Deps.count(&SrcJD) &&
             "Unexpected dependencies for reexports");

      auto &SrcJDDeps = Deps.find(&SrcJD)->second;

      // Record one dependence group per alias whose aliasee is still
      // materializing in SrcJD; consumed by notifyEmitted below.
      for (auto &[Alias, AliasInfo] : QueryInfo->Aliases)
        if (SrcJDDeps.count(AliasInfo.Aliasee))
          QueryInfo->SDGs.push_back({{Alias}, {{&SrcJD, {AliasInfo.Aliasee}}}});
    };

    auto OnComplete = [QueryInfo](Expected<SymbolMap> Result) {
      auto &ES = QueryInfo->R->getTargetJITDylib().getExecutionSession();
      if (Result) {
        SymbolMap ResolutionMap;
        for (auto &KV : QueryInfo->Aliases) {
          assert((KV.second.AliasFlags.hasMaterializationSideEffectsOnly() ||
                  Result->count(KV.second.Aliasee)) &&
                 "Result map missing entry?");
          // Don't try to resolve materialization-side-effects-only symbols.
          if (KV.second.AliasFlags.hasMaterializationSideEffectsOnly())
            continue;

          ResolutionMap[KV.first] = {(*Result)[KV.second.Aliasee].getAddress(),
                                     KV.second.AliasFlags};
        }
        if (auto Err = QueryInfo->R->notifyResolved(ResolutionMap)) {
          ES.reportError(std::move(Err));
          QueryInfo->R->failMaterialization();
          return;
        }
        if (auto Err = QueryInfo->R->notifyEmitted(QueryInfo->SDGs)) {
          ES.reportError(std::move(Err));
          QueryInfo->R->failMaterialization();
          return;
        }
      } else {
        ES.reportError(Result.takeError());
        QueryInfo->R->failMaterialization();
      }
    };

    ES.lookup(LookupKind::Static,
              JITDylibSearchOrder({{&SrcJD, SourceJDLookupFlags}}),
              QuerySymbols, SymbolState::Resolved, std::move(OnComplete),
              std::move(RegisterDependencies));
  }
}
+
+void ReExportsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Aliases.count(Name) &&
+ "Symbol not covered by this MaterializationUnit");
+ Aliases.erase(Name);
+}
+
+MaterializationUnit::Interface
+ReExportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &KV : Aliases)
+ SymbolFlags[KV.first] = KV.second.AliasFlags;
+
+ return MaterializationUnit::Interface(std::move(SymbolFlags), nullptr);
+}
+
+Expected<SymbolAliasMap> buildSimpleReexportsAliasMap(JITDylib &SourceJD,
+ SymbolNameSet Symbols) {
+ SymbolLookupSet LookupSet(Symbols);
+ auto Flags = SourceJD.getExecutionSession().lookupFlags(
+ LookupKind::Static, {{&SourceJD, JITDylibLookupFlags::MatchAllSymbols}},
+ SymbolLookupSet(std::move(Symbols)));
+
+ if (!Flags)
+ return Flags.takeError();
+
+ SymbolAliasMap Result;
+ for (auto &Name : Symbols) {
+ assert(Flags->count(Name) && "Missing entry in flags map");
+ Result[Name] = SymbolAliasMapEntry(Name, (*Flags)[Name]);
+ }
+
+ return Result;
+}
+
// Shared state for a lookup that may be suspended and resumed (e.g. by a
// DefinitionGenerator). Subclasses decide what happens on completion/failure.
class InProgressLookupState {
public:
  // FIXME: Reduce the number of SymbolStringPtrs here. See
  // https://github.com/llvm/llvm-project/issues/55576.

  InProgressLookupState(LookupKind K, JITDylibSearchOrder SearchOrder,
                        SymbolLookupSet LookupSet, SymbolState RequiredState)
      : K(K), SearchOrder(std::move(SearchOrder)),
        LookupSet(std::move(LookupSet)), RequiredState(RequiredState) {
    // Initially every looked-up symbol is a candidate for generators.
    DefGeneratorCandidates = this->LookupSet;
  }
  virtual ~InProgressLookupState() = default;
  // Called when the lookup has visited the whole search order.
  virtual void complete(std::unique_ptr<InProgressLookupState> IPLS) = 0;
  // Called to abort the lookup with an error.
  virtual void fail(Error Err) = 0;

  LookupKind K;
  JITDylibSearchOrder SearchOrder;
  SymbolLookupSet LookupSet;
  SymbolState RequiredState;

  // Progress through SearchOrder; NewJITDylib marks the first visit to the
  // JITDylib at the current index.
  size_t CurSearchOrderIndex = 0;
  bool NewJITDylib = true;
  SymbolLookupSet DefGeneratorCandidates;
  SymbolLookupSet DefGeneratorNonCandidates;

  enum {
    NotInGenerator,      // Not currently using a generator.
    ResumedForGenerator, // Resumed after being auto-suspended before generator.
    InGenerator          // Currently using generator.
  } GenState = NotInGenerator;
  std::vector<std::weak_ptr<DefinitionGenerator>> CurDefGeneratorStack;
};
+
// Lookup state for ExecutionSession::lookupFlags: only flags are gathered, so
// no particular symbol state is required (NeverSearched).
class InProgressLookupFlagsState : public InProgressLookupState {
public:
  InProgressLookupFlagsState(
      LookupKind K, JITDylibSearchOrder SearchOrder, SymbolLookupSet LookupSet,
      unique_function<void(Expected<SymbolFlagsMap>)> OnComplete)
      : InProgressLookupState(K, std::move(SearchOrder), std::move(LookupSet),
                              SymbolState::NeverSearched),
        OnComplete(std::move(OnComplete)) {}

  void complete(std::unique_ptr<InProgressLookupState> IPLS) override {
    // Forward to the session owning the first JITDylib in the search order.
    auto &ES = SearchOrder.front().first->getExecutionSession();
    ES.OL_completeLookupFlags(std::move(IPLS), std::move(OnComplete));
  }

  void fail(Error Err) override { OnComplete(std::move(Err)); }

private:
  unique_function<void(Expected<SymbolFlagsMap>)> OnComplete;
};
+
// Lookup state for a full symbol lookup: carries the asynchronous query to
// satisfy and the callback used to register symbol dependencies.
class InProgressFullLookupState : public InProgressLookupState {
public:
  InProgressFullLookupState(LookupKind K, JITDylibSearchOrder SearchOrder,
                            SymbolLookupSet LookupSet,
                            SymbolState RequiredState,
                            std::shared_ptr<AsynchronousSymbolQuery> Q,
                            RegisterDependenciesFunction RegisterDependencies)
      : InProgressLookupState(K, std::move(SearchOrder), std::move(LookupSet),
                              RequiredState),
        Q(std::move(Q)), RegisterDependencies(std::move(RegisterDependencies)) {
  }

  void complete(std::unique_ptr<InProgressLookupState> IPLS) override {
    auto &ES = SearchOrder.front().first->getExecutionSession();
    ES.OL_completeLookup(std::move(IPLS), std::move(Q),
                         std::move(RegisterDependencies));
  }

  void fail(Error Err) override {
    // Detach first so the query holds no stale registrations when it is
    // failed.
    Q->detach();
    Q->handleFailed(std::move(Err));
  }

private:
  std::shared_ptr<AsynchronousSymbolQuery> Q;
  RegisterDependenciesFunction RegisterDependencies;
};
+
// Generator that lazily re-exports symbols from SourceJD. Allow (if non-null)
// filters which symbols may be re-exported.
ReexportsGenerator::ReexportsGenerator(JITDylib &SourceJD,
                                       JITDylibLookupFlags SourceJDLookupFlags,
                                       SymbolPredicate Allow)
    : SourceJD(SourceJD), SourceJDLookupFlags(SourceJDLookupFlags),
      Allow(std::move(Allow)) {}
+
+Error ReexportsGenerator::tryToGenerate(LookupState &LS, LookupKind K,
+ JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags,
+ const SymbolLookupSet &LookupSet) {
+ assert(&JD != &SourceJD && "Cannot re-export from the same dylib");
+
+ // Use lookupFlags to find the subset of symbols that match our lookup.
+ auto Flags = JD.getExecutionSession().lookupFlags(
+ K, {{&SourceJD, JDLookupFlags}}, LookupSet);
+ if (!Flags)
+ return Flags.takeError();
+
+ // Create an alias map.
+ orc::SymbolAliasMap AliasMap;
+ for (auto &KV : *Flags)
+ if (!Allow || Allow(KV.first))
+ AliasMap[KV.first] = SymbolAliasMapEntry(KV.first, KV.second);
+
+ if (AliasMap.empty())
+ return Error::success();
+
+ // Define the re-exports.
+ return JD.define(reexports(SourceJD, AliasMap, SourceJDLookupFlags));
+}
+
// Wraps an in-progress lookup so generators can suspend and later resume it.
LookupState::LookupState(std::unique_ptr<InProgressLookupState> IPLS)
    : IPLS(std::move(IPLS)) {}
+
+void LookupState::reset(InProgressLookupState *IPLS) { this->IPLS.reset(IPLS); }
+
// Out-of-line defaulted special members: InProgressLookupState is only
// forward-declared in the header, so these must be defined here where the
// type is complete.
LookupState::LookupState() = default;
LookupState::LookupState(LookupState &&) = default;
LookupState &LookupState::operator=(LookupState &&) = default;
LookupState::~LookupState() = default;
+
+void LookupState::continueLookup(Error Err) {
+ assert(IPLS && "Cannot call continueLookup on empty LookupState");
+ auto &ES = IPLS->SearchOrder.begin()->first->getExecutionSession();
+ ES.OL_applyQueryPhase1(std::move(IPLS), std::move(Err));
+}
+
// Fails any lookups still queued behind this generator.
DefinitionGenerator::~DefinitionGenerator() {
  std::deque<LookupState> LookupsToFail;
  {
    // Swap the pending list out under the lock, then deliver the failures
    // outside it so continueLookup callbacks can't re-enter M.
    std::lock_guard<std::mutex> Lock(M);
    std::swap(PendingLookups, LookupsToFail);
    InUse = false;
  }

  for (auto &LS : LookupsToFail)
    LS.continueLookup(make_error<StringError>(
        "Query waiting on DefinitionGenerator that was destroyed",
        inconvertibleErrorCode()));
}
+
// Members are released by their own destructors; this only emits a debug
// trace.
JITDylib::~JITDylib() {
  LLVM_DEBUG(dbgs() << "Destroying JITDylib " << getName() << "\n");
}
+
+Error JITDylib::clear() {
+ std::vector<ResourceTrackerSP> TrackersToRemove;
+ ES.runSessionLocked([&]() {
+ assert(State != Closed && "JD is defunct");
+ for (auto &KV : TrackerSymbols)
+ TrackersToRemove.push_back(KV.first);
+ TrackersToRemove.push_back(getDefaultResourceTracker());
+ });
+
+ Error Err = Error::success();
+ for (auto &RT : TrackersToRemove)
+ Err = joinErrors(std::move(Err), RT->remove());
+ return Err;
+}
+
+ResourceTrackerSP JITDylib::getDefaultResourceTracker() {
+ return ES.runSessionLocked([this] {
+ assert(State != Closed && "JD is defunct");
+ if (!DefaultTracker)
+ DefaultTracker = new ResourceTracker(this);
+ return DefaultTracker;
+ });
+}
+
+ResourceTrackerSP JITDylib::createResourceTracker() {
+ return ES.runSessionLocked([this] {
+ assert(State == Open && "JD is defunct");
+ ResourceTrackerSP RT = new ResourceTracker(this);
+ return RT;
+ });
+}
+
// Detaches generator G from this JITDylib. G must currently be attached.
void JITDylib::removeGenerator(DefinitionGenerator &G) {
  // DefGenerator moved into TmpDG to ensure that it's destroyed outside the
  // session lock (since it may have to send errors to pending queries).
  std::shared_ptr<DefinitionGenerator> TmpDG;

  ES.runSessionLocked([&] {
    assert(State == Open && "JD is defunct");
    auto I = llvm::find_if(DefGenerators,
                           [&](const std::shared_ptr<DefinitionGenerator> &H) {
                             return H.get() == &G;
                           });
    assert(I != DefGenerators.end() && "Generator not found");
    TmpDG = std::move(*I);
    DefGenerators.erase(I);
  });
}
+
// Adds SymbolFlags to the symbol table in the Materializing state on behalf
// of FromMR. Weak definitions that clash with existing entries are dropped
// (and removed from the returned map); a clashing strong definition is an
// error and rolls back every symbol this call had already added.
Expected<SymbolFlagsMap>
JITDylib::defineMaterializing(MaterializationResponsibility &FromMR,
                              SymbolFlagsMap SymbolFlags) {

  return ES.runSessionLocked([&]() -> Expected<SymbolFlagsMap> {
    // Bail out if the originating tracker has already been removed.
    if (FromMR.RT->isDefunct())
      return make_error<ResourceTrackerDefunct>(FromMR.RT);

    std::vector<NonOwningSymbolStringPtr> AddedSyms;
    std::vector<NonOwningSymbolStringPtr> RejectedWeakDefs;

    for (auto SFItr = SymbolFlags.begin(), SFEnd = SymbolFlags.end();
         SFItr != SFEnd; ++SFItr) {

      auto &Name = SFItr->first;
      auto &Flags = SFItr->second;

      auto EntryItr = Symbols.find(Name);

      // If the entry already exists...
      if (EntryItr != Symbols.end()) {

        // If this is a strong definition then error out.
        if (!Flags.isWeak()) {
          // Remove any symbols already added.
          for (auto &S : AddedSyms)
            Symbols.erase(Symbols.find_as(S));

          // FIXME: Return all duplicates.
          return make_error<DuplicateDefinition>(std::string(*Name));
        }

        // Otherwise just make a note to discard this symbol after the loop.
        RejectedWeakDefs.push_back(NonOwningSymbolStringPtr(Name));
        continue;
      } else
        EntryItr =
            Symbols.insert(std::make_pair(Name, SymbolTableEntry(Flags))).first;

      AddedSyms.push_back(NonOwningSymbolStringPtr(Name));
      EntryItr->second.setState(SymbolState::Materializing);
    }

    // Remove any rejected weak definitions from the SymbolFlags map.
    while (!RejectedWeakDefs.empty()) {
      SymbolFlags.erase(SymbolFlags.find_as(RejectedWeakDefs.back()));
      RejectedWeakDefs.pop_back();
    }

    return SymbolFlags;
  });
}
+
+Error JITDylib::replace(MaterializationResponsibility &FromMR,
+ std::unique_ptr<MaterializationUnit> MU) {
+ assert(MU != nullptr && "Can not replace with a null MaterializationUnit");
+ std::unique_ptr<MaterializationUnit> MustRunMU;
+ std::unique_ptr<MaterializationResponsibility> MustRunMR;
+
+ auto Err =
+ ES.runSessionLocked([&, this]() -> Error {
+ if (FromMR.RT->isDefunct())
+ return make_error<ResourceTrackerDefunct>(std::move(FromMR.RT));
+
+#ifndef NDEBUG
+ for (auto &KV : MU->getSymbols()) {
+ auto SymI = Symbols.find(KV.first);
+ assert(SymI != Symbols.end() && "Replacing unknown symbol");
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Can not replace a symbol that ha is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Symbol should not have materializer attached already");
+ assert(UnmaterializedInfos.count(KV.first) == 0 &&
+ "Symbol being replaced should have no UnmaterializedInfo");
+ }
+#endif // NDEBUG
+
+ // If the tracker is defunct we need to bail out immediately.
+
+ // If any symbol has pending queries against it then we need to
+ // materialize MU immediately.
+ for (auto &KV : MU->getSymbols()) {
+ auto MII = MaterializingInfos.find(KV.first);
+ if (MII != MaterializingInfos.end()) {
+ if (MII->second.hasQueriesPending()) {
+ MustRunMR = ES.createMaterializationResponsibility(
+ *FromMR.RT, std::move(MU->SymbolFlags),
+ std::move(MU->InitSymbol));
+ MustRunMU = std::move(MU);
+ return Error::success();
+ }
+ }
+ }
+
+ // Otherwise, make MU responsible for all the symbols.
+ auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU),
+ FromMR.RT.get());
+ for (auto &KV : UMI->MU->getSymbols()) {
+ auto SymI = Symbols.find(KV.first);
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Can not replace a symbol that is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Can not replace a symbol that has a materializer attached");
+ assert(UnmaterializedInfos.count(KV.first) == 0 &&
+ "Unexpected materializer entry in map");
+ SymI->second.setAddress(SymI->second.getAddress());
+ SymI->second.setMaterializerAttached(true);
+
+ auto &UMIEntry = UnmaterializedInfos[KV.first];
+ assert((!UMIEntry || !UMIEntry->MU) &&
+ "Replacing symbol with materializer still attached");
+ UMIEntry = UMI;
+ }
+
+ return Error::success();
+ });
+
+ if (Err)
+ return Err;
+
+ if (MustRunMU) {
+ assert(MustRunMR && "MustRunMU set implies MustRunMR set");
+ ES.dispatchTask(std::make_unique<MaterializationTask>(
+ std::move(MustRunMU), std::move(MustRunMR)));
+ } else {
+ assert(!MustRunMR && "MustRunMU unset implies MustRunMR unset");
+ }
+
+ return Error::success();
+}
+
// Splits a new MaterializationResponsibility covering SymbolFlags off from
// FromMR, under the same resource tracker.
Expected<std::unique_ptr<MaterializationResponsibility>>
JITDylib::delegate(MaterializationResponsibility &FromMR,
                   SymbolFlagsMap SymbolFlags, SymbolStringPtr InitSymbol) {

  return ES.runSessionLocked(
      [&]() -> Expected<std::unique_ptr<MaterializationResponsibility>> {
        // Refuse to delegate if the originating tracker has been removed.
        if (FromMR.RT->isDefunct())
          return make_error<ResourceTrackerDefunct>(std::move(FromMR.RT));

        return ES.createMaterializationResponsibility(
            *FromMR.RT, std::move(SymbolFlags), std::move(InitSymbol));
      });
}
+
+SymbolNameSet
+JITDylib::getRequestedSymbols(const SymbolFlagsMap &SymbolFlags) const {
+ return ES.runSessionLocked([&]() {
+ SymbolNameSet RequestedSymbols;
+
+ for (auto &KV : SymbolFlags) {
+ assert(Symbols.count(KV.first) && "JITDylib does not cover this symbol?");
+ assert(Symbols.find(KV.first)->second.getState() !=
+ SymbolState::NeverSearched &&
+ Symbols.find(KV.first)->second.getState() != SymbolState::Ready &&
+ "getRequestedSymbols can only be called for symbols that have "
+ "started materializing");
+ auto I = MaterializingInfos.find(KV.first);
+ if (I == MaterializingInfos.end())
+ continue;
+
+ if (I->second.hasQueriesPending())
+ RequestedSymbols.insert(KV.first);
+ }
+
+ return RequestedSymbols;
+ });
+}
+
// Records resolved addresses for the symbols covered by MR, moving them to
// the Resolved state, and notifies any queries completed as a result (outside
// the session lock).
Error JITDylib::resolve(MaterializationResponsibility &MR,
                        const SymbolMap &Resolved) {
  AsynchronousSymbolQuerySet CompletedQueries;

  if (auto Err = ES.runSessionLocked([&, this]() -> Error {
        if (MR.RT->isDefunct())
          return make_error<ResourceTrackerDefunct>(MR.RT);

        if (State != Open)
          return make_error<StringError>("JITDylib " + getName() +
                                             " is defunct",
                                         inconvertibleErrorCode());

        struct WorklistEntry {
          SymbolTable::iterator SymI;
          ExecutorSymbolDef ResolvedSym;
        };

        SymbolNameSet SymbolsInErrorState;
        std::vector<WorklistEntry> Worklist;
        Worklist.reserve(Resolved.size());

        // Build worklist and check for any symbols in the error state.
        for (const auto &KV : Resolved) {

          assert(!KV.second.getFlags().hasError() &&
                 "Resolution result can not have error flag set");

          auto SymI = Symbols.find(KV.first);

          assert(SymI != Symbols.end() && "Symbol not found");
          assert(!SymI->second.hasMaterializerAttached() &&
                 "Resolving symbol with materializer attached?");
          assert(SymI->second.getState() == SymbolState::Materializing &&
                 "Symbol should be materializing");
          assert(SymI->second.getAddress() == ExecutorAddr() &&
                 "Symbol has already been resolved");

          if (SymI->second.getFlags().hasError())
            SymbolsInErrorState.insert(KV.first);
          else {
            // The Common flag is not preserved through resolution.
            auto Flags = KV.second.getFlags();
            Flags &= ~JITSymbolFlags::Common;
            assert(Flags ==
                       (SymI->second.getFlags() & ~JITSymbolFlags::Common) &&
                   "Resolved flags should match the declared flags");

            Worklist.push_back({SymI, {KV.second.getAddress(), Flags}});
          }
        }

        // If any symbols were in the error state then bail out.
        if (!SymbolsInErrorState.empty()) {
          auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
          (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
          return make_error<FailedToMaterialize>(
              getExecutionSession().getSymbolStringPool(),
              std::move(FailedSymbolsDepMap));
        }

        while (!Worklist.empty()) {
          auto SymI = Worklist.back().SymI;
          auto ResolvedSym = Worklist.back().ResolvedSym;
          Worklist.pop_back();

          auto &Name = SymI->first;

          // Resolved symbols can not be weak: discard the weak flag.
          JITSymbolFlags ResolvedFlags = ResolvedSym.getFlags();
          SymI->second.setAddress(ResolvedSym.getAddress());
          SymI->second.setFlags(ResolvedFlags);
          SymI->second.setState(SymbolState::Resolved);

          auto MII = MaterializingInfos.find(Name);
          if (MII == MaterializingInfos.end())
            continue;

          // Collect queries completed by this resolution; they are notified
          // below, after the session lock has been released.
          auto &MI = MII->second;
          for (auto &Q : MI.takeQueriesMeeting(SymbolState::Resolved)) {
            Q->notifySymbolMetRequiredState(Name, ResolvedSym);
            Q->removeQueryDependence(*this, Name);
            if (Q->isComplete())
              CompletedQueries.insert(std::move(Q));
          }
        }

        return Error::success();
      }))
    return Err;

  // Otherwise notify all the completed queries.
  for (auto &Q : CompletedQueries) {
    assert(Q->isComplete() && "Q not completed");
    Q->handleComplete(ES);
  }

  return Error::success();
}
+
+void JITDylib::unlinkMaterializationResponsibility(
+ MaterializationResponsibility &MR) {
+ ES.runSessionLocked([&]() {
+ auto I = TrackerMRs.find(MR.RT.get());
+ assert(I != TrackerMRs.end() && "No MRs in TrackerMRs list for RT");
+ assert(I->second.count(&MR) && "MR not in TrackerMRs list for RT");
+ I->second.erase(&MR);
+ if (I->second.empty())
+ TrackerMRs.erase(MR.RT.get());
+ });
+}
+
// Heuristically releases bookkeeping-map storage once it drains to empty.
void JITDylib::shrinkMaterializationInfoMemory() {
  // DenseMap::erase never shrinks its storage; use clear to heuristically free
  // memory since we may have long-lived JDs after linking is done.

  if (UnmaterializedInfos.empty())
    UnmaterializedInfos.clear();

  if (MaterializingInfos.empty())
    MaterializingInfos.clear();
}
+
// Replaces this JITDylib's link order. If LinkAgainstThisJITDylibFirst is set
// then this JITDylib is prepended, unless NewLinkOrder already starts with it.
void JITDylib::setLinkOrder(JITDylibSearchOrder NewLinkOrder,
                            bool LinkAgainstThisJITDylibFirst) {
  ES.runSessionLocked([&]() {
    assert(State == Open && "JD is defunct");
    if (LinkAgainstThisJITDylibFirst) {
      LinkOrder.clear();
      if (NewLinkOrder.empty() || NewLinkOrder.front().first != this)
        LinkOrder.push_back(
            std::make_pair(this, JITDylibLookupFlags::MatchAllSymbols));
      llvm::append_range(LinkOrder, NewLinkOrder);
    } else
      LinkOrder = std::move(NewLinkOrder);
  });
}
+
+void JITDylib::addToLinkOrder(const JITDylibSearchOrder &NewLinks) {
+ ES.runSessionLocked([&]() {
+ for (auto &KV : NewLinks) {
+ // Skip elements of NewLinks that are already in the link order.
+ if (llvm::is_contained(LinkOrder, KV))
+ continue;
+
+ LinkOrder.push_back(std::move(KV));
+ }
+ });
+}
+
// Appends a single (JD, flags) entry to the link order. No de-duplication is
// performed.
void JITDylib::addToLinkOrder(JITDylib &JD, JITDylibLookupFlags JDLookupFlags) {
  ES.runSessionLocked([&]() { LinkOrder.push_back({&JD, JDLookupFlags}); });
}
+
+void JITDylib::replaceInLinkOrder(JITDylib &OldJD, JITDylib &NewJD,
+ JITDylibLookupFlags JDLookupFlags) {
+ ES.runSessionLocked([&]() {
+ assert(State == Open && "JD is defunct");
+ for (auto &KV : LinkOrder)
+ if (KV.first == &OldJD) {
+ KV = {&NewJD, JDLookupFlags};
+ break;
+ }
+ });
+}
+
+void JITDylib::removeFromLinkOrder(JITDylib &JD) {
+ ES.runSessionLocked([&]() {
+ assert(State == Open && "JD is defunct");
+ auto I = llvm::find_if(LinkOrder,
+ [&](const JITDylibSearchOrder::value_type &KV) {
+ return KV.first == &JD;
+ });
+ if (I != LinkOrder.end())
+ LinkOrder.erase(I);
+ });
+}
+
// Removes Names from the symbol table. Fails without removing anything if any
// name is missing or is currently materializing. Attached materializers are
// notified via discard before removal.
Error JITDylib::remove(const SymbolNameSet &Names) {
  return ES.runSessionLocked([&]() -> Error {
    assert(State == Open && "JD is defunct");
    using SymbolMaterializerItrPair =
        std::pair<SymbolTable::iterator, UnmaterializedInfosMap::iterator>;
    std::vector<SymbolMaterializerItrPair> SymbolsToRemove;
    SymbolNameSet Missing;
    SymbolNameSet Materializing;

    for (auto &Name : Names) {
      auto I = Symbols.find(Name);

      // Note symbol missing.
      if (I == Symbols.end()) {
        Missing.insert(Name);
        continue;
      }

      // Note symbol materializing.
      if (I->second.getState() != SymbolState::NeverSearched &&
          I->second.getState() != SymbolState::Ready) {
        Materializing.insert(Name);
        continue;
      }

      auto UMII = I->second.hasMaterializerAttached()
                      ? UnmaterializedInfos.find(Name)
                      : UnmaterializedInfos.end();
      SymbolsToRemove.push_back(std::make_pair(I, UMII));
    }

    // If any of the symbols are not defined, return an error.
    if (!Missing.empty())
      return make_error<SymbolsNotFound>(ES.getSymbolStringPool(),
                                         std::move(Missing));

    // If any of the symbols are currently materializing, return an error.
    if (!Materializing.empty())
      return make_error<SymbolsCouldNotBeRemoved>(ES.getSymbolStringPool(),
                                                  std::move(Materializing));

    // Remove the symbols.
    for (auto &SymbolMaterializerItrPair : SymbolsToRemove) {
      auto UMII = SymbolMaterializerItrPair.second;

      // If there is a materializer attached, call discard.
      if (UMII != UnmaterializedInfos.end()) {
        UMII->second->MU->doDiscard(*this, UMII->first);
        UnmaterializedInfos.erase(UMII);
      }

      auto SymI = SymbolMaterializerItrPair.first;
      Symbols.erase(SymI);
    }

    // Release now-unused bookkeeping storage.
    shrinkMaterializationInfoMemory();

    return Error::success();
  });
}
+
// Prints a human-readable description of this JITDylib's state (symbol table,
// pending queries, EDU dependence info) for debugging and tests.
void JITDylib::dump(raw_ostream &OS) {
  ES.runSessionLocked([&, this]() {
    OS << "JITDylib \"" << getName() << "\" (ES: "
       << format("0x%016" PRIx64, reinterpret_cast<uintptr_t>(&ES))
       << ", State = ";
    switch (State) {
    case Open:
      OS << "Open";
      break;
    case Closing:
      OS << "Closing";
      break;
    case Closed:
      OS << "Closed";
      break;
    }
    OS << ")\n";
    // A closed JD has no remaining state worth printing.
    if (State == Closed)
      return;
    OS << "Link order: " << LinkOrder << "\n"
       << "Symbol table:\n";

    // Sort symbols so we get a deterministic order and can check them in tests.
    std::vector<std::pair<SymbolStringPtr, SymbolTableEntry *>> SymbolsSorted;
    for (auto &KV : Symbols)
      SymbolsSorted.emplace_back(KV.first, &KV.second);
    std::sort(SymbolsSorted.begin(), SymbolsSorted.end(),
              [](const auto &L, const auto &R) { return *L.first < *R.first; });

    for (auto &KV : SymbolsSorted) {
      OS << " \"" << *KV.first << "\": ";
      if (auto Addr = KV.second->getAddress())
        OS << Addr;
      else
        OS << "<not resolved> ";

      OS << " " << KV.second->getFlags() << " " << KV.second->getState();

      if (KV.second->hasMaterializerAttached()) {
        OS << " (Materializer ";
        auto I = UnmaterializedInfos.find(KV.first);
        assert(I != UnmaterializedInfos.end() &&
               "Lazy symbol should have UnmaterializedInfo");
        OS << I->second->MU.get() << ", " << I->second->MU->getName() << ")\n";
      } else
        OS << "\n";
    }

    if (!MaterializingInfos.empty())
      OS << " MaterializingInfos entries:\n";
    for (auto &KV : MaterializingInfos) {
      OS << " \"" << *KV.first << "\":\n"
         << " " << KV.second.pendingQueries().size()
         << " pending queries: { ";
      for (const auto &Q : KV.second.pendingQueries())
        OS << Q.get() << " (" << Q->getRequiredState() << ") ";
      OS << "}\n Defining EDU: ";
      if (KV.second.DefiningEDU) {
        OS << KV.second.DefiningEDU.get() << " { ";
        for (auto &[Name, Flags] : KV.second.DefiningEDU->Symbols)
          OS << Name << " ";
        OS << "}\n";
        OS << " Dependencies:\n";
        if (!KV.second.DefiningEDU->Dependencies.empty()) {
          for (auto &[DepJD, Deps] : KV.second.DefiningEDU->Dependencies) {
            OS << " " << DepJD->getName() << ": [ ";
            for (auto &Dep : Deps)
              OS << Dep << " ";
            OS << "]\n";
          }
        } else
          OS << " none\n";
      } else
        OS << "none\n";
      OS << " Dependant EDUs:\n";
      if (!KV.second.DependantEDUs.empty()) {
        for (auto &DependantEDU : KV.second.DependantEDUs) {
          OS << " " << DependantEDU << ": "
             << DependantEDU->JD->getName() << " { ";
          for (auto &[Name, Flags] : DependantEDU->Symbols)
            OS << Name << " ";
          OS << "}\n";
        }
      } else
        OS << " none\n";
      assert((Symbols[KV.first].getState() != SymbolState::Ready ||
              (KV.second.pendingQueries().empty() && !KV.second.DefiningEDU &&
               !KV.second.DependantEDUs.empty())) &&
             "Stale materializing info entry");
    }
  });
}
+
// Inserts Q into PendingQueries, which is kept ordered by required state so
// that takeQueriesMeeting can pop satisfied queries off the back.
void JITDylib::MaterializingInfo::addQuery(
    std::shared_ptr<AsynchronousSymbolQuery> Q) {

  // Binary search from the back for the insertion point among queries with
  // the same required state.
  auto I = llvm::lower_bound(
      llvm::reverse(PendingQueries), Q->getRequiredState(),
      [](const std::shared_ptr<AsynchronousSymbolQuery> &V, SymbolState S) {
        return V->getRequiredState() <= S;
      });
  PendingQueries.insert(I.base(), std::move(Q));
}
+
+void JITDylib::MaterializingInfo::removeQuery(
+ const AsynchronousSymbolQuery &Q) {
+ // FIXME: Implement 'find_as' for shared_ptr<T>/T*.
+ auto I = llvm::find_if(
+ PendingQueries, [&Q](const std::shared_ptr<AsynchronousSymbolQuery> &V) {
+ return V.get() == &Q;
+ });
+ assert(I != PendingQueries.end() &&
+ "Query is not attached to this MaterializingInfo");
+ PendingQueries.erase(I);
+}
+
+JITDylib::AsynchronousSymbolQueryList
+JITDylib::MaterializingInfo::takeQueriesMeeting(SymbolState RequiredState) {
+ AsynchronousSymbolQueryList Result;
+ while (!PendingQueries.empty()) {
+ if (PendingQueries.back()->getRequiredState() > RequiredState)
+ break;
+
+ Result.push_back(std::move(PendingQueries.back()));
+ PendingQueries.pop_back();
+ }
+
+ return Result;
+}
+
// Every JITDylib links against itself first by default.
JITDylib::JITDylib(ExecutionSession &ES, std::string Name)
    : JITLinkDylib(std::move(Name)), ES(ES) {
  LinkOrder.push_back({this, JITDylibLookupFlags::MatchAllSymbols});
}
+
// Removes all symbols owned by RT from this JITDylib. Must be called under
// the session lock. Returns the queries and dependence map produced by
// failing any still-materializing symbols so the caller can notify them after
// releasing the lock.
std::pair<JITDylib::AsynchronousSymbolQuerySet,
          std::shared_ptr<SymbolDependenceMap>>
JITDylib::IL_removeTracker(ResourceTracker &RT) {
  // Note: Should be called under the session lock.
  assert(State != Closed && "JD is defunct");

  SymbolNameVector SymbolsToRemove;
  SymbolNameVector SymbolsToFail;

  if (&RT == DefaultTracker.get()) {
    // The default tracker owns every symbol not claimed by another tracker.
    SymbolNameSet TrackedSymbols;
    for (auto &KV : TrackerSymbols)
      for (auto &Sym : KV.second)
        TrackedSymbols.insert(Sym);

    for (auto &KV : Symbols) {
      auto &Sym = KV.first;
      if (!TrackedSymbols.count(Sym))
        SymbolsToRemove.push_back(Sym);
    }

    DefaultTracker.reset();
  } else {
    /// Check for a non-default tracker.
    auto I = TrackerSymbols.find(&RT);
    if (I != TrackerSymbols.end()) {
      SymbolsToRemove = std::move(I->second);
      TrackerSymbols.erase(I);
    }
    // ... if not found this tracker was already defunct. Nothing to do.
  }

  for (auto &Sym : SymbolsToRemove) {
    assert(Symbols.count(Sym) && "Symbol not in symbol table");

    // If there is a MaterializingInfo then collect any queries to fail.
    auto MII = MaterializingInfos.find(Sym);
    if (MII != MaterializingInfos.end())
      SymbolsToFail.push_back(Sym);
  }

  auto Result = ES.IL_failSymbols(*this, std::move(SymbolsToFail));

  // Removed symbols should be taken out of the table altogether.
  for (auto &Sym : SymbolsToRemove) {
    auto I = Symbols.find(Sym);
    assert(I != Symbols.end() && "Symbol not present in table");

    // Remove Materializer if present.
    if (I->second.hasMaterializerAttached()) {
      // FIXME: Should this discard the symbols?
      UnmaterializedInfos.erase(Sym);
    } else {
      assert(!UnmaterializedInfos.count(Sym) &&
             "Symbol has materializer attached");
    }

    Symbols.erase(I);
  }

  // Release now-unused bookkeeping storage.
  shrinkMaterializationInfoMemory();

  return Result;
}
+
// Transfer all resources (unmaterialized units, active materialization
// responsibilities, and tracked symbols) from SrcRT to DstRT within this
// JITDylib. Called under the session lock (via
// ExecutionSession::transferResourceTracker).
void JITDylib::transferTracker(ResourceTracker &DstRT, ResourceTracker &SrcRT) {
  assert(State != Closed && "JD is defunct");
  assert(&DstRT != &SrcRT && "No-op transfers shouldn't call transferTracker");
  assert(&DstRT.getJITDylib() == this && "DstRT is not for this JITDylib");
  assert(&SrcRT.getJITDylib() == this && "SrcRT is not for this JITDylib");

  // Update trackers for any not-yet materialized units.
  for (auto &KV : UnmaterializedInfos) {
    if (KV.second->RT == &SrcRT)
      KV.second->RT = &DstRT;
  }

  // Update trackers for any active materialization responsibilities.
  {
    auto I = TrackerMRs.find(&SrcRT);
    if (I != TrackerMRs.end()) {
      auto &SrcMRs = I->second;
      auto &DstMRs = TrackerMRs[&DstRT];
      for (auto *MR : SrcMRs)
        MR->RT = &DstRT;
      // Move wholesale if DstRT had no MRs yet; otherwise merge.
      if (DstMRs.empty())
        DstMRs = std::move(SrcMRs);
      else
        for (auto *MR : SrcMRs)
          DstMRs.insert(MR);
      // Erase SrcRT entry in TrackerMRs. Use &SrcRT key rather than iterator I
      // for this, since I may have been invalidated by 'TrackerMRs[&DstRT]'.
      TrackerMRs.erase(&SrcRT);
    }
  }

  // If we're transferring to the default tracker we just need to delete the
  // tracked symbols for the source tracker: default-tracker ownership is
  // implicit (any symbol with no TrackerSymbols entry).
  if (&DstRT == DefaultTracker.get()) {
    TrackerSymbols.erase(&SrcRT);
    return;
  }

  // If we're transferring from the default tracker we need to find all
  // currently untracked symbols and assign them to DstRT explicitly.
  if (&SrcRT == DefaultTracker.get()) {
    assert(!TrackerSymbols.count(&SrcRT) &&
           "Default tracker should not appear in TrackerSymbols");

    SymbolNameVector SymbolsToTrack;

    SymbolNameSet CurrentlyTrackedSymbols;
    for (auto &KV : TrackerSymbols)
      for (auto &Sym : KV.second)
        CurrentlyTrackedSymbols.insert(Sym);

    for (auto &KV : Symbols) {
      auto &Sym = KV.first;
      if (!CurrentlyTrackedSymbols.count(Sym))
        SymbolsToTrack.push_back(Sym);
    }

    TrackerSymbols[&DstRT] = std::move(SymbolsToTrack);
    return;
  }

  auto &DstTrackedSymbols = TrackerSymbols[&DstRT];

  // Finally, if neither SrcRT nor DstRT is the default tracker then just
  // append SrcRT's tracked symbols to DstRT's.
  auto SI = TrackerSymbols.find(&SrcRT);
  if (SI == TrackerSymbols.end())
    return;

  DstTrackedSymbols.reserve(DstTrackedSymbols.size() + SI->second.size());
  for (auto &Sym : SI->second)
    DstTrackedSymbols.push_back(std::move(Sym));
  TrackerSymbols.erase(SI);
}
+
+Error JITDylib::defineImpl(MaterializationUnit &MU) {
+ LLVM_DEBUG({ dbgs() << " " << MU.getSymbols() << "\n"; });
+
+ SymbolNameSet Duplicates;
+ std::vector<SymbolStringPtr> ExistingDefsOverridden;
+ std::vector<SymbolStringPtr> MUDefsOverridden;
+
+ for (const auto &KV : MU.getSymbols()) {
+ auto I = Symbols.find(KV.first);
+
+ if (I != Symbols.end()) {
+ if (KV.second.isStrong()) {
+ if (I->second.getFlags().isStrong() ||
+ I->second.getState() > SymbolState::NeverSearched)
+ Duplicates.insert(KV.first);
+ else {
+ assert(I->second.getState() == SymbolState::NeverSearched &&
+ "Overridden existing def should be in the never-searched "
+ "state");
+ ExistingDefsOverridden.push_back(KV.first);
+ }
+ } else
+ MUDefsOverridden.push_back(KV.first);
+ }
+ }
+
+ // If there were any duplicate definitions then bail out.
+ if (!Duplicates.empty()) {
+ LLVM_DEBUG(
+ { dbgs() << " Error: Duplicate symbols " << Duplicates << "\n"; });
+ return make_error<DuplicateDefinition>(std::string(**Duplicates.begin()));
+ }
+
+ // Discard any overridden defs in this MU.
+ LLVM_DEBUG({
+ if (!MUDefsOverridden.empty())
+ dbgs() << " Defs in this MU overridden: " << MUDefsOverridden << "\n";
+ });
+ for (auto &S : MUDefsOverridden)
+ MU.doDiscard(*this, S);
+
+ // Discard existing overridden defs.
+ LLVM_DEBUG({
+ if (!ExistingDefsOverridden.empty())
+ dbgs() << " Existing defs overridden by this MU: " << MUDefsOverridden
+ << "\n";
+ });
+ for (auto &S : ExistingDefsOverridden) {
+
+ auto UMII = UnmaterializedInfos.find(S);
+ assert(UMII != UnmaterializedInfos.end() &&
+ "Overridden existing def should have an UnmaterializedInfo");
+ UMII->second->MU->doDiscard(*this, S);
+ }
+
+ // Finally, add the defs from this MU.
+ for (auto &KV : MU.getSymbols()) {
+ auto &SymEntry = Symbols[KV.first];
+ SymEntry.setFlags(KV.second);
+ SymEntry.setState(SymbolState::NeverSearched);
+ SymEntry.setMaterializerAttached(true);
+ }
+
+ return Error::success();
+}
+
// Record ownership of MU's symbols by tracker RT and index MU's symbols in
// UnmaterializedInfos. Callers must have already run defineImpl(MU)
// successfully.
void JITDylib::installMaterializationUnit(
    std::unique_ptr<MaterializationUnit> MU, ResourceTracker &RT) {

  /// defineImpl succeeded.
  // Default-tracker ownership is implicit, so only non-default trackers get
  // an explicit TrackerSymbols entry.
  if (&RT != DefaultTracker.get()) {
    auto &TS = TrackerSymbols[&RT];
    TS.reserve(TS.size() + MU->getSymbols().size());
    for (auto &KV : MU->getSymbols())
      TS.push_back(KV.first);
  }

  // All of MU's symbols share a single UnmaterializedInfo.
  auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU), &RT);
  for (auto &KV : UMI->MU->getSymbols())
    UnmaterializedInfos[KV.first] = UMI;
}
+
// Detach query Q from the MaterializingInfo of every symbol in QuerySymbols.
// Each symbol must have a MaterializingInfo entry (asserted).
void JITDylib::detachQueryHelper(AsynchronousSymbolQuery &Q,
                                 const SymbolNameSet &QuerySymbols) {
  for (auto &QuerySymbol : QuerySymbols) {
    assert(MaterializingInfos.count(QuerySymbol) &&
           "QuerySymbol does not have MaterializingInfo");
    auto &MI = MaterializingInfos[QuerySymbol];
    MI.removeQuery(Q);
  }
}
+
Platform::~Platform() = default;

// Blocking lookup of per-JITDylib initializer symbols.
//
// Issues one asynchronous Ready-state lookup per JITDylib in InitSyms, then
// waits on a condition variable until all lookups have completed. Returns
// either the per-JITDylib result maps or the join of all lookup errors.
// Note: blocks the calling thread; see lookupInitSymbolsAsync for the
// non-blocking variant.
Expected<DenseMap<JITDylib *, SymbolMap>> Platform::lookupInitSymbols(
    ExecutionSession &ES,
    const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms) {

  DenseMap<JITDylib *, SymbolMap> CompoundResult;
  Error CompoundErr = Error::success();
  std::mutex LookupMutex;
  std::condition_variable CV;
  uint64_t Count = InitSyms.size();

  LLVM_DEBUG({
    dbgs() << "Issuing init-symbol lookup:\n";
    for (auto &KV : InitSyms)
      dbgs() << " " << KV.first->getName() << ": " << KV.second << "\n";
  });

  for (auto &KV : InitSyms) {
    auto *JD = KV.first;
    // NOTE(review): InitSyms is const, so this std::move actually copies —
    // harmless but worth confirming if the copy matters.
    auto Names = std::move(KV.second);
    ES.lookup(
        LookupKind::Static,
        JITDylibSearchOrder({{JD, JITDylibLookupFlags::MatchAllSymbols}}),
        std::move(Names), SymbolState::Ready,
        [&, JD](Expected<SymbolMap> Result) {
          {
            std::lock_guard<std::mutex> Lock(LookupMutex);
            --Count;
            if (Result) {
              assert(!CompoundResult.count(JD) &&
                     "Duplicate JITDylib in lookup?");
              CompoundResult[JD] = std::move(*Result);
            } else
              CompoundErr =
                  joinErrors(std::move(CompoundErr), Result.takeError());
          }
          // Wake the waiting thread; it re-checks the predicate below.
          CV.notify_one();
        },
        NoDependenciesToRegister);
  }

  // Wait until every lookup has reported, or any error has been recorded.
  std::unique_lock<std::mutex> Lock(LookupMutex);
  CV.wait(Lock, [&] { return Count == 0 || CompoundErr; });

  if (CompoundErr)
    return std::move(CompoundErr);

  return std::move(CompoundResult);
}
+
// Asynchronous counterpart of lookupInitSymbols.
//
// Issues one Ready-state lookup per JITDylib. Each lookup callback shares
// ownership of a TriggerOnComplete object; when the last callback releases
// its reference, the TriggerOnComplete destructor fires OnComplete with the
// joined error (Error::success() if all lookups succeeded). Only errors are
// propagated — successful SymbolMap results are discarded.
void Platform::lookupInitSymbolsAsync(
    unique_function<void(Error)> OnComplete, ExecutionSession &ES,
    const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms) {

  // Accumulates errors from all lookups and invokes OnComplete from its
  // destructor, i.e. after every lookup callback has run and dropped its
  // shared_ptr reference.
  class TriggerOnComplete {
  public:
    using OnCompleteFn = unique_function<void(Error)>;
    TriggerOnComplete(OnCompleteFn OnComplete)
        : OnComplete(std::move(OnComplete)) {}
    ~TriggerOnComplete() { OnComplete(std::move(LookupResult)); }
    void reportResult(Error Err) {
      std::lock_guard<std::mutex> Lock(ResultMutex);
      LookupResult = joinErrors(std::move(LookupResult), std::move(Err));
    }

  private:
    std::mutex ResultMutex;
    Error LookupResult{Error::success()};
    OnCompleteFn OnComplete;
  };

  LLVM_DEBUG({
    dbgs() << "Issuing init-symbol lookup:\n";
    for (auto &KV : InitSyms)
      dbgs() << " " << KV.first->getName() << ": " << KV.second << "\n";
  });

  auto TOC = std::make_shared<TriggerOnComplete>(std::move(OnComplete));

  for (auto &KV : InitSyms) {
    auto *JD = KV.first;
    auto Names = std::move(KV.second);
    ES.lookup(
        LookupKind::Static,
        JITDylibSearchOrder({{JD, JITDylibLookupFlags::MatchAllSymbols}}),
        std::move(Names), SymbolState::Ready,
        [TOC](Expected<SymbolMap> Result) {
          // takeError() yields Error::success() for successful lookups.
          TOC->reportResult(Result.takeError());
        },
        NoDependenciesToRegister);
  }
}
+
// Human-readable description for task dumping/debugging.
void MaterializationTask::printDescription(raw_ostream &OS) {
  OS << "Materialization task: " << MU->getName() << " in "
     << MR->getTargetJITDylib().getName();
}

// Run the materializer, handing it ownership of the responsibility object.
void MaterializationTask::run() { MU->materialize(std::move(MR)); }

void LookupTask::printDescription(raw_ostream &OS) { OS << "Lookup task"; }

// Resume the suspended lookup with no error.
void LookupTask::run() { LS.continueLookup(Error::success()); }
+
ExecutionSession::ExecutionSession(std::unique_ptr<ExecutorProcessControl> EPC)
    : EPC(std::move(EPC)) {
  // Associate the EPC with this session (back-pointer).
  this->EPC->ES = this;
}

ExecutionSession::~ExecutionSession() {
  // You must call endSession prior to destroying the session.
  assert(!SessionOpen &&
         "Session still open. Did you forget to call endSession?");
}
+
// Shut down the session: mark it closed, remove all JITDylibs (in reverse
// creation order, so dependents are torn down before their dependencies),
// then disconnect the ExecutorProcessControl. Returns the join of all errors
// encountered.
Error ExecutionSession::endSession() {
  LLVM_DEBUG(dbgs() << "Ending ExecutionSession " << this << "\n");

  // Snapshot the JITDylib list and flip SessionOpen under the session lock.
  auto JDsToRemove = runSessionLocked([&] {

#ifdef EXPENSIVE_CHECKS
    verifySessionState("Entering ExecutionSession::endSession");
#endif

    SessionOpen = false;
    return JDs;
  });

  std::reverse(JDsToRemove.begin(), JDsToRemove.end());

  auto Err = removeJITDylibs(std::move(JDsToRemove));

  Err = joinErrors(std::move(Err), EPC->disconnect());

  return Err;
}
+
// Append RM to the session's resource-manager list (under the session lock).
void ExecutionSession::registerResourceManager(ResourceManager &RM) {
  runSessionLocked([&] { ResourceManagers.push_back(&RM); });
}

// Remove RM from the resource-manager list. RM must be registered
// (asserted). The common LIFO case (RM is the most recently registered) is
// handled without a search.
void ExecutionSession::deregisterResourceManager(ResourceManager &RM) {
  runSessionLocked([&] {
    assert(!ResourceManagers.empty() && "No managers registered");
    if (ResourceManagers.back() == &RM)
      ResourceManagers.pop_back();
    else {
      auto I = llvm::find(ResourceManagers, &RM);
      assert(I != ResourceManagers.end() && "RM not registered");
      ResourceManagers.erase(I);
    }
  });
}
+
// Linear search for a JITDylib by name. Returns nullptr if not found.
JITDylib *ExecutionSession::getJITDylibByName(StringRef Name) {
  return runSessionLocked([&, this]() -> JITDylib * {
    for (auto &JD : JDs)
      if (JD->getName() == Name)
        return JD.get();
    return nullptr;
  });
}

// Create a JITDylib without notifying the platform. Name must be unique
// (asserted) and the session must still be open.
JITDylib &ExecutionSession::createBareJITDylib(std::string Name) {
  assert(!getJITDylibByName(Name) && "JITDylib with that name already exists");
  return runSessionLocked([&, this]() -> JITDylib & {
    assert(SessionOpen && "Cannot create JITDylib after session is closed");
    JDs.push_back(new JITDylib(*this, std::move(Name)));
    return *JDs.back();
  });
}

// Create a JITDylib and, if a platform is installed, give it a chance to set
// the new dylib up (e.g. add standard definitions). Fails if platform setup
// fails.
Expected<JITDylib &> ExecutionSession::createJITDylib(std::string Name) {
  auto &JD = createBareJITDylib(Name);
  if (P)
    if (auto Err = P->setupJITDylib(JD))
      return std::move(Err);
  return JD;
}
+
// Remove the given JITDylibs from the session in three phases:
//  1. Under the session lock: mark each JD Closing and unlink it from JDs.
//  2. Outside the lock: clear each JD's contents and notify the platform.
//  3. Under the session lock: mark each JD Closed and verify/clear the
//     remaining data structures.
// Returns the join of all errors from phase 2.
Error ExecutionSession::removeJITDylibs(std::vector<JITDylibSP> JDsToRemove) {
  // Set JD to 'Closing' state and remove JD from the ExecutionSession.
  runSessionLocked([&] {
    for (auto &JD : JDsToRemove) {
      assert(JD->State == JITDylib::Open && "JD already closed");
      JD->State = JITDylib::Closing;
      auto I = llvm::find(JDs, JD);
      assert(I != JDs.end() && "JD does not appear in session JDs");
      JDs.erase(I);
    }
  });

  // Clear JITDylibs and notify the platform.
  Error Err = Error::success();
  for (auto JD : JDsToRemove) {
    Err = joinErrors(std::move(Err), JD->clear());
    if (P)
      Err = joinErrors(std::move(Err), P->teardownJITDylib(*JD));
  }

  // Set JD to closed state. Clear remaining data structures.
  runSessionLocked([&] {
    for (auto &JD : JDsToRemove) {
      assert(JD->State == JITDylib::Closing && "JD should be closing");
      JD->State = JITDylib::Closed;
      assert(JD->Symbols.empty() && "JD.Symbols is not empty after clear");
      assert(JD->UnmaterializedInfos.empty() &&
             "JD.UnmaterializedInfos is not empty after clear");
      assert(JD->MaterializingInfos.empty() &&
             "JD.MaterializingInfos is not empty after clear");
      assert(JD->TrackerSymbols.empty() &&
             "TrackerSymbols is not empty after clear");
      JD->DefGenerators.clear();
      JD->LinkOrder.clear();
    }
  });

  return Err;
}
+
// Compute a depth-first-search ordering over the given JITDylibs and their
// transitive link-order dependencies, deduplicated. Fails if any JD reached
// is not in the Open state. The whole traversal runs under the session lock
// so link orders cannot change mid-walk.
Expected<std::vector<JITDylibSP>>
JITDylib::getDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
  if (JDs.empty())
    return std::vector<JITDylibSP>();

  auto &ES = JDs.front()->getExecutionSession();
  return ES.runSessionLocked([&]() -> Expected<std::vector<JITDylibSP>> {
    DenseSet<JITDylib *> Visited;
    std::vector<JITDylibSP> Result;

    for (auto &JD : JDs) {

      if (JD->State != Open)
        return make_error<StringError>(
            "Error building link order: " + JD->getName() + " is defunct",
            inconvertibleErrorCode());
      if (Visited.count(JD.get()))
        continue;

      SmallVector<JITDylibSP, 64> WorkStack;
      WorkStack.push_back(JD);
      Visited.insert(JD.get());

      while (!WorkStack.empty()) {
        Result.push_back(std::move(WorkStack.back()));
        WorkStack.pop_back();

        // Push link-order successors in reverse so they pop (and appear in
        // Result) in link-order sequence. Note: iterates the just-appended
        // Result.back(), i.e. the node we popped above.
        for (auto &KV : llvm::reverse(Result.back()->LinkOrder)) {
          auto &JD = *KV.first;
          if (!Visited.insert(&JD).second)
            continue;
          WorkStack.push_back(&JD);
        }
      }
    }
    return Result;
  });
}

// Same as getDFSLinkOrder, but with the result order reversed.
Expected<std::vector<JITDylibSP>>
JITDylib::getReverseDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
  auto Result = getDFSLinkOrder(JDs);
  if (Result)
    std::reverse(Result->begin(), Result->end());
  return Result;
}

// Convenience: DFS link order rooted at this JITDylib.
Expected<std::vector<JITDylibSP>> JITDylib::getDFSLinkOrder() {
  return getDFSLinkOrder({this});
}

// Convenience: reverse DFS link order rooted at this JITDylib.
Expected<std::vector<JITDylibSP>> JITDylib::getReverseDFSLinkOrder() {
  return getReverseDFSLinkOrder({this});
}
+
// Asynchronous flags-only lookup: runs phase 1 of the lookup state machine
// and delivers the SymbolFlagsMap (or error) to OnComplete. No query is
// attached, so no symbol state is awaited.
void ExecutionSession::lookupFlags(
    LookupKind K, JITDylibSearchOrder SearchOrder, SymbolLookupSet LookupSet,
    unique_function<void(Expected<SymbolFlagsMap>)> OnComplete) {

  OL_applyQueryPhase1(std::make_unique<InProgressLookupFlagsState>(
                          K, std::move(SearchOrder), std::move(LookupSet),
                          std::move(OnComplete)),
                      Error::success());
}

// Blocking flags-only lookup: wraps the asynchronous variant with a
// promise/future pair and waits for the result. (MSVCPExpected works around
// MSVC's std::promise requiring default-constructible types.)
Expected<SymbolFlagsMap>
ExecutionSession::lookupFlags(LookupKind K, JITDylibSearchOrder SearchOrder,
                              SymbolLookupSet LookupSet) {

  std::promise<MSVCPExpected<SymbolFlagsMap>> ResultP;
  OL_applyQueryPhase1(std::make_unique<InProgressLookupFlagsState>(
                          K, std::move(SearchOrder), std::move(LookupSet),
                          [&ResultP](Expected<SymbolFlagsMap> Result) {
                            ResultP.set_value(std::move(Result));
                          }),
                      Error::success());

  auto ResultF = ResultP.get_future();
  return ResultF.get();
}
+
// Asynchronous symbol lookup: searches SearchOrder for Symbols, calling
// NotifyComplete once every symbol has reached RequiredState (or the lookup
// fails). RegisterDependencies is invoked to record symbol dependencies for
// queries below the Ready state.
void ExecutionSession::lookup(
    LookupKind K, const JITDylibSearchOrder &SearchOrder,
    SymbolLookupSet Symbols, SymbolState RequiredState,
    SymbolsResolvedCallback NotifyComplete,
    RegisterDependenciesFunction RegisterDependencies) {

  LLVM_DEBUG({
    runSessionLocked([&]() {
      dbgs() << "Looking up " << Symbols << " in " << SearchOrder
             << " (required state: " << RequiredState << ")\n";
    });
  });

  // lookup can be re-entered recursively if running on a single thread. Run any
  // outstanding MUs in case this query depends on them, otherwise this lookup
  // will starve waiting for a result from an MU that is stuck in the queue.
  dispatchOutstandingMUs();

  auto Unresolved = std::move(Symbols);
  auto Q = std::make_shared<AsynchronousSymbolQuery>(Unresolved, RequiredState,
                                                     std::move(NotifyComplete));

  // Phase 1 walks the search order collecting definitions; the query Q is
  // attached later (see the FIXME in OL_applyQueryPhase1).
  auto IPLS = std::make_unique<InProgressFullLookupState>(
      K, SearchOrder, std::move(Unresolved), RequiredState, std::move(Q),
      std::move(RegisterDependencies));

  OL_applyQueryPhase1(std::move(IPLS), Error::success());
}
+
// Blocking symbol lookup: wraps the asynchronous lookup and waits for the
// result. With LLVM_ENABLE_THREADS the wait uses a promise/future; without
// threads the callback is expected to run re-entrantly on this thread before
// lookup() returns, so plain locals suffice.
Expected<SymbolMap>
ExecutionSession::lookup(const JITDylibSearchOrder &SearchOrder,
                         SymbolLookupSet Symbols, LookupKind K,
                         SymbolState RequiredState,
                         RegisterDependenciesFunction RegisterDependencies) {
#if LLVM_ENABLE_THREADS
  // In the threaded case we use promises to return the results.
  std::promise<SymbolMap> PromisedResult;
  Error ResolutionError = Error::success();

  auto NotifyComplete = [&](Expected<SymbolMap> R) {
    if (R)
      PromisedResult.set_value(std::move(*R));
    else {
      // ErrorAsOutParameter marks ResolutionError unchecked across the
      // assignment so the Error discipline is satisfied.
      ErrorAsOutParameter _(&ResolutionError);
      ResolutionError = R.takeError();
      // Still satisfy the future so the waiting thread wakes up.
      PromisedResult.set_value(SymbolMap());
    }
  };

#else
  SymbolMap Result;
  Error ResolutionError = Error::success();

  auto NotifyComplete = [&](Expected<SymbolMap> R) {
    ErrorAsOutParameter _(&ResolutionError);
    if (R)
      Result = std::move(*R);
    else
      ResolutionError = R.takeError();
  };
#endif

  // Perform the asynchronous lookup.
  lookup(K, SearchOrder, std::move(Symbols), RequiredState, NotifyComplete,
         RegisterDependencies);

#if LLVM_ENABLE_THREADS
  auto ResultFuture = PromisedResult.get_future();
  auto Result = ResultFuture.get();

  if (ResolutionError)
    return std::move(ResolutionError);

  return std::move(Result);

#else
  if (ResolutionError)
    return std::move(ResolutionError);

  return Result;
#endif
}
+
// Convenience overload: blocking lookup of a single interned symbol.
Expected<ExecutorSymbolDef>
ExecutionSession::lookup(const JITDylibSearchOrder &SearchOrder,
                         SymbolStringPtr Name, SymbolState RequiredState) {
  SymbolLookupSet Names({Name});

  if (auto ResultMap = lookup(SearchOrder, std::move(Names), LookupKind::Static,
                              RequiredState, NoDependenciesToRegister)) {
    assert(ResultMap->size() == 1 && "Unexpected number of results");
    assert(ResultMap->count(Name) && "Missing result for symbol");
    return std::move(ResultMap->begin()->second);
  } else
    return ResultMap.takeError();
}

// Convenience overload: build a search order from raw JITDylib pointers.
Expected<ExecutorSymbolDef>
ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, SymbolStringPtr Name,
                         SymbolState RequiredState) {
  return lookup(makeJITDylibSearchOrder(SearchOrder), Name, RequiredState);
}

// Convenience overload: intern a plain string name first.
Expected<ExecutorSymbolDef>
ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, StringRef Name,
                         SymbolState RequiredState) {
  return lookup(SearchOrder, intern(Name), RequiredState);
}
+
// Associate jit-dispatch tag symbols (looked up in JD) with their handler
// implementations. Tags are weakly referenced, so absent tags are silently
// skipped; registering the same tag address twice is an error.
Error ExecutionSession::registerJITDispatchHandlers(
    JITDylib &JD, JITDispatchHandlerAssociationMap WFs) {

  auto TagAddrs = lookup({{&JD, JITDylibLookupFlags::MatchAllSymbols}},
                         SymbolLookupSet::fromMapKeys(
                             WFs, SymbolLookupFlags::WeaklyReferencedSymbol));
  if (!TagAddrs)
    return TagAddrs.takeError();

  // Associate tag addresses with implementations.
  std::lock_guard<std::mutex> Lock(JITDispatchHandlersMutex);
  for (auto &KV : *TagAddrs) {
    auto TagAddr = KV.second.getAddress();
    if (JITDispatchHandlers.count(TagAddr))
      return make_error<StringError>("Tag " + formatv("{0:x16}", TagAddr) +
                                         " (for " + *KV.first +
                                         ") already registered",
                                     inconvertibleErrorCode());
    auto I = WFs.find(KV.first);
    assert(I != WFs.end() && I->second &&
           "JITDispatchHandler implementation missing");
    // Handlers are stored as shared_ptrs so runJITDispatchHandler can hold a
    // reference while invoking them outside the lock.
    JITDispatchHandlers[KV.second.getAddress()] =
        std::make_shared<JITDispatchHandlerFunction>(std::move(I->second));
    LLVM_DEBUG({
      dbgs() << "Associated function tag \"" << *KV.first << "\" ("
             << formatv("{0:x}", KV.second.getAddress()) << ") with handler\n";
    });
  }
  return Error::success();
}
+
// Look up the handler registered for HandlerFnTagAddr and invoke it with
// ArgBuffer, or report an out-of-band error via SendResult if no handler is
// registered for that tag.
void ExecutionSession::runJITDispatchHandler(SendResultFunction SendResult,
                                             ExecutorAddr HandlerFnTagAddr,
                                             ArrayRef<char> ArgBuffer) {

  // Copy the shared_ptr under the lock, then invoke outside it so the
  // handler can't deadlock against registration.
  std::shared_ptr<JITDispatchHandlerFunction> F;
  {
    std::lock_guard<std::mutex> Lock(JITDispatchHandlersMutex);
    auto I = JITDispatchHandlers.find(HandlerFnTagAddr);
    if (I != JITDispatchHandlers.end())
      F = I->second;
  }

  if (F)
    (*F)(std::move(SendResult), ArgBuffer.data(), ArgBuffer.size());
  else
    SendResult(shared::WrapperFunctionResult::createOutOfBandError(
        ("No function registered for tag " +
         formatv("{0:x16}", HandlerFnTagAddr))
            .str()));
}
+
// Dump every JITDylib in the session to OS (under the session lock).
void ExecutionSession::dump(raw_ostream &OS) {
  runSessionLocked([this, &OS]() {
    for (auto &JD : JDs)
      JD->dump(OS);
  });
}
+
#ifdef EXPENSIVE_CHECKS
// Debug-only consistency checker for the whole session (compiled in only
// under EXPENSIVE_CHECKS). Walks every JITDylib verifying cross-table
// invariants between Symbols, UnmaterializedInfos, MaterializingInfos and
// emission-dependence units (EDUs), logging each violation to errs().
// Returns true if no violations were found. Phase names the call site for
// the log output.
bool ExecutionSession::verifySessionState(Twine Phase) {
  return runSessionLocked([&]() {
    bool AllOk = true;

    // We'll collect these and verify them later to avoid redundant checks.
    DenseSet<JITDylib::EmissionDepUnit *> EDUsToCheck;

    for (auto &JD : JDs) {

      // Logs the one-time header plus a per-JITDylib prefix, and flips AllOk.
      auto LogFailure = [&]() -> raw_fd_ostream & {
        auto &Stream = errs();
        if (AllOk)
          Stream << "ERROR: Bad ExecutionSession state detected " << Phase
                 << "\n";
        Stream << " In JITDylib " << JD->getName() << ", ";
        AllOk = false;
        return Stream;
      };

      if (JD->State != JITDylib::Open) {
        LogFailure()
            << "state is not Open, but JD is in ExecutionSession list.";
      }

      // Check symbol table.
      // 1. If the entry state isn't resolved then check that no address has
      //    been set.
      // 2. Check that if the hasMaterializerAttached flag is set then there is
      //    an UnmaterializedInfo entry, and vice-versa.
      for (auto &[Sym, Entry] : JD->Symbols) {
        // Check that unresolved symbols have null addresses.
        if (Entry.getState() < SymbolState::Resolved) {
          if (Entry.getAddress()) {
            LogFailure() << "symbol " << Sym << " has state "
                         << Entry.getState()
                         << " (not-yet-resolved) but non-null address "
                         << Entry.getAddress() << ".\n";
          }
        }

        // Check that the hasMaterializerAttached flag is correct.
        auto UMIItr = JD->UnmaterializedInfos.find(Sym);
        if (Entry.hasMaterializerAttached()) {
          if (UMIItr == JD->UnmaterializedInfos.end()) {
            LogFailure() << "symbol " << Sym
                         << " entry claims materializer attached, but "
                            "UnmaterializedInfos has no corresponding entry.\n";
          }
        } else if (UMIItr != JD->UnmaterializedInfos.end()) {
          LogFailure()
              << "symbol " << Sym
              << " entry claims no materializer attached, but "
                 "UnmaterializedInfos has an unexpected entry for it.\n";
        }
      }

      // Check that every UnmaterializedInfo entry has a corresponding entry
      // in the Symbols table.
      for (auto &[Sym, UMI] : JD->UnmaterializedInfos) {
        auto SymItr = JD->Symbols.find(Sym);
        if (SymItr == JD->Symbols.end()) {
          LogFailure()
              << "symbol " << Sym
              << " has UnmaterializedInfos entry, but no Symbols entry.\n";
        }
      }

      // Check consistency of the MaterializingInfos table.
      for (auto &[Sym, MII] : JD->MaterializingInfos) {

        auto SymItr = JD->Symbols.find(Sym);
        if (SymItr == JD->Symbols.end()) {
          // If there's no Symbols entry for this MaterializingInfos entry then
          // report that.
          LogFailure()
              << "symbol " << Sym
              << " has MaterializingInfos entry, but no Symbols entry.\n";
        } else {
          // Otherwise check consistency between Symbols and MaterializingInfos.

          // Ready symbols should not have MaterializingInfos.
          if (SymItr->second.getState() == SymbolState::Ready) {
            LogFailure()
                << "symbol " << Sym
                << " is in Ready state, should not have MaterializingInfo.\n";
          }

          // Pending queries should be for subsequent states.
          auto CurState = static_cast<SymbolState>(
              static_cast<std::underlying_type_t<SymbolState>>(
                  SymItr->second.getState()) + 1);
          for (auto &Q : MII.PendingQueries) {
            if (Q->getRequiredState() != CurState) {
              if (Q->getRequiredState() > CurState)
                CurState = Q->getRequiredState();
              else
                LogFailure() << "symbol " << Sym
                             << " has stale or misordered queries.\n";
            }
          }

          // If there's a DefiningEDU then check that...
          // 1. The JD matches.
          // 2. The symbol is in the EDU's Symbols map.
          // 3. The symbol table entry is in the Emitted state.
          if (MII.DefiningEDU) {

            EDUsToCheck.insert(MII.DefiningEDU.get());

            if (MII.DefiningEDU->JD != JD.get()) {
              LogFailure() << "symbol " << Sym
                           << " has DefiningEDU with incorrect JD"
                           << (llvm::is_contained(JDs, MII.DefiningEDU->JD)
                                   ? " (JD not currently in ExecutionSession"
                                   : "")
                           << "\n";
            }

            if (SymItr->second.getState() != SymbolState::Emitted) {
              LogFailure()
                  << "symbol " << Sym
                  << " has DefiningEDU, but is not in Emitted state.\n";
            }
          }

          // Check that JDs for any DependantEDUs are also in the session --
          // that guarantees that we'll also visit them during this loop.
          for (auto &DepEDU : MII.DependantEDUs) {
            if (!llvm::is_contained(JDs, DepEDU->JD)) {
              LogFailure() << "symbol " << Sym << " has DependantEDU "
                           << (void *)DepEDU << " with JD (" << DepEDU->JD
                           << ") that isn't in ExecutionSession.\n";
            }
          }
        }
      }
    }

    // Check EDUs.
    for (auto *EDU : EDUsToCheck) {
      assert(EDU->JD->State == JITDylib::Open && "EDU->JD is not Open");

      auto LogFailure = [&]() -> raw_fd_ostream & {
        AllOk = false;
        auto &Stream = errs();
        Stream << "In EDU defining " << EDU->JD->getName() << ": { ";
        for (auto &[Sym, Flags] : EDU->Symbols)
          Stream << Sym << " ";
        Stream << "}, ";
        return Stream;
      };

      if (EDU->Symbols.empty())
        LogFailure() << "no symbols defined.\n";
      else {
        for (auto &[Sym, Flags] : EDU->Symbols) {
          if (!Sym)
            LogFailure() << "null symbol defined.\n";
          else {
            if (!EDU->JD->Symbols.count(SymbolStringPtr(Sym))) {
              LogFailure() << "symbol " << Sym
                           << " isn't present in JD's symbol table.\n";
            }
          }
        }
      }

      // Every dependency must live in a JD that is still in the session, and
      // every dependant symbol must exist in that JD's symbol table.
      for (auto &[DepJD, Symbols] : EDU->Dependencies) {
        if (!llvm::is_contained(JDs, DepJD)) {
          LogFailure() << "dependant symbols listed for JD that isn't in "
                          "ExecutionSession.\n";
        } else {
          for (auto &DepSym : Symbols) {
            if (!DepJD->Symbols.count(SymbolStringPtr(DepSym))) {
              LogFailure()
                  << "dependant symbol " << DepSym
                  << " does not appear in symbol table for dependant JD "
                  << DepJD->getName() << ".\n";
            }
          }
        }
      }
    }

    return AllOk;
  });
}
#endif // EXPENSIVE_CHECKS
+
// Drain the OutstandingMUs queue, dispatching a MaterializationTask for each
// queued (MaterializationUnit, MaterializationResponsibility) pair. Each pop
// happens under OutstandingMUsMutex, but dispatch happens outside the lock
// (dispatched tasks may queue further MUs).
void ExecutionSession::dispatchOutstandingMUs() {
  LLVM_DEBUG(dbgs() << "Dispatching MaterializationUnits...\n");
  while (true) {
    std::optional<std::pair<std::unique_ptr<MaterializationUnit>,
                            std::unique_ptr<MaterializationResponsibility>>>
        JMU;

    {
      std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
      if (!OutstandingMUs.empty()) {
        JMU.emplace(std::move(OutstandingMUs.back()));
        OutstandingMUs.pop_back();
      }
    }

    // Empty optional means the queue was drained.
    if (!JMU)
      break;

    assert(JMU->first && "No MU?");
    LLVM_DEBUG(dbgs() << " Dispatching \"" << JMU->first->getName() << "\"\n");
    dispatchTask(std::make_unique<MaterializationTask>(std::move(JMU->first),
                                                       std::move(JMU->second)));
  }
  LLVM_DEBUG(dbgs() << "Done dispatching MaterializationUnits.\n");
}
+
// Remove tracker RT: mark it defunct and strip its symbols from its JITDylib
// under the session lock, then (outside the lock) notify resource managers in
// reverse registration order and fail any queries that were waiting on the
// removed symbols.
Error ExecutionSession::removeResourceTracker(ResourceTracker &RT) {
  LLVM_DEBUG({
    dbgs() << "In " << RT.getJITDylib().getName() << " removing tracker "
           << formatv("{0:x}", RT.getKeyUnsafe()) << "\n";
  });
  // Snapshot taken under the lock so managers registered later aren't
  // notified for this removal.
  std::vector<ResourceManager *> CurrentResourceManagers;

  JITDylib::AsynchronousSymbolQuerySet QueriesToFail;
  std::shared_ptr<SymbolDependenceMap> FailedSymbols;

  runSessionLocked([&] {
    CurrentResourceManagers = ResourceManagers;
    RT.makeDefunct();
    std::tie(QueriesToFail, FailedSymbols) =
        RT.getJITDylib().IL_removeTracker(RT);
  });

  Error Err = Error::success();

  auto &JD = RT.getJITDylib();
  for (auto *L : reverse(CurrentResourceManagers))
    Err = joinErrors(std::move(Err),
                     L->handleRemoveResources(JD, RT.getKeyUnsafe()));

  // Fail waiting queries outside the session lock.
  for (auto &Q : QueriesToFail)
    Q->handleFailed(
        make_error<FailedToMaterialize>(getSymbolStringPool(), FailedSymbols));

  return Err;
}
+
// Transfer all resources from SrcRT to DstRT (both in the same JITDylib),
// marking SrcRT defunct and notifying resource managers in reverse
// registration order. A self-transfer is a no-op and leaves SrcRT valid.
void ExecutionSession::transferResourceTracker(ResourceTracker &DstRT,
                                               ResourceTracker &SrcRT) {
  LLVM_DEBUG({
    dbgs() << "In " << SrcRT.getJITDylib().getName()
           << " transfering resources from tracker "
           << formatv("{0:x}", SrcRT.getKeyUnsafe()) << " to tracker "
           << formatv("{0:x}", DstRT.getKeyUnsafe()) << "\n";
  });

  // No-op transfers are allowed and do not invalidate the source.
  if (&DstRT == &SrcRT)
    return;

  assert(&DstRT.getJITDylib() == &SrcRT.getJITDylib() &&
         "Can't transfer resources between JITDylibs");
  runSessionLocked([&]() {
    SrcRT.makeDefunct();
    auto &JD = DstRT.getJITDylib();
    JD.transferTracker(DstRT, SrcRT);
    for (auto *L : reverse(ResourceManagers))
      L->handleTransferResources(JD, DstRT.getKeyUnsafe(),
                                 SrcRT.getKeyUnsafe());
  });
}

// Destructor hook for ResourceTracker: a tracker that was never explicitly
// removed hands its resources back to its JITDylib's default tracker.
void ExecutionSession::destroyResourceTracker(ResourceTracker &RT) {
  runSessionLocked([&]() {
    LLVM_DEBUG({
      dbgs() << "In " << RT.getJITDylib().getName() << " destroying tracker "
             << formatv("{0:x}", RT.getKeyUnsafe()) << "\n";
    });
    if (!RT.isDefunct())
      transferResourceTracker(*RT.getJITDylib().getDefaultResourceTracker(),
                              RT);
  });
}
+
// Remove from Candidates every symbol that is defined by JD (under the
// session lock). Matched symbols are removed; non-exported symbols excluded
// by JDLookupFlags are moved to NonCandidates (if provided). Errors out if a
// matched symbol is in the error state, or if a materialization-side-effects-
// only symbol is matched by a non-weak reference.
Error ExecutionSession::IL_updateCandidatesFor(
    JITDylib &JD, JITDylibLookupFlags JDLookupFlags,
    SymbolLookupSet &Candidates, SymbolLookupSet *NonCandidates) {
  return Candidates.forEachWithRemoval(
      [&](const SymbolStringPtr &Name,
          SymbolLookupFlags SymLookupFlags) -> Expected<bool> {
        /// Search for the symbol. If not found then continue without
        /// removal.
        auto SymI = JD.Symbols.find(Name);
        if (SymI == JD.Symbols.end())
          return false;

        // If this is a non-exported symbol and we're matching exported
        // symbols only then remove this symbol from the candidates list.
        //
        // If we're tracking non-candidates then add this to the non-candidate
        // list.
        if (!SymI->second.getFlags().isExported() &&
            JDLookupFlags == JITDylibLookupFlags::MatchExportedSymbolsOnly) {
          if (NonCandidates)
            NonCandidates->add(Name, SymLookupFlags);
          return true;
        }

        // If we match against a materialization-side-effects only symbol
        // then make sure it is weakly-referenced. Otherwise bail out with
        // an error.
        // FIXME: Use a "materialization-side-effects-only symbols must be
        // weakly referenced" specific error here to reduce confusion.
        if (SymI->second.getFlags().hasMaterializationSideEffectsOnly() &&
            SymLookupFlags != SymbolLookupFlags::WeaklyReferencedSymbol)
          return make_error<SymbolsNotFound>(getSymbolStringPool(),
                                             SymbolNameVector({Name}));

        // If we matched against this symbol but it is in the error state
        // then bail out and treat it as a failure to materialize.
        if (SymI->second.getFlags().hasError()) {
          auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
          (*FailedSymbolsMap)[&JD] = {Name};
          return make_error<FailedToMaterialize>(getSymbolStringPool(),
                                                 std::move(FailedSymbolsMap));
        }

        // Otherwise this is a match. Remove it from the candidate set.
        return true;
      });
}
+
// Called when a lookup finishes its turn in a definition generator: clears
// the lookup's generator state and either frees the generator (no waiters)
// or dispatches the next lookup queued on it.
void ExecutionSession::OL_resumeLookupAfterGeneration(
    InProgressLookupState &IPLS) {

  assert(IPLS.GenState != InProgressLookupState::NotInGenerator &&
         "Should not be called for not-in-generator lookups");
  IPLS.GenState = InProgressLookupState::NotInGenerator;

  LookupState LS;

  // The generator is held by weak_ptr; if it has been destroyed there is
  // nothing to resume and LS stays empty.
  if (auto DG = IPLS.CurDefGeneratorStack.back().lock()) {
    IPLS.CurDefGeneratorStack.pop_back();
    std::lock_guard<std::mutex> Lock(DG->M);

    // If there are no pending lookups then mark the generator as free and
    // return.
    if (DG->PendingLookups.empty()) {
      DG->InUse = false;
      return;
    }

    // Otherwise resume the next lookup.
    LS = std::move(DG->PendingLookups.front());
    DG->PendingLookups.pop_front();
  }

  // Dispatch the dequeued lookup as a task; it re-enters the state machine
  // in the ResumedForGenerator state.
  if (LS.IPLS) {
    LS.IPLS->GenState = InProgressLookupState::ResumedForGenerator;
    dispatchTask(std::make_unique<LookupTask>(std::move(LS)));
  }
}
+
+// Phase 1 of symbol lookup: walk the search order JITDylib by JITDylib,
+// matching lookup-set symbols against existing definitions and running any
+// definition generators for symbols still unmatched. A generator may capture
+// the LookupState (suspending the lookup), in which case this function
+// returns early and phase 1 resumes when the generator releases it. On
+// success the lookup proceeds to phase 2 via IPLS->complete(); on failure
+// IPLS->fail() is invoked with the error.
+void ExecutionSession::OL_applyQueryPhase1(
+    std::unique_ptr<InProgressLookupState> IPLS, Error Err) {
+
+  LLVM_DEBUG({
+    dbgs() << "Entering OL_applyQueryPhase1:\n"
+           << "  Lookup kind: " << IPLS->K << "\n"
+           << "  Search order: " << IPLS->SearchOrder
+           << ", Current index = " << IPLS->CurSearchOrderIndex
+           << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+           << "  Lookup set: " << IPLS->LookupSet << "\n"
+           << "  Definition generator candidates: "
+           << IPLS->DefGeneratorCandidates << "\n"
+           << "  Definition generator non-candidates: "
+           << IPLS->DefGeneratorNonCandidates << "\n";
+  });
+
+  // If we are re-entering after a generator ran, release that generator and
+  // unblock any lookups queued behind it.
+  if (IPLS->GenState == InProgressLookupState::InGenerator)
+    OL_resumeLookupAfterGeneration(*IPLS);
+
+  assert(IPLS->GenState != InProgressLookupState::InGenerator &&
+         "Lookup should not be in InGenerator state here");
+
+  // FIXME: We should attach the query as we go: This provides a result in a
+  // single pass in the common case where all symbols have already reached the
+  // required state. The query could be detached again in the 'fail' method on
+  // IPLS. Phase 2 would be reduced to collecting and dispatching the MUs.
+
+  while (IPLS->CurSearchOrderIndex != IPLS->SearchOrder.size()) {
+
+    // If we've been handed an error or received one back from a generator then
+    // fail the query. We don't need to unlink: At this stage the query hasn't
+    // actually been lodged.
+    if (Err)
+      return IPLS->fail(std::move(Err));
+
+    // Get the next JITDylib and lookup flags.
+    auto &KV = IPLS->SearchOrder[IPLS->CurSearchOrderIndex];
+    auto &JD = *KV.first;
+    auto JDLookupFlags = KV.second;
+
+    LLVM_DEBUG({
+      dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+             << ") with lookup set " << IPLS->LookupSet << ":\n";
+    });
+
+    // If we've just reached a new JITDylib then perform some setup.
+    if (IPLS->NewJITDylib) {
+      // Add any non-candidates from the last JITDylib (if any) back on to the
+      // list of definition candidates for this JITDylib, reset definition
+      // non-candidates to the empty set.
+      SymbolLookupSet Tmp;
+      std::swap(IPLS->DefGeneratorNonCandidates, Tmp);
+      IPLS->DefGeneratorCandidates.append(std::move(Tmp));
+
+      LLVM_DEBUG({
+        dbgs() << "  First time visiting " << JD.getName()
+               << ", resetting candidate sets and building generator stack\n";
+      });
+
+      // Build the definition generator stack for this JITDylib.
+      // DefGenerators is read under the session lock; the stack is built in
+      // reverse so generators can be popped off the back in priority order.
+      runSessionLocked([&] {
+        IPLS->CurDefGeneratorStack.reserve(JD.DefGenerators.size());
+        for (auto &DG : reverse(JD.DefGenerators))
+          IPLS->CurDefGeneratorStack.push_back(DG);
+      });
+
+      // Flag that we've done our initialization.
+      IPLS->NewJITDylib = false;
+    }
+
+    // Remove any generation candidates that are already defined (and match) in
+    // this JITDylib.
+    runSessionLocked([&] {
+      // Update the list of candidates (and non-candidates) for definition
+      // generation.
+      LLVM_DEBUG(dbgs() << "  Updating candidate set...\n");
+      Err = IL_updateCandidatesFor(
+          JD, JDLookupFlags, IPLS->DefGeneratorCandidates,
+          JD.DefGenerators.empty() ? nullptr
+                                   : &IPLS->DefGeneratorNonCandidates);
+      LLVM_DEBUG({
+        dbgs() << "    Remaining candidates = " << IPLS->DefGeneratorCandidates
+               << "\n";
+      });
+
+      // If this lookup was resumed after auto-suspension but all candidates
+      // have already been generated (by some previous call to the generator)
+      // treat the lookup as if it had completed generation.
+      if (IPLS->GenState == InProgressLookupState::ResumedForGenerator &&
+          IPLS->DefGeneratorCandidates.empty())
+        OL_resumeLookupAfterGeneration(*IPLS);
+    });
+
+    // If we encountered an error while filtering generation candidates then
+    // bail out.
+    if (Err)
+      return IPLS->fail(std::move(Err));
+
+    // Apply any definition generators on the stack.
+    LLVM_DEBUG({
+      if (IPLS->CurDefGeneratorStack.empty())
+        LLVM_DEBUG(dbgs() << "  No generators to run for this JITDylib.\n");
+      else if (IPLS->DefGeneratorCandidates.empty())
+        LLVM_DEBUG(dbgs() << "  No candidates to generate.\n");
+      else
+        dbgs() << "  Running " << IPLS->CurDefGeneratorStack.size()
+               << " remaining generators for "
+               << IPLS->DefGeneratorCandidates.size() << " candidates\n";
+    });
+    while (!IPLS->CurDefGeneratorStack.empty() &&
+           !IPLS->DefGeneratorCandidates.empty()) {
+      auto DG = IPLS->CurDefGeneratorStack.back().lock();
+
+      if (!DG)
+        return IPLS->fail(make_error<StringError>(
+            "DefinitionGenerator removed while lookup in progress",
+            inconvertibleErrorCode()));
+
+      // At this point the lookup is in either the NotInGenerator state, or in
+      // the ResumedForGenerator state.
+      // If this lookup is in the NotInGenerator state then check whether the
+      // generator is in use. If the generator is not in use then move the
+      // lookup to the InGenerator state and continue. If the generator is
+      // already in use then just add this lookup to the pending lookups list
+      // and bail out.
+      // If this lookup is in the ResumedForGenerator state then just move it
+      // to InGenerator and continue.
+      if (IPLS->GenState == InProgressLookupState::NotInGenerator) {
+        std::lock_guard<std::mutex> Lock(DG->M);
+        if (DG->InUse) {
+          DG->PendingLookups.push_back(std::move(IPLS));
+          return;
+        }
+        DG->InUse = true;
+      }
+
+      IPLS->GenState = InProgressLookupState::InGenerator;
+
+      auto K = IPLS->K;
+      auto &LookupSet = IPLS->DefGeneratorCandidates;
+
+      // Run the generator. If the generator takes ownership of QA then this
+      // will break the loop.
+      {
+        LLVM_DEBUG(dbgs() << "  Attempting to generate " << LookupSet << "\n");
+        LookupState LS(std::move(IPLS));
+        Err = DG->tryToGenerate(LS, K, JD, JDLookupFlags, LookupSet);
+        IPLS = std::move(LS.IPLS);
+      }
+
+      // If the lookup returned then pop the generator stack and unblock the
+      // next lookup on this generator (if any).
+      if (IPLS)
+        OL_resumeLookupAfterGeneration(*IPLS);
+
+      // If there was an error then fail the query.
+      if (Err) {
+        LLVM_DEBUG({
+          dbgs() << "  Error attempting to generate " << LookupSet << "\n";
+        });
+        assert(IPLS && "LS cannot be retained if error is returned");
+        return IPLS->fail(std::move(Err));
+      }
+
+      // Otherwise if QA was captured then break the loop.
+      if (!IPLS) {
+        LLVM_DEBUG(
+            { dbgs() << "  LookupState captured. Exiting phase1 for now.\n"; });
+        return;
+      }
+
+      // Otherwise if we're continuing around the loop then update candidates
+      // for the next round.
+      runSessionLocked([&] {
+        LLVM_DEBUG(dbgs() << "  Updating candidate set post-generation\n");
+        Err = IL_updateCandidatesFor(
+            JD, JDLookupFlags, IPLS->DefGeneratorCandidates,
+            JD.DefGenerators.empty() ? nullptr
+                                     : &IPLS->DefGeneratorNonCandidates);
+      });
+
+      // If updating candidates failed then fail the query.
+      if (Err) {
+        LLVM_DEBUG(dbgs() << "  Error encountered while updating candidates\n");
+        return IPLS->fail(std::move(Err));
+      }
+    }
+
+    if (IPLS->DefGeneratorCandidates.empty() &&
+        IPLS->DefGeneratorNonCandidates.empty()) {
+      // Early out if there are no remaining symbols.
+      LLVM_DEBUG(dbgs() << "All symbols matched.\n");
+      IPLS->CurSearchOrderIndex = IPLS->SearchOrder.size();
+      break;
+    } else {
+      // If we get here then we've moved on to the next JITDylib with candidates
+      // remaining.
+      LLVM_DEBUG(dbgs() << "Phase 1 moving to next JITDylib.\n");
+      ++IPLS->CurSearchOrderIndex;
+      IPLS->NewJITDylib = true;
+    }
+  }
+
+  // Remove any weakly referenced candidates that could not be found/generated.
+  IPLS->DefGeneratorCandidates.remove_if(
+      [](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+        return SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol;
+      });
+
+  // If we get here then we've finished searching all JITDylibs.
+  // If we matched all symbols then move to phase 2, otherwise fail the query
+  // with a SymbolsNotFound error.
+  if (IPLS->DefGeneratorCandidates.empty()) {
+    LLVM_DEBUG(dbgs() << "Phase 1 succeeded.\n");
+    IPLS->complete(std::move(IPLS));
+  } else {
+    LLVM_DEBUG(dbgs() << "Phase 1 failed with unresolved symbols.\n");
+    IPLS->fail(make_error<SymbolsNotFound>(
+        getSymbolStringPool(), IPLS->DefGeneratorCandidates.getSymbolNames()));
+  }
+}
+
+// Phase 2 of symbol lookup: under the session lock, match every remaining
+// lookup-set symbol against its JITDylib entry, attach the query to symbols
+// not yet in the required state, and collect any not-yet-started
+// MaterializationUnits for dispatch. If lodging fails the query is detached
+// and collected MUs are restored to their JITDylibs so the session state is
+// unchanged. On success, completed queries are notified and outstanding MUs
+// are dispatched outside the lock.
+void ExecutionSession::OL_completeLookup(
+    std::unique_ptr<InProgressLookupState> IPLS,
+    std::shared_ptr<AsynchronousSymbolQuery> Q,
+    RegisterDependenciesFunction RegisterDependencies) {
+
+  LLVM_DEBUG({
+    dbgs() << "Entering OL_completeLookup:\n"
+           << "  Lookup kind: " << IPLS->K << "\n"
+           << "  Search order: " << IPLS->SearchOrder
+           << ", Current index = " << IPLS->CurSearchOrderIndex
+           << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+           << "  Lookup set: " << IPLS->LookupSet << "\n"
+           << "  Definition generator candidates: "
+           << IPLS->DefGeneratorCandidates << "\n"
+           << "  Definition generator non-candidates: "
+           << IPLS->DefGeneratorNonCandidates << "\n";
+  });
+
+  bool QueryComplete = false;
+  DenseMap<JITDylib *, JITDylib::UnmaterializedInfosList> CollectedUMIs;
+
+  auto LodgingErr = runSessionLocked([&]() -> Error {
+    for (auto &KV : IPLS->SearchOrder) {
+      auto &JD = *KV.first;
+      auto JDLookupFlags = KV.second;
+      LLVM_DEBUG({
+        dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+               << ") with lookup set " << IPLS->LookupSet << ":\n";
+      });
+
+      auto Err = IPLS->LookupSet.forEachWithRemoval(
+          [&](const SymbolStringPtr &Name,
+              SymbolLookupFlags SymLookupFlags) -> Expected<bool> {
+            LLVM_DEBUG({
+              dbgs() << "  Attempting to match \"" << Name << "\" ("
+                     << SymLookupFlags << ")... ";
+            });
+
+            // Search for the symbol. If not found then continue without
+            // removal.
+            auto SymI = JD.Symbols.find(Name);
+            if (SymI == JD.Symbols.end()) {
+              LLVM_DEBUG(dbgs() << "skipping: not present\n");
+              return false;
+            }
+
+            // If this is a non-exported symbol and we're matching exported
+            // symbols only then skip this symbol without removal.
+            if (!SymI->second.getFlags().isExported() &&
+                JDLookupFlags ==
+                    JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+              LLVM_DEBUG(dbgs() << "skipping: not exported\n");
+              return false;
+            }
+
+            // If we match against a materialization-side-effects only symbol
+            // then make sure it is weakly-referenced. Otherwise bail out with
+            // an error.
+            // FIXME: Use a "materialization-side-effects-only symbols must be
+            // weakly referenced" specific error here to reduce confusion.
+            if (SymI->second.getFlags().hasMaterializationSideEffectsOnly() &&
+                SymLookupFlags != SymbolLookupFlags::WeaklyReferencedSymbol) {
+              LLVM_DEBUG({
+                dbgs() << "error: "
+                          "required, but symbol is has-side-effects-only\n";
+              });
+              return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                                 SymbolNameVector({Name}));
+            }
+
+            // If we matched against this symbol but it is in the error state
+            // then bail out and treat it as a failure to materialize.
+            if (SymI->second.getFlags().hasError()) {
+              LLVM_DEBUG(dbgs() << "error: symbol is in error state\n");
+              auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+              (*FailedSymbolsMap)[&JD] = {Name};
+              return make_error<FailedToMaterialize>(
+                  getSymbolStringPool(), std::move(FailedSymbolsMap));
+            }
+
+            // Otherwise this is a match.
+
+            // If this symbol is already in the required state then notify the
+            // query, remove the symbol and continue.
+            if (SymI->second.getState() >= Q->getRequiredState()) {
+              LLVM_DEBUG(dbgs()
+                         << "matched, symbol already in required state\n");
+              Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+              return true;
+            }
+
+            // Otherwise this symbol does not yet meet the required state. Check
+            // whether it has a materializer attached, and if so prepare to run
+            // it.
+            if (SymI->second.hasMaterializerAttached()) {
+              assert(SymI->second.getAddress() == ExecutorAddr() &&
+                     "Symbol not resolved but already has address?");
+              auto UMII = JD.UnmaterializedInfos.find(Name);
+              assert(UMII != JD.UnmaterializedInfos.end() &&
+                     "Lazy symbol should have UnmaterializedInfo");
+
+              auto UMI = UMII->second;
+              assert(UMI->MU && "Materializer should not be null");
+              assert(UMI->RT && "Tracker should not be null");
+              LLVM_DEBUG({
+                dbgs() << "matched, preparing to dispatch MU@" << UMI->MU.get()
+                       << " (" << UMI->MU->getName() << ")\n";
+              });
+
+              // Move all symbols associated with this MaterializationUnit into
+              // materializing state.
+              for (auto &KV : UMI->MU->getSymbols()) {
+                auto SymK = JD.Symbols.find(KV.first);
+                assert(SymK != JD.Symbols.end() &&
+                       "No entry for symbol covered by MaterializationUnit");
+                SymK->second.setMaterializerAttached(false);
+                SymK->second.setState(SymbolState::Materializing);
+                JD.UnmaterializedInfos.erase(KV.first);
+              }
+
+              // Add MU to the list of MaterializationUnits to be materialized.
+              CollectedUMIs[&JD].push_back(std::move(UMI));
+            } else
+              // NOTE(review): unlike the other debug messages in this lambda
+              // this one has no trailing '\n' — consider appending one.
+              LLVM_DEBUG(dbgs() << "matched, registering query");
+
+            // Add the query to the PendingQueries list and continue, deleting
+            // the element from the lookup set.
+            assert(SymI->second.getState() != SymbolState::NeverSearched &&
+                   SymI->second.getState() != SymbolState::Ready &&
+                   "By this line the symbol should be materializing");
+            auto &MI = JD.MaterializingInfos[Name];
+            MI.addQuery(Q);
+            Q->addQueryDependence(JD, Name);
+
+            return true;
+          });
+
+      JD.shrinkMaterializationInfoMemory();
+
+      // Handle failure.
+      if (Err) {
+
+        LLVM_DEBUG({
+          dbgs() << "Lookup failed. Detaching query and replacing MUs.\n";
+        });
+
+        // Detach the query.
+        Q->detach();
+
+        // Replace the MUs.
+        for (auto &KV : CollectedUMIs) {
+          auto &JD = *KV.first;
+          for (auto &UMI : KV.second)
+            for (auto &KV2 : UMI->MU->getSymbols()) {
+              assert(!JD.UnmaterializedInfos.count(KV2.first) &&
+                     "Unexpected materializer in map");
+              auto SymI = JD.Symbols.find(KV2.first);
+              assert(SymI != JD.Symbols.end() && "Missing symbol entry");
+              assert(SymI->second.getState() == SymbolState::Materializing &&
+                     "Can not replace symbol that is not materializing");
+              assert(!SymI->second.hasMaterializerAttached() &&
+                     "MaterializerAttached flag should not be set");
+              SymI->second.setMaterializerAttached(true);
+              JD.UnmaterializedInfos[KV2.first] = UMI;
+            }
+        }
+
+        return Err;
+      }
+    }
+
+    LLVM_DEBUG(dbgs() << "Stripping unmatched weakly-referenced symbols\n");
+    IPLS->LookupSet.forEachWithRemoval(
+        [&](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+          if (SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol) {
+            Q->dropSymbol(Name);
+            return true;
+          } else
+            return false;
+        });
+
+    if (!IPLS->LookupSet.empty()) {
+      LLVM_DEBUG(dbgs() << "Failing due to unresolved symbols\n");
+      return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                         IPLS->LookupSet.getSymbolNames());
+    }
+
+    // Record whether the query completed.
+    QueryComplete = Q->isComplete();
+
+    LLVM_DEBUG({
+      dbgs() << "Query successfully "
+             << (QueryComplete ? "completed" : "lodged") << "\n";
+    });
+
+    // Move the collected MUs to the OutstandingMUs list.
+    if (!CollectedUMIs.empty()) {
+      std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+
+      LLVM_DEBUG(dbgs() << "Adding MUs to dispatch:\n");
+      for (auto &KV : CollectedUMIs) {
+        LLVM_DEBUG({
+          auto &JD = *KV.first;
+          dbgs() << "  For " << JD.getName() << ": Adding " << KV.second.size()
+                 << " MUs.\n";
+        });
+        for (auto &UMI : KV.second) {
+          auto MR = createMaterializationResponsibility(
+              *UMI->RT, std::move(UMI->MU->SymbolFlags),
+              std::move(UMI->MU->InitSymbol));
+          OutstandingMUs.push_back(
+              std::make_pair(std::move(UMI->MU), std::move(MR)));
+        }
+      }
+    } else
+      LLVM_DEBUG(dbgs() << "No MUs to dispatch.\n");
+
+    if (RegisterDependencies && !Q->QueryRegistrations.empty()) {
+      LLVM_DEBUG(dbgs() << "Registering dependencies\n");
+      RegisterDependencies(Q->QueryRegistrations);
+    } else
+      LLVM_DEBUG(dbgs() << "No dependencies to register\n");
+
+    return Error::success();
+  });
+
+  if (LodgingErr) {
+    LLVM_DEBUG(dbgs() << "Failing query\n");
+    Q->detach();
+    Q->handleFailed(std::move(LodgingErr));
+    return;
+  }
+
+  if (QueryComplete) {
+    LLVM_DEBUG(dbgs() << "Completing query\n");
+    Q->handleComplete(*this);
+  }
+
+  // Dispatch collected MUs outside the session lock.
+  dispatchOutstandingMUs();
+}
+
+// Phase 2 handler for flags-only lookups: under the session lock, collect the
+// JITSymbolFlags for every lookup-set symbol found in the search order
+// (without triggering materialization), strip any unresolved weakly-referenced
+// symbols, and pass the resulting SymbolFlagsMap — or a SymbolsNotFound error
+// for any remaining unresolved symbols — to OnComplete.
+void ExecutionSession::OL_completeLookupFlags(
+    std::unique_ptr<InProgressLookupState> IPLS,
+    unique_function<void(Expected<SymbolFlagsMap>)> OnComplete) {
+
+  auto Result = runSessionLocked([&]() -> Expected<SymbolFlagsMap> {
+    LLVM_DEBUG({
+      dbgs() << "Entering OL_completeLookupFlags:\n"
+             << "  Lookup kind: " << IPLS->K << "\n"
+             << "  Search order: " << IPLS->SearchOrder
+             << ", Current index = " << IPLS->CurSearchOrderIndex
+             << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+             << "  Lookup set: " << IPLS->LookupSet << "\n"
+             << "  Definition generator candidates: "
+             << IPLS->DefGeneratorCandidates << "\n"
+             << "  Definition generator non-candidates: "
+             << IPLS->DefGeneratorNonCandidates << "\n";
+    });
+
+    SymbolFlagsMap Result;
+
+    // Attempt to find flags for each symbol.
+    for (auto &KV : IPLS->SearchOrder) {
+      auto &JD = *KV.first;
+      auto JDLookupFlags = KV.second;
+      LLVM_DEBUG({
+        dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+               << ") with lookup set " << IPLS->LookupSet << ":\n";
+      });
+
+      IPLS->LookupSet.forEachWithRemoval([&](const SymbolStringPtr &Name,
+                                             SymbolLookupFlags SymLookupFlags) {
+        LLVM_DEBUG({
+          dbgs() << "  Attempting to match \"" << Name << "\" ("
+                 << SymLookupFlags << ")... ";
+        });
+
+        // Search for the symbol. If not found then continue without removing
+        // from the lookup set.
+        auto SymI = JD.Symbols.find(Name);
+        if (SymI == JD.Symbols.end()) {
+          LLVM_DEBUG(dbgs() << "skipping: not present\n");
+          return false;
+        }
+
+        // If this is a non-exported symbol then it doesn't match. Skip it.
+        if (!SymI->second.getFlags().isExported() &&
+            JDLookupFlags == JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+          LLVM_DEBUG(dbgs() << "skipping: not exported\n");
+          return false;
+        }
+
+        LLVM_DEBUG({
+          dbgs() << "matched, \"" << Name << "\" -> " << SymI->second.getFlags()
+                 << "\n";
+        });
+        Result[Name] = SymI->second.getFlags();
+        return true;
+      });
+    }
+
+    // Remove any weakly referenced symbols that haven't been resolved.
+    IPLS->LookupSet.remove_if(
+        [](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+          return SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol;
+        });
+
+    if (!IPLS->LookupSet.empty()) {
+      LLVM_DEBUG(dbgs() << "Failing due to unresolved symbols\n");
+      return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                         IPLS->LookupSet.getSymbolNames());
+    }
+
+    // Fixed typo in debug output: "Succeded" -> "Succeeded".
+    LLVM_DEBUG(dbgs() << "Succeeded, result = " << Result << "\n");
+    return Result;
+  });
+
+  // Run the callback on the result.
+  LLVM_DEBUG(dbgs() << "Sending result to handler.\n");
+  OnComplete(std::move(Result));
+}
+
+// Tears down a MaterializationResponsibility: all of its symbols must have
+// been materialized or failed already (SymbolFlags empty), after which the
+// MR is unlinked from its target JITDylib.
+void ExecutionSession::OL_destroyMaterializationResponsibility(
+    MaterializationResponsibility &MR) {
+
+  assert(MR.SymbolFlags.empty() &&
+         "All symbols should have been explicitly materialized or failed");
+  MR.JD.unlinkMaterializationResponsibility(MR);
+}
+
+// Returns the subset of MR's symbols that have actually been requested by
+// queries, as reported by the target JITDylib.
+SymbolNameSet ExecutionSession::OL_getRequestedSymbols(
+    const MaterializationResponsibility &MR) {
+  return MR.JD.getRequestedSymbols(MR.SymbolFlags);
+}
+
+// Forwards symbol resolution to the target JITDylib. In asserts builds,
+// first verifies that each resolved symbol belongs to this responsibility
+// set, is not materialization-side-effects-only, and carries flags matching
+// those claimed by the MR (ignoring the Common bit, which may legitimately
+// differ between claim and resolution).
+Error ExecutionSession::OL_notifyResolved(MaterializationResponsibility &MR,
+                                          const SymbolMap &Symbols) {
+  LLVM_DEBUG({
+    dbgs() << "In " << MR.JD.getName() << " resolving " << Symbols << "\n";
+  });
+#ifndef NDEBUG
+  for (auto &KV : Symbols) {
+    auto I = MR.SymbolFlags.find(KV.first);
+    assert(I != MR.SymbolFlags.end() &&
+           "Resolving symbol outside this responsibility set");
+    assert(!I->second.hasMaterializationSideEffectsOnly() &&
+           "Can't resolve materialization-side-effects-only symbol");
+    assert((KV.second.getFlags() & ~JITSymbolFlags::Common) ==
+               (I->second & ~JITSymbolFlags::Common) &&
+           "Resolving symbol with incorrect flags");
+  }
+#endif
+
+  return MR.JD.resolve(MR, Symbols);
+}
+
+// Propagates "extra-emit" dependencies through the EmissionDepUnit graph to a
+// fixed point. Each worklist entry's NewDeps are pushed to its IntraEmitUsers;
+// a user whose dependency set grows is itself (re-)queued. HandleNewDep is
+// invoked once for each (user-EDU, dep-JD, dep-symbol) edge newly added.
+template <typename HandleNewDepFn>
+void ExecutionSession::propagateExtraEmitDeps(
+    std::deque<JITDylib::EmissionDepUnit *> Worklist, EDUInfosMap &EDUInfos,
+    HandleNewDepFn HandleNewDep) {
+
+  // Iterate to a fixed-point to propagate extra-emit dependencies through the
+  // EDU graph.
+  while (!Worklist.empty()) {
+    auto &EDU = *Worklist.front();
+    Worklist.pop_front();
+
+    assert(EDUInfos.count(&EDU) && "No info entry for EDU");
+    auto &EDUInfo = EDUInfos[&EDU];
+
+    // Propagate new dependencies to users.
+    for (auto *UserEDU : EDUInfo.IntraEmitUsers) {
+
+      // UserEDUInfo only present if UserEDU has its own users.
+      JITDylib::EmissionDepUnitInfo *UserEDUInfo = nullptr;
+      {
+        auto UserEDUInfoItr = EDUInfos.find(UserEDU);
+        if (UserEDUInfoItr != EDUInfos.end())
+          UserEDUInfo = &UserEDUInfoItr->second;
+      }
+
+      for (auto &[DepJD, Deps] : EDUInfo.NewDeps) {
+        auto &UserEDUDepsForJD = UserEDU->Dependencies[DepJD];
+        DenseSet<NonOwningSymbolStringPtr> *UserEDUNewDepsForJD = nullptr;
+        for (auto Dep : Deps) {
+          // insert().second is true only for deps UserEDU didn't already have.
+          if (UserEDUDepsForJD.insert(Dep).second) {
+            HandleNewDep(*UserEDU, *DepJD, Dep);
+            if (UserEDUInfo) {
+              if (!UserEDUNewDepsForJD) {
+                // If UserEDU has no new deps then it's not in the worklist
+                // yet, so add it.
+                if (UserEDUInfo->NewDeps.empty())
+                  Worklist.push_back(UserEDU);
+                UserEDUNewDepsForJD = &UserEDUInfo->NewDeps[DepJD];
+              }
+              // Add (DepJD, Dep) to NewDeps.
+              UserEDUNewDepsForJD->insert(Dep);
+            }
+          }
+        }
+      }
+    }
+
+    // This EDU's new deps have been fully pushed to its users; clear them so
+    // a later re-queue only propagates genuinely new additions.
+    EDUInfo.NewDeps.clear();
+  }
+}
+
+// Note: This method modifies the emitted set.
+// Builds one EmissionDepUnit per SymbolDependenceGroup (plus a "residual" EDU
+// for symbols with no dependencies), removes dependency edges that stay
+// within this emission (recording them as intra-emit uses instead), and
+// propagates the remaining external dependencies through the EDU graph.
+// Returns the resulting EDU-info map.
+ExecutionSession::EDUInfosMap ExecutionSession::simplifyDepGroups(
+    MaterializationResponsibility &MR,
+    ArrayRef<SymbolDependenceGroup> EmittedDeps) {
+
+  auto &TargetJD = MR.getTargetJITDylib();
+
+  // 1. Build initial EmissionDepUnit -> EmissionDepUnitInfo and
+  //    Symbol -> EmissionDepUnit mappings.
+  DenseMap<JITDylib::EmissionDepUnit *, JITDylib::EmissionDepUnitInfo> EDUInfos;
+  EDUInfos.reserve(EmittedDeps.size());
+  DenseMap<NonOwningSymbolStringPtr, JITDylib::EmissionDepUnit *> EDUForSymbol;
+  for (auto &DG : EmittedDeps) {
+    assert(!DG.Symbols.empty() && "DepGroup does not cover any symbols");
+
+    // Skip empty EDUs.
+    if (DG.Dependencies.empty())
+      continue;
+
+    auto TmpEDU = std::make_shared<JITDylib::EmissionDepUnit>(TargetJD);
+    auto &EDUInfo = EDUInfos[TmpEDU.get()];
+    EDUInfo.EDU = std::move(TmpEDU);
+    for (const auto &Symbol : DG.Symbols) {
+      NonOwningSymbolStringPtr NonOwningSymbol(Symbol);
+      assert(!EDUForSymbol.count(NonOwningSymbol) &&
+             "Symbol should not appear in more than one SymbolDependenceGroup");
+      assert(MR.getSymbols().count(Symbol) &&
+             "Symbol in DepGroups not in the emitted set");
+      auto NewlyEmittedItr = MR.getSymbols().find(Symbol);
+      EDUInfo.EDU->Symbols[NonOwningSymbol] = NewlyEmittedItr->second;
+      EDUForSymbol[NonOwningSymbol] = EDUInfo.EDU.get();
+    }
+  }
+
+  // 2. Build a "residual" EDU to cover all symbols that have no dependencies.
+  {
+    DenseMap<NonOwningSymbolStringPtr, JITSymbolFlags> ResidualSymbolFlags;
+    for (auto &[Sym, Flags] : MR.getSymbols()) {
+      if (!EDUForSymbol.count(NonOwningSymbolStringPtr(Sym)))
+        ResidualSymbolFlags[NonOwningSymbolStringPtr(Sym)] = Flags;
+    }
+    if (!ResidualSymbolFlags.empty()) {
+      auto ResidualEDU = std::make_shared<JITDylib::EmissionDepUnit>(TargetJD);
+      ResidualEDU->Symbols = std::move(ResidualSymbolFlags);
+      auto &ResidualEDUInfo = EDUInfos[ResidualEDU.get()];
+      ResidualEDUInfo.EDU = std::move(ResidualEDU);
+
+      // If the residual EDU is the only one then bail out early.
+      if (EDUInfos.size() == 1)
+        return EDUInfos;
+
+      // Otherwise add the residual EDU to the EDUForSymbol map.
+      for (auto &[Sym, Flags] : ResidualEDUInfo.EDU->Symbols)
+        EDUForSymbol[Sym] = ResidualEDUInfo.EDU.get();
+    }
+  }
+
+#ifndef NDEBUG
+  assert(EDUForSymbol.size() == MR.getSymbols().size() &&
+         "MR symbols not fully covered by EDUs?");
+  for (auto &[Sym, Flags] : MR.getSymbols()) {
+    assert(EDUForSymbol.count(NonOwningSymbolStringPtr(Sym)) &&
+           "Sym in MR not covered by EDU");
+  }
+#endif // NDEBUG
+
+  // 3. Use the DepGroups array to build a graph of dependencies between
+  //    EmissionDepUnits in this finalization. We want to remove these
+  //    intra-finalization uses, propagating dependencies on symbols outside
+  //    this finalization. Add EDUs to the worklist.
+  for (auto &DG : EmittedDeps) {
+
+    // Skip SymbolDependenceGroups with no dependencies.
+    if (DG.Dependencies.empty())
+      continue;
+
+    assert(EDUForSymbol.count(NonOwningSymbolStringPtr(*DG.Symbols.begin())) &&
+           "No EDU for DG");
+    auto &EDU =
+        *EDUForSymbol.find(NonOwningSymbolStringPtr(*DG.Symbols.begin()))
+             ->second;
+
+    for (auto &[DepJD, Deps] : DG.Dependencies) {
+      DenseSet<NonOwningSymbolStringPtr> NewDepsForJD;
+
+      assert(!Deps.empty() && "Dependence set for DepJD is empty");
+
+      if (DepJD != &TargetJD) {
+        // DepJD is some other JITDylib. There can't be any intra-finalization
+        // edges here, so just skip.
+        for (auto &Dep : Deps)
+          NewDepsForJD.insert(NonOwningSymbolStringPtr(Dep));
+      } else {
+        // DepJD is the Target JITDylib. Check for intra-finalization edges,
+        // skipping any and recording the intra-finalization use instead.
+        for (auto &Dep : Deps) {
+          NonOwningSymbolStringPtr NonOwningDep(Dep);
+          auto I = EDUForSymbol.find(NonOwningDep);
+          if (I == EDUForSymbol.end()) {
+            if (!MR.getSymbols().count(Dep))
+              NewDepsForJD.insert(NonOwningDep);
+            continue;
+          }
+
+          if (I->second != &EDU)
+            EDUInfos[I->second].IntraEmitUsers.insert(&EDU);
+        }
+      }
+
+      if (!NewDepsForJD.empty())
+        EDU.Dependencies[DepJD] = std::move(NewDepsForJD);
+    }
+  }
+
+  // 4. Build the worklist.
+  std::deque<JITDylib::EmissionDepUnit *> Worklist;
+  for (auto &[EDU, EDUInfo] : EDUInfos) {
+    // If this EDU has extra-finalization dependencies and intra-finalization
+    // users then add it to the worklist.
+    if (!EDU->Dependencies.empty()) {
+      auto I = EDUInfos.find(EDU);
+      if (I != EDUInfos.end()) {
+        auto &EDUInfo = I->second;
+        if (!EDUInfo.IntraEmitUsers.empty()) {
+          EDUInfo.NewDeps = EDU->Dependencies;
+          Worklist.push_back(EDU);
+        }
+      }
+    }
+  }
+
+  // 5. Propagate dependencies through the EDU graph.
+  propagateExtraEmitDeps(
+      Worklist, EDUInfos,
+      [](JITDylib::EmissionDepUnit &, JITDylib &, NonOwningSymbolStringPtr) {});
+
+  return EDUInfos;
+}
+
+// Transitions every symbol covered by EDU to the Ready state (session lock
+// held by caller — IL_ prefix). Pending queries whose required state is now
+// met are notified; queries that thereby become complete are accumulated in
+// Queries for the caller to dispatch. MaterializingInfos for the symbols are
+// erased.
+void ExecutionSession::IL_makeEDUReady(
+    std::shared_ptr<JITDylib::EmissionDepUnit> EDU,
+    JITDylib::AsynchronousSymbolQuerySet &Queries) {
+
+  // The symbols for this EDU are ready.
+  auto &JD = *EDU->JD;
+
+  for (auto &[Sym, Flags] : EDU->Symbols) {
+    assert(JD.Symbols.count(SymbolStringPtr(Sym)) &&
+           "JD does not have an entry for Sym");
+    auto &Entry = JD.Symbols[SymbolStringPtr(Sym)];
+
+    assert(((Entry.getFlags().hasMaterializationSideEffectsOnly() &&
+             Entry.getState() == SymbolState::Materializing) ||
+            Entry.getState() == SymbolState::Resolved ||
+            Entry.getState() == SymbolState::Emitted) &&
+           "Emitting from state other than Resolved");
+
+    Entry.setState(SymbolState::Ready);
+
+    auto MII = JD.MaterializingInfos.find(SymbolStringPtr(Sym));
+
+    // Check for pending queries.
+    if (MII == JD.MaterializingInfos.end())
+      continue;
+    auto &MI = MII->second;
+
+    for (auto &Q : MI.takeQueriesMeeting(SymbolState::Ready)) {
+      Q->notifySymbolMetRequiredState(SymbolStringPtr(Sym), Entry.getSymbol());
+      if (Q->isComplete())
+        Queries.insert(Q);
+      Q->removeQueryDependence(JD, SymbolStringPtr(Sym));
+    }
+
+    // Ready symbols no longer need a MaterializingInfo entry.
+    JD.MaterializingInfos.erase(MII);
+  }
+
+  JD.shrinkMaterializationInfoMemory();
+}
+
+// Transitions every symbol covered by EDU to the Emitted (but not yet Ready)
+// state (session lock held by caller — IL_ prefix). Records EDU as the
+// DefiningEDU on each symbol's MaterializingInfo, notifies queries waiting
+// for Emitted, and registers this EDU as a dependant on each of its
+// still-outstanding dependencies.
+void ExecutionSession::IL_makeEDUEmitted(
+    std::shared_ptr<JITDylib::EmissionDepUnit> EDU,
+    JITDylib::AsynchronousSymbolQuerySet &Queries) {
+
+  // The symbols for this EDU are emitted, but not ready.
+  auto &JD = *EDU->JD;
+
+  for (auto &[Sym, Flags] : EDU->Symbols) {
+    assert(JD.Symbols.count(SymbolStringPtr(Sym)) &&
+           "JD does not have an entry for Sym");
+    auto &Entry = JD.Symbols[SymbolStringPtr(Sym)];
+
+    assert(((Entry.getFlags().hasMaterializationSideEffectsOnly() &&
+             Entry.getState() == SymbolState::Materializing) ||
+            Entry.getState() == SymbolState::Resolved ||
+            Entry.getState() == SymbolState::Emitted) &&
+           "Emitting from state other than Resolved");
+
+    if (Entry.getState() == SymbolState::Emitted) {
+      // This was already emitted, so we can skip the rest of this loop.
+      // (In asserts builds, first verify every symbol in the EDU is in the
+      // same already-emitted state.)
+#ifndef NDEBUG
+      for (auto &[Sym, Flags] : EDU->Symbols) {
+        assert(JD.Symbols.count(SymbolStringPtr(Sym)) &&
+               "JD does not have an entry for Sym");
+        auto &Entry = JD.Symbols[SymbolStringPtr(Sym)];
+        assert(Entry.getState() == SymbolState::Emitted &&
+               "Symbols for EDU in inconsistent state");
+        assert(JD.MaterializingInfos.count(SymbolStringPtr(Sym)) &&
+               "Emitted symbol has no MI");
+        auto MI = JD.MaterializingInfos[SymbolStringPtr(Sym)];
+        assert(MI.takeQueriesMeeting(SymbolState::Emitted).empty() &&
+               "Already-emitted symbol has waiting-on-emitted queries");
+      }
+#endif // NDEBUG
+      break;
+    }
+
+    Entry.setState(SymbolState::Emitted);
+    auto &MI = JD.MaterializingInfos[SymbolStringPtr(Sym)];
+    MI.DefiningEDU = EDU;
+
+    for (auto &Q : MI.takeQueriesMeeting(SymbolState::Emitted)) {
+      Q->notifySymbolMetRequiredState(SymbolStringPtr(Sym), Entry.getSymbol());
+      if (Q->isComplete())
+        Queries.insert(Q);
+      Q->removeQueryDependence(JD, SymbolStringPtr(Sym));
+    }
+  }
+
+  // Register this EDU as a dependant on each symbol it still depends on, so
+  // that it can be made Ready (or failed) when those dependencies settle.
+  for (auto &[DepJD, Deps] : EDU->Dependencies) {
+    for (auto &Dep : Deps)
+      DepJD->MaterializingInfos[SymbolStringPtr(Dep)].DependantEDUs.insert(
+          EDU.get());
+  }
+}
+
+/// Removes the given dependence from EDU. If EDU's dependence set becomes
+/// empty then this function adds an entry for it to the EDUInfos map.
+/// Returns true if a new EDUInfosMap entry is added.
+/// (Session lock held by caller — IL_ prefix.)
+bool ExecutionSession::IL_removeEDUDependence(JITDylib::EmissionDepUnit &EDU,
+                                              JITDylib &DepJD,
+                                              NonOwningSymbolStringPtr DepSym,
+                                              EDUInfosMap &EDUInfos) {
+  assert(EDU.Dependencies.count(&DepJD) &&
+         "JD does not appear in Dependencies of DependantEDU");
+  assert(EDU.Dependencies[&DepJD].count(DepSym) &&
+         "Symbol does not appear in Dependencies of DependantEDU");
+  auto &JDDeps = EDU.Dependencies[&DepJD];
+  JDDeps.erase(DepSym);
+  if (JDDeps.empty()) {
+    // No remaining deps on DepJD: drop the whole per-JD entry.
+    EDU.Dependencies.erase(&DepJD);
+    if (EDU.Dependencies.empty()) {
+      // If the dependencies set has become empty then EDU _may_ be ready
+      // (we won't know for sure until we've propagated the extra-emit deps).
+      // Create an EDUInfo for it (if it doesn't have one already) so that
+      // it'll be visited after propagation.
+      auto &DepEDUInfo = EDUInfos[&EDU];
+      if (!DepEDUInfo.EDU) {
+        // Recover the owning shared_ptr for EDU via the DefiningEDU of its
+        // first symbol's MaterializingInfo.
+        assert(EDU.JD->Symbols.count(
+                   SymbolStringPtr(EDU.Symbols.begin()->first)) &&
+               "Missing symbol entry for first symbol in EDU");
+        auto DepEDUFirstMI = EDU.JD->MaterializingInfos.find(
+            SymbolStringPtr(EDU.Symbols.begin()->first));
+        assert(DepEDUFirstMI != EDU.JD->MaterializingInfos.end() &&
+               "Missing MI for first symbol in DependantEDU");
+        DepEDUInfo.EDU = DepEDUFirstMI->second.DefiningEDU;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Builds an UnsatisfiedSymbolDependencies error for an EDU whose dependency
+// JITDylib (ClosedJD) was closed before the EDU's symbols could become
+// ready. The failed set is all of EDU's symbols; the bad deps are EDU's
+// dependencies on ClosedJD.
+Error ExecutionSession::makeJDClosedError(JITDylib::EmissionDepUnit &EDU,
+                                          JITDylib &ClosedJD) {
+  SymbolNameSet FailedSymbols;
+  for (auto &[Sym, Flags] : EDU.Symbols)
+    FailedSymbols.insert(SymbolStringPtr(Sym));
+  SymbolDependenceMap BadDeps;
+  for (auto &Dep : EDU.Dependencies[&ClosedJD])
+    BadDeps[&ClosedJD].insert(SymbolStringPtr(Dep));
+  return make_error<UnsatisfiedSymbolDependencies>(
+      ClosedJD.getExecutionSession().getSymbolStringPool(), EDU.JD,
+      std::move(FailedSymbols), std::move(BadDeps),
+      ClosedJD.getName() + " is closed");
+}
+
+// Builds an UnsatisfiedSymbolDependencies error for an EDU some of whose
+// dependencies in BadJD were removed or entered the error state. The failed
+// set is all of EDU's symbols; BadDeps names the offending dependencies.
+Error ExecutionSession::makeUnsatisfiedDepsError(JITDylib::EmissionDepUnit &EDU,
+                                                 JITDylib &BadJD,
+                                                 SymbolNameSet BadDeps) {
+  SymbolNameSet FailedSymbols;
+  for (auto &[Sym, Flags] : EDU.Symbols)
+    FailedSymbols.insert(SymbolStringPtr(Sym));
+  SymbolDependenceMap BadDepsMap;
+  BadDepsMap[&BadJD] = std::move(BadDeps);
+  return make_error<UnsatisfiedSymbolDependencies>(
+      BadJD.getExecutionSession().getSymbolStringPool(), &BadJD,
+      std::move(FailedSymbols), std::move(BadDepsMap),
+      "dependencies removed or in error state");
+}
+
// Session-locked core of notifyEmitted (the IL_ prefix marks "inside lock").
// Verifies that every EmissionDepUnit's dependencies are still live, trims
// dependencies that are already Emitted/Ready, propagates transitive
// emit-time dependencies to dependant EDUs, and lodges or completes the
// affected EDUs. Returns the set of queries completed by this emission so
// the caller can notify them outside the session lock.
Expected<JITDylib::AsynchronousSymbolQuerySet>
ExecutionSession::IL_emit(MaterializationResponsibility &MR,
                          EDUInfosMap EDUInfos) {

  if (MR.RT->isDefunct())
    return make_error<ResourceTrackerDefunct>(MR.RT);

  auto &TargetJD = MR.getTargetJITDylib();
  if (TargetJD.State != JITDylib::Open)
    return make_error<StringError>("JITDylib " + TargetJD.getName() +
                                       " is defunct",
                                   inconvertibleErrorCode());
#ifdef EXPENSIVE_CHECKS
  verifySessionState("entering ExecutionSession::IL_emit");
#endif

  // Walk all EDUs:
  // 1. Verifying that dependencies are available (not removed or in the error
  //    state).
  // 2. Removing any dependencies that are already Ready.
  // 3. Lifting any EDUs for Emitted symbols into the EDUInfos map.
  // 4. Finding any dependant EDUs and lifting them into the EDUInfos map.
  std::deque<JITDylib::EmissionDepUnit *> Worklist;
  for (auto &[EDU, _] : EDUInfos)
    Worklist.push_back(EDU);

  for (auto *EDU : Worklist) {
    // Note: EDUInfo is re-fetched after any operation that may insert into
    // EDUInfos (a DenseMap insert can invalidate this pointer).
    auto *EDUInfo = &EDUInfos[EDU];

    SmallVector<JITDylib *> DepJDsToRemove;
    for (auto &[DepJD, Deps] : EDU->Dependencies) {
      // Dependencies on a closed JITDylib can never be satisfied.
      if (DepJD->State != JITDylib::Open)
        return makeJDClosedError(*EDU, *DepJD);

      SymbolNameSet BadDeps;
      SmallVector<NonOwningSymbolStringPtr> DepsToRemove;
      for (auto &Dep : Deps) {
        auto DepEntryItr = DepJD->Symbols.find(SymbolStringPtr(Dep));

        // If this dep has been removed or moved to the error state then add it
        // to the bad deps set. We aggregate these bad deps for more
        // comprehensive error messages.
        if (DepEntryItr == DepJD->Symbols.end() ||
            DepEntryItr->second.getFlags().hasError()) {
          BadDeps.insert(SymbolStringPtr(Dep));
          continue;
        }

        // If this dep isn't emitted yet then just add it to the NewDeps set to
        // be propagated.
        auto &DepEntry = DepEntryItr->second;
        if (DepEntry.getState() < SymbolState::Emitted) {
          EDUInfo->NewDeps[DepJD].insert(Dep);
          continue;
        }

        // This dep has been emitted, so add it to the list to be removed from
        // EDU.
        DepsToRemove.push_back(Dep);

        // If Dep is Ready then there's nothing further to do.
        if (DepEntry.getState() == SymbolState::Ready) {
          assert(!DepJD->MaterializingInfos.count(SymbolStringPtr(Dep)) &&
                 "Unexpected MaterializationInfo attached to ready symbol");
          continue;
        }

        // If we get here then Dep is Emitted. We need to look up its defining
        // EDU and add this EDU to the defining EDU's list of users (this means
        // creating an EDUInfos entry if the defining EDU doesn't have one
        // already).
        assert(DepJD->MaterializingInfos.count(SymbolStringPtr(Dep)) &&
               "Expected MaterializationInfo for emitted dependency");
        auto &DepMI = DepJD->MaterializingInfos[SymbolStringPtr(Dep)];
        assert(DepMI.DefiningEDU &&
               "Emitted symbol does not have a defining EDU");
        assert(!DepMI.DefiningEDU->Dependencies.empty() &&
               "Emitted symbol has empty dependencies (should be ready)");
        assert(DepMI.DependantEDUs.empty() &&
               "Already-emitted symbol has dependant EDUs?");
        auto &DepEDUInfo = EDUInfos[DepMI.DefiningEDU.get()];
        if (!DepEDUInfo.EDU) {
          // No EDUInfo yet -- build initial entry, and reset the EDUInfo
          // pointer, which we will have invalidated.
          EDUInfo = &EDUInfos[EDU];
          DepEDUInfo.EDU = DepMI.DefiningEDU;
          for (auto &[DepDepJD, DepDeps] : DepEDUInfo.EDU->Dependencies) {
            if (DepDepJD == &TargetJD) {
              // Deps on symbols covered by this MR are being resolved by this
              // very emission, so only carry over the ones outside MR.
              for (auto &DepDep : DepDeps)
                if (!MR.getSymbols().count(SymbolStringPtr(DepDep)))
                  DepEDUInfo.NewDeps[DepDepJD].insert(DepDep);
            } else
              DepEDUInfo.NewDeps[DepDepJD] = DepDeps;
          }
        }
        DepEDUInfo.IntraEmitUsers.insert(EDU);
      }

      // Some dependencies were removed or in an error state -- error out.
      if (!BadDeps.empty())
        return makeUnsatisfiedDepsError(*EDU, *DepJD, std::move(BadDeps));

      // Remove the emitted / ready deps from DepJD.
      for (auto &Dep : DepsToRemove)
        Deps.erase(Dep);

      // If there are no further deps in DepJD then flag it for removal too.
      if (Deps.empty())
        DepJDsToRemove.push_back(DepJD);
    }

    // Remove any JDs whose dependence sets have become empty.
    for (auto &DepJD : DepJDsToRemove) {
      assert(EDU->Dependencies.count(DepJD) &&
             "Trying to remove non-existent dep entries");
      EDU->Dependencies.erase(DepJD);
    }

    // Now look for users of this EDU.
    for (auto &[Sym, Flags] : EDU->Symbols) {
      assert(TargetJD.Symbols.count(SymbolStringPtr(Sym)) &&
             "Sym not present in symbol table");
      assert((TargetJD.Symbols[SymbolStringPtr(Sym)].getState() ==
                  SymbolState::Resolved ||
              TargetJD.Symbols[SymbolStringPtr(Sym)]
                  .getFlags()
                  .hasMaterializationSideEffectsOnly()) &&
             "Emitting symbol not in the resolved state");
      assert(!TargetJD.Symbols[SymbolStringPtr(Sym)].getFlags().hasError() &&
             "Symbol is already in an error state");

      auto MII = TargetJD.MaterializingInfos.find(SymbolStringPtr(Sym));
      if (MII == TargetJD.MaterializingInfos.end() ||
          MII->second.DependantEDUs.empty())
        continue;

      // Every EDU waiting on Sym becomes an intra-emit user of this EDU.
      for (auto &DependantEDU : MII->second.DependantEDUs) {
        if (IL_removeEDUDependence(*DependantEDU, TargetJD, Sym, EDUInfos))
          EDUInfo = &EDUInfos[EDU];
        EDUInfo->IntraEmitUsers.insert(DependantEDU);
      }
      MII->second.DependantEDUs.clear();
    }
  }

  // Any EDU that still has dependencies and has intra-emit users must push
  // those (new) dependencies out to its users.
  Worklist.clear();
  for (auto &[EDU, EDUInfo] : EDUInfos) {
    if (!EDUInfo.IntraEmitUsers.empty() && !EDU->Dependencies.empty()) {
      if (EDUInfo.NewDeps.empty())
        EDUInfo.NewDeps = EDU->Dependencies;
      Worklist.push_back(EDU);
    }
  }

  propagateExtraEmitDeps(
      Worklist, EDUInfos,
      [](JITDylib::EmissionDepUnit &EDU, JITDylib &JD,
         NonOwningSymbolStringPtr Sym) {
        JD.MaterializingInfos[SymbolStringPtr(Sym)].DependantEDUs.insert(&EDU);
      });

  JITDylib::AsynchronousSymbolQuerySet CompletedQueries;

  // Extract completed queries and lodge not-yet-ready EDUs in the
  // session.
  for (auto &[EDU, EDUInfo] : EDUInfos) {
    if (EDU->Dependencies.empty())
      IL_makeEDUReady(std::move(EDUInfo.EDU), CompletedQueries);
    else
      IL_makeEDUEmitted(std::move(EDUInfo.EDU), CompletedQueries);
  }

#ifdef EXPENSIVE_CHECKS
  verifySessionState("exiting ExecutionSession::IL_emit");
#endif

  return std::move(CompletedQueries);
}
+
// Public entry point for emission notification (OL_ = "outside lock").
// Validates and simplifies the caller-supplied dependence groups, runs the
// session-locked IL_emit step, then notifies any completed queries outside
// the lock.
Error ExecutionSession::OL_notifyEmitted(
    MaterializationResponsibility &MR,
    ArrayRef<SymbolDependenceGroup> DepGroups) {
  LLVM_DEBUG({
    dbgs() << "In " << MR.JD.getName() << " emitting " << MR.SymbolFlags
           << "\n";
    if (!DepGroups.empty()) {
      dbgs() << "  Initial dependencies:\n";
      for (auto &SDG : DepGroups) {
        dbgs() << "    Symbols: " << SDG.Symbols
               << ", Dependencies: " << SDG.Dependencies << "\n";
      }
    }
  });

#ifndef NDEBUG
  // Debug-build sanity check: each symbol in DepGroups must belong to this
  // MR, and no symbol may appear in more than one group.
  SymbolNameSet Visited;
  for (auto &DG : DepGroups) {
    for (auto &Sym : DG.Symbols) {
      assert(MR.SymbolFlags.count(Sym) &&
             "DG contains dependence for symbol outside this MR");
      assert(Visited.insert(Sym).second &&
             "DG contains duplicate entries for Name");
    }
  }
#endif // NDEBUG

  auto EDUInfos = simplifyDepGroups(MR, DepGroups);

  LLVM_DEBUG({
    dbgs() << "  Simplified dependencies:\n";
    for (auto &[EDU, EDUInfo] : EDUInfos) {
      dbgs() << "    Symbols: { ";
      for (auto &[Sym, Flags] : EDU->Symbols)
        dbgs() << Sym << " ";
      dbgs() << "}, Dependencies: { ";
      for (auto &[DepJD, Deps] : EDU->Dependencies) {
        dbgs() << "(" << DepJD->getName() << ", { ";
        for (auto &Dep : Deps)
          dbgs() << Dep << " ";
        dbgs() << "}) ";
      }
      dbgs() << "}\n";
    }
  });

  auto CompletedQueries =
      runSessionLocked([&]() { return IL_emit(MR, EDUInfos); });

  // On error bail out.
  if (!CompletedQueries)
    return CompletedQueries.takeError();

  // Emission succeeded: this MR no longer tracks any symbols.
  MR.SymbolFlags.clear();

  // Otherwise notify all the completed queries.
  for (auto &Q : *CompletedQueries) {
    assert(Q->isComplete() && "Q is not complete");
    Q->handleComplete(*this);
  }

  return Error::success();
}
+
+Error ExecutionSession::OL_defineMaterializing(
+ MaterializationResponsibility &MR, SymbolFlagsMap NewSymbolFlags) {
+
+ LLVM_DEBUG({
+ dbgs() << "In " << MR.JD.getName() << " defining materializing symbols "
+ << NewSymbolFlags << "\n";
+ });
+ if (auto AcceptedDefs =
+ MR.JD.defineMaterializing(MR, std::move(NewSymbolFlags))) {
+ // Add all newly accepted symbols to this responsibility object.
+ for (auto &KV : *AcceptedDefs)
+ MR.SymbolFlags.insert(KV);
+ return Error::success();
+ } else
+ return AcceptedDefs.takeError();
+}
+
// Session-locked implementation of symbol failure (IL_ = "inside lock").
// Moves each symbol in SymbolsToFail into the error state, transitively
// fails the symbols of any EDUs that depended on them, and returns (a) the
// queries that must be failed and (b) the complete map of failed symbols,
// so the caller can invoke failure handlers outside the session lock.
std::pair<JITDylib::AsynchronousSymbolQuerySet,
          std::shared_ptr<SymbolDependenceMap>>
ExecutionSession::IL_failSymbols(JITDylib &JD,
                                 const SymbolNameVector &SymbolsToFail) {

#ifdef EXPENSIVE_CHECKS
  verifySessionState("entering ExecutionSession::IL_failSymbols");
#endif

  JITDylib::AsynchronousSymbolQuerySet FailedQueries;
  auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
  // Collects and detaches all queries pending on MI, leaving MI with no
  // pending queries.
  auto ExtractFailedQueries = [&](JITDylib::MaterializingInfo &MI) {
    JITDylib::AsynchronousSymbolQueryList ToDetach;
    for (auto &Q : MI.pendingQueries()) {
      // Add the query to the list to be failed and detach it.
      FailedQueries.insert(Q);
      ToDetach.push_back(Q);
    }
    for (auto &Q : ToDetach)
      Q->detach();
    assert(!MI.hasQueriesPending() && "Queries still pending after detach");
  };

  for (auto &Name : SymbolsToFail) {
    (*FailedSymbolsMap)[&JD].insert(Name);

    // Look up the symbol to fail.
    auto SymI = JD.Symbols.find(Name);

    // FIXME: Revisit this. We should be able to assert sequencing between
    //        ResourceTracker removal and symbol failure.
    //
    // It's possible that this symbol has already been removed, e.g. if a
    // materialization failure happens concurrently with a ResourceTracker or
    // JITDylib removal. In that case we can safely skip this symbol and
    // continue.
    if (SymI == JD.Symbols.end())
      continue;
    auto &Sym = SymI->second;

    // If the symbol is already in the error state then we must have visited
    // it earlier.
    if (Sym.getFlags().hasError()) {
      assert(!JD.MaterializingInfos.count(Name) &&
             "Symbol in error state still has MaterializingInfo");
      continue;
    }

    // Move the symbol into the error state.
    Sym.setFlags(Sym.getFlags() | JITSymbolFlags::HasError);

    // FIXME: Come up with a sane mapping of state to
    // presence-of-MaterializingInfo so that we can assert presence / absence
    // here, rather than testing it.
    auto MII = JD.MaterializingInfos.find(Name);
    if (MII == JD.MaterializingInfos.end())
      continue;

    auto &MI = MII->second;

    // Collect queries to be failed for this MII.
    ExtractFailedQueries(MI);

    if (MI.DefiningEDU) {
      // If there is a DefiningEDU for this symbol then remove this
      // symbol from it.
      assert(MI.DependantEDUs.empty() &&
             "Symbol with DefiningEDU should not have DependantEDUs");
      assert(Sym.getState() >= SymbolState::Emitted &&
             "Symbol has EDU, should have been emitted");
      assert(MI.DefiningEDU->Symbols.count(NonOwningSymbolStringPtr(Name)) &&
             "Symbol does not appear in its DefiningEDU");
      MI.DefiningEDU->Symbols.erase(NonOwningSymbolStringPtr(Name));

      // Remove this EDU from the dependants lists of its dependencies.
      for (auto &[DepJD, DepSyms] : MI.DefiningEDU->Dependencies) {
        for (auto DepSym : DepSyms) {
          assert(DepJD->Symbols.count(SymbolStringPtr(DepSym)) &&
                 "DepSym not in DepJD");
          assert(DepJD->MaterializingInfos.count(SymbolStringPtr(DepSym)) &&
                 "DepSym has not MaterializingInfo");
          auto &SymMI = DepJD->MaterializingInfos[SymbolStringPtr(DepSym)];
          assert(SymMI.DependantEDUs.count(MI.DefiningEDU.get()) &&
                 "DefiningEDU missing from DependantEDUs list of dependency");
          SymMI.DependantEDUs.erase(MI.DefiningEDU.get());
        }
      }

      MI.DefiningEDU = nullptr;
    } else {
      // Otherwise if there are any EDUs waiting on this symbol then move
      // those symbols to the error state too, and deregister them from the
      // symbols that they depend on.
      // Note: dependence edges back to the symbol being failed are skipped
      // (not erased) inside the loop below, so MI.DependantEDUs is not
      // mutated while we iterate it; it is cleared wholesale after the loop.
      for (auto &DependantEDU : MI.DependantEDUs) {

        // Remove DependantEDU from all of its users DependantEDUs lists.
        for (auto &[DepJD, DepSyms] : DependantEDU->Dependencies) {
          for (auto DepSym : DepSyms) {
            // Skip self-reference to avoid invalidating the MI.DependantEDUs
            // map. We'll clear this later.
            if (DepJD == &JD && DepSym == Name)
              continue;
            assert(DepJD->Symbols.count(SymbolStringPtr(DepSym)) &&
                   "DepSym not in DepJD?");
            assert(DepJD->MaterializingInfos.count(SymbolStringPtr(DepSym)) &&
                   "DependantEDU not registered with symbol it depends on");
            auto &SymMI = DepJD->MaterializingInfos[SymbolStringPtr(DepSym)];
            assert(SymMI.DependantEDUs.count(DependantEDU) &&
                   "DependantEDU missing from DependantEDUs list");
            SymMI.DependantEDUs.erase(DependantEDU);
          }
        }

        // Move any symbols defined by DependantEDU into the error state and
        // fail any queries waiting on them.
        auto &DepJD = *DependantEDU->JD;
        auto DepEDUSymbols = std::move(DependantEDU->Symbols);
        for (auto &[DepName, Flags] : DepEDUSymbols) {
          auto DepSymItr = DepJD.Symbols.find(SymbolStringPtr(DepName));
          assert(DepSymItr != DepJD.Symbols.end() &&
                 "Symbol not present in table");
          auto &DepSym = DepSymItr->second;

          assert(DepSym.getState() >= SymbolState::Emitted &&
                 "Symbol has EDU, should have been emitted");
          assert(!DepSym.getFlags().hasError() &&
                 "Symbol is already in the error state?");
          DepSym.setFlags(DepSym.getFlags() | JITSymbolFlags::HasError);
          (*FailedSymbolsMap)[&DepJD].insert(SymbolStringPtr(DepName));

          // This symbol has a defining EDU so its MaterializingInfo object must
          // exist.
          auto DepMIItr =
              DepJD.MaterializingInfos.find(SymbolStringPtr(DepName));
          assert(DepMIItr != DepJD.MaterializingInfos.end() &&
                 "Symbol has defining EDU but not MaterializingInfo");
          auto &DepMI = DepMIItr->second;
          assert(DepMI.DefiningEDU.get() == DependantEDU &&
                 "Bad EDU dependence edge");
          assert(DepMI.DependantEDUs.empty() &&
                 "Symbol was emitted, should not have any DependantEDUs");
          ExtractFailedQueries(DepMI);
          DepJD.MaterializingInfos.erase(SymbolStringPtr(DepName));
        }

        DepJD.shrinkMaterializationInfoMemory();
      }

      MI.DependantEDUs.clear();
    }

    assert(!MI.DefiningEDU && "DefiningEDU should have been reset");
    assert(MI.DependantEDUs.empty() &&
           "DependantEDUs should have been removed above");
    assert(!MI.hasQueriesPending() &&
           "Can not delete MaterializingInfo with queries pending");
    JD.MaterializingInfos.erase(Name);
  }

  JD.shrinkMaterializationInfoMemory();

#ifdef EXPENSIVE_CHECKS
  verifySessionState("exiting ExecutionSession::IL_failSymbols");
#endif

  return std::make_pair(std::move(FailedQueries), std::move(FailedSymbolsMap));
}
+
// Fails all symbols still covered by MR (OL_ = "outside lock"). The failed
// queries and symbol map are computed under the session lock via
// IL_failSymbols, then the queries' failure handlers run outside the lock.
void ExecutionSession::OL_notifyFailed(MaterializationResponsibility &MR) {

  LLVM_DEBUG({
    dbgs() << "In " << MR.JD.getName() << " failing materialization for "
           << MR.SymbolFlags << "\n";
  });

  // Nothing left to fail.
  if (MR.SymbolFlags.empty())
    return;

  // Snapshot the symbol names and clear MR's responsibility set: after this
  // call MR no longer tracks any symbols.
  SymbolNameVector SymbolsToFail;
  for (auto &[Name, Flags] : MR.SymbolFlags)
    SymbolsToFail.push_back(Name);
  MR.SymbolFlags.clear();

  JITDylib::AsynchronousSymbolQuerySet FailedQueries;
  std::shared_ptr<SymbolDependenceMap> FailedSymbols;

  std::tie(FailedQueries, FailedSymbols) = runSessionLocked([&]() {
    // If the tracker is defunct then there's nothing to do here.
    if (MR.RT->isDefunct())
      return std::pair<JITDylib::AsynchronousSymbolQuerySet,
                       std::shared_ptr<SymbolDependenceMap>>();
    return IL_failSymbols(MR.getTargetJITDylib(), SymbolsToFail);
  });

  // Notify the affected queries outside the session lock.
  for (auto &Q : FailedQueries)
    Q->handleFailed(
        make_error<FailedToMaterialize>(getSymbolStringPool(), FailedSymbols));
}
+
+Error ExecutionSession::OL_replace(MaterializationResponsibility &MR,
+ std::unique_ptr<MaterializationUnit> MU) {
+ for (auto &KV : MU->getSymbols()) {
+ assert(MR.SymbolFlags.count(KV.first) &&
+ "Replacing definition outside this responsibility set");
+ MR.SymbolFlags.erase(KV.first);
+ }
+
+ if (MU->getInitializerSymbol() == MR.InitSymbol)
+ MR.InitSymbol = nullptr;
+
+ LLVM_DEBUG(MR.JD.getExecutionSession().runSessionLocked([&]() {
+ dbgs() << "In " << MR.JD.getName() << " replacing symbols with " << *MU
+ << "\n";
+ }););
+
+ return MR.JD.replace(MR, std::move(MU));
+}
+
+Expected<std::unique_ptr<MaterializationResponsibility>>
+ExecutionSession::OL_delegate(MaterializationResponsibility &MR,
+ const SymbolNameSet &Symbols) {
+
+ SymbolStringPtr DelegatedInitSymbol;
+ SymbolFlagsMap DelegatedFlags;
+
+ for (auto &Name : Symbols) {
+ auto I = MR.SymbolFlags.find(Name);
+ assert(I != MR.SymbolFlags.end() &&
+ "Symbol is not tracked by this MaterializationResponsibility "
+ "instance");
+
+ DelegatedFlags[Name] = std::move(I->second);
+ if (Name == MR.InitSymbol)
+ std::swap(MR.InitSymbol, DelegatedInitSymbol);
+
+ MR.SymbolFlags.erase(I);
+ }
+
+ return MR.JD.delegate(MR, std::move(DelegatedFlags),
+ std::move(DelegatedInitSymbol));
+}
+
#ifndef NDEBUG
// Debug-build helper: prints a one-line description of the task about to be
// dispatched. Runs under the session lock so that output from concurrent
// dispatches doesn't interleave.
void ExecutionSession::dumpDispatchInfo(Task &T) {
  runSessionLocked([&]() {
    dbgs() << "Dispatching: ";
    T.printDescription(dbgs());
    dbgs() << "\n";
  });
}
#endif // NDEBUG
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
new file mode 100644
index 000000000000..acbf33888ade
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
@@ -0,0 +1,522 @@
+//===------- DebugObjectManagerPlugin.cpp - JITLink debug objects ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// FIXME: Update Plugin to poke the debug object into a new JITLink section,
+// rather than creating a new allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkDylib.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <set>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::jitlink;
+using namespace llvm::object;
+
+namespace llvm {
+namespace orc {
+
// Abstract interface for a section recorded in a debug object. Subclasses
// patch format-specific section metadata once the section's final location
// in target memory is known.
class DebugObjectSection {
public:
  // Called once the section's load-address range in target memory is known.
  virtual void setTargetMemoryRange(SectionRange Range) = 0;
  // Optional debug dump; default is a no-op.
  virtual void dump(raw_ostream &OS, StringRef Name) {}
  virtual ~DebugObjectSection() = default;
};
+
// ELF implementation of DebugObjectSection: wraps a section header inside
// the (writable) copied object image and patches its sh_addr field in place.
template <typename ELFT>
class ELFDebugObjectSection : public DebugObjectSection {
public:
  // BinaryFormat ELF is not meant as a mutable format. We can only make changes
  // that don't invalidate the file structure.
  ELFDebugObjectSection(const typename ELFT::Shdr *Header)
      : Header(const_cast<typename ELFT::Shdr *>(Header)) {}

  // Writes the final load address into the wrapped header's sh_addr.
  void setTargetMemoryRange(SectionRange Range) override;
  // Prints the (possibly patched) load address and section name.
  void dump(raw_ostream &OS, StringRef Name) override;

  // Verifies that the wrapped header and the section data it describes lie
  // entirely within Buffer.
  Error validateInBounds(StringRef Buffer, const char *Name) const;

private:
  // Non-owning pointer into the debug object buffer owned by the enclosing
  // ELFDebugObject.
  typename ELFT::Shdr *Header;
};
+
+template <typename ELFT>
+void ELFDebugObjectSection<ELFT>::setTargetMemoryRange(SectionRange Range) {
+ // All recorded sections are candidates for load-address patching.
+ Header->sh_addr =
+ static_cast<typename ELFT::uint>(Range.getStart().getValue());
+}
+
+template <typename ELFT>
+Error ELFDebugObjectSection<ELFT>::validateInBounds(StringRef Buffer,
+ const char *Name) const {
+ const uint8_t *Start = Buffer.bytes_begin();
+ const uint8_t *End = Buffer.bytes_end();
+ const uint8_t *HeaderPtr = reinterpret_cast<uint8_t *>(Header);
+ if (HeaderPtr < Start || HeaderPtr + sizeof(typename ELFT::Shdr) > End)
+ return make_error<StringError>(
+ formatv("{0} section header at {1:x16} not within bounds of the "
+ "given debug object buffer [{2:x16} - {3:x16}]",
+ Name, &Header->sh_addr, Start, End),
+ inconvertibleErrorCode());
+ if (Header->sh_offset + Header->sh_size > Buffer.size())
+ return make_error<StringError>(
+ formatv("{0} section data [{1:x16} - {2:x16}] not within bounds of "
+ "the given debug object buffer [{3:x16} - {4:x16}]",
+ Name, Start + Header->sh_offset,
+ Start + Header->sh_offset + Header->sh_size, Start, End),
+ inconvertibleErrorCode());
+ return Error::success();
+}
+
+template <typename ELFT>
+void ELFDebugObjectSection<ELFT>::dump(raw_ostream &OS, StringRef Name) {
+ if (uint64_t Addr = Header->sh_addr) {
+ OS << formatv(" {0:x16} {1}\n", Addr, Name);
+ } else {
+ OS << formatv(" {0}\n", Name);
+ }
+}
+
// Bitset of per-debug-object state flags (stored in DebugObject::Flags).
enum DebugObjectFlags : int {
  // Request final target memory load-addresses for all sections.
  ReportFinalSectionLoadAddresses = 1 << 0,

  // We found sections with debug information when processing the input object.
  HasDebugSections = 1 << 1,
};
+
/// The plugin creates a debug object when JITLink starts processing the
/// corresponding LinkGraph. It provides access to the pass configuration of
/// the LinkGraph and calls the finalization function once the resulting link
/// artifact has been emitted.
///
class DebugObject {
public:
  DebugObject(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
              ExecutionSession &ES)
      : MemMgr(MemMgr), JD(JD), ES(ES), Flags(DebugObjectFlags{}) {}

  // Flag accessors; Flags is a bitset of DebugObjectFlags values.
  bool hasFlags(DebugObjectFlags F) const { return Flags & F; }
  void setFlags(DebugObjectFlags F) {
    Flags = static_cast<DebugObjectFlags>(Flags | F);
  }
  void clearFlags(DebugObjectFlags F) {
    Flags = static_cast<DebugObjectFlags>(Flags & ~F);
  }

  // Invoked with the target memory range of the finalized debug object, or
  // with the error that prevented finalization.
  using FinalizeContinuation = std::function<void(Expected<ExecutorAddrRange>)>;

  // Finalizes working memory, copies it to target memory, and then invokes
  // OnFinalize with the result.
  void finalizeAsync(FinalizeContinuation OnFinalize);

  // Releases the finalized target-memory allocation, if any. Deallocation
  // errors are reported to the ExecutionSession rather than propagated.
  virtual ~DebugObject() {
    if (Alloc) {
      std::vector<FinalizedAlloc> Allocs;
      Allocs.push_back(std::move(Alloc));
      if (Error Err = MemMgr.deallocate(std::move(Allocs)))
        ES.reportError(std::move(Err));
    }
  }

  // Called for each graph section once its final target memory range is
  // known. Default is a no-op; subclasses override to patch metadata.
  virtual void reportSectionTargetMemoryRange(StringRef Name,
                                              SectionRange TargetMem) {}

protected:
  using InFlightAlloc = JITLinkMemoryManager::InFlightAlloc;
  using FinalizedAlloc = JITLinkMemoryManager::FinalizedAlloc;

  // Subclass hook: produce the working-memory allocation holding the
  // finished debug object image.
  virtual Expected<SimpleSegmentAlloc> finalizeWorkingMemory() = 0;

  JITLinkMemoryManager &MemMgr;
  const JITLinkDylib *JD = nullptr;

private:
  ExecutionSession &ES;
  DebugObjectFlags Flags;
  // Owns the target memory after a successful finalizeAsync.
  FinalizedAlloc Alloc;
};
+
// Finalize working memory and take ownership of the resulting allocation. Start
// copying memory over to the target and pass on the result once we're done.
// Ownership of the allocation remains with us for the rest of our lifetime.
void DebugObject::finalizeAsync(FinalizeContinuation OnFinalize) {
  assert(!Alloc && "Cannot finalize more than once");

  if (auto SimpleSegAlloc = finalizeWorkingMemory()) {
    auto ROSeg = SimpleSegAlloc->getSegInfo(MemProt::Read);
    ExecutorAddrRange DebugObjRange(ROSeg.Addr, ROSeg.WorkingMem.size());
    // NOTE(review): the continuation captures `this`, so this assumes the
    // DebugObject outlives the asynchronous finalization -- confirm with
    // callers (DebugObjectManagerPlugin::notifyEmitted blocks on the
    // continuation, which upholds this).
    SimpleSegAlloc->finalize(
        [this, DebugObjRange,
         OnFinalize = std::move(OnFinalize)](Expected<FinalizedAlloc> FA) {
          if (FA) {
            Alloc = std::move(*FA);
            OnFinalize(DebugObjRange);
          } else
            OnFinalize(FA.takeError());
        });
  } else
    OnFinalize(SimpleSegAlloc.takeError());
}
+
+/// The current implementation of ELFDebugObject replicates the approach used in
+/// RuntimeDyld: It patches executable and data section headers in the given
+/// object buffer with load-addresses of their corresponding sections in target
+/// memory.
+///
class ELFDebugObject : public DebugObject {
public:
  // Factory: copies Buffer into a writable image and records its patchable
  // sections. Returns null (not an error) for unsupported ELF class or
  // endianness combinations.
  static Expected<std::unique_ptr<DebugObject>>
  Create(MemoryBufferRef Buffer, JITLinkContext &Ctx, ExecutionSession &ES);

  // Patches the named section's load address in the copied ELF image.
  void reportSectionTargetMemoryRange(StringRef Name,
                                      SectionRange TargetMem) override;

  StringRef getBuffer() const { return Buffer->getMemBufferRef().getBuffer(); }

protected:
  Expected<SimpleSegmentAlloc> finalizeWorkingMemory() override;

  // Validates Section against the buffer bounds and registers it for later
  // load-address patching.
  template <typename ELFT>
  Error recordSection(StringRef Name,
                      std::unique_ptr<ELFDebugObjectSection<ELFT>> Section);
  // Returns the recorded section with the given name, or null if none.
  DebugObjectSection *getSection(StringRef Name);

private:
  template <typename ELFT>
  static Expected<std::unique_ptr<ELFDebugObject>>
  CreateArchType(MemoryBufferRef Buffer, JITLinkMemoryManager &MemMgr,
                 const JITLinkDylib *JD, ExecutionSession &ES);

  static std::unique_ptr<WritableMemoryBuffer>
  CopyBuffer(MemoryBufferRef Buffer, Error &Err);

  ELFDebugObject(std::unique_ptr<WritableMemoryBuffer> Buffer,
                 JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                 ExecutionSession &ES)
      : DebugObject(MemMgr, JD, ES), Buffer(std::move(Buffer)) {
    setFlags(ReportFinalSectionLoadAddresses);
  }

  // Writable copy of the input object; released after its contents are
  // copied into working memory in finalizeWorkingMemory().
  std::unique_ptr<WritableMemoryBuffer> Buffer;
  StringMap<std::unique_ptr<DebugObjectSection>> Sections;
};
+
// Names of all DWARF debug sections, expanded from the central Dwarf.def
// table. Used to decide whether an object carries debug info at all.
static const std::set<StringRef> DwarfSectionNames = {
#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION)        \
  ELF_NAME,
#include "llvm/BinaryFormat/Dwarf.def"
#undef HANDLE_DWARF_SECTION
};
+
+static bool isDwarfSection(StringRef SectionName) {
+ return DwarfSectionNames.count(SectionName) == 1;
+}
+
// Returns a writable copy of Buffer (so section headers can later be patched
// in place), or null with Err set on allocation failure. Err follows the
// ErrorAsOutParameter protocol: it is always set to success or failure.
std::unique_ptr<WritableMemoryBuffer>
ELFDebugObject::CopyBuffer(MemoryBufferRef Buffer, Error &Err) {
  ErrorAsOutParameter _(&Err);
  size_t Size = Buffer.getBufferSize();
  StringRef Name = Buffer.getBufferIdentifier();
  if (auto Copy = WritableMemoryBuffer::getNewUninitMemBuffer(Size, Name)) {
    memcpy(Copy->getBufferStart(), Buffer.getBufferStart(), Size);
    return Copy;
  }

  Err = errorCodeToError(make_error_code(errc::not_enough_memory));
  return nullptr;
}
+
// Builds an ELFDebugObject for a specific ELF class/endianness: copies the
// object buffer, scans its section headers, flags the presence of DWARF
// sections, and records allocatable PROGBITS/unwind sections for later
// load-address patching.
template <typename ELFT>
Expected<std::unique_ptr<ELFDebugObject>>
ELFDebugObject::CreateArchType(MemoryBufferRef Buffer,
                               JITLinkMemoryManager &MemMgr,
                               const JITLinkDylib *JD, ExecutionSession &ES) {
  using SectionHeader = typename ELFT::Shdr;

  Error Err = Error::success();
  std::unique_ptr<ELFDebugObject> DebugObj(
      new ELFDebugObject(CopyBuffer(Buffer, Err), MemMgr, JD, ES));
  if (Err)
    return std::move(Err);

  Expected<ELFFile<ELFT>> ObjRef = ELFFile<ELFT>::create(DebugObj->getBuffer());
  if (!ObjRef)
    return ObjRef.takeError();

  Expected<ArrayRef<SectionHeader>> Sections = ObjRef->sections();
  if (!Sections)
    return Sections.takeError();

  for (const SectionHeader &Header : *Sections) {
    Expected<StringRef> Name = ObjRef->getSectionName(Header);
    if (!Name)
      return Name.takeError();
    if (Name->empty())
      continue;
    if (isDwarfSection(*Name))
      DebugObj->setFlags(HasDebugSections);

    // Only record text and data sections (i.e. no bss, comments, rel, etc.)
    if (Header.sh_type != ELF::SHT_PROGBITS &&
        Header.sh_type != ELF::SHT_X86_64_UNWIND)
      continue;
    if (!(Header.sh_flags & ELF::SHF_ALLOC))
      continue;

    auto Wrapped = std::make_unique<ELFDebugObjectSection<ELFT>>(&Header);
    if (Error Err = DebugObj->recordSection(*Name, std::move(Wrapped)))
      return std::move(Err);
  }

  return std::move(DebugObj);
}
+
+Expected<std::unique_ptr<DebugObject>>
+ELFDebugObject::Create(MemoryBufferRef Buffer, JITLinkContext &Ctx,
+ ExecutionSession &ES) {
+ unsigned char Class, Endian;
+ std::tie(Class, Endian) = getElfArchType(Buffer.getBuffer());
+
+ if (Class == ELF::ELFCLASS32) {
+ if (Endian == ELF::ELFDATA2LSB)
+ return CreateArchType<ELF32LE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ if (Endian == ELF::ELFDATA2MSB)
+ return CreateArchType<ELF32BE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ return nullptr;
+ }
+ if (Class == ELF::ELFCLASS64) {
+ if (Endian == ELF::ELFDATA2LSB)
+ return CreateArchType<ELF64LE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ if (Endian == ELF::ELFDATA2MSB)
+ return CreateArchType<ELF64BE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ return nullptr;
+ }
+ return nullptr;
+}
+
// Allocates a read-only working-memory segment sized for the patched object
// image and copies the (header-patched) buffer into it. The original buffer
// is released afterwards; the allocation is finalized by the caller.
Expected<SimpleSegmentAlloc> ELFDebugObject::finalizeWorkingMemory() {
  LLVM_DEBUG({
    dbgs() << "Section load-addresses in debug object for \""
           << Buffer->getBufferIdentifier() << "\":\n";
    for (const auto &KV : Sections)
      KV.second->dump(dbgs(), KV.first());
  });

  // TODO: This works, but what actual alignment requirements do we have?
  unsigned PageSize = sys::Process::getPageSizeEstimate();
  size_t Size = Buffer->getBufferSize();

  // Allocate working memory for debug object in read-only segment.
  auto Alloc = SimpleSegmentAlloc::Create(
      MemMgr, JD, {{MemProt::Read, {Size, Align(PageSize)}}});
  if (!Alloc)
    return Alloc;

  // Initialize working memory with a copy of our object buffer.
  auto SegInfo = Alloc->getSegInfo(MemProt::Read);
  memcpy(SegInfo.WorkingMem.data(), Buffer->getBufferStart(), Size);
  // The copied image is now authoritative; drop the original buffer.
  Buffer.reset();

  return Alloc;
}
+
+void ELFDebugObject::reportSectionTargetMemoryRange(StringRef Name,
+ SectionRange TargetMem) {
+ if (auto *DebugObjSection = getSection(Name))
+ DebugObjSection->setTargetMemoryRange(TargetMem);
+}
+
+template <typename ELFT>
+Error ELFDebugObject::recordSection(
+ StringRef Name, std::unique_ptr<ELFDebugObjectSection<ELFT>> Section) {
+ if (Error Err = Section->validateInBounds(this->getBuffer(), Name.data()))
+ return Err;
+ bool Inserted = Sections.try_emplace(Name, std::move(Section)).second;
+ if (!Inserted)
+ LLVM_DEBUG(dbgs() << "Skipping debug registration for section '" << Name
+ << "' in object " << Buffer->getBufferIdentifier()
+ << " (duplicate name)\n");
+ return Error::success();
+}
+
+DebugObjectSection *ELFDebugObject::getSection(StringRef Name) {
+ auto It = Sections.find(Name);
+ return It == Sections.end() ? nullptr : It->second.get();
+}
+
+/// Creates a debug object based on the input object file from
+/// ObjectLinkingLayerJITLinkContext.
+///
// Dispatches debug-object creation on the LinkGraph's object format. A null
// result (not an error) means the format has no debug-object support yet.
static Expected<std::unique_ptr<DebugObject>>
createDebugObjectFromBuffer(ExecutionSession &ES, LinkGraph &G,
                            JITLinkContext &Ctx, MemoryBufferRef ObjBuffer) {
  switch (G.getTargetTriple().getObjectFormat()) {
  case Triple::ELF:
    return ELFDebugObject::Create(ObjBuffer, Ctx, ES);

  default:
    // TODO: Once we add support for other formats, we might want to split this
    //       into multiple files.
    return nullptr;
  }
}
+
// Primary constructor. Target performs the actual registration with the
// executor; RequireDebugSections skips objects without DWARF sections;
// AutoRegisterCode is forwarded to the registrar when registering.
DebugObjectManagerPlugin::DebugObjectManagerPlugin(
    ExecutionSession &ES, std::unique_ptr<DebugObjectRegistrar> Target,
    bool RequireDebugSections, bool AutoRegisterCode)
    : ES(ES), Target(std::move(Target)),
      RequireDebugSections(RequireDebugSections),
      AutoRegisterCode(AutoRegisterCode) {}
+
// Convenience constructor: require debug sections and auto-register code.
DebugObjectManagerPlugin::DebugObjectManagerPlugin(
    ExecutionSession &ES, std::unique_ptr<DebugObjectRegistrar> Target)
    : DebugObjectManagerPlugin(ES, std::move(Target), true, true) {}
+
+DebugObjectManagerPlugin::~DebugObjectManagerPlugin() = default;
+
// Called when JITLink starts processing a LinkGraph. Creates and stashes a
// pending debug object for MR, unless the object format is unsupported or
// (when RequireDebugSections is set) the object has no debug sections.
void DebugObjectManagerPlugin::notifyMaterializing(
    MaterializationResponsibility &MR, LinkGraph &G, JITLinkContext &Ctx,
    MemoryBufferRef ObjBuffer) {
  std::lock_guard<std::mutex> Lock(PendingObjsLock);
  assert(PendingObjs.count(&MR) == 0 &&
         "Cannot have more than one pending debug object per "
         "MaterializationResponsibility");

  if (auto DebugObj = createDebugObjectFromBuffer(ES, G, Ctx, ObjBuffer)) {
    // Not all link artifacts allow debugging.
    if (*DebugObj == nullptr)
      return;
    if (RequireDebugSections && !(**DebugObj).hasFlags(HasDebugSections)) {
      LLVM_DEBUG(dbgs() << "Skipping debug registration for LinkGraph '"
                        << G.getName() << "': no debug info\n");
      return;
    }
    PendingObjs[&MR] = std::move(*DebugObj);
  } else {
    // Creation failed: report the error but let materialization continue.
    ES.reportError(DebugObj.takeError());
  }
}
+
// If a pending debug object exists for MR and it wants final load addresses,
// installs a post-allocation pass that reports every graph section's target
// memory range to the debug object.
void DebugObjectManagerPlugin::modifyPassConfig(
    MaterializationResponsibility &MR, LinkGraph &G,
    PassConfiguration &PassConfig) {
  // Not all link artifacts have associated debug objects.
  std::lock_guard<std::mutex> Lock(PendingObjsLock);
  auto It = PendingObjs.find(&MR);
  if (It == PendingObjs.end())
    return;

  DebugObject &DebugObj = *It->second;
  if (DebugObj.hasFlags(ReportFinalSectionLoadAddresses)) {
    PassConfig.PostAllocationPasses.push_back(
        [&DebugObj](LinkGraph &Graph) -> Error {
          for (const Section &GraphSection : Graph.sections())
            DebugObj.reportSectionTargetMemoryRange(GraphSection.getName(),
                                                    SectionRange(GraphSection));
          return Error::success();
        });
  }
}
+
// Finalizes and registers the pending debug object for MR, if any. Blocks
// until the asynchronous finalization and registration are complete, so no
// JIT'd code can start running before the debugger has seen the debug info.
Error DebugObjectManagerPlugin::notifyEmitted(
    MaterializationResponsibility &MR) {
  std::lock_guard<std::mutex> Lock(PendingObjsLock);
  auto It = PendingObjs.find(&MR);
  if (It == PendingObjs.end())
    return Error::success();

  // During finalization the debug object is registered with the target.
  // Materialization must wait for this process to finish. Otherwise we might
  // start running code before the debugger processed the corresponding debug
  // info.
  std::promise<MSVCPError> FinalizePromise;
  std::future<MSVCPError> FinalizeErr = FinalizePromise.get_future();

  It->second->finalizeAsync(
      [this, &FinalizePromise, &MR](Expected<ExecutorAddrRange> TargetMem) {
        // Any failure here will fail materialization.
        if (!TargetMem) {
          FinalizePromise.set_value(TargetMem.takeError());
          return;
        }
        if (Error Err =
                Target->registerDebugObject(*TargetMem, AutoRegisterCode)) {
          FinalizePromise.set_value(std::move(Err));
          return;
        }

        // Once our tracking info is updated, notifyEmitted() can return and
        // finish materialization.
        FinalizePromise.set_value(MR.withResourceKeyDo([&](ResourceKey K) {
          assert(PendingObjs.count(&MR) && "We still hold PendingObjsLock");
          std::lock_guard<std::mutex> Lock(RegisteredObjsLock);
          RegisteredObjs[K].push_back(std::move(PendingObjs[&MR]));
          PendingObjs.erase(&MR);
        }));
      });

  // Block until the continuation above has run (PendingObjsLock is still
  // held, which the continuation relies on).
  return FinalizeErr.get();
}
+
// Materialization failed: drop the pending (not-yet-registered) debug
// object for MR, if one exists.
Error DebugObjectManagerPlugin::notifyFailed(
    MaterializationResponsibility &MR) {
  std::lock_guard<std::mutex> Lock(PendingObjsLock);
  PendingObjs.erase(&MR);
  return Error::success();
}
+
+void DebugObjectManagerPlugin::notifyTransferringResources(JITDylib &JD,
+ ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ // Debug objects are stored by ResourceKey only after registration.
+ // Thus, pending objects don't need to be updated here.
+ std::lock_guard<std::mutex> Lock(RegisteredObjsLock);
+ auto SrcIt = RegisteredObjs.find(SrcKey);
+ if (SrcIt != RegisteredObjs.end()) {
+ // Resources from distinct MaterializationResponsibilitys can get merged
+ // after emission, so we can have multiple debug objects per resource key.
+ for (std::unique_ptr<DebugObject> &DebugObj : SrcIt->second)
+ RegisteredObjs[DstKey].push_back(std::move(DebugObj));
+ RegisteredObjs.erase(SrcIt);
+ }
+}
+
+Error DebugObjectManagerPlugin::notifyRemovingResources(JITDylib &JD,
+ ResourceKey Key) {
+ // Removing the resource for a pending object fails materialization, so they
+ // get cleaned up in the notifyFailed() handler.
+ std::lock_guard<std::mutex> Lock(RegisteredObjsLock);
+ RegisteredObjs.erase(Key);
+
+ // TODO: Implement unregister notifications.
+ return Error::success();
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugUtils.cpp
new file mode 100644
index 000000000000..0f6923a7633f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/DebugUtils.cpp
@@ -0,0 +1,362 @@
+//===---------- DebugUtils.cpp - Utilities for debugging ORC JITs ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+
+namespace {
+
+#ifndef NDEBUG
+
+cl::opt<bool> PrintHidden("debug-orc-print-hidden", cl::init(true),
+ cl::desc("debug print hidden symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintCallable("debug-orc-print-callable", cl::init(true),
+ cl::desc("debug print callable symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintData("debug-orc-print-data", cl::init(true),
+ cl::desc("debug print data symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+#endif // NDEBUG
+
+// SetPrinter predicate that prints every element.
+template <typename T> struct PrintAll {
+ bool operator()(const T &E) { return true; }
+};
+
+bool anyPrintSymbolOptionSet() {
+#ifndef NDEBUG
+ return PrintHidden || PrintCallable || PrintData;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+bool flagsMatchCLOpts(const JITSymbolFlags &Flags) {
+#ifndef NDEBUG
+ // Bail out early if this is a hidden symbol and we're not printing hiddens.
+ if (!PrintHidden && !Flags.isExported())
+ return false;
+
+ // Return true if this is callable and we're printing callables.
+ if (PrintCallable && Flags.isCallable())
+ return true;
+
+ // Return true if this is data and we're printing data.
+ if (PrintData && !Flags.isCallable())
+ return true;
+
+ // otherwise return false.
+ return false;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+// Prints a sequence of items, filtered by an user-supplied predicate.
+template <typename Sequence,
+ typename Pred = PrintAll<typename Sequence::value_type>>
+class SequencePrinter {
+public:
+ SequencePrinter(const Sequence &S, char OpenSeq, char CloseSeq,
+ Pred ShouldPrint = Pred())
+ : S(S), OpenSeq(OpenSeq), CloseSeq(CloseSeq),
+ ShouldPrint(std::move(ShouldPrint)) {}
+
+ void printTo(llvm::raw_ostream &OS) const {
+ bool PrintComma = false;
+ OS << OpenSeq;
+ for (auto &E : S) {
+ if (ShouldPrint(E)) {
+ if (PrintComma)
+ OS << ',';
+ OS << ' ' << E;
+ PrintComma = true;
+ }
+ }
+ OS << ' ' << CloseSeq;
+ }
+
+private:
+ const Sequence &S;
+ char OpenSeq;
+ char CloseSeq;
+ mutable Pred ShouldPrint;
+};
+
+template <typename Sequence, typename Pred>
+SequencePrinter<Sequence, Pred> printSequence(const Sequence &S, char OpenSeq,
+ char CloseSeq, Pred P = Pred()) {
+ return SequencePrinter<Sequence, Pred>(S, OpenSeq, CloseSeq, std::move(P));
+}
+
+// Render a SequencePrinter by delegating to its printTo method.
+template <typename Sequence, typename Pred>
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const SequencePrinter<Sequence, Pred> &Printer) {
+ Printer.printTo(OS);
+ return OS;
+}
+
+struct PrintSymbolFlagsMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolFlagsMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second);
+ }
+};
+
+struct PrintSymbolMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second.getFlags());
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPtr &Sym) {
+ return OS << *Sym;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, NonOwningSymbolStringPtr Sym) {
+ return OS << *Sym;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols) {
+ return OS << printSequence(Symbols, '{', '}', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameVector &Symbols) {
+ return OS << printSequence(Symbols, '[', ']', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, ArrayRef<SymbolStringPtr> Symbols) {
+ return OS << printSequence(Symbols, '[', ']', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITSymbolFlags &Flags) {
+ if (Flags.hasError())
+ OS << "[*ERROR*]";
+ if (Flags.isCallable())
+ OS << "[Callable]";
+ else
+ OS << "[Data]";
+ if (Flags.isWeak())
+ OS << "[Weak]";
+ else if (Flags.isCommon())
+ OS << "[Common]";
+
+ if (!Flags.isExported())
+ OS << "[Hidden]";
+
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const ExecutorSymbolDef &Sym) {
+ return OS << Sym.getAddress() << " " << Sym.getFlags();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\": " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &SymbolFlags) {
+ return OS << printSequence(SymbolFlags, '{', '}',
+ PrintSymbolFlagsMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols) {
+ return OS << printSequence(Symbols, '{', '}',
+ PrintSymbolMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolDependenceMap::value_type &KV) {
+ return OS << "(" << KV.first->getName() << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps) {
+ return OS << printSequence(Deps, '{', '}',
+ PrintAll<SymbolDependenceMap::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const MaterializationUnit &MU) {
+ OS << "MU@" << &MU << " (\"" << MU.getName() << "\"";
+ if (anyPrintSymbolOptionSet())
+ OS << ", " << MU.getSymbols();
+ return OS << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const LookupKind &K) {
+ switch (K) {
+ case LookupKind::Static:
+ return OS << "Static";
+ case LookupKind::DLSym:
+ return OS << "DLSym";
+ }
+ llvm_unreachable("Invalid lookup kind");
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const JITDylibLookupFlags &JDLookupFlags) {
+ switch (JDLookupFlags) {
+ case JITDylibLookupFlags::MatchExportedSymbolsOnly:
+ return OS << "MatchExportedSymbolsOnly";
+ case JITDylibLookupFlags::MatchAllSymbols:
+ return OS << "MatchAllSymbols";
+ }
+ llvm_unreachable("Invalid JITDylib lookup flags");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LookupFlags) {
+ switch (LookupFlags) {
+ case SymbolLookupFlags::RequiredSymbol:
+ return OS << "RequiredSymbol";
+ case SymbolLookupFlags::WeaklyReferencedSymbol:
+ return OS << "WeaklyReferencedSymbol";
+ }
+ llvm_unreachable("Invalid symbol lookup flags");
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolLookupSet::value_type &KV) {
+ return OS << "(" << KV.first << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupSet &LookupSet) {
+ return OS << printSequence(LookupSet, '{', '}',
+ PrintAll<SymbolLookupSet::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const JITDylibSearchOrder &SearchOrder) {
+ OS << "[";
+ if (!SearchOrder.empty()) {
+ assert(SearchOrder.front().first &&
+ "JITDylibList entries must not be null");
+ OS << " (\"" << SearchOrder.front().first->getName() << "\", "
+ << SearchOrder.begin()->second << ")";
+ for (auto &KV : llvm::drop_begin(SearchOrder)) {
+ assert(KV.first && "JITDylibList entries must not be null");
+ OS << ", (\"" << KV.first->getName() << "\", " << KV.second << ")";
+ }
+ }
+ OS << " ]";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolAliasMap &Aliases) {
+ OS << "{";
+ for (auto &KV : Aliases)
+ OS << " " << *KV.first << ": " << KV.second.Aliasee << " "
+ << KV.second.AliasFlags;
+ OS << " }";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolState &S) {
+ switch (S) {
+ case SymbolState::Invalid:
+ return OS << "Invalid";
+ case SymbolState::NeverSearched:
+ return OS << "Never-Searched";
+ case SymbolState::Materializing:
+ return OS << "Materializing";
+ case SymbolState::Resolved:
+ return OS << "Resolved";
+ case SymbolState::Emitted:
+ return OS << "Emitted";
+ case SymbolState::Ready:
+ return OS << "Ready";
+ }
+ llvm_unreachable("Invalid state");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPool &SSP) {
+ std::lock_guard<std::mutex> Lock(SSP.PoolMutex);
+ SmallVector<std::pair<StringRef, int>, 0> Vec;
+ for (auto &KV : SSP.Pool)
+ Vec.emplace_back(KV.first(), KV.second);
+ llvm::sort(Vec, less_first());
+ for (auto &[K, V] : Vec)
+ OS << K << ": " << V << "\n";
+ return OS;
+}
+
+DumpObjects::DumpObjects(std::string DumpDir, std::string IdentifierOverride)
+ : DumpDir(std::move(DumpDir)),
+ IdentifierOverride(std::move(IdentifierOverride)) {
+
+ /// Discard any trailing separators.
+ while (!this->DumpDir.empty() &&
+ sys::path::is_separator(this->DumpDir.back()))
+ this->DumpDir.pop_back();
+}
+
+Expected<std::unique_ptr<MemoryBuffer>>
+DumpObjects::operator()(std::unique_ptr<MemoryBuffer> Obj) {
+ size_t Idx = 1;
+
+ std::string DumpPathStem;
+ raw_string_ostream(DumpPathStem)
+ << DumpDir << (DumpDir.empty() ? "" : "/") << getBufferIdentifier(*Obj);
+
+ std::string DumpPath = DumpPathStem + ".o";
+ while (sys::fs::exists(DumpPath)) {
+ DumpPath.clear();
+ raw_string_ostream(DumpPath) << DumpPathStem << "." << (++Idx) << ".o";
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Dumping object buffer [ " << (const void *)Obj->getBufferStart()
+ << " -- " << (const void *)(Obj->getBufferEnd() - 1) << " ] to "
+ << DumpPath << "\n";
+ });
+
+ std::error_code EC;
+ raw_fd_ostream DumpStream(DumpPath, EC);
+ if (EC)
+ return errorCodeToError(EC);
+ DumpStream.write(Obj->getBufferStart(), Obj->getBufferSize());
+
+ return std::move(Obj);
+}
+
+StringRef DumpObjects::getBufferIdentifier(MemoryBuffer &B) {
+ if (!IdentifierOverride.empty())
+ return IdentifierOverride;
+ StringRef Identifier = B.getBufferIdentifier();
+ Identifier.consume_back(".o");
+ return Identifier;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebugInfoSupport.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebugInfoSupport.cpp
new file mode 100644
index 000000000000..5a058bd712a3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebugInfoSupport.cpp
@@ -0,0 +1,120 @@
+//===--- DebugInfoSupport.cpp -- Utils for debug info support ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities to preserve and parse debug info from LinkGraphs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Debugging/DebugInfoSupport.h"
+
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::jitlink;
+
+namespace {
+static DenseSet<StringRef> DWARFSectionNames = {
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION) \
+ StringRef(ELF_NAME),
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+};
+
+// We might be able to drop relocations to symbols that do end up
+// being pruned by the linker, but for now we just preserve all of them.
+static void preserveDWARFSection(LinkGraph &G, Section &Sec) {
+ DenseMap<Block *, Symbol *> Preserved;
+ for (auto Sym : Sec.symbols()) {
+ if (Sym->isLive())
+ Preserved[&Sym->getBlock()] = Sym;
+ else if (!Preserved.count(&Sym->getBlock()))
+ Preserved[&Sym->getBlock()] = Sym;
+ }
+ for (auto Block : Sec.blocks()) {
+ auto &PSym = Preserved[Block];
+ if (!PSym)
+ PSym = &G.addAnonymousSymbol(*Block, 0, 0, false, true);
+ else if (!PSym->isLive())
+ PSym->setLive(true);
+ }
+}
+
+static SmallVector<char, 0> getSectionData(Section &Sec) {
+ SmallVector<char, 0> SecData;
+ SmallVector<Block *, 8> SecBlocks(Sec.blocks().begin(), Sec.blocks().end());
+ std::sort(SecBlocks.begin(), SecBlocks.end(), [](Block *LHS, Block *RHS) {
+ return LHS->getAddress() < RHS->getAddress();
+ });
+ // Convert back to what object file would have, one blob of section content
+ // Assumes all zerofill
+ // TODO handle alignment?
+ // TODO handle alignment offset?
+ for (auto *Block : SecBlocks) {
+ if (Block->isZeroFill())
+ SecData.resize(SecData.size() + Block->getSize(), 0);
+ else
+ SecData.append(Block->getContent().begin(), Block->getContent().end());
+ }
+ return SecData;
+}
+
+static void dumpDWARFContext(DWARFContext &DC) {
+ auto options = llvm::DIDumpOptions();
+ options.DumpType &= ~DIDT_UUID;
+ options.DumpType &= ~(1 << DIDT_ID_DebugFrame);
+ LLVM_DEBUG(DC.dump(dbgs(), options));
+}
+
+} // namespace
+
+Error llvm::orc::preserveDebugSections(LinkGraph &G) {
+ if (!G.getTargetTriple().isOSBinFormatELF()) {
+ return make_error<StringError>(
+ "preserveDebugSections only supports ELF LinkGraphs!",
+ inconvertibleErrorCode());
+ }
+ for (auto &Sec : G.sections()) {
+ if (DWARFSectionNames.count(Sec.getName())) {
+ LLVM_DEBUG(dbgs() << "Preserving DWARF section " << Sec.getName()
+ << "\n");
+ preserveDWARFSection(G, Sec);
+ }
+ }
+ return Error::success();
+}
+
+Expected<std::pair<std::unique_ptr<DWARFContext>,
+ StringMap<std::unique_ptr<MemoryBuffer>>>>
+llvm::orc::createDWARFContext(LinkGraph &G) {
+ if (!G.getTargetTriple().isOSBinFormatELF()) {
+ return make_error<StringError>(
+ "createDWARFContext only supports ELF LinkGraphs!",
+ inconvertibleErrorCode());
+ }
+ StringMap<std::unique_ptr<MemoryBuffer>> DWARFSectionData;
+ for (auto &Sec : G.sections()) {
+ if (DWARFSectionNames.count(Sec.getName())) {
+ auto SecData = getSectionData(Sec);
+ auto Name = Sec.getName();
+ // DWARFContext expects the section name to not start with a dot
+ Name.consume_front(".");
+ LLVM_DEBUG(dbgs() << "Creating DWARFContext section " << Name
+ << " with size " << SecData.size() << "\n");
+ DWARFSectionData[Name] =
+ std::make_unique<SmallVectorMemoryBuffer>(std::move(SecData));
+ }
+ }
+ auto Ctx =
+ DWARFContext::create(DWARFSectionData, G.getPointerSize(),
+ G.getEndianness() == llvm::endianness::little);
+ dumpDWARFContext(*Ctx);
+ return std::make_pair(std::move(Ctx), std::move(DWARFSectionData));
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupport.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupport.cpp
new file mode 100644
index 000000000000..1668473c0eb4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupport.cpp
@@ -0,0 +1,61 @@
+//===------ DebuggerSupport.cpp - Utils for enabling debugger support -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupport.h"
+#include "llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h"
+#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm::orc {
+
+Error enableDebuggerSupport(LLJIT &J) {
+ auto *ObjLinkingLayer = dyn_cast<ObjectLinkingLayer>(&J.getObjLinkingLayer());
+ if (!ObjLinkingLayer)
+ return make_error<StringError>("Cannot enable LLJIT debugger support: "
+ "Debugger support requires JITLink",
+ inconvertibleErrorCode());
+ auto ProcessSymsJD = J.getProcessSymbolsJITDylib();
+ if (!ProcessSymsJD)
+ return make_error<StringError>("Cannot enable LLJIT debugger support: "
+ "Process symbols are not available",
+ inconvertibleErrorCode());
+
+ auto &ES = J.getExecutionSession();
+ const auto &TT = J.getTargetTriple();
+
+ switch (TT.getObjectFormat()) {
+ case Triple::ELF: {
+ auto Registrar = createJITLoaderGDBRegistrar(ES);
+ if (!Registrar)
+ return Registrar.takeError();
+ ObjLinkingLayer->addPlugin(std::make_unique<DebugObjectManagerPlugin>(
+ ES, std::move(*Registrar), false, true));
+ return Error::success();
+ }
+ case Triple::MachO: {
+ auto DS = GDBJITDebugInfoRegistrationPlugin::Create(ES, *ProcessSymsJD, TT);
+ if (!DS)
+ return DS.takeError();
+ ObjLinkingLayer->addPlugin(std::move(*DS));
+ return Error::success();
+ }
+ default:
+ return make_error<StringError>(
+ "Cannot enable LLJIT debugger support: " +
+ Triple::getObjectFormatTypeName(TT.getObjectFormat()) +
+ " is not supported",
+ inconvertibleErrorCode());
+ }
+}
+
+} // namespace llvm::orc
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
new file mode 100644
index 000000000000..e387b06ee934
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
@@ -0,0 +1,423 @@
+//===------- DebuggerSupportPlugin.cpp - Utils for debugger support -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.h"
+#include "llvm/ExecutionEngine/Orc/MachOBuilder.h"
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
+
+#include <chrono>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::orc;
+
+static const char *SynthDebugSectionName = "__jitlink_synth_debug_object";
+
+namespace {
+
+class MachODebugObjectSynthesizerBase
+ : public GDBJITDebugInfoRegistrationPlugin::DebugSectionSynthesizer {
+public:
+ static bool isDebugSection(Section &Sec) {
+ return Sec.getName().starts_with("__DWARF,");
+ }
+
+ MachODebugObjectSynthesizerBase(LinkGraph &G, ExecutorAddr RegisterActionAddr)
+ : G(G), RegisterActionAddr(RegisterActionAddr) {}
+ virtual ~MachODebugObjectSynthesizerBase() = default;
+
+ Error preserveDebugSections() {
+ if (G.findSectionByName(SynthDebugSectionName)) {
+ LLVM_DEBUG({
+ dbgs() << "MachODebugObjectSynthesizer skipping graph " << G.getName()
+ << " which contains an unexpected existing "
+ << SynthDebugSectionName << " section.\n";
+ });
+ return Error::success();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "MachODebugObjectSynthesizer visiting graph " << G.getName()
+ << "\n";
+ });
+ for (auto &Sec : G.sections()) {
+ if (!isDebugSection(Sec))
+ continue;
+ // Preserve blocks in this debug section by marking one existing symbol
+ // live for each block, and introducing a new live, anonymous symbol for
+ // each currently unreferenced block.
+ LLVM_DEBUG({
+ dbgs() << " Preserving debug section " << Sec.getName() << "\n";
+ });
+ SmallSet<Block *, 8> PreservedBlocks;
+ for (auto *Sym : Sec.symbols()) {
+ bool NewPreservedBlock =
+ PreservedBlocks.insert(&Sym->getBlock()).second;
+ if (NewPreservedBlock)
+ Sym->setLive(true);
+ }
+ for (auto *B : Sec.blocks())
+ if (!PreservedBlocks.count(B))
+ G.addAnonymousSymbol(*B, 0, 0, false, true);
+ }
+
+ return Error::success();
+ }
+
+protected:
+ LinkGraph &G;
+ ExecutorAddr RegisterActionAddr;
+};
+
+template <typename MachOTraits>
+class MachODebugObjectSynthesizer : public MachODebugObjectSynthesizerBase {
+public:
+ MachODebugObjectSynthesizer(ExecutionSession &ES, LinkGraph &G,
+ ExecutorAddr RegisterActionAddr)
+ : MachODebugObjectSynthesizerBase(G, RegisterActionAddr),
+ Builder(ES.getPageSize()) {}
+
+ using MachODebugObjectSynthesizerBase::MachODebugObjectSynthesizerBase;
+
+ Error startSynthesis() override {
+ LLVM_DEBUG({
+ dbgs() << "Creating " << SynthDebugSectionName << " for " << G.getName()
+ << "\n";
+ });
+
+ for (auto &Sec : G.sections()) {
+ if (Sec.blocks().empty())
+ continue;
+
+      // Skip sections whose names don't fit the MachO standard.
+ if (Sec.getName().empty() || Sec.getName().size() > 33 ||
+ Sec.getName().find(',') > 16)
+ continue;
+
+ if (isDebugSection(Sec))
+ DebugSections.push_back({&Sec, nullptr});
+ else if (Sec.getMemLifetime() != MemLifetime::NoAlloc)
+ NonDebugSections.push_back({&Sec, nullptr});
+ }
+
+ // Bail out early if no debug sections.
+ if (DebugSections.empty())
+ return Error::success();
+
+ // Write MachO header and debug section load commands.
+ Builder.Header.filetype = MachO::MH_OBJECT;
+ switch (G.getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ Builder.Header.cputype = MachO::CPU_TYPE_X86_64;
+ Builder.Header.cpusubtype = MachO::CPU_SUBTYPE_X86_64_ALL;
+ break;
+ case Triple::aarch64:
+ Builder.Header.cputype = MachO::CPU_TYPE_ARM64;
+ Builder.Header.cpusubtype = MachO::CPU_SUBTYPE_ARM64_ALL;
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+
+ Seg = &Builder.addSegment("");
+
+ StringMap<std::unique_ptr<MemoryBuffer>> DebugSectionMap;
+ StringRef DebugLineSectionData;
+ for (auto &DSec : DebugSections) {
+ auto [SegName, SecName] = DSec.GraphSec->getName().split(',');
+ DSec.BuilderSec = &Seg->addSection(SecName, SegName);
+
+ SectionRange SR(*DSec.GraphSec);
+ DSec.BuilderSec->Content.Size = SR.getSize();
+ if (!SR.empty()) {
+ DSec.BuilderSec->align = Log2_64(SR.getFirstBlock()->getAlignment());
+ StringRef SectionData(SR.getFirstBlock()->getContent().data(),
+ SR.getFirstBlock()->getSize());
+ DebugSectionMap[SecName] =
+ MemoryBuffer::getMemBuffer(SectionData, G.getName(), false);
+ if (SecName == "__debug_line")
+ DebugLineSectionData = SectionData;
+ }
+ }
+
+ std::optional<StringRef> FileName;
+ if (!DebugLineSectionData.empty()) {
+ assert((G.getEndianness() == llvm::endianness::big ||
+ G.getEndianness() == llvm::endianness::little) &&
+ "G.getEndianness() must be either big or little");
+ auto DWARFCtx =
+ DWARFContext::create(DebugSectionMap, G.getPointerSize(),
+ G.getEndianness() == llvm::endianness::little);
+ DWARFDataExtractor DebugLineData(
+ DebugLineSectionData, G.getEndianness() == llvm::endianness::little,
+ G.getPointerSize());
+ uint64_t Offset = 0;
+ DWARFDebugLine::LineTable LineTable;
+
+ // Try to parse line data. Consume error on failure.
+ if (auto Err = LineTable.parse(DebugLineData, &Offset, *DWARFCtx, nullptr,
+ consumeError)) {
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ LLVM_DEBUG({
+ dbgs() << "Cannot parse line table for \"" << G.getName() << "\": ";
+ EIB.log(dbgs());
+ dbgs() << "\n";
+ });
+ });
+ } else {
+ if (!LineTable.Prologue.FileNames.empty())
+ FileName = *dwarf::toString(LineTable.Prologue.FileNames[0].Name);
+ }
+ }
+
+ // If no line table (or unable to use) then use graph name.
+ // FIXME: There are probably other debug sections we should look in first.
+ if (!FileName)
+ FileName = StringRef(G.getName());
+
+ Builder.addSymbol("", MachO::N_SO, 0, 0, 0);
+ Builder.addSymbol(*FileName, MachO::N_SO, 0, 0, 0);
+ auto TimeStamp = std::chrono::duration_cast<std::chrono::seconds>(
+ std::chrono::system_clock::now().time_since_epoch())
+ .count();
+ Builder.addSymbol("", MachO::N_OSO, 3, 1, TimeStamp);
+
+ for (auto &NDSP : NonDebugSections) {
+ auto [SegName, SecName] = NDSP.GraphSec->getName().split(',');
+ NDSP.BuilderSec = &Seg->addSection(SecName, SegName);
+ SectionRange SR(*NDSP.GraphSec);
+ if (!SR.empty())
+ NDSP.BuilderSec->align = Log2_64(SR.getFirstBlock()->getAlignment());
+
+ // Add stabs.
+ for (auto *Sym : NDSP.GraphSec->symbols()) {
+ // Skip anonymous symbols.
+ if (!Sym->hasName())
+ continue;
+
+ uint8_t SymType = Sym->isCallable() ? MachO::N_FUN : MachO::N_GSYM;
+
+ Builder.addSymbol("", MachO::N_BNSYM, 1, 0, 0);
+ StabSymbols.push_back(
+ {*Sym, Builder.addSymbol(Sym->getName(), SymType, 1, 0, 0),
+ Builder.addSymbol(Sym->getName(), SymType, 0, 0, 0)});
+ Builder.addSymbol("", MachO::N_ENSYM, 1, 0, 0);
+ }
+ }
+
+ Builder.addSymbol("", MachO::N_SO, 1, 0, 0);
+
+ // Lay out the debug object, create a section and block for it.
+ size_t DebugObjectSize = Builder.layout();
+
+ auto &SDOSec = G.createSection(SynthDebugSectionName, MemProt::Read);
+ MachOContainerBlock = &G.createMutableContentBlock(
+ SDOSec, G.allocateBuffer(DebugObjectSize), orc::ExecutorAddr(), 8, 0);
+
+ return Error::success();
+ }
+
+ Error completeSynthesisAndRegister() override {
+ if (!MachOContainerBlock) {
+ LLVM_DEBUG({
+ dbgs() << "Not writing MachO debug object header for " << G.getName()
+ << " since createDebugSection failed\n";
+ });
+
+ return Error::success();
+ }
+ ExecutorAddr MaxAddr;
+ for (auto &NDSec : NonDebugSections) {
+ SectionRange SR(*NDSec.GraphSec);
+ NDSec.BuilderSec->addr = SR.getStart().getValue();
+ NDSec.BuilderSec->size = SR.getSize();
+ NDSec.BuilderSec->offset = SR.getStart().getValue();
+ if (SR.getEnd() > MaxAddr)
+ MaxAddr = SR.getEnd();
+ }
+
+ for (auto &DSec : DebugSections) {
+ if (DSec.GraphSec->blocks_size() != 1)
+ return make_error<StringError>(
+ "Unexpected number of blocks in debug info section",
+ inconvertibleErrorCode());
+
+ if (ExecutorAddr(DSec.BuilderSec->addr) + DSec.BuilderSec->size > MaxAddr)
+ MaxAddr = ExecutorAddr(DSec.BuilderSec->addr) + DSec.BuilderSec->size;
+
+ auto &B = **DSec.GraphSec->blocks().begin();
+ DSec.BuilderSec->Content.Data = B.getContent().data();
+ DSec.BuilderSec->Content.Size = B.getContent().size();
+ DSec.BuilderSec->flags |= MachO::S_ATTR_DEBUG;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Writing MachO debug object header for " << G.getName() << "\n";
+ });
+
+ // Update stab symbol addresses.
+ for (auto &SS : StabSymbols) {
+ SS.StartStab.nlist().n_value = SS.Sym.getAddress().getValue();
+ SS.EndStab.nlist().n_value = SS.Sym.getSize();
+ }
+
+ Builder.write(MachOContainerBlock->getAlreadyMutableContent());
+
+ static constexpr bool AutoRegisterCode = true;
+ SectionRange R(MachOContainerBlock->getSection());
+ G.allocActions().push_back(
+ {cantFail(shared::WrapperFunctionCall::Create<
+ shared::SPSArgList<shared::SPSExecutorAddrRange, bool>>(
+ RegisterActionAddr, R.getRange(), AutoRegisterCode)),
+ {}});
+
+ return Error::success();
+ }
+
+private:
+ struct SectionPair {
+ Section *GraphSec = nullptr;
+ typename MachOBuilder<MachOTraits>::Section *BuilderSec = nullptr;
+ };
+
+ struct StabSymbolsEntry {
+ using RelocTarget = typename MachOBuilder<MachOTraits>::RelocTarget;
+
+ StabSymbolsEntry(Symbol &Sym, RelocTarget StartStab, RelocTarget EndStab)
+ : Sym(Sym), StartStab(StartStab), EndStab(EndStab) {}
+
+ Symbol &Sym;
+ RelocTarget StartStab, EndStab;
+ };
+
+ using BuilderType = MachOBuilder<MachOTraits>;
+
+ Block *MachOContainerBlock = nullptr;
+ MachOBuilder<MachOTraits> Builder;
+ typename MachOBuilder<MachOTraits>::Segment *Seg = nullptr;
+ std::vector<StabSymbolsEntry> StabSymbols;
+ SmallVector<SectionPair, 16> DebugSections;
+ SmallVector<SectionPair, 16> NonDebugSections;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<GDBJITDebugInfoRegistrationPlugin>>
+GDBJITDebugInfoRegistrationPlugin::Create(ExecutionSession &ES,
+ JITDylib &ProcessJD,
+ const Triple &TT) {
+ auto RegisterActionAddr =
+ TT.isOSBinFormatMachO()
+ ? ES.intern("_llvm_orc_registerJITLoaderGDBAllocAction")
+ : ES.intern("llvm_orc_registerJITLoaderGDBAllocAction");
+
+ if (auto RegisterSym = ES.lookup({&ProcessJD}, RegisterActionAddr))
+ return std::make_unique<GDBJITDebugInfoRegistrationPlugin>(
+ RegisterSym->getAddress());
+ else
+ return RegisterSym.takeError();
+}
+
+Error GDBJITDebugInfoRegistrationPlugin::notifyFailed(
+ MaterializationResponsibility &MR) {
+ return Error::success();
+}
+
+Error GDBJITDebugInfoRegistrationPlugin::notifyRemovingResources(
+ JITDylib &JD, ResourceKey K) {
+ return Error::success();
+}
+
+void GDBJITDebugInfoRegistrationPlugin::notifyTransferringResources(
+ JITDylib &JD, ResourceKey DstKey, ResourceKey SrcKey) {}
+
+void GDBJITDebugInfoRegistrationPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, LinkGraph &LG,
+ PassConfiguration &PassConfig) {
+
+ if (LG.getTargetTriple().getObjectFormat() == Triple::MachO)
+ modifyPassConfigForMachO(MR, LG, PassConfig);
+ else {
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin skipping unspported graph "
+ << LG.getName() << "(triple = " << LG.getTargetTriple().str()
+ << "\n";
+ });
+ }
+}
+
+void GDBJITDebugInfoRegistrationPlugin::modifyPassConfigForMachO(
+ MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+ jitlink::PassConfiguration &PassConfig) {
+
+ switch (LG.getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ case Triple::aarch64:
+ // Supported, continue.
+ assert(LG.getPointerSize() == 8 && "Graph has incorrect pointer size");
+ assert(LG.getEndianness() == llvm::endianness::little &&
+ "Graph has incorrect endianness");
+ break;
+ default:
+ // Unsupported.
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin skipping unsupported "
+ << "MachO graph " << LG.getName()
+ << "(triple = " << LG.getTargetTriple().str()
+ << ", pointer size = " << LG.getPointerSize() << ", endianness = "
+ << (LG.getEndianness() == llvm::endianness::big ? "big" : "little")
+ << ")\n";
+ });
+ return;
+ }
+
+ // Scan for debug sections. If we find one then install passes.
+ bool HasDebugSections = false;
+ for (auto &Sec : LG.sections())
+ if (MachODebugObjectSynthesizerBase::isDebugSection(Sec)) {
+ HasDebugSections = true;
+ break;
+ }
+
+ if (HasDebugSections) {
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin: Graph " << LG.getName()
+ << " contains debug info. Installing debugger support passes.\n";
+ });
+
+ auto MDOS = std::make_shared<MachODebugObjectSynthesizer<MachO64LE>>(
+ MR.getTargetJITDylib().getExecutionSession(), LG, RegisterActionAddr);
+ PassConfig.PrePrunePasses.push_back(
+ [=](LinkGraph &G) { return MDOS->preserveDebugSections(); });
+ PassConfig.PostPrunePasses.push_back(
+ [=](LinkGraph &G) { return MDOS->startSynthesis(); });
+ PassConfig.PostFixupPasses.push_back(
+ [=](LinkGraph &G) { return MDOS->completeSynthesisAndRegister(); });
+ } else {
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin: Graph " << LG.getName()
+ << " contains no debug info. Skipping.\n";
+ });
+ }
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/LLJITUtilsCBindings.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/LLJITUtilsCBindings.cpp
new file mode 100644
index 000000000000..2df5aef733fb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/LLJITUtilsCBindings.cpp
@@ -0,0 +1,22 @@
+//===--------- LLJITUtilsCBindings.cpp - Advanced LLJIT features ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/LLJIT.h"
+#include "llvm-c/LLJITUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupport.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJIT, LLVMOrcLLJITRef)
+
+LLVMErrorRef LLVMOrcLLJITEnableDebugSupport(LLVMOrcLLJITRef J) {
+ return wrap(llvm::orc::enableDebuggerSupport(*unwrap(J)));
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.cpp
new file mode 100644
index 000000000000..fffecfc97814
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.cpp
@@ -0,0 +1,303 @@
+//===----- PerfSupportPlugin.cpp --- Utils for perf support -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Handles support for registering code with perf
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h"
+
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/Debugging/DebugInfoSupport.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::jitlink;
+
+namespace {
+
+// Creates an EH frame header prepared for a 32-bit relative relocation
+// to the start of the .eh_frame section. Absolute injects a 64-bit absolute
+// address space offset 4 bytes from the start instead of 4 bytes
+Expected<std::string> createX64EHFrameHeader(Section &EHFrame,
+ llvm::endianness endianness,
+ bool absolute) {
+ uint8_t Version = 1;
+ uint8_t EhFramePtrEnc = 0;
+ if (absolute) {
+ EhFramePtrEnc |= dwarf::DW_EH_PE_sdata8 | dwarf::DW_EH_PE_absptr;
+ } else {
+ EhFramePtrEnc |= dwarf::DW_EH_PE_sdata4 | dwarf::DW_EH_PE_datarel;
+ }
+ uint8_t FDECountEnc = dwarf::DW_EH_PE_omit;
+ uint8_t TableEnc = dwarf::DW_EH_PE_omit;
+ // X86_64_64 relocation to the start of the .eh_frame section
+ uint32_t EHFrameRelocation = 0;
+ // uint32_t FDECount = 0;
+ // Skip the FDE binary search table
+ // We'd have to reprocess the CIEs to get this information,
+ // which seems like more trouble than it's worth
+ // TODO consider implementing this.
+ // binary search table goes here
+
+ size_t HeaderSize =
+ (sizeof(Version) + sizeof(EhFramePtrEnc) + sizeof(FDECountEnc) +
+ sizeof(TableEnc) +
+ (absolute ? sizeof(uint64_t) : sizeof(EHFrameRelocation)));
+ std::string HeaderContent(HeaderSize, '\0');
+ BinaryStreamWriter Writer(
+ MutableArrayRef<uint8_t>(
+ reinterpret_cast<uint8_t *>(HeaderContent.data()), HeaderSize),
+ endianness);
+ if (auto Err = Writer.writeInteger(Version))
+ return std::move(Err);
+ if (auto Err = Writer.writeInteger(EhFramePtrEnc))
+ return std::move(Err);
+ if (auto Err = Writer.writeInteger(FDECountEnc))
+ return std::move(Err);
+ if (auto Err = Writer.writeInteger(TableEnc))
+ return std::move(Err);
+ if (absolute) {
+ uint64_t EHFrameAddr = SectionRange(EHFrame).getStart().getValue();
+ if (auto Err = Writer.writeInteger(EHFrameAddr))
+ return std::move(Err);
+ } else {
+ if (auto Err = Writer.writeInteger(EHFrameRelocation))
+ return std::move(Err);
+ }
+ return HeaderContent;
+}
+
+constexpr StringRef RegisterPerfStartSymbolName =
+ "llvm_orc_registerJITLoaderPerfStart";
+constexpr StringRef RegisterPerfEndSymbolName =
+ "llvm_orc_registerJITLoaderPerfEnd";
+constexpr StringRef RegisterPerfImplSymbolName =
+ "llvm_orc_registerJITLoaderPerfImpl";
+
+static PerfJITCodeLoadRecord
+getCodeLoadRecord(const Symbol &Sym, std::atomic<uint64_t> &CodeIndex) {
+ PerfJITCodeLoadRecord Record;
+ auto Name = Sym.getName();
+ auto Addr = Sym.getAddress();
+ auto Size = Sym.getSize();
+ Record.Prefix.Id = PerfJITRecordType::JIT_CODE_LOAD;
+ // Runtime sets PID
+ Record.Pid = 0;
+ // Runtime sets TID
+ Record.Tid = 0;
+ Record.Vma = Addr.getValue();
+ Record.CodeAddr = Addr.getValue();
+ Record.CodeSize = Size;
+ Record.CodeIndex = CodeIndex++;
+ Record.Name = Name.str();
+ // Initialize last, once all the other fields are filled
+ Record.Prefix.TotalSize =
+ (2 * sizeof(uint32_t) // id, total_size
+ + sizeof(uint64_t) // timestamp
+ + 2 * sizeof(uint32_t) // pid, tid
+ + 4 * sizeof(uint64_t) // vma, code_addr, code_size, code_index
+ + Name.size() + 1 // symbol name
+ + Record.CodeSize // code
+ );
+ return Record;
+}
+
+static std::optional<PerfJITDebugInfoRecord>
+getDebugInfoRecord(const Symbol &Sym, DWARFContext &DC) {
+ auto &Section = Sym.getBlock().getSection();
+ auto Addr = Sym.getAddress();
+ auto Size = Sym.getSize();
+ auto SAddr = object::SectionedAddress{Addr.getValue(), Section.getOrdinal()};
+ LLVM_DEBUG(dbgs() << "Getting debug info for symbol " << Sym.getName()
+ << " at address " << Addr.getValue() << " with size "
+ << Size << "\n"
+ << "Section ordinal: " << Section.getOrdinal() << "\n");
+ auto LInfo = DC.getLineInfoForAddressRange(
+ SAddr, Size, DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath);
+ if (LInfo.empty()) {
+ // No line info available
+ LLVM_DEBUG(dbgs() << "No line info available\n");
+ return std::nullopt;
+ }
+ PerfJITDebugInfoRecord Record;
+ Record.Prefix.Id = PerfJITRecordType::JIT_CODE_DEBUG_INFO;
+ Record.CodeAddr = Addr.getValue();
+ for (const auto &Entry : LInfo) {
+ auto Addr = Entry.first;
+    // The function re-created by perf is preceded by an ELF
+    // header. Need to adjust for that, otherwise the results are
+    // wrong.
+ Addr += 0x40;
+ Record.Entries.push_back({Addr, Entry.second.Line,
+ Entry.second.Discriminator,
+ Entry.second.FileName});
+ }
+ size_t EntriesBytes = (2 // record header
+ + 2 // record fields
+ ) *
+ sizeof(uint64_t);
+ for (const auto &Entry : Record.Entries) {
+ EntriesBytes +=
+ sizeof(uint64_t) + 2 * sizeof(uint32_t); // Addr, Line/Discrim
+ EntriesBytes += Entry.Name.size() + 1; // Name
+ }
+ Record.Prefix.TotalSize = EntriesBytes;
+ LLVM_DEBUG(dbgs() << "Created debug info record\n"
+ << "Total size: " << Record.Prefix.TotalSize << "\n"
+ << "Nr entries: " << Record.Entries.size() << "\n");
+ return Record;
+}
+
+static Expected<PerfJITCodeUnwindingInfoRecord>
+getUnwindingRecord(LinkGraph &G) {
+ PerfJITCodeUnwindingInfoRecord Record;
+ Record.Prefix.Id = PerfJITRecordType::JIT_CODE_UNWINDING_INFO;
+ Record.Prefix.TotalSize = 0;
+ auto Eh_frame = G.findSectionByName(".eh_frame");
+ if (!Eh_frame) {
+ LLVM_DEBUG(dbgs() << "No .eh_frame section found\n");
+ return Record;
+ }
+ if (!G.getTargetTriple().isOSBinFormatELF()) {
+ LLVM_DEBUG(dbgs() << "Not an ELF file, will not emit unwinding info\n");
+ return Record;
+ }
+ auto SR = SectionRange(*Eh_frame);
+ auto EHFrameSize = SR.getSize();
+ auto Eh_frame_hdr = G.findSectionByName(".eh_frame_hdr");
+ if (!Eh_frame_hdr) {
+ if (G.getTargetTriple().getArch() == Triple::x86_64) {
+ auto Hdr = createX64EHFrameHeader(*Eh_frame, G.getEndianness(), true);
+ if (!Hdr)
+ return Hdr.takeError();
+ Record.EHFrameHdr = std::move(*Hdr);
+ } else {
+ LLVM_DEBUG(dbgs() << "No .eh_frame_hdr section found\n");
+ return Record;
+ }
+ Record.EHFrameHdrAddr = 0;
+ Record.EHFrameHdrSize = Record.EHFrameHdr.size();
+ Record.UnwindDataSize = EHFrameSize + Record.EHFrameHdrSize;
+ Record.MappedSize = 0; // Because the EHFrame header was not mapped
+ } else {
+ auto SR = SectionRange(*Eh_frame_hdr);
+ Record.EHFrameHdrAddr = SR.getStart().getValue();
+ Record.EHFrameHdrSize = SR.getSize();
+ Record.UnwindDataSize = EHFrameSize + Record.EHFrameHdrSize;
+ Record.MappedSize = Record.UnwindDataSize;
+ }
+ Record.EHFrameAddr = SR.getStart().getValue();
+ Record.Prefix.TotalSize =
+ (2 * sizeof(uint32_t) // id, total_size
+ + sizeof(uint64_t) // timestamp
+ +
+ 3 * sizeof(uint64_t) // unwind_data_size, eh_frame_hdr_size, mapped_size
+ + Record.UnwindDataSize // eh_frame_hdr, eh_frame
+ );
+ LLVM_DEBUG(dbgs() << "Created unwind record\n"
+ << "Total size: " << Record.Prefix.TotalSize << "\n"
+ << "Unwind size: " << Record.UnwindDataSize << "\n"
+ << "EHFrame size: " << EHFrameSize << "\n"
+ << "EHFrameHdr size: " << Record.EHFrameHdrSize << "\n");
+ return Record;
+}
+
+static PerfJITRecordBatch getRecords(ExecutionSession &ES, LinkGraph &G,
+ std::atomic<uint64_t> &CodeIndex,
+ bool EmitDebugInfo, bool EmitUnwindInfo) {
+ std::unique_ptr<DWARFContext> DC;
+ StringMap<std::unique_ptr<MemoryBuffer>> DCBacking;
+ if (EmitDebugInfo) {
+ auto EDC = createDWARFContext(G);
+ if (!EDC) {
+ ES.reportError(EDC.takeError());
+ EmitDebugInfo = false;
+ } else {
+ DC = std::move(EDC->first);
+ DCBacking = std::move(EDC->second);
+ }
+ }
+ PerfJITRecordBatch Batch;
+ for (auto Sym : G.defined_symbols()) {
+ if (!Sym->hasName() || !Sym->isCallable())
+ continue;
+ if (EmitDebugInfo) {
+ auto DebugInfo = getDebugInfoRecord(*Sym, *DC);
+ if (DebugInfo)
+ Batch.DebugInfoRecords.push_back(std::move(*DebugInfo));
+ }
+ Batch.CodeLoadRecords.push_back(getCodeLoadRecord(*Sym, CodeIndex));
+ }
+ if (EmitUnwindInfo) {
+ auto UWR = getUnwindingRecord(G);
+ if (!UWR) {
+ ES.reportError(UWR.takeError());
+ } else {
+ Batch.UnwindingRecord = std::move(*UWR);
+ }
+ } else {
+ Batch.UnwindingRecord.Prefix.TotalSize = 0;
+ }
+ return Batch;
+}
+} // namespace
+
+PerfSupportPlugin::PerfSupportPlugin(ExecutorProcessControl &EPC,
+ ExecutorAddr RegisterPerfStartAddr,
+ ExecutorAddr RegisterPerfEndAddr,
+ ExecutorAddr RegisterPerfImplAddr,
+ bool EmitDebugInfo, bool EmitUnwindInfo)
+ : EPC(EPC), RegisterPerfStartAddr(RegisterPerfStartAddr),
+ RegisterPerfEndAddr(RegisterPerfEndAddr),
+ RegisterPerfImplAddr(RegisterPerfImplAddr), CodeIndex(0),
+ EmitDebugInfo(EmitDebugInfo), EmitUnwindInfo(EmitUnwindInfo) {
+ cantFail(EPC.callSPSWrapper<void()>(RegisterPerfStartAddr));
+}
+PerfSupportPlugin::~PerfSupportPlugin() {
+ cantFail(EPC.callSPSWrapper<void()>(RegisterPerfEndAddr));
+}
+
+void PerfSupportPlugin::modifyPassConfig(MaterializationResponsibility &MR,
+ LinkGraph &G,
+ PassConfiguration &Config) {
+ Config.PostFixupPasses.push_back([this](LinkGraph &G) {
+ auto Batch = getRecords(EPC.getExecutionSession(), G, CodeIndex,
+ EmitDebugInfo, EmitUnwindInfo);
+ G.allocActions().push_back(
+ {cantFail(shared::WrapperFunctionCall::Create<
+ shared::SPSArgList<shared::SPSPerfJITRecordBatch>>(
+ RegisterPerfImplAddr, Batch)),
+ {}});
+ return Error::success();
+ });
+}
+
+Expected<std::unique_ptr<PerfSupportPlugin>>
+PerfSupportPlugin::Create(ExecutorProcessControl &EPC, JITDylib &JD,
+ bool EmitDebugInfo, bool EmitUnwindInfo) {
+ if (!EPC.getTargetTriple().isOSBinFormatELF()) {
+ return make_error<StringError>(
+ "Perf support only available for ELF LinkGraphs!",
+ inconvertibleErrorCode());
+ }
+ auto &ES = EPC.getExecutionSession();
+ ExecutorAddr StartAddr, EndAddr, ImplAddr;
+ if (auto Err = lookupAndRecordAddrs(
+ ES, LookupKind::Static, makeJITDylibSearchOrder({&JD}),
+ {{ES.intern(RegisterPerfStartSymbolName), &StartAddr},
+ {ES.intern(RegisterPerfEndSymbolName), &EndAddr},
+ {ES.intern(RegisterPerfImplSymbolName), &ImplAddr}}))
+ return std::move(Err);
+ return std::make_unique<PerfSupportPlugin>(EPC, StartAddr, EndAddr, ImplAddr,
+ EmitDebugInfo, EmitUnwindInfo);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/VTuneSupportPlugin.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/VTuneSupportPlugin.cpp
new file mode 100644
index 000000000000..30a9728c8c20
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Debugging/VTuneSupportPlugin.cpp
@@ -0,0 +1,185 @@
+//===--- VTuneSupportPlugin.cpp -- Support for VTune profiler --*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Handles support for registering code with Intel VTune's Amplifier JIT API.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/ExecutionEngine/Orc/Debugging/VTuneSupportPlugin.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/Orc/Debugging/DebugInfoSupport.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::jitlink;
+
+static constexpr StringRef RegisterVTuneImplName = "llvm_orc_registerVTuneImpl";
+static constexpr StringRef UnregisterVTuneImplName =
+ "llvm_orc_unregisterVTuneImpl";
+static constexpr StringRef RegisterTestVTuneImplName =
+ "llvm_orc_test_registerVTuneImpl";
+
+static VTuneMethodBatch getMethodBatch(LinkGraph &G, bool EmitDebugInfo) {
+ VTuneMethodBatch Batch;
+ std::unique_ptr<DWARFContext> DC;
+ StringMap<std::unique_ptr<MemoryBuffer>> DCBacking;
+ if (EmitDebugInfo) {
+ auto EDC = createDWARFContext(G);
+ if (!EDC) {
+ EmitDebugInfo = false;
+ } else {
+ DC = std::move(EDC->first);
+ DCBacking = std::move(EDC->second);
+ }
+ }
+
+ auto GetStringIdx = [Deduplicator = StringMap<uint32_t>(),
+ &Batch](StringRef S) mutable {
+ auto I = Deduplicator.find(S);
+ if (I != Deduplicator.end())
+ return I->second;
+
+ Batch.Strings.push_back(S.str());
+ return Deduplicator[S] = Batch.Strings.size();
+ };
+ for (auto Sym : G.defined_symbols()) {
+ if (!Sym->isCallable())
+ continue;
+
+ Batch.Methods.push_back(VTuneMethodInfo());
+ auto &Method = Batch.Methods.back();
+ Method.MethodID = 0;
+ Method.ParentMI = 0;
+ Method.LoadAddr = Sym->getAddress();
+ Method.LoadSize = Sym->getSize();
+ Method.NameSI = GetStringIdx(Sym->getName());
+ Method.ClassFileSI = 0;
+ Method.SourceFileSI = 0;
+
+ if (!EmitDebugInfo)
+ continue;
+
+ auto &Section = Sym->getBlock().getSection();
+ auto Addr = Sym->getAddress();
+ auto SAddr =
+ object::SectionedAddress{Addr.getValue(), Section.getOrdinal()};
+ DILineInfoTable LinesInfo = DC->getLineInfoForAddressRange(
+ SAddr, Sym->getSize(),
+ DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath);
+ Method.SourceFileSI = Batch.Strings.size();
+ Batch.Strings.push_back(DC->getLineInfoForAddress(SAddr).FileName);
+ for (auto &LInfo : LinesInfo) {
+ Method.LineTable.push_back(
+ std::pair<unsigned, unsigned>{/*unsigned*/ Sym->getOffset(),
+ /*DILineInfo*/ LInfo.second.Line});
+ }
+ }
+ return Batch;
+}
+
+void VTuneSupportPlugin::modifyPassConfig(MaterializationResponsibility &MR,
+ LinkGraph &G,
+ PassConfiguration &Config) {
+ Config.PostFixupPasses.push_back([this, MR = &MR](LinkGraph &G) {
+ // the object file is generated but not linked yet
+ auto Batch = getMethodBatch(G, EmitDebugInfo);
+ if (Batch.Methods.empty()) {
+ return Error::success();
+ }
+ {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ uint64_t Allocated = Batch.Methods.size();
+ uint64_t Start = NextMethodID;
+ NextMethodID += Allocated;
+ for (size_t i = Start; i < NextMethodID; ++i) {
+ Batch.Methods[i - Start].MethodID = i;
+ }
+ this->PendingMethodIDs[MR] = {Start, Allocated};
+ }
+ G.allocActions().push_back(
+ {cantFail(shared::WrapperFunctionCall::Create<
+ shared::SPSArgList<shared::SPSVTuneMethodBatch>>(
+ RegisterVTuneImplAddr, Batch)),
+ {}});
+ return Error::success();
+ });
+}
+
+Error VTuneSupportPlugin::notifyEmitted(MaterializationResponsibility &MR) {
+ if (auto Err = MR.withResourceKeyDo([this, MR = &MR](ResourceKey K) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = PendingMethodIDs.find(MR);
+ if (I == PendingMethodIDs.end())
+ return;
+
+ LoadedMethodIDs[K].push_back(I->second);
+ PendingMethodIDs.erase(I);
+ })) {
+ return Err;
+ }
+ return Error::success();
+}
+
+Error VTuneSupportPlugin::notifyFailed(MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ PendingMethodIDs.erase(&MR);
+ return Error::success();
+}
+
+Error VTuneSupportPlugin::notifyRemovingResources(JITDylib &JD, ResourceKey K) {
+ // Unregistration not required if not provided
+ if (!UnregisterVTuneImplAddr) {
+ return Error::success();
+ }
+ VTuneUnloadedMethodIDs UnloadedIDs;
+ {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = LoadedMethodIDs.find(K);
+ if (I == LoadedMethodIDs.end())
+ return Error::success();
+
+ UnloadedIDs = std::move(I->second);
+ LoadedMethodIDs.erase(I);
+ }
+ if (auto Err = EPC.callSPSWrapper<void(shared::SPSVTuneUnloadedMethodIDs)>(
+ UnregisterVTuneImplAddr, UnloadedIDs))
+ return Err;
+
+ return Error::success();
+}
+
+void VTuneSupportPlugin::notifyTransferringResources(JITDylib &JD,
+ ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = LoadedMethodIDs.find(SrcKey);
+ if (I == LoadedMethodIDs.end())
+ return;
+
+ auto &Dest = LoadedMethodIDs[DstKey];
+ Dest.insert(Dest.end(), I->second.begin(), I->second.end());
+ LoadedMethodIDs.erase(SrcKey);
+}
+
+Expected<std::unique_ptr<VTuneSupportPlugin>>
+VTuneSupportPlugin::Create(ExecutorProcessControl &EPC, JITDylib &JD,
+ bool EmitDebugInfo, bool TestMode) {
+ auto &ES = EPC.getExecutionSession();
+ auto RegisterImplName =
+ ES.intern(TestMode ? RegisterTestVTuneImplName : RegisterVTuneImplName);
+ auto UnregisterImplName = ES.intern(UnregisterVTuneImplName);
+ SymbolLookupSet SLS{RegisterImplName, UnregisterImplName};
+ auto Res = ES.lookup(makeJITDylibSearchOrder({&JD}), std::move(SLS));
+ if (!Res)
+ return Res.takeError();
+ ExecutorAddr RegisterImplAddr(
+ Res->find(RegisterImplName)->second.getAddress());
+ ExecutorAddr UnregisterImplAddr(
+ Res->find(UnregisterImplName)->second.getAddress());
+ return std::make_unique<VTuneSupportPlugin>(
+ EPC, RegisterImplAddr, UnregisterImplAddr, EmitDebugInfo);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
new file mode 100644
index 000000000000..2b6c4b9e7f43
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
@@ -0,0 +1,843 @@
+//===------ ELFNixPlatform.cpp - Utilities for executing ELF in Orc -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ELFNixPlatform.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+#include "llvm/ExecutionEngine/JITLink/ppc64.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ObjectFormats.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/Debug.h"
+#include <optional>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace {
+
+class DSOHandleMaterializationUnit : public MaterializationUnit {
+public:
+ DSOHandleMaterializationUnit(ELFNixPlatform &ENP,
+ const SymbolStringPtr &DSOHandleSymbol)
+ : MaterializationUnit(
+ createDSOHandleSectionInterface(ENP, DSOHandleSymbol)),
+ ENP(ENP) {}
+
+ StringRef getName() const override { return "DSOHandleMU"; }
+
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ unsigned PointerSize;
+ llvm::endianness Endianness;
+ jitlink::Edge::Kind EdgeKind;
+ const auto &TT = ENP.getExecutionSession().getTargetTriple();
+
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ PointerSize = 8;
+ Endianness = llvm::endianness::little;
+ EdgeKind = jitlink::x86_64::Pointer64;
+ break;
+ case Triple::aarch64:
+ PointerSize = 8;
+ Endianness = llvm::endianness::little;
+ EdgeKind = jitlink::aarch64::Pointer64;
+ break;
+ case Triple::ppc64:
+ PointerSize = 8;
+ Endianness = llvm::endianness::big;
+ EdgeKind = jitlink::ppc64::Pointer64;
+ break;
+ case Triple::ppc64le:
+ PointerSize = 8;
+ Endianness = llvm::endianness::little;
+ EdgeKind = jitlink::ppc64::Pointer64;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+
+ // void *__dso_handle = &__dso_handle;
+ auto G = std::make_unique<jitlink::LinkGraph>(
+ "<DSOHandleMU>", TT, PointerSize, Endianness,
+ jitlink::getGenericEdgeKindName);
+ auto &DSOHandleSection =
+ G->createSection(".data.__dso_handle", MemProt::Read);
+ auto &DSOHandleBlock = G->createContentBlock(
+ DSOHandleSection, getDSOHandleContent(PointerSize), orc::ExecutorAddr(),
+ 8, 0);
+ auto &DSOHandleSymbol = G->addDefinedSymbol(
+ DSOHandleBlock, 0, *R->getInitializerSymbol(), DSOHandleBlock.getSize(),
+ jitlink::Linkage::Strong, jitlink::Scope::Default, false, true);
+ DSOHandleBlock.addEdge(EdgeKind, 0, DSOHandleSymbol, 0);
+
+ ENP.getObjectLinkingLayer().emit(std::move(R), std::move(G));
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Sym) override {}
+
+private:
+ static MaterializationUnit::Interface
+ createDSOHandleSectionInterface(ELFNixPlatform &ENP,
+ const SymbolStringPtr &DSOHandleSymbol) {
+ SymbolFlagsMap SymbolFlags;
+ SymbolFlags[DSOHandleSymbol] = JITSymbolFlags::Exported;
+ return MaterializationUnit::Interface(std::move(SymbolFlags),
+ DSOHandleSymbol);
+ }
+
+ ArrayRef<char> getDSOHandleContent(size_t PointerSize) {
+ static const char Content[8] = {0};
+ assert(PointerSize <= sizeof Content);
+ return {Content, PointerSize};
+ }
+
+ ELFNixPlatform &ENP;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<ELFNixPlatform>> ELFNixPlatform::Create(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, std::unique_ptr<DefinitionGenerator> OrcRuntime,
+ std::optional<SymbolAliasMap> RuntimeAliases) {
+
+ // If the target is not supported then bail out immediately.
+ if (!supportedTarget(ES.getTargetTriple()))
+ return make_error<StringError>("Unsupported ELFNixPlatform triple: " +
+ ES.getTargetTriple().str(),
+ inconvertibleErrorCode());
+
+ auto &EPC = ES.getExecutorProcessControl();
+
+ // Create default aliases if the caller didn't supply any.
+ if (!RuntimeAliases) {
+ auto StandardRuntimeAliases = standardPlatformAliases(ES, PlatformJD);
+ if (!StandardRuntimeAliases)
+ return StandardRuntimeAliases.takeError();
+ RuntimeAliases = std::move(*StandardRuntimeAliases);
+ }
+
+ // Define the aliases.
+ if (auto Err = PlatformJD.define(symbolAliases(std::move(*RuntimeAliases))))
+ return std::move(Err);
+
+ // Add JIT-dispatch function support symbols.
+ if (auto Err = PlatformJD.define(
+ absoluteSymbols({{ES.intern("__orc_rt_jit_dispatch"),
+ {EPC.getJITDispatchInfo().JITDispatchFunction,
+ JITSymbolFlags::Exported}},
+ {ES.intern("__orc_rt_jit_dispatch_ctx"),
+ {EPC.getJITDispatchInfo().JITDispatchContext,
+ JITSymbolFlags::Exported}}})))
+ return std::move(Err);
+
+ // Create the instance.
+ Error Err = Error::success();
+ auto P = std::unique_ptr<ELFNixPlatform>(new ELFNixPlatform(
+ ES, ObjLinkingLayer, PlatformJD, std::move(OrcRuntime), Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(P);
+}
+
+Expected<std::unique_ptr<ELFNixPlatform>>
+ELFNixPlatform::Create(ExecutionSession &ES,
+ ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, const char *OrcRuntimePath,
+ std::optional<SymbolAliasMap> RuntimeAliases) {
+
+ // Create a generator for the ORC runtime archive.
+ auto OrcRuntimeArchiveGenerator =
+ StaticLibraryDefinitionGenerator::Load(ObjLinkingLayer, OrcRuntimePath);
+ if (!OrcRuntimeArchiveGenerator)
+ return OrcRuntimeArchiveGenerator.takeError();
+
+ return Create(ES, ObjLinkingLayer, PlatformJD,
+ std::move(*OrcRuntimeArchiveGenerator),
+ std::move(RuntimeAliases));
+}
+
+Error ELFNixPlatform::setupJITDylib(JITDylib &JD) {
+ return JD.define(
+ std::make_unique<DSOHandleMaterializationUnit>(*this, DSOHandleSymbol));
+}
+
+Error ELFNixPlatform::teardownJITDylib(JITDylib &JD) {
+ return Error::success();
+}
+
+Error ELFNixPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ const auto &InitSym = MU.getInitializerSymbol();
+ if (!InitSym)
+ return Error::success();
+
+ RegisteredInitSymbols[&JD].add(InitSym,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform: Registered init symbol " << *InitSym
+ << " for MU " << MU.getName() << "\n";
+ });
+ return Error::success();
+}
+
+Error ELFNixPlatform::notifyRemoving(ResourceTracker &RT) {
+ llvm_unreachable("Not supported yet");
+}
+
+static void addAliases(ExecutionSession &ES, SymbolAliasMap &Aliases,
+ ArrayRef<std::pair<const char *, const char *>> AL) {
+ for (auto &KV : AL) {
+ auto AliasName = ES.intern(KV.first);
+ assert(!Aliases.count(AliasName) && "Duplicate symbol name in alias map");
+ Aliases[std::move(AliasName)] = {ES.intern(KV.second),
+ JITSymbolFlags::Exported};
+ }
+}
+
+Expected<SymbolAliasMap>
+ELFNixPlatform::standardPlatformAliases(ExecutionSession &ES,
+ JITDylib &PlatformJD) {
+ SymbolAliasMap Aliases;
+ addAliases(ES, Aliases, requiredCXXAliases());
+ addAliases(ES, Aliases, standardRuntimeUtilityAliases());
+ return Aliases;
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+ELFNixPlatform::requiredCXXAliases() {
+ static const std::pair<const char *, const char *> RequiredCXXAliases[] = {
+ {"__cxa_atexit", "__orc_rt_elfnix_cxa_atexit"},
+ {"atexit", "__orc_rt_elfnix_atexit"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(RequiredCXXAliases);
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+ELFNixPlatform::standardRuntimeUtilityAliases() {
+ static const std::pair<const char *, const char *>
+ StandardRuntimeUtilityAliases[] = {
+ {"__orc_rt_run_program", "__orc_rt_elfnix_run_program"},
+ {"__orc_rt_jit_dlerror", "__orc_rt_elfnix_jit_dlerror"},
+ {"__orc_rt_jit_dlopen", "__orc_rt_elfnix_jit_dlopen"},
+ {"__orc_rt_jit_dlclose", "__orc_rt_elfnix_jit_dlclose"},
+ {"__orc_rt_jit_dlsym", "__orc_rt_elfnix_jit_dlsym"},
+ {"__orc_rt_log_error", "__orc_rt_log_error_to_stderr"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(
+ StandardRuntimeUtilityAliases);
+}
+
+bool ELFNixPlatform::supportedTarget(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ case Triple::aarch64:
+ // FIXME: jitlink for ppc64 hasn't been well tested, leave it unsupported
+ // right now.
+ case Triple::ppc64le:
+ return true;
+ default:
+ return false;
+ }
+}
+
+ELFNixPlatform::ELFNixPlatform(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD,
+ std::unique_ptr<DefinitionGenerator> OrcRuntimeGenerator, Error &Err)
+ : ES(ES), ObjLinkingLayer(ObjLinkingLayer),
+ DSOHandleSymbol(ES.intern("__dso_handle")) {
+ ErrorAsOutParameter _(&Err);
+
+ ObjLinkingLayer.addPlugin(std::make_unique<ELFNixPlatformPlugin>(*this));
+
+ PlatformJD.addGenerator(std::move(OrcRuntimeGenerator));
+
+ // PlatformJD hasn't been 'set-up' by the platform yet (since we're creating
+ // the platform now), so set it up.
+ if (auto E2 = setupJITDylib(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ RegisteredInitSymbols[&PlatformJD].add(
+ DSOHandleSymbol, SymbolLookupFlags::WeaklyReferencedSymbol);
+
+ // Associate wrapper function tags with JIT-side function implementations.
+ if (auto E2 = associateRuntimeSupportFunctions(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ // Lookup addresses of runtime functions callable by the platform,
+ // call the platform bootstrap function to initialize the platform-state
+ // object in the executor.
+ if (auto E2 = bootstrapELFNixRuntime(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+}
+
+Error ELFNixPlatform::associateRuntimeSupportFunctions(JITDylib &PlatformJD) {
+ ExecutionSession::JITDispatchHandlerAssociationMap WFs;
+
+ using GetInitializersSPSSig =
+ SPSExpected<SPSELFNixJITDylibInitializerSequence>(SPSString);
+ WFs[ES.intern("__orc_rt_elfnix_get_initializers_tag")] =
+ ES.wrapAsyncWithSPS<GetInitializersSPSSig>(
+ this, &ELFNixPlatform::rt_getInitializers);
+
+ using GetDeinitializersSPSSig =
+ SPSExpected<SPSELFJITDylibDeinitializerSequence>(SPSExecutorAddr);
+ WFs[ES.intern("__orc_rt_elfnix_get_deinitializers_tag")] =
+ ES.wrapAsyncWithSPS<GetDeinitializersSPSSig>(
+ this, &ELFNixPlatform::rt_getDeinitializers);
+
+ using LookupSymbolSPSSig =
+ SPSExpected<SPSExecutorAddr>(SPSExecutorAddr, SPSString);
+ WFs[ES.intern("__orc_rt_elfnix_symbol_lookup_tag")] =
+ ES.wrapAsyncWithSPS<LookupSymbolSPSSig>(this,
+ &ELFNixPlatform::rt_lookupSymbol);
+
+ return ES.registerJITDispatchHandlers(PlatformJD, std::move(WFs));
+}
+
+void ELFNixPlatform::getInitializersBuildSequencePhase(
+ SendInitializerSequenceFn SendResult, JITDylib &JD,
+ std::vector<JITDylibSP> DFSLinkOrder) {
+ ELFNixJITDylibInitializerSequence FullInitSeq;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ for (auto &InitJD : reverse(DFSLinkOrder)) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform: Appending inits for \"" << InitJD->getName()
+ << "\" to sequence\n";
+ });
+ auto ISItr = InitSeqs.find(InitJD.get());
+ if (ISItr != InitSeqs.end()) {
+ FullInitSeq.emplace_back(std::move(ISItr->second));
+ InitSeqs.erase(ISItr);
+ }
+ }
+ }
+
+ SendResult(std::move(FullInitSeq));
+}
+
+void ELFNixPlatform::getInitializersLookupPhase(
+ SendInitializerSequenceFn SendResult, JITDylib &JD) {
+
+ auto DFSLinkOrder = JD.getDFSLinkOrder();
+ if (!DFSLinkOrder) {
+ SendResult(DFSLinkOrder.takeError());
+ return;
+ }
+
+ DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
+ ES.runSessionLocked([&]() {
+ for (auto &InitJD : *DFSLinkOrder) {
+ auto RISItr = RegisteredInitSymbols.find(InitJD.get());
+ if (RISItr != RegisteredInitSymbols.end()) {
+ NewInitSymbols[InitJD.get()] = std::move(RISItr->second);
+ RegisteredInitSymbols.erase(RISItr);
+ }
+ }
+ });
+
+ // If there are no further init symbols to look up then move on to the next
+ // phase.
+ if (NewInitSymbols.empty()) {
+ getInitializersBuildSequencePhase(std::move(SendResult), JD,
+ std::move(*DFSLinkOrder));
+ return;
+ }
+
+ // Otherwise issue a lookup and re-run this phase when it completes.
+ lookupInitSymbolsAsync(
+ [this, SendResult = std::move(SendResult), &JD](Error Err) mutable {
+ if (Err)
+ SendResult(std::move(Err));
+ else
+ getInitializersLookupPhase(std::move(SendResult), JD);
+ },
+ ES, std::move(NewInitSymbols));
+}
+
+void ELFNixPlatform::rt_getInitializers(SendInitializerSequenceFn SendResult,
+ StringRef JDName) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform::rt_getInitializers(\"" << JDName << "\")\n";
+ });
+
+ JITDylib *JD = ES.getJITDylibByName(JDName);
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No such JITDylib \"" << JDName << "\". Sending error.\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib named " + JDName,
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ getInitializersLookupPhase(std::move(SendResult), *JD);
+}
+
+void ELFNixPlatform::rt_getDeinitializers(
+ SendDeinitializerSequenceFn SendResult, ExecutorAddr Handle) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform::rt_getDeinitializers(\"" << Handle << "\")\n";
+ });
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HandleAddrToJITDylib.find(Handle);
+ if (I != HandleAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG(dbgs() << " No JITDylib for handle " << Handle << "\n");
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ SendResult(ELFNixJITDylibDeinitializerSequence());
+}
+
+void ELFNixPlatform::rt_lookupSymbol(SendSymbolAddressFn SendResult,
+ ExecutorAddr Handle,
+ StringRef SymbolName) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform::rt_lookupSymbol(\"" << Handle << "\")\n";
+ });
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HandleAddrToJITDylib.find(Handle);
+ if (I != HandleAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG(dbgs() << " No JITDylib for handle " << Handle << "\n");
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ // Use functor class to work around XL build compiler issue on AIX.
+ class RtLookupNotifyComplete {
+ public:
+ RtLookupNotifyComplete(SendSymbolAddressFn &&SendResult)
+ : SendResult(std::move(SendResult)) {}
+ void operator()(Expected<SymbolMap> Result) {
+ if (Result) {
+ assert(Result->size() == 1 && "Unexpected result map count");
+ SendResult(Result->begin()->second.getAddress());
+ } else {
+ SendResult(Result.takeError());
+ }
+ }
+
+ private:
+ SendSymbolAddressFn SendResult;
+ };
+
+ ES.lookup(
+ LookupKind::DLSym, {{JD, JITDylibLookupFlags::MatchExportedSymbolsOnly}},
+ SymbolLookupSet(ES.intern(SymbolName)), SymbolState::Ready,
+ RtLookupNotifyComplete(std::move(SendResult)), NoDependenciesToRegister);
+}
+
+Error ELFNixPlatform::bootstrapELFNixRuntime(JITDylib &PlatformJD) {
+
+ std::pair<const char *, ExecutorAddr *> Symbols[] = {
+ {"__orc_rt_elfnix_platform_bootstrap", &orc_rt_elfnix_platform_bootstrap},
+ {"__orc_rt_elfnix_platform_shutdown", &orc_rt_elfnix_platform_shutdown},
+ {"__orc_rt_elfnix_register_object_sections",
+ &orc_rt_elfnix_register_object_sections},
+ {"__orc_rt_elfnix_create_pthread_key",
+ &orc_rt_elfnix_create_pthread_key}};
+
+ SymbolLookupSet RuntimeSymbols;
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> AddrsToRecord;
+ for (const auto &KV : Symbols) {
+ auto Name = ES.intern(KV.first);
+ RuntimeSymbols.add(Name);
+ AddrsToRecord.push_back({std::move(Name), KV.second});
+ }
+
+ auto RuntimeSymbolAddrs = ES.lookup(
+ {{&PlatformJD, JITDylibLookupFlags::MatchAllSymbols}}, RuntimeSymbols);
+ if (!RuntimeSymbolAddrs)
+ return RuntimeSymbolAddrs.takeError();
+
+ for (const auto &KV : AddrsToRecord) {
+ auto &Name = KV.first;
+ assert(RuntimeSymbolAddrs->count(Name) && "Missing runtime symbol?");
+ *KV.second = (*RuntimeSymbolAddrs)[Name].getAddress();
+ }
+
+ auto PJDDSOHandle = ES.lookup(
+ {{&PlatformJD, JITDylibLookupFlags::MatchAllSymbols}}, DSOHandleSymbol);
+ if (!PJDDSOHandle)
+ return PJDDSOHandle.takeError();
+
+ if (auto Err = ES.callSPSWrapper<void(uint64_t)>(
+ orc_rt_elfnix_platform_bootstrap,
+ PJDDSOHandle->getAddress().getValue()))
+ return Err;
+
+ // FIXME: Ordering is fuzzy here. We're probably best off saying
+ // "behavior is undefined if code that uses the runtime is added before
+ // the platform constructor returns", then move all this to the constructor.
+ RuntimeBootstrapped = true;
+ std::vector<ELFPerObjectSectionsToRegister> DeferredPOSRs;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ DeferredPOSRs = std::move(BootstrapPOSRs);
+ }
+
+ for (auto &D : DeferredPOSRs)
+ if (auto Err = registerPerObjectSections(D))
+ return Err;
+
+ return Error::success();
+}
+
+Error ELFNixPlatform::registerInitInfo(
+ JITDylib &JD, ArrayRef<jitlink::Section *> InitSections) {
+
+ std::unique_lock<std::mutex> Lock(PlatformMutex);
+
+ ELFNixJITDylibInitializers *InitSeq = nullptr;
+ {
+ auto I = InitSeqs.find(&JD);
+ if (I == InitSeqs.end()) {
+ // If there's no init sequence entry yet then we need to look up the
+ // header symbol to force creation of one.
+ Lock.unlock();
+
+ auto SearchOrder =
+ JD.withLinkOrderDo([](const JITDylibSearchOrder &SO) { return SO; });
+ if (auto Err = ES.lookup(SearchOrder, DSOHandleSymbol).takeError())
+ return Err;
+
+ Lock.lock();
+ I = InitSeqs.find(&JD);
+ assert(I != InitSeqs.end() &&
+ "Entry missing after header symbol lookup?");
+ }
+ InitSeq = &I->second;
+ }
+
+ for (auto *Sec : InitSections) {
+ // FIXME: Avoid copy here.
+ jitlink::SectionRange R(*Sec);
+ InitSeq->InitSections[Sec->getName()].push_back(R.getRange());
+ }
+
+ return Error::success();
+}
+
+Error ELFNixPlatform::registerPerObjectSections(
+ const ELFPerObjectSectionsToRegister &POSR) {
+
+ if (!orc_rt_elfnix_register_object_sections)
+ return make_error<StringError>("Attempting to register per-object "
+ "sections, but runtime support has not "
+ "been loaded yet",
+ inconvertibleErrorCode());
+
+ Error ErrResult = Error::success();
+ if (auto Err = ES.callSPSWrapper<shared::SPSError(
+ SPSELFPerObjectSectionsToRegister)>(
+ orc_rt_elfnix_register_object_sections, ErrResult, POSR))
+ return Err;
+ return ErrResult;
+}
+
+Expected<uint64_t> ELFNixPlatform::createPThreadKey() {
+ if (!orc_rt_elfnix_create_pthread_key)
+ return make_error<StringError>(
+ "Attempting to create pthread key in target, but runtime support has "
+ "not been loaded yet",
+ inconvertibleErrorCode());
+
+ Expected<uint64_t> Result(0);
+ if (auto Err = ES.callSPSWrapper<SPSExpected<uint64_t>(void)>(
+ orc_rt_elfnix_create_pthread_key, Result))
+ return std::move(Err);
+ return Result;
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+ jitlink::PassConfiguration &Config) {
+
+ // If the initializer symbol is the __dso_handle symbol then just add
+ // the DSO handle support passes.
+ if (MR.getInitializerSymbol() == MP.DSOHandleSymbol) {
+ addDSOHandleSupportPasses(MR, Config);
+ // The DSOHandle materialization unit doesn't require any other
+ // support, so we can bail out early.
+ return;
+ }
+
+ // If the object contains initializers then add passes to record them.
+ if (MR.getInitializerSymbol())
+ addInitializerSupportPasses(MR, Config);
+
+ // Add passes for eh-frame and TLV support.
+ addEHAndTLVSupportPasses(MR, Config);
+}
+
+ObjectLinkingLayer::Plugin::SyntheticSymbolDependenciesMap
+ELFNixPlatform::ELFNixPlatformPlugin::getSyntheticSymbolDependencies(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = InitSymbolDeps.find(&MR);
+ if (I != InitSymbolDeps.end()) {
+ SyntheticSymbolDependenciesMap Result;
+ Result[MR.getInitializerSymbol()] = std::move(I->second);
+ InitSymbolDeps.erase(&MR);
+ return Result;
+ }
+ return SyntheticSymbolDependenciesMap();
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::addInitializerSupportPasses(
+ MaterializationResponsibility &MR, jitlink::PassConfiguration &Config) {
+
+ /// Preserve init sections.
+ Config.PrePrunePasses.push_back([this, &MR](jitlink::LinkGraph &G) -> Error {
+ if (auto Err = preserveInitSections(G, MR))
+ return Err;
+ return Error::success();
+ });
+
+ Config.PostFixupPasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return registerInitSections(G, JD);
+ });
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::addDSOHandleSupportPasses(
+ MaterializationResponsibility &MR, jitlink::PassConfiguration &Config) {
+
+ Config.PostAllocationPasses.push_back([this, &JD = MR.getTargetJITDylib()](
+ jitlink::LinkGraph &G) -> Error {
+ auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) {
+ return Sym->getName() == *MP.DSOHandleSymbol;
+ });
+ assert(I != G.defined_symbols().end() && "Missing DSO handle symbol");
+ {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ auto HandleAddr = (*I)->getAddress();
+ MP.HandleAddrToJITDylib[HandleAddr] = &JD;
+ assert(!MP.InitSeqs.count(&JD) && "InitSeq entry for JD already exists");
+ MP.InitSeqs.insert(std::make_pair(
+ &JD, ELFNixJITDylibInitializers(JD.getName(), HandleAddr)));
+ }
+ return Error::success();
+ });
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::addEHAndTLVSupportPasses(
+ MaterializationResponsibility &MR, jitlink::PassConfiguration &Config) {
+
+ // Insert TLV lowering at the start of the PostPrunePasses, since we want
+ // it to run before GOT/PLT lowering.
+
+ // TODO: Check that before the fixTLVSectionsAndEdges pass, the GOT/PLT build
+ // pass has done. Because the TLS descriptor need to be allocate in GOT.
+ Config.PostPrunePasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return fixTLVSectionsAndEdges(G, JD);
+ });
+
+ // Add a pass to register the final addresses of the eh-frame and TLV sections
+ // with the runtime.
+ Config.PostFixupPasses.push_back([this](jitlink::LinkGraph &G) -> Error {
+ ELFPerObjectSectionsToRegister POSR;
+
+ if (auto *EHFrameSection = G.findSectionByName(ELFEHFrameSectionName)) {
+ jitlink::SectionRange R(*EHFrameSection);
+ if (!R.empty())
+ POSR.EHFrameSection = R.getRange();
+ }
+
+ // Get a pointer to the thread data section if there is one. It will be used
+ // below.
+ jitlink::Section *ThreadDataSection =
+ G.findSectionByName(ELFThreadDataSectionName);
+
+ // Handle thread BSS section if there is one.
+ if (auto *ThreadBSSSection = G.findSectionByName(ELFThreadBSSSectionName)) {
+ // If there's already a thread data section in this graph then merge the
+ // thread BSS section content into it, otherwise just treat the thread
+ // BSS section as the thread data section.
+ if (ThreadDataSection)
+ G.mergeSections(*ThreadDataSection, *ThreadBSSSection);
+ else
+ ThreadDataSection = ThreadBSSSection;
+ }
+
+ // Having merged thread BSS (if present) and thread data (if present),
+ // record the resulting section range.
+ if (ThreadDataSection) {
+ jitlink::SectionRange R(*ThreadDataSection);
+ if (!R.empty())
+ POSR.ThreadDataSection = R.getRange();
+ }
+
+ if (POSR.EHFrameSection.Start || POSR.ThreadDataSection.Start) {
+
+ // If we're still bootstrapping the runtime then just record this
+ // frame for now.
+ if (!MP.RuntimeBootstrapped) {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ MP.BootstrapPOSRs.push_back(POSR);
+ return Error::success();
+ }
+
+ // Otherwise register it immediately.
+ if (auto Err = MP.registerPerObjectSections(POSR))
+ return Err;
+ }
+
+ return Error::success();
+ });
+}
+
+Error ELFNixPlatform::ELFNixPlatformPlugin::preserveInitSections(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+ JITLinkSymbolSet InitSectionSymbols;
+ for (auto &InitSection : G.sections()) {
+ // Skip non-init sections.
+ if (!isELFInitializerSection(InitSection.getName()))
+ continue;
+
+ // Make a pass over live symbols in the section: those blocks are already
+ // preserved.
+ DenseSet<jitlink::Block *> AlreadyLiveBlocks;
+ for (auto &Sym : InitSection.symbols()) {
+ auto &B = Sym->getBlock();
+ if (Sym->isLive() && Sym->getOffset() == 0 &&
+ Sym->getSize() == B.getSize() && !AlreadyLiveBlocks.count(&B)) {
+ InitSectionSymbols.insert(Sym);
+ AlreadyLiveBlocks.insert(&B);
+ }
+ }
+
+ // Add anonymous symbols to preserve any not-already-preserved blocks.
+ for (auto *B : InitSection.blocks())
+ if (!AlreadyLiveBlocks.count(B))
+ InitSectionSymbols.insert(
+ &G.addAnonymousSymbol(*B, 0, B->getSize(), false, true));
+ }
+
+ if (!InitSectionSymbols.empty()) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ InitSymbolDeps[&MR] = std::move(InitSectionSymbols);
+ }
+
+ return Error::success();
+}
+
+Error ELFNixPlatform::ELFNixPlatformPlugin::registerInitSections(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+
+ SmallVector<jitlink::Section *> InitSections;
+
+ LLVM_DEBUG(dbgs() << "ELFNixPlatform::registerInitSections\n");
+
+ for (auto &Sec : G.sections()) {
+ if (isELFInitializerSection(Sec.getName())) {
+ InitSections.push_back(&Sec);
+ }
+ }
+
+ // Dump the scraped inits.
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform: Scraped " << G.getName() << " init sections:\n";
+ for (auto *Sec : InitSections) {
+ jitlink::SectionRange R(*Sec);
+ dbgs() << " " << Sec->getName() << ": " << R.getRange() << "\n";
+ }
+ });
+
+ return MP.registerInitInfo(JD, InitSections);
+}
+
+Error ELFNixPlatform::ELFNixPlatformPlugin::fixTLVSectionsAndEdges(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+
+ for (auto *Sym : G.external_symbols()) {
+ if (Sym->getName() == "__tls_get_addr") {
+ Sym->setName("___orc_rt_elfnix_tls_get_addr");
+ } else if (Sym->getName() == "__tlsdesc_resolver") {
+ Sym->setName("___orc_rt_elfnix_tlsdesc_resolver");
+ }
+ }
+
+ auto *TLSInfoEntrySection = G.findSectionByName("$__TLSINFO");
+
+ if (TLSInfoEntrySection) {
+ std::optional<uint64_t> Key;
+ {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ auto I = MP.JITDylibToPThreadKey.find(&JD);
+ if (I != MP.JITDylibToPThreadKey.end())
+ Key = I->second;
+ }
+ if (!Key) {
+ if (auto KeyOrErr = MP.createPThreadKey())
+ Key = *KeyOrErr;
+ else
+ return KeyOrErr.takeError();
+ }
+
+ uint64_t PlatformKeyBits =
+ support::endian::byte_swap(*Key, G.getEndianness());
+
+ for (auto *B : TLSInfoEntrySection->blocks()) {
+ // FIXME: The TLS descriptor byte length may different with different
+ // ISA
+ assert(B->getSize() == (G.getPointerSize() * 2) &&
+ "TLS descriptor must be 2 words length");
+ auto TLSInfoEntryContent = B->getMutableContent(G);
+ memcpy(TLSInfoEntryContent.data(), &PlatformKeyBits, G.getPointerSize());
+ }
+ }
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp
new file mode 100644
index 000000000000..acd7e5a409fc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp
@@ -0,0 +1,59 @@
+//===----- EPCDebugObjectRegistrar.cpp - EPC-based debug registration -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<EPCDebugObjectRegistrar>> createJITLoaderGDBRegistrar(
+ ExecutionSession &ES,
+ std::optional<ExecutorAddr> RegistrationFunctionDylib) {
+ auto &EPC = ES.getExecutorProcessControl();
+
+ if (!RegistrationFunctionDylib) {
+ if (auto D = EPC.loadDylib(nullptr))
+ RegistrationFunctionDylib = *D;
+ else
+ return D.takeError();
+ }
+
+ SymbolStringPtr RegisterFn =
+ EPC.getTargetTriple().isOSBinFormatMachO()
+ ? EPC.intern("_llvm_orc_registerJITLoaderGDBWrapper")
+ : EPC.intern("llvm_orc_registerJITLoaderGDBWrapper");
+
+ SymbolLookupSet RegistrationSymbols;
+ RegistrationSymbols.add(RegisterFn);
+
+ auto Result =
+ EPC.lookupSymbols({{*RegistrationFunctionDylib, RegistrationSymbols}});
+ if (!Result)
+ return Result.takeError();
+
+ assert(Result->size() == 1 && "Unexpected number of dylibs in result");
+ assert((*Result)[0].size() == 1 &&
+ "Unexpected number of addresses in result");
+
+ ExecutorAddr RegisterAddr = (*Result)[0][0].getAddress();
+ return std::make_unique<EPCDebugObjectRegistrar>(ES, RegisterAddr);
+}
+
+Error EPCDebugObjectRegistrar::registerDebugObject(ExecutorAddrRange TargetMem,
+ bool AutoRegisterCode) {
+ return ES.callSPSWrapper<void(shared::SPSExecutorAddrRange, bool)>(
+ RegisterFn, TargetMem, AutoRegisterCode);
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
new file mode 100644
index 000000000000..ec2187bad0f2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
@@ -0,0 +1,95 @@
+//===---------------- EPCDynamicLibrarySearchGenerator.cpp ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/Support/Error.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<EPCDynamicLibrarySearchGenerator>>
+EPCDynamicLibrarySearchGenerator::Load(
+ ExecutionSession &ES, const char *LibraryPath, SymbolPredicate Allow,
+ AddAbsoluteSymbolsFn AddAbsoluteSymbols) {
+ auto Handle = ES.getExecutorProcessControl().loadDylib(LibraryPath);
+ if (!Handle)
+ return Handle.takeError();
+
+ return std::make_unique<EPCDynamicLibrarySearchGenerator>(
+ ES, *Handle, std::move(Allow), std::move(AddAbsoluteSymbols));
+}
+
+Error EPCDynamicLibrarySearchGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+
+ if (Symbols.empty())
+ return Error::success();
+
+ LLVM_DEBUG({
+ dbgs() << "EPCDynamicLibrarySearchGenerator trying to generate "
+ << Symbols << "\n";
+ });
+
+ SymbolLookupSet LookupSymbols;
+
+ for (auto &KV : Symbols) {
+ // Skip symbols that don't match the filter.
+ if (Allow && !Allow(KV.first))
+ continue;
+ LookupSymbols.add(KV.first, SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+
+ ExecutorProcessControl::LookupRequest Request(H, LookupSymbols);
+ // Copy-capture LookupSymbols, since LookupRequest keeps a reference.
+ EPC.lookupSymbolsAsync(Request, [this, &JD, LS = std::move(LS),
+ LookupSymbols](auto Result) mutable {
+ if (!Result) {
+ LLVM_DEBUG({
+ dbgs() << "EPCDynamicLibrarySearchGenerator lookup failed due to error";
+ });
+ return LS.continueLookup(Result.takeError());
+ }
+
+ assert(Result->size() == 1 && "Results for more than one library returned");
+ assert(Result->front().size() == LookupSymbols.size() &&
+ "Result has incorrect number of elements");
+
+ SymbolMap NewSymbols;
+ auto ResultI = Result->front().begin();
+ for (auto &KV : LookupSymbols) {
+ if (ResultI->getAddress())
+ NewSymbols[KV.first] = *ResultI;
+ ++ResultI;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "EPCDynamicLibrarySearchGenerator lookup returned "
+ << NewSymbols << "\n";
+ });
+
+ // If there were no resolved symbols bail out.
+ if (NewSymbols.empty())
+ return LS.continueLookup(Error::success());
+
+ // Define resolved symbols.
+ Error Err = AddAbsoluteSymbols
+ ? AddAbsoluteSymbols(JD, std::move(NewSymbols))
+ : JD.define(absoluteSymbols(std::move(NewSymbols)));
+
+ LS.continueLookup(std::move(Err));
+ });
+
+ return Error::success();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
new file mode 100644
index 000000000000..f15315260ab0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
@@ -0,0 +1,49 @@
+//===------ EPCEHFrameRegistrar.cpp - EPC-based eh-frame registration -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<EPCEHFrameRegistrar>>
+EPCEHFrameRegistrar::Create(ExecutionSession &ES) {
+
+ // Lookup addresseses of the registration/deregistration functions in the
+ // bootstrap map.
+ ExecutorAddr RegisterEHFrameSectionWrapper;
+ ExecutorAddr DeregisterEHFrameSectionWrapper;
+ if (auto Err = ES.getExecutorProcessControl().getBootstrapSymbols(
+ {{RegisterEHFrameSectionWrapper,
+ rt::RegisterEHFrameSectionWrapperName},
+ {DeregisterEHFrameSectionWrapper,
+ rt::DeregisterEHFrameSectionWrapperName}}))
+ return std::move(Err);
+
+ return std::make_unique<EPCEHFrameRegistrar>(
+ ES, RegisterEHFrameSectionWrapper, DeregisterEHFrameSectionWrapper);
+}
+
+Error EPCEHFrameRegistrar::registerEHFrames(ExecutorAddrRange EHFrameSection) {
+ return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(
+ RegisterEHFrameSectionWrapper, EHFrameSection);
+}
+
+Error EPCEHFrameRegistrar::deregisterEHFrames(
+ ExecutorAddrRange EHFrameSection) {
+ return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(
+ DeregisterEHFrameSectionWrapper, EHFrameSection);
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp
new file mode 100644
index 000000000000..298bde46ab75
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp
@@ -0,0 +1,121 @@
+//===------- EPCGenericDylibManager.cpp -- Dylib management via EPC -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+template <>
+class SPSSerializationTraits<SPSRemoteSymbolLookupSetElement,
+ SymbolLookupSet::value_type> {
+public:
+ static size_t size(const SymbolLookupSet::value_type &V) {
+ return SPSArgList<SPSString, bool>::size(
+ *V.first, V.second == SymbolLookupFlags::RequiredSymbol);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const SymbolLookupSet::value_type &V) {
+ return SPSArgList<SPSString, bool>::serialize(
+ OB, *V.first, V.second == SymbolLookupFlags::RequiredSymbol);
+ }
+};
+
+template <>
+class TrivialSPSSequenceSerialization<SPSRemoteSymbolLookupSetElement,
+ SymbolLookupSet> {
+public:
+ static constexpr bool available = true;
+};
+
+template <>
+class SPSSerializationTraits<SPSRemoteSymbolLookup,
+ ExecutorProcessControl::LookupRequest> {
+ using MemberSerialization =
+ SPSArgList<SPSExecutorAddr, SPSRemoteSymbolLookupSet>;
+
+public:
+ static size_t size(const ExecutorProcessControl::LookupRequest &LR) {
+ return MemberSerialization::size(ExecutorAddr(LR.Handle), LR.Symbols);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const ExecutorProcessControl::LookupRequest &LR) {
+ return MemberSerialization::serialize(OB, ExecutorAddr(LR.Handle),
+ LR.Symbols);
+ }
+};
+
+} // end namespace shared
+
+Expected<EPCGenericDylibManager>
+EPCGenericDylibManager::CreateWithDefaultBootstrapSymbols(
+ ExecutorProcessControl &EPC) {
+ SymbolAddrs SAs;
+ if (auto Err = EPC.getBootstrapSymbols(
+ {{SAs.Instance, rt::SimpleExecutorDylibManagerInstanceName},
+ {SAs.Open, rt::SimpleExecutorDylibManagerOpenWrapperName},
+ {SAs.Lookup, rt::SimpleExecutorDylibManagerLookupWrapperName}}))
+ return std::move(Err);
+ return EPCGenericDylibManager(EPC, std::move(SAs));
+}
+
+Expected<tpctypes::DylibHandle> EPCGenericDylibManager::open(StringRef Path,
+ uint64_t Mode) {
+ Expected<tpctypes::DylibHandle> H((ExecutorAddr()));
+ if (auto Err =
+ EPC.callSPSWrapper<rt::SPSSimpleExecutorDylibManagerOpenSignature>(
+ SAs.Open, H, SAs.Instance, Path, Mode))
+ return std::move(Err);
+ return H;
+}
+
+void EPCGenericDylibManager::lookupAsync(tpctypes::DylibHandle H,
+ const SymbolLookupSet &Lookup,
+ SymbolLookupCompleteFn Complete) {
+ EPC.callSPSWrapperAsync<rt::SPSSimpleExecutorDylibManagerLookupSignature>(
+ SAs.Lookup,
+ [Complete = std::move(Complete)](
+ Error SerializationErr,
+ Expected<std::vector<ExecutorSymbolDef>> Result) mutable {
+ if (SerializationErr) {
+ cantFail(Result.takeError());
+ Complete(std::move(SerializationErr));
+ return;
+ }
+ Complete(std::move(Result));
+ },
+ SAs.Instance, H, Lookup);
+}
+
+void EPCGenericDylibManager::lookupAsync(tpctypes::DylibHandle H,
+ const RemoteSymbolLookupSet &Lookup,
+ SymbolLookupCompleteFn Complete) {
+ EPC.callSPSWrapperAsync<rt::SPSSimpleExecutorDylibManagerLookupSignature>(
+ SAs.Lookup,
+ [Complete = std::move(Complete)](
+ Error SerializationErr,
+ Expected<std::vector<ExecutorSymbolDef>> Result) mutable {
+ if (SerializationErr) {
+ cantFail(Result.takeError());
+ Complete(std::move(SerializationErr));
+ return;
+ }
+ Complete(std::move(Result));
+ },
+ SAs.Instance, H, Lookup);
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
new file mode 100644
index 000000000000..b05f08fd7cdf
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
@@ -0,0 +1,173 @@
+//===---- EPCGenericJITLinkMemoryManager.cpp -- Mem management via EPC ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h"
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+
+#include <limits>
+
+using namespace llvm::jitlink;
+
+namespace llvm {
+namespace orc {
+
+class EPCGenericJITLinkMemoryManager::InFlightAlloc
+ : public jitlink::JITLinkMemoryManager::InFlightAlloc {
+public:
+
+ // FIXME: The C++98 initializer is an attempt to work around compile failures
+ // due to http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1397.
+ // We should be able to switch this back to member initialization once that
+ // issue is fixed.
+ struct SegInfo {
+ SegInfo() : WorkingMem(nullptr), ContentSize(0), ZeroFillSize(0) {}
+
+ char *WorkingMem;
+ ExecutorAddr Addr;
+ uint64_t ContentSize;
+ uint64_t ZeroFillSize;
+ };
+
+ using SegInfoMap = AllocGroupSmallMap<SegInfo>;
+
+ InFlightAlloc(EPCGenericJITLinkMemoryManager &Parent, LinkGraph &G,
+ ExecutorAddr AllocAddr, SegInfoMap Segs)
+ : Parent(Parent), G(G), AllocAddr(AllocAddr), Segs(std::move(Segs)) {}
+
+ void finalize(OnFinalizedFunction OnFinalize) override {
+ tpctypes::FinalizeRequest FR;
+ for (auto &KV : Segs) {
+ assert(KV.second.ContentSize <= std::numeric_limits<size_t>::max());
+ FR.Segments.push_back(tpctypes::SegFinalizeRequest{
+ KV.first,
+ KV.second.Addr,
+ alignTo(KV.second.ContentSize + KV.second.ZeroFillSize,
+ Parent.EPC.getPageSize()),
+ {KV.second.WorkingMem, static_cast<size_t>(KV.second.ContentSize)}});
+ }
+
+ // Transfer allocation actions.
+ std::swap(FR.Actions, G.allocActions());
+
+ Parent.EPC.callSPSWrapperAsync<
+ rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>(
+ Parent.SAs.Finalize,
+ [OnFinalize = std::move(OnFinalize), AllocAddr = this->AllocAddr](
+ Error SerializationErr, Error FinalizeErr) mutable {
+ // FIXME: Release abandoned alloc.
+ if (SerializationErr) {
+ cantFail(std::move(FinalizeErr));
+ OnFinalize(std::move(SerializationErr));
+ } else if (FinalizeErr)
+ OnFinalize(std::move(FinalizeErr));
+ else
+ OnFinalize(FinalizedAlloc(AllocAddr));
+ },
+ Parent.SAs.Allocator, std::move(FR));
+ }
+
+ void abandon(OnAbandonedFunction OnAbandoned) override {
+ // FIXME: Return memory to pool instead.
+ Parent.EPC.callSPSWrapperAsync<
+ rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
+ Parent.SAs.Deallocate,
+ [OnAbandoned = std::move(OnAbandoned)](Error SerializationErr,
+ Error DeallocateErr) mutable {
+ if (SerializationErr) {
+ cantFail(std::move(DeallocateErr));
+ OnAbandoned(std::move(SerializationErr));
+ } else
+ OnAbandoned(std::move(DeallocateErr));
+ },
+ Parent.SAs.Allocator, ArrayRef<ExecutorAddr>(AllocAddr));
+ }
+
+private:
+ EPCGenericJITLinkMemoryManager &Parent;
+ LinkGraph &G;
+ ExecutorAddr AllocAddr;
+ SegInfoMap Segs;
+};
+
+void EPCGenericJITLinkMemoryManager::allocate(const JITLinkDylib *JD,
+ LinkGraph &G,
+ OnAllocatedFunction OnAllocated) {
+ BasicLayout BL(G);
+
+ auto Pages = BL.getContiguousPageBasedLayoutSizes(EPC.getPageSize());
+ if (!Pages)
+ return OnAllocated(Pages.takeError());
+
+ EPC.callSPSWrapperAsync<rt::SPSSimpleExecutorMemoryManagerReserveSignature>(
+ SAs.Reserve,
+ [this, BL = std::move(BL), OnAllocated = std::move(OnAllocated)](
+ Error SerializationErr, Expected<ExecutorAddr> AllocAddr) mutable {
+ if (SerializationErr) {
+ cantFail(AllocAddr.takeError());
+ return OnAllocated(std::move(SerializationErr));
+ }
+ if (!AllocAddr)
+ return OnAllocated(AllocAddr.takeError());
+
+ completeAllocation(*AllocAddr, std::move(BL), std::move(OnAllocated));
+ },
+ SAs.Allocator, Pages->total());
+}
+
+void EPCGenericJITLinkMemoryManager::deallocate(
+ std::vector<FinalizedAlloc> Allocs, OnDeallocatedFunction OnDeallocated) {
+ EPC.callSPSWrapperAsync<
+ rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
+ SAs.Deallocate,
+ [OnDeallocated = std::move(OnDeallocated)](Error SerErr,
+ Error DeallocErr) mutable {
+ if (SerErr) {
+ cantFail(std::move(DeallocErr));
+ OnDeallocated(std::move(SerErr));
+ } else
+ OnDeallocated(std::move(DeallocErr));
+ },
+ SAs.Allocator, Allocs);
+ for (auto &A : Allocs)
+ A.release();
+}
+
+void EPCGenericJITLinkMemoryManager::completeAllocation(
+ ExecutorAddr AllocAddr, BasicLayout BL, OnAllocatedFunction OnAllocated) {
+
+ InFlightAlloc::SegInfoMap SegInfos;
+
+ ExecutorAddr NextSegAddr = AllocAddr;
+ for (auto &KV : BL.segments()) {
+ const auto &AG = KV.first;
+ auto &Seg = KV.second;
+
+ Seg.Addr = NextSegAddr;
+ KV.second.WorkingMem = BL.getGraph().allocateBuffer(Seg.ContentSize).data();
+ NextSegAddr += ExecutorAddrDiff(
+ alignTo(Seg.ContentSize + Seg.ZeroFillSize, EPC.getPageSize()));
+
+ auto &SegInfo = SegInfos[AG];
+ SegInfo.ContentSize = Seg.ContentSize;
+ SegInfo.ZeroFillSize = Seg.ZeroFillSize;
+ SegInfo.Addr = Seg.Addr;
+ SegInfo.WorkingMem = Seg.WorkingMem;
+ }
+
+ if (auto Err = BL.apply())
+ return OnAllocated(std::move(Err));
+
+ OnAllocated(std::make_unique<InFlightAlloc>(*this, BL.getGraph(), AllocAddr,
+ std::move(SegInfos)));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
new file mode 100644
index 000000000000..fbe25d70c38a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
@@ -0,0 +1,315 @@
+//===----- EPCGenericRTDyldMemoryManager.cpp - EPC-based MemMgr -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+// Build a memory manager by resolving the standard bootstrap symbols
+// (memory-manager instance plus reserve/finalize/deallocate and eh-frame
+// registration wrappers) from the executor process.
+Expected<std::unique_ptr<EPCGenericRTDyldMemoryManager>>
+EPCGenericRTDyldMemoryManager::CreateWithDefaultBootstrapSymbols(
+    ExecutorProcessControl &EPC) {
+  SymbolAddrs SAs;
+  if (auto Err = EPC.getBootstrapSymbols(
+          {{SAs.Instance, rt::SimpleExecutorMemoryManagerInstanceName},
+           {SAs.Reserve, rt::SimpleExecutorMemoryManagerReserveWrapperName},
+           {SAs.Finalize, rt::SimpleExecutorMemoryManagerFinalizeWrapperName},
+           {SAs.Deallocate,
+            rt::SimpleExecutorMemoryManagerDeallocateWrapperName},
+           {SAs.RegisterEHFrame, rt::RegisterEHFrameSectionWrapperName},
+           {SAs.DeregisterEHFrame, rt::DeregisterEHFrameSectionWrapperName}}))
+    return std::move(Err);
+  return std::make_unique<EPCGenericRTDyldMemoryManager>(EPC, std::move(SAs));
+}
+
+// Construct from an ExecutorProcessControl and a pre-resolved symbol-address
+// table (see CreateWithDefaultBootstrapSymbols).
+EPCGenericRTDyldMemoryManager::EPCGenericRTDyldMemoryManager(
+    ExecutorProcessControl &EPC, SymbolAddrs SAs)
+    : EPC(EPC), SAs(std::move(SAs)) {
+  LLVM_DEBUG(dbgs() << "Created remote allocator " << (void *)this << "\n");
+}
+
+EPCGenericRTDyldMemoryManager::~EPCGenericRTDyldMemoryManager() {
+ LLVM_DEBUG(dbgs() << "Destroyed remote allocator " << (void *)this << "\n");
+ if (!ErrMsg.empty())
+ errs() << "Destroying with existing errors:\n" << ErrMsg << "\n";
+
+ Error Err = Error::success();
+ if (auto Err2 = EPC.callSPSWrapper<
+ rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
+ SAs.Reserve, Err, SAs.Instance, FinalizedAllocs)) {
+ // FIXME: Report errors through EPC once that functionality is available.
+ logAllUnhandledErrors(std::move(Err2), errs(), "");
+ return;
+ }
+
+ if (Err)
+ logAllUnhandledErrors(std::move(Err), errs(), "");
+}
+
+// RuntimeDyld callback: allocate host-side working memory for a code section.
+// The section is recorded against the most recent reservation (Unmapped.back(),
+// created by reserveAllocationSpace) and mapped to its executor address later
+// in notifyObjectLoaded. Returns a pointer aligned to the requested alignment
+// within the allocation's buffer.
+uint8_t *EPCGenericRTDyldMemoryManager::allocateCodeSection(
+    uintptr_t Size, unsigned Alignment, unsigned SectionID,
+    StringRef SectionName) {
+  std::lock_guard<std::mutex> Lock(M);
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " allocating code section "
+           << SectionName << ": size = " << formatv("{0:x}", Size)
+           << " bytes, alignment = " << Alignment << "\n";
+  });
+  auto &Seg = Unmapped.back().CodeAllocs;
+  Seg.emplace_back(Size, Alignment);
+  return reinterpret_cast<uint8_t *>(
+      alignAddr(Seg.back().Contents.get(), Align(Alignment)));
+}
+
+// RuntimeDyld callback: allocate host-side working memory for a data section,
+// routed to the ro-data or rw-data list of the current reservation depending
+// on IsReadOnly. See allocateCodeSection for the overall scheme.
+uint8_t *EPCGenericRTDyldMemoryManager::allocateDataSection(
+    uintptr_t Size, unsigned Alignment, unsigned SectionID,
+    StringRef SectionName, bool IsReadOnly) {
+  std::lock_guard<std::mutex> Lock(M);
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " allocating "
+           << (IsReadOnly ? "ro" : "rw") << "-data section " << SectionName
+           << ": size = " << formatv("{0:x}", Size) << " bytes, alignment "
+           << Alignment << ")\n";
+  });
+
+  auto &Seg =
+      IsReadOnly ? Unmapped.back().RODataAllocs : Unmapped.back().RWDataAllocs;
+
+  Seg.emplace_back(Size, Alignment);
+  return reinterpret_cast<uint8_t *>(
+      alignAddr(Seg.back().Contents.get(), Align(Alignment)));
+}
+
+// Reserve one contiguous slab in the executor large enough for the object's
+// code, ro-data and rw-data (each rounded up to whole pages), and record the
+// three sub-ranges in a new Unmapped entry. Failures are latched into ErrMsg
+// (this interface cannot return an error) and reported from finalizeMemory.
+void EPCGenericRTDyldMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
+    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
+
+  {
+    std::lock_guard<std::mutex> Lock(M);
+    // If there's already an error then bail out.
+    if (!ErrMsg.empty())
+      return;
+
+    // Page-granular layout below can only honor alignments up to a page.
+    if (CodeAlign > EPC.getPageSize()) {
+      ErrMsg = "Invalid code alignment in reserveAllocationSpace";
+      return;
+    }
+    if (RODataAlign > EPC.getPageSize()) {
+      ErrMsg = "Invalid ro-data alignment in reserveAllocationSpace";
+      return;
+    }
+    if (RWDataAlign > EPC.getPageSize()) {
+      ErrMsg = "Invalid rw-data alignment in reserveAllocationSpace";
+      return;
+    }
+  }
+
+  // Total slab size: each region padded to page size.
+  uint64_t TotalSize = 0;
+  TotalSize += alignTo(CodeSize, EPC.getPageSize());
+  TotalSize += alignTo(RODataSize, EPC.getPageSize());
+  TotalSize += alignTo(RWDataSize, EPC.getPageSize());
+
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " reserving "
+           << formatv("{0:x}", TotalSize) << " bytes.\n";
+  });
+
+  // The remote call is made without holding the lock; it is re-taken below
+  // to record either the error or the new reservation.
+  Expected<ExecutorAddr> TargetAllocAddr((ExecutorAddr()));
+  if (auto Err = EPC.callSPSWrapper<
+          rt::SPSSimpleExecutorMemoryManagerReserveSignature>(
+          SAs.Reserve, TargetAllocAddr, SAs.Instance, TotalSize)) {
+    std::lock_guard<std::mutex> Lock(M);
+    ErrMsg = toString(std::move(Err));
+    return;
+  }
+  if (!TargetAllocAddr) {
+    std::lock_guard<std::mutex> Lock(M);
+    ErrMsg = toString(TargetAllocAddr.takeError());
+    return;
+  }
+
+  // Carve the slab into code / ro-data / rw-data ranges, in that order.
+  std::lock_guard<std::mutex> Lock(M);
+  Unmapped.push_back(SectionAllocGroup());
+  Unmapped.back().RemoteCode = {
+      *TargetAllocAddr, ExecutorAddrDiff(alignTo(CodeSize, EPC.getPageSize()))};
+  Unmapped.back().RemoteROData = {
+      Unmapped.back().RemoteCode.End,
+      ExecutorAddrDiff(alignTo(RODataSize, EPC.getPageSize()))};
+  Unmapped.back().RemoteRWData = {
+      Unmapped.back().RemoteROData.End,
+      ExecutorAddrDiff(alignTo(RWDataSize, EPC.getPageSize()))};
+}
+
+// This manager always pre-reserves a slab per object (see
+// reserveAllocationSpace), so RuntimeDyld must call it for every object.
+bool EPCGenericRTDyldMemoryManager::needsToReserveAllocationSpace() {
+  return true;
+}
+
+// Record an eh-frame range against the unfinalized allocation group that
+// contains it; actual registration in the executor happens as part of the
+// finalize request in finalizeMemory. LoadAddr is the executor address.
+void EPCGenericRTDyldMemoryManager::registerEHFrames(uint8_t *Addr,
+                                                     uint64_t LoadAddr,
+                                                     size_t Size) {
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " added unfinalized eh-frame "
+           << formatv("[ {0:x} {1:x} ]", LoadAddr, LoadAddr + Size) << "\n";
+  });
+  std::lock_guard<std::mutex> Lock(M);
+  // Bail out early if there's already an error.
+  if (!ErrMsg.empty())
+    return;
+
+  // Search newest-first: the eh-frame most likely belongs to the object
+  // loaded most recently.
+  ExecutorAddr LA(LoadAddr);
+  for (auto &SecAllocGroup : llvm::reverse(Unfinalized)) {
+    if (SecAllocGroup.RemoteCode.contains(LA) ||
+        SecAllocGroup.RemoteROData.contains(LA) ||
+        SecAllocGroup.RemoteRWData.contains(LA)) {
+      SecAllocGroup.UnfinalizedEHFrames.push_back({LA, Size});
+      return;
+    }
+  }
+  ErrMsg = "eh-frame does not lie inside unfinalized alloc";
+}
+
+// Deregistration happens automatically when the allocation is deallocated:
+// finalizeMemory attached a deallocation action for each eh-frame range.
+void EPCGenericRTDyldMemoryManager::deregisterEHFrames() {
+  // This is a no-op for us: We've registered a deallocation action for it.
+}
+
+// Once RuntimeDyld has loaded an object, tell it the executor addresses for
+// each host-side section buffer so relocations resolve against the remote
+// layout, then move the groups from Unmapped to Unfinalized.
+void EPCGenericRTDyldMemoryManager::notifyObjectLoaded(
+    RuntimeDyld &Dyld, const object::ObjectFile &Obj) {
+  std::lock_guard<std::mutex> Lock(M);
+  LLVM_DEBUG(dbgs() << "Allocator " << (void *)this << " applied mappings:\n");
+  for (auto &ObjAllocs : Unmapped) {
+    mapAllocsToRemoteAddrs(Dyld, ObjAllocs.CodeAllocs,
+                           ObjAllocs.RemoteCode.Start);
+    mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RODataAllocs,
+                           ObjAllocs.RemoteROData.Start);
+    mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RWDataAllocs,
+                           ObjAllocs.RemoteRWData.Start);
+    Unfinalized.push_back(std::move(ObjAllocs));
+  }
+  Unmapped.clear();
+}
+
+// Copy each unfinalized group's section contents into per-segment aggregate
+// buffers, attach eh-frame register/deregister actions, and send a finalize
+// request to the executor. Returns true (and fills *ErrMsg if non-null) on
+// error, per the RTDyldMemoryManager contract.
+bool EPCGenericRTDyldMemoryManager::finalizeMemory(std::string *ErrMsg) {
+  LLVM_DEBUG(dbgs() << "Allocator " << (void *)this << " finalizing:\n");
+
+  // If there's an error then bail out here.
+  std::vector<SectionAllocGroup> SecAllocGroups;
+  {
+    std::lock_guard<std::mutex> Lock(M);
+    if (ErrMsg && !this->ErrMsg.empty()) {
+      *ErrMsg = std::move(this->ErrMsg);
+      return true;
+    }
+    std::swap(SecAllocGroups, Unfinalized);
+  }
+
+  // Loop over unfinalized objects to make finalization requests.
+  for (auto &SecAllocGroup : SecAllocGroups) {
+
+    // The three parallel arrays below must stay in the same order:
+    // code (RX), ro-data (R), rw-data (RW).
+    MemProt SegMemProts[3] = {MemProt::Read | MemProt::Exec, MemProt::Read,
+                              MemProt::Read | MemProt::Write};
+
+    ExecutorAddrRange *RemoteAddrs[3] = {&SecAllocGroup.RemoteCode,
+                                         &SecAllocGroup.RemoteROData,
+                                         &SecAllocGroup.RemoteRWData};
+
+    std::vector<SectionAlloc> *SegSections[3] = {&SecAllocGroup.CodeAllocs,
+                                                 &SecAllocGroup.RODataAllocs,
+                                                 &SecAllocGroup.RWDataAllocs};
+
+    tpctypes::FinalizeRequest FR;
+    std::unique_ptr<char[]> AggregateContents[3];
+
+    for (unsigned I = 0; I != 3; ++I) {
+      FR.Segments.push_back({});
+      auto &Seg = FR.Segments.back();
+      Seg.RAG = SegMemProts[I];
+      Seg.Addr = RemoteAddrs[I]->Start;
+      // First pass: compute the aggregate size, honoring each section's
+      // alignment (mirrors the layout applied in mapAllocsToRemoteAddrs).
+      for (auto &SecAlloc : *SegSections[I]) {
+        Seg.Size = alignTo(Seg.Size, SecAlloc.Align);
+        Seg.Size += SecAlloc.Size;
+      }
+      AggregateContents[I] = std::make_unique<char[]>(Seg.Size);
+      size_t SecOffset = 0;
+      // Second pass: copy each section's (aligned) content into the
+      // aggregate buffer at the same relative offsets.
+      for (auto &SecAlloc : *SegSections[I]) {
+        SecOffset = alignTo(SecOffset, SecAlloc.Align);
+        memcpy(&AggregateContents[I][SecOffset],
+               reinterpret_cast<const char *>(
+                   alignAddr(SecAlloc.Contents.get(), Align(SecAlloc.Align))),
+               SecAlloc.Size);
+        SecOffset += SecAlloc.Size;
+        // FIXME: Can we reset SecAlloc.Content here, now that it's copied into
+        // the aggregated content?
+      }
+      Seg.Content = {AggregateContents[I].get(), SecOffset};
+    }
+
+    // Attach paired register/deregister actions for each recorded eh-frame;
+    // the executor runs the register action at finalize time and the
+    // deregister action at deallocation time.
+    for (auto &Frame : SecAllocGroup.UnfinalizedEHFrames)
+      FR.Actions.push_back(
+          {cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   SAs.RegisterEHFrame, Frame)),
+           cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   SAs.DeregisterEHFrame, Frame))});
+
+    // We'll also need to make an extra allocation for the eh-frame wrapper call
+    // arguments.
+    Error FinalizeErr = Error::success();
+    if (auto Err = EPC.callSPSWrapper<
+            rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>(
+            SAs.Finalize, FinalizeErr, SAs.Instance, std::move(FR))) {
+      std::lock_guard<std::mutex> Lock(M);
+      this->ErrMsg = toString(std::move(Err));
+      dbgs() << "Serialization error: " << this->ErrMsg << "\n";
+      if (ErrMsg)
+        *ErrMsg = this->ErrMsg;
+      return true;
+    }
+    if (FinalizeErr) {
+      std::lock_guard<std::mutex> Lock(M);
+      this->ErrMsg = toString(std::move(FinalizeErr));
+      dbgs() << "Finalization error: " << this->ErrMsg << "\n";
+      if (ErrMsg)
+        *ErrMsg = this->ErrMsg;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Assign each host-side section allocation its executor address, packing them
+// sequentially (with per-section alignment) from NextAddr, and inform
+// RuntimeDyld of each mapping so relocations target the remote layout.
+void EPCGenericRTDyldMemoryManager::mapAllocsToRemoteAddrs(
+    RuntimeDyld &Dyld, std::vector<SectionAlloc> &Allocs,
+    ExecutorAddr NextAddr) {
+  for (auto &Alloc : Allocs) {
+    // Honor the section's alignment within the reserved range.
+    NextAddr.setValue(alignTo(NextAddr.getValue(), Alloc.Align));
+    LLVM_DEBUG({
+      dbgs() << "  " << static_cast<void *>(Alloc.Contents.get()) << " -> "
+             << format("0x%016" PRIx64, NextAddr.getValue()) << "\n";
+    });
+    Dyld.mapSectionAddress(reinterpret_cast<const void *>(alignAddr(
+                               Alloc.Contents.get(), Align(Alloc.Align))),
+                           NextAddr.getValue());
+    Alloc.RemoteAddr = NextAddr;
+    // Always advance past this section. (Guarding the advance on NextAddr
+    // being non-null would map every section to the same address and register
+    // overlapping mappings with RuntimeDyld.)
+    NextAddr += ExecutorAddrDiff(Alloc.Size);
+  }
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
new file mode 100644
index 000000000000..833be826f8ae
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
@@ -0,0 +1,427 @@
+//===------- EPCIndirectionUtils.cpp -- EPC based indirection APIs --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
+#include "llvm/Support/MathExtras.h"
+
+#include <future>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+// Friend-style access helper: exposes EPCIndirectionUtils' private
+// getIndirectStubs to the file-local EPCIndirectStubsManager below.
+class EPCIndirectionUtilsAccess {
+public:
+  using IndirectStubInfo = EPCIndirectionUtils::IndirectStubInfo;
+  using IndirectStubInfoVector = EPCIndirectionUtils::IndirectStubInfoVector;
+
+  static Expected<IndirectStubInfoVector>
+  getIndirectStubs(EPCIndirectionUtils &EPCIU, unsigned NumStubs) {
+    return EPCIU.getIndirectStubs(NumStubs);
+  };
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+namespace {
+
+// TrampolinePool backed by executor-process memory: trampolines are written
+// into remote pages allocated through the EPC's JITLink memory manager.
+class EPCTrampolinePool : public TrampolinePool {
+public:
+  EPCTrampolinePool(EPCIndirectionUtils &EPCIU);
+  // Releases all trampoline pages in the executor; call before destruction.
+  Error deallocatePool();
+
+protected:
+  // Allocates one more page of trampolines when the pool runs dry.
+  Error grow() override;
+
+  using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;
+
+  EPCIndirectionUtils &EPCIU;
+  unsigned TrampolineSize = 0;      // Size in bytes of one trampoline (ABI).
+  unsigned TrampolinesPerPage = 0;  // How many fit per executor page.
+  std::vector<FinalizedAlloc> TrampolineBlocks;  // Pages owned by this pool.
+};
+
+// IndirectStubsManager that services stub requests from the shared pool of
+// remote stubs owned by an EPCIndirectionUtils instance.
+class EPCIndirectStubsManager : public IndirectStubsManager,
+                                private EPCIndirectionUtilsAccess {
+public:
+  EPCIndirectStubsManager(EPCIndirectionUtils &EPCIU) : EPCIU(EPCIU) {}
+
+  Error deallocateStubs();
+
+  Error createStub(StringRef StubName, ExecutorAddr StubAddr,
+                   JITSymbolFlags StubFlags) override;
+
+  Error createStubs(const StubInitsMap &StubInits) override;
+
+  ExecutorSymbolDef findStub(StringRef Name, bool ExportedStubsOnly) override;
+
+  ExecutorSymbolDef findPointer(StringRef Name) override;
+
+  Error updatePointer(StringRef Name, ExecutorAddr NewAddr) override;
+
+private:
+  // Stub/pointer addresses plus the flags the stub was created with.
+  using StubInfo = std::pair<IndirectStubInfo, JITSymbolFlags>;
+
+  std::mutex ISMMutex;  // Guards StubInfos.
+  EPCIndirectionUtils &EPCIU;
+  StringMap<StubInfo> StubInfos;
+};
+
+// Cache ABI trampoline geometry up front; the pool itself starts empty and
+// grows on demand.
+EPCTrampolinePool::EPCTrampolinePool(EPCIndirectionUtils &EPCIU)
+    : EPCIU(EPCIU) {
+  auto &EPC = EPCIU.getExecutorProcessControl();
+  auto &ABI = EPCIU.getABISupport();
+
+  TrampolineSize = ABI.getTrampolineSize();
+  // One pointer per page is reserved (hence the subtraction) -- see the ABI's
+  // trampoline-page layout. TODO confirm against the ABI writer.
+  TrampolinesPerPage =
+      (EPC.getPageSize() - ABI.getPointerSize()) / TrampolineSize;
+}
+
+// Synchronously deallocate all trampoline pages: bridges the async deallocate
+// API with a promise/future pair and blocks on the result.
+Error EPCTrampolinePool::deallocatePool() {
+  std::promise<MSVCPError> DeallocResultP;
+  auto DeallocResultF = DeallocResultP.get_future();
+
+  EPCIU.getExecutorProcessControl().getMemMgr().deallocate(
+      std::move(TrampolineBlocks),
+      [&](Error Err) { DeallocResultP.set_value(std::move(Err)); });
+
+  return DeallocResultF.get();
+}
+
+// Allocate one read+exec page in the executor, fill it with trampolines that
+// jump to the resolver block, and add the new trampoline addresses to the
+// available list.
+Error EPCTrampolinePool::grow() {
+  using namespace jitlink;
+
+  assert(AvailableTrampolines.empty() &&
+         "Grow called with trampolines still available");
+
+  auto ResolverAddress = EPCIU.getResolverBlockAddress();
+  assert(ResolverAddress && "Resolver address can not be null");
+
+  auto &EPC = EPCIU.getExecutorProcessControl();
+  auto PageSize = EPC.getPageSize();
+  auto Alloc = SimpleSegmentAlloc::Create(
+      EPC.getMemMgr(), nullptr,
+      {{MemProt::Read | MemProt::Exec, {PageSize, Align(PageSize)}}});
+  if (!Alloc)
+    return Alloc.takeError();
+
+  unsigned NumTrampolines = TrampolinesPerPage;
+
+  // Write the trampoline code into host working memory; it is copied to the
+  // executor when the allocation is finalized below.
+  auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
+  EPCIU.getABISupport().writeTrampolines(
+      SegInfo.WorkingMem.data(), SegInfo.Addr, ResolverAddress, NumTrampolines);
+  for (unsigned I = 0; I < NumTrampolines; ++I)
+    AvailableTrampolines.push_back(SegInfo.Addr + (I * TrampolineSize));
+
+  auto FA = Alloc->finalize();
+  if (!FA)
+    return FA.takeError();
+
+  // Keep the finalized allocation alive so it can be freed in deallocatePool.
+  TrampolineBlocks.push_back(std::move(*FA));
+
+  return Error::success();
+}
+
+// Single-stub convenience wrapper over createStubs.
+Error EPCIndirectStubsManager::createStub(StringRef StubName,
+                                          ExecutorAddr StubAddr,
+                                          JITSymbolFlags StubFlags) {
+  StubInitsMap SIM;
+  SIM[StubName] = std::make_pair(StubAddr, StubFlags);
+  return createStubs(SIM);
+}
+
+// Grab one pooled stub per requested name, record the name->stub mapping,
+// then write each stub's initial pointer value into the executor via bulk
+// pointer-sized memory writes.
+Error EPCIndirectStubsManager::createStubs(const StubInitsMap &StubInits) {
+  auto AvailableStubInfos = getIndirectStubs(EPCIU, StubInits.size());
+  if (!AvailableStubInfos)
+    return AvailableStubInfos.takeError();
+
+  {
+    std::lock_guard<std::mutex> Lock(ISMMutex);
+    unsigned ASIdx = 0;
+    for (auto &SI : StubInits) {
+      auto &A = (*AvailableStubInfos)[ASIdx++];
+      StubInfos[SI.first()] = std::make_pair(A, SI.second.second);
+    }
+  }
+
+  // Note: both loops below walk StubInits in the same order as above, so
+  // ASIdx re-associates each init with the stub it was assigned.
+  auto &MemAccess = EPCIU.getExecutorProcessControl().getMemoryAccess();
+  switch (EPCIU.getABISupport().getPointerSize()) {
+  case 4: {
+    unsigned ASIdx = 0;
+    std::vector<tpctypes::UInt32Write> PtrUpdates;
+    for (auto &SI : StubInits)
+      PtrUpdates.push_back({(*AvailableStubInfos)[ASIdx++].PointerAddress,
+                            static_cast<uint32_t>(SI.second.first.getValue())});
+    return MemAccess.writeUInt32s(PtrUpdates);
+  }
+  case 8: {
+    unsigned ASIdx = 0;
+    std::vector<tpctypes::UInt64Write> PtrUpdates;
+    for (auto &SI : StubInits)
+      PtrUpdates.push_back({(*AvailableStubInfos)[ASIdx++].PointerAddress,
+                            static_cast<uint64_t>(SI.second.first.getValue())});
+    return MemAccess.writeUInt64s(PtrUpdates);
+  }
+  default:
+    return make_error<StringError>("Unsupported pointer size",
+                                   inconvertibleErrorCode());
+  }
+}
+
+// Look up a stub's executor address by name. Returns a null ExecutorSymbolDef
+// if unknown. NOTE(review): ExportedStubsOnly is currently ignored -- confirm
+// whether filtering on export flags is intended here.
+ExecutorSymbolDef EPCIndirectStubsManager::findStub(StringRef Name,
+                                                    bool ExportedStubsOnly) {
+  std::lock_guard<std::mutex> Lock(ISMMutex);
+  auto I = StubInfos.find(Name);
+  if (I == StubInfos.end())
+    return ExecutorSymbolDef();
+  return {I->second.first.StubAddress, I->second.second};
+}
+
+// Look up the executor address of the pointer slot backing a named stub.
+// Returns a null ExecutorSymbolDef if the name is unknown.
+ExecutorSymbolDef EPCIndirectStubsManager::findPointer(StringRef Name) {
+  std::lock_guard<std::mutex> Lock(ISMMutex);
+  auto I = StubInfos.find(Name);
+  if (I == StubInfos.end())
+    return ExecutorSymbolDef();
+  return {I->second.first.PointerAddress, I->second.second};
+}
+
+// Re-point an existing stub by writing NewAddr into its pointer slot in the
+// executor. The lock is only held for the map lookup; the remote write runs
+// unlocked.
+Error EPCIndirectStubsManager::updatePointer(StringRef Name,
+                                             ExecutorAddr NewAddr) {
+
+  ExecutorAddr PtrAddr;
+  {
+    std::lock_guard<std::mutex> Lock(ISMMutex);
+    auto I = StubInfos.find(Name);
+    if (I == StubInfos.end())
+      return make_error<StringError>("Unknown stub name",
+                                     inconvertibleErrorCode());
+    PtrAddr = I->second.first.PointerAddress;
+  }
+
+  auto &MemAccess = EPCIU.getExecutorProcessControl().getMemoryAccess();
+  switch (EPCIU.getABISupport().getPointerSize()) {
+  case 4: {
+    tpctypes::UInt32Write PUpdate(PtrAddr, NewAddr.getValue());
+    return MemAccess.writeUInt32s(PUpdate);
+  }
+  case 8: {
+    tpctypes::UInt64Write PUpdate(PtrAddr, NewAddr.getValue());
+    return MemAccess.writeUInt64s(PUpdate);
+  }
+  default:
+    return make_error<StringError>("Unsupported pointer size",
+                                   inconvertibleErrorCode());
+  }
+}
+
+} // end anonymous namespace.
+
+namespace llvm {
+namespace orc {
+
+EPCIndirectionUtils::ABISupport::~ABISupport() = default;
+
+// Factory: select the ABI support implementation matching the executor's
+// target triple. Unsupported architectures produce an error.
+Expected<std::unique_ptr<EPCIndirectionUtils>>
+EPCIndirectionUtils::Create(ExecutorProcessControl &EPC) {
+  const auto &TT = EPC.getTargetTriple();
+  switch (TT.getArch()) {
+  default:
+    return make_error<StringError>(
+        std::string("No EPCIndirectionUtils available for ") + TT.str(),
+        inconvertibleErrorCode());
+  case Triple::aarch64:
+  case Triple::aarch64_32:
+    return CreateWithABI<OrcAArch64>(EPC);
+
+  case Triple::x86:
+    return CreateWithABI<OrcI386>(EPC);
+
+  case Triple::loongarch64:
+    return CreateWithABI<OrcLoongArch64>(EPC);
+
+  case Triple::mips:
+    return CreateWithABI<OrcMips32Be>(EPC);
+
+  case Triple::mipsel:
+    return CreateWithABI<OrcMips32Le>(EPC);
+
+  case Triple::mips64:
+  case Triple::mips64el:
+    return CreateWithABI<OrcMips64>(EPC);
+
+  case Triple::riscv64:
+    return CreateWithABI<OrcRiscv64>(EPC);
+
+  case Triple::x86_64:
+    // Windows and SysV differ in calling convention for the resolver.
+    if (TT.getOS() == Triple::OSType::Win32)
+      return CreateWithABI<OrcX86_64_Win32>(EPC);
+    else
+      return CreateWithABI<OrcX86_64_SysV>(EPC);
+  }
+}
+
+// Release all executor-side resources (stub blocks, trampoline pool, resolver
+// block), joining any errors so none are silently dropped.
+Error EPCIndirectionUtils::cleanup() {
+
+  auto &MemMgr = EPC.getMemMgr();
+  auto Err = MemMgr.deallocate(std::move(IndirectStubAllocs));
+
+  if (TP)
+    Err = joinErrors(std::move(Err),
+                     static_cast<EPCTrampolinePool &>(*TP).deallocatePool());
+
+  if (ResolverBlock)
+    Err =
+        joinErrors(std::move(Err), MemMgr.deallocate(std::move(ResolverBlock)));
+
+  return Err;
+}
+
+// Allocate and finalize the resolver block in the executor: the code that
+// trampolines jump to, which calls back into ReentryFnAddr with
+// ReentryCtxAddr. Returns the block's executor address.
+Expected<ExecutorAddr>
+EPCIndirectionUtils::writeResolverBlock(ExecutorAddr ReentryFnAddr,
+                                        ExecutorAddr ReentryCtxAddr) {
+  using namespace jitlink;
+
+  assert(ABI && "ABI can not be null");
+  auto ResolverSize = ABI->getResolverCodeSize();
+
+  auto Alloc =
+      SimpleSegmentAlloc::Create(EPC.getMemMgr(), nullptr,
+                                 {{MemProt::Read | MemProt::Exec,
+                                   {ResolverSize, Align(EPC.getPageSize())}}});
+
+  if (!Alloc)
+    return Alloc.takeError();
+
+  // Write resolver code into host working memory; finalize() copies it to
+  // the executor and applies the RX protections.
+  auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
+  ResolverBlockAddr = SegInfo.Addr;
+  ABI->writeResolverCode(SegInfo.WorkingMem.data(), ResolverBlockAddr,
+                         ReentryFnAddr, ReentryCtxAddr);
+
+  auto FA = Alloc->finalize();
+  if (!FA)
+    return FA.takeError();
+
+  ResolverBlock = std::move(*FA);
+  return ResolverBlockAddr;
+}
+
+// Each manager shares this EPCIndirectionUtils' pooled remote stubs.
+std::unique_ptr<IndirectStubsManager>
+EPCIndirectionUtils::createIndirectStubsManager() {
+  return std::make_unique<EPCIndirectStubsManager>(*this);
+}
+
+// Lazily construct the shared trampoline pool on first use.
+// NOTE(review): no lock is taken here -- first call is presumably
+// single-threaded; confirm against callers.
+TrampolinePool &EPCIndirectionUtils::getTrampolinePool() {
+  if (!TP)
+    TP = std::make_unique<EPCTrampolinePool>(*this);
+  return *TP;
+}
+
+// One-shot construction of the LazyCallThroughManager backed by this
+// instance's trampoline pool.
+LazyCallThroughManager &EPCIndirectionUtils::createLazyCallThroughManager(
+    ExecutionSession &ES, ExecutorAddr ErrorHandlerAddr) {
+  assert(!LCTM &&
+         "createLazyCallThroughManager can not have been called before");
+  LCTM = std::make_unique<LazyCallThroughManager>(ES, ErrorHandlerAddr,
+                                                  &getTrampolinePool());
+  return *LCTM;
+}
+
+// Private constructor (use Create). The stub-size check guards the
+// page-granular stub block layout in getIndirectStubs.
+EPCIndirectionUtils::EPCIndirectionUtils(ExecutorProcessControl &EPC,
+                                         std::unique_ptr<ABISupport> ABI)
+    : EPC(EPC), ABI(std::move(ABI)) {
+  assert(this->ABI && "ABI can not be null");
+
+  assert(EPC.getPageSize() > getABISupport().getStubSize() &&
+         "Stubs larger than one page are not supported");
+}
+
+// Pop NumStubs stubs from the pool, growing it first if needed by allocating
+// paired stub (RX) and pointer (RW) segments in the executor and writing the
+// ABI's indirect-stub block into them.
+Expected<EPCIndirectionUtils::IndirectStubInfoVector>
+EPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
+  using namespace jitlink;
+
+  std::lock_guard<std::mutex> Lock(EPCUIMutex);
+
+  // If there aren't enough stubs available then allocate some more.
+  if (NumStubs > AvailableIndirectStubs.size()) {
+    auto NumStubsToAllocate = NumStubs;
+    auto PageSize = EPC.getPageSize();
+    // Round the stub block up to whole pages, then allocate as many stubs as
+    // actually fit in that rounded size.
+    auto StubBytes = alignTo(NumStubsToAllocate * ABI->getStubSize(), PageSize);
+    NumStubsToAllocate = StubBytes / ABI->getStubSize();
+    auto PtrBytes =
+        alignTo(NumStubsToAllocate * ABI->getPointerSize(), PageSize);
+
+    auto StubProt = MemProt::Read | MemProt::Exec;
+    auto PtrProt = MemProt::Read | MemProt::Write;
+
+    auto Alloc = SimpleSegmentAlloc::Create(
+        EPC.getMemMgr(), nullptr,
+        {{StubProt, {static_cast<size_t>(StubBytes), Align(PageSize)}},
+         {PtrProt, {static_cast<size_t>(PtrBytes), Align(PageSize)}}});
+
+    if (!Alloc)
+      return Alloc.takeError();
+
+    auto StubSeg = Alloc->getSegInfo(StubProt);
+    auto PtrSeg = Alloc->getSegInfo(PtrProt);
+
+    // Emit the stubs into host working memory; finalize() copies them to the
+    // executor.
+    ABI->writeIndirectStubsBlock(StubSeg.WorkingMem.data(), StubSeg.Addr,
+                                 PtrSeg.Addr, NumStubsToAllocate);
+
+    auto FA = Alloc->finalize();
+    if (!FA)
+      return FA.takeError();
+
+    IndirectStubAllocs.push_back(std::move(*FA));
+
+    // Record each (stub, pointer) address pair as available.
+    auto StubExecutorAddr = StubSeg.Addr;
+    auto PtrExecutorAddr = PtrSeg.Addr;
+    for (unsigned I = 0; I != NumStubsToAllocate; ++I) {
+      AvailableIndirectStubs.push_back(
+          IndirectStubInfo(StubExecutorAddr, PtrExecutorAddr));
+      StubExecutorAddr += ABI->getStubSize();
+      PtrExecutorAddr += ABI->getPointerSize();
+    }
+  }
+
+  assert(NumStubs <= AvailableIndirectStubs.size() &&
+         "Sufficient stubs should have been allocated above");
+
+  IndirectStubInfoVector Result;
+  while (NumStubs--) {
+    Result.push_back(AvailableIndirectStubs.back());
+    AvailableIndirectStubs.pop_back();
+  }
+
+  return std::move(Result);
+}
+
+// In-process reentry function: called (via the resolver block) when a
+// trampoline fires. Blocks until the LazyCallThroughManager resolves the
+// landing address for the trampoline, then returns it.
+static JITTargetAddress reentry(JITTargetAddress LCTMAddr,
+                                JITTargetAddress TrampolineAddr) {
+  auto &LCTM = *jitTargetAddressToPointer<LazyCallThroughManager *>(LCTMAddr);
+  std::promise<ExecutorAddr> LandingAddrP;
+  auto LandingAddrF = LandingAddrP.get_future();
+  LCTM.resolveTrampolineLandingAddress(
+      ExecutorAddr(TrampolineAddr),
+      [&](ExecutorAddr Addr) { LandingAddrP.set_value(Addr); });
+  return LandingAddrF.get().getValue();
+}
+
+// Convenience for in-process executors: point the resolver block at the local
+// reentry function with the LazyCallThroughManager as its context argument.
+Error setUpInProcessLCTMReentryViaEPCIU(EPCIndirectionUtils &EPCIU) {
+  auto &LCTM = EPCIU.getLazyCallThroughManager();
+  return EPCIU
+      .writeResolverBlock(ExecutorAddr::fromPtr(&reentry),
+                          ExecutorAddr::fromPtr(&LCTM))
+      .takeError();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
new file mode 100644
index 000000000000..c1a193f6a280
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -0,0 +1,614 @@
+//===---- ExecutionUtils.cpp - Utilities for executing functions in Orc ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/StringSaver.h"
+#include "llvm/Target/TargetMachine.h"
+#include <string>
+
+namespace llvm {
+namespace orc {
+
+// Iterator over an llvm.global_ctors / llvm.global_dtors initializer list.
+// End==true positions the iterator one past the last element; a null or
+// non-array GV yields an empty range (InitList == nullptr, I == 0).
+CtorDtorIterator::CtorDtorIterator(const GlobalVariable *GV, bool End)
+  : InitList(
+      GV ? dyn_cast_or_null<ConstantArray>(GV->getInitializer()) : nullptr),
+    I((InitList && End) ? InitList->getNumOperands() : 0) {
+}
+
+// Iterators are comparable only when they refer to the same init list.
+bool CtorDtorIterator::operator==(const CtorDtorIterator &Other) const {
+  assert(InitList == Other.InitList && "Incomparable iterators.");
+  return I == Other.I;
+}
+
+// Defined in terms of operator== (same init-list precondition applies).
+bool CtorDtorIterator::operator!=(const CtorDtorIterator &Other) const {
+  return !(*this == Other);
+}
+
+// Pre-increment: advance to the next ctor/dtor entry.
+CtorDtorIterator& CtorDtorIterator::operator++() {
+  ++I;
+  return *this;
+}
+
+// Post-increment: advance, returning the pre-increment position.
+CtorDtorIterator CtorDtorIterator::operator++(int) {
+  CtorDtorIterator Temp = *this;
+  ++I;
+  return Temp;
+}
+
+// Decode the current {priority, function, data} struct. The function operand
+// may be wrapped in casts; Func is left null if no Function is found. Data is
+// only surfaced when it is a GlobalValue.
+CtorDtorIterator::Element CtorDtorIterator::operator*() const {
+  ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
+  assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
+
+  Constant *FuncC = CS->getOperand(1);
+  Function *Func = nullptr;
+
+  // Extract function pointer, pulling off any casts.
+  while (FuncC) {
+    if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
+      Func = F;
+      break;
+    } else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
+      if (CE->isCast())
+        FuncC = CE->getOperand(0);
+      else
+        break;
+    } else {
+      // This isn't anything we recognize. Bail out with Func left set to null.
+      break;
+    }
+  }
+
+  auto *Priority = cast<ConstantInt>(CS->getOperand(0));
+  Value *Data = CS->getNumOperands() == 3 ? CS->getOperand(2) : nullptr;
+  if (Data && !isa<GlobalValue>(Data))
+    Data = nullptr;
+  return Element(Priority->getZExtValue(), Func, Data);
+}
+
+// Range over M's llvm.global_ctors entries (empty if the global is absent).
+iterator_range<CtorDtorIterator> getConstructors(const Module &M) {
+  const GlobalVariable *CtorsList = M.getNamedGlobal("llvm.global_ctors");
+  return make_range(CtorDtorIterator(CtorsList, false),
+                    CtorDtorIterator(CtorsList, true));
+}
+
+// Range over M's llvm.global_dtors entries (empty if the global is absent).
+iterator_range<CtorDtorIterator> getDestructors(const Module &M) {
+  const GlobalVariable *DtorsList = M.getNamedGlobal("llvm.global_dtors");
+  return make_range(CtorDtorIterator(DtorsList, false),
+                    CtorDtorIterator(DtorsList, true));
+}
+
+// True if GV is a definition that participates in static initialization:
+// the ctor/dtor lists, or (on MachO) Objective-C class-list / selector-ref
+// sections.
+bool StaticInitGVIterator::isStaticInitGlobal(GlobalValue &GV) {
+  if (GV.isDeclaration())
+    return false;
+
+  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
+                       GV.getName() == "llvm.global_dtors"))
+    return true;
+
+  if (ObjFmt == Triple::MachO) {
+    // FIXME: These section checks are too strict: We should match first and
+    // second word split by comma.
+    if (GV.hasSection() &&
+        (GV.getSection().starts_with("__DATA,__objc_classlist") ||
+         GV.getSection().starts_with("__DATA,__objc_selrefs")))
+      return true;
+  }
+
+  return false;
+}
+
+// Queue a range of ctors/dtors for execution by run(): mangle each function
+// name and bucket it by priority. Local-linkage functions are promoted to
+// external/hidden so they are findable through the JIT symbol tables.
+void CtorDtorRunner::add(iterator_range<CtorDtorIterator> CtorDtors) {
+  if (CtorDtors.empty())
+    return;
+
+  MangleAndInterner Mangle(
+      JD.getExecutionSession(),
+      (*CtorDtors.begin()).Func->getDataLayout());
+
+  for (auto CtorDtor : CtorDtors) {
+    assert(CtorDtor.Func && CtorDtor.Func->hasName() &&
+           "Ctor/Dtor function must be named to be runnable under the JIT");
+
+    // FIXME: Maybe use a symbol promoter here instead.
+    if (CtorDtor.Func->hasLocalLinkage()) {
+      CtorDtor.Func->setLinkage(GlobalValue::ExternalLinkage);
+      CtorDtor.Func->setVisibility(GlobalValue::HiddenVisibility);
+    }
+
+    if (CtorDtor.Data && cast<GlobalValue>(CtorDtor.Data)->isDeclaration()) {
+      // NOTE(review): this prints unconditionally (not LLVM_DEBUG-guarded);
+      // confirm whether that is intended.
+      dbgs() << "  Skipping because why now?\n";
+      continue;
+    }
+
+    CtorDtorsByPriority[CtorDtor.Priority].push_back(
+        Mangle(CtorDtor.Func->getName()));
+  }
+}
+
+// Look up all queued ctor/dtor symbols in one batch, then invoke them in
+// priority order (map iteration is ascending by priority key). The queue is
+// cleared only on success, so a failed run can be retried.
+Error CtorDtorRunner::run() {
+  using CtorDtorTy = void (*)();
+
+  SymbolLookupSet LookupSet;
+  for (auto &KV : CtorDtorsByPriority)
+    for (auto &Name : KV.second)
+      LookupSet.add(Name);
+  assert(!LookupSet.containsDuplicates() &&
+         "Ctor/Dtor list contains duplicates");
+
+  auto &ES = JD.getExecutionSession();
+  if (auto CtorDtorMap = ES.lookup(
+          makeJITDylibSearchOrder(&JD, JITDylibLookupFlags::MatchAllSymbols),
+          std::move(LookupSet))) {
+    for (auto &KV : CtorDtorsByPriority) {
+      for (auto &Name : KV.second) {
+        assert(CtorDtorMap->count(Name) && "No entry for Name");
+        auto CtorDtor = (*CtorDtorMap)[Name].getAddress().toPtr<CtorDtorTy>();
+        CtorDtor();
+      }
+    }
+    CtorDtorsByPriority.clear();
+    return Error::success();
+  } else
+    return CtorDtorMap.takeError();
+}
+
+// Invoke every destructor registered via the __cxa_atexit override, in
+// registration order, then clear the list.
+void LocalCXXRuntimeOverridesBase::runDestructors() {
+  auto& CXXDestructorDataPairs = DSOHandleOverride;
+  for (auto &P : CXXDestructorDataPairs)
+    P.first(P.second);
+  CXXDestructorDataPairs.clear();
+}
+
+// __cxa_atexit replacement: DSOHandle is actually a pointer to the overriding
+// instance's destructor list (installed as __dso_handle in enable()), so the
+// (destructor, arg) pair is simply appended there. Always returns 0 (success).
+int LocalCXXRuntimeOverridesBase::CXAAtExitOverride(DestructorPtr Destructor,
+                                                    void *Arg,
+                                                    void *DSOHandle) {
+  auto& CXXDestructorDataPairs =
+    *reinterpret_cast<CXXDestructorDataPairList*>(DSOHandle);
+  CXXDestructorDataPairs.push_back(std::make_pair(Destructor, Arg));
+  return 0;
+}
+
+// Define __dso_handle and __cxa_atexit in JD as absolute symbols pointing at
+// this instance's override state, interposing the C++ runtime's versions.
+Error LocalCXXRuntimeOverrides::enable(JITDylib &JD,
+                                        MangleAndInterner &Mangle) {
+  SymbolMap RuntimeInterposes;
+  RuntimeInterposes[Mangle("__dso_handle")] = {
+      ExecutorAddr::fromPtr(&DSOHandleOverride), JITSymbolFlags::Exported};
+  RuntimeInterposes[Mangle("__cxa_atexit")] = {
+      ExecutorAddr::fromPtr(&CXAAtExitOverride), JITSymbolFlags::Exported};
+
+  return JD.define(absoluteSymbols(std::move(RuntimeInterposes)));
+}
+
+// Record an at-exit callback against its owning DSO handle (thread-safe).
+void ItaniumCXAAtExitSupport::registerAtExit(void (*F)(void *), void *Ctx,
+                                             void *DSOHandle) {
+  std::lock_guard<std::mutex> Lock(AtExitsMutex);
+  AtExitRecords[DSOHandle].push_back({F, Ctx});
+}
+
+// Run (and discard) all at-exit callbacks registered for DSOHandle, newest
+// first, matching Itanium ABI reverse-registration order. The records are
+// moved out under the lock so the callbacks themselves run unlocked.
+void ItaniumCXAAtExitSupport::runAtExits(void *DSOHandle) {
+  std::vector<AtExitRecord> AtExitsToRun;
+
+  {
+    std::lock_guard<std::mutex> Lock(AtExitsMutex);
+    auto I = AtExitRecords.find(DSOHandle);
+    if (I != AtExitRecords.end()) {
+      AtExitsToRun = std::move(I->second);
+      AtExitRecords.erase(I);
+    }
+  }
+
+  while (!AtExitsToRun.empty()) {
+    AtExitsToRun.back().F(AtExitsToRun.back().Ctx);
+    AtExitsToRun.pop_back();
+  }
+}
+
+// Wrap an already-opened dynamic library. GlobalPrefix ('\0' for none) is
+// stripped from symbol names before dlsym-style lookup; Allow optionally
+// filters which symbols this generator will provide.
+DynamicLibrarySearchGenerator::DynamicLibrarySearchGenerator(
+    sys::DynamicLibrary Dylib, char GlobalPrefix, SymbolPredicate Allow,
+    AddAbsoluteSymbolsFn AddAbsoluteSymbols)
+    : Dylib(std::move(Dylib)), Allow(std::move(Allow)),
+      AddAbsoluteSymbols(std::move(AddAbsoluteSymbols)),
+      GlobalPrefix(GlobalPrefix) {}
+
+// Open FileName as a permanent dynamic library (nullptr loads the host
+// process itself) and build a generator over it.
+Expected<std::unique_ptr<DynamicLibrarySearchGenerator>>
+DynamicLibrarySearchGenerator::Load(const char *FileName, char GlobalPrefix,
+                                    SymbolPredicate Allow,
+                                    AddAbsoluteSymbolsFn AddAbsoluteSymbols) {
+  std::string ErrMsg;
+  auto Lib = sys::DynamicLibrary::getPermanentLibrary(FileName, &ErrMsg);
+  if (!Lib.isValid())
+    return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+  return std::make_unique<DynamicLibrarySearchGenerator>(
+      std::move(Lib), GlobalPrefix, std::move(Allow),
+      std::move(AddAbsoluteSymbols));
+}
+
+// Definition generator hook: for each requested symbol that passes the Allow
+// predicate and prefix check, look it up in the dynamic library (with the
+// global prefix stripped) and define any hits as absolute symbols in JD.
+// Symbols not found are simply left undefined for other generators.
+Error DynamicLibrarySearchGenerator::tryToGenerate(
+    LookupState &LS, LookupKind K, JITDylib &JD,
+    JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+  orc::SymbolMap NewSymbols;
+
+  bool HasGlobalPrefix = (GlobalPrefix != '\0');
+
+  for (auto &KV : Symbols) {
+    auto &Name = KV.first;
+
+    if ((*Name).empty())
+      continue;
+
+    if (Allow && !Allow(Name))
+      continue;
+
+    if (HasGlobalPrefix && (*Name).front() != GlobalPrefix)
+      continue;
+
+    // Strip the leading global prefix (if any) before the dlsym-style lookup.
+    std::string Tmp((*Name).data() + HasGlobalPrefix,
+                    (*Name).size() - HasGlobalPrefix);
+    if (void *P = Dylib.getAddressOfSymbol(Tmp.c_str()))
+      NewSymbols[Name] = {ExecutorAddr::fromPtr(P), JITSymbolFlags::Exported};
+  }
+
+  if (NewSymbols.empty())
+    return Error::success();
+
+  if (AddAbsoluteSymbols)
+    return AddAbsoluteSymbols(JD, std::move(NewSymbols));
+  return JD.define(absoluteSymbols(std::move(NewSymbols)));
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Load(
+ ObjectLayer &L, const char *FileName,
+ GetObjectFileInterface GetObjFileInterface) {
+
+ auto B = object::createBinary(FileName);
+ if (!B)
+ return createFileError(FileName, B.takeError());
+
+ // If this is a regular archive then create an instance from it.
+ if (isa<object::Archive>(B->getBinary())) {
+ auto [Archive, ArchiveBuffer] = B->takeBinary();
+ return Create(L, std::move(ArchiveBuffer),
+ std::unique_ptr<object::Archive>(
+ static_cast<object::Archive *>(Archive.release())),
+ std::move(GetObjFileInterface));
+ }
+
+ // If this is a universal binary then search for a slice matching the given
+ // Triple.
+ if (auto *UB = dyn_cast<object::MachOUniversalBinary>(B->getBinary())) {
+
+ const auto &TT = L.getExecutionSession().getTargetTriple();
+
+ auto SliceRange = getSliceRangeForArch(*UB, TT);
+ if (!SliceRange)
+ return SliceRange.takeError();
+
+ auto SliceBuffer = MemoryBuffer::getFileSlice(FileName, SliceRange->second,
+ SliceRange->first);
+ if (!SliceBuffer)
+ return make_error<StringError>(
+ Twine("Could not create buffer for ") + TT.str() + " slice of " +
+ FileName + ": [ " + formatv("{0:x}", SliceRange->first) + " .. " +
+ formatv("{0:x}", SliceRange->first + SliceRange->second) + ": " +
+ SliceBuffer.getError().message(),
+ SliceBuffer.getError());
+
+ return Create(L, std::move(*SliceBuffer), std::move(GetObjFileInterface));
+ }
+
+ return make_error<StringError>(Twine("Unrecognized file type for ") +
+ FileName,
+ inconvertibleErrorCode());
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Create(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ std::unique_ptr<object::Archive> Archive,
+ GetObjectFileInterface GetObjFileInterface) {
+
+ Error Err = Error::success();
+
+ std::unique_ptr<StaticLibraryDefinitionGenerator> ADG(
+ new StaticLibraryDefinitionGenerator(
+ L, std::move(ArchiveBuffer), std::move(Archive),
+ std::move(GetObjFileInterface), Err));
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(ADG);
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Create(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ GetObjectFileInterface GetObjFileInterface) {
+
+ auto B = object::createBinary(ArchiveBuffer->getMemBufferRef());
+ if (!B)
+ return B.takeError();
+
+ // If this is a regular archive then create an instance from it.
+ if (isa<object::Archive>(*B))
+ return Create(L, std::move(ArchiveBuffer),
+ std::unique_ptr<object::Archive>(
+ static_cast<object::Archive *>(B->release())),
+ std::move(GetObjFileInterface));
+
+ // If this is a universal binary then search for a slice matching the given
+ // Triple.
+ if (auto *UB = dyn_cast<object::MachOUniversalBinary>(B->get())) {
+
+ const auto &TT = L.getExecutionSession().getTargetTriple();
+
+ auto SliceRange = getSliceRangeForArch(*UB, TT);
+ if (!SliceRange)
+ return SliceRange.takeError();
+
+ MemoryBufferRef SliceRef(
+ StringRef(ArchiveBuffer->getBufferStart() + SliceRange->first,
+ SliceRange->second),
+ ArchiveBuffer->getBufferIdentifier());
+
+ auto Archive = object::Archive::create(SliceRef);
+ if (!Archive)
+ return Archive.takeError();
+
+ return Create(L, std::move(ArchiveBuffer), std::move(*Archive),
+ std::move(GetObjFileInterface));
+ }
+
+ return make_error<StringError>(Twine("Unrecognized file type for ") +
+ ArchiveBuffer->getBufferIdentifier(),
+ inconvertibleErrorCode());
+}
+
+Error StaticLibraryDefinitionGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+ // Don't materialize symbols from static archives unless this is a static
+ // lookup.
+ if (K != LookupKind::Static)
+ return Error::success();
+
+ // Bail out early if we've already freed the archive.
+ if (!Archive)
+ return Error::success();
+
+ DenseSet<std::pair<StringRef, StringRef>> ChildBufferInfos;
+
+ for (const auto &KV : Symbols) {
+ const auto &Name = KV.first;
+ if (!ObjectFilesMap.count(Name))
+ continue;
+ auto ChildBuffer = ObjectFilesMap[Name];
+ ChildBufferInfos.insert(
+ {ChildBuffer.getBuffer(), ChildBuffer.getBufferIdentifier()});
+ }
+
+ for (auto ChildBufferInfo : ChildBufferInfos) {
+ MemoryBufferRef ChildBufferRef(ChildBufferInfo.first,
+ ChildBufferInfo.second);
+
+ auto I = GetObjFileInterface(L.getExecutionSession(), ChildBufferRef);
+ if (!I)
+ return I.takeError();
+
+ if (auto Err = L.add(JD, MemoryBuffer::getMemBuffer(ChildBufferRef, false),
+ std::move(*I)))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Error StaticLibraryDefinitionGenerator::buildObjectFilesMap() {
+ DenseMap<uint64_t, MemoryBufferRef> MemoryBuffers;
+ DenseSet<uint64_t> Visited;
+ DenseSet<uint64_t> Excluded;
+ StringSaver FileNames(ObjFileNameStorage);
+ for (auto &S : Archive->symbols()) {
+ StringRef SymName = S.getName();
+ auto Member = S.getMember();
+ if (!Member)
+ return Member.takeError();
+ auto DataOffset = Member->getDataOffset();
+ if (!Visited.count(DataOffset)) {
+ Visited.insert(DataOffset);
+ auto Child = Member->getAsBinary();
+ if (!Child)
+ return Child.takeError();
+ if ((*Child)->isCOFFImportFile()) {
+ ImportedDynamicLibraries.insert((*Child)->getFileName().str());
+ Excluded.insert(DataOffset);
+ continue;
+ }
+
+ // Give members of the archive a name that contains the archive path so
+ // that they can be differentiated from a member with the same name in a
+ // different archive. This also ensures initializer symbol names will be
+ // unique within a JITDylib.
+ StringRef FullName = FileNames.save(Archive->getFileName() + "(" +
+ (*Child)->getFileName() + ")");
+ MemoryBufferRef MemBuffer((*Child)->getMemoryBufferRef().getBuffer(),
+ FullName);
+
+ MemoryBuffers[DataOffset] = MemBuffer;
+ }
+ if (!Excluded.count(DataOffset))
+ ObjectFilesMap[L.getExecutionSession().intern(SymName)] =
+ MemoryBuffers[DataOffset];
+ }
+
+ return Error::success();
+}
+
+Expected<std::pair<size_t, size_t>>
+StaticLibraryDefinitionGenerator::getSliceRangeForArch(
+ object::MachOUniversalBinary &UB, const Triple &TT) {
+
+ for (const auto &Obj : UB.objects()) {
+ auto ObjTT = Obj.getTriple();
+ if (ObjTT.getArch() == TT.getArch() &&
+ ObjTT.getSubArch() == TT.getSubArch() &&
+ (TT.getVendor() == Triple::UnknownVendor ||
+ ObjTT.getVendor() == TT.getVendor())) {
+ // We found a match. Return the range for the slice.
+ return std::make_pair(Obj.getOffset(), Obj.getSize());
+ }
+ }
+
+ return make_error<StringError>(Twine("Universal binary ") + UB.getFileName() +
+ " does not contain a slice for " +
+ TT.str(),
+ inconvertibleErrorCode());
+}
+
+StaticLibraryDefinitionGenerator::StaticLibraryDefinitionGenerator(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ std::unique_ptr<object::Archive> Archive,
+ GetObjectFileInterface GetObjFileInterface, Error &Err)
+ : L(L), GetObjFileInterface(std::move(GetObjFileInterface)),
+ ArchiveBuffer(std::move(ArchiveBuffer)), Archive(std::move(Archive)) {
+ ErrorAsOutParameter _(&Err);
+ if (!this->GetObjFileInterface)
+ this->GetObjFileInterface = getObjectFileInterface;
+ if (!Err)
+ Err = buildObjectFilesMap();
+}
+
+std::unique_ptr<DLLImportDefinitionGenerator>
+DLLImportDefinitionGenerator::Create(ExecutionSession &ES,
+ ObjectLinkingLayer &L) {
+ return std::unique_ptr<DLLImportDefinitionGenerator>(
+ new DLLImportDefinitionGenerator(ES, L));
+}
+
+Error DLLImportDefinitionGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+ JITDylibSearchOrder LinkOrder;
+ JD.withLinkOrderDo([&](const JITDylibSearchOrder &LO) {
+ LinkOrder.reserve(LO.size());
+ for (auto &KV : LO) {
+ if (KV.first == &JD)
+ continue;
+ LinkOrder.push_back(KV);
+ }
+ });
+
+ // FIXME: if a regular symbol name starts with __imp_ we have to look up
+ // both the __imp_ and stripped names and use the lookup information to
+ // resolve the real symbol name.
+ SymbolLookupSet LookupSet;
+ DenseMap<StringRef, SymbolLookupFlags> ToLookUpSymbols;
+ for (auto &KV : Symbols) {
+ StringRef Deinterned = *KV.first;
+ if (Deinterned.starts_with(getImpPrefix()))
+ Deinterned = Deinterned.drop_front(StringRef(getImpPrefix()).size());
+ // Don't degrade the required state
+ if (ToLookUpSymbols.count(Deinterned) &&
+ ToLookUpSymbols[Deinterned] == SymbolLookupFlags::RequiredSymbol)
+ continue;
+ ToLookUpSymbols[Deinterned] = KV.second;
+ }
+
+ for (auto &KV : ToLookUpSymbols)
+ LookupSet.add(ES.intern(KV.first), KV.second);
+
+ auto Resolved =
+ ES.lookup(LinkOrder, LookupSet, LookupKind::DLSym, SymbolState::Resolved);
+ if (!Resolved)
+ return Resolved.takeError();
+
+ auto G = createStubsGraph(*Resolved);
+ if (!G)
+ return G.takeError();
+ return L.add(JD, std::move(*G));
+}
+
+Expected<unsigned>
+DLLImportDefinitionGenerator::getTargetPointerSize(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ return 8;
+ default:
+ return make_error<StringError>(
+ "architecture unsupported by DLLImportDefinitionGenerator",
+ inconvertibleErrorCode());
+ }
+}
+
+Expected<llvm::endianness>
+DLLImportDefinitionGenerator::getEndianness(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ return llvm::endianness::little;
+ default:
+ return make_error<StringError>(
+ "architecture unsupported by DLLImportDefinitionGenerator",
+ inconvertibleErrorCode());
+ }
+}
+
+Expected<std::unique_ptr<jitlink::LinkGraph>>
+DLLImportDefinitionGenerator::createStubsGraph(const SymbolMap &Resolved) {
+ Triple TT = ES.getTargetTriple();
+ auto PointerSize = getTargetPointerSize(TT);
+ if (!PointerSize)
+ return PointerSize.takeError();
+ auto Endianness = getEndianness(TT);
+ if (!Endianness)
+ return Endianness.takeError();
+
+ auto G = std::make_unique<jitlink::LinkGraph>(
+ "<DLLIMPORT_STUBS>", TT, *PointerSize, *Endianness,
+ jitlink::getGenericEdgeKindName);
+ jitlink::Section &Sec =
+ G->createSection(getSectionName(), MemProt::Read | MemProt::Exec);
+
+ for (auto &KV : Resolved) {
+ jitlink::Symbol &Target = G->addAbsoluteSymbol(
+ *KV.first, KV.second.getAddress(), *PointerSize,
+ jitlink::Linkage::Strong, jitlink::Scope::Local, false);
+
+ // Create __imp_ symbol
+ jitlink::Symbol &Ptr =
+ jitlink::x86_64::createAnonymousPointer(*G, Sec, &Target);
+ auto NameCopy = G->allocateContent(Twine(getImpPrefix()) + *KV.first);
+ StringRef NameCopyRef = StringRef(NameCopy.data(), NameCopy.size());
+ Ptr.setName(NameCopyRef);
+ Ptr.setLinkage(jitlink::Linkage::Strong);
+ Ptr.setScope(jitlink::Scope::Default);
+
+ // Create PLT stub
+ // FIXME: check PLT stub of data symbol is not accessed
+ jitlink::Block &StubBlock =
+ jitlink::x86_64::createPointerJumpStubBlock(*G, Sec, Ptr);
+ G->addDefinedSymbol(StubBlock, 0, *KV.first, StubBlock.getSize(),
+ jitlink::Linkage::Strong, jitlink::Scope::Default, true,
+ false);
+ }
+
+ return std::move(G);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp
new file mode 100644
index 000000000000..0df7c4f25eb8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp
@@ -0,0 +1,221 @@
+//===---- ExecutorProcessControl.cpp -- Executor process control APIs -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+#include "llvm/TargetParser/Host.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+ExecutorProcessControl::MemoryAccess::~MemoryAccess() = default;
+
+ExecutorProcessControl::~ExecutorProcessControl() = default;
+
+SelfExecutorProcessControl::SelfExecutorProcessControl(
+ std::shared_ptr<SymbolStringPool> SSP, std::unique_ptr<TaskDispatcher> D,
+ Triple TargetTriple, unsigned PageSize,
+ std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr)
+ : ExecutorProcessControl(std::move(SSP), std::move(D)),
+ InProcessMemoryAccess(TargetTriple.isArch64Bit()) {
+
+ OwnedMemMgr = std::move(MemMgr);
+ if (!OwnedMemMgr)
+ OwnedMemMgr = std::make_unique<jitlink::InProcessMemoryManager>(
+ sys::Process::getPageSizeEstimate());
+
+ this->TargetTriple = std::move(TargetTriple);
+ this->PageSize = PageSize;
+ this->MemMgr = OwnedMemMgr.get();
+ this->MemAccess = this;
+ this->JDI = {ExecutorAddr::fromPtr(jitDispatchViaWrapperFunctionManager),
+ ExecutorAddr::fromPtr(this)};
+ if (this->TargetTriple.isOSBinFormatMachO())
+ GlobalManglingPrefix = '_';
+
+ this->BootstrapSymbols[rt::RegisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper);
+ this->BootstrapSymbols[rt::DeregisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper);
+}
+
+Expected<std::unique_ptr<SelfExecutorProcessControl>>
+SelfExecutorProcessControl::Create(
+ std::shared_ptr<SymbolStringPool> SSP,
+ std::unique_ptr<TaskDispatcher> D,
+ std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr) {
+
+ if (!SSP)
+ SSP = std::make_shared<SymbolStringPool>();
+
+ if (!D)
+ D = std::make_unique<InPlaceTaskDispatcher>();
+
+ auto PageSize = sys::Process::getPageSize();
+ if (!PageSize)
+ return PageSize.takeError();
+
+ Triple TT(sys::getProcessTriple());
+
+ return std::make_unique<SelfExecutorProcessControl>(
+ std::move(SSP), std::move(D), std::move(TT), *PageSize,
+ std::move(MemMgr));
+}
+
+Expected<tpctypes::DylibHandle>
+SelfExecutorProcessControl::loadDylib(const char *DylibPath) {
+ std::string ErrMsg;
+ auto Dylib = sys::DynamicLibrary::getPermanentLibrary(DylibPath, &ErrMsg);
+ if (!Dylib.isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+ return ExecutorAddr::fromPtr(Dylib.getOSSpecificHandle());
+}
+
+void SelfExecutorProcessControl::lookupSymbolsAsync(
+ ArrayRef<LookupRequest> Request,
+ ExecutorProcessControl::SymbolLookupCompleteFn Complete) {
+ std::vector<tpctypes::LookupResult> R;
+
+ for (auto &Elem : Request) {
+ sys::DynamicLibrary Dylib(Elem.Handle.toPtr<void *>());
+ R.push_back(std::vector<ExecutorSymbolDef>());
+ for (auto &KV : Elem.Symbols) {
+ auto &Sym = KV.first;
+ std::string Tmp((*Sym).data() + !!GlobalManglingPrefix,
+ (*Sym).size() - !!GlobalManglingPrefix);
+ void *Addr = Dylib.getAddressOfSymbol(Tmp.c_str());
+ if (!Addr && KV.second == SymbolLookupFlags::RequiredSymbol) {
+ // FIXME: Collect all failing symbols before erroring out.
+ SymbolNameVector MissingSymbols;
+ MissingSymbols.push_back(Sym);
+ return Complete(
+ make_error<SymbolsNotFound>(SSP, std::move(MissingSymbols)));
+ }
+ // FIXME: determine accurate JITSymbolFlags.
+ R.back().push_back(
+ {ExecutorAddr::fromPtr(Addr), JITSymbolFlags::Exported});
+ }
+ }
+
+ Complete(std::move(R));
+}
+
+Expected<int32_t>
+SelfExecutorProcessControl::runAsMain(ExecutorAddr MainFnAddr,
+ ArrayRef<std::string> Args) {
+ using MainTy = int (*)(int, char *[]);
+ return orc::runAsMain(MainFnAddr.toPtr<MainTy>(), Args);
+}
+
+Expected<int32_t>
+SelfExecutorProcessControl::runAsVoidFunction(ExecutorAddr VoidFnAddr) {
+ using VoidTy = int (*)();
+ return orc::runAsVoidFunction(VoidFnAddr.toPtr<VoidTy>());
+}
+
+Expected<int32_t>
+SelfExecutorProcessControl::runAsIntFunction(ExecutorAddr IntFnAddr, int Arg) {
+ using IntTy = int (*)(int);
+ return orc::runAsIntFunction(IntFnAddr.toPtr<IntTy>(), Arg);
+}
+
+void SelfExecutorProcessControl::callWrapperAsync(ExecutorAddr WrapperFnAddr,
+ IncomingWFRHandler SendResult,
+ ArrayRef<char> ArgBuffer) {
+ using WrapperFnTy =
+ shared::CWrapperFunctionResult (*)(const char *Data, size_t Size);
+ auto *WrapperFn = WrapperFnAddr.toPtr<WrapperFnTy>();
+ SendResult(WrapperFn(ArgBuffer.data(), ArgBuffer.size()));
+}
+
+Error SelfExecutorProcessControl::disconnect() {
+ D->shutdown();
+ return Error::success();
+}
+
+void InProcessMemoryAccess::writeUInt8sAsync(ArrayRef<tpctypes::UInt8Write> Ws,
+ WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint8_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void InProcessMemoryAccess::writeUInt16sAsync(
+ ArrayRef<tpctypes::UInt16Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint16_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void InProcessMemoryAccess::writeUInt32sAsync(
+ ArrayRef<tpctypes::UInt32Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint32_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void InProcessMemoryAccess::writeUInt64sAsync(
+ ArrayRef<tpctypes::UInt64Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint64_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void InProcessMemoryAccess::writeBuffersAsync(
+ ArrayRef<tpctypes::BufferWrite> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ memcpy(W.Addr.toPtr<char *>(), W.Buffer.data(), W.Buffer.size());
+ OnWriteComplete(Error::success());
+}
+
+void InProcessMemoryAccess::writePointersAsync(
+ ArrayRef<tpctypes::PointerWrite> Ws, WriteResultFn OnWriteComplete) {
+ if (IsArch64Bit) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint64_t *>() = W.Value.getValue();
+ } else {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint32_t *>() = static_cast<uint32_t>(W.Value.getValue());
+ }
+
+ OnWriteComplete(Error::success());
+}
+
+shared::CWrapperFunctionResult
+SelfExecutorProcessControl::jitDispatchViaWrapperFunctionManager(
+ void *Ctx, const void *FnTag, const char *Data, size_t Size) {
+
+ LLVM_DEBUG({
+ dbgs() << "jit-dispatch call with tag " << FnTag << " and " << Size
+ << " byte payload.\n";
+ });
+
+ std::promise<shared::WrapperFunctionResult> ResultP;
+ auto ResultF = ResultP.get_future();
+ static_cast<SelfExecutorProcessControl *>(Ctx)
+ ->getExecutionSession()
+ .runJITDispatchHandler(
+ [ResultP = std::move(ResultP)](
+ shared::WrapperFunctionResult Result) mutable {
+ ResultP.set_value(std::move(Result));
+ },
+ ExecutorAddr::fromPtr(FnTag), {Data, Size});
+
+ return ResultF.get().release();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
new file mode 100644
index 000000000000..69aba1fff59a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
@@ -0,0 +1,48 @@
+//===--------------- IRCompileLayer.cpp - IR Compiling Layer --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+
+namespace llvm {
+namespace orc {
+
+IRCompileLayer::IRCompiler::~IRCompiler() = default;
+
+IRCompileLayer::IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
+ std::unique_ptr<IRCompiler> Compile)
+ : IRLayer(ES, ManglingOpts), BaseLayer(BaseLayer),
+ Compile(std::move(Compile)) {
+ ManglingOpts = &this->Compile->getManglingOptions();
+}
+
+void IRCompileLayer::setNotifyCompiled(NotifyCompiledFunction NotifyCompiled) {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ this->NotifyCompiled = std::move(NotifyCompiled);
+}
+
+void IRCompileLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto Obj = TSM.withModuleDo(*Compile)) {
+ {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ if (NotifyCompiled)
+ NotifyCompiled(*R, std::move(TSM));
+ else
+ TSM = ThreadSafeModule();
+ }
+ BaseLayer.emit(std::move(R), std::move(*Obj));
+ } else {
+ R->failMaterialization();
+ getExecutionSession().reportError(Obj.takeError());
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
new file mode 100644
index 000000000000..d5b11349277c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
@@ -0,0 +1,33 @@
+//===-------------- IRTransformLayer.cpp - IR Transform Layer -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+IRTransformLayer::IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+ TransformFunction Transform)
+ : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+ Transform(std::move(Transform)) {}
+
+void IRTransformLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto TransformedTSM = Transform(std::move(TSM), *R))
+ BaseLayer.emit(std::move(R), std::move(*TransformedTSM));
+ else {
+ R->failMaterialization();
+ getExecutionSession().reportError(TransformedTSM.takeError());
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
new file mode 100644
index 000000000000..2fadb248b414
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -0,0 +1,423 @@
+//===---- IndirectionUtils.cpp - Utilities for call indirection in Orc ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/Support/Format.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <sstream>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+class CompileCallbackMaterializationUnit : public orc::MaterializationUnit {
+public:
+ using CompileFunction = JITCompileCallbackManager::CompileFunction;
+
+ CompileCallbackMaterializationUnit(SymbolStringPtr Name,
+ CompileFunction Compile)
+ : MaterializationUnit(Interface(
+ SymbolFlagsMap({{Name, JITSymbolFlags::Exported}}), nullptr)),
+ Name(std::move(Name)), Compile(std::move(Compile)) {}
+
+ StringRef getName() const override { return "<Compile Callbacks>"; }
+
+private:
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ SymbolMap Result;
+ Result[Name] = {Compile(), JITSymbolFlags::Exported};
+ // No dependencies, so these calls cannot fail.
+ cantFail(R->notifyResolved(Result));
+ cantFail(R->notifyEmitted({}));
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+ llvm_unreachable("Discard should never occur on a LMU?");
+ }
+
+ SymbolStringPtr Name;
+ CompileFunction Compile;
+};
+
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+TrampolinePool::~TrampolinePool() = default;
+void IndirectStubsManager::anchor() {}
+
+Expected<ExecutorAddr>
+JITCompileCallbackManager::getCompileCallback(CompileFunction Compile) {
+ if (auto TrampolineAddr = TP->getTrampoline()) {
+ auto CallbackName =
+ ES.intern(std::string("cc") + std::to_string(++NextCallbackId));
+
+ std::lock_guard<std::mutex> Lock(CCMgrMutex);
+ AddrToSymbol[*TrampolineAddr] = CallbackName;
+ cantFail(
+ CallbacksJD.define(std::make_unique<CompileCallbackMaterializationUnit>(
+ std::move(CallbackName), std::move(Compile))));
+ return *TrampolineAddr;
+ } else
+ return TrampolineAddr.takeError();
+}
+
+ExecutorAddr
+JITCompileCallbackManager::executeCompileCallback(ExecutorAddr TrampolineAddr) {
+ SymbolStringPtr Name;
+
+ {
+ std::unique_lock<std::mutex> Lock(CCMgrMutex);
+ auto I = AddrToSymbol.find(TrampolineAddr);
+
+ // If this address is not associated with a compile callback then report an
+ // error to the execution session and return ErrorHandlerAddress to the
+ // callee.
+ if (I == AddrToSymbol.end()) {
+ Lock.unlock();
+ ES.reportError(
+ make_error<StringError>("No compile callback for trampoline at " +
+ formatv("{0:x}", TrampolineAddr),
+ inconvertibleErrorCode()));
+ return ErrorHandlerAddress;
+ } else
+ Name = I->second;
+ }
+
+ if (auto Sym =
+ ES.lookup(makeJITDylibSearchOrder(
+ &CallbacksJD, JITDylibLookupFlags::MatchAllSymbols),
+ Name))
+ return Sym->getAddress();
+ else {
+ llvm::dbgs() << "Didn't find callback.\n";
+ // If anything goes wrong materializing Sym then report it to the session
+ // and return the ErrorHandlerAddress;
+ ES.reportError(Sym.takeError());
+ return ErrorHandlerAddress;
+ }
+}
+
+Expected<std::unique_ptr<JITCompileCallbackManager>>
+createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
+ ExecutorAddr ErrorHandlerAddress) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+ case Triple::aarch64:
+ case Triple::aarch64_32: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcAArch64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcI386> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::loongarch64: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcLoongArch64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Be> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ case Triple::mipsel: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Le> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips64:
+ case Triple::mips64el: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::riscv64: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcRiscv64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86_64: {
+ if (T.getOS() == Triple::OSType::Win32) {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_Win32> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ } else {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_SysV> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ }
+
+ }
+}
+
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T) {
+ switch (T.getArch()) {
+ default:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcGenericABI>>();
+ };
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcAArch64>>();
+ };
+
+ case Triple::x86:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcI386>>();
+ };
+
+ case Triple::loongarch64:
+ return []() {
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcLoongArch64>>();
+ };
+
+ case Triple::mips:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Be>>();
+ };
+
+ case Triple::mipsel:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Le>>();
+ };
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips64>>();
+ };
+
+ case Triple::riscv64:
+ return []() {
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcRiscv64>>();
+ };
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32) {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_Win32>>();
+ };
+ } else {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_SysV>>();
+ };
+ }
+
+ }
+}
+
+Constant* createIRTypedAddress(FunctionType &FT, ExecutorAddr Addr) {
+ Constant *AddrIntVal =
+ ConstantInt::get(Type::getInt64Ty(FT.getContext()), Addr.getValue());
+ Constant *AddrPtrVal =
+ ConstantExpr::getIntToPtr(AddrIntVal, PointerType::get(&FT, 0));
+ return AddrPtrVal;
+}
+
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer) {
+ auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
+ Initializer, Name, nullptr,
+ GlobalValue::NotThreadLocal, 0, true);
+ IP->setVisibility(GlobalValue::HiddenVisibility);
+ return IP;
+}
+
+void makeStub(Function &F, Value &ImplPointer) {
+ assert(F.isDeclaration() && "Can't turn a definition into a stub.");
+ assert(F.getParent() && "Function isn't in a module.");
+ Module &M = *F.getParent();
+ BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
+ IRBuilder<> Builder(EntryBlock);
+ LoadInst *ImplAddr = Builder.CreateLoad(F.getType(), &ImplPointer);
+ std::vector<Value*> CallArgs;
+ for (auto &A : F.args())
+ CallArgs.push_back(&A);
+ CallInst *Call = Builder.CreateCall(F.getFunctionType(), ImplAddr, CallArgs);
+ Call->setTailCall();
+ Call->setAttributes(F.getAttributes());
+ if (F.getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+}
+
+std::vector<GlobalValue *> SymbolLinkagePromoter::operator()(Module &M) {
+ std::vector<GlobalValue *> PromotedGlobals;
+
+ for (auto &GV : M.global_values()) {
+ bool Promoted = true;
+
+ // Rename if necessary.
+ if (!GV.hasName())
+ GV.setName("__orc_anon." + Twine(NextId++));
+ else if (GV.getName().starts_with("\01L"))
+ GV.setName("__" + GV.getName().substr(1) + "." + Twine(NextId++));
+ else if (GV.hasLocalLinkage())
+ GV.setName("__orc_lcl." + GV.getName() + "." + Twine(NextId++));
+ else
+ Promoted = false;
+
+ if (GV.hasLocalLinkage()) {
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+ GV.setVisibility(GlobalValue::HiddenVisibility);
+ Promoted = true;
+ }
+ GV.setUnnamedAddr(GlobalValue::UnnamedAddr::None);
+
+ if (Promoted)
+ PromotedGlobals.push_back(&GV);
+ }
+
+ return PromotedGlobals;
+}
+
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap) {
+ Function *NewF =
+ Function::Create(cast<FunctionType>(F.getValueType()),
+ F.getLinkage(), F.getName(), &Dst);
+ NewF->copyAttributesFrom(&F);
+
+ if (VMap) {
+ (*VMap)[&F] = NewF;
+ auto NewArgI = NewF->arg_begin();
+ for (auto ArgI = F.arg_begin(), ArgE = F.arg_end(); ArgI != ArgE;
+ ++ArgI, ++NewArgI)
+ (*VMap)[&*ArgI] = &*NewArgI;
+ }
+
+ return NewF;
+}
+
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap) {
+ GlobalVariable *NewGV = new GlobalVariable(
+ Dst, GV.getValueType(), GV.isConstant(),
+ GV.getLinkage(), nullptr, GV.getName(), nullptr,
+ GV.getThreadLocalMode(), GV.getType()->getAddressSpace());
+ NewGV->copyAttributesFrom(&GV);
+ if (VMap)
+ (*VMap)[&GV] = NewGV;
+ return NewGV;
+}
+
+GlobalAlias* cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap) {
+ assert(OrigA.getAliasee() && "Original alias doesn't have an aliasee?");
+ auto *NewA = GlobalAlias::create(OrigA.getValueType(),
+ OrigA.getType()->getPointerAddressSpace(),
+ OrigA.getLinkage(), OrigA.getName(), &Dst);
+ NewA->copyAttributesFrom(&OrigA);
+ VMap[&OrigA] = NewA;
+ return NewA;
+}
+
+Error addFunctionPointerRelocationsToCurrentSymbol(jitlink::Symbol &Sym,
+ jitlink::LinkGraph &G,
+ MCDisassembler &Disassembler,
+ MCInstrAnalysis &MIA) {
+ // AArch64 appears to already come with the necessary relocations. Among other
+ // architectures, only x86_64 is currently implemented here.
+ if (G.getTargetTriple().getArch() != Triple::x86_64)
+ return Error::success();
+
+ raw_null_ostream CommentStream;
+ auto &STI = Disassembler.getSubtargetInfo();
+
+ // Determine the function bounds
+ auto &B = Sym.getBlock();
+ assert(!B.isZeroFill() && "expected content block");
+ auto SymAddress = Sym.getAddress();
+ auto SymStartInBlock =
+ (const uint8_t *)B.getContent().data() + Sym.getOffset();
+ auto SymSize = Sym.getSize() ? Sym.getSize() : B.getSize() - Sym.getOffset();
+ auto Content = ArrayRef(SymStartInBlock, SymSize);
+
+ LLVM_DEBUG(dbgs() << "Adding self-relocations to " << Sym.getName() << "\n");
+
+ SmallDenseSet<uintptr_t, 8> ExistingRelocations;
+ for (auto &E : B.edges()) {
+ if (E.isRelocation())
+ ExistingRelocations.insert(E.getOffset());
+ }
+
+ size_t I = 0;
+ while (I < Content.size()) {
+ MCInst Instr;
+ uint64_t InstrSize = 0;
+ uint64_t InstrStart = SymAddress.getValue() + I;
+ auto DecodeStatus = Disassembler.getInstruction(
+ Instr, InstrSize, Content.drop_front(I), InstrStart, CommentStream);
+ if (DecodeStatus != MCDisassembler::Success) {
+ LLVM_DEBUG(dbgs() << "Aborting due to disassembly failure at address "
+ << InstrStart);
+ return make_error<StringError>(
+ formatv("failed to disassemble at address {0:x16}", InstrStart),
+ inconvertibleErrorCode());
+ }
+ // Advance to the next instruction.
+ I += InstrSize;
+
+ // Check for a PC-relative address equal to the symbol itself.
+ auto PCRelAddr =
+ MIA.evaluateMemoryOperandAddress(Instr, &STI, InstrStart, InstrSize);
+ if (!PCRelAddr || *PCRelAddr != SymAddress.getValue())
+ continue;
+
+ auto RelocOffInInstr =
+ MIA.getMemoryOperandRelocationOffset(Instr, InstrSize);
+ if (!RelocOffInInstr || InstrSize - *RelocOffInInstr != 4) {
+ LLVM_DEBUG(dbgs() << "Skipping unknown self-relocation at "
+ << InstrStart);
+ continue;
+ }
+
+ auto RelocOffInBlock = orc::ExecutorAddr(InstrStart) + *RelocOffInInstr -
+ SymAddress + Sym.getOffset();
+ if (ExistingRelocations.contains(RelocOffInBlock))
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Adding delta32 self-relocation at " << InstrStart);
+ B.addEdge(jitlink::x86_64::Delta32, RelocOffInBlock, Sym, /*Addend=*/-4);
+ }
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
new file mode 100644
index 000000000000..8d4e79c7d8af
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
@@ -0,0 +1,147 @@
+//===----- JITTargetMachineBuilder.cpp - Build TargetMachines for JIT -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+
+namespace llvm {
+namespace orc {
+
+JITTargetMachineBuilder::JITTargetMachineBuilder(Triple TT)
+ : TT(std::move(TT)) {
+ Options.EmulatedTLS = true;
+ Options.UseInitArray = true;
+}
+
+Expected<JITTargetMachineBuilder> JITTargetMachineBuilder::detectHost() {
+ JITTargetMachineBuilder TMBuilder((Triple(sys::getProcessTriple())));
+
+ // Retrieve host CPU name and sub-target features and add them to builder.
+ // Relocation model, code model and codegen opt level are kept to default
+ // values.
+ for (const auto &Feature : llvm::sys::getHostCPUFeatures())
+ TMBuilder.getFeatures().AddFeature(Feature.first(), Feature.second);
+
+ TMBuilder.setCPU(std::string(llvm::sys::getHostCPUName()));
+
+ return TMBuilder;
+}
+
+Expected<std::unique_ptr<TargetMachine>>
+JITTargetMachineBuilder::createTargetMachine() {
+
+ std::string ErrMsg;
+ auto *TheTarget = TargetRegistry::lookupTarget(TT.getTriple(), ErrMsg);
+ if (!TheTarget)
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+
+ if (!TheTarget->hasJIT())
+ return make_error<StringError>("Target has no JIT support",
+ inconvertibleErrorCode());
+
+ auto *TM =
+ TheTarget->createTargetMachine(TT.getTriple(), CPU, Features.getString(),
+ Options, RM, CM, OptLevel, /*JIT*/ true);
+ if (!TM)
+ return make_error<StringError>("Could not allocate target machine",
+ inconvertibleErrorCode());
+
+ return std::unique_ptr<TargetMachine>(TM);
+}
+
+JITTargetMachineBuilder &JITTargetMachineBuilder::addFeatures(
+ const std::vector<std::string> &FeatureVec) {
+ for (const auto &F : FeatureVec)
+ Features.AddFeature(F);
+ return *this;
+}
+
+#ifndef NDEBUG
+void JITTargetMachineBuilderPrinter::print(raw_ostream &OS) const {
+ OS << Indent << "{\n"
+ << Indent << " Triple = \"" << JTMB.TT.str() << "\"\n"
+ << Indent << " CPU = \"" << JTMB.CPU << "\"\n"
+ << Indent << " Features = \"" << JTMB.Features.getString() << "\"\n"
+ << Indent << " Options = <not-printable>\n"
+ << Indent << " Relocation Model = ";
+
+ if (JTMB.RM) {
+ switch (*JTMB.RM) {
+ case Reloc::Static:
+ OS << "Static";
+ break;
+ case Reloc::PIC_:
+ OS << "PIC_";
+ break;
+ case Reloc::DynamicNoPIC:
+ OS << "DynamicNoPIC";
+ break;
+ case Reloc::ROPI:
+ OS << "ROPI";
+ break;
+ case Reloc::RWPI:
+ OS << "RWPI";
+ break;
+ case Reloc::ROPI_RWPI:
+ OS << "ROPI_RWPI";
+ break;
+ }
+ } else
+ OS << "unspecified (will use target default)";
+
+ OS << "\n"
+ << Indent << " Code Model = ";
+
+ if (JTMB.CM) {
+ switch (*JTMB.CM) {
+ case CodeModel::Tiny:
+ OS << "Tiny";
+ break;
+ case CodeModel::Small:
+ OS << "Small";
+ break;
+ case CodeModel::Kernel:
+ OS << "Kernel";
+ break;
+ case CodeModel::Medium:
+ OS << "Medium";
+ break;
+ case CodeModel::Large:
+ OS << "Large";
+ break;
+ }
+ } else
+ OS << "unspecified (will use target default)";
+
+ OS << "\n"
+ << Indent << " Optimization Level = ";
+ switch (JTMB.OptLevel) {
+ case CodeGenOptLevel::None:
+ OS << "None";
+ break;
+ case CodeGenOptLevel::Less:
+ OS << "Less";
+ break;
+ case CodeGenOptLevel::Default:
+ OS << "Default";
+ break;
+ case CodeGenOptLevel::Aggressive:
+ OS << "Aggressive";
+ break;
+ }
+
+ OS << "\n" << Indent << "}\n";
+}
+#endif // NDEBUG
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
new file mode 100644
index 000000000000..c053ef51411c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -0,0 +1,1288 @@
+//===--------- LLJIT.cpp - An ORC-based JIT for compiling LLVM IR ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/COFFPlatform.h"
+#include "llvm/ExecutionEngine/Orc/ELFNixPlatform.h"
+#include "llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h"
+#include "llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h"
+#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+/// Adds helper function decls and wrapper functions that call the helper with
+/// some additional prefix arguments.
+///
+/// E.g. For wrapper "foo" with type i8(i8, i64), helper "bar", and prefix
+/// args i32 4 and i16 12345, this function will add:
+///
+/// declare i8 @bar(i32, i16, i8, i64)
+///
+/// define i8 @foo(i8, i64) {
+/// entry:
+/// %2 = call i8 @bar(i32 4, i16 12345, i8 %0, i64 %1)
+/// ret i8 %2
+/// }
+///
+Function *addHelperAndWrapper(Module &M, StringRef WrapperName,
+ FunctionType *WrapperFnType,
+ GlobalValue::VisibilityTypes WrapperVisibility,
+ StringRef HelperName,
+ ArrayRef<Value *> HelperPrefixArgs) {
+ std::vector<Type *> HelperArgTypes;
+ for (auto *Arg : HelperPrefixArgs)
+ HelperArgTypes.push_back(Arg->getType());
+ for (auto *T : WrapperFnType->params())
+ HelperArgTypes.push_back(T);
+ auto *HelperFnType =
+ FunctionType::get(WrapperFnType->getReturnType(), HelperArgTypes, false);
+ auto *HelperFn = Function::Create(HelperFnType, GlobalValue::ExternalLinkage,
+ HelperName, M);
+
+ auto *WrapperFn = Function::Create(
+ WrapperFnType, GlobalValue::ExternalLinkage, WrapperName, M);
+ WrapperFn->setVisibility(WrapperVisibility);
+
+ auto *EntryBlock = BasicBlock::Create(M.getContext(), "entry", WrapperFn);
+ IRBuilder<> IB(EntryBlock);
+
+ std::vector<Value *> HelperArgs;
+ for (auto *Arg : HelperPrefixArgs)
+ HelperArgs.push_back(Arg);
+ for (auto &Arg : WrapperFn->args())
+ HelperArgs.push_back(&Arg);
+ auto *HelperResult = IB.CreateCall(HelperFn, HelperArgs);
+ if (HelperFn->getReturnType()->isVoidTy())
+ IB.CreateRetVoid();
+ else
+ IB.CreateRet(HelperResult);
+
+ return WrapperFn;
+}
+
+class GenericLLVMIRPlatformSupport;
+
+/// orc::Platform component of Generic LLVM IR Platform support.
+/// Just forwards calls to the GenericLLVMIRPlatformSupport class below.
+class GenericLLVMIRPlatform : public Platform {
+public:
+ GenericLLVMIRPlatform(GenericLLVMIRPlatformSupport &S) : S(S) {}
+ Error setupJITDylib(JITDylib &JD) override;
+ Error teardownJITDylib(JITDylib &JD) override;
+ Error notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) override;
+ Error notifyRemoving(ResourceTracker &RT) override {
+ // Noop -- Nothing to do (yet).
+ return Error::success();
+ }
+
+private:
+ GenericLLVMIRPlatformSupport &S;
+};
+
+/// This transform parses llvm.global_ctors to produce a single initialization
+/// function for the module, records the function, then deletes
+/// llvm.global_ctors.
+class GlobalCtorDtorScraper {
+public:
+ GlobalCtorDtorScraper(GenericLLVMIRPlatformSupport &PS,
+ StringRef InitFunctionPrefix,
+ StringRef DeInitFunctionPrefix)
+ : PS(PS), InitFunctionPrefix(InitFunctionPrefix),
+ DeInitFunctionPrefix(DeInitFunctionPrefix) {}
+ Expected<ThreadSafeModule> operator()(ThreadSafeModule TSM,
+ MaterializationResponsibility &R);
+
+private:
+ GenericLLVMIRPlatformSupport &PS;
+ StringRef InitFunctionPrefix;
+ StringRef DeInitFunctionPrefix;
+};
+
+/// Generic IR Platform Support
+///
+/// Scrapes llvm.global_ctors and llvm.global_dtors and replaces them with
+/// specially named 'init' and 'deinit'. Injects definitions / interposes for
+/// some runtime API, including __cxa_atexit, dlopen, and dlclose.
+class GenericLLVMIRPlatformSupport : public LLJIT::PlatformSupport {
+public:
+ GenericLLVMIRPlatformSupport(LLJIT &J, JITDylib &PlatformJD)
+ : J(J), InitFunctionPrefix(J.mangle("__orc_init_func.")),
+ DeInitFunctionPrefix(J.mangle("__orc_deinit_func.")) {
+
+ getExecutionSession().setPlatform(
+ std::make_unique<GenericLLVMIRPlatform>(*this));
+
+ setInitTransform(J, GlobalCtorDtorScraper(*this, InitFunctionPrefix,
+ DeInitFunctionPrefix));
+
+ SymbolMap StdInterposes;
+
+ StdInterposes[J.mangleAndIntern("__lljit.platform_support_instance")] = {
+ ExecutorAddr::fromPtr(this), JITSymbolFlags::Exported};
+ StdInterposes[J.mangleAndIntern("__lljit.cxa_atexit_helper")] = {
+ ExecutorAddr::fromPtr(registerCxaAtExitHelper), JITSymbolFlags()};
+
+ cantFail(PlatformJD.define(absoluteSymbols(std::move(StdInterposes))));
+ cantFail(setupJITDylib(PlatformJD));
+ cantFail(J.addIRModule(PlatformJD, createPlatformRuntimeModule()));
+ }
+
+ ExecutionSession &getExecutionSession() { return J.getExecutionSession(); }
+
+ /// Adds a module that defines the __dso_handle global.
+ Error setupJITDylib(JITDylib &JD) {
+
+ // Add per-jitdylib standard interposes.
+ SymbolMap PerJDInterposes;
+ PerJDInterposes[J.mangleAndIntern("__lljit.run_atexits_helper")] = {
+ ExecutorAddr::fromPtr(runAtExitsHelper), JITSymbolFlags()};
+ PerJDInterposes[J.mangleAndIntern("__lljit.atexit_helper")] = {
+ ExecutorAddr::fromPtr(registerAtExitHelper), JITSymbolFlags()};
+ cantFail(JD.define(absoluteSymbols(std::move(PerJDInterposes))));
+
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *Int64Ty = Type::getInt64Ty(*Ctx);
+ auto *DSOHandle = new GlobalVariable(
+ *M, Int64Ty, true, GlobalValue::ExternalLinkage,
+ ConstantInt::get(Int64Ty, reinterpret_cast<uintptr_t>(&JD)),
+ "__dso_handle");
+ DSOHandle->setVisibility(GlobalValue::DefaultVisibility);
+ DSOHandle->setInitializer(
+ ConstantInt::get(Int64Ty, ExecutorAddr::fromPtr(&JD).getValue()));
+
+ auto *GenericIRPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.GenericLLJITIRPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, GenericIRPlatformSupportTy, true, GlobalValue::ExternalLinkage,
+ nullptr, "__lljit.platform_support_instance");
+
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ addHelperAndWrapper(
+ *M, "__lljit_run_atexits", FunctionType::get(VoidTy, {}, false),
+ GlobalValue::HiddenVisibility, "__lljit.run_atexits_helper",
+ {PlatformInstanceDecl, DSOHandle});
+
+ auto *IntTy = Type::getIntNTy(*Ctx, sizeof(int) * CHAR_BIT);
+ auto *AtExitCallbackTy = FunctionType::get(VoidTy, {}, false);
+ auto *AtExitCallbackPtrTy = PointerType::getUnqual(AtExitCallbackTy);
+ addHelperAndWrapper(*M, "atexit",
+ FunctionType::get(IntTy, {AtExitCallbackPtrTy}, false),
+ GlobalValue::HiddenVisibility, "__lljit.atexit_helper",
+ {PlatformInstanceDecl, DSOHandle});
+
+ return J.addIRModule(JD, ThreadSafeModule(std::move(M), std::move(Ctx)));
+ }
+
+ Error notifyAdding(ResourceTracker &RT, const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ if (auto &InitSym = MU.getInitializerSymbol())
+ InitSymbols[&JD].add(InitSym, SymbolLookupFlags::WeaklyReferencedSymbol);
+ else {
+ // If there's no identified init symbol attached, but there is a symbol
+ // with the GenericIRPlatform::InitFunctionPrefix, then treat that as
+ // an init function. Add the symbol to both the InitSymbols map (which
+ // will trigger a lookup to materialize the module) and the InitFunctions
+ // map (which holds the names of the symbols to execute).
+ for (auto &KV : MU.getSymbols())
+ if ((*KV.first).starts_with(InitFunctionPrefix)) {
+ InitSymbols[&JD].add(KV.first,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ InitFunctions[&JD].add(KV.first);
+ } else if ((*KV.first).starts_with(DeInitFunctionPrefix)) {
+ DeInitFunctions[&JD].add(KV.first);
+ }
+ }
+ return Error::success();
+ }
+
+ Error initialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport getting initializers to run\n";
+ });
+ if (auto Initializers = getInitializers(JD)) {
+ LLVM_DEBUG(
+ { dbgs() << "GenericLLVMIRPlatformSupport running initializers\n"; });
+ for (auto InitFnAddr : *Initializers) {
+ LLVM_DEBUG({
+ dbgs() << " Running init " << formatv("{0:x16}", InitFnAddr)
+ << "...\n";
+ });
+ auto *InitFn = InitFnAddr.toPtr<void (*)()>();
+ InitFn();
+ }
+ } else
+ return Initializers.takeError();
+ return Error::success();
+ }
+
+ Error deinitialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport getting deinitializers to run\n";
+ });
+ if (auto Deinitializers = getDeinitializers(JD)) {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport running deinitializers\n";
+ });
+ for (auto DeinitFnAddr : *Deinitializers) {
+ LLVM_DEBUG({
+ dbgs() << " Running deinit " << formatv("{0:x16}", DeinitFnAddr)
+ << "...\n";
+ });
+ auto *DeinitFn = DeinitFnAddr.toPtr<void (*)()>();
+ DeinitFn();
+ }
+ } else
+ return Deinitializers.takeError();
+
+ return Error::success();
+ }
+
+ void registerInitFunc(JITDylib &JD, SymbolStringPtr InitName) {
+ getExecutionSession().runSessionLocked([&]() {
+ InitFunctions[&JD].add(InitName);
+ });
+ }
+
+ void registerDeInitFunc(JITDylib &JD, SymbolStringPtr DeInitName) {
+ getExecutionSession().runSessionLocked(
+ [&]() { DeInitFunctions[&JD].add(DeInitName); });
+ }
+
+private:
+ Expected<std::vector<ExecutorAddr>> getInitializers(JITDylib &JD) {
+ if (auto Err = issueInitLookups(JD))
+ return std::move(Err);
+
+ DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ if (auto Err = getExecutionSession().runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto IFItr = InitFunctions.find(NextJD.get());
+ if (IFItr != InitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(IFItr->second);
+ InitFunctions.erase(IFItr);
+ }
+ }
+ return Error::success();
+ }))
+ return std::move(Err);
+
+ LLVM_DEBUG({
+ dbgs() << "JITDylib init order is [ ";
+ for (auto &JD : llvm::reverse(DFSLinkOrder))
+ dbgs() << "\"" << JD->getName() << "\" ";
+ dbgs() << "]\n";
+ dbgs() << "Looking up init functions:\n";
+ for (auto &KV : LookupSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ auto &ES = getExecutionSession();
+ auto LookupResult = Platform::lookupInitSymbols(ES, LookupSymbols);
+
+ if (!LookupResult)
+ return LookupResult.takeError();
+
+ std::vector<ExecutorAddr> Initializers;
+ while (!DFSLinkOrder.empty()) {
+ auto &NextJD = *DFSLinkOrder.back();
+ DFSLinkOrder.pop_back();
+ auto InitsItr = LookupResult->find(&NextJD);
+ if (InitsItr == LookupResult->end())
+ continue;
+ for (auto &KV : InitsItr->second)
+ Initializers.push_back(KV.second.getAddress());
+ }
+
+ return Initializers;
+ }
+
+ Expected<std::vector<ExecutorAddr>> getDeinitializers(JITDylib &JD) {
+ auto &ES = getExecutionSession();
+
+ auto LLJITRunAtExits = J.mangleAndIntern("__lljit_run_atexits");
+
+ DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ if (auto Err = ES.runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto &JDLookupSymbols = LookupSymbols[NextJD.get()];
+ auto DIFItr = DeInitFunctions.find(NextJD.get());
+ if (DIFItr != DeInitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(DIFItr->second);
+ DeInitFunctions.erase(DIFItr);
+ }
+ JDLookupSymbols.add(LLJITRunAtExits,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+ return Error::success();
+ }))
+ return std::move(Err);
+
+ LLVM_DEBUG({
+ dbgs() << "JITDylib deinit order is [ ";
+ for (auto &JD : DFSLinkOrder)
+ dbgs() << "\"" << JD->getName() << "\" ";
+ dbgs() << "]\n";
+ dbgs() << "Looking up deinit functions:\n";
+ for (auto &KV : LookupSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ auto LookupResult = Platform::lookupInitSymbols(ES, LookupSymbols);
+
+ if (!LookupResult)
+ return LookupResult.takeError();
+
+ std::vector<ExecutorAddr> DeInitializers;
+ for (auto &NextJD : DFSLinkOrder) {
+ auto DeInitsItr = LookupResult->find(NextJD.get());
+ assert(DeInitsItr != LookupResult->end() &&
+ "Every JD should have at least __lljit_run_atexits");
+
+ auto RunAtExitsItr = DeInitsItr->second.find(LLJITRunAtExits);
+ if (RunAtExitsItr != DeInitsItr->second.end())
+ DeInitializers.push_back(RunAtExitsItr->second.getAddress());
+
+ for (auto &KV : DeInitsItr->second)
+ if (KV.first != LLJITRunAtExits)
+ DeInitializers.push_back(KV.second.getAddress());
+ }
+
+ return DeInitializers;
+ }
+
+ /// Issue lookups for all init symbols required to initialize JD (and any
+ /// JITDylibs that it depends on).
+ Error issueInitLookups(JITDylib &JD) {
+ DenseMap<JITDylib *, SymbolLookupSet> RequiredInitSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ if (auto Err = getExecutionSession().runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto ISItr = InitSymbols.find(NextJD.get());
+ if (ISItr != InitSymbols.end()) {
+ RequiredInitSymbols[NextJD.get()] = std::move(ISItr->second);
+ InitSymbols.erase(ISItr);
+ }
+ }
+ return Error::success();
+ }))
+ return Err;
+
+ return Platform::lookupInitSymbols(getExecutionSession(),
+ RequiredInitSymbols)
+ .takeError();
+ }
+
+ static void registerCxaAtExitHelper(void *Self, void (*F)(void *), void *Ctx,
+ void *DSOHandle) {
+ LLVM_DEBUG({
+ dbgs() << "Registering cxa atexit function " << (void *)F << " for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.registerAtExit(
+ F, Ctx, DSOHandle);
+ }
+
+ static void registerAtExitHelper(void *Self, void *DSOHandle, void (*F)()) {
+ LLVM_DEBUG({
+ dbgs() << "Registering atexit function " << (void *)F << " for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.registerAtExit(
+ reinterpret_cast<void (*)(void *)>(F), nullptr, DSOHandle);
+ }
+
+ static void runAtExitsHelper(void *Self, void *DSOHandle) {
+ LLVM_DEBUG({
+ dbgs() << "Running atexit functions for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.runAtExits(
+ DSOHandle);
+ }
+
+ // Constructs an LLVM IR module containing platform runtime globals,
+ // functions, and interposes.
+ ThreadSafeModule createPlatformRuntimeModule() {
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *GenericIRPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.GenericLLJITIRPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, GenericIRPlatformSupportTy, true, GlobalValue::ExternalLinkage,
+ nullptr, "__lljit.platform_support_instance");
+
+ auto *Int8Ty = Type::getInt8Ty(*Ctx);
+ auto *IntTy = Type::getIntNTy(*Ctx, sizeof(int) * CHAR_BIT);
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ auto *BytePtrTy = PointerType::getUnqual(Int8Ty);
+ auto *CxaAtExitCallbackTy = FunctionType::get(VoidTy, {BytePtrTy}, false);
+ auto *CxaAtExitCallbackPtrTy = PointerType::getUnqual(CxaAtExitCallbackTy);
+
+ addHelperAndWrapper(
+ *M, "__cxa_atexit",
+ FunctionType::get(IntTy, {CxaAtExitCallbackPtrTy, BytePtrTy, BytePtrTy},
+ false),
+ GlobalValue::DefaultVisibility, "__lljit.cxa_atexit_helper",
+ {PlatformInstanceDecl});
+
+ return ThreadSafeModule(std::move(M), std::move(Ctx));
+ }
+
+ LLJIT &J;
+ std::string InitFunctionPrefix;
+ std::string DeInitFunctionPrefix;
+ DenseMap<JITDylib *, SymbolLookupSet> InitSymbols;
+ DenseMap<JITDylib *, SymbolLookupSet> InitFunctions;
+ DenseMap<JITDylib *, SymbolLookupSet> DeInitFunctions;
+ ItaniumCXAAtExitSupport AtExitMgr;
+};
+
+Error GenericLLVMIRPlatform::setupJITDylib(JITDylib &JD) {
+ return S.setupJITDylib(JD);
+}
+
+Error GenericLLVMIRPlatform::teardownJITDylib(JITDylib &JD) {
+ return Error::success();
+}
+
+Error GenericLLVMIRPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ return S.notifyAdding(RT, MU);
+}
+
+Expected<ThreadSafeModule>
+GlobalCtorDtorScraper::operator()(ThreadSafeModule TSM,
+ MaterializationResponsibility &R) {
+ auto Err = TSM.withModuleDo([&](Module &M) -> Error {
+ auto &Ctx = M.getContext();
+ auto *GlobalCtors = M.getNamedGlobal("llvm.global_ctors");
+ auto *GlobalDtors = M.getNamedGlobal("llvm.global_dtors");
+
+ auto RegisterCOrDtors = [&](GlobalVariable *GlobalCOrDtors,
+ bool isCtor) -> Error {
+ // If there's no llvm.global_c/dtor or it's just a decl then skip.
+ if (!GlobalCOrDtors || GlobalCOrDtors->isDeclaration())
+ return Error::success();
+ std::string InitOrDeInitFunctionName;
+ if (isCtor)
+ raw_string_ostream(InitOrDeInitFunctionName)
+ << InitFunctionPrefix << M.getModuleIdentifier();
+ else
+ raw_string_ostream(InitOrDeInitFunctionName)
+ << DeInitFunctionPrefix << M.getModuleIdentifier();
+
+ MangleAndInterner Mangle(PS.getExecutionSession(), M.getDataLayout());
+ auto InternedInitOrDeInitName = Mangle(InitOrDeInitFunctionName);
+ if (auto Err = R.defineMaterializing(
+ {{InternedInitOrDeInitName, JITSymbolFlags::Callable}}))
+ return Err;
+
+ auto *InitOrDeInitFunc = Function::Create(
+ FunctionType::get(Type::getVoidTy(Ctx), {}, false),
+ GlobalValue::ExternalLinkage, InitOrDeInitFunctionName, &M);
+ InitOrDeInitFunc->setVisibility(GlobalValue::HiddenVisibility);
+ std::vector<std::pair<Function *, unsigned>> InitsOrDeInits;
+ auto COrDtors = isCtor ? getConstructors(M) : getDestructors(M);
+
+ for (auto E : COrDtors)
+ InitsOrDeInits.push_back(std::make_pair(E.Func, E.Priority));
+ llvm::stable_sort(InitsOrDeInits, llvm::less_second());
+
+ auto *InitOrDeInitFuncEntryBlock =
+ BasicBlock::Create(Ctx, "entry", InitOrDeInitFunc);
+ IRBuilder<> IB(InitOrDeInitFuncEntryBlock);
+ for (auto &KV : InitsOrDeInits)
+ IB.CreateCall(KV.first);
+ IB.CreateRetVoid();
+
+ if (isCtor)
+ PS.registerInitFunc(R.getTargetJITDylib(), InternedInitOrDeInitName);
+ else
+ PS.registerDeInitFunc(R.getTargetJITDylib(), InternedInitOrDeInitName);
+
+ GlobalCOrDtors->eraseFromParent();
+ return Error::success();
+ };
+
+ if (auto Err = RegisterCOrDtors(GlobalCtors, true))
+ return Err;
+ if (auto Err = RegisterCOrDtors(GlobalDtors, false))
+ return Err;
+
+ return Error::success();
+ });
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(TSM);
+}
+
+/// Inactive Platform Support
+///
+/// Explicitly disables platform support. JITDylibs are not scanned for special
+/// init/deinit symbols. No runtime API interposes are injected.
+class InactivePlatformSupport : public LLJIT::PlatformSupport {
+public:
+ InactivePlatformSupport() = default;
+
+ Error initialize(JITDylib &JD) override {
+ LLVM_DEBUG(dbgs() << "InactivePlatformSupport: no initializers running for "
+ << JD.getName() << "\n");
+ return Error::success();
+ }
+
+ Error deinitialize(JITDylib &JD) override {
+ LLVM_DEBUG(
+ dbgs() << "InactivePlatformSupport: no deinitializers running for "
+ << JD.getName() << "\n");
+ return Error::success();
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Error ORCPlatformSupport::initialize(orc::JITDylib &JD) {
+ using llvm::orc::shared::SPSExecutorAddr;
+ using llvm::orc::shared::SPSString;
+ using SPSDLOpenSig = SPSExecutorAddr(SPSString, int32_t);
+ enum dlopen_mode : int32_t {
+ ORC_RT_RTLD_LAZY = 0x1,
+ ORC_RT_RTLD_NOW = 0x2,
+ ORC_RT_RTLD_LOCAL = 0x4,
+ ORC_RT_RTLD_GLOBAL = 0x8
+ };
+
+ auto &ES = J.getExecutionSession();
+ auto MainSearchOrder = J.getMainJITDylib().withLinkOrderDo(
+ [](const JITDylibSearchOrder &SO) { return SO; });
+
+ if (auto WrapperAddr = ES.lookup(
+ MainSearchOrder, J.mangleAndIntern("__orc_rt_jit_dlopen_wrapper"))) {
+ return ES.callSPSWrapper<SPSDLOpenSig>(WrapperAddr->getAddress(),
+ DSOHandles[&JD], JD.getName(),
+ int32_t(ORC_RT_RTLD_LAZY));
+ } else
+ return WrapperAddr.takeError();
+}
+
+Error ORCPlatformSupport::deinitialize(orc::JITDylib &JD) {
+ using llvm::orc::shared::SPSExecutorAddr;
+ using SPSDLCloseSig = int32_t(SPSExecutorAddr);
+
+ auto &ES = J.getExecutionSession();
+ auto MainSearchOrder = J.getMainJITDylib().withLinkOrderDo(
+ [](const JITDylibSearchOrder &SO) { return SO; });
+
+ if (auto WrapperAddr = ES.lookup(
+ MainSearchOrder, J.mangleAndIntern("__orc_rt_jit_dlclose_wrapper"))) {
+ int32_t result;
+ auto E = J.getExecutionSession().callSPSWrapper<SPSDLCloseSig>(
+ WrapperAddr->getAddress(), result, DSOHandles[&JD]);
+ if (E)
+ return E;
+ else if (result)
+ return make_error<StringError>("dlclose failed",
+ inconvertibleErrorCode());
+ DSOHandles.erase(&JD);
+ } else
+ return WrapperAddr.takeError();
+ return Error::success();
+}
+
+void LLJIT::PlatformSupport::setInitTransform(
+ LLJIT &J, IRTransformLayer::TransformFunction T) {
+ J.InitHelperTransformLayer->setTransform(std::move(T));
+}
+
+LLJIT::PlatformSupport::~PlatformSupport() = default;
+
+// Finalize the builder state before an LLJIT instance is constructed:
+// detect a host JITTargetMachineBuilder if none was supplied, validate the
+// concurrency configuration, default the DataLayout and
+// ExecutorProcessControl, auto-select a JIT linker for the target, and
+// install a default process-symbols JITDylib setup function if needed.
+// Fix vs. original: "std ::nullopt" (stray space) normalized to
+// "std::nullopt".
+Error LLJITBuilderState::prepareForConstruction() {
+
+  LLVM_DEBUG(dbgs() << "Preparing to create LLJIT instance...\n");
+
+  if (!JTMB) {
+    LLVM_DEBUG({
+      dbgs() << "  No explicitly set JITTargetMachineBuilder. "
+                "Detecting host...\n";
+    });
+    if (auto JTMBOrErr = JITTargetMachineBuilder::detectHost())
+      JTMB = std::move(*JTMBOrErr);
+    else
+      return JTMBOrErr.takeError();
+  }
+
+  // NumCompileThreads controls the dispatcher that LLJIT itself creates, so
+  // it is incompatible with a caller-supplied ES / EPC.
+  if ((ES || EPC) && NumCompileThreads)
+    return make_error<StringError>(
+        "NumCompileThreads cannot be used with a custom ExecutionSession or "
+        "ExecutorProcessControl",
+        inconvertibleErrorCode());
+
+#if !LLVM_ENABLE_THREADS
+  if (NumCompileThreads)
+    return make_error<StringError>(
+        "LLJIT num-compile-threads is " + Twine(NumCompileThreads) +
+            " but LLVM was compiled with LLVM_ENABLE_THREADS=Off",
+        inconvertibleErrorCode());
+#endif // !LLVM_ENABLE_THREADS
+
+  // Only used in debug builds.
+  [[maybe_unused]] bool ConcurrentCompilationSettingDefaulted =
+      !SupportConcurrentCompilation;
+
+  // Default SupportConcurrentCompilation from the other settings, or reject
+  // an explicit request on a threads-off build.
+  if (!SupportConcurrentCompilation) {
+#if LLVM_ENABLE_THREADS
+    SupportConcurrentCompilation = NumCompileThreads || ES || EPC;
+#else
+    SupportConcurrentCompilation = false;
+#endif // LLVM_ENABLE_THREADS
+  } else {
+#if !LLVM_ENABLE_THREADS
+    if (*SupportConcurrentCompilation)
+      return make_error<StringError>(
+          "LLJIT concurrent compilation support requested, but LLVM was built "
+          "with LLVM_ENABLE_THREADS=Off",
+          inconvertibleErrorCode());
+#endif // !LLVM_ENABLE_THREADS
+  }
+
+  LLVM_DEBUG({
+    dbgs() << "  JITTargetMachineBuilder is "
+           << JITTargetMachineBuilderPrinter(*JTMB, "  ")
+           << "  Pre-constructed ExecutionSession: " << (ES ? "Yes" : "No")
+           << "\n"
+           << "  DataLayout: ";
+    if (DL)
+      dbgs() << DL->getStringRepresentation() << "\n";
+    else
+      dbgs() << "None (will be created by JITTargetMachineBuilder)\n";
+
+    dbgs() << "  Custom object-linking-layer creator: "
+           << (CreateObjectLinkingLayer ? "Yes" : "No") << "\n"
+           << "  Custom compile-function creator: "
+           << (CreateCompileFunction ? "Yes" : "No") << "\n"
+           << "  Custom platform-setup function: "
+           << (SetUpPlatform ? "Yes" : "No") << "\n"
+           << "  Support concurrent compilation: "
+           << (*SupportConcurrentCompilation ? "Yes" : "No");
+    if (ConcurrentCompilationSettingDefaulted)
+      dbgs() << " (defaulted based on ES / EPC / NumCompileThreads)\n";
+    else
+      dbgs() << "\n";
+    dbgs() << "  Number of compile threads: " << NumCompileThreads << "\n";
+  });
+
+  // Create DL if not specified.
+  if (!DL) {
+    if (auto DLOrErr = JTMB->getDefaultDataLayoutForTarget())
+      DL = std::move(*DLOrErr);
+    else
+      return DLOrErr.takeError();
+  }
+
+  // If neither ES nor EPC has been set then create an EPC instance.
+  if (!ES && !EPC) {
+    LLVM_DEBUG({
+      dbgs() << "ExecutorProcessControl not specified, "
+                "Creating SelfExecutorProcessControl instance\n";
+    });
+
+    std::unique_ptr<TaskDispatcher> D = nullptr;
+#if LLVM_ENABLE_THREADS
+    if (*SupportConcurrentCompilation) {
+      std::optional<size_t> NumThreads = std::nullopt;
+      if (NumCompileThreads)
+        NumThreads = NumCompileThreads;
+      D = std::make_unique<DynamicThreadPoolTaskDispatcher>(NumThreads);
+    } else
+      D = std::make_unique<InPlaceTaskDispatcher>();
+#endif // LLVM_ENABLE_THREADS
+    if (auto EPCOrErr =
+            SelfExecutorProcessControl::Create(nullptr, std::move(D), nullptr))
+      EPC = std::move(*EPCOrErr);
+    else
+      return EPCOrErr.takeError();
+  } else if (EPC) {
+    LLVM_DEBUG({
+      dbgs() << "Using explicitly specified ExecutorProcessControl instance "
+             << EPC.get() << "\n";
+    });
+  } else {
+    LLVM_DEBUG({
+      dbgs() << "Using explicitly specified ExecutionSession instance "
+             << ES.get() << "\n";
+    });
+  }
+
+  // If the client didn't configure any linker options then auto-configure the
+  // JIT linker.
+  if (!CreateObjectLinkingLayer) {
+    auto &TT = JTMB->getTargetTriple();
+    bool UseJITLink = false;
+    switch (TT.getArch()) {
+    case Triple::riscv64:
+    case Triple::loongarch64:
+      UseJITLink = true;
+      break;
+    case Triple::aarch64:
+      UseJITLink = !TT.isOSBinFormatCOFF();
+      break;
+    case Triple::arm:
+    case Triple::armeb:
+    case Triple::thumb:
+    case Triple::thumbeb:
+      UseJITLink = TT.isOSBinFormatELF();
+      break;
+    case Triple::x86_64:
+      UseJITLink = !TT.isOSBinFormatCOFF();
+      break;
+    case Triple::ppc64:
+      UseJITLink = TT.isPPC64ELFv2ABI();
+      break;
+    case Triple::ppc64le:
+      UseJITLink = TT.isOSBinFormatELF();
+      break;
+    default:
+      break;
+    }
+    if (UseJITLink) {
+      // JITLink requires PIC with a small code model by default.
+      if (!JTMB->getCodeModel())
+        JTMB->setCodeModel(CodeModel::Small);
+      JTMB->setRelocationModel(Reloc::PIC_);
+      CreateObjectLinkingLayer =
+          [](ExecutionSession &ES,
+             const Triple &) -> Expected<std::unique_ptr<ObjectLayer>> {
+        auto ObjLinkingLayer = std::make_unique<ObjectLinkingLayer>(ES);
+        if (auto EHFrameRegistrar = EPCEHFrameRegistrar::Create(ES))
+          ObjLinkingLayer->addPlugin(
+              std::make_unique<EHFrameRegistrationPlugin>(
+                  ES, std::move(*EHFrameRegistrar)));
+        else
+          return EHFrameRegistrar.takeError();
+        return std::move(ObjLinkingLayer);
+      };
+    }
+  }
+
+  // If we need a process JITDylib but no setup function has been given then
+  // create a default one.
+  if (!SetupProcessSymbolsJITDylib && LinkProcessSymbolsByDefault) {
+    LLVM_DEBUG(dbgs() << "Creating default Process JD setup function\n");
+    SetupProcessSymbolsJITDylib = [](LLJIT &J) -> Expected<JITDylibSP> {
+      auto &JD =
+          J.getExecutionSession().createBareJITDylib("<Process Symbols>");
+      auto G = EPCDynamicLibrarySearchGenerator::GetForTargetProcess(
+          J.getExecutionSession());
+      if (!G)
+        return G.takeError();
+      JD.addGenerator(std::move(*G));
+      return &JD;
+    };
+  }
+
+  return Error::success();
+}
+
+// End the ExecutionSession on destruction; report (rather than drop) any
+// error raised while winding the session down.
+LLJIT::~LLJIT() {
+  if (auto Err = ES->endSession())
+    ES->reportError(std::move(Err));
+}
+
+JITDylibSP LLJIT::getProcessSymbolsJITDylib() { return ProcessSymbols; }
+
+JITDylibSP LLJIT::getPlatformJITDylib() { return Platform; }
+
+// Create a new JITDylib and give it the standard default link order
+// (platform and/or process-symbols dylibs, as configured at build time).
+Expected<JITDylib &> LLJIT::createJITDylib(std::string Name) {
+  auto JD = ES->createJITDylib(std::move(Name));
+  if (!JD)
+    return JD.takeError();
+
+  JD->addToLinkOrder(DefaultLinks);
+  return JD;
+}
+
+// Load a dynamic library in the executor process and expose its symbols via
+// a JITDylib named after the path. If a JITDylib with that name already
+// exists it is returned as-is.
+// NOTE(review): the generator is created (loading the library) before the
+// existing-JITDylib check, so on the already-exists path the fresh load is
+// discarded — presumably harmless, but verify against the generator's
+// load semantics.
+Expected<JITDylib &> LLJIT::loadPlatformDynamicLibrary(const char *Path) {
+  auto G = EPCDynamicLibrarySearchGenerator::Load(*ES, Path);
+  if (!G)
+    return G.takeError();
+
+  if (auto *ExistingJD = ES->getJITDylibByName(Path))
+    return *ExistingJD;
+
+  auto &JD = ES->createBareJITDylib(Path);
+  JD.addGenerator(std::move(*G));
+  return JD;
+}
+
+// Attach a static-library generator for an in-memory archive to JD, so its
+// members are linked in lazily as their symbols are referenced.
+Error LLJIT::linkStaticLibraryInto(JITDylib &JD,
+                                   std::unique_ptr<MemoryBuffer> LibBuffer) {
+  auto G = StaticLibraryDefinitionGenerator::Create(*ObjLinkingLayer,
+                                                    std::move(LibBuffer));
+  if (!G)
+    return G.takeError();
+
+  JD.addGenerator(std::move(*G));
+
+  return Error::success();
+}
+
+// As above, but loads the archive from a file path.
+Error LLJIT::linkStaticLibraryInto(JITDylib &JD, const char *Path) {
+  auto G = StaticLibraryDefinitionGenerator::Load(*ObjLinkingLayer, Path);
+  if (!G)
+    return G.takeError();
+
+  JD.addGenerator(std::move(*G));
+
+  return Error::success();
+}
+
+// Add an IR module under the given resource tracker. The module's data
+// layout is checked/defaulted against the JIT's before it is handed to the
+// init-helper transform layer.
+Error LLJIT::addIRModule(ResourceTrackerSP RT, ThreadSafeModule TSM) {
+  assert(TSM && "Can not add null module");
+
+  if (auto Err =
+          TSM.withModuleDo([&](Module &M) { return applyDataLayout(M); }))
+    return Err;
+
+  return InitHelperTransformLayer->add(std::move(RT), std::move(TSM));
+}
+
+// Convenience overload: add to JD's default resource tracker.
+Error LLJIT::addIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+  return addIRModule(JD.getDefaultResourceTracker(), std::move(TSM));
+}
+
+// Add a relocatable object file under the given resource tracker.
+Error LLJIT::addObjectFile(ResourceTrackerSP RT,
+                           std::unique_ptr<MemoryBuffer> Obj) {
+  assert(Obj && "Can not add null object");
+
+  return ObjTransformLayer->add(std::move(RT), std::move(Obj));
+}
+
+// Convenience overload: add to JD's default resource tracker.
+Error LLJIT::addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj) {
+  return addObjectFile(JD.getDefaultResourceTracker(), std::move(Obj));
+}
+
+// Look up an already-linker-mangled symbol name in JD (matching both
+// exported and hidden symbols) and return its executor address.
+Expected<ExecutorAddr> LLJIT::lookupLinkerMangled(JITDylib &JD,
+                                                  SymbolStringPtr Name) {
+  if (auto Sym = ES->lookup(
+          makeJITDylibSearchOrder(&JD, JITDylibLookupFlags::MatchAllSymbols),
+          Name))
+    return Sym->getAddress();
+  else
+    return Sym.takeError();
+}
+
+// Build the object-linking layer for a new LLJIT: either via the
+// client-supplied factory, or a default RTDyldObjectLinkingLayer backed by
+// per-object SectionMemoryManagers, with COFF/ppc64 symbol-flag quirks
+// applied.
+Expected<std::unique_ptr<ObjectLayer>>
+LLJIT::createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES) {
+
+  // If the config state provided an ObjectLinkingLayer factory then use it.
+  if (S.CreateObjectLinkingLayer)
+    return S.CreateObjectLinkingLayer(ES, S.JTMB->getTargetTriple());
+
+  // Otherwise default to creating an RTDyldObjectLinkingLayer that constructs
+  // a new SectionMemoryManager for each object.
+  auto GetMemMgr = []() { return std::make_unique<SectionMemoryManager>(); };
+  auto Layer =
+      std::make_unique<RTDyldObjectLinkingLayer>(ES, std::move(GetMemMgr));
+
+  if (S.JTMB->getTargetTriple().isOSBinFormatCOFF()) {
+    Layer->setOverrideObjectFlagsWithResponsibilityFlags(true);
+    Layer->setAutoClaimResponsibilityForObjectSymbols(true);
+  }
+
+  if (S.JTMB->getTargetTriple().isOSBinFormatELF() &&
+      (S.JTMB->getTargetTriple().getArch() == Triple::ArchType::ppc64 ||
+       S.JTMB->getTargetTriple().getArch() == Triple::ArchType::ppc64le))
+    Layer->setAutoClaimResponsibilityForObjectSymbols(true);
+
+  // FIXME: Explicit conversion to std::unique_ptr<ObjectLayer> added to silence
+  // errors from some GCC / libstdc++ bots. Remove this conversion (i.e.
+  // just return ObjLinkingLayer) once those bots are upgraded.
+  return std::unique_ptr<ObjectLayer>(std::move(Layer));
+}
+
+// Build the IR compile function: client factory if set, a
+// ConcurrentIRCompiler when concurrent compilation is enabled, otherwise a
+// simple compiler that owns its own TargetMachine.
+Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
+LLJIT::createCompileFunction(LLJITBuilderState &S,
+                             JITTargetMachineBuilder JTMB) {
+
+  /// If there is a custom compile function creator set then use it.
+  if (S.CreateCompileFunction)
+    return S.CreateCompileFunction(std::move(JTMB));
+
+  // If using a custom EPC then use a ConcurrentIRCompiler by default.
+  if (*S.SupportConcurrentCompilation)
+    return std::make_unique<ConcurrentIRCompiler>(std::move(JTMB));
+
+  auto TM = JTMB.createTargetMachine();
+  if (!TM)
+    return TM.takeError();
+
+  return std::make_unique<TMOwningSimpleCompiler>(std::move(*TM));
+}
+
+// Construct an LLJIT from a prepared builder state. Errors are delivered
+// through the Err out-parameter; on error the instance is left
+// partially-constructed and must not be used. Sets up, in order: the
+// ExecutionSession, the object/compile/transform layer stack, the
+// process-symbols JITDylib, the platform, the default link order, and the
+// "main" JITDylib.
+LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
+    : DL(std::move(*S.DL)), TT(S.JTMB->getTargetTriple()) {
+
+  ErrorAsOutParameter _(&Err);
+
+  assert(!(S.EPC && S.ES) && "EPC and ES should not both be set");
+
+  // Adopt or create the ExecutionSession.
+  if (S.EPC) {
+    ES = std::make_unique<ExecutionSession>(std::move(S.EPC));
+  } else if (S.ES)
+    ES = std::move(S.ES);
+  else {
+    if (auto EPC = SelfExecutorProcessControl::Create()) {
+      ES = std::make_unique<ExecutionSession>(std::move(*EPC));
+    } else {
+      Err = EPC.takeError();
+      return;
+    }
+  }
+
+  auto ObjLayer = createObjectLinkingLayer(S, *ES);
+  if (!ObjLayer) {
+    Err = ObjLayer.takeError();
+    return;
+  }
+  ObjLinkingLayer = std::move(*ObjLayer);
+  ObjTransformLayer =
+      std::make_unique<ObjectTransformLayer>(*ES, *ObjLinkingLayer);
+
+  {
+    auto CompileFunction = createCompileFunction(S, std::move(*S.JTMB));
+    if (!CompileFunction) {
+      Err = CompileFunction.takeError();
+      return;
+    }
+    CompileLayer = std::make_unique<IRCompileLayer>(
+        *ES, *ObjTransformLayer, std::move(*CompileFunction));
+    TransformLayer = std::make_unique<IRTransformLayer>(*ES, *CompileLayer);
+    InitHelperTransformLayer =
+        std::make_unique<IRTransformLayer>(*ES, *TransformLayer);
+  }
+
+  // Modules may be compiled on other threads: give each its own context.
+  if (*S.SupportConcurrentCompilation)
+    InitHelperTransformLayer->setCloneToNewContextOnEmit(true);
+
+  if (S.SetupProcessSymbolsJITDylib) {
+    if (auto ProcSymsJD = S.SetupProcessSymbolsJITDylib(*this)) {
+      ProcessSymbols = ProcSymsJD->get();
+    } else {
+      Err = ProcSymsJD.takeError();
+      return;
+    }
+  }
+
+  // Client hook that runs before platform setup.
+  if (S.PrePlatformSetup) {
+    if (auto Err2 = S.PrePlatformSetup(*this)) {
+      Err = std::move(Err2);
+      return;
+    }
+  }
+
+  if (!S.SetUpPlatform)
+    S.SetUpPlatform = setUpGenericLLVMIRPlatform;
+
+  if (auto PlatformJDOrErr = S.SetUpPlatform(*this)) {
+    Platform = PlatformJDOrErr->get();
+    if (Platform)
+      DefaultLinks.push_back(
+          {Platform, JITDylibLookupFlags::MatchExportedSymbolsOnly});
+  } else {
+    Err = PlatformJDOrErr.takeError();
+    return;
+  }
+
+  if (S.LinkProcessSymbolsByDefault)
+    DefaultLinks.push_back(
+        {ProcessSymbols, JITDylibLookupFlags::MatchExportedSymbolsOnly});
+
+  if (auto MainOrErr = createJITDylib("main"))
+    Main = &*MainOrErr;
+  else {
+    Err = MainOrErr.takeError();
+    return;
+  }
+}
+
+// Apply the target data layout's linker mangling (e.g. leading underscore
+// on MachO) to an unmangled IR-level name.
+std::string LLJIT::mangle(StringRef UnmangledName) const {
+  std::string MangledName;
+  {
+    raw_string_ostream MangledNameStream(MangledName);
+    Mangler::getNameWithPrefix(MangledNameStream, UnmangledName, DL);
+  }
+  return MangledName;
+}
+
+// Default M's data layout to the JIT's, and reject modules whose explicit
+// layout disagrees with it.
+Error LLJIT::applyDataLayout(Module &M) {
+  if (M.getDataLayout().isDefault())
+    M.setDataLayout(DL);
+
+  if (M.getDataLayout() != DL)
+    return make_error<StringError>(
+        "Added modules have incompatible data layouts: " +
+            M.getDataLayout().getStringRepresentation() + " (module) vs " +
+            DL.getStringRepresentation() + " (jit)",
+        inconvertibleErrorCode());
+
+  return Error::success();
+}
+
+// Install ORC-runtime platform support on J without going through a
+// platform-setup function.
+Error setUpOrcPlatformManually(LLJIT &J) {
+  LLVM_DEBUG({ dbgs() << "Setting up orc platform support for LLJIT\n"; });
+  J.setPlatformSupport(std::make_unique<ORCPlatformSupport>(J));
+  return Error::success();
+}
+
+// Callable used by COFFPlatform to satisfy DLL import requests: loads the
+// named DLL (name must end in ".dll", case-insensitive) as a JITDylib and
+// adds it to the requesting JITDylib's link order.
+class LoadAndLinkDynLibrary {
+public:
+  LoadAndLinkDynLibrary(LLJIT &J) : J(J) {}
+  Error operator()(JITDylib &JD, StringRef DLLName) {
+    if (!DLLName.ends_with_insensitive(".dll"))
+      return make_error<StringError>("DLLName not ending with .dll",
+                                     inconvertibleErrorCode());
+    auto DLLNameStr = DLLName.str(); // Guarantees null-termination.
+    auto DLLJD = J.loadPlatformDynamicLibrary(DLLNameStr.c_str());
+    if (!DLLJD)
+      return DLLJD.takeError();
+    JD.addToLinkOrder(*DLLJD);
+    return Error::success();
+  }
+
+private:
+  LLJIT &J;
+};
+
+// Platform-setup function that installs the native ORC runtime platform
+// (COFFPlatform / ELFNixPlatform / MachOPlatform, chosen by the target's
+// object format) into a fresh "<Platform>" JITDylib. Requires a
+// process-symbols JITDylib and an ObjectLinkingLayer-based LLJIT.
+Expected<JITDylibSP> ExecutorNativePlatform::operator()(LLJIT &J) {
+  auto ProcessSymbolsJD = J.getProcessSymbolsJITDylib();
+  if (!ProcessSymbolsJD)
+    return make_error<StringError>(
+        "Native platforms require a process symbols JITDylib",
+        inconvertibleErrorCode());
+
+  const Triple &TT = J.getTargetTriple();
+  ObjectLinkingLayer *ObjLinkingLayer =
+      dyn_cast<ObjectLinkingLayer>(&J.getObjLinkingLayer());
+
+  if (!ObjLinkingLayer)
+    return make_error<StringError>(
+        "ExecutorNativePlatform requires ObjectLinkingLayer",
+        inconvertibleErrorCode());
+
+  // The ORC runtime is supplied either as a path (variant index 0) or an
+  // in-memory archive buffer (index 1).
+  std::unique_ptr<MemoryBuffer> RuntimeArchiveBuffer;
+  if (OrcRuntime.index() == 0) {
+    auto A = errorOrToExpected(MemoryBuffer::getFile(std::get<0>(OrcRuntime)));
+    if (!A)
+      return A.takeError();
+    RuntimeArchiveBuffer = std::move(*A);
+  } else
+    RuntimeArchiveBuffer = std::move(std::get<1>(OrcRuntime));
+
+  auto &ES = J.getExecutionSession();
+  auto &PlatformJD = ES.createBareJITDylib("<Platform>");
+  PlatformJD.addToLinkOrder(*ProcessSymbolsJD);
+
+  J.setPlatformSupport(std::make_unique<ORCPlatformSupport>(J));
+
+  switch (TT.getObjectFormat()) {
+  case Triple::COFF: {
+    const char *VCRuntimePath = nullptr;
+    bool StaticVCRuntime = false;
+    if (VCRuntime) {
+      VCRuntimePath = VCRuntime->first.c_str();
+      StaticVCRuntime = VCRuntime->second;
+    }
+    if (auto P = COFFPlatform::Create(
+            ES, *ObjLinkingLayer, PlatformJD, std::move(RuntimeArchiveBuffer),
+            LoadAndLinkDynLibrary(J), StaticVCRuntime, VCRuntimePath))
+      J.getExecutionSession().setPlatform(std::move(*P));
+    else
+      return P.takeError();
+    break;
+  }
+  case Triple::ELF: {
+    auto G = StaticLibraryDefinitionGenerator::Create(
+        *ObjLinkingLayer, std::move(RuntimeArchiveBuffer));
+    if (!G)
+      return G.takeError();
+
+    if (auto P = ELFNixPlatform::Create(ES, *ObjLinkingLayer, PlatformJD,
+                                        std::move(*G)))
+      J.getExecutionSession().setPlatform(std::move(*P));
+    else
+      return P.takeError();
+    break;
+  }
+  case Triple::MachO: {
+    auto G = StaticLibraryDefinitionGenerator::Create(
+        *ObjLinkingLayer, std::move(RuntimeArchiveBuffer));
+    if (!G)
+      return G.takeError();
+
+    if (auto P = MachOPlatform::Create(ES, *ObjLinkingLayer, PlatformJD,
+                                       std::move(*G)))
+      ES.setPlatform(std::move(*P));
+    else
+      return P.takeError();
+    break;
+  }
+  default:
+    return make_error<StringError>("Unsupported object format in triple " +
+                                       TT.str(),
+                                   inconvertibleErrorCode());
+  }
+
+  return &PlatformJD;
+}
+
+// Default platform-setup function: installs GenericLLVMIRPlatformSupport
+// (IR-level static initializer handling) in a fresh "<Platform>" JITDylib.
+Expected<JITDylibSP> setUpGenericLLVMIRPlatform(LLJIT &J) {
+  LLVM_DEBUG(
+      { dbgs() << "Setting up GenericLLVMIRPlatform support for LLJIT\n"; });
+  auto ProcessSymbolsJD = J.getProcessSymbolsJITDylib();
+  if (!ProcessSymbolsJD)
+    return make_error<StringError>(
+        "Native platforms require a process symbols JITDylib",
+        inconvertibleErrorCode());
+
+  auto &PlatformJD = J.getExecutionSession().createBareJITDylib("<Platform>");
+  PlatformJD.addToLinkOrder(*ProcessSymbolsJD);
+
+  J.setPlatformSupport(
+      std::make_unique<GenericLLVMIRPlatformSupport>(J, PlatformJD));
+
+  return &PlatformJD;
+}
+
+// Platform-setup function that disables initializer/deinitializer support
+// entirely. Returns no platform JITDylib.
+Expected<JITDylibSP> setUpInactivePlatform(LLJIT &J) {
+  LLVM_DEBUG(
+      { dbgs() << "Explicitly deactivated platform support for LLJIT\n"; });
+  J.setPlatformSupport(std::make_unique<InactivePlatformSupport>());
+  return nullptr;
+}
+
+// Extends the base preparation with a cached copy of the target triple,
+// which LLLazyJIT needs to build its lazy-compilation machinery.
+Error LLLazyJITBuilderState::prepareForConstruction() {
+  if (auto Err = LLJITBuilderState::prepareForConstruction())
+    return Err;
+  TT = JTMB->getTargetTriple();
+  return Error::success();
+}
+
+// Add an IR module for lazy compilation: functions are only compiled when
+// first called, via the compile-on-demand layer.
+Error LLLazyJIT::addLazyIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+  assert(TSM && "Can not add null module");
+
+  if (auto Err = TSM.withModuleDo(
+          [&](Module &M) -> Error { return applyDataLayout(M); }))
+    return Err;
+
+  return CODLayer->add(JD, std::move(TSM));
+}
+
+// Construct the lazy JIT on top of a regular LLJIT, then wire up the
+// lazy-call-through manager, indirect-stubs manager builder, and the
+// compile-on-demand layer. Errors are reported via the Err out-parameter.
+LLLazyJIT::LLLazyJIT(LLLazyJITBuilderState &S, Error &Err) : LLJIT(S, Err) {
+
+  // If LLJIT construction failed then bail out.
+  if (Err)
+    return;
+
+  ErrorAsOutParameter _(&Err);
+
+  /// Take/Create the lazy-compile callthrough manager.
+  if (S.LCTMgr)
+    LCTMgr = std::move(S.LCTMgr);
+  else {
+    if (auto LCTMgrOrErr = createLocalLazyCallThroughManager(
+            S.TT, *ES, S.LazyCompileFailureAddr))
+      LCTMgr = std::move(*LCTMgrOrErr);
+    else {
+      Err = LCTMgrOrErr.takeError();
+      return;
+    }
+  }
+
+  // Take/Create the indirect stubs manager builder.
+  auto ISMBuilder = std::move(S.ISMBuilder);
+
+  // If none was provided, try to build one.
+  if (!ISMBuilder)
+    ISMBuilder = createLocalIndirectStubsManagerBuilder(S.TT);
+
+  // No luck. Bail out.
+  if (!ISMBuilder) {
+    Err = make_error<StringError>("Could not construct "
+                                  "IndirectStubsManagerBuilder for target " +
+                                      S.TT.str(),
+                                  inconvertibleErrorCode());
+    return;
+  }
+
+  // Create the COD layer.
+  CODLayer = std::make_unique<CompileOnDemandLayer>(
+      *ES, *InitHelperTransformLayer, *LCTMgr, std::move(ISMBuilder));
+
+  if (*S.SupportConcurrentCompilation)
+    CODLayer->setCloneToNewContextOnEmit(true);
+}
+
+// In-process LLJIT uses eh-frame section wrappers via EPC, so we need to force
+// them to be linked in.
+LLVM_ATTRIBUTE_USED void linkComponents() {
+  errs() << (void *)&llvm_orc_registerEHFrameSectionWrapper
+         << (void *)&llvm_orc_deregisterEHFrameSectionWrapper;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp
new file mode 100644
index 000000000000..d97336c914ea
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp
@@ -0,0 +1,229 @@
+//===-------------------- Layer.cpp - Layer interfaces --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+IRLayer::~IRLayer() = default;
+
+// Define the module's symbols in the tracker's JITDylib via a
+// materialization unit; actual compilation is deferred until a symbol from
+// the module is looked up.
+Error IRLayer::add(ResourceTrackerSP RT, ThreadSafeModule TSM) {
+  assert(RT && "RT can not be null");
+  auto &JD = RT->getJITDylib();
+  return JD.define(std::make_unique<BasicIRLayerMaterializationUnit>(
+                       *this, *getManglingOptions(), std::move(TSM)),
+                   std::move(RT));
+}
+
+// Scan the module and populate SymbolFlags / SymbolToDefinition with the
+// linker-mangled names this unit will provide. Thread-locals under emulated
+// TLS are mapped to their __emutls_v. / __emutls_t. symbols, and a unique
+// init symbol is created if the module has static initializers.
+IRMaterializationUnit::IRMaterializationUnit(
+    ExecutionSession &ES, const IRSymbolMapper::ManglingOptions &MO,
+    ThreadSafeModule TSM)
+    : MaterializationUnit(Interface()), TSM(std::move(TSM)) {
+
+  assert(this->TSM && "Module must not be null");
+
+  MangleAndInterner Mangle(ES, this->TSM.getModuleUnlocked()->getDataLayout());
+  this->TSM.withModuleDo([&](Module &M) {
+    for (auto &G : M.global_values()) {
+      // Skip globals that don't generate symbols.
+
+      if (!G.hasName() || G.isDeclaration() || G.hasLocalLinkage() ||
+          G.hasAvailableExternallyLinkage() || G.hasAppendingLinkage())
+        continue;
+
+      // thread locals generate different symbols depending on whether or not
+      // emulated TLS is enabled.
+      if (G.isThreadLocal() && MO.EmulatedTLS) {
+        auto &GV = cast<GlobalVariable>(G);
+
+        auto Flags = JITSymbolFlags::fromGlobalValue(GV);
+
+        auto EmuTLSV = Mangle(("__emutls_v." + GV.getName()).str());
+        SymbolFlags[EmuTLSV] = Flags;
+        SymbolToDefinition[EmuTLSV] = &GV;
+
+        // If this GV has a non-zero initializer we'll need to emit an
+        // __emutls.t symbol too.
+        if (GV.hasInitializer()) {
+          const auto *InitVal = GV.getInitializer();
+
+          // Skip zero-initializers.
+          if (isa<ConstantAggregateZero>(InitVal))
+            continue;
+          const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+          if (InitIntValue && InitIntValue->isZero())
+            continue;
+
+          auto EmuTLST = Mangle(("__emutls_t." + GV.getName()).str());
+          SymbolFlags[EmuTLST] = Flags;
+        }
+        continue;
+      }
+
+      // Otherwise we just need a normal linker mangling.
+      auto MangledName = Mangle(G.getName());
+      SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(G);
+      // Deduplicating comdats are modelled as weak symbols.
+      if (G.getComdat() &&
+          G.getComdat()->getSelectionKind() != Comdat::NoDeduplicate)
+        SymbolFlags[MangledName] |= JITSymbolFlags::Weak;
+      SymbolToDefinition[MangledName] = &G;
+    }
+
+    // If we need an init symbol for this module then create one.
+    if (!getStaticInitGVs(M).empty()) {
+      size_t Counter = 0;
+
+      // Bump the counter until the generated name is unique within this
+      // unit's symbol set.
+      do {
+        std::string InitSymbolName;
+        raw_string_ostream(InitSymbolName)
+            << "$." << M.getModuleIdentifier() << ".__inits." << Counter++;
+        InitSymbol = ES.intern(InitSymbolName);
+      } while (SymbolFlags.count(InitSymbol));
+
+      SymbolFlags[InitSymbol] = JITSymbolFlags::MaterializationSideEffectsOnly;
+    }
+  });
+}
+
+// Alternate constructor for callers that have already computed the
+// interface and symbol-to-definition map.
+IRMaterializationUnit::IRMaterializationUnit(
+    ThreadSafeModule TSM, Interface I,
+    SymbolNameToDefinitionMap SymbolToDefinition)
+    : MaterializationUnit(std::move(I)), TSM(std::move(TSM)),
+      SymbolToDefinition(std::move(SymbolToDefinition)) {}
+
+StringRef IRMaterializationUnit::getName() const {
+  if (TSM)
+    return TSM.withModuleDo(
+        [](const Module &M) -> StringRef { return M.getModuleIdentifier(); });
+  return "<null module>";
+}
+
+// Drop responsibility for Name (another unit won the definition race):
+// demote the IR definition to available_externally so it is not emitted.
+void IRMaterializationUnit::discard(const JITDylib &JD,
+                                    const SymbolStringPtr &Name) {
+  LLVM_DEBUG(JD.getExecutionSession().runSessionLocked([&]() {
+    dbgs() << "In " << JD.getName() << " discarding " << *Name << " from MU@"
+           << this << " (" << getName() << ")\n";
+  }););
+
+  auto I = SymbolToDefinition.find(Name);
+  assert(I != SymbolToDefinition.end() &&
+         "Symbol not provided by this MU, or previously discarded");
+  assert(!I->second->isDeclaration() &&
+         "Discard should only apply to definitions");
+  I->second->setLinkage(GlobalValue::AvailableExternallyLinkage);
+  // According to the IR verifier, "Declaration[s] may not be in a Comdat!"
+  // Remove it, if this is a GlobalObject.
+  if (auto *GO = dyn_cast<GlobalObject>(I->second))
+    GO->setComdat(nullptr);
+  SymbolToDefinition.erase(I);
+}
+
+BasicIRLayerMaterializationUnit::BasicIRLayerMaterializationUnit(
+    IRLayer &L, const IRSymbolMapper::ManglingOptions &MO, ThreadSafeModule TSM)
+    : IRMaterializationUnit(L.getExecutionSession(), MO, std::move(TSM)), L(L) {
+}
+
+// Hand the module to the owning IRLayer for compilation, optionally cloning
+// it into a fresh LLVMContext first (required for concurrent emission).
+void BasicIRLayerMaterializationUnit::materialize(
+    std::unique_ptr<MaterializationResponsibility> R) {
+
+  // Throw away the SymbolToDefinition map: it's not usable after we hand
+  // off the module.
+  SymbolToDefinition.clear();
+
+  // If cloneToNewContextOnEmit is set, clone the module now.
+  if (L.getCloneToNewContextOnEmit())
+    TSM = cloneToNewContext(TSM);
+
+#ifndef NDEBUG
+  auto &ES = R->getTargetJITDylib().getExecutionSession();
+  auto &N = R->getTargetJITDylib().getName();
+#endif // NDEBUG
+
+  LLVM_DEBUG(ES.runSessionLocked(
+      [&]() { dbgs() << "Emitting, for " << N << ", " << *this << "\n"; }););
+  L.emit(std::move(R), std::move(TSM));
+  LLVM_DEBUG(ES.runSessionLocked([&]() {
+    dbgs() << "Finished emitting, for " << N << ", " << *this << "\n";
+  }););
+}
+
+char ObjectLayer::ID;
+
+ObjectLayer::ObjectLayer(ExecutionSession &ES) : ES(ES) {}
+
+ObjectLayer::~ObjectLayer() = default;
+
+// Define the object's symbols (described by I) in the tracker's JITDylib;
+// linking is deferred until one of them is looked up.
+Error ObjectLayer::add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O,
+                       MaterializationUnit::Interface I) {
+  assert(RT && "RT can not be null");
+  auto &JD = RT->getJITDylib();
+  return JD.define(std::make_unique<BasicObjectLayerMaterializationUnit>(
+                       *this, std::move(O), std::move(I)),
+                   std::move(RT));
+}
+
+// Convenience overload: derive the interface from the object file itself.
+Error ObjectLayer::add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O) {
+  auto I = getObjectFileInterface(getExecutionSession(), O->getMemBufferRef());
+  if (!I)
+    return I.takeError();
+  return add(std::move(RT), std::move(O), std::move(*I));
+}
+
+// Convenience overload: derive the interface and use JD directly.
+Error ObjectLayer::add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O) {
+  auto I = getObjectFileInterface(getExecutionSession(), O->getMemBufferRef());
+  if (!I)
+    return I.takeError();
+  return add(JD, std::move(O), std::move(*I));
+}
+
+// Factory that inspects the object buffer to build the unit's interface
+// (provided symbols and flags) before construction.
+Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
+BasicObjectLayerMaterializationUnit::Create(ObjectLayer &L,
+                                            std::unique_ptr<MemoryBuffer> O) {
+
+  auto ObjInterface =
+      getObjectFileInterface(L.getExecutionSession(), O->getMemBufferRef());
+
+  if (!ObjInterface)
+    return ObjInterface.takeError();
+
+  return std::make_unique<BasicObjectLayerMaterializationUnit>(
+      L, std::move(O), std::move(*ObjInterface));
+}
+
+BasicObjectLayerMaterializationUnit::BasicObjectLayerMaterializationUnit(
+    ObjectLayer &L, std::unique_ptr<MemoryBuffer> O, Interface I)
+    : MaterializationUnit(std::move(I)), L(L), O(std::move(O)) {}
+
+StringRef BasicObjectLayerMaterializationUnit::getName() const {
+  if (O)
+    return O->getBufferIdentifier();
+  return "<null object>";
+}
+
+// Hand the object buffer to the owning layer for linking.
+void BasicObjectLayerMaterializationUnit::materialize(
+    std::unique_ptr<MaterializationResponsibility> R) {
+  L.emit(std::move(R), std::move(O));
+}
+
+void BasicObjectLayerMaterializationUnit::discard(const JITDylib &JD,
+                                                  const SymbolStringPtr &Name) {
+  // This is a no-op for object files: Having removed 'Name' from SymbolFlags
+  // the symbol will be dead-stripped by the JIT linker.
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp
new file mode 100644
index 000000000000..693a3f33f868
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LazyReexports.cpp
@@ -0,0 +1,243 @@
+//===---------- LazyReexports.cpp - Utilities for lazy reexports ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
+
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/TargetParser/Triple.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+LazyCallThroughManager::LazyCallThroughManager(ExecutionSession &ES,
+                                               ExecutorAddr ErrorHandlerAddr,
+                                               TrampolinePool *TP)
+    : ES(ES), ErrorHandlerAddr(ErrorHandlerAddr), TP(TP) {}
+
+// Reserve a trampoline from the pool and record which (JITDylib, symbol)
+// pair it should resolve to, plus the callback to run once resolved.
+Expected<ExecutorAddr> LazyCallThroughManager::getCallThroughTrampoline(
+    JITDylib &SourceJD, SymbolStringPtr SymbolName,
+    NotifyResolvedFunction NotifyResolved) {
+  assert(TP && "TrampolinePool not set");
+
+  std::lock_guard<std::mutex> Lock(LCTMMutex);
+  auto Trampoline = TP->getTrampoline();
+
+  if (!Trampoline)
+    return Trampoline.takeError();
+
+  Reexports[*Trampoline] = ReexportsEntry{&SourceJD, std::move(SymbolName)};
+  Notifiers[*Trampoline] = std::move(NotifyResolved);
+  return *Trampoline;
+}
+
+// Report Err to the session and return the configured error-handler address
+// so the trampoline has somewhere to jump.
+ExecutorAddr LazyCallThroughManager::reportCallThroughError(Error Err) {
+  ES.reportError(std::move(Err));
+  return ErrorHandlerAddr;
+}
+
+// Look up the (JITDylib, symbol) entry recorded for a trampoline address.
+// Fix vs. original: the error message contained a leftover printf-style
+// "%p" specifier; the Twine-based createStringError overload performs no
+// substitution, so "%p" appeared literally before the formatv()-rendered
+// address. The specifier is dropped and a space separates the text from
+// the address.
+Expected<LazyCallThroughManager::ReexportsEntry>
+LazyCallThroughManager::findReexport(ExecutorAddr TrampolineAddr) {
+  std::lock_guard<std::mutex> Lock(LCTMMutex);
+  auto I = Reexports.find(TrampolineAddr);
+  if (I == Reexports.end())
+    return createStringError(inconvertibleErrorCode(),
+                             "Missing reexport for trampoline address " +
+                                 formatv("{0:x}", TrampolineAddr));
+  return I->second;
+}
+
+// Run (and remove) the one-shot notifier recorded for TrampolineAddr, if
+// any. The notifier is moved out under the lock but invoked outside it.
+Error LazyCallThroughManager::notifyResolved(ExecutorAddr TrampolineAddr,
+                                             ExecutorAddr ResolvedAddr) {
+  NotifyResolvedFunction NotifyResolved;
+  {
+    std::lock_guard<std::mutex> Lock(LCTMMutex);
+    auto I = Notifiers.find(TrampolineAddr);
+    if (I != Notifiers.end()) {
+      NotifyResolved = std::move(I->second);
+      Notifiers.erase(I);
+    }
+  }
+
+  return NotifyResolved ? NotifyResolved(ResolvedAddr) : Error::success();
+}
+
+// Called when a trampoline is hit: look up the recorded symbol in its source
+// JITDylib (triggering compilation if needed), run the resolution notifier,
+// and pass the final landing address (or the error-handler address on
+// failure) to NotifyLandingResolved.
+void LazyCallThroughManager::resolveTrampolineLandingAddress(
+    ExecutorAddr TrampolineAddr,
+    NotifyLandingResolvedFunction NotifyLandingResolved) {
+
+  auto Entry = findReexport(TrampolineAddr);
+  if (!Entry)
+    return NotifyLandingResolved(reportCallThroughError(Entry.takeError()));
+
+  // Declaring SLS and the callback outside of the call to ES.lookup is a
+  // workaround to fix build failures on AIX and on z/OS platforms.
+  SymbolLookupSet SLS({Entry->SymbolName});
+  auto Callback = [this, TrampolineAddr, SymbolName = Entry->SymbolName,
+                   NotifyLandingResolved = std::move(NotifyLandingResolved)](
+                      Expected<SymbolMap> Result) mutable {
+    if (Result) {
+      assert(Result->size() == 1 && "Unexpected result size");
+      assert(Result->count(SymbolName) && "Unexpected result value");
+      ExecutorAddr LandingAddr = (*Result)[SymbolName].getAddress();
+
+      if (auto Err = notifyResolved(TrampolineAddr, LandingAddr))
+        NotifyLandingResolved(reportCallThroughError(std::move(Err)));
+      else
+        NotifyLandingResolved(LandingAddr);
+    } else {
+      NotifyLandingResolved(reportCallThroughError(Result.takeError()));
+    }
+  };
+
+  ES.lookup(LookupKind::Static,
+            makeJITDylibSearchOrder(Entry->SourceJD,
+                                    JITDylibLookupFlags::MatchAllSymbols),
+            std::move(SLS), SymbolState::Ready, std::move(Callback),
+            NoDependenciesToRegister);
+}
+
+// Construct an in-process LazyCallThroughManager specialized for the given
+// target architecture's ABI, or fail for unsupported targets.
+Expected<std::unique_ptr<LazyCallThroughManager>>
+createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
+                                  ExecutorAddr ErrorHandlerAddr) {
+  switch (T.getArch()) {
+  default:
+    return make_error<StringError>(
+        std::string("No callback manager available for ") + T.str(),
+        inconvertibleErrorCode());
+
+  case Triple::aarch64:
+  case Triple::aarch64_32:
+    return LocalLazyCallThroughManager::Create<OrcAArch64>(ES,
+                                                           ErrorHandlerAddr);
+
+  case Triple::x86:
+    return LocalLazyCallThroughManager::Create<OrcI386>(ES, ErrorHandlerAddr);
+
+  case Triple::loongarch64:
+    return LocalLazyCallThroughManager::Create<OrcLoongArch64>(
+        ES, ErrorHandlerAddr);
+
+  case Triple::mips:
+    return LocalLazyCallThroughManager::Create<OrcMips32Be>(ES,
+                                                            ErrorHandlerAddr);
+
+  case Triple::mipsel:
+    return LocalLazyCallThroughManager::Create<OrcMips32Le>(ES,
+                                                            ErrorHandlerAddr);
+
+  case Triple::mips64:
+  case Triple::mips64el:
+    return LocalLazyCallThroughManager::Create<OrcMips64>(ES, ErrorHandlerAddr);
+
+  case Triple::riscv64:
+    return LocalLazyCallThroughManager::Create<OrcRiscv64>(ES,
+                                                           ErrorHandlerAddr);
+
+  case Triple::x86_64:
+    // x86-64 calling convention differs between Windows and SysV targets.
+    if (T.getOS() == Triple::OSType::Win32)
+      return LocalLazyCallThroughManager::Create<OrcX86_64_Win32>(
+          ES, ErrorHandlerAddr);
+    else
+      return LocalLazyCallThroughManager::Create<OrcX86_64_SysV>(
+          ES, ErrorHandlerAddr);
+  }
+}
+
+// A materialization unit whose symbols are lazy aliases: each resolves to an
+// indirect stub that compiles the aliasee on first call.
+LazyReexportsMaterializationUnit::LazyReexportsMaterializationUnit(
+    LazyCallThroughManager &LCTManager, IndirectStubsManager &ISManager,
+    JITDylib &SourceJD, SymbolAliasMap CallableAliases, ImplSymbolMap *SrcJDLoc)
+    : MaterializationUnit(extractFlags(CallableAliases)),
+      LCTManager(LCTManager), ISManager(ISManager), SourceJD(SourceJD),
+      CallableAliases(std::move(CallableAliases)), AliaseeTable(SrcJDLoc) {}
+
+StringRef LazyReexportsMaterializationUnit::getName() const {
+  return "<Lazy Reexports>";
+}
+
+// Materialize only the requested aliases as indirect stubs backed by
+// lazy-call-through trampolines; re-post the rest as a replacement unit so
+// they stay lazy.
+void LazyReexportsMaterializationUnit::materialize(
+    std::unique_ptr<MaterializationResponsibility> R) {
+  auto RequestedSymbols = R->getRequestedSymbols();
+
+  // Split the requested aliases out of the full alias map.
+  SymbolAliasMap RequestedAliases;
+  for (auto &RequestedSymbol : RequestedSymbols) {
+    auto I = CallableAliases.find(RequestedSymbol);
+    assert(I != CallableAliases.end() && "Symbol not found in alias map?");
+    RequestedAliases[I->first] = std::move(I->second);
+    CallableAliases.erase(I);
+  }
+
+  // Hand unrequested aliases back to the JITDylib as a fresh unit.
+  if (!CallableAliases.empty())
+    if (auto Err = R->replace(lazyReexports(LCTManager, ISManager, SourceJD,
+                                            std::move(CallableAliases),
+                                            AliaseeTable))) {
+      R->getExecutionSession().reportError(std::move(Err));
+      R->failMaterialization();
+      return;
+    }
+
+  // One trampoline per requested alias; on resolution the stub pointer is
+  // updated to the real address.
+  IndirectStubsManager::StubInitsMap StubInits;
+  for (auto &Alias : RequestedAliases) {
+
+    auto CallThroughTrampoline = LCTManager.getCallThroughTrampoline(
+        SourceJD, Alias.second.Aliasee,
+        [&ISManager = this->ISManager,
+         StubSym = Alias.first](ExecutorAddr ResolvedAddr) -> Error {
+          return ISManager.updatePointer(*StubSym, ResolvedAddr);
+        });
+
+    if (!CallThroughTrampoline) {
+      SourceJD.getExecutionSession().reportError(
+          CallThroughTrampoline.takeError());
+      R->failMaterialization();
+      return;
+    }
+
+    StubInits[*Alias.first] =
+        std::make_pair(*CallThroughTrampoline, Alias.second.AliasFlags);
+  }
+
+  if (AliaseeTable != nullptr && !RequestedAliases.empty())
+    AliaseeTable->trackImpls(RequestedAliases, &SourceJD);
+
+  if (auto Err = ISManager.createStubs(StubInits)) {
+    SourceJD.getExecutionSession().reportError(std::move(Err));
+    R->failMaterialization();
+    return;
+  }
+
+  SymbolMap Stubs;
+  for (auto &Alias : RequestedAliases)
+    Stubs[Alias.first] = ISManager.findStub(*Alias.first, false);
+
+  // No registered dependencies, so these calls cannot fail.
+  cantFail(R->notifyResolved(Stubs));
+  cantFail(R->notifyEmitted({}));
+}
+
+// Drop responsibility for an alias another unit will provide.
+void LazyReexportsMaterializationUnit::discard(const JITDylib &JD,
+                                               const SymbolStringPtr &Name) {
+  assert(CallableAliases.count(Name) &&
+         "Symbol not covered by this MaterializationUnit");
+  CallableAliases.erase(Name);
+}
+
+// Build the unit's interface from the alias map. Lazy reexports have no
+// init symbol, and every alias must be callable (stubs are code).
+MaterializationUnit::Interface
+LazyReexportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+  SymbolFlagsMap SymbolFlags;
+  for (auto &KV : Aliases) {
+    assert(KV.second.AliasFlags.isCallable() &&
+           "Lazy re-exports must be callable symbols");
+    SymbolFlags[KV.first] = KV.second.AliasFlags;
+  }
+  return MaterializationUnit::Interface(std::move(SymbolFlags), nullptr);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp
new file mode 100644
index 000000000000..a369e1b53382
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp
@@ -0,0 +1,82 @@
+//===------- LookupAndRecordAddrs.h - Symbol lookup support utility -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+
+#include <future>
+
+namespace llvm {
+namespace orc {
+
+void lookupAndRecordAddrs(
+ unique_function<void(Error)> OnRecorded, ExecutionSession &ES, LookupKind K,
+ const JITDylibSearchOrder &SearchOrder,
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
+ SymbolLookupFlags LookupFlags) {
+
+ SymbolLookupSet Symbols;
+ for (auto &KV : Pairs)
+ Symbols.add(KV.first, LookupFlags);
+
+ ES.lookup(
+ K, SearchOrder, std::move(Symbols), SymbolState::Ready,
+ [Pairs = std::move(Pairs),
+ OnRec = std::move(OnRecorded)](Expected<SymbolMap> Result) mutable {
+ if (!Result)
+ return OnRec(Result.takeError());
+ for (auto &KV : Pairs) {
+ auto I = Result->find(KV.first);
+ *KV.second =
+ I != Result->end() ? I->second.getAddress() : orc::ExecutorAddr();
+ }
+ OnRec(Error::success());
+ },
+ NoDependenciesToRegister);
+}
+
+Error lookupAndRecordAddrs(
+ ExecutionSession &ES, LookupKind K, const JITDylibSearchOrder &SearchOrder,
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
+ SymbolLookupFlags LookupFlags) {
+
+ std::promise<MSVCPError> ResultP;
+ auto ResultF = ResultP.get_future();
+ lookupAndRecordAddrs([&](Error Err) { ResultP.set_value(std::move(Err)); },
+ ES, K, SearchOrder, std::move(Pairs), LookupFlags);
+ return ResultF.get();
+}
+
+Error lookupAndRecordAddrs(
+ ExecutorProcessControl &EPC, tpctypes::DylibHandle H,
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
+ SymbolLookupFlags LookupFlags) {
+
+ SymbolLookupSet Symbols;
+ for (auto &KV : Pairs)
+ Symbols.add(KV.first, LookupFlags);
+
+ ExecutorProcessControl::LookupRequest LR(H, Symbols);
+ auto Result = EPC.lookupSymbols(LR);
+ if (!Result)
+ return Result.takeError();
+
+ if (Result->size() != 1)
+ return make_error<StringError>("Error in lookup result",
+ inconvertibleErrorCode());
+ if (Result->front().size() != Pairs.size())
+ return make_error<StringError>("Error in lookup result elements",
+ inconvertibleErrorCode());
+
+ for (unsigned I = 0; I != Pairs.size(); ++I)
+ *Pairs[I].second = Result->front()[I].getAddress();
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp
new file mode 100644
index 000000000000..0d117f7cf873
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp
@@ -0,0 +1,1834 @@
+//===------ MachOPlatform.cpp - Utilities for executing MachO in Orc ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/ExecutionEngine/Orc/MachOBuilder.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ObjectFormats.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/Debug.h"
+#include <optional>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+using SPSMachOJITDylibDepInfo = SPSTuple<bool, SPSSequence<SPSExecutorAddr>>;
+using SPSMachOJITDylibDepInfoMap =
+ SPSSequence<SPSTuple<SPSExecutorAddr, SPSMachOJITDylibDepInfo>>;
+
+class SPSMachOExecutorSymbolFlags;
+
+template <>
+class SPSSerializationTraits<SPSMachOJITDylibDepInfo,
+ MachOPlatform::MachOJITDylibDepInfo> {
+public:
+ static size_t size(const MachOPlatform::MachOJITDylibDepInfo &DDI) {
+ return SPSMachOJITDylibDepInfo::AsArgList::size(DDI.Sealed, DDI.DepHeaders);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const MachOPlatform::MachOJITDylibDepInfo &DDI) {
+ return SPSMachOJITDylibDepInfo::AsArgList::serialize(OB, DDI.Sealed,
+ DDI.DepHeaders);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ MachOPlatform::MachOJITDylibDepInfo &DDI) {
+ return SPSMachOJITDylibDepInfo::AsArgList::deserialize(IB, DDI.Sealed,
+ DDI.DepHeaders);
+ }
+};
+
+template <>
+class SPSSerializationTraits<SPSMachOExecutorSymbolFlags,
+ MachOPlatform::MachOExecutorSymbolFlags> {
+private:
+ using UT = std::underlying_type_t<MachOPlatform::MachOExecutorSymbolFlags>;
+
+public:
+ static size_t size(const MachOPlatform::MachOExecutorSymbolFlags &SF) {
+ return sizeof(UT);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const MachOPlatform::MachOExecutorSymbolFlags &SF) {
+ return SPSArgList<UT>::serialize(OB, static_cast<UT>(SF));
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ MachOPlatform::MachOExecutorSymbolFlags &SF) {
+ UT Tmp;
+ if (!SPSArgList<UT>::deserialize(IB, Tmp))
+ return false;
+ SF = static_cast<MachOPlatform::MachOExecutorSymbolFlags>(Tmp);
+ return true;
+ }
+};
+
+} // namespace shared
+} // namespace orc
+} // namespace llvm
+
+namespace {
+
+using SPSRegisterSymbolsArgs =
+ SPSArgList<SPSExecutorAddr,
+ SPSSequence<SPSTuple<SPSExecutorAddr, SPSExecutorAddr,
+ SPSMachOExecutorSymbolFlags>>>;
+
+std::unique_ptr<jitlink::LinkGraph> createPlatformGraph(MachOPlatform &MOP,
+ std::string Name) {
+ unsigned PointerSize;
+ llvm::endianness Endianness;
+ const auto &TT = MOP.getExecutionSession().getTargetTriple();
+
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ case Triple::x86_64:
+ PointerSize = 8;
+ Endianness = llvm::endianness::little;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+
+ return std::make_unique<jitlink::LinkGraph>(std::move(Name), TT, PointerSize,
+ Endianness,
+ jitlink::getGenericEdgeKindName);
+}
+
+// Creates a Bootstrap-Complete LinkGraph to run deferred actions.
+class MachOPlatformCompleteBootstrapMaterializationUnit
+ : public MaterializationUnit {
+public:
+ using SymbolTableVector =
+ SmallVector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOPlatform::MachOExecutorSymbolFlags>>;
+
+ MachOPlatformCompleteBootstrapMaterializationUnit(
+ MachOPlatform &MOP, StringRef PlatformJDName,
+ SymbolStringPtr CompleteBootstrapSymbol, SymbolTableVector SymTab,
+ shared::AllocActions DeferredAAs, ExecutorAddr MachOHeaderAddr,
+ ExecutorAddr PlatformBootstrap, ExecutorAddr PlatformShutdown,
+ ExecutorAddr RegisterJITDylib, ExecutorAddr DeregisterJITDylib,
+ ExecutorAddr RegisterObjectSymbolTable,
+ ExecutorAddr DeregisterObjectSymbolTable)
+ : MaterializationUnit(
+ {{{CompleteBootstrapSymbol, JITSymbolFlags::None}}, nullptr}),
+ MOP(MOP), PlatformJDName(PlatformJDName),
+ CompleteBootstrapSymbol(std::move(CompleteBootstrapSymbol)),
+ SymTab(std::move(SymTab)), DeferredAAs(std::move(DeferredAAs)),
+ MachOHeaderAddr(MachOHeaderAddr), PlatformBootstrap(PlatformBootstrap),
+ PlatformShutdown(PlatformShutdown), RegisterJITDylib(RegisterJITDylib),
+ DeregisterJITDylib(DeregisterJITDylib),
+ RegisterObjectSymbolTable(RegisterObjectSymbolTable),
+ DeregisterObjectSymbolTable(DeregisterObjectSymbolTable) {}
+
+ StringRef getName() const override {
+ return "MachOPlatformCompleteBootstrap";
+ }
+
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ using namespace jitlink;
+ auto G = createPlatformGraph(MOP, "<OrcRTCompleteBootstrap>");
+ auto &PlaceholderSection =
+ G->createSection("__orc_rt_cplt_bs", MemProt::Read);
+ auto &PlaceholderBlock =
+ G->createZeroFillBlock(PlaceholderSection, 1, ExecutorAddr(), 1, 0);
+ G->addDefinedSymbol(PlaceholderBlock, 0, *CompleteBootstrapSymbol, 1,
+ Linkage::Strong, Scope::Hidden, false, true);
+
+ // Reserve space for the stolen actions, plus two extras.
+ G->allocActions().reserve(DeferredAAs.size() + 3);
+
+ // 1. Bootstrap the platform support code.
+ G->allocActions().push_back(
+ {cantFail(WrapperFunctionCall::Create<SPSArgList<>>(PlatformBootstrap)),
+ cantFail(
+ WrapperFunctionCall::Create<SPSArgList<>>(PlatformShutdown))});
+
+ // 2. Register the platform JITDylib.
+ G->allocActions().push_back(
+ {cantFail(WrapperFunctionCall::Create<
+ SPSArgList<SPSString, SPSExecutorAddr>>(
+ RegisterJITDylib, PlatformJDName, MachOHeaderAddr)),
+ cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddr>>(
+ DeregisterJITDylib, MachOHeaderAddr))});
+
+ // 3. Register deferred symbols.
+ G->allocActions().push_back(
+ {cantFail(WrapperFunctionCall::Create<SPSRegisterSymbolsArgs>(
+ RegisterObjectSymbolTable, MachOHeaderAddr, SymTab)),
+ cantFail(WrapperFunctionCall::Create<SPSRegisterSymbolsArgs>(
+ DeregisterObjectSymbolTable, MachOHeaderAddr, SymTab))});
+
+ // 4. Add the deferred actions to the graph.
+ std::move(DeferredAAs.begin(), DeferredAAs.end(),
+ std::back_inserter(G->allocActions()));
+
+ MOP.getObjectLinkingLayer().emit(std::move(R), std::move(G));
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Sym) override {}
+
+private:
+ MachOPlatform &MOP;
+ StringRef PlatformJDName;
+ SymbolStringPtr CompleteBootstrapSymbol;
+ SymbolTableVector SymTab;
+ shared::AllocActions DeferredAAs;
+ ExecutorAddr MachOHeaderAddr;
+ ExecutorAddr PlatformBootstrap;
+ ExecutorAddr PlatformShutdown;
+ ExecutorAddr RegisterJITDylib;
+ ExecutorAddr DeregisterJITDylib;
+ ExecutorAddr RegisterObjectSymbolTable;
+ ExecutorAddr DeregisterObjectSymbolTable;
+};
+
+static StringRef ObjCRuntimeObjectSectionsData[] = {
+ MachOObjCCatListSectionName, MachOObjCCatList2SectionName,
+ MachOObjCClassListSectionName, MachOObjCClassRefsSectionName,
+ MachOObjCConstSectionName, MachOObjCDataSectionName,
+ MachOObjCProtoListSectionName, MachOObjCProtoRefsSectionName,
+ MachOObjCNLCatListSectionName, MachOObjCNLClassListSectionName,
+ MachOObjCSelRefsSectionName};
+
+static StringRef ObjCRuntimeObjectSectionsText[] = {
+ MachOObjCClassNameSectionName, MachOObjCMethNameSectionName,
+ MachOObjCMethTypeSectionName, MachOSwift5TypesSectionName,
+ MachOSwift5TypeRefSectionName, MachOSwift5FieldMetadataSectionName,
+ MachOSwift5EntrySectionName, MachOSwift5ProtoSectionName,
+ MachOSwift5ProtosSectionName};
+
+static StringRef ObjCRuntimeObjectSectionName =
+ "__llvm_jitlink_ObjCRuntimeRegistrationObject";
+
+static StringRef ObjCImageInfoSymbolName =
+ "__llvm_jitlink_macho_objc_imageinfo";
+
+struct ObjCImageInfoFlags {
+ uint16_t SwiftABIVersion;
+ uint16_t SwiftVersion;
+ bool HasCategoryClassProperties;
+ bool HasSignedObjCClassROs;
+
+ static constexpr uint32_t SIGNED_CLASS_RO = (1 << 4);
+ static constexpr uint32_t HAS_CATEGORY_CLASS_PROPERTIES = (1 << 6);
+
+ explicit ObjCImageInfoFlags(uint32_t RawFlags) {
+ HasSignedObjCClassROs = RawFlags & SIGNED_CLASS_RO;
+ HasCategoryClassProperties = RawFlags & HAS_CATEGORY_CLASS_PROPERTIES;
+ SwiftABIVersion = (RawFlags >> 8) & 0xFF;
+ SwiftVersion = (RawFlags >> 16) & 0xFFFF;
+ }
+
+ uint32_t rawFlags() const {
+ uint32_t Result = 0;
+ if (HasCategoryClassProperties)
+ Result |= HAS_CATEGORY_CLASS_PROPERTIES;
+ if (HasSignedObjCClassROs)
+ Result |= SIGNED_CLASS_RO;
+ Result |= (SwiftABIVersion << 8);
+ Result |= (SwiftVersion << 16);
+ return Result;
+ }
+};
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+std::optional<MachOPlatform::HeaderOptions::BuildVersionOpts>
+MachOPlatform::HeaderOptions::BuildVersionOpts::fromTriple(const Triple &TT,
+ uint32_t MinOS,
+ uint32_t SDK) {
+
+ uint32_t Platform;
+ switch (TT.getOS()) {
+ case Triple::IOS:
+ Platform = TT.isSimulatorEnvironment() ? MachO::PLATFORM_IOSSIMULATOR
+ : MachO::PLATFORM_IOS;
+ break;
+ case Triple::MacOSX:
+ Platform = MachO::PLATFORM_MACOS;
+ break;
+ case Triple::TvOS:
+ Platform = TT.isSimulatorEnvironment() ? MachO::PLATFORM_TVOSSIMULATOR
+ : MachO::PLATFORM_TVOS;
+ break;
+ case Triple::WatchOS:
+ Platform = TT.isSimulatorEnvironment() ? MachO::PLATFORM_WATCHOSSIMULATOR
+ : MachO::PLATFORM_WATCHOS;
+ break;
+ case Triple::XROS:
+ Platform = TT.isSimulatorEnvironment() ? MachO::PLATFORM_XROS_SIMULATOR
+ : MachO::PLATFORM_XROS;
+ break;
+ default:
+ return std::nullopt;
+ }
+
+ return MachOPlatform::HeaderOptions::BuildVersionOpts{Platform, MinOS, SDK};
+}
+
+Expected<std::unique_ptr<MachOPlatform>> MachOPlatform::Create(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, std::unique_ptr<DefinitionGenerator> OrcRuntime,
+ HeaderOptions PlatformJDOpts, MachOHeaderMUBuilder BuildMachOHeaderMU,
+ std::optional<SymbolAliasMap> RuntimeAliases) {
+
+ // If the target is not supported then bail out immediately.
+ if (!supportedTarget(ES.getTargetTriple()))
+ return make_error<StringError>("Unsupported MachOPlatform triple: " +
+ ES.getTargetTriple().str(),
+ inconvertibleErrorCode());
+
+ auto &EPC = ES.getExecutorProcessControl();
+
+ // Create default aliases if the caller didn't supply any.
+ if (!RuntimeAliases)
+ RuntimeAliases = standardPlatformAliases(ES);
+
+ // Define the aliases.
+ if (auto Err = PlatformJD.define(symbolAliases(std::move(*RuntimeAliases))))
+ return std::move(Err);
+
+ // Add JIT-dispatch function support symbols.
+ if (auto Err = PlatformJD.define(
+ absoluteSymbols({{ES.intern("___orc_rt_jit_dispatch"),
+ {EPC.getJITDispatchInfo().JITDispatchFunction,
+ JITSymbolFlags::Exported}},
+ {ES.intern("___orc_rt_jit_dispatch_ctx"),
+ {EPC.getJITDispatchInfo().JITDispatchContext,
+ JITSymbolFlags::Exported}}})))
+ return std::move(Err);
+
+ // Create the instance.
+ Error Err = Error::success();
+ auto P = std::unique_ptr<MachOPlatform>(new MachOPlatform(
+ ES, ObjLinkingLayer, PlatformJD, std::move(OrcRuntime),
+ std::move(PlatformJDOpts), std::move(BuildMachOHeaderMU), Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(P);
+}
+
+Expected<std::unique_ptr<MachOPlatform>>
+MachOPlatform::Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, const char *OrcRuntimePath,
+ HeaderOptions PlatformJDOpts,
+ MachOHeaderMUBuilder BuildMachOHeaderMU,
+ std::optional<SymbolAliasMap> RuntimeAliases) {
+
+ // Create a generator for the ORC runtime archive.
+ auto OrcRuntimeArchiveGenerator =
+ StaticLibraryDefinitionGenerator::Load(ObjLinkingLayer, OrcRuntimePath);
+ if (!OrcRuntimeArchiveGenerator)
+ return OrcRuntimeArchiveGenerator.takeError();
+
+ return Create(ES, ObjLinkingLayer, PlatformJD,
+ std::move(*OrcRuntimeArchiveGenerator),
+ std::move(PlatformJDOpts), std::move(BuildMachOHeaderMU),
+ std::move(RuntimeAliases));
+}
+
+Error MachOPlatform::setupJITDylib(JITDylib &JD) {
+ return setupJITDylib(JD, /*Opts=*/{});
+}
+
+Error MachOPlatform::setupJITDylib(JITDylib &JD, HeaderOptions Opts) {
+ if (auto Err = JD.define(BuildMachOHeaderMU(*this, std::move(Opts))))
+ return Err;
+
+ return ES.lookup({&JD}, MachOHeaderStartSymbol).takeError();
+}
+
+Error MachOPlatform::teardownJITDylib(JITDylib &JD) {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = JITDylibToHeaderAddr.find(&JD);
+ if (I != JITDylibToHeaderAddr.end()) {
+ assert(HeaderAddrToJITDylib.count(I->second) &&
+ "HeaderAddrToJITDylib missing entry");
+ HeaderAddrToJITDylib.erase(I->second);
+ JITDylibToHeaderAddr.erase(I);
+ }
+ JITDylibToPThreadKey.erase(&JD);
+ return Error::success();
+}
+
+Error MachOPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ const auto &InitSym = MU.getInitializerSymbol();
+ if (!InitSym)
+ return Error::success();
+
+ RegisteredInitSymbols[&JD].add(InitSym,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Registered init symbol " << *InitSym << " for MU "
+ << MU.getName() << "\n";
+ });
+ return Error::success();
+}
+
+Error MachOPlatform::notifyRemoving(ResourceTracker &RT) {
+ llvm_unreachable("Not supported yet");
+}
+
+static void addAliases(ExecutionSession &ES, SymbolAliasMap &Aliases,
+ ArrayRef<std::pair<const char *, const char *>> AL) {
+ for (auto &KV : AL) {
+ auto AliasName = ES.intern(KV.first);
+ assert(!Aliases.count(AliasName) && "Duplicate symbol name in alias map");
+ Aliases[std::move(AliasName)] = {ES.intern(KV.second),
+ JITSymbolFlags::Exported};
+ }
+}
+
+SymbolAliasMap MachOPlatform::standardPlatformAliases(ExecutionSession &ES) {
+ SymbolAliasMap Aliases;
+ addAliases(ES, Aliases, requiredCXXAliases());
+ addAliases(ES, Aliases, standardRuntimeUtilityAliases());
+ return Aliases;
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+MachOPlatform::requiredCXXAliases() {
+ static const std::pair<const char *, const char *> RequiredCXXAliases[] = {
+ {"___cxa_atexit", "___orc_rt_macho_cxa_atexit"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(RequiredCXXAliases);
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+MachOPlatform::standardRuntimeUtilityAliases() {
+ static const std::pair<const char *, const char *>
+ StandardRuntimeUtilityAliases[] = {
+ {"___orc_rt_run_program", "___orc_rt_macho_run_program"},
+ {"___orc_rt_jit_dlerror", "___orc_rt_macho_jit_dlerror"},
+ {"___orc_rt_jit_dlopen", "___orc_rt_macho_jit_dlopen"},
+ {"___orc_rt_jit_dlclose", "___orc_rt_macho_jit_dlclose"},
+ {"___orc_rt_jit_dlsym", "___orc_rt_macho_jit_dlsym"},
+ {"___orc_rt_log_error", "___orc_rt_log_error_to_stderr"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(
+ StandardRuntimeUtilityAliases);
+}
+
+bool MachOPlatform::supportedTarget(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ case Triple::x86_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+jitlink::Edge::Kind MachOPlatform::getPointerEdgeKind(jitlink::LinkGraph &G) {
+ switch (G.getTargetTriple().getArch()) {
+ case Triple::aarch64:
+ return jitlink::aarch64::Pointer64;
+ case Triple::x86_64:
+ return jitlink::x86_64::Pointer64;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+}
+
+MachOPlatform::MachOExecutorSymbolFlags
+MachOPlatform::flagsForSymbol(jitlink::Symbol &Sym) {
+ MachOPlatform::MachOExecutorSymbolFlags Flags{};
+ if (Sym.getLinkage() == jitlink::Linkage::Weak)
+ Flags |= MachOExecutorSymbolFlags::Weak;
+
+ if (Sym.isCallable())
+ Flags |= MachOExecutorSymbolFlags::Callable;
+
+ return Flags;
+}
+
+MachOPlatform::MachOPlatform(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD,
+ std::unique_ptr<DefinitionGenerator> OrcRuntimeGenerator,
+ HeaderOptions PlatformJDOpts, MachOHeaderMUBuilder BuildMachOHeaderMU,
+ Error &Err)
+ : ES(ES), PlatformJD(PlatformJD), ObjLinkingLayer(ObjLinkingLayer),
+ BuildMachOHeaderMU(std::move(BuildMachOHeaderMU)) {
+ ErrorAsOutParameter _(&Err);
+ ObjLinkingLayer.addPlugin(std::make_unique<MachOPlatformPlugin>(*this));
+ PlatformJD.addGenerator(std::move(OrcRuntimeGenerator));
+
+ BootstrapInfo BI;
+ Bootstrap = &BI;
+
+ // Bootstrap process -- here be phase-ordering dragons.
+ //
+ // The MachOPlatform class uses allocation actions to register metadata
+ // sections with the ORC runtime, however the runtime contains metadata
+ // registration functions that have their own metadata that they need to
+ // register (e.g. the frame-info registration functions have frame-info).
+ // We can't use an ordinary lookup to find these registration functions
+ // because their address is needed during the link of the containing graph
+ // itself (to build the allocation actions that will call the registration
+ // functions). Further complicating the situation (a) the graph containing
+ // the registration functions is allowed to depend on other graphs (e.g. the
+ // graph containing the ORC runtime RTTI support) so we need to handle an
+ // unknown set of dependencies during bootstrap, and (b) these graphs may
+ // be linked concurrently if the user has installed a concurrent dispatcher.
+ //
+ // We satisfy these constraints by implementing a bootstrap phase during which
+ // allocation actions generated by MachOPlatform are appended to a list of
+ // deferred allocation actions, rather than to the graphs themselves. At the
+ // end of the bootstrap process the deferred actions are attached to a final
+ // "complete-bootstrap" graph that causes them to be run.
+ //
+ // The bootstrap steps are as follows:
+ //
+ // 1. Request the graph containing the mach header. This graph is guaranteed
+ // not to have any metadata so the fact that the registration functions
+ // are not available yet is not a problem.
+ //
+ // 2. Look up the registration functions and discard the results. This will
+ // trigger linking of the graph containing these functions, and
+ // consequently any graphs that it depends on. We do not use the lookup
+ // result to find the addresses of the functions requested (as described
+ // above the lookup will return too late for that), instead we capture the
+ // addresses in a post-allocation pass injected by the platform runtime
+ // during bootstrap only.
+ //
+ // 3. During bootstrap the MachOPlatformPlugin keeps a count of the number of
+ // graphs being linked (potentially concurrently), and we block until all
+ // of these graphs have completed linking. This is to avoid a race on the
+ // deferred-actions vector: the lookup for the runtime registration
+ // functions may return while some functions (those that are being
+ // incidentally linked in, but aren't reachable via the runtime functions)
+ // are still being linked, and we need to capture any allocation actions
+ // for this incidental code before we proceed.
+ //
+ // 4. Once all active links are complete we transfer the deferred actions to
+ // a newly added CompleteBootstrap graph and then request a symbol from
+ // the CompleteBootstrap graph to trigger materialization. This will cause
+ // all deferred actions to be run, and once this lookup returns we can
+ // proceed.
+ //
+ // 5. Finally, we associate runtime support methods in MachOPlatform with
+ // the corresponding jit-dispatch tag variables in the ORC runtime to make
+ // the support methods callable. The bootstrap is now complete.
+
+ // Step (1) Add header materialization unit and request.
+ if ((Err = PlatformJD.define(
+ this->BuildMachOHeaderMU(*this, std::move(PlatformJDOpts)))))
+ return;
+ if ((Err = ES.lookup(&PlatformJD, MachOHeaderStartSymbol).takeError()))
+ return;
+
+ // Step (2) Request runtime registration functions to trigger
+ // materialization..
+ if ((Err = ES.lookup(makeJITDylibSearchOrder(&PlatformJD),
+ SymbolLookupSet(
+ {PlatformBootstrap.Name, PlatformShutdown.Name,
+ RegisterJITDylib.Name, DeregisterJITDylib.Name,
+ RegisterObjectSymbolTable.Name,
+ DeregisterObjectSymbolTable.Name,
+ RegisterObjectPlatformSections.Name,
+ DeregisterObjectPlatformSections.Name,
+ CreatePThreadKey.Name}))
+ .takeError()))
+ return;
+
+ // Step (3) Wait for any incidental linker work to complete.
+ {
+ std::unique_lock<std::mutex> Lock(BI.Mutex);
+ BI.CV.wait(Lock, [&]() { return BI.ActiveGraphs == 0; });
+ Bootstrap = nullptr;
+ }
+
+ // Step (4) Add complete-bootstrap materialization unit and request.
+ auto BootstrapCompleteSymbol = ES.intern("__orc_rt_macho_complete_bootstrap");
+ if ((Err = PlatformJD.define(
+ std::make_unique<MachOPlatformCompleteBootstrapMaterializationUnit>(
+ *this, PlatformJD.getName(), BootstrapCompleteSymbol,
+ std::move(BI.SymTab), std::move(BI.DeferredAAs),
+ BI.MachOHeaderAddr, PlatformBootstrap.Addr,
+ PlatformShutdown.Addr, RegisterJITDylib.Addr,
+ DeregisterJITDylib.Addr, RegisterObjectSymbolTable.Addr,
+ DeregisterObjectSymbolTable.Addr))))
+ return;
+ if ((Err = ES.lookup(makeJITDylibSearchOrder(
+ &PlatformJD, JITDylibLookupFlags::MatchAllSymbols),
+ std::move(BootstrapCompleteSymbol))
+ .takeError()))
+ return;
+
+ // (5) Associate runtime support functions.
+ if ((Err = associateRuntimeSupportFunctions()))
+ return;
+}
+
+Error MachOPlatform::associateRuntimeSupportFunctions() {
+ ExecutionSession::JITDispatchHandlerAssociationMap WFs;
+
+ using PushInitializersSPSSig =
+ SPSExpected<SPSMachOJITDylibDepInfoMap>(SPSExecutorAddr);
+ WFs[ES.intern("___orc_rt_macho_push_initializers_tag")] =
+ ES.wrapAsyncWithSPS<PushInitializersSPSSig>(
+ this, &MachOPlatform::rt_pushInitializers);
+
+ using PushSymbolsSPSSig =
+ SPSError(SPSExecutorAddr, SPSSequence<SPSTuple<SPSString, bool>>);
+ WFs[ES.intern("___orc_rt_macho_push_symbols_tag")] =
+ ES.wrapAsyncWithSPS<PushSymbolsSPSSig>(this,
+ &MachOPlatform::rt_pushSymbols);
+
+ return ES.registerJITDispatchHandlers(PlatformJD, std::move(WFs));
+}
+
+void MachOPlatform::pushInitializersLoop(
+ PushInitializersSendResultFn SendResult, JITDylibSP JD) {
+ DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
+ DenseMap<JITDylib *, SmallVector<JITDylib *>> JDDepMap;
+ SmallVector<JITDylib *, 16> Worklist({JD.get()});
+
+ ES.runSessionLocked([&]() {
+ while (!Worklist.empty()) {
+ // FIXME: Check for defunct dylibs.
+
+ auto DepJD = Worklist.back();
+ Worklist.pop_back();
+
+ // If we've already visited this JITDylib on this iteration then continue.
+ if (JDDepMap.count(DepJD))
+ continue;
+
+ // Add dep info.
+ auto &DM = JDDepMap[DepJD];
+ DepJD->withLinkOrderDo([&](const JITDylibSearchOrder &O) {
+ for (auto &KV : O) {
+ if (KV.first == DepJD)
+ continue;
+ DM.push_back(KV.first);
+ Worklist.push_back(KV.first);
+ }
+ });
+
+ // Add any registered init symbols.
+ auto RISItr = RegisteredInitSymbols.find(DepJD);
+ if (RISItr != RegisteredInitSymbols.end()) {
+ NewInitSymbols[DepJD] = std::move(RISItr->second);
+ RegisteredInitSymbols.erase(RISItr);
+ }
+ }
+ });
+
+ // If there are no further init symbols to look up then send the link order
+ // (as a list of header addresses) to the caller.
+ if (NewInitSymbols.empty()) {
+
+ // To make the list intelligible to the runtime we need to convert all
+ // JITDylib pointers to their header addresses. Only include JITDylibs
+ // that appear in the JITDylibToHeaderAddr map (i.e. those that have been
+ // through setupJITDylib) -- bare JITDylibs aren't managed by the platform.
+ DenseMap<JITDylib *, ExecutorAddr> HeaderAddrs;
+ HeaderAddrs.reserve(JDDepMap.size());
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ for (auto &KV : JDDepMap) {
+ auto I = JITDylibToHeaderAddr.find(KV.first);
+ if (I != JITDylibToHeaderAddr.end())
+ HeaderAddrs[KV.first] = I->second;
+ }
+ }
+
+ // Build the dep info map to return.
+ MachOJITDylibDepInfoMap DIM;
+ DIM.reserve(JDDepMap.size());
+ for (auto &KV : JDDepMap) {
+ auto HI = HeaderAddrs.find(KV.first);
+ // Skip unmanaged JITDylibs.
+ if (HI == HeaderAddrs.end())
+ continue;
+ auto H = HI->second;
+ MachOJITDylibDepInfo DepInfo;
+ for (auto &Dep : KV.second) {
+ auto HJ = HeaderAddrs.find(Dep);
+ if (HJ != HeaderAddrs.end())
+ DepInfo.DepHeaders.push_back(HJ->second);
+ }
+ DIM.push_back(std::make_pair(H, std::move(DepInfo)));
+ }
+ SendResult(DIM);
+ return;
+ }
+
+ // Otherwise issue a lookup and re-run this phase when it completes.
+ lookupInitSymbolsAsync(
+ [this, SendResult = std::move(SendResult), JD](Error Err) mutable {
+ if (Err)
+ SendResult(std::move(Err));
+ else
+ pushInitializersLoop(std::move(SendResult), JD);
+ },
+ ES, std::move(NewInitSymbols));
+}
+
+void MachOPlatform::rt_pushInitializers(PushInitializersSendResultFn SendResult,
+ ExecutorAddr JDHeaderAddr) {
+ JITDylibSP JD;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HeaderAddrToJITDylib.find(JDHeaderAddr);
+ if (I != HeaderAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform::rt_pushInitializers(" << JDHeaderAddr << ") ";
+ if (JD)
+ dbgs() << "pushing initializers for " << JD->getName() << "\n";
+ else
+ dbgs() << "No JITDylib for header address.\n";
+ });
+
+ if (!JD) {
+ SendResult(make_error<StringError>("No JITDylib with header addr " +
+ formatv("{0:x}", JDHeaderAddr),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ pushInitializersLoop(std::move(SendResult), JD);
+}
+
+void MachOPlatform::rt_pushSymbols(
+ PushSymbolsInSendResultFn SendResult, ExecutorAddr Handle,
+ const std::vector<std::pair<StringRef, bool>> &SymbolNames) {
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HeaderAddrToJITDylib.find(Handle);
+ if (I != HeaderAddrToJITDylib.end())
+ JD = I->second;
+ }
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform::rt_pushSymbols(";
+ if (JD)
+ dbgs() << "\"" << JD->getName() << "\", [ ";
+ else
+ dbgs() << "<invalid handle " << Handle << ">, [ ";
+ for (auto &Name : SymbolNames)
+ dbgs() << "\"" << Name.first << "\" ";
+ dbgs() << "])\n";
+ });
+
+ if (!JD) {
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ SymbolLookupSet LS;
+ for (auto &[Name, Required] : SymbolNames)
+ LS.add(ES.intern(Name), Required
+ ? SymbolLookupFlags::RequiredSymbol
+ : SymbolLookupFlags::WeaklyReferencedSymbol);
+
+ ES.lookup(
+ LookupKind::DLSym, {{JD, JITDylibLookupFlags::MatchExportedSymbolsOnly}},
+ std::move(LS), SymbolState::Ready,
+ [SendResult = std::move(SendResult)](Expected<SymbolMap> Result) mutable {
+ SendResult(Result.takeError());
+ },
+ NoDependenciesToRegister);
+}
+
+Expected<uint64_t> MachOPlatform::createPThreadKey() {
+ if (!CreatePThreadKey.Addr)
+ return make_error<StringError>(
+ "Attempting to create pthread key in target, but runtime support has "
+ "not been loaded yet",
+ inconvertibleErrorCode());
+
+ Expected<uint64_t> Result(0);
+ if (auto Err = ES.callSPSWrapper<SPSExpected<uint64_t>(void)>(
+ CreatePThreadKey.Addr, Result))
+ return std::move(Err);
+ return Result;
+}
+
+// Configure the JITLink pass pipeline for a MachO LinkGraph: bootstrap
+// bookkeeping (when materializing into PlatformJD during bootstrap),
+// initializer-section handling, TLV lowering, symbol-table registration,
+// and registration of special platform sections with the ORC runtime.
+void MachOPlatform::MachOPlatformPlugin::modifyPassConfig(
+    MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+    jitlink::PassConfiguration &Config) {
+
+  using namespace jitlink;
+
+  // Bootstrap phase applies only to graphs materialized into PlatformJD
+  // while the platform's Bootstrap state is still live.
+  bool InBootstrapPhase =
+      &MR.getTargetJITDylib() == &MP.PlatformJD && MP.Bootstrap;
+
+  // If we're in the bootstrap phase then increment the active graphs.
+  if (InBootstrapPhase) {
+    Config.PrePrunePasses.push_back(
+        [this](LinkGraph &G) { return bootstrapPipelineStart(G); });
+    Config.PostAllocationPasses.push_back([this](LinkGraph &G) {
+      return bootstrapPipelineRecordRuntimeFunctions(G);
+    });
+  }
+
+  // --- Handle Initializers ---
+  if (auto InitSymbol = MR.getInitializerSymbol()) {
+
+    // If the initializer symbol is the MachOHeader start symbol then just
+    // register it and then bail out -- the header materialization unit
+    // definitely doesn't need any other passes.
+    if (InitSymbol == MP.MachOHeaderStartSymbol && !InBootstrapPhase) {
+      Config.PostAllocationPasses.push_back([this, &MR](LinkGraph &G) {
+        return associateJITDylibHeaderSymbol(G, MR);
+      });
+      return;
+    }
+
+    // If the object contains an init symbol other than the header start symbol
+    // then add passes to preserve, process and register the init
+    // sections/symbols.
+    Config.PrePrunePasses.push_back([this, &MR](LinkGraph &G) {
+      if (auto Err = preserveImportantSections(G, MR))
+        return Err;
+      return processObjCImageInfo(G, MR);
+    });
+    Config.PostPrunePasses.push_back(
+        [this](LinkGraph &G) { return createObjCRuntimeObject(G); });
+    Config.PostAllocationPasses.push_back(
+        [this, &MR](LinkGraph &G) { return populateObjCRuntimeObject(G, MR); });
+  }
+
+  // Insert TLV lowering at the start of the PostPrunePasses, since we want
+  // it to run before GOT/PLT lowering.
+  Config.PostPrunePasses.insert(
+      Config.PostPrunePasses.begin(),
+      [this, &JD = MR.getTargetJITDylib()](LinkGraph &G) {
+        return fixTLVSectionsAndEdges(G, JD);
+      });
+
+  // Add symbol table prepare and register passes: These will add strings for
+  // all symbols to the c-strings section, and build a symbol table registration
+  // call.
+  auto JITSymTabInfo = std::make_shared<JITSymTabVector>();
+  Config.PostPrunePasses.push_back([this, JITSymTabInfo](LinkGraph &G) {
+    return prepareSymbolTableRegistration(G, *JITSymTabInfo);
+  });
+  Config.PostFixupPasses.push_back([this, &MR, JITSymTabInfo,
+                                    InBootstrapPhase](LinkGraph &G) {
+    return addSymbolTableRegistration(G, MR, *JITSymTabInfo, InBootstrapPhase);
+  });
+
+  // Add a pass to register the final addresses of any special sections in the
+  // object with the runtime.
+  Config.PostAllocationPasses.push_back(
+      [this, &JD = MR.getTargetJITDylib(), InBootstrapPhase](LinkGraph &G) {
+        return registerObjectPlatformSections(G, JD, InBootstrapPhase);
+      });
+
+  // If we're in the bootstrap phase then steal allocation actions and then
+  // decrement the active graphs.
+  if (InBootstrapPhase)
+    Config.PostFixupPasses.push_back(
+        [this](LinkGraph &G) { return bootstrapPipelineEnd(G); });
+}
+
+// Return (and consume) the init-symbol dependencies recorded for MR by
+// preserveImportantSections. The map entry is erased so each set of
+// dependencies is reported exactly once.
+ObjectLinkingLayer::Plugin::SyntheticSymbolDependenciesMap
+MachOPlatform::MachOPlatformPlugin::getSyntheticSymbolDependencies(
+    MaterializationResponsibility &MR) {
+  std::lock_guard<std::mutex> Lock(PluginMutex);
+  auto I = InitSymbolDeps.find(&MR);
+  if (I != InitSymbolDeps.end()) {
+    SyntheticSymbolDependenciesMap Result;
+    Result[MR.getInitializerSymbol()] = std::move(I->second);
+    InitSymbolDeps.erase(&MR);
+    return Result;
+  }
+  return SyntheticSymbolDependenciesMap();
+}
+
+// Pre-prune bootstrap pass: count this graph as active so that
+// bootstrapPipelineEnd / the bootstrap waiter can tell when all
+// in-flight bootstrap graphs have completed.
+Error MachOPlatform::MachOPlatformPlugin::bootstrapPipelineStart(
+    jitlink::LinkGraph &G) {
+  // Increment the active graphs count in BootstrapInfo.
+  std::lock_guard<std::mutex> Lock(MP.Bootstrap.load()->Mutex);
+  ++MP.Bootstrap.load()->ActiveGraphs;
+  return Error::success();
+}
+
+// Post-allocation bootstrap pass: scan the graph's defined symbols for the
+// ORC runtime's bootstrap entry points and record their addresses in MP.
+// Rejects duplicate definitions, and wires up the PlatformJD <-> header
+// address mapping when this graph defines the MachO header start symbol.
+Error MachOPlatform::MachOPlatformPlugin::
+    bootstrapPipelineRecordRuntimeFunctions(jitlink::LinkGraph &G) {
+  // Record bootstrap function names.
+  std::pair<StringRef, ExecutorAddr *> RuntimeSymbols[] = {
+      {*MP.MachOHeaderStartSymbol, &MP.Bootstrap.load()->MachOHeaderAddr},
+      {*MP.PlatformBootstrap.Name, &MP.PlatformBootstrap.Addr},
+      {*MP.PlatformShutdown.Name, &MP.PlatformShutdown.Addr},
+      {*MP.RegisterJITDylib.Name, &MP.RegisterJITDylib.Addr},
+      {*MP.DeregisterJITDylib.Name, &MP.DeregisterJITDylib.Addr},
+      {*MP.RegisterObjectSymbolTable.Name, &MP.RegisterObjectSymbolTable.Addr},
+      {*MP.DeregisterObjectSymbolTable.Name,
+       &MP.DeregisterObjectSymbolTable.Addr},
+      {*MP.RegisterObjectPlatformSections.Name,
+       &MP.RegisterObjectPlatformSections.Addr},
+      {*MP.DeregisterObjectPlatformSections.Name,
+       &MP.DeregisterObjectPlatformSections.Addr},
+      {*MP.CreatePThreadKey.Name, &MP.CreatePThreadKey.Addr},
+      {*MP.RegisterObjCRuntimeObject.Name, &MP.RegisterObjCRuntimeObject.Addr},
+      {*MP.DeregisterObjCRuntimeObject.Name,
+       &MP.DeregisterObjCRuntimeObject.Addr}};
+
+  bool RegisterMachOHeader = false;
+
+  for (auto *Sym : G.defined_symbols()) {
+    for (auto &RTSym : RuntimeSymbols) {
+      if (Sym->hasName() && Sym->getName() == RTSym.first) {
+        // A non-null recorded address means some earlier graph already
+        // defined this runtime symbol.
+        if (*RTSym.second)
+          return make_error<StringError>(
+              "Duplicate " + RTSym.first +
+                  " detected during MachOPlatform bootstrap",
+              inconvertibleErrorCode());
+
+        if (Sym->getName() == *MP.MachOHeaderStartSymbol)
+          RegisterMachOHeader = true;
+
+        *RTSym.second = Sym->getAddress();
+      }
+    }
+  }
+
+  if (RegisterMachOHeader) {
+    // If this graph defines the macho header symbol then create the internal
+    // mapping between it and PlatformJD.
+    std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+    MP.JITDylibToHeaderAddr[&MP.PlatformJD] =
+        MP.Bootstrap.load()->MachOHeaderAddr;
+    MP.HeaderAddrToJITDylib[MP.Bootstrap.load()->MachOHeaderAddr] =
+        &MP.PlatformJD;
+  }
+
+  return Error::success();
+}
+
+// Post-fixup bootstrap pass: decrement the active-graph count and wake any
+// thread waiting for the bootstrap graphs to drain once it reaches zero.
+Error MachOPlatform::MachOPlatformPlugin::bootstrapPipelineEnd(
+    jitlink::LinkGraph &G) {
+  std::lock_guard<std::mutex> Lock(MP.Bootstrap.load()->Mutex);
+  assert(MP.Bootstrap && "DeferredAAs reset before bootstrap completed");
+  --MP.Bootstrap.load()->ActiveGraphs;
+  // Notify Bootstrap->CV while holding the mutex because the mutex is
+  // also keeping Bootstrap->CV alive.
+  if (MP.Bootstrap.load()->ActiveGraphs == 0)
+    MP.Bootstrap.load()->CV.notify_all();
+  return Error::success();
+}
+
+// Record the address of this graph's MachO header start symbol for MR's
+// target JITDylib, and attach alloc actions that register / deregister the
+// JITDylib with the ORC runtime at that header address.
+Error MachOPlatform::MachOPlatformPlugin::associateJITDylibHeaderSymbol(
+    jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+  auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) {
+    return Sym->getName() == *MP.MachOHeaderStartSymbol;
+  });
+  assert(I != G.defined_symbols().end() && "Missing MachO header start symbol");
+
+  auto &JD = MR.getTargetJITDylib();
+  std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+  auto HeaderAddr = (*I)->getAddress();
+  MP.JITDylibToHeaderAddr[&JD] = HeaderAddr;
+  MP.HeaderAddrToJITDylib[HeaderAddr] = &JD;
+  // We can unconditionally add these actions to the Graph because this pass
+  // isn't used during bootstrap.
+  G.allocActions().push_back(
+      {cantFail(
+           WrapperFunctionCall::Create<SPSArgList<SPSString, SPSExecutorAddr>>(
+               MP.RegisterJITDylib.Addr, JD.getName(), HeaderAddr)),
+       cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddr>>(
+           MP.DeregisterJITDylib.Addr, HeaderAddr))});
+  return Error::success();
+}
+
+// Preserve sections that must survive pruning: __objc_imageinfo (verified
+// and possibly discarded later by processObjCImageInfo) and the init
+// sections, whose addresses are reported to the ORC runtime by
+// registerObjectPlatformSections. The symbols that keep the init sections
+// alive are recorded as init-symbol dependencies for MR.
+Error MachOPlatform::MachOPlatformPlugin::preserveImportantSections(
+    jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+  // __objc_imageinfo is "important": we want to preserve it and record its
+  // address in the first graph that it appears in, then verify and discard it
+  // in all subsequent graphs. In this pass we preserve unconditionally -- we'll
+  // manually throw it away in the processObjCImageInfo pass.
+  if (auto *ObjCImageInfoSec =
+          G.findSectionByName(MachOObjCImageInfoSectionName)) {
+    if (ObjCImageInfoSec->blocks_size() != 1)
+      return make_error<StringError>(
+          "In " + G.getName() +
+              ", __DATA,__objc_imageinfo contains multiple blocks",
+          inconvertibleErrorCode());
+    // Anonymous live symbol keeps the single __objc_imageinfo block alive
+    // through pruning.
+    G.addAnonymousSymbol(**ObjCImageInfoSec->blocks().begin(), 0, 0, false,
+                         true);
+
+    for (auto *B : ObjCImageInfoSec->blocks())
+      if (!B->edges_empty())
+        return make_error<StringError>("In " + G.getName() + ", " +
+                                           MachOObjCImageInfoSectionName +
+                                           " contains references to symbols",
+                                       inconvertibleErrorCode());
+  }
+
+  // Init sections are important: We need to preserve them so that their
+  // addresses can be captured and reported to the ORC runtime in
+  // registerObjectPlatformSections.
+  JITLinkSymbolSet InitSectionSymbols;
+  for (auto &InitSectionName : MachOInitSectionNames) {
+    // Skip ObjCImageInfo -- this shouldn't have any dependencies, and we may
+    // remove it later.
+    if (InitSectionName == MachOObjCImageInfoSectionName)
+      continue;
+
+    // Skip non-init sections.
+    auto *InitSection = G.findSectionByName(InitSectionName);
+    if (!InitSection)
+      continue;
+
+    // Make a pass over live symbols in the section: those blocks are already
+    // preserved.
+    DenseSet<jitlink::Block *> AlreadyLiveBlocks;
+    for (auto &Sym : InitSection->symbols()) {
+      auto &B = Sym->getBlock();
+      if (Sym->isLive() && Sym->getOffset() == 0 &&
+          Sym->getSize() == B.getSize() && !AlreadyLiveBlocks.count(&B)) {
+        InitSectionSymbols.insert(Sym);
+        AlreadyLiveBlocks.insert(&B);
+      }
+    }
+
+    // Add anonymous symbols to preserve any not-already-preserved blocks.
+    for (auto *B : InitSection->blocks())
+      if (!AlreadyLiveBlocks.count(B))
+        InitSectionSymbols.insert(
+            &G.addAnonymousSymbol(*B, 0, B->getSize(), false, true));
+  }
+
+  if (!InitSectionSymbols.empty()) {
+    std::lock_guard<std::mutex> Lock(PluginMutex);
+    InitSymbolDeps[&MR] = std::move(InitSectionSymbols);
+  }
+
+  return Error::success();
+}
+
+// Verify or record this graph's __objc_imageinfo for MR's target JITDylib:
+// the first occurrence per JITDylib is named and registered; subsequent
+// occurrences are checked against the recorded version/flags (merging flags
+// where allowed) and then deleted from the graph.
+Error MachOPlatform::MachOPlatformPlugin::processObjCImageInfo(
+    jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+  // If there's an ObjC image info then either
+  //   (1) It's the first __objc_imageinfo we've seen in this JITDylib. In
+  //       this case we name and record it.
+  // OR
+  //   (2) We already have a recorded __objc_imageinfo for this JITDylib,
+  //       in which case we just verify it.
+  auto *ObjCImageInfo = G.findSectionByName(MachOObjCImageInfoSectionName);
+  if (!ObjCImageInfo)
+    return Error::success();
+
+  auto ObjCImageInfoBlocks = ObjCImageInfo->blocks();
+
+  // Check that the section is not empty if present.
+  if (ObjCImageInfoBlocks.empty())
+    return make_error<StringError>("Empty " + MachOObjCImageInfoSectionName +
+                                       " section in " + G.getName(),
+                                   inconvertibleErrorCode());
+
+  // Check that there's only one block in the section.
+  if (std::next(ObjCImageInfoBlocks.begin()) != ObjCImageInfoBlocks.end())
+    return make_error<StringError>("Multiple blocks in " +
+                                       MachOObjCImageInfoSectionName +
+                                       " section in " + G.getName(),
+                                   inconvertibleErrorCode());
+
+  // Check that the __objc_imageinfo section is unreferenced.
+  // FIXME: We could optimize this check if Symbols had a ref-count.
+  for (auto &Sec : G.sections()) {
+    if (&Sec != ObjCImageInfo)
+      for (auto *B : Sec.blocks())
+        for (auto &E : B->edges())
+          if (E.getTarget().isDefined() &&
+              &E.getTarget().getBlock().getSection() == ObjCImageInfo)
+            return make_error<StringError>(MachOObjCImageInfoSectionName +
+                                               " is referenced within file " +
+                                               G.getName(),
+                                           inconvertibleErrorCode());
+  }
+
+  // The image info block is two 32-bit words: version then flags.
+  auto &ObjCImageInfoBlock = **ObjCImageInfoBlocks.begin();
+  auto *ObjCImageInfoData = ObjCImageInfoBlock.getContent().data();
+  auto Version = support::endian::read32(ObjCImageInfoData, G.getEndianness());
+  auto Flags =
+      support::endian::read32(ObjCImageInfoData + 4, G.getEndianness());
+
+  // Lock the mutex while we verify / update the ObjCImageInfos map.
+  std::lock_guard<std::mutex> Lock(PluginMutex);
+
+  auto ObjCImageInfoItr = ObjCImageInfos.find(&MR.getTargetJITDylib());
+  if (ObjCImageInfoItr != ObjCImageInfos.end()) {
+    // We've already registered an __objc_imageinfo section. Verify the
+    // content of this new section matches, then delete it.
+    if (ObjCImageInfoItr->second.Version != Version)
+      return make_error<StringError>(
+          "ObjC version in " + G.getName() +
+              " does not match first registered version",
+          inconvertibleErrorCode());
+    if (ObjCImageInfoItr->second.Flags != Flags)
+      if (Error E = mergeImageInfoFlags(G, MR, ObjCImageInfoItr->second, Flags))
+        return E;
+
+    // __objc_imageinfo is valid. Delete the block.
+    for (auto *S : ObjCImageInfo->symbols())
+      G.removeDefinedSymbol(*S);
+    G.removeBlock(ObjCImageInfoBlock);
+  } else {
+    LLVM_DEBUG({
+      dbgs() << "MachOPlatform: Registered __objc_imageinfo for "
+             << MR.getTargetJITDylib().getName() << " in " << G.getName()
+             << "; flags = " << formatv("{0:x4}", Flags) << "\n";
+    });
+    // We haven't registered an __objc_imageinfo section yet. Register and
+    // move on. The section should already be marked no-dead-strip.
+    G.addDefinedSymbol(ObjCImageInfoBlock, 0, ObjCImageInfoSymbolName,
+                       ObjCImageInfoBlock.getSize(), jitlink::Linkage::Strong,
+                       jitlink::Scope::Hidden, false, true);
+    if (auto Err = MR.defineMaterializing(
+            {{MR.getExecutionSession().intern(ObjCImageInfoSymbolName),
+              JITSymbolFlags()}}))
+      return Err;
+    ObjCImageInfos[&MR.getTargetJITDylib()] = {Version, Flags, false};
+  }
+
+  return Error::success();
+}
+
+// Merge a new graph's __objc_imageinfo flags into the recorded flags for the
+// JITDylib. Incompatible combinations (mismatched Swift ABI versions, or
+// downgrades of finalized capabilities) produce errors; otherwise the
+// recorded flags are relaxed to the common subset.
+Error MachOPlatform::MachOPlatformPlugin::mergeImageInfoFlags(
+    jitlink::LinkGraph &G, MaterializationResponsibility &MR,
+    ObjCImageInfo &Info, uint32_t NewFlags) {
+  if (Info.Flags == NewFlags)
+    return Error::success();
+
+  ObjCImageInfoFlags Old(Info.Flags);
+  ObjCImageInfoFlags New(NewFlags);
+
+  // Check for incompatible flags.
+  if (Old.SwiftABIVersion && New.SwiftABIVersion &&
+      Old.SwiftABIVersion != New.SwiftABIVersion)
+    return make_error<StringError>("Swift ABI version in " + G.getName() +
+                                       " does not match first registered flags",
+                                   inconvertibleErrorCode());
+
+  // HasCategoryClassProperties and HasSignedObjCClassROs can be disabled before
+  // they are registered, if necessary, but once they are in use must be
+  // supported by subsequent objects.
+  if (Info.Finalized && Old.HasCategoryClassProperties &&
+      !New.HasCategoryClassProperties)
+    return make_error<StringError>("ObjC category class property support in " +
+                                       G.getName() +
+                                       " does not match first registered flags",
+                                   inconvertibleErrorCode());
+  if (Info.Finalized && Old.HasSignedObjCClassROs && !New.HasSignedObjCClassROs)
+    return make_error<StringError>("ObjC class_ro_t pointer signing in " +
+                                       G.getName() +
+                                       " does not match first registered flags",
+                                   inconvertibleErrorCode());
+
+  // If we cannot change the flags, ignore any remaining differences. Adding
+  // Swift or changing its version are unlikely to cause problems in practice.
+  if (Info.Finalized)
+    return Error::success();
+
+  // Use the minimum Swift version.
+  if (Old.SwiftVersion && New.SwiftVersion)
+    New.SwiftVersion = std::min(Old.SwiftVersion, New.SwiftVersion);
+  else if (Old.SwiftVersion)
+    New.SwiftVersion = Old.SwiftVersion;
+  // Add a Swift ABI version if it was pure objc before.
+  if (!New.SwiftABIVersion)
+    New.SwiftABIVersion = Old.SwiftABIVersion;
+  // Disable class properties if any object does not support it.
+  if (Old.HasCategoryClassProperties != New.HasCategoryClassProperties)
+    New.HasCategoryClassProperties = false;
+  // Disable signed class ro data if any object does not support it.
+  if (Old.HasSignedObjCClassROs != New.HasSignedObjCClassROs)
+    New.HasSignedObjCClassROs = false;
+
+  LLVM_DEBUG({
+    dbgs() << "MachOPlatform: Merging __objc_imageinfo flags for "
+           << MR.getTargetJITDylib().getName() << " (was "
+           << formatv("{0:x4}", Old.rawFlags()) << ")"
+           << " with " << G.getName() << " (" << formatv("{0:x4}", NewFlags)
+           << ")"
+           << " -> " << formatv("{0:x4}", New.rawFlags()) << "\n";
+  });
+
+  Info.Flags = New.rawFlags();
+  return Error::success();
+}
+
+// Lower MachO thread-local-variable support for the JIT: redirect the
+// __tlv_bootstrap thunk to the ORC runtime's getter, write the JITDylib's
+// pthread key into each __thread_vars record, and rewrite TLV edges as GOT
+// edges.
+Error MachOPlatform::MachOPlatformPlugin::fixTLVSectionsAndEdges(
+    jitlink::LinkGraph &G, JITDylib &JD) {
+
+  // Rename external references to __tlv_bootstrap to
+  // ___orc_rt_macho_tlv_get_addr.
+  for (auto *Sym : G.external_symbols())
+    if (Sym->getName() == "__tlv_bootstrap") {
+      Sym->setName("___orc_rt_macho_tlv_get_addr");
+      break;
+    }
+
+  // Store key in __thread_vars struct fields.
+  if (auto *ThreadDataSec = G.findSectionByName(MachOThreadVarsSectionName)) {
+    std::optional<uint64_t> Key;
+    {
+      std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+      auto I = MP.JITDylibToPThreadKey.find(&JD);
+      if (I != MP.JITDylibToPThreadKey.end())
+        Key = I->second;
+    }
+
+    // No key allocated for this JITDylib yet: create one via the runtime.
+    if (!Key) {
+      if (auto KeyOrErr = MP.createPThreadKey())
+        Key = *KeyOrErr;
+      else
+        return KeyOrErr.takeError();
+    }
+
+    // Encode the key with the graph's target endianness before patching.
+    uint64_t PlatformKeyBits =
+        support::endian::byte_swap(*Key, G.getEndianness());
+
+    for (auto *B : ThreadDataSec->blocks()) {
+      // Each __thread_vars record is three pointers (thunk, key, offset).
+      if (B->getSize() != 3 * G.getPointerSize())
+        return make_error<StringError>("__thread_vars block at " +
+                                           formatv("{0:x}", B->getAddress()) +
+                                           " has unexpected size",
+                                       inconvertibleErrorCode());
+
+      // Copy-on-write: patch the key (second pointer slot) into a fresh
+      // buffer and install it as the block's content.
+      auto NewBlockContent = G.allocateBuffer(B->getSize());
+      llvm::copy(B->getContent(), NewBlockContent.data());
+      memcpy(NewBlockContent.data() + G.getPointerSize(), &PlatformKeyBits,
+             G.getPointerSize());
+      B->setContent(NewBlockContent);
+    }
+  }
+
+  // Transform any TLV edges into GOT edges.
+  for (auto *B : G.blocks())
+    for (auto &E : B->edges())
+      if (E.getKind() ==
+          jitlink::x86_64::RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable)
+        E.setKind(jitlink::x86_64::
+                      RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable);
+
+  return Error::success();
+}
+
+// Collect unwind-info metadata for this graph: the address ranges of the
+// __eh_frame and __unwind_info sections, plus coalesced address ranges of
+// all executable blocks those sections reference. Returns std::nullopt when
+// no referenced code blocks are found (nothing to register).
+std::optional<MachOPlatform::MachOPlatformPlugin::UnwindSections>
+MachOPlatform::MachOPlatformPlugin::findUnwindSectionInfo(
+    jitlink::LinkGraph &G) {
+  using namespace jitlink;
+
+  UnwindSections US;
+
+  // ScanUnwindInfoSection records a section range and adds any executable
+  // blocks that the section points to into the CodeBlocks vector.
+  SmallVector<Block *> CodeBlocks;
+  auto ScanUnwindInfoSection = [&](Section &Sec, ExecutorAddrRange &SecRange) {
+    if (Sec.blocks().empty())
+      return;
+    SecRange = (*Sec.blocks().begin())->getRange();
+    for (auto *B : Sec.blocks()) {
+      auto R = B->getRange();
+      SecRange.Start = std::min(SecRange.Start, R.Start);
+      SecRange.End = std::max(SecRange.End, R.End);
+      for (auto &E : B->edges()) {
+        if (!E.getTarget().isDefined())
+          continue;
+        auto &TargetBlock = E.getTarget().getBlock();
+        auto &TargetSection = TargetBlock.getSection();
+        if ((TargetSection.getMemProt() & MemProt::Exec) == MemProt::Exec)
+          CodeBlocks.push_back(&TargetBlock);
+      }
+    }
+  };
+
+  if (Section *EHFrameSec = G.findSectionByName(MachOEHFrameSectionName))
+    ScanUnwindInfoSection(*EHFrameSec, US.DwarfSection);
+
+  if (Section *CUInfoSec =
+          G.findSectionByName(MachOCompactUnwindInfoSectionName))
+    ScanUnwindInfoSection(*CUInfoSec, US.CompactUnwindSection);
+
+  // If we didn't find any pointed-to code-blocks then there's no need to
+  // register any info.
+  if (CodeBlocks.empty())
+    return std::nullopt;
+
+  // We have info to register. Sort the code blocks into address order and
+  // build a list of contiguous address ranges covering them all.
+  llvm::sort(CodeBlocks, [](const Block *LHS, const Block *RHS) {
+    return LHS->getAddress() < RHS->getAddress();
+  });
+  for (auto *B : CodeBlocks) {
+    if (US.CodeRanges.empty() || US.CodeRanges.back().End != B->getAddress())
+      US.CodeRanges.push_back(B->getRange());
+    else
+      US.CodeRanges.back().End = B->getRange().End;
+  }
+
+  LLVM_DEBUG({
+    dbgs() << "MachOPlatform identified unwind info in " << G.getName() << ":\n"
+           << "  DWARF: ";
+    if (US.DwarfSection.Start)
+      dbgs() << US.DwarfSection << "\n";
+    else
+      dbgs() << "none\n";
+    dbgs() << "  Compact-unwind: ";
+    if (US.CompactUnwindSection.Start)
+      dbgs() << US.CompactUnwindSection << "\n";
+    else
+      dbgs() << "none\n"
+                "for code ranges:\n";
+    for (auto &CR : US.CodeRanges)
+      dbgs() << "  " << CR << "\n";
+    if (US.CodeRanges.size() >= G.sections_size())
+      dbgs() << "WARNING: High number of discontiguous code ranges! "
+                "Padding may be interfering with coalescing.\n";
+  });
+
+  return US;
+}
+
+// Gather the graph's platform-relevant sections (data, thread-data, init,
+// ObjC runtime object) and unwind info, then attach alloc actions that
+// register / deregister them with the ORC runtime. During bootstrap the
+// actions are deferred to the bootstrap graph instead.
+Error MachOPlatform::MachOPlatformPlugin::registerObjectPlatformSections(
+    jitlink::LinkGraph &G, JITDylib &JD, bool InBootstrapPhase) {
+
+  // Get a pointer to the thread data section if there is one. It will be used
+  // below.
+  jitlink::Section *ThreadDataSection =
+      G.findSectionByName(MachOThreadDataSectionName);
+
+  // Handle thread BSS section if there is one.
+  if (auto *ThreadBSSSection = G.findSectionByName(MachOThreadBSSSectionName)) {
+    // If there's already a thread data section in this graph then merge the
+    // thread BSS section content into it, otherwise just treat the thread
+    // BSS section as the thread data section.
+    if (ThreadDataSection)
+      G.mergeSections(*ThreadDataSection, *ThreadBSSSection);
+    else
+      ThreadDataSection = ThreadBSSSection;
+  }
+
+  SmallVector<std::pair<StringRef, ExecutorAddrRange>, 8> MachOPlatformSecs;
+
+  // Collect data sections to register.
+  StringRef DataSections[] = {MachODataDataSectionName,
+                              MachODataCommonSectionName,
+                              MachOEHFrameSectionName};
+  for (auto &SecName : DataSections) {
+    if (auto *Sec = G.findSectionByName(SecName)) {
+      jitlink::SectionRange R(*Sec);
+      if (!R.empty())
+        MachOPlatformSecs.push_back({SecName, R.getRange()});
+    }
+  }
+
+  // Having merged thread BSS (if present) and thread data (if present),
+  // record the resulting section range.
+  if (ThreadDataSection) {
+    jitlink::SectionRange R(*ThreadDataSection);
+    if (!R.empty())
+      MachOPlatformSecs.push_back({MachOThreadDataSectionName, R.getRange()});
+  }
+
+  // Collect the remaining platform sections (init funcs, ObjC runtime
+  // object) to register.
+  StringRef PlatformSections[] = {MachOModInitFuncSectionName,
+                                  ObjCRuntimeObjectSectionName};
+
+  for (auto &SecName : PlatformSections) {
+    auto *Sec = G.findSectionByName(SecName);
+    if (!Sec)
+      continue;
+    jitlink::SectionRange R(*Sec);
+    if (R.empty())
+      continue;
+
+    MachOPlatformSecs.push_back({SecName, R.getRange()});
+  }
+
+  // Collect unwind info (code ranges, DWARF section, compact-unwind
+  // section), if any.
+  std::optional<std::tuple<SmallVector<ExecutorAddrRange>, ExecutorAddrRange,
+                           ExecutorAddrRange>>
+      UnwindInfo;
+  if (auto UI = findUnwindSectionInfo(G))
+    UnwindInfo = std::make_tuple(std::move(UI->CodeRanges), UI->DwarfSection,
+                                 UI->CompactUnwindSection);
+
+  // If any platform sections or unwind info were found then add an
+  // allocation action to call the registration function.
+  if (!MachOPlatformSecs.empty() || UnwindInfo) {
+    // Dump the scraped inits.
+    LLVM_DEBUG({
+      dbgs() << "MachOPlatform: Scraped " << G.getName() << " init sections:\n";
+      for (auto &KV : MachOPlatformSecs)
+        dbgs() << "  " << KV.first << ": " << KV.second << "\n";
+    });
+
+    using SPSRegisterObjectPlatformSectionsArgs = SPSArgList<
+        SPSExecutorAddr,
+        SPSOptional<SPSTuple<SPSSequence<SPSExecutorAddrRange>,
+                             SPSExecutorAddrRange, SPSExecutorAddrRange>>,
+        SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRange>>>;
+
+    // During bootstrap the registration call must be deferred until the
+    // runtime itself is up, so the actions go to DeferredAAs instead.
+    shared::AllocActions &allocActions = LLVM_LIKELY(!InBootstrapPhase)
+                                             ? G.allocActions()
+                                             : MP.Bootstrap.load()->DeferredAAs;
+
+    ExecutorAddr HeaderAddr;
+    {
+      std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+      auto I = MP.JITDylibToHeaderAddr.find(&JD);
+      assert(I != MP.JITDylibToHeaderAddr.end() &&
+             "No header registered for JD");
+      assert(I->second && "Null header registered for JD");
+      HeaderAddr = I->second;
+    }
+    allocActions.push_back(
+        {cantFail(
+             WrapperFunctionCall::Create<SPSRegisterObjectPlatformSectionsArgs>(
+                 MP.RegisterObjectPlatformSections.Addr, HeaderAddr, UnwindInfo,
+                 MachOPlatformSecs)),
+         cantFail(
+             WrapperFunctionCall::Create<SPSRegisterObjectPlatformSectionsArgs>(
+                 MP.DeregisterObjectPlatformSections.Addr, HeaderAddr,
+                 UnwindInfo, MachOPlatformSecs))});
+  }
+
+  return Error::success();
+}
+
+// If this graph contains any ObjC runtime sections, reserve a mutable block
+// sized for a mach_header_64 plus the segment/section load commands that
+// describe them. The block is filled in later by populateObjCRuntimeObject,
+// once final section addresses are known.
+Error MachOPlatform::MachOPlatformPlugin::createObjCRuntimeObject(
+    jitlink::LinkGraph &G) {
+
+  bool NeedTextSegment = false;
+  size_t NumRuntimeSections = 0;
+
+  for (auto ObjCRuntimeSectionName : ObjCRuntimeObjectSectionsData)
+    if (G.findSectionByName(ObjCRuntimeSectionName))
+      ++NumRuntimeSections;
+
+  for (auto ObjCRuntimeSectionName : ObjCRuntimeObjectSectionsText) {
+    if (G.findSectionByName(ObjCRuntimeSectionName)) {
+      ++NumRuntimeSections;
+      NeedTextSegment = true;
+    }
+  }
+
+  // Early out for no runtime sections.
+  if (NumRuntimeSections == 0)
+    return Error::success();
+
+  // If there were any runtime sections then we need to add an __objc_imageinfo
+  // section.
+  ++NumRuntimeSections;
+
+  // Header + one segment command per segment (__DATA always, __TEXT only if
+  // needed) + one section command per runtime section.
+  size_t MachOSize = sizeof(MachO::mach_header_64) +
+                     (NeedTextSegment + 1) * sizeof(MachO::segment_command_64) +
+                     NumRuntimeSections * sizeof(MachO::section_64);
+
+  auto &Sec = G.createSection(ObjCRuntimeObjectSectionName,
+                              MemProt::Read | MemProt::Write);
+  G.createMutableContentBlock(Sec, MachOSize, ExecutorAddr(), 16, 0, true);
+
+  return Error::success();
+}
+
+// Fill in the synthetic ObjC runtime object reserved by
+// createObjCRuntimeObject: write a mach_header_64, __TEXT/__DATA segment
+// commands, and section_64 records describing the graph's ObjC runtime
+// sections (plus a synthesized __objc_imageinfo entry), adding fixup edges
+// so section addresses are resolved at allocation time.
+Error MachOPlatform::MachOPlatformPlugin::populateObjCRuntimeObject(
+    jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+  auto *ObjCRuntimeObjectSec =
+      G.findSectionByName(ObjCRuntimeObjectSectionName);
+
+  if (!ObjCRuntimeObjectSec)
+    return Error::success();
+
+  switch (G.getTargetTriple().getArch()) {
+  case Triple::aarch64:
+  case Triple::x86_64:
+    // Supported.
+    break;
+  default:
+    return make_error<StringError>("Unrecognized MachO arch in triple " +
+                                       G.getTargetTriple().str(),
+                                   inconvertibleErrorCode());
+  }
+
+  auto &SecBlock = **ObjCRuntimeObjectSec->blocks().begin();
+
+  // A section_64 record plus an optional callback to add fixup edges once
+  // the record's offset within the block is known.
+  struct SecDesc {
+    MachO::section_64 Sec;
+    unique_function<void(size_t RecordOffset)> AddFixups;
+  };
+
+  std::vector<SecDesc> TextSections, DataSections;
+  auto AddSection = [&](SecDesc &SD, jitlink::Section &GraphSec) {
+    jitlink::SectionRange SR(GraphSec);
+    StringRef FQName = GraphSec.getName();
+    memset(&SD.Sec, 0, sizeof(MachO::section_64));
+    // NOTE(review): assumes FQName is "<6-char segment>,<section>" so the
+    // section name starts at offset 7 -- confirm this holds for all entries
+    // in ObjCRuntimeObjectSections{Data,Text}.
+    memcpy(SD.Sec.sectname, FQName.drop_front(7).data(), FQName.size() - 7);
+    memcpy(SD.Sec.segname, FQName.data(), 6);
+    SD.Sec.addr = SR.getStart() - SecBlock.getAddress();
+    SD.Sec.size = SR.getSize();
+    SD.Sec.flags = MachO::S_REGULAR;
+  };
+
+  // Add the __objc_imageinfo section.
+  {
+    DataSections.push_back({});
+    auto &SD = DataSections.back();
+    memset(&SD.Sec, 0, sizeof(SD.Sec));
+    memcpy(SD.Sec.sectname, "__objc_imageinfo", 16);
+    strcpy(SD.Sec.segname, "__DATA");
+    SD.Sec.size = 8;
+    SD.AddFixups = [&](size_t RecordOffset) {
+      auto PointerEdge = getPointerEdgeKind(G);
+
+      // Look for an existing __objc_imageinfo symbol.
+      jitlink::Symbol *ObjCImageInfoSym = nullptr;
+      for (auto *Sym : G.external_symbols())
+        if (Sym->getName() == ObjCImageInfoSymbolName) {
+          ObjCImageInfoSym = Sym;
+          break;
+        }
+      if (!ObjCImageInfoSym)
+        for (auto *Sym : G.absolute_symbols())
+          if (Sym->getName() == ObjCImageInfoSymbolName) {
+            ObjCImageInfoSym = Sym;
+            break;
+          }
+      if (!ObjCImageInfoSym)
+        for (auto *Sym : G.defined_symbols())
+          if (Sym->hasName() && Sym->getName() == ObjCImageInfoSymbolName) {
+            ObjCImageInfoSym = Sym;
+            std::optional<uint32_t> Flags;
+            {
+              std::lock_guard<std::mutex> Lock(PluginMutex);
+              auto It = ObjCImageInfos.find(&MR.getTargetJITDylib());
+              if (It != ObjCImageInfos.end()) {
+                It->second.Finalized = true;
+                Flags = It->second.Flags;
+              }
+            }
+
+            if (Flags) {
+              // We own the definition of __objc_image_info; write the final
+              // merged flags value.
+              auto Content = Sym->getBlock().getMutableContent(G);
+              assert(Content.size() == 8 &&
+                     "__objc_image_info size should have been verified already");
+              support::endian::write32(&Content[4], *Flags, G.getEndianness());
+            }
+            break;
+          }
+      // Nothing defines __objc_imageinfo in this graph: reference it as an
+      // external symbol to be resolved elsewhere.
+      if (!ObjCImageInfoSym)
+        ObjCImageInfoSym =
+            &G.addExternalSymbol(ObjCImageInfoSymbolName, 8, false);
+
+      SecBlock.addEdge(PointerEdge,
+                       RecordOffset + ((char *)&SD.Sec.addr - (char *)&SD.Sec),
+                       *ObjCImageInfoSym, -SecBlock.getAddress().getValue());
+    };
+  }
+
+  for (auto ObjCRuntimeSectionName : ObjCRuntimeObjectSectionsData) {
+    if (auto *GraphSec = G.findSectionByName(ObjCRuntimeSectionName)) {
+      DataSections.push_back({});
+      AddSection(DataSections.back(), *GraphSec);
+    }
+  }
+
+  for (auto ObjCRuntimeSectionName : ObjCRuntimeObjectSectionsText) {
+    if (auto *GraphSec = G.findSectionByName(ObjCRuntimeSectionName)) {
+      TextSections.push_back({});
+      AddSection(TextSections.back(), *GraphSec);
+    }
+  }
+
+  assert(ObjCRuntimeObjectSec->blocks_size() == 1 &&
+         "Unexpected number of blocks in runtime sections object");
+
+  // Build the header struct up-front. This also gives us a chance to check
+  // that the triple is supported, which we'll assume below.
+  MachO::mach_header_64 Hdr;
+  Hdr.magic = MachO::MH_MAGIC_64;
+  switch (G.getTargetTriple().getArch()) {
+  case Triple::aarch64:
+    Hdr.cputype = MachO::CPU_TYPE_ARM64;
+    Hdr.cpusubtype = MachO::CPU_SUBTYPE_ARM64_ALL;
+    break;
+  case Triple::x86_64:
+    Hdr.cputype = MachO::CPU_TYPE_X86_64;
+    Hdr.cpusubtype = MachO::CPU_SUBTYPE_X86_64_ALL;
+    break;
+  default:
+    llvm_unreachable("Unsupported architecture");
+  }
+
+  Hdr.filetype = MachO::MH_DYLIB;
+  Hdr.ncmds = 1 + !TextSections.empty();
+  Hdr.sizeofcmds =
+      Hdr.ncmds * sizeof(MachO::segment_command_64) +
+      (TextSections.size() + DataSections.size()) * sizeof(MachO::section_64);
+  Hdr.flags = 0;
+  Hdr.reserved = 0;
+
+  auto SecContent = SecBlock.getAlreadyMutableContent();
+  char *P = SecContent.data();
+  // Serialize a MachO struct at P, byte-swapping if the graph's endianness
+  // differs from the host's.
+  auto WriteMachOStruct = [&](auto S) {
+    if (G.getEndianness() != llvm::endianness::native)
+      MachO::swapStruct(S);
+    memcpy(P, &S, sizeof(S));
+    P += sizeof(S);
+  };
+
+  auto WriteSegment = [&](StringRef Name, std::vector<SecDesc> &Secs) {
+    MachO::segment_command_64 SegLC;
+    memset(&SegLC, 0, sizeof(SegLC));
+    memcpy(SegLC.segname, Name.data(), Name.size());
+    SegLC.cmd = MachO::LC_SEGMENT_64;
+    SegLC.cmdsize = sizeof(MachO::segment_command_64) +
+                    Secs.size() * sizeof(MachO::section_64);
+    SegLC.nsects = Secs.size();
+    WriteMachOStruct(SegLC);
+    for (auto &SD : Secs) {
+      if (SD.AddFixups)
+        SD.AddFixups(P - SecContent.data());
+      WriteMachOStruct(SD.Sec);
+    }
+  };
+
+  WriteMachOStruct(Hdr);
+  if (!TextSections.empty())
+    WriteSegment("__TEXT", TextSections);
+  if (!DataSections.empty())
+    WriteSegment("__DATA", DataSections);
+
+  assert(P == SecContent.end() && "Underflow writing ObjC runtime object");
+  return Error::success();
+}
+
+// Build the inputs for symbol-table registration: ensure every named defined
+// or absolute symbol has its name present as a c-string in the __cstring
+// section (reusing existing strings where possible), and record
+// (symbol, name-string-symbol) pairs in JITSymTabInfo for
+// addSymbolTableRegistration to consume.
+Error MachOPlatform::MachOPlatformPlugin::prepareSymbolTableRegistration(
+    jitlink::LinkGraph &G, JITSymTabVector &JITSymTabInfo) {
+
+  // __TEXT,__cstring is mapped read+exec along with the rest of __TEXT.
+  auto *CStringSec = G.findSectionByName(MachOCStringSectionName);
+  if (!CStringSec)
+    CStringSec = &G.createSection(MachOCStringSectionName,
+                                  MemProt::Read | MemProt::Exec);
+
+  // Make a map of existing strings so that we can re-use them:
+  DenseMap<StringRef, jitlink::Symbol *> ExistingStrings;
+  for (auto *Sym : CStringSec->symbols()) {
+
+    // The LinkGraph builder should have created single strings blocks, and all
+    // plugins should have maintained this invariant.
+    auto Content = Sym->getBlock().getContent();
+    ExistingStrings.insert(
+        std::make_pair(StringRef(Content.data(), Content.size()), Sym));
+  }
+
+  // Add all symbol names to the string section, and record the symbols for
+  // those names.
+  {
+    SmallVector<jitlink::Symbol *> SymsToProcess;
+    for (auto *Sym : G.defined_symbols())
+      SymsToProcess.push_back(Sym);
+    for (auto *Sym : G.absolute_symbols())
+      SymsToProcess.push_back(Sym);
+
+    for (auto *Sym : SymsToProcess) {
+      if (!Sym->hasName())
+        continue;
+
+      auto I = ExistingStrings.find(Sym->getName());
+      if (I == ExistingStrings.end()) {
+        // Name not present yet: add it as a new null-terminated string block
+        // kept alive by an anonymous symbol.
+        auto &NameBlock = G.createMutableContentBlock(
+            *CStringSec, G.allocateCString(Sym->getName()), orc::ExecutorAddr(),
+            1, 0);
+        auto &SymbolNameSym = G.addAnonymousSymbol(
+            NameBlock, 0, NameBlock.getSize(), false, true);
+        JITSymTabInfo.push_back({Sym, &SymbolNameSym});
+      } else
+        JITSymTabInfo.push_back({Sym, I->second});
+    }
+  }
+
+  return Error::success();
+}
+
+// Build the (name-addr, symbol-addr, flags) symbol table from the pairs
+// prepared by prepareSymbolTableRegistration, and attach alloc actions that
+// register / deregister it with the ORC runtime. During bootstrap the table
+// is accumulated on the bootstrap state instead and no actions are added.
+Error MachOPlatform::MachOPlatformPlugin::addSymbolTableRegistration(
+    jitlink::LinkGraph &G, MaterializationResponsibility &MR,
+    JITSymTabVector &JITSymTabInfo, bool InBootstrapPhase) {
+
+  ExecutorAddr HeaderAddr;
+  {
+    std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+    auto I = MP.JITDylibToHeaderAddr.find(&MR.getTargetJITDylib());
+    assert(I != MP.JITDylibToHeaderAddr.end() && "No header registered for JD");
+    assert(I->second && "Null header registered for JD");
+    HeaderAddr = I->second;
+  }
+
+  SymbolTableVector LocalSymTab;
+  auto &SymTab = LLVM_LIKELY(!InBootstrapPhase) ? LocalSymTab
+                                                : MP.Bootstrap.load()->SymTab;
+  for (auto &[OriginalSymbol, NameSym] : JITSymTabInfo)
+    SymTab.push_back({NameSym->getAddress(), OriginalSymbol->getAddress(),
+                      flagsForSymbol(*OriginalSymbol)});
+
+  // Bail out if we're in the bootstrap phase -- registration of these symbols
+  // will be attached to the bootstrap graph.
+  if (LLVM_UNLIKELY(InBootstrapPhase))
+    return Error::success();
+
+  // NOTE(review): InBootstrapPhase is always false past the early return
+  // above, so this ternary always selects G.allocActions(); the DeferredAAs
+  // arm is dead here.
+  shared::AllocActions &allocActions = LLVM_LIKELY(!InBootstrapPhase)
+                                           ? G.allocActions()
+                                           : MP.Bootstrap.load()->DeferredAAs;
+  allocActions.push_back(
+      {cantFail(WrapperFunctionCall::Create<SPSRegisterSymbolsArgs>(
+           MP.RegisterObjectSymbolTable.Addr, HeaderAddr, SymTab)),
+       cantFail(WrapperFunctionCall::Create<SPSRegisterSymbolsArgs>(
+           MP.DeregisterObjectSymbolTable.Addr, HeaderAddr, SymTab))});
+
+  return Error::success();
+}
+
+// Build a synthetic MachO dylib header block (mach_header + LC_ID_DYLIB,
+// LC_BUILD_VERSION, LC_LOAD_DYLIB and LC_RPATH load commands per Opts) and
+// add it to HeaderSection as a content block.
+template <typename MachOTraits>
+jitlink::Block &createHeaderBlock(MachOPlatform &MOP,
+                                  const MachOPlatform::HeaderOptions &Opts,
+                                  JITDylib &JD, jitlink::LinkGraph &G,
+                                  jitlink::Section &HeaderSection) {
+  auto HdrInfo =
+      getMachOHeaderInfoFromTriple(MOP.getExecutionSession().getTargetTriple());
+  MachOBuilder<MachOTraits> B(HdrInfo.PageSize);
+
+  B.Header.filetype = MachO::MH_DYLIB;
+  B.Header.cputype = HdrInfo.CPUType;
+  B.Header.cpusubtype = HdrInfo.CPUSubType;
+
+  // Use the explicit dylib ID if given, otherwise fall back to the
+  // JITDylib's name with zero version info.
+  if (Opts.IDDylib)
+    B.template addLoadCommand<MachO::LC_ID_DYLIB>(
+        Opts.IDDylib->Name, Opts.IDDylib->Timestamp,
+        Opts.IDDylib->CurrentVersion, Opts.IDDylib->CompatibilityVersion);
+  else
+    B.template addLoadCommand<MachO::LC_ID_DYLIB>(JD.getName(), 0, 0, 0);
+
+  for (auto &BV : Opts.BuildVersions)
+    B.template addLoadCommand<MachO::LC_BUILD_VERSION>(
+        BV.Platform, BV.MinOS, BV.SDK, static_cast<uint32_t>(0));
+  for (auto &D : Opts.LoadDylibs)
+    B.template addLoadCommand<MachO::LC_LOAD_DYLIB>(
+        D.Name, D.Timestamp, D.CurrentVersion, D.CompatibilityVersion);
+  for (auto &P : Opts.RPaths)
+    B.template addLoadCommand<MachO::LC_RPATH>(P);
+
+  auto HeaderContent = G.allocateBuffer(B.layout());
+  B.write(HeaderContent);
+
+  return G.createContentBlock(HeaderSection, HeaderContent, ExecutorAddr(), 8,
+                              0);
+}
+
+// Construct a materialization unit that provides a synthetic MachO header
+// exposing HeaderStartSymbol (plus any additional header symbols declared
+// by createHeaderInterface).
+SimpleMachOHeaderMU::SimpleMachOHeaderMU(MachOPlatform &MOP,
+                                         SymbolStringPtr HeaderStartSymbol,
+                                         MachOPlatform::HeaderOptions Opts)
+    : MaterializationUnit(
+          createHeaderInterface(MOP, std::move(HeaderStartSymbol))),
+      MOP(MOP), Opts(std::move(Opts)) {}
+
+// Materialize the header: build a fresh platform graph containing only the
+// MachO header block and hand it to the object linking layer.
+void SimpleMachOHeaderMU::materialize(
+    std::unique_ptr<MaterializationResponsibility> R) {
+  auto G = createPlatformGraph(MOP, "<MachOHeaderMU>");
+  addMachOHeader(R->getTargetJITDylib(), *G, R->getInitializerSymbol());
+  MOP.getObjectLinkingLayer().emit(std::move(R), std::move(G));
+}
+
+// No-op: header symbols are never individually discarded.
+void SimpleMachOHeaderMU::discard(const JITDylib &JD,
+                                  const SymbolStringPtr &Sym) {}
+
+// Add a "__header" section to G containing the synthetic MachO header block,
+// defining InitializerSymbol at its start plus any additional header
+// symbols at their configured offsets.
+void SimpleMachOHeaderMU::addMachOHeader(
+    JITDylib &JD, jitlink::LinkGraph &G,
+    const SymbolStringPtr &InitializerSymbol) {
+  auto &HeaderSection = G.createSection("__header", MemProt::Read);
+  auto &HeaderBlock = createHeaderBlock(JD, G, HeaderSection);
+
+  // Init symbol is header-start symbol.
+  G.addDefinedSymbol(HeaderBlock, 0, *InitializerSymbol, HeaderBlock.getSize(),
+                     jitlink::Linkage::Strong, jitlink::Scope::Default, false,
+                     true);
+  for (auto &HS : AdditionalHeaderSymbols)
+    G.addDefinedSymbol(HeaderBlock, HS.Offset, HS.Name, HeaderBlock.getSize(),
+                       jitlink::Linkage::Strong, jitlink::Scope::Default, false,
+                       true);
+}
+
+// Dispatch to the free createHeaderBlock template with the MachO traits
+// matching the execution session's target architecture.
+jitlink::Block &
+SimpleMachOHeaderMU::createHeaderBlock(JITDylib &JD, jitlink::LinkGraph &G,
+                                       jitlink::Section &HeaderSection) {
+  switch (MOP.getExecutionSession().getTargetTriple().getArch()) {
+  case Triple::aarch64:
+  case Triple::x86_64:
+    return ::createHeaderBlock<MachO64LE>(MOP, Opts, JD, G, HeaderSection);
+  default:
+    llvm_unreachable("Unsupported architecture");
+  }
+}
+
+// Build the materialization-unit interface for a header MU: the header start
+// symbol (also the init symbol) plus the additional header symbols, all
+// exported.
+MaterializationUnit::Interface SimpleMachOHeaderMU::createHeaderInterface(
+    MachOPlatform &MOP, const SymbolStringPtr &HeaderStartSymbol) {
+  SymbolFlagsMap HeaderSymbolFlags;
+
+  HeaderSymbolFlags[HeaderStartSymbol] = JITSymbolFlags::Exported;
+  for (auto &HS : AdditionalHeaderSymbols)
+    HeaderSymbolFlags[MOP.getExecutionSession().intern(HS.Name)] =
+        JITSymbolFlags::Exported;
+
+  return MaterializationUnit::Interface(std::move(HeaderSymbolFlags),
+                                        HeaderStartSymbol);
+}
+
+// Map a target triple to the page size and MachO CPU type/subtype used when
+// emitting synthetic headers for that architecture.
+MachOHeaderInfo getMachOHeaderInfoFromTriple(const Triple &TT) {
+  switch (TT.getArch()) {
+  case Triple::aarch64:
+    return {/* PageSize = */ 16 * 1024,
+            /* CPUType = */ MachO::CPU_TYPE_ARM64,
+            /* CPUSubType = */ MachO::CPU_SUBTYPE_ARM64_ALL};
+  case Triple::x86_64:
+    return {/* PageSize = */ 4 * 1024,
+            /* CPUType = */ MachO::CPU_TYPE_X86_64,
+            /* CPUSubType = */ MachO::CPU_SUBTYPE_X86_64_ALL};
+  default:
+    llvm_unreachable("Unrecognized architecture");
+  }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Mangling.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Mangling.cpp
new file mode 100644
index 000000000000..9abf7c11d546
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Mangling.cpp
@@ -0,0 +1,84 @@
+//===----------- Mangling.cpp -- Name Mangling Utilities for ORC ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Mangling.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+MangleAndInterner::MangleAndInterner(ExecutionSession &ES, const DataLayout &DL)
+ : ES(ES), DL(DL) {}
+
+SymbolStringPtr MangleAndInterner::operator()(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return ES.intern(MangledName);
+}
+
+void IRSymbolMapper::add(ExecutionSession &ES, const ManglingOptions &MO,
+ ArrayRef<GlobalValue *> GVs,
+ SymbolFlagsMap &SymbolFlags,
+ SymbolNameToDefinitionMap *SymbolToDefinition) {
+ if (GVs.empty())
+ return;
+
+ MangleAndInterner Mangle(ES, GVs[0]->getDataLayout());
+ for (auto *G : GVs) {
+ assert(G && "GVs cannot contain null elements");
+ if (!G->hasName() || G->isDeclaration() || G->hasLocalLinkage() ||
+ G->hasAvailableExternallyLinkage() || G->hasAppendingLinkage())
+ continue;
+
+ if (G->isThreadLocal() && MO.EmulatedTLS) {
+ auto *GV = cast<GlobalVariable>(G);
+
+ auto Flags = JITSymbolFlags::fromGlobalValue(*GV);
+
+ auto EmuTLSV = Mangle(("__emutls_v." + GV->getName()).str());
+ SymbolFlags[EmuTLSV] = Flags;
+ if (SymbolToDefinition)
+ (*SymbolToDefinition)[EmuTLSV] = GV;
+
+ // If this GV has a non-zero initializer we'll need to emit an
+ // __emutls_t. symbol too.
+ if (GV->hasInitializer()) {
+ const auto *InitVal = GV->getInitializer();
+
+ // Skip zero-initializers.
+ if (isa<ConstantAggregateZero>(InitVal))
+ continue;
+ const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+ if (InitIntValue && InitIntValue->isZero())
+ continue;
+
+ auto EmuTLST = Mangle(("__emutls_t." + GV->getName()).str());
+ SymbolFlags[EmuTLST] = Flags;
+ if (SymbolToDefinition)
+ (*SymbolToDefinition)[EmuTLST] = GV;
+ }
+ continue;
+ }
+
+ // Otherwise we just need a normal linker mangling.
+ auto MangledName = Mangle(G->getName());
+ SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(*G);
+ if (SymbolToDefinition)
+ (*SymbolToDefinition)[MangledName] = G;
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp
new file mode 100644
index 000000000000..d099a251232e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MapperJITLinkMemoryManager.cpp
@@ -0,0 +1,189 @@
+//=== MapperJITLinkMemoryManager.cpp - Memory management with MemoryMapper ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/MapperJITLinkMemoryManager.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/Process.h"
+
+using namespace llvm::jitlink;
+
+namespace llvm {
+namespace orc {
+
+class MapperJITLinkMemoryManager::InFlightAlloc
+ : public JITLinkMemoryManager::InFlightAlloc {
+public:
+ InFlightAlloc(MapperJITLinkMemoryManager &Parent, LinkGraph &G,
+ ExecutorAddr AllocAddr,
+ std::vector<MemoryMapper::AllocInfo::SegInfo> Segs)
+ : Parent(Parent), G(G), AllocAddr(AllocAddr), Segs(std::move(Segs)) {}
+
+ void finalize(OnFinalizedFunction OnFinalize) override {
+ MemoryMapper::AllocInfo AI;
+ AI.MappingBase = AllocAddr;
+
+ std::swap(AI.Segments, Segs);
+ std::swap(AI.Actions, G.allocActions());
+
+ Parent.Mapper->initialize(AI, [OnFinalize = std::move(OnFinalize)](
+ Expected<ExecutorAddr> Result) mutable {
+ if (!Result) {
+ OnFinalize(Result.takeError());
+ return;
+ }
+
+ OnFinalize(FinalizedAlloc(*Result));
+ });
+ }
+
+ void abandon(OnAbandonedFunction OnFinalize) override {
+ Parent.Mapper->release({AllocAddr}, std::move(OnFinalize));
+ }
+
+private:
+ MapperJITLinkMemoryManager &Parent;
+ LinkGraph &G;
+ ExecutorAddr AllocAddr;
+ std::vector<MemoryMapper::AllocInfo::SegInfo> Segs;
+};
+
+MapperJITLinkMemoryManager::MapperJITLinkMemoryManager(
+ size_t ReservationGranularity, std::unique_ptr<MemoryMapper> Mapper)
+ : ReservationUnits(ReservationGranularity), AvailableMemory(AMAllocator),
+ Mapper(std::move(Mapper)) {}
+
+void MapperJITLinkMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
+ OnAllocatedFunction OnAllocated) {
+ BasicLayout BL(G);
+
+ // find required address space
+ auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(Mapper->getPageSize());
+ if (!SegsSizes) {
+ OnAllocated(SegsSizes.takeError());
+ return;
+ }
+
+ auto TotalSize = SegsSizes->total();
+
+ auto CompleteAllocation = [this, &G, BL = std::move(BL),
+ OnAllocated = std::move(OnAllocated)](
+ Expected<ExecutorAddrRange> Result) mutable {
+ if (!Result) {
+ Mutex.unlock();
+ return OnAllocated(Result.takeError());
+ }
+
+ auto NextSegAddr = Result->Start;
+
+ std::vector<MemoryMapper::AllocInfo::SegInfo> SegInfos;
+
+ for (auto &KV : BL.segments()) {
+ auto &AG = KV.first;
+ auto &Seg = KV.second;
+
+ auto TotalSize = Seg.ContentSize + Seg.ZeroFillSize;
+
+ Seg.Addr = NextSegAddr;
+ Seg.WorkingMem = Mapper->prepare(NextSegAddr, TotalSize);
+
+ NextSegAddr += alignTo(TotalSize, Mapper->getPageSize());
+
+ MemoryMapper::AllocInfo::SegInfo SI;
+ SI.Offset = Seg.Addr - Result->Start;
+ SI.ContentSize = Seg.ContentSize;
+ SI.ZeroFillSize = Seg.ZeroFillSize;
+ SI.AG = AG;
+ SI.WorkingMem = Seg.WorkingMem;
+
+ SegInfos.push_back(SI);
+ }
+
+ UsedMemory.insert({Result->Start, NextSegAddr - Result->Start});
+
+ if (NextSegAddr < Result->End) {
+ // Save the remaining memory for reuse in next allocation(s)
+ AvailableMemory.insert(NextSegAddr, Result->End - 1, true);
+ }
+ Mutex.unlock();
+
+ if (auto Err = BL.apply()) {
+ OnAllocated(std::move(Err));
+ return;
+ }
+
+ OnAllocated(std::make_unique<InFlightAlloc>(*this, G, Result->Start,
+ std::move(SegInfos)));
+ };
+
+ Mutex.lock();
+
+ // find an already reserved range that is large enough
+ ExecutorAddrRange SelectedRange{};
+
+ for (AvailableMemoryMap::iterator It = AvailableMemory.begin();
+ It != AvailableMemory.end(); It++) {
+ if (It.stop() - It.start() + 1 >= TotalSize) {
+ SelectedRange = ExecutorAddrRange(It.start(), It.stop() + 1);
+ It.erase();
+ break;
+ }
+ }
+
+ if (SelectedRange.empty()) { // no already reserved range was found
+ auto TotalAllocation = alignTo(TotalSize, ReservationUnits);
+ Mapper->reserve(TotalAllocation, std::move(CompleteAllocation));
+ } else {
+ CompleteAllocation(SelectedRange);
+ }
+}
+
+void MapperJITLinkMemoryManager::deallocate(
+ std::vector<FinalizedAlloc> Allocs, OnDeallocatedFunction OnDeallocated) {
+ std::vector<ExecutorAddr> Bases;
+ Bases.reserve(Allocs.size());
+ for (auto &FA : Allocs) {
+ ExecutorAddr Addr = FA.getAddress();
+ Bases.push_back(Addr);
+ }
+
+ Mapper->deinitialize(Bases, [this, Allocs = std::move(Allocs),
+ OnDeallocated = std::move(OnDeallocated)](
+ llvm::Error Err) mutable {
+ // TODO: How should we treat memory that we fail to deinitialize?
+ // We're currently bailing out and treating it as "burned" -- should we
+ // require that a failure to deinitialize still reset the memory so that
+ // we can reclaim it?
+ if (Err) {
+ for (auto &FA : Allocs)
+ FA.release();
+ OnDeallocated(std::move(Err));
+ return;
+ }
+
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+
+ for (auto &FA : Allocs) {
+ ExecutorAddr Addr = FA.getAddress();
+ ExecutorAddrDiff Size = UsedMemory[Addr];
+
+ UsedMemory.erase(Addr);
+ AvailableMemory.insert(Addr, Addr + Size - 1, true);
+
+ FA.release();
+ }
+ }
+
+ OnDeallocated(Error::success());
+ });
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MemoryMapper.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MemoryMapper.cpp
new file mode 100644
index 000000000000..bba3329e8cc2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/MemoryMapper.cpp
@@ -0,0 +1,466 @@
+//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/WindowsError.h"
+
+#include <algorithm>
+
+#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
+#include <fcntl.h>
+#include <sys/mman.h>
+#if defined(__MVS__)
+#include "llvm/Support/BLAKE3.h"
+#include <sys/shm.h>
+#endif
+#include <unistd.h>
+#elif defined(_WIN32)
+#include <windows.h>
+#endif
+
+namespace llvm {
+namespace orc {
+
+MemoryMapper::~MemoryMapper() {}
+
+InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
+ : PageSize(PageSize) {}
+
+Expected<std::unique_ptr<InProcessMemoryMapper>>
+InProcessMemoryMapper::Create() {
+ auto PageSize = sys::Process::getPageSize();
+ if (!PageSize)
+ return PageSize.takeError();
+ return std::make_unique<InProcessMemoryMapper>(*PageSize);
+}
+
+void InProcessMemoryMapper::reserve(size_t NumBytes,
+ OnReservedFunction OnReserved) {
+ std::error_code EC;
+ auto MB = sys::Memory::allocateMappedMemory(
+ NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
+
+ if (EC)
+ return OnReserved(errorCodeToError(EC));
+
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+ Reservations[MB.base()].Size = MB.allocatedSize();
+ }
+
+ OnReserved(
+ ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
+}
+
+char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
+ return Addr.toPtr<char *>();
+}
+
+void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
+ OnInitializedFunction OnInitialized) {
+ ExecutorAddr MinAddr(~0ULL);
+ ExecutorAddr MaxAddr(0);
+
+ // FIXME: Release finalize lifetime segments.
+ for (auto &Segment : AI.Segments) {
+ auto Base = AI.MappingBase + Segment.Offset;
+ auto Size = Segment.ContentSize + Segment.ZeroFillSize;
+
+ if (Base < MinAddr)
+ MinAddr = Base;
+
+ if (Base + Size > MaxAddr)
+ MaxAddr = Base + Size;
+
+ std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
+ Segment.ZeroFillSize);
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ {Base.toPtr<void *>(), Size},
+ toSysMemoryProtectionFlags(Segment.AG.getMemProt()))) {
+ return OnInitialized(errorCodeToError(EC));
+ }
+ if ((Segment.AG.getMemProt() & MemProt::Exec) == MemProt::Exec)
+ sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
+ }
+
+ auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
+ if (!DeinitializeActions)
+ return OnInitialized(DeinitializeActions.takeError());
+
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+
+ // This is the maximum range whose permissions may have been modified
+ Allocations[MinAddr].Size = MaxAddr - MinAddr;
+ Allocations[MinAddr].DeinitializationActions =
+ std::move(*DeinitializeActions);
+ Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
+ }
+
+ OnInitialized(MinAddr);
+}
+
+void InProcessMemoryMapper::deinitialize(
+ ArrayRef<ExecutorAddr> Bases,
+ MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
+ Error AllErr = Error::success();
+
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+
+ for (auto Base : llvm::reverse(Bases)) {
+
+ if (Error Err = shared::runDeallocActions(
+ Allocations[Base].DeinitializationActions)) {
+ AllErr = joinErrors(std::move(AllErr), std::move(Err));
+ }
+
+ // Reset protections to read/write so the area can be reused
+ if (auto EC = sys::Memory::protectMappedMemory(
+ {Base.toPtr<void *>(), Allocations[Base].Size},
+ sys::Memory::ProtectionFlags::MF_READ |
+ sys::Memory::ProtectionFlags::MF_WRITE)) {
+ AllErr = joinErrors(std::move(AllErr), errorCodeToError(EC));
+ }
+
+ Allocations.erase(Base);
+ }
+ }
+
+ OnDeinitialized(std::move(AllErr));
+}
+
+void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
+ OnReleasedFunction OnReleased) {
+ Error Err = Error::success();
+
+ for (auto Base : Bases) {
+ std::vector<ExecutorAddr> AllocAddrs;
+ size_t Size;
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+ auto &R = Reservations[Base.toPtr<void *>()];
+ Size = R.Size;
+ AllocAddrs.swap(R.Allocations);
+ }
+
+ // deinitialize sub allocations
+ std::promise<MSVCPError> P;
+ auto F = P.get_future();
+ deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
+ if (Error E = F.get()) {
+ Err = joinErrors(std::move(Err), std::move(E));
+ }
+
+ // free the memory
+ auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);
+
+ auto EC = sys::Memory::releaseMappedMemory(MB);
+ if (EC) {
+ Err = joinErrors(std::move(Err), errorCodeToError(EC));
+ }
+
+ std::lock_guard<std::mutex> Lock(Mutex);
+ Reservations.erase(Base.toPtr<void *>());
+ }
+
+ OnReleased(std::move(Err));
+}
+
+InProcessMemoryMapper::~InProcessMemoryMapper() {
+ std::vector<ExecutorAddr> ReservationAddrs;
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+
+ ReservationAddrs.reserve(Reservations.size());
+ for (const auto &R : Reservations) {
+ ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
+ }
+ }
+
+ std::promise<MSVCPError> P;
+ auto F = P.get_future();
+ release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
+ cantFail(F.get());
+}
+
+// SharedMemoryMapper
+
+SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
+ SymbolAddrs SAs, size_t PageSize)
+ : EPC(EPC), SAs(SAs), PageSize(PageSize) {
+#if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
+ llvm_unreachable("SharedMemoryMapper is not supported on this platform yet");
+#endif
+}
+
+Expected<std::unique_ptr<SharedMemoryMapper>>
+SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
+#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
+ auto PageSize = sys::Process::getPageSize();
+ if (!PageSize)
+ return PageSize.takeError();
+
+ return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
+#else
+ return make_error<StringError>(
+ "SharedMemoryMapper is not supported on this platform yet",
+ inconvertibleErrorCode());
+#endif
+}
+
+void SharedMemoryMapper::reserve(size_t NumBytes,
+ OnReservedFunction OnReserved) {
+#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
+
+ EPC.callSPSWrapperAsync<
+ rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
+ SAs.Reserve,
+ [this, NumBytes, OnReserved = std::move(OnReserved)](
+ Error SerializationErr,
+ Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
+ if (SerializationErr) {
+ cantFail(Result.takeError());
+ return OnReserved(std::move(SerializationErr));
+ }
+
+ if (!Result)
+ return OnReserved(Result.takeError());
+
+ ExecutorAddr RemoteAddr;
+ std::string SharedMemoryName;
+ std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);
+
+ void *LocalAddr = nullptr;
+
+#if defined(LLVM_ON_UNIX)
+
+#if defined(__MVS__)
+ ArrayRef<uint8_t> Data(
+ reinterpret_cast<const uint8_t *>(SharedMemoryName.c_str()),
+ SharedMemoryName.size());
+ auto HashedName = BLAKE3::hash<sizeof(key_t)>(Data);
+ key_t Key = *reinterpret_cast<key_t *>(HashedName.data());
+ int SharedMemoryId =
+ shmget(Key, NumBytes, IPC_CREAT | __IPC_SHAREAS | 0700);
+ if (SharedMemoryId < 0) {
+ return OnReserved(errorCodeToError(
+ std::error_code(errno, std::generic_category())));
+ }
+ LocalAddr = shmat(SharedMemoryId, nullptr, 0);
+ if (LocalAddr == reinterpret_cast<void *>(-1)) {
+ return OnReserved(errorCodeToError(
+ std::error_code(errno, std::generic_category())));
+ }
+#else
+ int SharedMemoryFile = shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
+ if (SharedMemoryFile < 0) {
+ return OnReserved(errorCodeToError(errnoAsErrorCode()));
+ }
+
+ // this prevents other processes from accessing it by name
+ shm_unlink(SharedMemoryName.c_str());
+
+ LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
+ SharedMemoryFile, 0);
+ if (LocalAddr == MAP_FAILED) {
+ return OnReserved(errorCodeToError(errnoAsErrorCode()));
+ }
+
+ close(SharedMemoryFile);
+#endif
+
+#elif defined(_WIN32)
+
+ std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
+ SharedMemoryName.end());
+ HANDLE SharedMemoryFile = OpenFileMappingW(
+ FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
+ if (!SharedMemoryFile)
+ return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
+
+ LocalAddr =
+ MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
+ if (!LocalAddr) {
+ CloseHandle(SharedMemoryFile);
+ return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
+ }
+
+ CloseHandle(SharedMemoryFile);
+
+#endif
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+ Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
+ }
+
+ OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
+ },
+ SAs.Instance, static_cast<uint64_t>(NumBytes));
+
+#else
+ OnReserved(make_error<StringError>(
+ "SharedMemoryMapper is not supported on this platform yet",
+ inconvertibleErrorCode()));
+#endif
+}
+
+char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
+ auto R = Reservations.upper_bound(Addr);
+ assert(R != Reservations.begin() && "Attempt to prepare unreserved range");
+ R--;
+
+ ExecutorAddrDiff Offset = Addr - R->first;
+
+ return static_cast<char *>(R->second.LocalAddr) + Offset;
+}
+
+void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
+ OnInitializedFunction OnInitialized) {
+ auto Reservation = Reservations.upper_bound(AI.MappingBase);
+ assert(Reservation != Reservations.begin() && "Attempt to initialize unreserved range");
+ Reservation--;
+
+ auto AllocationOffset = AI.MappingBase - Reservation->first;
+
+ tpctypes::SharedMemoryFinalizeRequest FR;
+
+ AI.Actions.swap(FR.Actions);
+
+ FR.Segments.reserve(AI.Segments.size());
+
+ for (auto Segment : AI.Segments) {
+ char *Base = static_cast<char *>(Reservation->second.LocalAddr) +
+ AllocationOffset + Segment.Offset;
+ std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);
+
+ tpctypes::SharedMemorySegFinalizeRequest SegReq;
+ SegReq.RAG = {Segment.AG.getMemProt(),
+ Segment.AG.getMemLifetime() == MemLifetime::Finalize};
+ SegReq.Addr = AI.MappingBase + Segment.Offset;
+ SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;
+
+ FR.Segments.push_back(SegReq);
+ }
+
+ EPC.callSPSWrapperAsync<
+ rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
+ SAs.Initialize,
+ [OnInitialized = std::move(OnInitialized)](
+ Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
+ if (SerializationErr) {
+ cantFail(Result.takeError());
+ return OnInitialized(std::move(SerializationErr));
+ }
+
+ OnInitialized(std::move(Result));
+ },
+ SAs.Instance, Reservation->first, std::move(FR));
+}
+
+void SharedMemoryMapper::deinitialize(
+ ArrayRef<ExecutorAddr> Allocations,
+ MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
+ EPC.callSPSWrapperAsync<
+ rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
+ SAs.Deinitialize,
+ [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
+ Error Result) mutable {
+ if (SerializationErr) {
+ cantFail(std::move(Result));
+ return OnDeinitialized(std::move(SerializationErr));
+ }
+
+ OnDeinitialized(std::move(Result));
+ },
+ SAs.Instance, Allocations);
+}
+
+void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
+ OnReleasedFunction OnReleased) {
+#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
+ Error Err = Error::success();
+
+ {
+ std::lock_guard<std::mutex> Lock(Mutex);
+
+ for (auto Base : Bases) {
+
+#if defined(LLVM_ON_UNIX)
+
+#if defined(__MVS__)
+ if (shmdt(Reservations[Base].LocalAddr) < 0)
+ Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
+#else
+ if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
+ Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
+#endif
+
+#elif defined(_WIN32)
+
+ if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
+ Err = joinErrors(std::move(Err),
+ errorCodeToError(mapWindowsError(GetLastError())));
+
+#endif
+
+ Reservations.erase(Base);
+ }
+ }
+
+ EPC.callSPSWrapperAsync<
+ rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
+ SAs.Release,
+ [OnReleased = std::move(OnReleased),
+ Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
+ if (SerializationErr) {
+ cantFail(std::move(Result));
+ return OnReleased(
+ joinErrors(std::move(Err), std::move(SerializationErr)));
+ }
+
+ return OnReleased(joinErrors(std::move(Err), std::move(Result)));
+ },
+ SAs.Instance, Bases);
+#else
+ OnReleased(make_error<StringError>(
+ "SharedMemoryMapper is not supported on this platform yet",
+ inconvertibleErrorCode()));
+#endif
+}
+
+SharedMemoryMapper::~SharedMemoryMapper() {
+ std::lock_guard<std::mutex> Lock(Mutex);
+ for (const auto &R : Reservations) {
+
+#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
+
+#if defined(__MVS__)
+ shmdt(R.second.LocalAddr);
+#else
+ munmap(R.second.LocalAddr, R.second.Size);
+#endif
+
+#elif defined(_WIN32)
+
+ UnmapViewOfFile(R.second.LocalAddr);
+
+#else
+
+ (void)R;
+
+#endif
+ }
+}
+
+} // namespace orc
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
new file mode 100644
index 000000000000..0286b0c93197
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
@@ -0,0 +1,292 @@
+//===------ ObjectFileInterface.cpp - MU interface utils for objects ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/ExecutionEngine/Orc/COFFPlatform.h"
+#include "llvm/ExecutionEngine/Orc/ELFNixPlatform.h"
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ObjectFormats.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include <optional>
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+void addInitSymbol(MaterializationUnit::Interface &I, ExecutionSession &ES,
+ StringRef ObjFileName) {
+ assert(!I.InitSymbol && "I already has an init symbol");
+ size_t Counter = 0;
+
+ do {
+ std::string InitSymString;
+ raw_string_ostream(InitSymString)
+ << "$." << ObjFileName << ".__inits." << Counter++;
+ I.InitSymbol = ES.intern(InitSymString);
+ } while (I.SymbolFlags.count(I.InitSymbol));
+
+ I.SymbolFlags[I.InitSymbol] = JITSymbolFlags::MaterializationSideEffectsOnly;
+}
+
+static Expected<MaterializationUnit::Interface>
+getMachOObjectFileSymbolInfo(ExecutionSession &ES,
+ const object::MachOObjectFile &Obj) {
+ MaterializationUnit::Interface I;
+
+ for (auto &Sym : Obj.symbols()) {
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr)
+ // TODO: Test this error.
+ return SymFlagsOrErr.takeError();
+
+ // Skip symbols not defined in this object file.
+ if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+ continue;
+
+ // Skip symbols that are not global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+ continue;
+
+ // Skip symbols that have type SF_File.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else
+ return SymType.takeError();
+
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+ auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+ if (!SymFlags)
+ return SymFlags.takeError();
+
+ // Strip the 'exported' flag from MachO linker-private symbols.
+ if (Name->starts_with("l"))
+ *SymFlags &= ~JITSymbolFlags::Exported;
+
+ I.SymbolFlags[ES.intern(*Name)] = std::move(*SymFlags);
+ }
+
+ for (auto &Sec : Obj.sections()) {
+ auto SecType = Obj.getSectionType(Sec);
+ if ((SecType & MachO::SECTION_TYPE) == MachO::S_MOD_INIT_FUNC_POINTERS) {
+ addInitSymbol(I, ES, Obj.getFileName());
+ break;
+ }
+ auto SegName = Obj.getSectionFinalSegmentName(Sec.getRawDataRefImpl());
+ auto SecName = cantFail(Obj.getSectionName(Sec.getRawDataRefImpl()));
+ if (isMachOInitializerSection(SegName, SecName)) {
+ addInitSymbol(I, ES, Obj.getFileName());
+ break;
+ }
+ }
+
+ return I;
+}
+
+static Expected<MaterializationUnit::Interface>
+getELFObjectFileSymbolInfo(ExecutionSession &ES,
+ const object::ELFObjectFileBase &Obj) {
+ MaterializationUnit::Interface I;
+
+ for (auto &Sym : Obj.symbols()) {
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr)
+ // TODO: Test this error.
+ return SymFlagsOrErr.takeError();
+
+ // Skip symbols not defined in this object file.
+ if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+ continue;
+
+ // Skip symbols that are not global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+ continue;
+
+ // Skip symbols that have type SF_File.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else
+ return SymType.takeError();
+
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+
+ auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+ if (!SymFlags)
+ return SymFlags.takeError();
+
+ // ELF STB_GNU_UNIQUE should map to Weak for ORC.
+ if (Sym.getBinding() == ELF::STB_GNU_UNIQUE)
+ *SymFlags |= JITSymbolFlags::Weak;
+
+ I.SymbolFlags[ES.intern(*Name)] = std::move(*SymFlags);
+ }
+
+ SymbolStringPtr InitSymbol;
+ for (auto &Sec : Obj.sections()) {
+ if (auto SecName = Sec.getName()) {
+ if (isELFInitializerSection(*SecName)) {
+ addInitSymbol(I, ES, Obj.getFileName());
+ break;
+ }
+ }
+ }
+
+ return I;
+}
+
+static Expected<MaterializationUnit::Interface>
+getCOFFObjectFileSymbolInfo(ExecutionSession &ES,
+ const object::COFFObjectFile &Obj) {
+ MaterializationUnit::Interface I;
+ std::vector<std::optional<object::coff_aux_section_definition>> ComdatDefs(
+ Obj.getNumberOfSections() + 1);
+ for (auto &Sym : Obj.symbols()) {
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr)
+ // TODO: Test this error.
+ return SymFlagsOrErr.takeError();
+
+ // Handle comdat symbols
+ auto COFFSym = Obj.getCOFFSymbol(Sym);
+ bool IsWeak = false;
+ if (auto *Def = COFFSym.getSectionDefinition()) {
+ auto Sec = Obj.getSection(COFFSym.getSectionNumber());
+ if (!Sec)
+ return Sec.takeError();
+ if (((*Sec)->Characteristics & COFF::IMAGE_SCN_LNK_COMDAT) &&
+ Def->Selection != COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
+ ComdatDefs[COFFSym.getSectionNumber()] = *Def;
+ continue;
+ }
+ }
+ if (!COFF::isReservedSectionNumber(COFFSym.getSectionNumber()) &&
+ ComdatDefs[COFFSym.getSectionNumber()]) {
+ auto Def = ComdatDefs[COFFSym.getSectionNumber()];
+ if (Def->Selection != COFF::IMAGE_COMDAT_SELECT_NODUPLICATES) {
+ IsWeak = true;
+ }
+ ComdatDefs[COFFSym.getSectionNumber()] = std::nullopt;
+ } else {
+ // Skip symbols not defined in this object file.
+ if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+ continue;
+ }
+
+ // Skip symbols that are not global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+ continue;
+
+ // Skip symbols that have type SF_File.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else
+ return SymType.takeError();
+
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+
+ auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+ if (!SymFlags)
+ return SymFlags.takeError();
+ *SymFlags |= JITSymbolFlags::Exported;
+
+ // Weak external is always a function
+ if (COFFSym.isWeakExternal())
+ *SymFlags |= JITSymbolFlags::Callable;
+
+ if (IsWeak)
+ *SymFlags |= JITSymbolFlags::Weak;
+
+ I.SymbolFlags[ES.intern(*Name)] = std::move(*SymFlags);
+ }
+
+ SymbolStringPtr InitSymbol;
+ for (auto &Sec : Obj.sections()) {
+ if (auto SecName = Sec.getName()) {
+ if (isCOFFInitializerSection(*SecName)) {
+ addInitSymbol(I, ES, Obj.getFileName());
+ break;
+ }
+ } else
+ return SecName.takeError();
+ }
+
+ return I;
+}
+
+Expected<MaterializationUnit::Interface>
+getGenericObjectFileSymbolInfo(ExecutionSession &ES,
+ const object::ObjectFile &Obj) {
+ MaterializationUnit::Interface I;
+
+ for (auto &Sym : Obj.symbols()) {
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr)
+ // TODO: Test this error.
+ return SymFlagsOrErr.takeError();
+
+ // Skip symbols not defined in this object file.
+ if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+ continue;
+
+ // Skip symbols that are not global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+ continue;
+
+ // Skip symbols that have type SF_File.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else
+ return SymType.takeError();
+
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+
+ auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+ if (!SymFlags)
+ return SymFlags.takeError();
+
+ I.SymbolFlags[ES.intern(*Name)] = std::move(*SymFlags);
+ }
+
+ return I;
+}
+
+Expected<MaterializationUnit::Interface>
+getObjectFileInterface(ExecutionSession &ES, MemoryBufferRef ObjBuffer) {
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer);
+
+ if (!Obj)
+ return Obj.takeError();
+
+ if (auto *MachOObj = dyn_cast<object::MachOObjectFile>(Obj->get()))
+ return getMachOObjectFileSymbolInfo(ES, *MachOObj);
+ else if (auto *ELFObj = dyn_cast<object::ELFObjectFileBase>(Obj->get()))
+ return getELFObjectFileSymbolInfo(ES, *ELFObj);
+ else if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj->get()))
+ return getCOFFObjectFileSymbolInfo(ES, *COFFObj);
+
+ return getGenericObjectFileSymbolInfo(ES, **Obj);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
new file mode 100644
index 000000000000..a66c40ddb687
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
@@ -0,0 +1,888 @@
+//===------- ObjectLinkingLayer.cpp - JITLink backed ORC ObjectLayer ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+#include "llvm/ExecutionEngine/JITLink/aarch32.h"
+#include "llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ObjectFormats.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <string>
+#include <vector>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::orc;
+
+namespace {
+
+bool hasInitializerSection(jitlink::LinkGraph &G) {
+ bool IsMachO = G.getTargetTriple().isOSBinFormatMachO();
+ bool IsElf = G.getTargetTriple().isOSBinFormatELF();
+ if (!IsMachO && !IsElf)
+ return false;
+
+ for (auto &Sec : G.sections()) {
+ if (IsMachO && isMachOInitializerSection(Sec.getName()))
+ return true;
+ if (IsElf && isELFInitializerSection(Sec.getName()))
+ return true;
+ }
+
+ return false;
+}
+
+ExecutorAddr getJITSymbolPtrForSymbol(Symbol &Sym, const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::arm:
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ if (hasTargetFlags(Sym, aarch32::ThumbSymbol)) {
+ // Set LSB to indicate thumb target
+ assert(Sym.isCallable() && "Only callable symbols can have thumb flag");
+ assert((Sym.getAddress().getValue() & 0x01) == 0 && "LSB is clear");
+ return Sym.getAddress() + 0x01;
+ }
+ return Sym.getAddress();
+ default:
+ return Sym.getAddress();
+ }
+}
+
+JITSymbolFlags getJITSymbolFlagsForSymbol(Symbol &Sym) {
+ JITSymbolFlags Flags;
+
+ if (Sym.getLinkage() == Linkage::Weak)
+ Flags |= JITSymbolFlags::Weak;
+
+ if (Sym.getScope() == Scope::Default)
+ Flags |= JITSymbolFlags::Exported;
+
+ if (Sym.isCallable())
+ Flags |= JITSymbolFlags::Callable;
+
+ return Flags;
+}
+
+class LinkGraphMaterializationUnit : public MaterializationUnit {
+public:
+ static std::unique_ptr<LinkGraphMaterializationUnit>
+ Create(ObjectLinkingLayer &ObjLinkingLayer, std::unique_ptr<LinkGraph> G) {
+ auto LGI = scanLinkGraph(ObjLinkingLayer.getExecutionSession(), *G);
+ return std::unique_ptr<LinkGraphMaterializationUnit>(
+ new LinkGraphMaterializationUnit(ObjLinkingLayer, std::move(G),
+ std::move(LGI)));
+ }
+
+ StringRef getName() const override { return G->getName(); }
+ void materialize(std::unique_ptr<MaterializationResponsibility> MR) override {
+ ObjLinkingLayer.emit(std::move(MR), std::move(G));
+ }
+
+private:
+ static Interface scanLinkGraph(ExecutionSession &ES, LinkGraph &G) {
+
+ Interface LGI;
+
+ auto AddSymbol = [&](Symbol *Sym) {
+ // Skip local symbols.
+ if (Sym->getScope() == Scope::Local)
+ return;
+ assert(Sym->hasName() && "Anonymous non-local symbol?");
+
+ LGI.SymbolFlags[ES.intern(Sym->getName())] =
+ getJITSymbolFlagsForSymbol(*Sym);
+ };
+
+ for (auto *Sym : G.defined_symbols())
+ AddSymbol(Sym);
+ for (auto *Sym : G.absolute_symbols())
+ AddSymbol(Sym);
+
+ if (hasInitializerSection(G))
+ LGI.InitSymbol = makeInitSymbol(ES, G);
+
+ return LGI;
+ }
+
+ static SymbolStringPtr makeInitSymbol(ExecutionSession &ES, LinkGraph &G) {
+ std::string InitSymString;
+ raw_string_ostream(InitSymString)
+ << "$." << G.getName() << ".__inits" << Counter++;
+ return ES.intern(InitSymString);
+ }
+
+ LinkGraphMaterializationUnit(ObjectLinkingLayer &ObjLinkingLayer,
+ std::unique_ptr<LinkGraph> G, Interface LGI)
+ : MaterializationUnit(std::move(LGI)), ObjLinkingLayer(ObjLinkingLayer),
+ G(std::move(G)) {}
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+ for (auto *Sym : G->defined_symbols())
+ if (Sym->getName() == *Name) {
+ assert(Sym->getLinkage() == Linkage::Weak &&
+ "Discarding non-weak definition");
+ G->makeExternal(*Sym);
+ break;
+ }
+ }
+
+ ObjectLinkingLayer &ObjLinkingLayer;
+ std::unique_ptr<LinkGraph> G;
+ static std::atomic<uint64_t> Counter;
+};
+
+std::atomic<uint64_t> LinkGraphMaterializationUnit::Counter{0};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+class ObjectLinkingLayerJITLinkContext final : public JITLinkContext {
+public:
+ ObjectLinkingLayerJITLinkContext(
+ ObjectLinkingLayer &Layer,
+ std::unique_ptr<MaterializationResponsibility> MR,
+ std::unique_ptr<MemoryBuffer> ObjBuffer)
+ : JITLinkContext(&MR->getTargetJITDylib()), Layer(Layer),
+ MR(std::move(MR)), ObjBuffer(std::move(ObjBuffer)) {
+ std::lock_guard<std::mutex> Lock(Layer.LayerMutex);
+ Plugins = Layer.Plugins;
+ }
+
+ ~ObjectLinkingLayerJITLinkContext() {
+ // If there is an object buffer return function then use it to
+ // return ownership of the buffer.
+ if (Layer.ReturnObjectBuffer && ObjBuffer)
+ Layer.ReturnObjectBuffer(std::move(ObjBuffer));
+ }
+
+ JITLinkMemoryManager &getMemoryManager() override { return Layer.MemMgr; }
+
+ void notifyMaterializing(LinkGraph &G) {
+ for (auto &P : Plugins)
+ P->notifyMaterializing(*MR, G, *this,
+ ObjBuffer ? ObjBuffer->getMemBufferRef()
+ : MemoryBufferRef());
+ }
+
+ void notifyFailed(Error Err) override {
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyFailed(*MR));
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ }
+
+ void lookup(const LookupMap &Symbols,
+ std::unique_ptr<JITLinkAsyncLookupContinuation> LC) override {
+
+ JITDylibSearchOrder LinkOrder;
+ MR->getTargetJITDylib().withLinkOrderDo(
+ [&](const JITDylibSearchOrder &LO) { LinkOrder = LO; });
+
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolLookupSet LookupSet;
+ for (auto &KV : Symbols) {
+ orc::SymbolLookupFlags LookupFlags;
+ switch (KV.second) {
+ case jitlink::SymbolLookupFlags::RequiredSymbol:
+ LookupFlags = orc::SymbolLookupFlags::RequiredSymbol;
+ break;
+ case jitlink::SymbolLookupFlags::WeaklyReferencedSymbol:
+ LookupFlags = orc::SymbolLookupFlags::WeaklyReferencedSymbol;
+ break;
+ }
+ LookupSet.add(ES.intern(KV.first), LookupFlags);
+ }
+
+ // OnResolve -- De-intern the symbols and pass the result to the linker.
+ auto OnResolve = [LookupContinuation =
+ std::move(LC)](Expected<SymbolMap> Result) mutable {
+ if (!Result)
+ LookupContinuation->run(Result.takeError());
+ else {
+ AsyncLookupResult LR;
+ for (auto &KV : *Result)
+ LR[*KV.first] = KV.second;
+ LookupContinuation->run(std::move(LR));
+ }
+ };
+
+ ES.lookup(LookupKind::Static, LinkOrder, std::move(LookupSet),
+ SymbolState::Resolved, std::move(OnResolve),
+ [this](const SymbolDependenceMap &Deps) {
+ // Translate LookupDeps map to SymbolSourceJD.
+ for (auto &[DepJD, Deps] : Deps)
+ for (auto &DepSym : Deps)
+ SymbolSourceJDs[NonOwningSymbolStringPtr(DepSym)] = DepJD;
+ });
+ }
+
+ Error notifyResolved(LinkGraph &G) override {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ bool AutoClaim = Layer.AutoClaimObjectSymbols;
+
+ SymbolMap InternedResult;
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && Sym->getScope() != Scope::Local) {
+ auto InternedName = ES.intern(Sym->getName());
+ auto Ptr = getJITSymbolPtrForSymbol(*Sym, G.getTargetTriple());
+ auto Flags = getJITSymbolFlagsForSymbol(*Sym);
+ InternedResult[InternedName] = {Ptr, Flags};
+ if (AutoClaim && !MR->getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ for (auto *Sym : G.absolute_symbols())
+ if (Sym->hasName() && Sym->getScope() != Scope::Local) {
+ auto InternedName = ES.intern(Sym->getName());
+ auto Ptr = getJITSymbolPtrForSymbol(*Sym, G.getTargetTriple());
+ auto Flags = getJITSymbolFlagsForSymbol(*Sym);
+ InternedResult[InternedName] = {Ptr, Flags};
+ if (AutoClaim && !MR->getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ if (!ExtraSymbolsToClaim.empty())
+ if (auto Err = MR->defineMaterializing(ExtraSymbolsToClaim))
+ return Err;
+
+ {
+
+ // Check that InternedResult matches up with MR->getSymbols(), overriding
+ // flags if requested.
+ // This guards against faulty transformations / compilers / object caches.
+
+ // First check that there aren't any missing symbols.
+ size_t NumMaterializationSideEffectsOnlySymbols = 0;
+ SymbolNameVector ExtraSymbols;
+ SymbolNameVector MissingSymbols;
+ for (auto &KV : MR->getSymbols()) {
+
+ auto I = InternedResult.find(KV.first);
+
+ // If this is a materialization-side-effects only symbol then bump
+ // the counter and make sure it's *not* defined, otherwise make
+ // sure that it is defined.
+ if (KV.second.hasMaterializationSideEffectsOnly()) {
+ ++NumMaterializationSideEffectsOnlySymbols;
+ if (I != InternedResult.end())
+ ExtraSymbols.push_back(KV.first);
+ continue;
+ } else if (I == InternedResult.end())
+ MissingSymbols.push_back(KV.first);
+ else if (Layer.OverrideObjectFlags)
+ I->second.setFlags(KV.second);
+ }
+
+ // If there were missing symbols then report the error.
+ if (!MissingSymbols.empty())
+ return make_error<MissingSymbolDefinitions>(
+ Layer.getExecutionSession().getSymbolStringPool(), G.getName(),
+ std::move(MissingSymbols));
+
+ // If there are more definitions than expected, add them to the
+ // ExtraSymbols vector.
+ if (InternedResult.size() >
+ MR->getSymbols().size() - NumMaterializationSideEffectsOnlySymbols) {
+ for (auto &KV : InternedResult)
+ if (!MR->getSymbols().count(KV.first))
+ ExtraSymbols.push_back(KV.first);
+ }
+
+ // If there were extra definitions then report the error.
+ if (!ExtraSymbols.empty())
+ return make_error<UnexpectedSymbolDefinitions>(
+ Layer.getExecutionSession().getSymbolStringPool(), G.getName(),
+ std::move(ExtraSymbols));
+ }
+
+ if (auto Err = MR->notifyResolved(InternedResult))
+ return Err;
+
+ notifyLoaded();
+ return Error::success();
+ }
+
+ void notifyFinalized(JITLinkMemoryManager::FinalizedAlloc A) override {
+ if (auto Err = notifyEmitted(std::move(A))) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ return;
+ }
+ if (auto Err = MR->notifyEmitted(SymbolDepGroups)) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ }
+ }
+
+ LinkGraphPassFunction getMarkLivePass(const Triple &TT) const override {
+ return [this](LinkGraph &G) { return markResponsibilitySymbolsLive(G); };
+ }
+
+ Error modifyPassConfig(LinkGraph &LG, PassConfiguration &Config) override {
+ // Add passes to mark duplicate defs as should-discard, and to walk the
+ // link graph to build the symbol dependence graph.
+ Config.PrePrunePasses.push_back([this](LinkGraph &G) {
+ return claimOrExternalizeWeakAndCommonSymbols(G);
+ });
+
+ for (auto &P : Plugins)
+ P->modifyPassConfig(*MR, LG, Config);
+
+ Config.PreFixupPasses.push_back(
+ [this](LinkGraph &G) { return registerDependencies(G); });
+
+ return Error::success();
+ }
+
+ void notifyLoaded() {
+ for (auto &P : Plugins)
+ P->notifyLoaded(*MR);
+ }
+
+ Error notifyEmitted(jitlink::JITLinkMemoryManager::FinalizedAlloc FA) {
+ Error Err = Error::success();
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyEmitted(*MR));
+
+ if (Err) {
+ if (FA)
+ Err =
+ joinErrors(std::move(Err), Layer.MemMgr.deallocate(std::move(FA)));
+ return Err;
+ }
+
+ if (FA)
+ return Layer.recordFinalizedAlloc(*MR, std::move(FA));
+
+ return Error::success();
+ }
+
+private:
+ // Symbol name dependencies:
+ // Internal: Defined in this graph.
+ // External: Defined externally.
+ struct BlockSymbolDependencies {
+ SymbolNameSet Internal, External;
+ };
+
+ // Lazily populated map of blocks to BlockSymbolDependencies values.
+ class BlockDependenciesMap {
+ public:
+ BlockDependenciesMap(ExecutionSession &ES,
+ DenseMap<const Block *, DenseSet<Block *>> BlockDeps)
+ : ES(ES), BlockDeps(std::move(BlockDeps)) {}
+
+ const BlockSymbolDependencies &operator[](const Block &B) {
+ // Check the cache first.
+ auto I = BlockTransitiveDepsCache.find(&B);
+ if (I != BlockTransitiveDepsCache.end())
+ return I->second;
+
+ // No value. Populate the cache.
+ BlockSymbolDependencies BTDCacheVal;
+ auto BDI = BlockDeps.find(&B);
+ assert(BDI != BlockDeps.end() && "No block dependencies");
+
+ for (auto *BDep : BDI->second) {
+ auto &BID = getBlockImmediateDeps(*BDep);
+ for (auto &ExternalDep : BID.External)
+ BTDCacheVal.External.insert(ExternalDep);
+ for (auto &InternalDep : BID.Internal)
+ BTDCacheVal.Internal.insert(InternalDep);
+ }
+
+ return BlockTransitiveDepsCache
+ .insert(std::make_pair(&B, std::move(BTDCacheVal)))
+ .first->second;
+ }
+
+ SymbolStringPtr &getInternedName(Symbol &Sym) {
+ auto I = NameCache.find(&Sym);
+ if (I != NameCache.end())
+ return I->second;
+
+ return NameCache.insert(std::make_pair(&Sym, ES.intern(Sym.getName())))
+ .first->second;
+ }
+
+ private:
+ BlockSymbolDependencies &getBlockImmediateDeps(Block &B) {
+ // Check the cache first.
+ auto I = BlockImmediateDepsCache.find(&B);
+ if (I != BlockImmediateDepsCache.end())
+ return I->second;
+
+ BlockSymbolDependencies BIDCacheVal;
+ for (auto &E : B.edges()) {
+ auto &Tgt = E.getTarget();
+ if (Tgt.getScope() != Scope::Local) {
+ if (Tgt.isExternal()) {
+ if (Tgt.getAddress() || !Tgt.isWeaklyReferenced())
+ BIDCacheVal.External.insert(getInternedName(Tgt));
+ } else
+ BIDCacheVal.Internal.insert(getInternedName(Tgt));
+ }
+ }
+
+ return BlockImmediateDepsCache
+ .insert(std::make_pair(&B, std::move(BIDCacheVal)))
+ .first->second;
+ }
+
+ ExecutionSession &ES;
+ DenseMap<const Block *, DenseSet<Block *>> BlockDeps;
+ DenseMap<const Symbol *, SymbolStringPtr> NameCache;
+ DenseMap<const Block *, BlockSymbolDependencies> BlockImmediateDepsCache;
+ DenseMap<const Block *, BlockSymbolDependencies> BlockTransitiveDepsCache;
+ };
+
+ Error claimOrExternalizeWeakAndCommonSymbols(LinkGraph &G) {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap NewSymbolsToClaim;
+ std::vector<std::pair<SymbolStringPtr, Symbol *>> NameToSym;
+
+ auto ProcessSymbol = [&](Symbol *Sym) {
+ if (Sym->hasName() && Sym->getLinkage() == Linkage::Weak &&
+ Sym->getScope() != Scope::Local) {
+ auto Name = ES.intern(Sym->getName());
+ if (!MR->getSymbols().count(ES.intern(Sym->getName()))) {
+ NewSymbolsToClaim[Name] =
+ getJITSymbolFlagsForSymbol(*Sym) | JITSymbolFlags::Weak;
+ NameToSym.push_back(std::make_pair(std::move(Name), Sym));
+ }
+ }
+ };
+
+ for (auto *Sym : G.defined_symbols())
+ ProcessSymbol(Sym);
+ for (auto *Sym : G.absolute_symbols())
+ ProcessSymbol(Sym);
+
+ // Attempt to claim all weak defs that we're not already responsible for.
+ // This may fail if the resource tracker has become defunct, but should
+ // always succeed otherwise.
+ if (auto Err = MR->defineMaterializing(std::move(NewSymbolsToClaim)))
+ return Err;
+
+ // Walk the list of symbols that we just tried to claim. Symbols that we're
+ // responsible for are marked live. Symbols that we're not responsible for
+ // are turned into external references.
+ for (auto &KV : NameToSym) {
+ if (MR->getSymbols().count(KV.first))
+ KV.second->setLive(true);
+ else
+ G.makeExternal(*KV.second);
+ }
+
+ return Error::success();
+ }
+
+ Error markResponsibilitySymbolsLive(LinkGraph &G) const {
+ auto &ES = Layer.getExecutionSession();
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && MR->getSymbols().count(ES.intern(Sym->getName())))
+ Sym->setLive(true);
+ return Error::success();
+ }
+
+ Error registerDependencies(LinkGraph &G) {
+ auto &TargetJD = MR->getTargetJITDylib();
+ auto &ES = TargetJD.getExecutionSession();
+ auto BlockDeps = computeBlockNonLocalDeps(G);
+
+ DenseSet<Block *> BlockDepsProcessed;
+ DenseMap<Block *, SymbolDependenceGroup> DepGroupForBlock;
+
+ // Compute dependencies for symbols defined in the JITLink graph.
+ for (auto *Sym : G.defined_symbols()) {
+
+ // Skip local symbols.
+ if (Sym->getScope() == Scope::Local)
+ continue;
+ assert(Sym->hasName() &&
+ "Defined non-local jitlink::Symbol should have a name");
+
+ auto &BDeps = BlockDeps[Sym->getBlock()];
+
+ // Skip symbols in blocks that don't depend on anything.
+ if (BDeps.Internal.empty() && BDeps.External.empty())
+ continue;
+
+ SymbolDependenceGroup &SDG = DepGroupForBlock[&Sym->getBlock()];
+ SDG.Symbols.insert(ES.intern(Sym->getName()));
+
+ if (!BlockDepsProcessed.count(&Sym->getBlock())) {
+ BlockDepsProcessed.insert(&Sym->getBlock());
+
+ if (!BDeps.Internal.empty())
+ SDG.Dependencies[&TargetJD] = BDeps.Internal;
+ for (auto &Dep : BDeps.External) {
+ auto DepSrcItr = SymbolSourceJDs.find(NonOwningSymbolStringPtr(Dep));
+ if (DepSrcItr != SymbolSourceJDs.end())
+ SDG.Dependencies[DepSrcItr->second].insert(Dep);
+ }
+ }
+ }
+
+ SymbolDependenceGroup SynthSDG;
+
+ for (auto &P : Plugins) {
+ auto SynthDeps = P->getSyntheticSymbolDependencies(*MR);
+ if (SynthDeps.empty())
+ continue;
+
+ DenseSet<Block *> BlockVisited;
+ for (auto &[Name, DepSyms] : SynthDeps) {
+ SynthSDG.Symbols.insert(Name);
+ for (auto *Sym : DepSyms) {
+ if (Sym->getScope() == Scope::Local) {
+ auto &BDeps = BlockDeps[Sym->getBlock()];
+ for (auto &S : BDeps.Internal)
+ SynthSDG.Dependencies[&TargetJD].insert(S);
+ for (auto &S : BDeps.External) {
+ auto DepSrcItr =
+ SymbolSourceJDs.find(NonOwningSymbolStringPtr(S));
+ if (DepSrcItr != SymbolSourceJDs.end())
+ SynthSDG.Dependencies[DepSrcItr->second].insert(S);
+ }
+ } else {
+ auto SymName = ES.intern(Sym->getName());
+ if (Sym->isExternal()) {
+ assert(SymbolSourceJDs.count(NonOwningSymbolStringPtr(SymName)) &&
+ "External symbol source entry missing");
+ SynthSDG
+ .Dependencies[SymbolSourceJDs[NonOwningSymbolStringPtr(
+ SymName)]]
+ .insert(SymName);
+ } else
+ SynthSDG.Dependencies[&TargetJD].insert(SymName);
+ }
+ }
+ }
+ }
+
+ // Transfer SDGs to SymbolDepGroups.
+ DepGroupForBlock.reserve(DepGroupForBlock.size() + 1);
+ for (auto &[B, SDG] : DepGroupForBlock) {
+ assert(!SDG.Symbols.empty() && "SymbolDependenceGroup covers no symbols");
+ if (!SDG.Dependencies.empty())
+ SymbolDepGroups.push_back(std::move(SDG));
+ }
+ if (!SynthSDG.Symbols.empty() && !SynthSDG.Dependencies.empty())
+ SymbolDepGroups.push_back(std::move(SynthSDG));
+
+ return Error::success();
+ }
+
+ BlockDependenciesMap computeBlockNonLocalDeps(LinkGraph &G) {
+ // First calculate the reachable-via-non-local-symbol blocks for each block.
+ struct BlockInfo {
+ DenseSet<Block *> Dependencies;
+ DenseSet<Block *> Dependants;
+ bool DependenciesChanged = true;
+ };
+ DenseMap<Block *, BlockInfo> BlockInfos;
+ SmallVector<Block *> WorkList;
+
+ // Pre-allocate map entries. This prevents any iterator/reference
+ // invalidation in the next loop.
+ for (auto *B : G.blocks())
+ (void)BlockInfos[B];
+
+ // Build initial worklist, record block dependencies/dependants and
+ // non-local symbol dependencies.
+ for (auto *B : G.blocks()) {
+ auto &BI = BlockInfos[B];
+ for (auto &E : B->edges()) {
+ if (E.getTarget().getScope() == Scope::Local &&
+ !E.getTarget().isAbsolute()) {
+ auto &TgtB = E.getTarget().getBlock();
+ if (&TgtB != B) {
+ BI.Dependencies.insert(&TgtB);
+ BlockInfos[&TgtB].Dependants.insert(B);
+ }
+ }
+ }
+ }
+
+ // Add blocks with both dependants and dependencies to the worklist to
+ // propagate dependencies to dependants.
+ for (auto &[B, BI] : BlockInfos) {
+ if (!BI.Dependants.empty() && !BI.Dependencies.empty())
+ WorkList.push_back(B);
+ }
+
+ // Propagate block-level dependencies through the block-dependence graph.
+ while (!WorkList.empty()) {
+ auto *B = WorkList.pop_back_val();
+
+ auto &BI = BlockInfos[B];
+ assert(BI.DependenciesChanged &&
+ "Block in worklist has unchanged dependencies");
+ BI.DependenciesChanged = false;
+ for (auto *Dependant : BI.Dependants) {
+ auto &DependantBI = BlockInfos[Dependant];
+ for (auto *Dependency : BI.Dependencies) {
+ if (Dependant != Dependency &&
+ DependantBI.Dependencies.insert(Dependency).second)
+ if (!DependantBI.DependenciesChanged) {
+ DependantBI.DependenciesChanged = true;
+ WorkList.push_back(Dependant);
+ }
+ }
+ }
+ }
+
+ DenseMap<const Block *, DenseSet<Block *>> BlockDeps;
+ for (auto &KV : BlockInfos)
+ BlockDeps[KV.first] = std::move(KV.second.Dependencies);
+
+ return BlockDependenciesMap(Layer.getExecutionSession(),
+ std::move(BlockDeps));
+ }
+
+ ObjectLinkingLayer &Layer;
+ std::vector<std::shared_ptr<ObjectLinkingLayer::Plugin>> Plugins;
+ std::unique_ptr<MaterializationResponsibility> MR;
+ std::unique_ptr<MemoryBuffer> ObjBuffer;
+ DenseMap<Block *, SymbolNameSet> ExternalBlockDeps;
+ DenseMap<Block *, SymbolNameSet> InternalBlockDeps;
+ DenseMap<NonOwningSymbolStringPtr, JITDylib *> SymbolSourceJDs;
+ std::vector<SymbolDependenceGroup> SymbolDepGroups;
+};
+
// Out-of-line definition anchors the Plugin vtable in this translation unit.
ObjectLinkingLayer::Plugin::~Plugin() = default;

// RTTI id required by RTTIExtends<ObjectLinkingLayer, ObjectLayer>.
char ObjectLinkingLayer::ID;

using BaseT = RTTIExtends<ObjectLinkingLayer, ObjectLayer>;
+
// Construct using the memory manager borrowed from the session's
// ExecutorProcessControl, and register this layer as a resource manager so
// it is notified of resource removal/transfer.
ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES)
    : BaseT(ES), MemMgr(ES.getExecutorProcessControl().getMemMgr()) {
  ES.registerResourceManager(*this);
}
+
// Construct with a caller-owned memory manager (the caller must keep MemMgr
// alive for the layer's lifetime).
ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES,
                                       JITLinkMemoryManager &MemMgr)
    : BaseT(ES), MemMgr(MemMgr) {
  ES.registerResourceManager(*this);
}
+
// Construct taking ownership of the memory manager: the reference member
// aliases the owned object held in MemMgrOwnership.
ObjectLinkingLayer::ObjectLinkingLayer(
    ExecutionSession &ES, std::unique_ptr<JITLinkMemoryManager> MemMgr)
    : BaseT(ES), MemMgr(*MemMgr), MemMgrOwnership(std::move(MemMgr)) {
  ES.registerResourceManager(*this);
}
+
// All finalized allocations must have been released (via resource removal)
// before the layer is destroyed.
ObjectLinkingLayer::~ObjectLinkingLayer() {
  assert(Allocs.empty() && "Layer destroyed with resources still attached");
  getExecutionSession().deregisterResourceManager(*this);
}
+
+Error ObjectLinkingLayer::add(ResourceTrackerSP RT,
+ std::unique_ptr<LinkGraph> G) {
+ auto &JD = RT->getJITDylib();
+ return JD.define(LinkGraphMaterializationUnit::Create(*this, std::move(G)),
+ std::move(RT));
+}
+
+void ObjectLinkingLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+ MemoryBufferRef ObjBuffer = O->getMemBufferRef();
+
+ auto Ctx = std::make_unique<ObjectLinkingLayerJITLinkContext>(
+ *this, std::move(R), std::move(O));
+ if (auto G = createLinkGraphFromObject(ObjBuffer)) {
+ Ctx->notifyMaterializing(**G);
+ link(std::move(*G), std::move(Ctx));
+ } else {
+ Ctx->notifyFailed(G.takeError());
+ }
+}
+
+void ObjectLinkingLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<LinkGraph> G) {
+ auto Ctx = std::make_unique<ObjectLinkingLayerJITLinkContext>(
+ *this, std::move(R), nullptr);
+ Ctx->notifyMaterializing(*G);
+ link(std::move(G), std::move(Ctx));
+}
+
+Error ObjectLinkingLayer::recordFinalizedAlloc(
+ MaterializationResponsibility &MR, FinalizedAlloc FA) {
+ auto Err = MR.withResourceKeyDo(
+ [&](ResourceKey K) { Allocs[K].push_back(std::move(FA)); });
+
+ if (Err)
+ Err = joinErrors(std::move(Err), MemMgr.deallocate(std::move(FA)));
+
+ return Err;
+}
+
+Error ObjectLinkingLayer::handleRemoveResources(JITDylib &JD, ResourceKey K) {
+
+ {
+ Error Err = Error::success();
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyRemovingResources(JD, K));
+ if (Err)
+ return Err;
+ }
+
+ std::vector<FinalizedAlloc> AllocsToRemove;
+ getExecutionSession().runSessionLocked([&] {
+ auto I = Allocs.find(K);
+ if (I != Allocs.end()) {
+ std::swap(AllocsToRemove, I->second);
+ Allocs.erase(I);
+ }
+ });
+
+ if (AllocsToRemove.empty())
+ return Error::success();
+
+ return MemMgr.deallocate(std::move(AllocsToRemove));
+}
+
+void ObjectLinkingLayer::handleTransferResources(JITDylib &JD,
+ ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ auto I = Allocs.find(SrcKey);
+ if (I != Allocs.end()) {
+ auto &SrcAllocs = I->second;
+ auto &DstAllocs = Allocs[DstKey];
+ DstAllocs.reserve(DstAllocs.size() + SrcAllocs.size());
+ for (auto &Alloc : SrcAllocs)
+ DstAllocs.push_back(std::move(Alloc));
+
+ // Erase SrcKey entry using value rather than iterator I: I may have been
+ // invalidated when we looked up DstKey.
+ Allocs.erase(SrcKey);
+ }
+
+ for (auto &P : Plugins)
+ P->notifyTransferringResources(JD, DstKey, SrcKey);
+}
+
// Construct an eh-frame plugin that registers/deregisters eh-frame sections
// through the given registrar as links complete or resources are removed.
EHFrameRegistrationPlugin::EHFrameRegistrationPlugin(
    ExecutionSession &ES, std::unique_ptr<EHFrameRegistrar> Registrar)
    : ES(ES), Registrar(std::move(Registrar)) {}
+
+void EHFrameRegistrationPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, LinkGraph &G,
+ PassConfiguration &PassConfig) {
+
+ PassConfig.PostFixupPasses.push_back(createEHFrameRecorderPass(
+ G.getTargetTriple(), [this, &MR](ExecutorAddr Addr, size_t Size) {
+ if (Addr) {
+ std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+ assert(!InProcessLinks.count(&MR) &&
+ "Link for MR already being tracked?");
+ InProcessLinks[&MR] = {Addr, Size};
+ }
+ }));
+}
+
+Error EHFrameRegistrationPlugin::notifyEmitted(
+ MaterializationResponsibility &MR) {
+
+ ExecutorAddrRange EmittedRange;
+ {
+ std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+
+ auto EHFrameRangeItr = InProcessLinks.find(&MR);
+ if (EHFrameRangeItr == InProcessLinks.end())
+ return Error::success();
+
+ EmittedRange = EHFrameRangeItr->second;
+ assert(EmittedRange.Start && "eh-frame addr to register can not be null");
+ InProcessLinks.erase(EHFrameRangeItr);
+ }
+
+ if (auto Err = MR.withResourceKeyDo(
+ [&](ResourceKey K) { EHFrameRanges[K].push_back(EmittedRange); }))
+ return Err;
+
+ return Registrar->registerEHFrames(EmittedRange);
+}
+
+Error EHFrameRegistrationPlugin::notifyFailed(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+ InProcessLinks.erase(&MR);
+ return Error::success();
+}
+
+Error EHFrameRegistrationPlugin::notifyRemovingResources(JITDylib &JD,
+ ResourceKey K) {
+ std::vector<ExecutorAddrRange> RangesToRemove;
+
+ ES.runSessionLocked([&] {
+ auto I = EHFrameRanges.find(K);
+ if (I != EHFrameRanges.end()) {
+ RangesToRemove = std::move(I->second);
+ EHFrameRanges.erase(I);
+ }
+ });
+
+ Error Err = Error::success();
+ while (!RangesToRemove.empty()) {
+ auto RangeToRemove = RangesToRemove.back();
+ RangesToRemove.pop_back();
+ assert(RangeToRemove.Start && "Untracked eh-frame range must not be null");
+ Err = joinErrors(std::move(Err),
+ Registrar->deregisterEHFrames(RangeToRemove));
+ }
+
+ return Err;
+}
+
+void EHFrameRegistrationPlugin::notifyTransferringResources(
+ JITDylib &JD, ResourceKey DstKey, ResourceKey SrcKey) {
+ auto SI = EHFrameRanges.find(SrcKey);
+ if (SI == EHFrameRanges.end())
+ return;
+
+ auto DI = EHFrameRanges.find(DstKey);
+ if (DI != EHFrameRanges.end()) {
+ auto &SrcRanges = SI->second;
+ auto &DstRanges = DI->second;
+ DstRanges.reserve(DstRanges.size() + SrcRanges.size());
+ for (auto &SrcRange : SrcRanges)
+ DstRanges.push_back(std::move(SrcRange));
+ EHFrameRanges.erase(SI);
+ } else {
+ // We need to move SrcKey's ranges over without invalidating the SI
+ // iterator.
+ auto Tmp = std::move(SI->second);
+ EHFrameRanges.erase(SI);
+ EHFrameRanges[DstKey] = std::move(Tmp);
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
new file mode 100644
index 000000000000..207a31ec1940
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
@@ -0,0 +1,44 @@
+//===---------- ObjectTransformLayer.cpp - Object Transform Layer ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
// RTTI id required by RTTIExtends<ObjectTransformLayer, ObjectLayer>.
char ObjectTransformLayer::ID;

using BaseT = RTTIExtends<ObjectTransformLayer, ObjectLayer>;
+
// Construct a layer that applies Transform (which may be empty, meaning
// pass-through) to each object buffer before forwarding it to BaseLayer.
ObjectTransformLayer::ObjectTransformLayer(ExecutionSession &ES,
                                           ObjectLayer &BaseLayer,
                                           TransformFunction Transform)
    : BaseT(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+void ObjectTransformLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Module must not be null");
+
+ // If there is a transform set then apply it.
+ if (Transform) {
+ if (auto TransformedObj = Transform(std::move(O)))
+ O = std::move(*TransformedObj);
+ else {
+ R->failMaterialization();
+ getExecutionSession().reportError(TransformedObj.takeError());
+ return;
+ }
+ }
+
+ BaseLayer.emit(std::move(R), std::move(O));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp
new file mode 100644
index 000000000000..6d568199378a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -0,0 +1,1242 @@
+//===------------- OrcABISupport.cpp - ABI specific support code ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+template <typename ORCABI>
+static bool stubAndPointerRangesOk(ExecutorAddr StubBlockAddr,
+ ExecutorAddr PointerBlockAddr,
+ unsigned NumStubs) {
+ constexpr unsigned MaxDisp = ORCABI::StubToPointerMaxDisplacement;
+ ExecutorAddr FirstStub = StubBlockAddr;
+ ExecutorAddr LastStub = FirstStub + ((NumStubs - 1) * ORCABI::StubSize);
+ ExecutorAddr FirstPointer = PointerBlockAddr;
+ ExecutorAddr LastPointer = FirstPointer + ((NumStubs - 1) * ORCABI::StubSize);
+
+ if (FirstStub < FirstPointer) {
+ if (LastStub >= FirstPointer)
+ return false; // Ranges overlap.
+ return (FirstPointer - FirstStub <= MaxDisp) &&
+ (LastPointer - LastStub <= MaxDisp); // out-of-range.
+ }
+
+ if (LastPointer >= FirstStub)
+ return false; // Ranges overlap.
+
+ return (FirstStub - FirstPointer <= MaxDisp) &&
+ (LastStub - LastPointer <= MaxDisp);
+}
+
+namespace llvm {
+namespace orc {
+
+void OrcAArch64::writeResolverCode(char *ResolverWorkingMem,
+ ExecutorAddr ResolverTargetAddress,
+ ExecutorAddr ReentryFnAddr,
+ ExecutorAddr ReentryCtxAddr) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
+ 0x910003fd, // 0x004: mov x29, sp
+ 0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
+ 0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
+ 0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
+ 0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
+ 0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
+ 0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
+ 0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
+ 0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
+ 0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
+ 0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
+ 0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
+ 0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
+ 0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
+ 0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
+ 0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
+ 0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
+ 0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
+ 0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
+ 0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
+ 0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
+ 0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
+ 0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
+ 0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
+ 0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
+ 0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
+ 0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
+ 0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
+ 0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
+ 0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
+ 0x580004e0, // 0x07c: ldr x0, Lreentry_ctx_ptr
+ 0xaa1e03e1, // 0x080: mov x1, x30
+ 0xd1003021, // 0x084: sub x1, x1, #12
+ 0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
+ 0xd63f0040, // 0x08c: blr x2
+ 0xaa0003f1, // 0x090: mov x17, x0
+ 0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
+ 0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
+ 0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
+ 0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
+ 0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
+ 0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
+ 0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
+ 0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
+ 0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
+ 0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
+ 0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
+ 0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
+ 0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
+ 0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
+ 0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
+ 0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
+ 0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
+ 0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
+ 0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
+ 0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
+ 0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
+ 0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
+ 0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
+ 0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
+ 0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
+ 0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
+ 0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
+ 0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
+ 0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
+ 0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
+ 0xd65f0220, // 0x10c: ret x17
+ 0x01234567, // 0x110: Lreentry_fn_ptr:
+ 0xdeadbeef, // 0x114: .quad 0
+ 0x98765432, // 0x118: Lreentry_ctx_ptr:
+ 0xcafef00d // 0x11c: .quad 0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x110;
+ const unsigned ReentryCtxAddrOffset = 0x118;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint64_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint64_t));
+}
+
+void OrcAArch64::writeTrampolines(char *TrampolineBlockWorkingMem,
+ ExecutorAddr TrampolineBlockTargetAddress,
+ ExecutorAddr ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+ memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+ sizeof(uint64_t));
+
+ // OffsetToPtr is actually the offset from the PC for the 2nd instruction, so
+ // subtract 32-bits.
+ OffsetToPtr -= 4;
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+ Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
+ Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // adr x16, Lptr
+ Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
+ }
+}
+
+void OrcAArch64::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, ExecutorAddr StubsBlockTargetAddress,
+ ExecutorAddr PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // ldr x16, ptr1 ; PC-rel load of ptr1
+ // br x16 ; Jump to resolver
+ // stub2:
+ // ldr x16, ptr2 ; PC-rel load of ptr2
+ // br x16 ; Jump to resolver
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ static_assert(StubSize == PointerSize,
+ "Pointer and stub size must match for algorithm below");
+ assert(stubAndPointerRangesOk<OrcAArch64>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+ uint64_t PtrDisplacement =
+ PointersBlockTargetAddress - StubsBlockTargetAddress;
+ assert((PtrDisplacement % 8 == 0) &&
+ "Displacement to pointer is not a multiple of 8");
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+ uint64_t PtrOffsetField = ((PtrDisplacement >> 2) & 0x7ffff) << 5;
+
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xd61f020058000010 | PtrOffsetField;
+}
+
+void OrcX86_64_Base::writeTrampolines(char *TrampolineBlockWorkingMem,
+ ExecutorAddr TrampolineBlockTargetAddress,
+ ExecutorAddr ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
+
+ memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+ sizeof(uint64_t));
+
+ uint64_t *Trampolines =
+ reinterpret_cast<uint64_t *>(TrampolineBlockWorkingMem);
+ uint64_t CallIndirPCRel = 0xf1c40000000015ff;
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
+ Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
+}
+
+void OrcX86_64_Base::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, ExecutorAddr StubsBlockTargetAddress,
+ ExecutorAddr PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1(%rip)
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2(%rip)
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ // Populate the stubs page stubs and mark it executable.
+ static_assert(StubSize == PointerSize,
+ "Pointer and stub size must match for algorithm below");
+ assert(stubAndPointerRangesOk<OrcX86_64_Base>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+ uint64_t PtrOffsetField =
+ (PointersBlockTargetAddress - StubsBlockTargetAddress - 6) << 16;
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
+}
+
+void OrcX86_64_SysV::writeResolverCode(char *ResolverWorkingMem,
+ ExecutorAddr ResolverTargetAddress,
+ ExecutorAddr ReentryFnAddr,
+ ExecutorAddr ReentryCtxAddr) {
+
+ LLVM_DEBUG({
+ dbgs() << "Writing resolver code to "
+ << formatv("{0:x16}", ResolverTargetAddress) << "\n";
+ });
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+ 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+ 0x48, 0xbf, // 0x26: movabsq <CBMgr>, %rdi
+
+ // 0x28: JIT re-entry ctx addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8b, 0x75, 0x08, // 0x30: movq 8(%rbp), %rsi
+ 0x48, 0x83, 0xee, 0x06, // 0x34: subq $6, %rsi
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xff, 0xd0, // 0x42: callq *%rax
+ 0x48, 0x89, 0x45, 0x08, // 0x44: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
+ 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq 0x208, %rsp
+ 0x41, 0x5f, // 0x54: popq %r15
+ 0x41, 0x5e, // 0x56: popq %r14
+ 0x41, 0x5d, // 0x58: popq %r13
+ 0x41, 0x5c, // 0x5a: popq %r12
+ 0x41, 0x5b, // 0x5c: popq %r11
+ 0x41, 0x5a, // 0x5e: popq %r10
+ 0x41, 0x59, // 0x60: popq %r9
+ 0x41, 0x58, // 0x62: popq %r8
+ 0x5f, // 0x64: popq %rdi
+ 0x5e, // 0x65: popq %rsi
+ 0x5a, // 0x66: popq %rdx
+ 0x59, // 0x67: popq %rcx
+ 0x5b, // 0x68: popq %rbx
+ 0x58, // 0x69: popq %rax
+ 0x5d, // 0x6a: popq %rbp
+ 0xc3, // 0x6b: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned ReentryCtxAddrOffset = 0x28;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint64_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint64_t));
+}
+
+void OrcX86_64_Win32::writeResolverCode(char *ResolverWorkingMem,
+ ExecutorAddr ResolverTargetAddress,
+ ExecutorAddr ReentryFnAddr,
+ ExecutorAddr ReentryCtxAddr) {
+
+ // resolverCode is similar to OrcX86_64 with differences specific to windows
+ // x64 calling convention: arguments go into rcx, rdx and come in reverse
+ // order, shadow space allocation on stack
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+ 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+
+ 0x48, 0xb9, // 0x26: movabsq <CBMgr>, %rcx
+ // 0x28: JIT re-entry ctx addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8B, 0x55, 0x08, // 0x30: mov rdx, [rbp+0x8]
+ 0x48, 0x83, 0xea, 0x06, // 0x34: sub rdx, 0x6
+
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ // 0x42: sub rsp, 0x20 (Allocate shadow space)
+ 0x48, 0x83, 0xEC, 0x20,
+ 0xff, 0xd0, // 0x46: callq *%rax
+
+ // 0x48: add rsp, 0x20 (Free shadow space)
+ 0x48, 0x83, 0xC4, 0x20,
+
+ 0x48, 0x89, 0x45, 0x08, // 0x4C: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x50: fxrstor64 (%rsp)
+ 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x55: addq 0x208, %rsp
+ 0x41, 0x5f, // 0x5C: popq %r15
+ 0x41, 0x5e, // 0x5E: popq %r14
+ 0x41, 0x5d, // 0x60: popq %r13
+ 0x41, 0x5c, // 0x62: popq %r12
+ 0x41, 0x5b, // 0x64: popq %r11
+ 0x41, 0x5a, // 0x66: popq %r10
+ 0x41, 0x59, // 0x68: popq %r9
+ 0x41, 0x58, // 0x6a: popq %r8
+ 0x5f, // 0x6c: popq %rdi
+ 0x5e, // 0x6d: popq %rsi
+ 0x5a, // 0x6e: popq %rdx
+ 0x59, // 0x6f: popq %rcx
+ 0x5b, // 0x70: popq %rbx
+ 0x58, // 0x71: popq %rax
+ 0x5d, // 0x72: popq %rbp
+ 0xc3, // 0x73: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned ReentryCtxAddrOffset = 0x28;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint64_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint64_t));
+}
+
+void OrcI386::writeResolverCode(char *ResolverWorkingMem,
+ ExecutorAddr ResolverTargetAddress,
+ ExecutorAddr ReentryFnAddr,
+ ExecutorAddr ReentryCtxAddr) {
+
+ assert((ReentryFnAddr.getValue() >> 32) == 0 && "ReentryFnAddr out of range");
+ assert((ReentryCtxAddr.getValue() >> 32) == 0 &&
+ "ReentryCtxAddr out of range");
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushl %ebp
+ 0x89, 0xe5, // 0x01: movl %esp, %ebp
+ 0x54, // 0x03: pushl %esp
+ 0x83, 0xe4, 0xf0, // 0x04: andl $-0x10, %esp
+ 0x50, // 0x07: pushl %eax
+ 0x53, // 0x08: pushl %ebx
+ 0x51, // 0x09: pushl %ecx
+ 0x52, // 0x0a: pushl %edx
+ 0x56, // 0x0b: pushl %esi
+ 0x57, // 0x0c: pushl %edi
+ 0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl $0x218, %esp
+ 0x0f, 0xae, 0x44, 0x24, 0x10, // 0x13: fxsave 0x10(%esp)
+ 0x8b, 0x75, 0x04, // 0x18: movl 0x4(%ebp), %esi
+ 0x83, 0xee, 0x05, // 0x1b: subl $0x5, %esi
+ 0x89, 0x74, 0x24, 0x04, // 0x1e: movl %esi, 0x4(%esp)
+ 0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
+ 0x00, // 0x22: movl <cbmgr>, (%esp)
+ 0xb8, 0x00, 0x00, 0x00, 0x00, // 0x29: movl <reentry>, %eax
+ 0xff, 0xd0, // 0x2e: calll *%eax
+ 0x89, 0x45, 0x04, // 0x30: movl %eax, 0x4(%ebp)
+ 0x0f, 0xae, 0x4c, 0x24, 0x10, // 0x33: fxrstor 0x10(%esp)
+ 0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl $0x218, %esp
+ 0x5f, // 0x3e: popl %edi
+ 0x5e, // 0x3f: popl %esi
+ 0x5a, // 0x40: popl %edx
+ 0x59, // 0x41: popl %ecx
+ 0x5b, // 0x42: popl %ebx
+ 0x58, // 0x43: popl %eax
+ 0x8b, 0x65, 0xfc, // 0x44: movl -0x4(%ebp), %esp
+ 0x5d, // 0x48: popl %ebp
+ 0xc3 // 0x49: retl
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x2a;
+ const unsigned ReentryCtxAddrOffset = 0x25;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint32_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint32_t));
+}
+
+void OrcI386::writeTrampolines(char *TrampolineWorkingMem,
+ ExecutorAddr TrampolineBlockTargetAddress,
+ ExecutorAddr ResolverAddr,
+ unsigned NumTrampolines) {
+ assert((ResolverAddr.getValue() >> 32) == 0 && "ResolverAddr out of range");
+
+ uint64_t CallRelImm = 0xF1C4C400000000e8;
+ uint64_t ResolverRel = ResolverAddr - TrampolineBlockTargetAddress - 5;
+
+ uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineWorkingMem);
+ for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
+ Trampolines[I] = CallRelImm | (ResolverRel << 8);
+}
+
+void OrcI386::writeIndirectStubsBlock(char *StubsBlockWorkingMem,
+ ExecutorAddr StubsBlockTargetAddress,
+ ExecutorAddr PointersBlockTargetAddress,
+ unsigned NumStubs) {
+ assert((StubsBlockTargetAddress.getValue() >> 32) == 0 &&
+ "StubsBlockTargetAddress is out of range");
+ assert((PointersBlockTargetAddress.getValue() >> 32) == 0 &&
+ "PointersBlockTargetAddress is out of range");
+
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ assert(stubAndPointerRangesOk<OrcI386>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress.getValue();
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
+ Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
+}
+
+void OrcMips32_Base::writeResolverCode(char *ResolverWorkingMem,
+ ExecutorAddr ResolverTargetAddress,
+ ExecutorAddr ReentryFnAddr,
+ ExecutorAddr ReentryCtxAddr,
+ bool isBigEndian) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0x27bdff98, // 0x00: addiu $sp,$sp,-104
+ 0xafa20000, // 0x04: sw $v0,0($sp)
+ 0xafa30004, // 0x08: sw $v1,4($sp)
+ 0xafa40008, // 0x0c: sw $a0,8($sp)
+ 0xafa5000c, // 0x10: sw $a1,12($sp)
+ 0xafa60010, // 0x14: sw $a2,16($sp)
+ 0xafa70014, // 0x18: sw $a3,20($sp)
+ 0xafb00018, // 0x1c: sw $s0,24($sp)
+ 0xafb1001c, // 0x20: sw $s1,28($sp)
+ 0xafb20020, // 0x24: sw $s2,32($sp)
+ 0xafb30024, // 0x28: sw $s3,36($sp)
+ 0xafb40028, // 0x2c: sw $s4,40($sp)
+ 0xafb5002c, // 0x30: sw $s5,44($sp)
+ 0xafb60030, // 0x34: sw $s6,48($sp)
+ 0xafb70034, // 0x38: sw $s7,52($sp)
+ 0xafa80038, // 0x3c: sw $t0,56($sp)
+ 0xafa9003c, // 0x40: sw $t1,60($sp)
+ 0xafaa0040, // 0x44: sw $t2,64($sp)
+ 0xafab0044, // 0x48: sw $t3,68($sp)
+ 0xafac0048, // 0x4c: sw $t4,72($sp)
+ 0xafad004c, // 0x50: sw $t5,76($sp)
+ 0xafae0050, // 0x54: sw $t6,80($sp)
+ 0xafaf0054, // 0x58: sw $t7,84($sp)
+ 0xafb80058, // 0x5c: sw $t8,88($sp)
+ 0xafb9005c, // 0x60: sw $t9,92($sp)
+ 0xafbe0060, // 0x64: sw $fp,96($sp)
+ 0xafbf0064, // 0x68: sw $ra,100($sp)
+
+ // JIT re-entry ctx addr.
+ 0x00000000, // 0x6c: lui $a0,ctx
+ 0x00000000, // 0x70: addiu $a0,$a0,ctx
+
+ 0x03e02825, // 0x74: move $a1, $ra
+ 0x24a5ffec, // 0x78: addiu $a1,$a1,-20
+
+ // JIT re-entry fn addr:
+ 0x00000000, // 0x7c: lui $t9,reentry
+ 0x00000000, // 0x80: addiu $t9,$t9,reentry
+
+ 0x0320f809, // 0x84: jalr $t9
+ 0x00000000, // 0x88: nop
+ 0x8fbf0064, // 0x8c: lw $ra,100($sp)
+ 0x8fbe0060, // 0x90: lw $fp,96($sp)
+ 0x8fb9005c, // 0x94: lw $t9,92($sp)
+ 0x8fb80058, // 0x98: lw $t8,88($sp)
+ 0x8faf0054, // 0x9c: lw $t7,84($sp)
+ 0x8fae0050, // 0xa0: lw $t6,80($sp)
+ 0x8fad004c, // 0xa4: lw $t5,76($sp)
+ 0x8fac0048, // 0xa8: lw $t4,72($sp)
+ 0x8fab0044, // 0xac: lw $t3,68($sp)
+ 0x8faa0040, // 0xb0: lw $t2,64($sp)
+ 0x8fa9003c, // 0xb4: lw $t1,60($sp)
+ 0x8fa80038, // 0xb8: lw $t0,56($sp)
+ 0x8fb70034, // 0xbc: lw $s7,52($sp)
+ 0x8fb60030, // 0xc0: lw $s6,48($sp)
+ 0x8fb5002c, // 0xc4: lw $s5,44($sp)
+ 0x8fb40028, // 0xc8: lw $s4,40($sp)
+ 0x8fb30024, // 0xcc: lw $s3,36($sp)
+ 0x8fb20020, // 0xd0: lw $s2,32($sp)
+ 0x8fb1001c, // 0xd4: lw $s1,28($sp)
+ 0x8fb00018, // 0xd8: lw $s0,24($sp)
+ 0x8fa70014, // 0xdc: lw $a3,20($sp)
+ 0x8fa60010, // 0xe0: lw $a2,16($sp)
+ 0x8fa5000c, // 0xe4: lw $a1,12($sp)
+ 0x8fa40008, // 0xe8: lw $a0,8($sp)
+ 0x27bd0068, // 0xec: addiu $sp,$sp,104
+ 0x0300f825, // 0xf0: move $ra, $t8
+ 0x03200008, // 0xf4: jr $t9
+ 0x00000000, // 0xf8: move $t9, $v0/v1
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x7c; // JIT re-entry fn addr lui
+ const unsigned ReentryCtxAddrOffset = 0x6c; // JIT re-entry context addr lui
+ const unsigned Offsett = 0xf8;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+
+ // Depending on endian return value will be in v0 or v1.
+ uint32_t MoveVxT9 = isBigEndian ? 0x0060c825 : 0x0040c825;
+ memcpy(ResolverWorkingMem + Offsett, &MoveVxT9, sizeof(MoveVxT9));
+
+ uint32_t ReentryCtxLUi =
+ 0x3c040000 | (((ReentryCtxAddr.getValue() + 0x8000) >> 16) & 0xFFFF);
+ uint32_t ReentryCtxADDiu = 0x24840000 | (ReentryCtxAddr.getValue() & 0xFFFF);
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLUi,
+ sizeof(ReentryCtxLUi));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset + 4, &ReentryCtxADDiu,
+ sizeof(ReentryCtxADDiu));
+
+ uint32_t ReentryFnLUi =
+ 0x3c190000 | (((ReentryFnAddr.getValue() + 0x8000) >> 16) & 0xFFFF);
+ uint32_t ReentryFnADDiu = 0x27390000 | (ReentryFnAddr.getValue() & 0xFFFF);
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnLUi,
+ sizeof(ReentryFnLUi));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset + 4, &ReentryFnADDiu,
+ sizeof(ReentryFnADDiu));
+}
+
+void OrcMips32_Base::writeTrampolines(char *TrampolineBlockWorkingMem,
+ ExecutorAddr TrampolineBlockTargetAddress,
+ ExecutorAddr ResolverAddr,
+ unsigned NumTrampolines) {
+
+ assert((ResolverAddr.getValue() >> 32) == 0 && "ResolverAddr out of range");
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+ uint32_t RHiAddr = ((ResolverAddr.getValue() + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ // move $t8,$ra
+ // lui $t9,ResolverAddr
+ // addiu $t9,$t9,ResolverAddr
+ // jalr $t9
+ // nop
+ Trampolines[5 * I + 0] = 0x03e0c025;
+ Trampolines[5 * I + 1] = 0x3c190000 | (RHiAddr & 0xFFFF);
+ Trampolines[5 * I + 2] = 0x27390000 | (ResolverAddr.getValue() & 0xFFFF);
+ Trampolines[5 * I + 3] = 0x0320f809;
+ Trampolines[5 * I + 4] = 0x00000000;
+ }
+}
+
+void OrcMips32_Base::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, ExecutorAddr StubsBlockTargetAddress,
+ ExecutorAddr PointersBlockTargetAddress, unsigned NumStubs) {
+ assert((StubsBlockTargetAddress.getValue() >> 32) == 0 &&
+ "InitialPtrVal is out of range");
+
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // lui $t9, ptr1
+ // lw $t9, %lo(ptr1)($t9)
+ // jr $t9
+ // stub2:
+ // lui $t9, ptr2
+ // lw $t9,%lo(ptr1)($t9)
+ // jr $t9
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .word 0x0
+ // ptr2:
+ // .word 0x0
+ //
+ // i..
+
+ assert(stubAndPointerRangesOk<OrcMips32_Base>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ // Populate the stubs page stubs and mark it executable.
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress.getValue();
+
+ for (unsigned I = 0; I < NumStubs; ++I) {
+ uint32_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+ Stub[4 * I + 0] = 0x3c190000 | (HiAddr & 0xFFFF); // lui $t9,ptr1
+ Stub[4 * I + 1] = 0x8f390000 | (PtrAddr & 0xFFFF); // lw $t9,%lo(ptr1)($t9)
+ Stub[4 * I + 2] = 0x03200008; // jr $t9
+ Stub[4 * I + 3] = 0x00000000; // nop
+ PtrAddr += 4;
+ }
+}
+
+void OrcMips64::writeResolverCode(char *ResolverWorkingMem,
+ ExecutorAddr ResolverTargetAddress,
+ ExecutorAddr ReentryFnAddr,
+ ExecutorAddr ReentryCtxAddr) {
+
+ const uint32_t ResolverCode[] = {
+ //resolver_entry:
+ 0x67bdff30, // 0x00: daddiu $sp,$sp,-208
+ 0xffa20000, // 0x04: sd v0,0(sp)
+ 0xffa30008, // 0x08: sd v1,8(sp)
+ 0xffa40010, // 0x0c: sd a0,16(sp)
+ 0xffa50018, // 0x10: sd a1,24(sp)
+ 0xffa60020, // 0x14: sd a2,32(sp)
+ 0xffa70028, // 0x18: sd a3,40(sp)
+ 0xffa80030, // 0x1c: sd a4,48(sp)
+ 0xffa90038, // 0x20: sd a5,56(sp)
+ 0xffaa0040, // 0x24: sd a6,64(sp)
+ 0xffab0048, // 0x28: sd a7,72(sp)
+ 0xffac0050, // 0x2c: sd t0,80(sp)
+ 0xffad0058, // 0x30: sd t1,88(sp)
+ 0xffae0060, // 0x34: sd t2,96(sp)
+ 0xffaf0068, // 0x38: sd t3,104(sp)
+ 0xffb00070, // 0x3c: sd s0,112(sp)
+ 0xffb10078, // 0x40: sd s1,120(sp)
+ 0xffb20080, // 0x44: sd s2,128(sp)
+ 0xffb30088, // 0x48: sd s3,136(sp)
+ 0xffb40090, // 0x4c: sd s4,144(sp)
+ 0xffb50098, // 0x50: sd s5,152(sp)
+ 0xffb600a0, // 0x54: sd s6,160(sp)
+ 0xffb700a8, // 0x58: sd s7,168(sp)
+ 0xffb800b0, // 0x5c: sd t8,176(sp)
+ 0xffb900b8, // 0x60: sd t9,184(sp)
+ 0xffbe00c0, // 0x64: sd fp,192(sp)
+ 0xffbf00c8, // 0x68: sd ra,200(sp)
+
+ // JIT re-entry ctx addr.
+ 0x00000000, // 0x6c: lui $a0,heighest(ctx)
+ 0x00000000, // 0x70: daddiu $a0,$a0,heigher(ctx)
+ 0x00000000, // 0x74: dsll $a0,$a0,16
+ 0x00000000, // 0x78: daddiu $a0,$a0,hi(ctx)
+ 0x00000000, // 0x7c: dsll $a0,$a0,16
+ 0x00000000, // 0x80: daddiu $a0,$a0,lo(ctx)
+
+ 0x03e02825, // 0x84: move $a1, $ra
+ 0x64a5ffdc, // 0x88: daddiu $a1,$a1,-36
+
+ // JIT re-entry fn addr:
+ 0x00000000, // 0x8c: lui $t9,reentry
+ 0x00000000, // 0x90: daddiu $t9,$t9,reentry
+ 0x00000000, // 0x94: dsll $t9,$t9,
+ 0x00000000, // 0x98: daddiu $t9,$t9,
+ 0x00000000, // 0x9c: dsll $t9,$t9,
+ 0x00000000, // 0xa0: daddiu $t9,$t9,
+ 0x0320f809, // 0xa4: jalr $t9
+ 0x00000000, // 0xa8: nop
+ 0xdfbf00c8, // 0xac: ld ra, 200(sp)
+ 0xdfbe00c0, // 0xb0: ld fp, 192(sp)
+ 0xdfb900b8, // 0xb4: ld t9, 184(sp)
+ 0xdfb800b0, // 0xb8: ld t8, 176(sp)
+ 0xdfb700a8, // 0xbc: ld s7, 168(sp)
+ 0xdfb600a0, // 0xc0: ld s6, 160(sp)
+ 0xdfb50098, // 0xc4: ld s5, 152(sp)
+ 0xdfb40090, // 0xc8: ld s4, 144(sp)
+ 0xdfb30088, // 0xcc: ld s3, 136(sp)
+ 0xdfb20080, // 0xd0: ld s2, 128(sp)
+ 0xdfb10078, // 0xd4: ld s1, 120(sp)
+ 0xdfb00070, // 0xd8: ld s0, 112(sp)
+ 0xdfaf0068, // 0xdc: ld t3, 104(sp)
+ 0xdfae0060, // 0xe0: ld t2, 96(sp)
+ 0xdfad0058, // 0xe4: ld t1, 88(sp)
+ 0xdfac0050, // 0xe8: ld t0, 80(sp)
+ 0xdfab0048, // 0xec: ld a7, 72(sp)
+ 0xdfaa0040, // 0xf0: ld a6, 64(sp)
+ 0xdfa90038, // 0xf4: ld a5, 56(sp)
+ 0xdfa80030, // 0xf8: ld a4, 48(sp)
+ 0xdfa70028, // 0xfc: ld a3, 40(sp)
+ 0xdfa60020, // 0x100: ld a2, 32(sp)
+ 0xdfa50018, // 0x104: ld a1, 24(sp)
+ 0xdfa40010, // 0x108: ld a0, 16(sp)
+ 0xdfa30008, // 0x10c: ld v1, 8(sp)
+ 0x67bd00d0, // 0x110: daddiu $sp,$sp,208
+ 0x0300f825, // 0x114: move $ra, $t8
+ 0x03200008, // 0x118: jr $t9
+ 0x0040c825, // 0x11c: move $t9, $v0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x8c; // JIT re-entry fn addr lui
+ const unsigned ReentryCtxAddrOffset = 0x6c; // JIT re-entry ctx addr lui
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+
+ uint32_t ReentryCtxLUi =
+ 0x3c040000 |
+ (((ReentryCtxAddr.getValue() + 0x800080008000) >> 48) & 0xFFFF);
+ uint32_t ReentryCtxDADDiu =
+ 0x64840000 | (((ReentryCtxAddr.getValue() + 0x80008000) >> 32) & 0xFFFF);
+ uint32_t ReentryCtxDSLL = 0x00042438;
+ uint32_t ReentryCtxDADDiu2 =
+ 0x64840000 | ((((ReentryCtxAddr.getValue() + 0x8000) >> 16) & 0xFFFF));
+ uint32_t ReentryCtxDSLL2 = 0x00042438;
+ uint32_t ReentryCtxDADDiu3 =
+ 0x64840000 | (ReentryCtxAddr.getValue() & 0xFFFF);
+
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLUi,
+ sizeof(ReentryCtxLUi));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 4), &ReentryCtxDADDiu,
+ sizeof(ReentryCtxDADDiu));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 8), &ReentryCtxDSLL,
+ sizeof(ReentryCtxDSLL));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 12), &ReentryCtxDADDiu2,
+ sizeof(ReentryCtxDADDiu2));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 16), &ReentryCtxDSLL2,
+ sizeof(ReentryCtxDSLL2));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 20), &ReentryCtxDADDiu3,
+ sizeof(ReentryCtxDADDiu3));
+
+ uint32_t ReentryFnLUi =
+ 0x3c190000 |
+ (((ReentryFnAddr.getValue() + 0x800080008000) >> 48) & 0xFFFF);
+
+ uint32_t ReentryFnDADDiu =
+ 0x67390000 | (((ReentryFnAddr.getValue() + 0x80008000) >> 32) & 0xFFFF);
+
+ uint32_t ReentryFnDSLL = 0x0019cc38;
+
+ uint32_t ReentryFnDADDiu2 =
+ 0x67390000 | (((ReentryFnAddr.getValue() + 0x8000) >> 16) & 0xFFFF);
+
+ uint32_t ReentryFnDSLL2 = 0x0019cc38;
+
+ uint32_t ReentryFnDADDiu3 = 0x67390000 | (ReentryFnAddr.getValue() & 0xFFFF);
+
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnLUi,
+ sizeof(ReentryFnLUi));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 4), &ReentryFnDADDiu,
+ sizeof(ReentryFnDADDiu));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 8), &ReentryFnDSLL,
+ sizeof(ReentryFnDSLL));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 12), &ReentryFnDADDiu2,
+ sizeof(ReentryFnDADDiu2));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 16), &ReentryFnDSLL2,
+ sizeof(ReentryFnDSLL2));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 20), &ReentryFnDADDiu3,
+ sizeof(ReentryFnDADDiu3));
+}
+
+void OrcMips64::writeTrampolines(char *TrampolineBlockWorkingMem,
+ ExecutorAddr TrampolineBlockTargetAddress,
+ ExecutorAddr ResolverAddr,
+ unsigned NumTrampolines) {
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+ uint64_t HeighestAddr = ((ResolverAddr.getValue() + 0x800080008000) >> 48);
+ uint64_t HeigherAddr = ((ResolverAddr.getValue() + 0x80008000) >> 32);
+ uint64_t HiAddr = ((ResolverAddr.getValue() + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ Trampolines[10 * I + 0] = 0x03e0c025; // move $t8,$ra
+ Trampolines[10 * I + 1] = 0x3c190000 | (HeighestAddr & 0xFFFF); // lui $t9,resolveAddr
+ Trampolines[10 * I + 2] = 0x67390000 | (HeigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(resolveAddr)
+ Trampolines[10 * I + 3] = 0x0019cc38; // dsll $t9,$t9,16
+ Trampolines[10 * I + 4] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(ptr)
+ Trampolines[10 * I + 5] = 0x0019cc38; // dsll $t9,$t9,16
+ Trampolines[10 * I + 6] = 0x67390000 | (ResolverAddr.getValue() &
+ 0xFFFF); // daddiu $t9,$t9,%lo(ptr)
+ Trampolines[10 * I + 7] = 0x0320f809; // jalr $t9
+ Trampolines[10 * I + 8] = 0x00000000; // nop
+ Trampolines[10 * I + 9] = 0x00000000; // nop
+ }
+}
+
+/// Write one 8-word (32-byte) stub per entry into the MIPS64 stubs block.
+/// Each stub materializes the absolute address of its pointer slot (which
+/// lives in the separate __orc_ptrs block) in $t9, loads the target through
+/// it, and jumps.  The pointer slots themselves are patched elsewhere.
+void OrcMips64::writeIndirectStubsBlock(char *StubsBlockWorkingMem,
+                                        ExecutorAddr StubsBlockTargetAddress,
+                                        ExecutorAddr PointersBlockTargetAddress,
+                                        unsigned NumStubs) {
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  //   lui $t9,ptr1
+  //   dsll $t9,$t9,16
+  //   daddiu $t9,$t9,%hi(ptr)
+  //   dsll $t9,$t9,16
+  //   ld $t9,%lo(ptr)
+  //   jr $t9
+  // stub2:
+  //   lui $t9,ptr1
+  //   dsll $t9,$t9,16
+  //   daddiu $t9,$t9,%hi(ptr)
+  //   dsll $t9,$t9,16
+  //   ld $t9,%lo(ptr)
+  //   jr $t9
+  //
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  //   .dword 0x0
+  // ptr2:
+  //   .dword 0x0
+  //
+  // ...
+
+  assert(stubAndPointerRangesOk<OrcMips64>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+
+  // Populate the stubs page stubs and mark it executable.
+  uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+  uint64_t PtrAddr = PointersBlockTargetAddress.getValue();
+
+  // Each pointer slot is 8 bytes, hence PtrAddr += 8 per stub.
+  for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 8) {
+    // The +0x8000 / +0x80008000 / +0x800080008000 biases fold the carry
+    // from the sign-extended lower 16-bit fields into the next-higher
+    // field (the %hi/%higher/%highest relocation adjustment).
+    uint64_t HeighestAddr = ((PtrAddr + 0x800080008000) >> 48);
+    uint64_t HeigherAddr = ((PtrAddr + 0x80008000) >> 32);
+    uint64_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+    Stub[8 * I + 0] = 0x3c190000 | (HeighestAddr & 0xFFFF); // lui $t9,ptr1
+    Stub[8 * I + 1] = 0x67390000 | (HeigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(ptr)
+    Stub[8 * I + 2] = 0x0019cc38; // dsll $t9,$t9,16
+    Stub[8 * I + 3] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(ptr)
+    Stub[8 * I + 4] = 0x0019cc38; // dsll $t9,$t9,16
+    Stub[8 * I + 5] = 0xdf390000 | (PtrAddr & 0xFFFF); // ld $t9,%lo(ptr)
+    Stub[8 * I + 6] = 0x03200008; // jr $t9
+    Stub[8 * I + 7] = 0x00000000; // nop
+  }
+}
+
+/// Write the RISC-V 64 resolver entry block.  On entry t1 holds the return
+/// address left by a trampoline's jalr.  The code spills the saved,
+/// argument, and FP-saved registers, loads the reentry context and reentry
+/// function from two literal slots appended after the code, calls the
+/// reentry function, restores all registers, and jumps to the resolved
+/// address via t0.
+void OrcRiscv64::writeResolverCode(char *ResolverWorkingMem,
+                                   ExecutorAddr ResolverTargetAddress,
+                                   ExecutorAddr ReentryFnAddr,
+                                   ExecutorAddr ReentryCtxAddr) {
+
+  const uint32_t ResolverCode[] = {
+      0xef810113, // 0x00: addi sp,sp,-264
+      0x00813023, // 0x04: sd s0,0(sp)
+      0x00913423, // 0x08: sd s1,8(sp)
+      0x01213823, // 0x0c: sd s2,16(sp)
+      0x01313c23, // 0x10: sd s3,24(sp)
+      0x03413023, // 0x14: sd s4,32(sp)
+      0x03513423, // 0x18: sd s5,40(sp)
+      0x03613823, // 0x1c: sd s6,48(sp)
+      0x03713c23, // 0x20: sd s7,56(sp)
+      0x05813023, // 0x24: sd s8,64(sp)
+      0x05913423, // 0x28: sd s9,72(sp)
+      0x05a13823, // 0x2c: sd s10,80(sp)
+      0x05b13c23, // 0x30: sd s11,88(sp)
+      0x06113023, // 0x34: sd ra,96(sp)
+      0x06a13423, // 0x38: sd a0,104(sp)
+      0x06b13823, // 0x3c: sd a1,112(sp)
+      0x06c13c23, // 0x40: sd a2,120(sp)
+      0x08d13023, // 0x44: sd a3,128(sp)
+      0x08e13423, // 0x48: sd a4,136(sp)
+      0x08f13823, // 0x4c: sd a5,144(sp)
+      0x09013c23, // 0x50: sd a6,152(sp)
+      0x0b113023, // 0x54: sd a7,160(sp)
+      0x0a813427, // 0x58: fsd fs0,168(sp)
+      0x0a913827, // 0x5c: fsd fs1,176(sp)
+      0x0b213c27, // 0x60: fsd fs2,184(sp)
+      0x0d313027, // 0x64: fsd fs3,192(sp)
+      0x0d413427, // 0x68: fsd fs4,200(sp)
+      0x0d513827, // 0x6c: fsd fs5,208(sp)
+      0x0d613c27, // 0x70: fsd fs6,216(sp)
+      0x0f713027, // 0x74: fsd fs7,224(sp)
+      0x0f813427, // 0x78: fsd fs8,232(sp)
+      0x0f913827, // 0x7c: fsd fs9,240(sp)
+      0x0fa13c27, // 0x80: fsd fs10,248(sp)
+      0x11b13027, // 0x84: fsd fs11,256(sp)
+      // a0 = reentry context (from the Lreentry_ctx_ptr slot below).
+      0x00000517, // 0x88: auipc a0,0x0
+      0x0b053503, // 0x8c: ld a0,176(a0) # 0x138
+      // a1 = trampoline address: t1 points just past the trampoline's
+      // jalr (trampoline + 12 bytes), so rebase by -12.
+      0x00030593, // 0x90: mv a1,t1
+      0xff458593, // 0x94: addi a1,a1,-12
+      // a2 = reentry function (from the Lreentry_fn_ptr slot below).
+      0x00000617, // 0x98: auipc a2,0x0
+      0x0a863603, // 0x9c: ld a2,168(a2) # 0x140
+      0x000600e7, // 0xa0: jalr a2
+      // Resolved target returned in a0; stash it in t0 before restoring.
+      0x00050293, // 0xa4: mv t0,a0
+      0x00013403, // 0xa8: ld s0,0(sp)
+      0x00813483, // 0xac: ld s1,8(sp)
+      0x01013903, // 0xb0: ld s2,16(sp)
+      0x01813983, // 0xb4: ld s3,24(sp)
+      0x02013a03, // 0xb8: ld s4,32(sp)
+      0x02813a83, // 0xbc: ld s5,40(sp)
+      0x03013b03, // 0xc0: ld s6,48(sp)
+      0x03813b83, // 0xc4: ld s7,56(sp)
+      0x04013c03, // 0xc8: ld s8,64(sp)
+      0x04813c83, // 0xcc: ld s9,72(sp)
+      0x05013d03, // 0xd0: ld s10,80(sp)
+      0x05813d83, // 0xd4: ld s11,88(sp)
+      0x06013083, // 0xd8: ld ra,96(sp)
+      0x06813503, // 0xdc: ld a0,104(sp)
+      0x07013583, // 0xe0: ld a1,112(sp)
+      0x07813603, // 0xe4: ld a2,120(sp)
+      0x08013683, // 0xe8: ld a3,128(sp)
+      0x08813703, // 0xec: ld a4,136(sp)
+      0x09013783, // 0xf0: ld a5,144(sp)
+      0x09813803, // 0xf4: ld a6,152(sp)
+      0x0a013883, // 0xf8: ld a7,160(sp)
+      0x0a813407, // 0xfc: fld fs0,168(sp)
+      0x0b013487, // 0x100: fld fs1,176(sp)
+      0x0b813907, // 0x104: fld fs2,184(sp)
+      0x0c013987, // 0x108: fld fs3,192(sp)
+      0x0c813a07, // 0x10c: fld fs4,200(sp)
+      0x0d013a87, // 0x110: fld fs5,208(sp)
+      0x0d813b07, // 0x114: fld fs6,216(sp)
+      0x0e013b87, // 0x118: fld fs7,224(sp)
+      0x0e813c07, // 0x11c: fld fs8,232(sp)
+      0x0f013c87, // 0x120: fld fs9,240(sp)
+      0x0f813d07, // 0x124: fld fs10,248(sp)
+      0x10013d87, // 0x128: fld fs11,256(sp)
+      0x10810113, // 0x12c: addi sp,sp,264
+      0x00028067, // 0x130: jr t0
+      0x12345678, // 0x134: padding to align at 8 byte
+      0x12345678, // 0x138: Lreentry_ctx_ptr:
+      0xdeadbeef, // 0x13c: .quad 0
+      0x98765432, // 0x140: Lreentry_fn_ptr:
+      0xcafef00d  // 0x144: .quad 0
+  };
+
+  // Offsets of the two 8-byte literal slots at the end of ResolverCode;
+  // the placeholder words there are overwritten by the memcpys below.
+  const unsigned ReentryCtxAddrOffset = 0x138;
+  const unsigned ReentryFnAddrOffset = 0x140;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+         sizeof(uint64_t));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+         sizeof(uint64_t));
+}
+
+/// Write NumTrampolines 16-byte RISC-V trampolines followed by a single
+/// 8-byte-aligned slot holding ResolverAddr.  Each trampoline pc-relative
+/// loads the shared resolver pointer and jalr's through t0; the jalr
+/// leaves the return address in t1, which the resolver uses to identify
+/// the calling trampoline.
+void OrcRiscv64::writeTrampolines(char *TrampolineBlockWorkingMem,
+                                  ExecutorAddr TrampolineBlockTargetAddress,
+                                  ExecutorAddr ResolverAddr,
+                                  unsigned NumTrampolines) {
+
+  unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+  // Store the resolver address in the shared pointer slot after the
+  // trampolines.
+  memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+         sizeof(uint64_t));
+
+  uint32_t *Trampolines =
+      reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+  // OffsetToPtr is the displacement from the current trampoline's auipc to
+  // the pointer slot; each later trampoline is one TrampolineSize closer.
+  for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+    // Standard auipc/lo12 split: bias by 0x800 so the sign-extended low
+    // 12 bits carry correctly into the upper 20.
+    uint32_t Hi20 = (OffsetToPtr + 0x800) & 0xFFFFF000;
+    uint32_t Lo12 = OffsetToPtr - Hi20;
+    Trampolines[4 * I + 0] = 0x00000297 | Hi20; // auipc t0, %hi(Lptr)
+    Trampolines[4 * I + 1] =
+        0x0002b283 | ((Lo12 & 0xFFF) << 20); // ld t0, %lo(Lptr)
+    Trampolines[4 * I + 2] = 0x00028367;     // jalr t1, t0
+    Trampolines[4 * I + 3] = 0xdeadface;     // padding
+  }
+}
+
+/// Write one 16-byte RISC-V stub per entry.  Each stub pc-relative loads
+/// its pointer slot from the __orc_ptrs block and jumps through t0; stubs
+/// and pointer slots advance in lockstep, so the stub-to-pointer
+/// displacement is recomputed per iteration.
+void OrcRiscv64::writeIndirectStubsBlock(
+    char *StubsBlockWorkingMem, ExecutorAddr StubsBlockTargetAddress,
+    ExecutorAddr PointersBlockTargetAddress, unsigned NumStubs) {
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  //   auipc t0, %hi(ptr1) ; PC-rel load of ptr1
+  //   ld t0, %lo(t0)
+  //   jr t0 ; Jump to resolver
+  //   .quad 0 ; Pad to 16 bytes
+  // stub2:
+  //   auipc t0, %hi(ptr1) ; PC-rel load of ptr1
+  //   ld t0, %lo(t0)
+  //   jr t0 ; Jump to resolver
+  //   .quad 0
+  //
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  //   .quad 0x0
+  // ptr2:
+  //   .quad 0x0
+  //
+  // ...
+
+  assert(stubAndPointerRangesOk<OrcRiscv64>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+
+  uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+
+  for (unsigned I = 0; I < NumStubs; ++I) {
+    // Displacement from this stub's auipc to its pointer slot.
+    uint64_t PtrDisplacement =
+        PointersBlockTargetAddress - StubsBlockTargetAddress;
+    // auipc/lo12 split with the usual +0x800 carry adjustment.
+    uint32_t Hi20 = (PtrDisplacement + 0x800) & 0xFFFFF000;
+    uint32_t Lo12 = PtrDisplacement - Hi20;
+    Stub[4 * I + 0] = 0x00000297 | Hi20; // auipc t0, %hi(Lptr)
+    Stub[4 * I + 1] = 0x0002b283 | ((Lo12 & 0xFFF) << 20); // ld t0, %lo(Lptr)
+    Stub[4 * I + 2] = 0x00028067; // jr t0
+    Stub[4 * I + 3] = 0xfeedbeef; // padding
+    PointersBlockTargetAddress += PointerSize;
+    StubsBlockTargetAddress += StubSize;
+  }
+}
+
+/// Write the LoongArch64 resolver entry block.  On entry $t1 holds the
+/// return address left by a trampoline's jirl.  The code spills ra and the
+/// argument/FP-argument registers, loads the reentry context and function
+/// from two literal slots appended after the code, calls the reentry
+/// function, restores everything, and jumps to the resolved address in $t0.
+void OrcLoongArch64::writeResolverCode(char *ResolverWorkingMem,
+                                       ExecutorAddr ResolverTargetAddress,
+                                       ExecutorAddr ReentryFnAddr,
+                                       ExecutorAddr ReentryCtxAddr) {
+
+  LLVM_DEBUG({
+    dbgs() << "Writing resolver code to "
+           << formatv("{0:x16}", ResolverTargetAddress) << "\n";
+  });
+
+  const uint32_t ResolverCode[] = {
+      0x02fde063, // 0x0: addi.d $sp, $sp, -136(0xf78)
+      0x29c00061, // 0x4: st.d $ra, $sp, 0
+      0x29c02064, // 0x8: st.d $a0, $sp, 8(0x8)
+      0x29c04065, // 0xc: st.d $a1, $sp, 16(0x10)
+      0x29c06066, // 0x10: st.d $a2, $sp, 24(0x18)
+      0x29c08067, // 0x14: st.d $a3, $sp, 32(0x20)
+      0x29c0a068, // 0x18: st.d $a4, $sp, 40(0x28)
+      0x29c0c069, // 0x1c: st.d $a5, $sp, 48(0x30)
+      0x29c0e06a, // 0x20: st.d $a6, $sp, 56(0x38)
+      0x29c1006b, // 0x24: st.d $a7, $sp, 64(0x40)
+      0x2bc12060, // 0x28: fst.d $fa0, $sp, 72(0x48)
+      0x2bc14061, // 0x2c: fst.d $fa1, $sp, 80(0x50)
+      0x2bc16062, // 0x30: fst.d $fa2, $sp, 88(0x58)
+      0x2bc18063, // 0x34: fst.d $fa3, $sp, 96(0x60)
+      0x2bc1a064, // 0x38: fst.d $fa4, $sp, 104(0x68)
+      0x2bc1c065, // 0x3c: fst.d $fa5, $sp, 112(0x70)
+      0x2bc1e066, // 0x40: fst.d $fa6, $sp, 120(0x78)
+      0x2bc20067, // 0x44: fst.d $fa7, $sp, 128(0x80)
+      // $a0 = reentry context (from Lreentry_ctx_ptr below).
+      0x1c000004, // 0x48: pcaddu12i $a0, 0
+      0x28c1c084, // 0x4c: ld.d $a0, $a0, 112(0x70)
+      // $a1 = trampoline address: $t1 points just past the trampoline's
+      // jirl (trampoline + 12 bytes), so rebase by -12.
+      0x001501a5, // 0x50: move $a1, $t1
+      0x02ffd0a5, // 0x54: addi.d $a1, $a1, -12(0xff4)
+      // $a2 = reentry function (from Lreentry_fn_ptr below).
+      0x1c000006, // 0x58: pcaddu12i $a2, 0
+      0x28c1a0c6, // 0x5c: ld.d $a2, $a2, 104(0x68)
+      0x4c0000c1, // 0x60: jirl $ra, $a2, 0
+      // Resolved target returned in $a0; stash it in $t0 before restoring.
+      0x0015008c, // 0x64: move $t0, $a0
+      0x2b820067, // 0x68: fld.d $fa7, $sp, 128(0x80)
+      0x2b81e066, // 0x6c: fld.d $fa6, $sp, 120(0x78)
+      0x2b81c065, // 0x70: fld.d $fa5, $sp, 112(0x70)
+      0x2b81a064, // 0x74: fld.d $fa4, $sp, 104(0x68)
+      0x2b818063, // 0x78: fld.d $fa3, $sp, 96(0x60)
+      0x2b816062, // 0x7c: fld.d $fa2, $sp, 88(0x58)
+      0x2b814061, // 0x80: fld.d $fa1, $sp, 80(0x50)
+      0x2b812060, // 0x84: fld.d $fa0, $sp, 72(0x48)
+      0x28c1006b, // 0x88: ld.d $a7, $sp, 64(0x40)
+      0x28c0e06a, // 0x8c: ld.d $a6, $sp, 56(0x38)
+      0x28c0c069, // 0x90: ld.d $a5, $sp, 48(0x30)
+      0x28c0a068, // 0x94: ld.d $a4, $sp, 40(0x28)
+      0x28c08067, // 0x98: ld.d $a3, $sp, 32(0x20)
+      0x28c06066, // 0x9c: ld.d $a2, $sp, 24(0x18)
+      0x28c04065, // 0xa0: ld.d $a1, $sp, 16(0x10)
+      0x28c02064, // 0xa4: ld.d $a0, $sp, 8(0x8)
+      0x28c00061, // 0xa8: ld.d $ra, $sp, 0
+      0x02c22063, // 0xac: addi.d $sp, $sp, 136(0x88)
+      0x4c000180, // 0xb0: jr $t0
+      0x00000000, // 0xb4: padding to align at 8 bytes
+      0x01234567, // 0xb8: Lreentry_ctx_ptr:
+      0xdeedbeef, // 0xbc: .dword 0
+      0x98765432, // 0xc0: Lreentry_fn_ptr:
+      0xcafef00d, // 0xc4: .dword 0
+  };
+
+  // Offsets of the two 8-byte literal slots at the end of ResolverCode;
+  // the placeholder words there are overwritten by the memcpys below.
+  const unsigned ReentryCtxAddrOffset = 0xb8;
+  const unsigned ReentryFnAddrOffset = 0xc0;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+         sizeof(uint64_t));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+         sizeof(uint64_t));
+}
+
+/// Write NumTrampolines 16-byte LoongArch64 trampolines followed by a
+/// single 8-byte-aligned slot holding ResolverAddr.  Each trampoline
+/// pc-relative loads the shared resolver pointer into $t0 and jirl's
+/// through it, leaving the return address in $t1 for the resolver to
+/// identify the calling trampoline.
+void OrcLoongArch64::writeTrampolines(char *TrampolineBlockWorkingMem,
+                                      ExecutorAddr TrampolineBlockTargetAddress,
+                                      ExecutorAddr ResolverAddr,
+                                      unsigned NumTrampolines) {
+
+  LLVM_DEBUG({
+    dbgs() << "Writing trampoline code to "
+           << formatv("{0:x16}", TrampolineBlockTargetAddress) << "\n";
+  });
+
+  unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+  // Store the resolver address in the shared pointer slot after the
+  // trampolines.
+  memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+         sizeof(uint64_t));
+
+  uint32_t *Trampolines =
+      reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+  // OffsetToPtr is the displacement from the current trampoline's
+  // pcaddu12i to the pointer slot; it shrinks by one trampoline each
+  // iteration.
+  for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+    // hi20/lo12 split with the usual +0x800 carry adjustment for the
+    // sign-extended low 12 bits.
+    uint32_t Hi20 = (OffsetToPtr + 0x800) & 0xfffff000;
+    uint32_t Lo12 = OffsetToPtr - Hi20;
+    Trampolines[4 * I + 0] =
+        0x1c00000c |
+        (((Hi20 >> 12) & 0xfffff) << 5); // pcaddu12i $t0, %pc_hi20(Lptr)
+    Trampolines[4 * I + 1] =
+        0x28c0018c | ((Lo12 & 0xfff) << 10); // ld.d $t0, $t0, %pc_lo12(Lptr)
+    Trampolines[4 * I + 2] = 0x4c00018d;     // jirl $t1, $t0, 0
+    Trampolines[4 * I + 3] = 0x0;            // padding
+  }
+}
+
+/// Write one 16-byte LoongArch64 stub per entry.  Each stub pc-relative
+/// loads its pointer slot from the __orc_ptrs block into $t0 and jumps
+/// through it; stubs and pointer slots advance in lockstep, so the
+/// displacement is recomputed per iteration.
+void OrcLoongArch64::writeIndirectStubsBlock(
+    char *StubsBlockWorkingMem, ExecutorAddr StubsBlockTargetAddress,
+    ExecutorAddr PointersBlockTargetAddress, unsigned NumStubs) {
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  //   pcaddu12i $t0, %pc_hi20(ptr1) ; PC-rel load of ptr1
+  //   ld.d $t0, $t0, %pc_lo12(ptr1)
+  //   jr $t0 ; Jump to resolver
+  //   .dword 0 ; Pad to 16 bytes
+  // stub2:
+  //   pcaddu12i $t0, %pc_hi20(ptr2) ; PC-rel load of ptr2
+  //   ld.d $t0, $t0, %pc_lo12(ptr2)
+  //   jr $t0 ; Jump to resolver
+  //   .dword 0 ; Pad to 16 bytes
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  //   .dword 0x0
+  // ptr2:
+  //   .dword 0x0
+  // ...
+  LLVM_DEBUG({
+    dbgs() << "Writing stubs code to "
+           << formatv("{0:x16}", StubsBlockTargetAddress) << "\n";
+  });
+  assert(stubAndPointerRangesOk<OrcLoongArch64>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+
+  uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+
+  for (unsigned I = 0; I < NumStubs; ++I) {
+    // Displacement from this stub's pcaddu12i to its pointer slot, split
+    // into hi20/lo12 with the +0x800 carry adjustment.
+    uint64_t PtrDisplacement =
+        PointersBlockTargetAddress - StubsBlockTargetAddress;
+    uint32_t Hi20 = (PtrDisplacement + 0x800) & 0xfffff000;
+    uint32_t Lo12 = PtrDisplacement - Hi20;
+    Stub[4 * I + 0] = 0x1c00000c | (((Hi20 >> 12) & 0xfffff)
+                                    << 5); // pcaddu12i $t0, %pc_hi20(Lptr)
+    Stub[4 * I + 1] =
+        0x28c0018c | ((Lo12 & 0xfff) << 10); // ld.d $t0, $t0, %pc_lo12(Lptr)
+    Stub[4 * I + 2] = 0x4c000180;            // jr $t0
+    Stub[4 * I + 3] = 0x0;                   // padding
+    PointersBlockTargetAddress += PointerSize;
+    StubsBlockTargetAddress += StubSize;
+  }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
new file mode 100644
index 000000000000..453b8f86868a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
@@ -0,0 +1,1181 @@
+//===--------------- OrcV2CBindings.cpp - C bindings OrcV2 APIs -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/LLJIT.h"
+#include "llvm-c/Orc.h"
+#include "llvm-c/OrcEE.h"
+#include "llvm-c/TargetMachine.h"
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+class InProgressLookupState;
+
+/// Helper that lets the C API suspend and resume symbol lookups by moving
+/// the InProgressLookupState in and out of a LookupState (presumably via
+/// friend access granted by LookupState — declared elsewhere).
+class OrcV2CAPIHelper {
+public:
+  // Release ownership of the in-progress state to the caller; the
+  // LookupState is left empty until resetLookupState is called.
+  static InProgressLookupState *extractLookupState(LookupState &LS) {
+    return LS.IPLS.release();
+  }
+
+  // Hand ownership of IPLS back to the LookupState.
+  static void resetLookupState(LookupState &LS, InProgressLookupState *IPLS) {
+    return LS.reset(IPLS);
+  }
+};
+
+} // namespace orc
+} // namespace llvm
+
+// Convert an unsafe (manually ref-counted) pool entry to its C handle.
+inline LLVMOrcSymbolStringPoolEntryRef wrap(SymbolStringPoolEntryUnsafe E) {
+  return reinterpret_cast<LLVMOrcSymbolStringPoolEntryRef>(E.rawPtr());
+}
+
+// Convert a C pool-entry handle back to the unsafe C++ wrapper.
+inline SymbolStringPoolEntryUnsafe unwrap(LLVMOrcSymbolStringPoolEntryRef E) {
+  return reinterpret_cast<SymbolStringPoolEntryUnsafe::PoolEntry *>(E);
+}
+
+// Generate wrap()/unwrap() conversions between each C API opaque handle
+// type and the corresponding C++ class.
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionSession, LLVMOrcExecutionSessionRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SymbolStringPool, LLVMOrcSymbolStringPoolRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MaterializationUnit,
+                                   LLVMOrcMaterializationUnitRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MaterializationResponsibility,
+                                   LLVMOrcMaterializationResponsibilityRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITDylib, LLVMOrcJITDylibRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ResourceTracker, LLVMOrcResourceTrackerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DefinitionGenerator,
+                                   LLVMOrcDefinitionGeneratorRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(InProgressLookupState, LLVMOrcLookupStateRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeContext,
+                                   LLVMOrcThreadSafeContextRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeModule, LLVMOrcThreadSafeModuleRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITTargetMachineBuilder,
+                                   LLVMOrcJITTargetMachineBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ObjectLayer, LLVMOrcObjectLayerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRTransformLayer, LLVMOrcIRTransformLayerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ObjectTransformLayer,
+                                   LLVMOrcObjectTransformLayerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DumpObjects, LLVMOrcDumpObjectsRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IndirectStubsManager,
+                                   LLVMOrcIndirectStubsManagerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LazyCallThroughManager,
+                                   LLVMOrcLazyCallThroughManagerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJITBuilder, LLVMOrcLLJITBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJIT, LLVMOrcLLJITRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
+
+namespace {
+
+/// MaterializationUnit backed by C callbacks.  Ctx is the opaque client
+/// context; Destroy(Ctx) runs in the destructor unless materialize() has
+/// already transferred Ctx to the Materialize callback (which then owns
+/// cleanup).
+class OrcCAPIMaterializationUnit : public llvm::orc::MaterializationUnit {
+public:
+  OrcCAPIMaterializationUnit(
+      std::string Name, SymbolFlagsMap InitialSymbolFlags,
+      SymbolStringPtr InitSymbol, void *Ctx,
+      LLVMOrcMaterializationUnitMaterializeFunction Materialize,
+      LLVMOrcMaterializationUnitDiscardFunction Discard,
+      LLVMOrcMaterializationUnitDestroyFunction Destroy)
+      : llvm::orc::MaterializationUnit(
+            Interface(std::move(InitialSymbolFlags), std::move(InitSymbol))),
+        Name(std::move(Name)), Ctx(Ctx), Materialize(Materialize),
+        Discard(Discard), Destroy(Destroy) {}
+
+  ~OrcCAPIMaterializationUnit() {
+    // Only destroy the context if materialize() has not taken it.
+    if (Ctx)
+      Destroy(Ctx);
+  }
+
+  StringRef getName() const override { return Name; }
+
+  void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+    // Null out Ctx before the callback so the destructor won't call
+    // Destroy on a context the callback now owns.
+    void *Tmp = Ctx;
+    Ctx = nullptr;
+    Materialize(Tmp, wrap(R.release()));
+  }
+
+private:
+  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+    Discard(Ctx, wrap(&JD), wrap(SymbolStringPoolEntryUnsafe::from(Name)));
+  }
+
+  std::string Name;
+  void *Ctx = nullptr;
+  LLVMOrcMaterializationUnitMaterializeFunction Materialize = nullptr;
+  LLVMOrcMaterializationUnitDiscardFunction Discard = nullptr;
+  LLVMOrcMaterializationUnitDestroyFunction Destroy = nullptr;
+};
+
+// Translate C-level symbol flags (generic bit-flags + raw target flags)
+// into the C++ JITSymbolFlags representation.
+static JITSymbolFlags toJITSymbolFlags(LLVMJITSymbolFlags F) {
+
+  JITSymbolFlags JSF;
+
+  if (F.GenericFlags & LLVMJITSymbolGenericFlagsExported)
+    JSF |= JITSymbolFlags::Exported;
+  if (F.GenericFlags & LLVMJITSymbolGenericFlagsWeak)
+    JSF |= JITSymbolFlags::Weak;
+  if (F.GenericFlags & LLVMJITSymbolGenericFlagsCallable)
+    JSF |= JITSymbolFlags::Callable;
+  if (F.GenericFlags & LLVMJITSymbolGenericFlagsMaterializationSideEffectsOnly)
+    JSF |= JITSymbolFlags::MaterializationSideEffectsOnly;
+
+  // Target flags are opaque here; copy them through unchanged.
+  JSF.getTargetFlags() = F.TargetFlags;
+
+  return JSF;
+}
+
+// Inverse of toJITSymbolFlags: translate C++ JITSymbolFlags into the
+// C-level flags struct.
+static LLVMJITSymbolFlags fromJITSymbolFlags(JITSymbolFlags JSF) {
+  LLVMJITSymbolFlags F = {0, 0};
+  if (JSF & JITSymbolFlags::Exported)
+    F.GenericFlags |= LLVMJITSymbolGenericFlagsExported;
+  if (JSF & JITSymbolFlags::Weak)
+    F.GenericFlags |= LLVMJITSymbolGenericFlagsWeak;
+  if (JSF & JITSymbolFlags::Callable)
+    F.GenericFlags |= LLVMJITSymbolGenericFlagsCallable;
+  if (JSF & JITSymbolFlags::MaterializationSideEffectsOnly)
+    F.GenericFlags |= LLVMJITSymbolGenericFlagsMaterializationSideEffectsOnly;
+
+  F.TargetFlags = JSF.getTargetFlags();
+
+  return F;
+}
+
+// Build a SymbolNameSet from a C symbol list.  moveToSymbolStringPtr
+// adopts each entry's existing ref — the caller's refs are consumed
+// (NOTE(review): consistent with the C API's transfer-of-ownership
+// convention; confirm against llvm-c/Orc.h).
+static SymbolNameSet toSymbolNameSet(LLVMOrcCSymbolsList Symbols) {
+  SymbolNameSet Result;
+  Result.reserve(Symbols.Length);
+  for (size_t I = 0; I != Symbols.Length; ++I)
+    Result.insert(unwrap(Symbols.Symbols[I]).moveToSymbolStringPtr());
+  return Result;
+}
+
+// Build a SymbolMap (name -> address/flags) from an array of C pairs,
+// adopting the caller's name refs.
+static SymbolMap toSymbolMap(LLVMOrcCSymbolMapPairs Syms, size_t NumPairs) {
+  SymbolMap SM;
+  for (size_t I = 0; I != NumPairs; ++I) {
+    JITSymbolFlags Flags = toJITSymbolFlags(Syms[I].Sym.Flags);
+    SM[unwrap(Syms[I].Name).moveToSymbolStringPtr()] = {
+        ExecutorAddr(Syms[I].Sym.Address), Flags};
+  }
+  return SM;
+}
+
+// Build a SymbolDependenceMap (JITDylib -> set of symbol names) from an
+// array of C dependence-map pairs, adopting the caller's name refs.
+static SymbolDependenceMap
+toSymbolDependenceMap(LLVMOrcCDependenceMapPairs Pairs, size_t NumPairs) {
+  SymbolDependenceMap SDM;
+  for (size_t I = 0; I != NumPairs; ++I) {
+    JITDylib *JD = unwrap(Pairs[I].JD);
+    SymbolNameSet Names;
+
+    for (size_t J = 0; J != Pairs[I].Names.Length; ++J) {
+      auto Sym = Pairs[I].Names.Symbols[J];
+      Names.insert(unwrap(Sym).moveToSymbolStringPtr());
+    }
+    SDM[JD] = Names;
+  }
+  return SDM;
+}
+
+// Map the C lookup-kind enum to its C++ equivalent.
+static LookupKind toLookupKind(LLVMOrcLookupKind K) {
+  switch (K) {
+  case LLVMOrcLookupKindStatic:
+    return LookupKind::Static;
+  case LLVMOrcLookupKindDLSym:
+    return LookupKind::DLSym;
+  }
+  llvm_unreachable("unrecognized LLVMOrcLookupKind value");
+}
+
+// Map the C++ lookup-kind enum to its C equivalent.
+static LLVMOrcLookupKind fromLookupKind(LookupKind K) {
+  switch (K) {
+  case LookupKind::Static:
+    return LLVMOrcLookupKindStatic;
+  case LookupKind::DLSym:
+    return LLVMOrcLookupKindDLSym;
+  }
+  llvm_unreachable("unrecognized LookupKind value");
+}
+
+// Map the C JITDylib-lookup-flags enum to its C++ equivalent.
+static JITDylibLookupFlags
+toJITDylibLookupFlags(LLVMOrcJITDylibLookupFlags LF) {
+  switch (LF) {
+  case LLVMOrcJITDylibLookupFlagsMatchExportedSymbolsOnly:
+    return JITDylibLookupFlags::MatchExportedSymbolsOnly;
+  case LLVMOrcJITDylibLookupFlagsMatchAllSymbols:
+    return JITDylibLookupFlags::MatchAllSymbols;
+  }
+  llvm_unreachable("unrecognized LLVMOrcJITDylibLookupFlags value");
+}
+
+// Map the C++ JITDylib-lookup-flags enum to its C equivalent.
+static LLVMOrcJITDylibLookupFlags
+fromJITDylibLookupFlags(JITDylibLookupFlags LF) {
+  switch (LF) {
+  case JITDylibLookupFlags::MatchExportedSymbolsOnly:
+    return LLVMOrcJITDylibLookupFlagsMatchExportedSymbolsOnly;
+  case JITDylibLookupFlags::MatchAllSymbols:
+    return LLVMOrcJITDylibLookupFlagsMatchAllSymbols;
+  }
+  llvm_unreachable("unrecognized JITDylibLookupFlags value");
+}
+
+// Map the C symbol-lookup-flags enum to its C++ equivalent.
+static SymbolLookupFlags toSymbolLookupFlags(LLVMOrcSymbolLookupFlags SLF) {
+  switch (SLF) {
+  case LLVMOrcSymbolLookupFlagsRequiredSymbol:
+    return SymbolLookupFlags::RequiredSymbol;
+  case LLVMOrcSymbolLookupFlagsWeaklyReferencedSymbol:
+    return SymbolLookupFlags::WeaklyReferencedSymbol;
+  }
+  llvm_unreachable("unrecognized LLVMOrcSymbolLookupFlags value");
+}
+
+// Map the C++ symbol-lookup-flags enum to its C equivalent.
+static LLVMOrcSymbolLookupFlags fromSymbolLookupFlags(SymbolLookupFlags SLF) {
+  switch (SLF) {
+  case SymbolLookupFlags::RequiredSymbol:
+    return LLVMOrcSymbolLookupFlagsRequiredSymbol;
+  case SymbolLookupFlags::WeaklyReferencedSymbol:
+    return LLVMOrcSymbolLookupFlagsWeaklyReferencedSymbol;
+  }
+  llvm_unreachable("unrecognized SymbolLookupFlags value");
+}
+
+// Convert a C++ symbol definition (address + flags) to the C struct.
+static LLVMJITEvaluatedSymbol
+fromExecutorSymbolDef(const ExecutorSymbolDef &S) {
+  return {S.getAddress().getValue(), fromJITSymbolFlags(S.getFlags())};
+}
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+/// DefinitionGenerator implemented via a C callback.  tryToGenerate
+/// translates its C++ arguments to their C equivalents, invokes the
+/// client's TryToGenerate function, and restores the (possibly replaced)
+/// lookup state afterwards.  Dispose(Ctx), if provided, runs on
+/// destruction.
+class CAPIDefinitionGenerator final : public DefinitionGenerator {
+public:
+  CAPIDefinitionGenerator(
+      LLVMOrcDisposeCAPIDefinitionGeneratorFunction Dispose, void *Ctx,
+      LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate)
+      : Dispose(Dispose), Ctx(Ctx), TryToGenerate(TryToGenerate) {}
+
+  ~CAPIDefinitionGenerator() {
+    if (Dispose)
+      Dispose(Ctx);
+  }
+
+  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
+                      JITDylibLookupFlags JDLookupFlags,
+                      const SymbolLookupSet &LookupSet) override {
+
+    // Take the lookup state.
+    LLVMOrcLookupStateRef LSR = ::wrap(OrcV2CAPIHelper::extractLookupState(LS));
+
+    // Translate the lookup kind.
+    LLVMOrcLookupKind CLookupKind = fromLookupKind(K);
+
+    // Translate the JITDylibLookupFlags.
+    LLVMOrcJITDylibLookupFlags CJDLookupFlags =
+        fromJITDylibLookupFlags(JDLookupFlags);
+
+    // Translate the lookup set.  Names are passed as borrowed refs
+    // (wrapped without a retain).
+    std::vector<LLVMOrcCLookupSetElement> CLookupSet;
+    CLookupSet.reserve(LookupSet.size());
+    for (auto &KV : LookupSet) {
+      LLVMOrcSymbolStringPoolEntryRef Name =
+          ::wrap(SymbolStringPoolEntryUnsafe::from(KV.first));
+      LLVMOrcSymbolLookupFlags SLF = fromSymbolLookupFlags(KV.second);
+      CLookupSet.push_back({Name, SLF});
+    }
+
+    // Run the C TryToGenerate function.  The callback may modify LSR
+    // (e.g. to suspend the lookup), which is why it is passed by pointer.
+    auto Err = unwrap(TryToGenerate(::wrap(this), Ctx, &LSR, CLookupKind,
+                                    ::wrap(&JD), CJDLookupFlags,
+                                    CLookupSet.data(), CLookupSet.size()));
+
+    // Restore the lookup state.
+    OrcV2CAPIHelper::resetLookupState(LS, ::unwrap(LSR));
+
+    return Err;
+  }
+
+private:
+  LLVMOrcDisposeCAPIDefinitionGeneratorFunction Dispose;
+  void *Ctx;
+  LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate;
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+// Install ReportError as the session's error reporter; Ctx is passed
+// through on each invocation and each Error is wrapped as an LLVMErrorRef.
+void LLVMOrcExecutionSessionSetErrorReporter(
+    LLVMOrcExecutionSessionRef ES, LLVMOrcErrorReporterFunction ReportError,
+    void *Ctx) {
+  unwrap(ES)->setErrorReporter(
+      [=](Error Err) { ReportError(Ctx, wrap(std::move(Err))); });
+}
+
+// Return a handle to the session's symbol string pool (no ownership is
+// transferred; the pool is owned by the ExecutorProcessControl).
+LLVMOrcSymbolStringPoolRef
+LLVMOrcExecutionSessionGetSymbolStringPool(LLVMOrcExecutionSessionRef ES) {
+  return wrap(
+      unwrap(ES)->getExecutorProcessControl().getSymbolStringPool().get());
+}
+
+// Free pool entries that are no longer referenced.
+void LLVMOrcSymbolStringPoolClearDeadEntries(LLVMOrcSymbolStringPoolRef SSP) {
+  unwrap(SSP)->clearDeadEntries();
+}
+
+// Intern Name in the session's string pool; the returned handle carries a
+// ref that the C client must release.
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcExecutionSessionIntern(LLVMOrcExecutionSessionRef ES, const char *Name) {
+  return wrap(SymbolStringPoolEntryUnsafe::take(unwrap(ES)->intern(Name)));
+}
+
+// Run an asynchronous lookup for Symbols across SearchOrder, invoking
+// HandleResult (with Ctx) once all symbols reach the Ready state or an
+// error occurs.  NOTE(review): moveToSymbolStringPtr adopts the caller's
+// ref on each symbol name — callers transfer ownership of the name refs;
+// confirm against the llvm-c/Orc.h contract.
+void LLVMOrcExecutionSessionLookup(
+    LLVMOrcExecutionSessionRef ES, LLVMOrcLookupKind K,
+    LLVMOrcCJITDylibSearchOrder SearchOrder, size_t SearchOrderSize,
+    LLVMOrcCLookupSet Symbols, size_t SymbolsSize,
+    LLVMOrcExecutionSessionLookupHandleResultFunction HandleResult, void *Ctx) {
+  assert(ES && "ES cannot be null");
+  assert(SearchOrder && "SearchOrder cannot be null");
+  assert(Symbols && "Symbols cannot be null");
+  assert(HandleResult && "HandleResult cannot be null");
+
+  JITDylibSearchOrder SO;
+  for (size_t I = 0; I != SearchOrderSize; ++I)
+    SO.push_back({unwrap(SearchOrder[I].JD),
+                  toJITDylibLookupFlags(SearchOrder[I].JDLookupFlags)});
+
+  SymbolLookupSet SLS;
+  for (size_t I = 0; I != SymbolsSize; ++I)
+    SLS.add(unwrap(Symbols[I].Name).moveToSymbolStringPtr(),
+            toSymbolLookupFlags(Symbols[I].LookupFlags));
+
+  unwrap(ES)->lookup(
+      toLookupKind(K), SO, std::move(SLS), SymbolState::Ready,
+      [HandleResult, Ctx](Expected<SymbolMap> Result) {
+        // Success path: marshal the result map into a flat array of C
+        // pairs for the callback; names are passed as borrowed refs.
+        if (Result) {
+          SmallVector<LLVMOrcCSymbolMapPair> CResult;
+          for (auto &KV : *Result)
+            CResult.push_back(LLVMOrcCSymbolMapPair{
+                wrap(SymbolStringPoolEntryUnsafe::from(KV.first)),
+                fromExecutorSymbolDef(KV.second)});
+          HandleResult(LLVMErrorSuccess, CResult.data(), CResult.size(), Ctx);
+        } else
+          HandleResult(wrap(Result.takeError()), nullptr, 0, Ctx);
+      },
+      NoDependenciesToRegister);
+}
+
+// Increment the pool entry's ref-count on behalf of the C client.
+void LLVMOrcRetainSymbolStringPoolEntry(LLVMOrcSymbolStringPoolEntryRef S) {
+  unwrap(S).retain();
+}
+
+// Decrement the pool entry's ref-count on behalf of the C client.
+void LLVMOrcReleaseSymbolStringPoolEntry(LLVMOrcSymbolStringPoolEntryRef S) {
+  unwrap(S).release();
+}
+
+// Return the entry's string data; valid only while the entry is retained.
+const char *LLVMOrcSymbolStringPoolEntryStr(LLVMOrcSymbolStringPoolEntryRef S) {
+  return unwrap(S).rawPtr()->getKey().data();
+}
+
+// Create a new resource tracker for JD.  The extra Retain keeps the
+// tracker alive for the C client, which must balance it with
+// LLVMOrcReleaseResourceTracker.
+LLVMOrcResourceTrackerRef
+LLVMOrcJITDylibCreateResourceTracker(LLVMOrcJITDylibRef JD) {
+  auto RT = unwrap(JD)->createResourceTracker();
+  // Retain the pointer for the C API client.
+  RT->Retain();
+  return wrap(RT.get());
+}
+
+// Return JD's default resource tracker.  NOTE(review): unlike
+// CreateResourceTracker above, no explicit Retain is performed here —
+// presumably the default tracker is kept alive by the JITDylib itself;
+// verify against the ResourceTracker ownership rules.
+LLVMOrcResourceTrackerRef
+LLVMOrcJITDylibGetDefaultResourceTracker(LLVMOrcJITDylibRef JD) {
+  auto RT = unwrap(JD)->getDefaultResourceTracker();
+  // Retain the pointer for the C API client.
+  return wrap(RT.get());
+}
+
+// Adopt the client's ref into a temporary smart pointer, then drop the
+// extra ref taken at creation time; TmpRT's destructor finishes the job.
+void LLVMOrcReleaseResourceTracker(LLVMOrcResourceTrackerRef RT) {
+  ResourceTrackerSP TmpRT(unwrap(RT));
+  TmpRT->Release();
+}
+
+// Transfer all resources tracked by SrcRT to DstRT.
+void LLVMOrcResourceTrackerTransferTo(LLVMOrcResourceTrackerRef SrcRT,
+                                      LLVMOrcResourceTrackerRef DstRT) {
+  ResourceTrackerSP TmpRT(unwrap(SrcRT));
+  TmpRT->transferTo(*unwrap(DstRT));
+}
+
+// Remove all resources associated with RT, returning any error to the
+// C client.
+LLVMErrorRef LLVMOrcResourceTrackerRemove(LLVMOrcResourceTrackerRef RT) {
+  ResourceTrackerSP TmpRT(unwrap(RT));
+  return wrap(TmpRT->remove());
+}
+
+// Destroy a generator the client still owns (never added to a JITDylib).
+void LLVMOrcDisposeDefinitionGenerator(LLVMOrcDefinitionGeneratorRef DG) {
+  std::unique_ptr<DefinitionGenerator> TmpDG(unwrap(DG));
+}
+
+// Destroy a materialization unit the client still owns.
+void LLVMOrcDisposeMaterializationUnit(LLVMOrcMaterializationUnitRef MU) {
+  std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+}
+
+// Create an OrcCAPIMaterializationUnit driven by the given C callbacks.
+// Symbol-name refs (including InitSym) are adopted from the caller.
+LLVMOrcMaterializationUnitRef LLVMOrcCreateCustomMaterializationUnit(
+    const char *Name, void *Ctx, LLVMOrcCSymbolFlagsMapPairs Syms,
+    size_t NumSyms, LLVMOrcSymbolStringPoolEntryRef InitSym,
+    LLVMOrcMaterializationUnitMaterializeFunction Materialize,
+    LLVMOrcMaterializationUnitDiscardFunction Discard,
+    LLVMOrcMaterializationUnitDestroyFunction Destroy) {
+  SymbolFlagsMap SFM;
+  for (size_t I = 0; I != NumSyms; ++I)
+    SFM[unwrap(Syms[I].Name).moveToSymbolStringPtr()] =
+        toJITSymbolFlags(Syms[I].Flags);
+
+  auto IS = unwrap(InitSym).moveToSymbolStringPtr();
+
+  return wrap(new OrcCAPIMaterializationUnit(
+      Name, std::move(SFM), std::move(IS), Ctx, Materialize, Discard, Destroy));
+}
+
+// Create an absoluteSymbols materialization unit from an array of
+// name/address pairs; ownership of the unit passes to the C client.
+LLVMOrcMaterializationUnitRef
+LLVMOrcAbsoluteSymbols(LLVMOrcCSymbolMapPairs Syms, size_t NumPairs) {
+  SymbolMap SM = toSymbolMap(Syms, NumPairs);
+  return wrap(absoluteSymbols(std::move(SM)).release());
+}
+
+// Create a lazyReexports materialization unit: each alias name resolves
+// lazily (via LCTM/ISM) to the corresponding symbol in SourceJD.  Name
+// refs are adopted from the caller; unit ownership passes to the client.
+LLVMOrcMaterializationUnitRef LLVMOrcLazyReexports(
+    LLVMOrcLazyCallThroughManagerRef LCTM, LLVMOrcIndirectStubsManagerRef ISM,
+    LLVMOrcJITDylibRef SourceJD, LLVMOrcCSymbolAliasMapPairs CallableAliases,
+    size_t NumPairs) {
+
+  SymbolAliasMap SAM;
+  for (size_t I = 0; I != NumPairs; ++I) {
+    auto pair = CallableAliases[I];
+    JITSymbolFlags Flags = toJITSymbolFlags(pair.Entry.Flags);
+    SymbolStringPtr Name = unwrap(pair.Entry.Name).moveToSymbolStringPtr();
+    SAM[unwrap(pair.Name).moveToSymbolStringPtr()] =
+        SymbolAliasMapEntry(Name, Flags);
+  }
+
+  return wrap(lazyReexports(*unwrap(LCTM), *unwrap(ISM), *unwrap(SourceJD),
+                            std::move(SAM))
+                  .release());
+}
+
+// Destroy a materialization responsibility the client still owns.
+void LLVMOrcDisposeMaterializationResponsibility(
+    LLVMOrcMaterializationResponsibilityRef MR) {
+  std::unique_ptr<MaterializationResponsibility> TmpMR(unwrap(MR));
+}
+
+// Return the JITDylib the responsibility materializes into (borrowed).
+LLVMOrcJITDylibRef LLVMOrcMaterializationResponsibilityGetTargetDylib(
+    LLVMOrcMaterializationResponsibilityRef MR) {
+  return wrap(&unwrap(MR)->getTargetJITDylib());
+}
+
+// Return the owning execution session (borrowed).
+LLVMOrcExecutionSessionRef
+LLVMOrcMaterializationResponsibilityGetExecutionSession(
+    LLVMOrcMaterializationResponsibilityRef MR) {
+  return wrap(&unwrap(MR)->getExecutionSession());
+}
+
+// Return a malloc'd array of the responsibility's (name, flags) pairs.
+// The array must be freed with LLVMOrcDisposeCSymbolFlagsMap; names are
+// returned as borrowed refs (no retain performed here).
+LLVMOrcCSymbolFlagsMapPairs LLVMOrcMaterializationResponsibilityGetSymbols(
+    LLVMOrcMaterializationResponsibilityRef MR, size_t *NumPairs) {
+
+  auto Symbols = unwrap(MR)->getSymbols();
+  LLVMOrcCSymbolFlagsMapPairs Result = static_cast<LLVMOrcCSymbolFlagsMapPairs>(
+      safe_malloc(Symbols.size() * sizeof(LLVMOrcCSymbolFlagsMapPair)));
+  size_t I = 0;
+  for (auto const &pair : Symbols) {
+    auto Name = wrap(SymbolStringPoolEntryUnsafe::from(pair.first));
+    auto Flags = pair.second;
+    Result[I] = {Name, fromJITSymbolFlags(Flags)};
+    I++;
+  }
+  *NumPairs = Symbols.size();
+  return Result;
+}
+
+// Free an array returned by LLVMOrcMaterializationResponsibilityGetSymbols.
+void LLVMOrcDisposeCSymbolFlagsMap(LLVMOrcCSymbolFlagsMapPairs Pairs) {
+  free(Pairs);
+}
+
+// Return the responsibility's initializer symbol (borrowed ref; may wrap
+// a null entry if there is no initializer).
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcMaterializationResponsibilityGetInitializerSymbol(
+    LLVMOrcMaterializationResponsibilityRef MR) {
+  auto Sym = unwrap(MR)->getInitializerSymbol();
+  return wrap(SymbolStringPoolEntryUnsafe::from(Sym));
+}
+
+// Return a malloc'd array of currently-requested symbol names (borrowed
+// refs).  The array must be freed with LLVMOrcDisposeSymbols.
+LLVMOrcSymbolStringPoolEntryRef *
+LLVMOrcMaterializationResponsibilityGetRequestedSymbols(
+    LLVMOrcMaterializationResponsibilityRef MR, size_t *NumSymbols) {
+
+  auto Symbols = unwrap(MR)->getRequestedSymbols();
+  LLVMOrcSymbolStringPoolEntryRef *Result =
+      static_cast<LLVMOrcSymbolStringPoolEntryRef *>(safe_malloc(
+          Symbols.size() * sizeof(LLVMOrcSymbolStringPoolEntryRef)));
+  size_t I = 0;
+  for (auto &Name : Symbols) {
+    Result[I] = wrap(SymbolStringPoolEntryUnsafe::from(Name));
+    I++;
+  }
+  *NumSymbols = Symbols.size();
+  return Result;
+}
+
+// Free an array returned by ...GetRequestedSymbols.
+void LLVMOrcDisposeSymbols(LLVMOrcSymbolStringPoolEntryRef *Symbols) {
+  free(Symbols);
+}
+
+// Notify that the given symbols have resolved to concrete addresses.
+LLVMErrorRef LLVMOrcMaterializationResponsibilityNotifyResolved(
+    LLVMOrcMaterializationResponsibilityRef MR, LLVMOrcCSymbolMapPairs Symbols,
+    size_t NumSymbols) {
+  SymbolMap SM = toSymbolMap(Symbols, NumSymbols);
+  return wrap(unwrap(MR)->notifyResolved(std::move(SM)));
+}
+
+// Notify that materialization is complete, translating the C dependence
+// groups into SymbolDependenceGroups first.
+LLVMErrorRef LLVMOrcMaterializationResponsibilityNotifyEmitted(
+    LLVMOrcMaterializationResponsibilityRef MR,
+    LLVMOrcCSymbolDependenceGroup *SymbolDepGroups, size_t NumSymbolDepGroups) {
+  std::vector<SymbolDependenceGroup> SDGs;
+  SDGs.reserve(NumSymbolDepGroups);
+  for (size_t I = 0; I != NumSymbolDepGroups; ++I) {
+    SDGs.push_back(SymbolDependenceGroup());
+    auto &SDG = SDGs.back();
+    SDG.Symbols = toSymbolNameSet(SymbolDepGroups[I].Symbols);
+    SDG.Dependencies = toSymbolDependenceMap(
+        SymbolDepGroups[I].Dependencies, SymbolDepGroups[I].NumDependencies);
+  }
+  return wrap(unwrap(MR)->notifyEmitted(SDGs));
+}
+
+// Add extra symbols to the set this responsibility is materializing.
+// Name refs are adopted from the caller.
+LLVMErrorRef LLVMOrcMaterializationResponsibilityDefineMaterializing(
+    LLVMOrcMaterializationResponsibilityRef MR,
+    LLVMOrcCSymbolFlagsMapPairs Syms, size_t NumSyms) {
+  SymbolFlagsMap SFM;
+  for (size_t I = 0; I != NumSyms; ++I)
+    SFM[unwrap(Syms[I].Name).moveToSymbolStringPtr()] =
+        toJITSymbolFlags(Syms[I].Flags);
+
+  return wrap(unwrap(MR)->defineMaterializing(std::move(SFM)));
+}
+
+// Replace this responsibility's remaining work with MU; ownership of MU
+// transfers to the JIT.
+LLVMErrorRef LLVMOrcMaterializationResponsibilityReplace(
+    LLVMOrcMaterializationResponsibilityRef MR,
+    LLVMOrcMaterializationUnitRef MU) {
+  std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+  return wrap(unwrap(MR)->replace(std::move(TmpMU)));
+}
+
+// Split off responsibility for Symbols into a new responsibility object,
+// returned via *Result on success.  Name refs are adopted from the caller.
+LLVMErrorRef LLVMOrcMaterializationResponsibilityDelegate(
+    LLVMOrcMaterializationResponsibilityRef MR,
+    LLVMOrcSymbolStringPoolEntryRef *Symbols, size_t NumSymbols,
+    LLVMOrcMaterializationResponsibilityRef *Result) {
+  SymbolNameSet Syms;
+  for (size_t I = 0; I != NumSymbols; I++) {
+    Syms.insert(unwrap(Symbols[I]).moveToSymbolStringPtr());
+  }
+  auto OtherMR = unwrap(MR)->delegate(Syms);
+
+  if (!OtherMR) {
+    return wrap(OtherMR.takeError());
+  }
+  // Hand ownership of the new responsibility to the C client.
+  *Result = wrap(OtherMR->release());
+  return LLVMErrorSuccess;
+}
+
+// Report that materialization has failed for all covered symbols.
+void LLVMOrcMaterializationResponsibilityFailMaterialization(
+    LLVMOrcMaterializationResponsibilityRef MR) {
+  unwrap(MR)->failMaterialization();
+}
+
+void LLVMOrcIRTransformLayerEmit(LLVMOrcIRTransformLayerRef IRLayer,
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ unwrap(IRLayer)->emit(
+ std::unique_ptr<MaterializationResponsibility>(unwrap(MR)),
+ std::move(*TmpTSM));
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcExecutionSessionCreateBareJITDylib(LLVMOrcExecutionSessionRef ES,
+ const char *Name) {
+ return wrap(&unwrap(ES)->createBareJITDylib(Name));
+}
+
+LLVMErrorRef
+LLVMOrcExecutionSessionCreateJITDylib(LLVMOrcExecutionSessionRef ES,
+ LLVMOrcJITDylibRef *Result,
+ const char *Name) {
+ auto JD = unwrap(ES)->createJITDylib(Name);
+ if (!JD)
+ return wrap(JD.takeError());
+ *Result = wrap(&*JD);
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcExecutionSessionGetJITDylibByName(LLVMOrcExecutionSessionRef ES,
+ const char *Name) {
+ return wrap(unwrap(ES)->getJITDylibByName(Name));
+}
+
+/// Define a MaterializationUnit in a JITDylib.
+/// Ownership convention of the C API: on success the JITDylib takes
+/// ownership of MU; on failure ownership stays with the caller, hence the
+/// release() before returning the error.
+LLVMErrorRef LLVMOrcJITDylibDefine(LLVMOrcJITDylibRef JD,
+                                   LLVMOrcMaterializationUnitRef MU) {
+  std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+
+  if (auto Err = unwrap(JD)->define(TmpMU)) {
+    TmpMU.release();
+    return wrap(std::move(Err));
+  }
+  return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcJITDylibClear(LLVMOrcJITDylibRef JD) {
+ return wrap(unwrap(JD)->clear());
+}
+
+void LLVMOrcJITDylibAddGenerator(LLVMOrcJITDylibRef JD,
+ LLVMOrcDefinitionGeneratorRef DG) {
+ unwrap(JD)->addGenerator(std::unique_ptr<DefinitionGenerator>(unwrap(DG)));
+}
+
+LLVMOrcDefinitionGeneratorRef LLVMOrcCreateCustomCAPIDefinitionGenerator(
+ LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction F, void *Ctx,
+ LLVMOrcDisposeCAPIDefinitionGeneratorFunction Dispose) {
+ auto DG = std::make_unique<CAPIDefinitionGenerator>(Dispose, Ctx, F);
+ return wrap(DG.release());
+}
+
+void LLVMOrcLookupStateContinueLookup(LLVMOrcLookupStateRef S,
+ LLVMErrorRef Err) {
+ LookupState LS;
+ OrcV2CAPIHelper::resetLookupState(LS, ::unwrap(S));
+ LS.continueLookup(unwrap(Err));
+}
+
+/// Create a DynamicLibrarySearchGenerator that reflects symbols from the
+/// current process. Filter (with its FilterCtx) optionally restricts which
+/// symbols are visible; both are captured by value into the predicate and
+/// must remain valid for the lifetime of the generator.
+/// On failure *Result is nulled and the error returned.
+LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForProcess(
+    LLVMOrcDefinitionGeneratorRef *Result, char GlobalPrefix,
+    LLVMOrcSymbolPredicate Filter, void *FilterCtx) {
+  assert(Result && "Result can not be null");
+  assert((Filter || !FilterCtx) &&
+         "if Filter is null then FilterCtx must also be null");
+
+  DynamicLibrarySearchGenerator::SymbolPredicate Pred;
+  if (Filter)
+    Pred = [=](const SymbolStringPtr &Name) -> bool {
+      return Filter(FilterCtx, wrap(SymbolStringPoolEntryUnsafe::from(Name)));
+    };
+
+  auto ProcessSymsGenerator =
+      DynamicLibrarySearchGenerator::GetForCurrentProcess(GlobalPrefix, Pred);
+
+  if (!ProcessSymsGenerator) {
+    *Result = nullptr;
+    return wrap(ProcessSymsGenerator.takeError());
+  }
+
+  *Result = wrap(ProcessSymsGenerator->release());
+  return LLVMErrorSuccess;
+}
+
+/// Create a DynamicLibrarySearchGenerator for the dynamic library at
+/// FileName. Filter semantics and lifetime requirements match the
+/// ...ForProcess variant. On failure *Result is nulled and the error
+/// returned.
+LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForPath(
+    LLVMOrcDefinitionGeneratorRef *Result, const char *FileName,
+    char GlobalPrefix, LLVMOrcSymbolPredicate Filter, void *FilterCtx) {
+  assert(Result && "Result can not be null");
+  assert(FileName && "FileName can not be null");
+  assert((Filter || !FilterCtx) &&
+         "if Filter is null then FilterCtx must also be null");
+
+  DynamicLibrarySearchGenerator::SymbolPredicate Pred;
+  if (Filter)
+    Pred = [=](const SymbolStringPtr &Name) -> bool {
+      return Filter(FilterCtx, wrap(SymbolStringPoolEntryUnsafe::from(Name)));
+    };
+
+  auto LibrarySymsGenerator =
+      DynamicLibrarySearchGenerator::Load(FileName, GlobalPrefix, Pred);
+
+  if (!LibrarySymsGenerator) {
+    *Result = nullptr;
+    return wrap(LibrarySymsGenerator.takeError());
+  }
+
+  *Result = wrap(LibrarySymsGenerator->release());
+  return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcCreateStaticLibrarySearchGeneratorForPath(
+ LLVMOrcDefinitionGeneratorRef *Result, LLVMOrcObjectLayerRef ObjLayer,
+ const char *FileName) {
+ assert(Result && "Result can not be null");
+ assert(FileName && "Filename can not be null");
+ assert(ObjLayer && "ObjectLayer can not be null");
+
+ auto LibrarySymsGenerator =
+ StaticLibraryDefinitionGenerator::Load(*unwrap(ObjLayer), FileName);
+ if (!LibrarySymsGenerator) {
+ *Result = nullptr;
+ return wrap(LibrarySymsGenerator.takeError());
+ }
+ *Result = wrap(LibrarySymsGenerator->release());
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcThreadSafeContextRef LLVMOrcCreateNewThreadSafeContext(void) {
+ return wrap(new ThreadSafeContext(std::make_unique<LLVMContext>()));
+}
+
+LLVMContextRef
+LLVMOrcThreadSafeContextGetContext(LLVMOrcThreadSafeContextRef TSCtx) {
+ return wrap(unwrap(TSCtx)->getContext());
+}
+
+void LLVMOrcDisposeThreadSafeContext(LLVMOrcThreadSafeContextRef TSCtx) {
+ delete unwrap(TSCtx);
+}
+
+LLVMErrorRef
+LLVMOrcThreadSafeModuleWithModuleDo(LLVMOrcThreadSafeModuleRef TSM,
+ LLVMOrcGenericIRModuleOperationFunction F,
+ void *Ctx) {
+ return wrap(unwrap(TSM)->withModuleDo(
+ [&](Module &M) { return unwrap(F(Ctx, wrap(&M))); }));
+}
+
+LLVMOrcThreadSafeModuleRef
+LLVMOrcCreateNewThreadSafeModule(LLVMModuleRef M,
+ LLVMOrcThreadSafeContextRef TSCtx) {
+ return wrap(
+ new ThreadSafeModule(std::unique_ptr<Module>(unwrap(M)), *unwrap(TSCtx)));
+}
+
+void LLVMOrcDisposeThreadSafeModule(LLVMOrcThreadSafeModuleRef TSM) {
+ delete unwrap(TSM);
+}
+
+/// Create a JITTargetMachineBuilder for the host by detecting the host
+/// triple/CPU/features. On success *Result owns a new builder; on failure
+/// *Result is set to null and the error is returned.
+LLVMErrorRef LLVMOrcJITTargetMachineBuilderDetectHost(
+    LLVMOrcJITTargetMachineBuilderRef *Result) {
+  assert(Result && "Result can not be null");
+
+  auto JTMB = JITTargetMachineBuilder::detectHost();
+  if (!JTMB) {
+    // Fix: null the caller's out-slot (*Result), not the local pointer
+    // parameter. The previous `Result = nullptr;` left *Result
+    // uninitialized on the error path, inconsistent with the other
+    // Create*/Detect* APIs in this file.
+    *Result = nullptr;
+    return wrap(JTMB.takeError());
+  }
+
+  *Result = wrap(new JITTargetMachineBuilder(std::move(*JTMB)));
+  return LLVMErrorSuccess;
+}
+
+/// Create a JITTargetMachineBuilder by copying the configuration (triple,
+/// CPU, reloc/code model, opt level, features, options) of an existing
+/// TargetMachine. Takes ownership of TM and disposes it before returning.
+LLVMOrcJITTargetMachineBuilderRef
+LLVMOrcJITTargetMachineBuilderCreateFromTargetMachine(LLVMTargetMachineRef TM) {
+  auto *TemplateTM = unwrap(TM);
+
+  auto JTMB =
+      std::make_unique<JITTargetMachineBuilder>(TemplateTM->getTargetTriple());
+
+  (*JTMB)
+      .setCPU(TemplateTM->getTargetCPU().str())
+      .setRelocationModel(TemplateTM->getRelocationModel())
+      .setCodeModel(TemplateTM->getCodeModel())
+      .setCodeGenOptLevel(TemplateTM->getOptLevel())
+      .setFeatures(TemplateTM->getTargetFeatureString())
+      .setOptions(TemplateTM->Options);
+
+  // The template machine has been fully copied; release it on the caller's
+  // behalf (the C API documents this call as consuming TM).
+  LLVMDisposeTargetMachine(TM);
+
+  return wrap(JTMB.release());
+}
+
+void LLVMOrcDisposeJITTargetMachineBuilder(
+ LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ delete unwrap(JTMB);
+}
+
+/// Return the builder's target triple as a heap-allocated C string.
+/// The caller owns the returned buffer and must free() it.
+/// NOTE(review): the malloc result is not checked; on allocation failure
+/// strcpy would dereference null — consider a null check (or strdup).
+char *LLVMOrcJITTargetMachineBuilderGetTargetTriple(
+    LLVMOrcJITTargetMachineBuilderRef JTMB) {
+  auto Tmp = unwrap(JTMB)->getTargetTriple().str();
+  char *TargetTriple = (char *)malloc(Tmp.size() + 1);
+  strcpy(TargetTriple, Tmp.c_str());
+  return TargetTriple;
+}
+
+void LLVMOrcJITTargetMachineBuilderSetTargetTriple(
+ LLVMOrcJITTargetMachineBuilderRef JTMB, const char *TargetTriple) {
+ unwrap(JTMB)->getTargetTriple() = Triple(TargetTriple);
+}
+
+LLVMErrorRef LLVMOrcObjectLayerAddObjectFile(LLVMOrcObjectLayerRef ObjLayer,
+ LLVMOrcJITDylibRef JD,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(ObjLayer)->add(
+ *unwrap(JD), std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcObjectLayerAddObjectFileWithRT(LLVMOrcObjectLayerRef ObjLayer,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(
+ unwrap(ObjLayer)->add(ResourceTrackerSP(unwrap(RT)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+void LLVMOrcObjectLayerEmit(LLVMOrcObjectLayerRef ObjLayer,
+ LLVMOrcMaterializationResponsibilityRef R,
+ LLVMMemoryBufferRef ObjBuffer) {
+ unwrap(ObjLayer)->emit(
+ std::unique_ptr<MaterializationResponsibility>(unwrap(R)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer)));
+}
+
+void LLVMOrcDisposeObjectLayer(LLVMOrcObjectLayerRef ObjLayer) {
+ delete unwrap(ObjLayer);
+}
+
+/// Install a C transform callback on an IRTransformLayer.
+/// Protocol: the module is handed to the callback as a heap-allocated
+/// ThreadSafeModule ref. On success the callback may replace the ref; the
+/// module is moved out and the ref disposed here. On error the callback
+/// must have reset the ref to null (asserted below).
+void LLVMOrcIRTransformLayerSetTransform(
+    LLVMOrcIRTransformLayerRef IRTransformLayer,
+    LLVMOrcIRTransformLayerTransformFunction TransformFunction, void *Ctx) {
+  unwrap(IRTransformLayer)
+      ->setTransform(
+          [=](ThreadSafeModule TSM,
+              MaterializationResponsibility &R) -> Expected<ThreadSafeModule> {
+            LLVMOrcThreadSafeModuleRef TSMRef =
+                wrap(new ThreadSafeModule(std::move(TSM)));
+            if (LLVMErrorRef Err = TransformFunction(Ctx, &TSMRef, wrap(&R))) {
+              assert(!TSMRef && "TSMRef was not reset to null on error");
+              return unwrap(Err);
+            }
+            assert(TSMRef && "Transform succeeded, but TSMRef was set to null");
+            ThreadSafeModule Result = std::move(*unwrap(TSMRef));
+            LLVMOrcDisposeThreadSafeModule(TSMRef);
+            return std::move(Result);
+          });
+}
+
+/// Install a C transform callback on an ObjectTransformLayer.
+/// Same ref-ownership protocol as the IR variant: the buffer ref is handed
+/// to the callback, which may replace it; on error the callback must null
+/// the ref (asserted below), and on success ownership is reclaimed here.
+void LLVMOrcObjectTransformLayerSetTransform(
+    LLVMOrcObjectTransformLayerRef ObjTransformLayer,
+    LLVMOrcObjectTransformLayerTransformFunction TransformFunction, void *Ctx) {
+  unwrap(ObjTransformLayer)
+      ->setTransform([TransformFunction, Ctx](std::unique_ptr<MemoryBuffer> Obj)
+                         -> Expected<std::unique_ptr<MemoryBuffer>> {
+        LLVMMemoryBufferRef ObjBuffer = wrap(Obj.release());
+        if (LLVMErrorRef Err = TransformFunction(Ctx, &ObjBuffer)) {
+          assert(!ObjBuffer && "ObjBuffer was not reset to null on error");
+          return unwrap(Err);
+        }
+        return std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer));
+      });
+}
+
+LLVMOrcDumpObjectsRef LLVMOrcCreateDumpObjects(const char *DumpDir,
+ const char *IdentifierOverride) {
+ assert(DumpDir && "DumpDir should not be null");
+ assert(IdentifierOverride && "IdentifierOverride should not be null");
+ return wrap(new DumpObjects(DumpDir, IdentifierOverride));
+}
+
+void LLVMOrcDisposeDumpObjects(LLVMOrcDumpObjectsRef DumpObjects) {
+ delete unwrap(DumpObjects);
+}
+
+/// Apply a DumpObjects instance to *ObjBuffer. Takes ownership of the input
+/// buffer; on success *ObjBuffer is replaced with the resulting buffer, on
+/// failure *ObjBuffer is nulled and the error returned.
+LLVMErrorRef LLVMOrcDumpObjects_CallOperator(LLVMOrcDumpObjectsRef DumpObjects,
+                                             LLVMMemoryBufferRef *ObjBuffer) {
+  std::unique_ptr<MemoryBuffer> OB(unwrap(*ObjBuffer));
+  if (auto Result = (*unwrap(DumpObjects))(std::move(OB))) {
+    *ObjBuffer = wrap(Result->release());
+    return LLVMErrorSuccess;
+  } else {
+    *ObjBuffer = nullptr;
+    return wrap(Result.takeError());
+  }
+}
+
+LLVMOrcLLJITBuilderRef LLVMOrcCreateLLJITBuilder(void) {
+ return wrap(new LLJITBuilder());
+}
+
+void LLVMOrcDisposeLLJITBuilder(LLVMOrcLLJITBuilderRef Builder) {
+ delete unwrap(Builder);
+}
+
+void LLVMOrcLLJITBuilderSetJITTargetMachineBuilder(
+ LLVMOrcLLJITBuilderRef Builder, LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ unwrap(Builder)->setJITTargetMachineBuilder(std::move(*unwrap(JTMB)));
+ LLVMOrcDisposeJITTargetMachineBuilder(JTMB);
+}
+
+void LLVMOrcLLJITBuilderSetObjectLinkingLayerCreator(
+ LLVMOrcLLJITBuilderRef Builder,
+ LLVMOrcLLJITBuilderObjectLinkingLayerCreatorFunction F, void *Ctx) {
+ unwrap(Builder)->setObjectLinkingLayerCreator(
+ [=](ExecutionSession &ES, const Triple &TT) {
+ auto TTStr = TT.str();
+ return std::unique_ptr<ObjectLayer>(
+ unwrap(F(Ctx, wrap(&ES), TTStr.c_str())));
+ });
+}
+
+/// Create an LLJIT instance from the given builder (a default builder is
+/// used if Builder is null). The builder is consumed and disposed in all
+/// cases. On success *Result owns the new LLJIT; on failure *Result is set
+/// to null and the error is returned.
+LLVMErrorRef LLVMOrcCreateLLJIT(LLVMOrcLLJITRef *Result,
+                                LLVMOrcLLJITBuilderRef Builder) {
+  assert(Result && "Result can not be null");
+
+  if (!Builder)
+    Builder = LLVMOrcCreateLLJITBuilder();
+
+  auto J = unwrap(Builder)->create();
+  LLVMOrcDisposeLLJITBuilder(Builder);
+
+  if (!J) {
+    // Fix: write through the out-parameter (*Result) rather than nulling
+    // the local pointer parameter, so callers observe a defined value on
+    // the error path (previously *Result was left uninitialized).
+    *Result = nullptr;
+    return wrap(J.takeError());
+  }
+
+  *Result = wrap(J->release());
+  return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcDisposeLLJIT(LLVMOrcLLJITRef J) {
+ delete unwrap(J);
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcExecutionSessionRef LLVMOrcLLJITGetExecutionSession(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getExecutionSession());
+}
+
+LLVMOrcJITDylibRef LLVMOrcLLJITGetMainJITDylib(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getMainJITDylib());
+}
+
+const char *LLVMOrcLLJITGetTripleString(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getTargetTriple().str().c_str();
+}
+
+char LLVMOrcLLJITGetGlobalPrefix(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getDataLayout().getGlobalPrefix();
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcLLJITMangleAndIntern(LLVMOrcLLJITRef J, const char *UnmangledName) {
+ return wrap(SymbolStringPoolEntryUnsafe::take(
+ unwrap(J)->mangleAndIntern(UnmangledName)));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFile(LLVMOrcLLJITRef J, LLVMOrcJITDylibRef JD,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(J)->addObjectFile(
+ *unwrap(JD), std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFileWithRT(LLVMOrcLLJITRef J,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(J)->addObjectFile(
+ ResourceTrackerSP(unwrap(RT)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddLLVMIRModule(LLVMOrcLLJITRef J,
+ LLVMOrcJITDylibRef JD,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addIRModule(*unwrap(JD), std::move(*TmpTSM)));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddLLVMIRModuleWithRT(LLVMOrcLLJITRef J,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addIRModule(ResourceTrackerSP(unwrap(RT)),
+ std::move(*TmpTSM)));
+}
+
+/// Look up a symbol in the LLJIT instance by unmangled name (LLJIT::lookup
+/// applies the platform mangling internally). On failure *Result is zeroed
+/// and the error returned; on success it receives the symbol's address.
+LLVMErrorRef LLVMOrcLLJITLookup(LLVMOrcLLJITRef J,
+                                LLVMOrcJITTargetAddress *Result,
+                                const char *Name) {
+  assert(Result && "Result can not be null");
+
+  auto Sym = unwrap(J)->lookup(Name);
+  if (!Sym) {
+    *Result = 0;
+    return wrap(Sym.takeError());
+  }
+
+  *Result = Sym->getValue();
+  return LLVMErrorSuccess;
+}
+
+LLVMOrcObjectLayerRef LLVMOrcLLJITGetObjLinkingLayer(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getObjLinkingLayer());
+}
+
+LLVMOrcObjectTransformLayerRef
+LLVMOrcLLJITGetObjTransformLayer(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getObjTransformLayer());
+}
+
+LLVMOrcObjectLayerRef
+LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(
+ LLVMOrcExecutionSessionRef ES) {
+ assert(ES && "ES must not be null");
+ return wrap(new RTDyldObjectLinkingLayer(
+ *unwrap(ES), [] { return std::make_unique<SectionMemoryManager>(); }));
+}
+
+/// Create an RTDyldObjectLinkingLayer whose memory managers are backed by
+/// MCJIT-style C callbacks. The callback bundle is moved into the layer's
+/// memory-manager factory lambda; NotifyTerminating fires once when that
+/// bundle is finally destroyed (i.e. when the layer is disposed).
+/// Fix: removed the unreachable `return nullptr;` that followed the
+/// unconditional return (dead code).
+LLVMOrcObjectLayerRef
+LLVMOrcCreateRTDyldObjectLinkingLayerWithMCJITMemoryManagerLikeCallbacks(
+    LLVMOrcExecutionSessionRef ES, void *CreateContextCtx,
+    LLVMMemoryManagerCreateContextCallback CreateContext,
+    LLVMMemoryManagerNotifyTerminatingCallback NotifyTerminating,
+    LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
+    LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
+    LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
+    LLVMMemoryManagerDestroyCallback Destroy) {
+
+  // Move-only holder for the callback set. The move constructor swaps with
+  // a default-initialized (all-null) state so only the final owner runs
+  // NotifyTerminating in its destructor.
+  struct MCJITMemoryManagerLikeCallbacks {
+    MCJITMemoryManagerLikeCallbacks() = default;
+    MCJITMemoryManagerLikeCallbacks(
+        void *CreateContextCtx,
+        LLVMMemoryManagerCreateContextCallback CreateContext,
+        LLVMMemoryManagerNotifyTerminatingCallback NotifyTerminating,
+        LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
+        LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
+        LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
+        LLVMMemoryManagerDestroyCallback Destroy)
+        : CreateContextCtx(CreateContextCtx), CreateContext(CreateContext),
+          NotifyTerminating(NotifyTerminating),
+          AllocateCodeSection(AllocateCodeSection),
+          AllocateDataSection(AllocateDataSection),
+          FinalizeMemory(FinalizeMemory), Destroy(Destroy) {}
+
+    MCJITMemoryManagerLikeCallbacks(MCJITMemoryManagerLikeCallbacks &&Other) {
+      std::swap(CreateContextCtx, Other.CreateContextCtx);
+      std::swap(CreateContext, Other.CreateContext);
+      std::swap(NotifyTerminating, Other.NotifyTerminating);
+      std::swap(AllocateCodeSection, Other.AllocateCodeSection);
+      std::swap(AllocateDataSection, Other.AllocateDataSection);
+      std::swap(FinalizeMemory, Other.FinalizeMemory);
+      std::swap(Destroy, Other.Destroy);
+    }
+
+    ~MCJITMemoryManagerLikeCallbacks() {
+      if (NotifyTerminating)
+        NotifyTerminating(CreateContextCtx);
+    }
+
+    void *CreateContextCtx = nullptr;
+    LLVMMemoryManagerCreateContextCallback CreateContext = nullptr;
+    LLVMMemoryManagerNotifyTerminatingCallback NotifyTerminating = nullptr;
+    LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection = nullptr;
+    LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection = nullptr;
+    LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory = nullptr;
+    LLVMMemoryManagerDestroyCallback Destroy = nullptr;
+  };
+
+  // Per-object memory manager that forwards to the shared callback bundle.
+  // Holds a reference to the bundle owned by the layer's factory lambda, so
+  // instances must not outlive the layer (they are released via resource
+  // removal before the layer is destroyed).
+  class MCJITMemoryManagerLikeCallbacksMemMgr : public RTDyldMemoryManager {
+  public:
+    MCJITMemoryManagerLikeCallbacksMemMgr(
+        const MCJITMemoryManagerLikeCallbacks &CBs)
+        : CBs(CBs) {
+      Opaque = CBs.CreateContext(CBs.CreateContextCtx);
+    }
+    ~MCJITMemoryManagerLikeCallbacksMemMgr() override { CBs.Destroy(Opaque); }
+
+    uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+                                 unsigned SectionID,
+                                 StringRef SectionName) override {
+      return CBs.AllocateCodeSection(Opaque, Size, Alignment, SectionID,
+                                     SectionName.str().c_str());
+    }
+
+    uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+                                 unsigned SectionID, StringRef SectionName,
+                                 bool isReadOnly) override {
+      return CBs.AllocateDataSection(Opaque, Size, Alignment, SectionID,
+                                     SectionName.str().c_str(), isReadOnly);
+    }
+
+    bool finalizeMemory(std::string *ErrMsg) override {
+      char *ErrMsgCString = nullptr;
+      bool Result = CBs.FinalizeMemory(Opaque, &ErrMsgCString);
+      assert((Result || !ErrMsgCString) &&
+             "Did not expect an error message if FinalizeMemory succeeded");
+      // The callback allocates the message with malloc; copy and free it.
+      if (ErrMsgCString) {
+        if (ErrMsg)
+          *ErrMsg = ErrMsgCString;
+        free(ErrMsgCString);
+      }
+      return Result;
+    }
+
+  private:
+    const MCJITMemoryManagerLikeCallbacks &CBs;
+    void *Opaque = nullptr;
+  };
+
+  assert(ES && "ES must not be null");
+  assert(CreateContext && "CreateContext must not be null");
+  assert(NotifyTerminating && "NotifyTerminating must not be null");
+  assert(AllocateCodeSection && "AllocateCodeSection must not be null");
+  assert(AllocateDataSection && "AllocateDataSection must not be null");
+  assert(FinalizeMemory && "FinalizeMemory must not be null");
+  assert(Destroy && "Destroy must not be null");
+
+  MCJITMemoryManagerLikeCallbacks CBs(
+      CreateContextCtx, CreateContext, NotifyTerminating, AllocateCodeSection,
+      AllocateDataSection, FinalizeMemory, Destroy);
+
+  return wrap(new RTDyldObjectLinkingLayer(*unwrap(ES), [CBs = std::move(CBs)] {
+    return std::make_unique<MCJITMemoryManagerLikeCallbacksMemMgr>(CBs);
+  }));
+}
+
+void LLVMOrcRTDyldObjectLinkingLayerRegisterJITEventListener(
+ LLVMOrcObjectLayerRef RTDyldObjLinkingLayer,
+ LLVMJITEventListenerRef Listener) {
+ assert(RTDyldObjLinkingLayer && "RTDyldObjLinkingLayer must not be null");
+ assert(Listener && "Listener must not be null");
+ reinterpret_cast<RTDyldObjectLinkingLayer *>(unwrap(RTDyldObjLinkingLayer))
+ ->registerJITEventListener(*unwrap(Listener));
+}
+
+LLVMOrcIRTransformLayerRef LLVMOrcLLJITGetIRTransformLayer(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getIRTransformLayer());
+}
+
+const char *LLVMOrcLLJITGetDataLayoutStr(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getDataLayout().getStringRepresentation().c_str();
+}
+
+LLVMOrcIndirectStubsManagerRef
+LLVMOrcCreateLocalIndirectStubsManager(const char *TargetTriple) {
+ auto builder = createLocalIndirectStubsManagerBuilder(Triple(TargetTriple));
+ return wrap(builder().release());
+}
+
+void LLVMOrcDisposeIndirectStubsManager(LLVMOrcIndirectStubsManagerRef ISM) {
+ std::unique_ptr<IndirectStubsManager> TmpISM(unwrap(ISM));
+}
+
+/// Create a lazy call-through manager for the given target triple and
+/// session, routing failed materializations to ErrorHandlerAddr. On success
+/// *Result owns the new manager.
+/// NOTE(review): unlike sibling Create* APIs in this file, *Result is not
+/// nulled on the error path — confirm callers never read it after failure.
+LLVMErrorRef LLVMOrcCreateLocalLazyCallThroughManager(
+    const char *TargetTriple, LLVMOrcExecutionSessionRef ES,
+    LLVMOrcJITTargetAddress ErrorHandlerAddr,
+    LLVMOrcLazyCallThroughManagerRef *Result) {
+  auto LCTM = createLocalLazyCallThroughManager(
+      Triple(TargetTriple), *unwrap(ES), ExecutorAddr(ErrorHandlerAddr));
+
+  if (!LCTM)
+    return wrap(LCTM.takeError());
+  *Result = wrap(LCTM->release());
+  return LLVMErrorSuccess;
+}
+
+void LLVMOrcDisposeLazyCallThroughManager(
+ LLVMOrcLazyCallThroughManagerRef LCM) {
+ std::unique_ptr<LazyCallThroughManager> TmpLCM(unwrap(LCM));
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
new file mode 100644
index 000000000000..0d48c3435401
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
@@ -0,0 +1,443 @@
+//===-- RTDyldObjectLinkingLayer.cpp - RuntimeDyld backed ORC ObjectLayer -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/Object/COFF.h"
+
+namespace {
+
+using namespace llvm;
+using namespace llvm::orc;
+
+/// JITSymbolResolver that services RuntimeDyld lookups by searching the
+/// target JITDylib's link order via the ExecutionSession. Dependencies
+/// discovered during the lookup are recorded into the referenced
+/// SymbolDependenceMap so the layer can report them at notifyEmitted time.
+/// Both referenced objects must outlive the resolver.
+class JITDylibSearchOrderResolver : public JITSymbolResolver {
+public:
+  JITDylibSearchOrderResolver(MaterializationResponsibility &MR,
+                              SymbolDependenceMap &Deps)
+      : MR(MR), Deps(Deps) {}
+
+  /// Asynchronous lookup: interns the requested names, snapshots the target
+  /// dylib's link order, and issues an ES lookup at Resolved state.
+  void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) override {
+    auto &ES = MR.getTargetJITDylib().getExecutionSession();
+    SymbolLookupSet InternedSymbols;
+
+    // Intern the requested symbols: lookup takes interned strings.
+    for (auto &S : Symbols)
+      InternedSymbols.add(ES.intern(S));
+
+    // Build an OnResolve callback to unwrap the interned strings and pass them
+    // to the OnResolved callback.
+    auto OnResolvedWithUnwrap =
+        [OnResolved = std::move(OnResolved)](
+            Expected<SymbolMap> InternedResult) mutable {
+          if (!InternedResult) {
+            OnResolved(InternedResult.takeError());
+            return;
+          }
+
+          LookupResult Result;
+          for (auto &KV : *InternedResult)
+            Result[*KV.first] = {KV.second.getAddress().getValue(),
+                                 KV.second.getFlags()};
+          OnResolved(Result);
+        };
+
+    JITDylibSearchOrder LinkOrder;
+    MR.getTargetJITDylib().withLinkOrderDo(
+        [&](const JITDylibSearchOrder &LO) { LinkOrder = LO; });
+    ES.lookup(
+        LookupKind::Static, LinkOrder, InternedSymbols, SymbolState::Resolved,
+        std::move(OnResolvedWithUnwrap),
+        [this](const SymbolDependenceMap &LookupDeps) { Deps = LookupDeps; });
+  }
+
+  /// A symbol is our responsibility iff it appears in MR's symbol table.
+  Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) override {
+    LookupSet Result;
+
+    for (auto &KV : MR.getSymbols()) {
+      if (Symbols.count(*KV.first))
+        Result.insert(*KV.first);
+    }
+
+    return Result;
+  }
+
+private:
+  MaterializationResponsibility &MR;
+  SymbolDependenceMap &Deps;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+char RTDyldObjectLinkingLayer::ID;
+
+using BaseT = RTTIExtends<RTDyldObjectLinkingLayer, ObjectLayer>;
+
+/// Construct the layer and register it as a resource manager with ES so it
+/// is notified when resource keys are removed or transferred (used to free
+/// the per-object memory managers).
+RTDyldObjectLinkingLayer::RTDyldObjectLinkingLayer(
+    ExecutionSession &ES, GetMemoryManagerFunction GetMemoryManager)
+    : BaseT(ES), GetMemoryManager(std::move(GetMemoryManager)) {
+  ES.registerResourceManager(*this);
+}
+
+/// All memory managers must have been released (via resource removal)
+/// before the layer is destroyed.
+RTDyldObjectLinkingLayer::~RTDyldObjectLinkingLayer() {
+  assert(MemMgrs.empty() && "Layer destroyed with resources still attached");
+}
+
+/// Materialize an object file: parse it, pre-scan its symbol table (to
+/// record internal symbols and optionally claim weak ones), then hand it to
+/// RuntimeDyld via jitLinkForORC with onObjLoad/onObjEmit continuations.
+/// On any error the failure is reported to the session and R is failed.
+void RTDyldObjectLinkingLayer::emit(
+    std::unique_ptr<MaterializationResponsibility> R,
+    std::unique_ptr<MemoryBuffer> O) {
+  assert(O && "Object must not be null");
+
+  auto &ES = getExecutionSession();
+
+  auto Obj = object::ObjectFile::createObjectFile(*O);
+
+  if (!Obj) {
+    getExecutionSession().reportError(Obj.takeError());
+    R->failMaterialization();
+    return;
+  }
+
+  // Collect the internal symbols from the object file: We will need to
+  // filter these later.
+  auto InternalSymbols = std::make_shared<std::set<StringRef>>();
+  {
+    SymbolFlagsMap ExtraSymbolsToClaim;
+    for (auto &Sym : (*Obj)->symbols()) {
+
+      // Skip file symbols.
+      if (auto SymType = Sym.getType()) {
+        if (*SymType == object::SymbolRef::ST_File)
+          continue;
+      } else {
+        ES.reportError(SymType.takeError());
+        R->failMaterialization();
+        return;
+      }
+
+      Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+      if (!SymFlagsOrErr) {
+        // TODO: Test this error.
+        ES.reportError(SymFlagsOrErr.takeError());
+        R->failMaterialization();
+        return;
+      }
+
+      // Try to claim responsibility of weak symbols
+      // if AutoClaimObjectSymbols flag is set.
+      if (AutoClaimObjectSymbols &&
+          (*SymFlagsOrErr & object::BasicSymbolRef::SF_Weak)) {
+        auto SymName = Sym.getName();
+        if (!SymName) {
+          ES.reportError(SymName.takeError());
+          R->failMaterialization();
+          return;
+        }
+
+        // Already included in responsibility set, skip it
+        SymbolStringPtr SymbolName = ES.intern(*SymName);
+        if (R->getSymbols().count(SymbolName))
+          continue;
+
+        auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+        if (!SymFlags) {
+          ES.reportError(SymFlags.takeError());
+          R->failMaterialization();
+          return;
+        }
+
+        ExtraSymbolsToClaim[SymbolName] = *SymFlags;
+        continue;
+      }
+
+      // Don't include symbols that aren't global.
+      if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global)) {
+        if (auto SymName = Sym.getName())
+          InternalSymbols->insert(*SymName);
+        else {
+          ES.reportError(SymName.takeError());
+          R->failMaterialization();
+          return;
+        }
+      }
+    }
+
+    if (!ExtraSymbolsToClaim.empty()) {
+      if (auto Err = R->defineMaterializing(ExtraSymbolsToClaim)) {
+        ES.reportError(std::move(Err));
+        R->failMaterialization();
+      }
+    }
+  }
+
+  // Keep a raw reference for the resolver/load callback; ownership of the
+  // memory manager itself transfers to the onObjEmit continuation below.
+  auto MemMgr = GetMemoryManager();
+  auto &MemMgrRef = *MemMgr;
+
+  // Switch to shared ownership of MR so that it can be captured by both
+  // lambdas below.
+  std::shared_ptr<MaterializationResponsibility> SharedR(std::move(R));
+  auto Deps = std::make_unique<SymbolDependenceMap>();
+
+  JITDylibSearchOrderResolver Resolver(*SharedR, *Deps);
+
+  jitLinkForORC(
+      object::OwningBinary<object::ObjectFile>(std::move(*Obj), std::move(O)),
+      MemMgrRef, Resolver, ProcessAllSections,
+      [this, SharedR, &MemMgrRef, InternalSymbols](
+          const object::ObjectFile &Obj,
+          RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
+          std::map<StringRef, JITEvaluatedSymbol> ResolvedSymbols) {
+        return onObjLoad(*SharedR, Obj, MemMgrRef, LoadedObjInfo,
+                         ResolvedSymbols, *InternalSymbols);
+      },
+      [this, SharedR, MemMgr = std::move(MemMgr), Deps = std::move(Deps)](
+          object::OwningBinary<object::ObjectFile> Obj,
+          std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+          Error Err) mutable {
+        onObjEmit(*SharedR, std::move(Obj), std::move(MemMgr),
+                  std::move(LoadedObjInfo), std::move(Deps), std::move(Err));
+      });
+}
+
+void RTDyldObjectLinkingLayer::registerJITEventListener(JITEventListener &L) {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ assert(!llvm::is_contained(EventListeners, &L) &&
+ "Listener has already been registered");
+ EventListeners.push_back(&L);
+}
+
+void RTDyldObjectLinkingLayer::unregisterJITEventListener(JITEventListener &L) {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ auto I = llvm::find(EventListeners, &L);
+ assert(I != EventListeners.end() && "Listener not registered");
+ EventListeners.erase(I);
+}
+
+/// Called by RuntimeDyld once the object has been loaded and its symbols
+/// resolved. Applies COFF comdat/weak-external fixups, builds the final
+/// SymbolMap (honoring OverrideObjectFlags / AutoClaimObjectSymbols), and
+/// notifies R that the symbols are resolved.
+Error RTDyldObjectLinkingLayer::onObjLoad(
+    MaterializationResponsibility &R, const object::ObjectFile &Obj,
+    RuntimeDyld::MemoryManager &MemMgr,
+    RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
+    std::map<StringRef, JITEvaluatedSymbol> Resolved,
+    std::set<StringRef> &InternalSymbols) {
+  SymbolFlagsMap ExtraSymbolsToClaim;
+  SymbolMap Symbols;
+
+  // Hack to support COFF constant pool comdats introduced during compilation:
+  // (See http://llvm.org/PR40074)
+  if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(&Obj)) {
+    auto &ES = getExecutionSession();
+
+    // For all resolved symbols that are not already in the responsibility set:
+    // check whether the symbol is in a comdat section and if so mark it as
+    // weak.
+    for (auto &Sym : COFFObj->symbols()) {
+      // getFlags() on COFF symbols can't fail.
+      uint32_t SymFlags = cantFail(Sym.getFlags());
+      if (SymFlags & object::BasicSymbolRef::SF_Undefined)
+        continue;
+      auto Name = Sym.getName();
+      if (!Name)
+        return Name.takeError();
+      auto I = Resolved.find(*Name);
+
+      // Skip unresolved symbols, internal symbols, and symbols that are
+      // already in the responsibility set.
+      if (I == Resolved.end() || InternalSymbols.count(*Name) ||
+          R.getSymbols().count(ES.intern(*Name)))
+        continue;
+      auto Sec = Sym.getSection();
+      if (!Sec)
+        return Sec.takeError();
+      if (*Sec == COFFObj->section_end())
+        continue;
+      auto &COFFSec = *COFFObj->getCOFFSection(**Sec);
+      if (COFFSec.Characteristics & COFF::IMAGE_SCN_LNK_COMDAT)
+        I->second.setFlags(I->second.getFlags() | JITSymbolFlags::Weak);
+    }
+
+    // Handle any aliases.
+    for (auto &Sym : COFFObj->symbols()) {
+      uint32_t SymFlags = cantFail(Sym.getFlags());
+      if (SymFlags & object::BasicSymbolRef::SF_Undefined)
+        continue;
+      auto Name = Sym.getName();
+      if (!Name)
+        return Name.takeError();
+      auto I = Resolved.find(*Name);
+
+      // Skip already-resolved symbols, and symbols that we're not responsible
+      // for.
+      if (I != Resolved.end() || !R.getSymbols().count(ES.intern(*Name)))
+        continue;
+
+      // Skip anything other than weak externals.
+      auto COFFSym = COFFObj->getCOFFSymbol(Sym);
+      if (!COFFSym.isWeakExternal())
+        continue;
+      auto *WeakExternal = COFFSym.getAux<object::coff_aux_weak_external>();
+      if (WeakExternal->Characteristics != COFF::IMAGE_WEAK_EXTERN_SEARCH_ALIAS)
+        continue;
+
+      // We found an alias. Reuse the resolution of the alias target for the
+      // alias itself.
+      Expected<object::COFFSymbolRef> TargetSymbol =
+          COFFObj->getSymbol(WeakExternal->TagIndex);
+      if (!TargetSymbol)
+        return TargetSymbol.takeError();
+      Expected<StringRef> TargetName = COFFObj->getSymbolName(*TargetSymbol);
+      if (!TargetName)
+        return TargetName.takeError();
+      auto J = Resolved.find(*TargetName);
+      if (J == Resolved.end())
+        // NOTE(review): error message wording is garbled ("Could alias
+        // target ... not resolved"); left unchanged here since message text
+        // is runtime behavior, but it should read e.g. "Alias target ...
+        // could not be resolved".
+        return make_error<StringError>("Could alias target " + *TargetName +
+                                           " not resolved",
+                                       inconvertibleErrorCode());
+      Resolved[*Name] = J->second;
+    }
+  }
+
+  for (auto &KV : Resolved) {
+    // Scan the symbols and add them to the Symbols map for resolution.
+
+    // We never claim internal symbols.
+    if (InternalSymbols.count(KV.first))
+      continue;
+
+    auto InternedName = getExecutionSession().intern(KV.first);
+    auto Flags = KV.second.getFlags();
+    auto I = R.getSymbols().find(InternedName);
+    if (I != R.getSymbols().end()) {
+      // Override object flags and claim responsibility for symbols if
+      // requested.
+      if (OverrideObjectFlags)
+        Flags = I->second;
+      else {
+        // RuntimeDyld/MCJIT's weak tracking isn't compatible with ORC's. Even
+        // if we're not overriding flags in general we should set the weak flag
+        // according to the MaterializationResponsibility object symbol table.
+        if (I->second.isWeak())
+          Flags |= JITSymbolFlags::Weak;
+      }
+    } else if (AutoClaimObjectSymbols)
+      ExtraSymbolsToClaim[InternedName] = Flags;
+
+    Symbols[InternedName] = {ExecutorAddr(KV.second.getAddress()), Flags};
+  }
+
+  if (!ExtraSymbolsToClaim.empty()) {
+    if (auto Err = R.defineMaterializing(ExtraSymbolsToClaim))
+      return Err;
+
+    // If we claimed responsibility for any weak symbols but were rejected then
+    // we need to remove them from the resolved set.
+    for (auto &KV : ExtraSymbolsToClaim)
+      if (KV.second.isWeak() && !R.getSymbols().count(KV.first))
+        Symbols.erase(KV.first);
+  }
+
+  if (auto Err = R.notifyResolved(Symbols)) {
+    R.failMaterialization();
+    return Err;
+  }
+
+  if (NotifyLoaded)
+    NotifyLoaded(R, Obj, LoadedObjInfo);
+
+  return Error::success();
+}
+
+/// Called by RuntimeDyld once the object is fully emitted. Builds a single
+/// SymbolDependenceGroup covering all of R's symbols with the dependencies
+/// collected during lookup, notifies emission, runs JITEventListener
+/// notifyObjectLoaded callbacks, then parks the memory manager under R's
+/// resource key so it can be freed on resource removal. On any error the
+/// failure is reported to the session and R is failed.
+void RTDyldObjectLinkingLayer::onObjEmit(
+    MaterializationResponsibility &R,
+    object::OwningBinary<object::ObjectFile> O,
+    std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
+    std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+    std::unique_ptr<SymbolDependenceMap> Deps, Error Err) {
+  if (Err) {
+    getExecutionSession().reportError(std::move(Err));
+    R.failMaterialization();
+    return;
+  }
+
+  SymbolDependenceGroup SDG;
+  for (auto &[Sym, Flags] : R.getSymbols())
+    SDG.Symbols.insert(Sym);
+  SDG.Dependencies = std::move(*Deps);
+
+  if (auto Err = R.notifyEmitted(SDG)) {
+    getExecutionSession().reportError(std::move(Err));
+    R.failMaterialization();
+    return;
+  }
+
+  std::unique_ptr<object::ObjectFile> Obj;
+  std::unique_ptr<MemoryBuffer> ObjBuffer;
+  std::tie(Obj, ObjBuffer) = O.takeBinary();
+
+  // Run EventListener notifyLoaded callbacks.
+  {
+    std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+    for (auto *L : EventListeners)
+      L->notifyObjectLoaded(pointerToJITTargetAddress(MemMgr.get()), *Obj,
+                            *LoadedObjInfo);
+  }
+
+  if (NotifyEmitted)
+    NotifyEmitted(R, std::move(ObjBuffer));
+
+  // The memory manager's address doubles as the listener key, so it must
+  // stay alive until handleRemoveResources runs for this resource key.
+  if (auto Err = R.withResourceKeyDo(
+          [&](ResourceKey K) { MemMgrs[K].push_back(std::move(MemMgr)); })) {
+    getExecutionSession().reportError(std::move(Err));
+    R.failMaterialization();
+  }
+}
+
+/// Release the memory managers associated with resource key K: detach them
+/// from the map under the session lock, then (under the listener mutex)
+/// notify listeners and deregister EH frames before the managers are
+/// destroyed when MemMgrsToRemove goes out of scope.
+Error RTDyldObjectLinkingLayer::handleRemoveResources(JITDylib &JD,
+                                                      ResourceKey K) {
+
+  std::vector<MemoryManagerUP> MemMgrsToRemove;
+
+  getExecutionSession().runSessionLocked([&] {
+    auto I = MemMgrs.find(K);
+    if (I != MemMgrs.end()) {
+      std::swap(MemMgrsToRemove, I->second);
+      MemMgrs.erase(I);
+    }
+  });
+
+  {
+    std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+    for (auto &MemMgr : MemMgrsToRemove) {
+      for (auto *L : EventListeners)
+        L->notifyFreeingObject(pointerToJITTargetAddress(MemMgr.get()));
+      MemMgr->deregisterEHFrames();
+    }
+  }
+
+  return Error::success();
+}
+
+/// Move all memory managers recorded under SrcKey to DstKey (e.g. when a
+/// ResourceTracker is transferred).
+void RTDyldObjectLinkingLayer::handleTransferResources(JITDylib &JD,
+                                                       ResourceKey DstKey,
+                                                       ResourceKey SrcKey) {
+  auto I = MemMgrs.find(SrcKey);
+  if (I != MemMgrs.end()) {
+    auto &SrcMemMgrs = I->second;
+    auto &DstMemMgrs = MemMgrs[DstKey];
+    DstMemMgrs.reserve(DstMemMgrs.size() + SrcMemMgrs.size());
+    for (auto &MemMgr : SrcMemMgrs)
+      DstMemMgrs.push_back(std::move(MemMgr));
+
+    // Erase SrcKey entry using value rather than iterator I: I may have been
+    // invalidated when we looked up DstKey.
+    MemMgrs.erase(SrcKey);
+  }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SectCreate.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SectCreate.cpp
new file mode 100644
index 000000000000..4f28b8bda529
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SectCreate.cpp
@@ -0,0 +1,52 @@
+//===--------- SectCreate.cpp - Emulate ld64's -sectcreate option ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SectCreate.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::jitlink;
+
+namespace llvm::orc {
+
+// Materializes the synthetic section by building a fresh LinkGraph containing
+// one content block (a copy of Data) in a section named SectName, defining
+// each requested extra symbol inside that block, and handing the graph to the
+// object linking layer for emission.
+void SectCreateMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ auto G = std::make_unique<LinkGraph>(
+ "orc_sectcreate_" + SectName,
+ ObjLinkingLayer.getExecutionSession().getTargetTriple(),
+ getGenericEdgeKindName);
+
+ auto &Sect = G->createSection(SectName, MP);
+ // Copy the backing buffer into graph-owned storage so the graph does not
+ // depend on Data's lifetime.
+ auto Content = G->allocateContent(
+ ArrayRef<char>(Data->getBuffer().data(), Data->getBuffer().size()));
+ auto &B = G->createContentBlock(Sect, Content, ExecutorAddr(), Alignment, 0);
+
+ for (auto &[Name, Info] : ExtraSymbols) {
+ auto L = Info.Flags.isStrong() ? Linkage::Strong : Linkage::Weak;
+ auto S = Info.Flags.isExported() ? Scope::Default : Scope::Hidden;
+ // Size 0, live = true: the symbol marks an offset within the block.
+ G->addDefinedSymbol(B, Info.Offset, *Name, 0, L, S, Info.Flags.isCallable(),
+ true);
+ }
+
+ ObjLinkingLayer.emit(std::move(R), std::move(G));
+}
+
+// Drops a discarded symbol from the set of extra symbols so that a later
+// materialize() call will not define it.
+void SectCreateMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ ExtraSymbols.erase(Name);
+}
+
+// Builds the MaterializationUnit interface from the extra-symbols map: the
+// flags of every extra symbol, with no initializer symbol.
+MaterializationUnit::Interface SectCreateMaterializationUnit::getInterface(
+ const ExtraSymbolsMap &ExtraSymbols) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &[Name, Info] : ExtraSymbols)
+ SymbolFlags[Name] = Info.Flags;
+ return {std::move(SymbolFlags), nullptr};
+}
+
+} // End namespace llvm::orc.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp
new file mode 100644
index 000000000000..91f2899449ef
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp
@@ -0,0 +1,44 @@
+//===----- AllocationActions.cpp -- JITLink allocation support calls -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+// Runs the finalize half of each allocation action in order, collecting the
+// corresponding dealloc actions as it goes. If any finalize call fails, the
+// dealloc actions collected so far are run immediately (in reverse order, via
+// runDeallocActions) and the joined error is returned. On success AAs is
+// cleared and the pending dealloc actions are returned for later use.
+Expected<std::vector<WrapperFunctionCall>>
+runFinalizeActions(AllocActions &AAs) {
+ std::vector<WrapperFunctionCall> DeallocActions;
+ DeallocActions.reserve(numDeallocActions(AAs));
+
+ for (auto &AA : AAs) {
+ if (AA.Finalize)
+ if (auto Err = AA.Finalize.runWithSPSRetErrorMerged())
+ return joinErrors(std::move(Err), runDeallocActions(DeallocActions));
+
+ if (AA.Dealloc)
+ DeallocActions.push_back(std::move(AA.Dealloc));
+ }
+
+ AAs.clear();
+ return DeallocActions;
+}
+
+// Runs all dealloc actions in reverse order (last-registered first) and joins
+// any errors they produce into a single Error result.
+Error runDeallocActions(ArrayRef<WrapperFunctionCall> DAs) {
+ Error Err = Error::success();
+ while (!DAs.empty()) {
+ Err = joinErrors(std::move(Err), DAs.back().runWithSPSRetErrorMerged());
+ DAs = DAs.drop_back();
+ }
+ return Err;
+}
+
+} // namespace shared
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/ObjectFormats.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/ObjectFormats.cpp
new file mode 100644
index 000000000000..f94f4832c540
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/ObjectFormats.cpp
@@ -0,0 +1,113 @@
+//===---------- ObjectFormats.cpp - Object format details for ORC ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ORC-specific object format details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/ObjectFormats.h"
+
+namespace llvm {
+namespace orc {
+
+// Canonical section-name constants used by ORC to recognize format-specific
+// sections. MachO names are fully qualified as "<segment>,<section>"; the
+// MachOInitSectionNames array lists every MachO section treated as holding
+// initializers, and ELFInitSectionNames the ELF equivalents.
+StringRef MachODataCommonSectionName = "__DATA,__common";
+StringRef MachODataDataSectionName = "__DATA,__data";
+StringRef MachOEHFrameSectionName = "__TEXT,__eh_frame";
+StringRef MachOCompactUnwindInfoSectionName = "__TEXT,__unwind_info";
+StringRef MachOCStringSectionName = "__TEXT,__cstring";
+StringRef MachOModInitFuncSectionName = "__DATA,__mod_init_func";
+StringRef MachOObjCCatListSectionName = "__DATA,__objc_catlist";
+StringRef MachOObjCCatList2SectionName = "__DATA,__objc_catlist2";
+StringRef MachOObjCClassListSectionName = "__DATA,__objc_classlist";
+StringRef MachOObjCClassNameSectionName = "__TEXT,__objc_classname";
+StringRef MachOObjCClassRefsSectionName = "__DATA,__objc_classrefs";
+StringRef MachOObjCConstSectionName = "__DATA,__objc_const";
+StringRef MachOObjCDataSectionName = "__DATA,__objc_data";
+StringRef MachOObjCImageInfoSectionName = "__DATA,__objc_imageinfo";
+StringRef MachOObjCMethNameSectionName = "__TEXT,__objc_methname";
+StringRef MachOObjCMethTypeSectionName = "__TEXT,__objc_methtype";
+StringRef MachOObjCNLCatListSectionName = "__DATA,__objc_nlcatlist";
+StringRef MachOObjCNLClassListSectionName = "__DATA,__objc_nlclslist";
+StringRef MachOObjCProtoListSectionName = "__DATA,__objc_protolist";
+StringRef MachOObjCProtoRefsSectionName = "__DATA,__objc_protorefs";
+StringRef MachOObjCSelRefsSectionName = "__DATA,__objc_selrefs";
+StringRef MachOSwift5ProtoSectionName = "__TEXT,__swift5_proto";
+StringRef MachOSwift5ProtosSectionName = "__TEXT,__swift5_protos";
+StringRef MachOSwift5TypesSectionName = "__TEXT,__swift5_types";
+StringRef MachOSwift5TypeRefSectionName = "__TEXT,__swift5_typeref";
+StringRef MachOSwift5FieldMetadataSectionName = "__TEXT,__swift5_fieldmd";
+StringRef MachOSwift5EntrySectionName = "__TEXT,__swift5_entry";
+StringRef MachOThreadBSSSectionName = "__DATA,__thread_bss";
+StringRef MachOThreadDataSectionName = "__DATA,__thread_data";
+StringRef MachOThreadVarsSectionName = "__DATA,__thread_vars";
+
+StringRef MachOInitSectionNames[22] = {
+ MachOModInitFuncSectionName, MachOObjCCatListSectionName,
+ MachOObjCCatList2SectionName, MachOObjCClassListSectionName,
+ MachOObjCClassNameSectionName, MachOObjCClassRefsSectionName,
+ MachOObjCConstSectionName, MachOObjCDataSectionName,
+ MachOObjCImageInfoSectionName, MachOObjCMethNameSectionName,
+ MachOObjCMethTypeSectionName, MachOObjCNLCatListSectionName,
+ MachOObjCNLClassListSectionName, MachOObjCProtoListSectionName,
+ MachOObjCProtoRefsSectionName, MachOObjCSelRefsSectionName,
+ MachOSwift5ProtoSectionName, MachOSwift5ProtosSectionName,
+ MachOSwift5TypesSectionName, MachOSwift5TypeRefSectionName,
+ MachOSwift5FieldMetadataSectionName, MachOSwift5EntrySectionName,
+};
+
+StringRef ELFEHFrameSectionName = ".eh_frame";
+
+StringRef ELFInitArrayFuncSectionName = ".init_array";
+StringRef ELFInitFuncSectionName = ".init";
+StringRef ELFFiniArrayFuncSectionName = ".fini_array";
+StringRef ELFFiniFuncSectionName = ".fini";
+StringRef ELFCtorArrayFuncSectionName = ".ctors";
+StringRef ELFDtorArrayFuncSectionName = ".dtors";
+
+StringRef ELFInitSectionNames[3]{
+ ELFInitArrayFuncSectionName,
+ ELFInitFuncSectionName,
+ ELFCtorArrayFuncSectionName,
+};
+
+StringRef ELFThreadBSSSectionName = ".tbss";
+StringRef ELFThreadDataSectionName = ".tdata";
+
+// Returns true if the (segment, section) pair names a known MachO initializer
+// section. Matches against the qualified "<seg>,<sec>" entries in
+// MachOInitSectionNames.
+bool isMachOInitializerSection(StringRef SegName, StringRef SecName) {
+ for (auto &InitSection : MachOInitSectionNames) {
+ // Loop below assumes all MachO init sections have a length-6
+ // segment name (so the ',' separator sits at index 6).
+ assert(InitSection[6] == ',' && "Init section seg name has length != 6");
+ if (InitSection.starts_with(SegName) && InitSection.substr(7) == SecName)
+ return true;
+ }
+ return false;
+}
+
+// Overload taking an already-qualified "<seg>,<sec>" name; exact match against
+// the MachOInitSectionNames table.
+bool isMachOInitializerSection(StringRef QualifiedName) {
+ for (auto &InitSection : MachOInitSectionNames)
+ if (InitSection == QualifiedName)
+ return true;
+ return false;
+}
+
+// Returns true if SecName is an ELF initializer section: either an exact
+// match for an entry in ELFInitSectionNames, or such an entry followed by a
+// '.'-separated suffix (e.g. ".init_array.0").
+bool isELFInitializerSection(StringRef SecName) {
+ for (StringRef InitSection : ELFInitSectionNames) {
+ StringRef Name = SecName;
+ if (Name.consume_front(InitSection) && (Name.empty() || Name[0] == '.'))
+ return true;
+ }
+ return false;
+}
+
+// COFF initializer sections all live under the ".CRT" prefix.
+bool isCOFFInitializerSection(StringRef SecName) {
+ return SecName.starts_with(".CRT");
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcError.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcError.cpp
new file mode 100644
index 000000000000..ec53338570db
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcError.cpp
@@ -0,0 +1,122 @@
+//===---------------- OrcError.cpp - Error codes for ORC ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Error codes for ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <type_traits>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+//
+// std::error_category implementation mapping each OrcErrorCode to a
+// human-readable message. The category's name() is "orc".
+class OrcErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "orc"; }
+
+ // Translates an OrcErrorCode (passed as the raw error_code value) to text.
+ std::string message(int condition) const override {
+ switch (static_cast<OrcErrorCode>(condition)) {
+ case OrcErrorCode::UnknownORCError:
+ return "Unknown ORC error";
+ case OrcErrorCode::DuplicateDefinition:
+ return "Duplicate symbol definition";
+ case OrcErrorCode::JITSymbolNotFound:
+ return "JIT symbol not found";
+ case OrcErrorCode::RemoteAllocatorDoesNotExist:
+ return "Remote allocator does not exist";
+ case OrcErrorCode::RemoteAllocatorIdAlreadyInUse:
+ return "Remote allocator Id already in use";
+ case OrcErrorCode::RemoteMProtectAddrUnrecognized:
+ return "Remote mprotect call references unallocated memory";
+ case OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist:
+ return "Remote indirect stubs owner does not exist";
+ case OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse:
+ return "Remote indirect stubs owner Id already in use";
+ case OrcErrorCode::RPCConnectionClosed:
+ return "RPC connection closed";
+ case OrcErrorCode::RPCCouldNotNegotiateFunction:
+ return "Could not negotiate RPC function";
+ case OrcErrorCode::RPCResponseAbandoned:
+ return "RPC response abandoned";
+ case OrcErrorCode::UnexpectedRPCCall:
+ return "Unexpected RPC call";
+ case OrcErrorCode::UnexpectedRPCResponse:
+ return "Unexpected RPC response";
+ case OrcErrorCode::UnknownErrorCodeFromRemote:
+ return "Unknown error returned from remote RPC function "
+ "(Use StringError to get error message)";
+ case OrcErrorCode::UnknownResourceHandle:
+ return "Unknown resource handle";
+ case OrcErrorCode::MissingSymbolDefinitions:
+ return "MissingSymbolsDefinitions";
+ case OrcErrorCode::UnexpectedSymbolDefinitions:
+ return "UnexpectedSymbolDefinitions";
+ }
+ llvm_unreachable("Unhandled error code");
+ }
+};
+
+// Returns the process-wide OrcErrorCategory singleton (function-local static).
+OrcErrorCategory &getOrcErrCat() {
+ static OrcErrorCategory OrcErrCat;
+ return OrcErrCat;
+}
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+// ErrorInfo type IDs for the two custom ORC error classes.
+char DuplicateDefinition::ID = 0;
+char JITSymbolNotFound::ID = 0;
+
+// Wraps an OrcErrorCode in a std::error_code using the "orc" category.
+std::error_code orcError(OrcErrorCode ErrCode) {
+ typedef std::underlying_type_t<OrcErrorCode> UT;
+ return std::error_code(static_cast<UT>(ErrCode), getOrcErrCat());
+}
+
+// DuplicateDefinition: error reporting a symbol defined more than once.
+DuplicateDefinition::DuplicateDefinition(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code DuplicateDefinition::convertToErrorCode() const {
+ return orcError(OrcErrorCode::DuplicateDefinition);
+}
+
+void DuplicateDefinition::log(raw_ostream &OS) const {
+ OS << "Duplicate definition of symbol '" << SymbolName << "'";
+}
+
+const std::string &DuplicateDefinition::getSymbolName() const {
+ return SymbolName;
+}
+
+// JITSymbolNotFound: error reporting a failed symbol lookup.
+JITSymbolNotFound::JITSymbolNotFound(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+// NOTE(review): builds the error_code inline rather than via orcError();
+// behavior is identical to orcError(OrcErrorCode::JITSymbolNotFound).
+std::error_code JITSymbolNotFound::convertToErrorCode() const {
+ typedef std::underlying_type_t<OrcErrorCode> UT;
+ return std::error_code(static_cast<UT>(OrcErrorCode::JITSymbolNotFound),
+ getOrcErrCat());
+}
+
+void JITSymbolNotFound::log(raw_ostream &OS) const {
+ OS << "Could not find symbol '" << SymbolName << "'";
+}
+
+const std::string &JITSymbolNotFound::getSymbolName() const {
+ return SymbolName;
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
new file mode 100644
index 000000000000..ae39b1d1bfaa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
@@ -0,0 +1,66 @@
+//===------ OrcRTBridge.cpp - Executor functions for bootstrap -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+
+namespace llvm {
+namespace orc {
+namespace rt {
+
+// Well-known symbol names used to locate ORC runtime bootstrap functions and
+// service instances in the executor process. Host and executor must agree on
+// these strings exactly.
+const char *SimpleExecutorDylibManagerInstanceName =
+ "__llvm_orc_SimpleExecutorDylibManager_Instance";
+const char *SimpleExecutorDylibManagerOpenWrapperName =
+ "__llvm_orc_SimpleExecutorDylibManager_open_wrapper";
+const char *SimpleExecutorDylibManagerLookupWrapperName =
+ "__llvm_orc_SimpleExecutorDylibManager_lookup_wrapper";
+
+const char *SimpleExecutorMemoryManagerInstanceName =
+ "__llvm_orc_SimpleExecutorMemoryManager_Instance";
+const char *SimpleExecutorMemoryManagerReserveWrapperName =
+ "__llvm_orc_SimpleExecutorMemoryManager_reserve_wrapper";
+const char *SimpleExecutorMemoryManagerFinalizeWrapperName =
+ "__llvm_orc_SimpleExecutorMemoryManager_finalize_wrapper";
+const char *SimpleExecutorMemoryManagerDeallocateWrapperName =
+ "__llvm_orc_SimpleExecutorMemoryManager_deallocate_wrapper";
+
+const char *ExecutorSharedMemoryMapperServiceInstanceName =
+ "__llvm_orc_ExecutorSharedMemoryMapperService_Instance";
+const char *ExecutorSharedMemoryMapperServiceReserveWrapperName =
+ "__llvm_orc_ExecutorSharedMemoryMapperService_Reserve";
+const char *ExecutorSharedMemoryMapperServiceInitializeWrapperName =
+ "__llvm_orc_ExecutorSharedMemoryMapperService_Initialize";
+const char *ExecutorSharedMemoryMapperServiceDeinitializeWrapperName =
+ "__llvm_orc_ExecutorSharedMemoryMapperService_Deinitialize";
+const char *ExecutorSharedMemoryMapperServiceReleaseWrapperName =
+ "__llvm_orc_ExecutorSharedMemoryMapperService_Release";
+
+const char *MemoryWriteUInt8sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint8s_wrapper";
+const char *MemoryWriteUInt16sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint16s_wrapper";
+const char *MemoryWriteUInt32sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint32s_wrapper";
+const char *MemoryWriteUInt64sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint64s_wrapper";
+const char *MemoryWriteBuffersWrapperName =
+ "__llvm_orc_bootstrap_mem_write_buffers_wrapper";
+
+const char *RegisterEHFrameSectionWrapperName =
+ "llvm_orc_registerEHFrameSectionWrapper";
+const char *DeregisterEHFrameSectionWrapperName =
+ "llvm_orc_deregisterEHFrameSectionWrapper";
+
+const char *RunAsMainWrapperName = "__llvm_orc_bootstrap_run_as_main_wrapper";
+const char *RunAsVoidFunctionWrapperName =
+ "__llvm_orc_bootstrap_run_as_void_function_wrapper";
+const char *RunAsIntFunctionWrapperName =
+ "__llvm_orc_bootstrap_run_as_int_function_wrapper";
+
+} // end namespace rt
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp
new file mode 100644
index 000000000000..921ac47d421d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp
@@ -0,0 +1,250 @@
+//===------ SimpleRemoteEPCUtils.cpp - Utils for Simple Remote EPC --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Message definitions and other utilities for SimpleRemoteEPC and
+// SimpleRemoteEPCServer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+
+namespace {
+
+// Byte offsets of the fields in the fixed-size FD-transport message header:
+// four little-endian uint64_t fields (total message size, opcode, sequence
+// number, tag address), Size bytes in all.
+struct FDMsgHeader {
+ static constexpr unsigned MsgSizeOffset = 0;
+ static constexpr unsigned OpCOffset = MsgSizeOffset + sizeof(uint64_t);
+ static constexpr unsigned SeqNoOffset = OpCOffset + sizeof(uint64_t);
+ static constexpr unsigned TagAddrOffset = SeqNoOffset + sizeof(uint64_t);
+ static constexpr unsigned Size = TagAddrOffset + sizeof(uint64_t);
+};
+
+} // namespace
+
+namespace llvm {
+namespace orc {
+namespace SimpleRemoteEPCDefaultBootstrapSymbolNames {
+
+// Default bootstrap symbol names shared by SimpleRemoteEPC host and server.
+const char *ExecutorSessionObjectName =
+ "__llvm_orc_SimpleRemoteEPC_dispatch_ctx";
+const char *DispatchFnName = "__llvm_orc_SimpleRemoteEPC_dispatch_fn";
+
+} // end namespace SimpleRemoteEPCDefaultBootstrapSymbolNames
+
+// Out-of-line defaulted destructors anchor the vtables of these interfaces.
+SimpleRemoteEPCTransportClient::~SimpleRemoteEPCTransportClient() = default;
+SimpleRemoteEPCTransport::~SimpleRemoteEPCTransport() = default;
+
+// Factory for the file-descriptor based transport. Validates both FDs (a -1
+// value is rejected with a StringError) and returns a new transport instance.
+// Requires LLVM_ENABLE_THREADS: without thread support the factory always
+// fails, since the transport needs a listener thread.
+Expected<std::unique_ptr<FDSimpleRemoteEPCTransport>>
+FDSimpleRemoteEPCTransport::Create(SimpleRemoteEPCTransportClient &C, int InFD,
+ int OutFD) {
+#if LLVM_ENABLE_THREADS
+ if (InFD == -1)
+ return make_error<StringError>("Invalid input file descriptor " +
+ Twine(InFD),
+ inconvertibleErrorCode());
+ if (OutFD == -1)
+ return make_error<StringError>("Invalid output file descriptor " +
+ Twine(OutFD),
+ inconvertibleErrorCode());
+ std::unique_ptr<FDSimpleRemoteEPCTransport> FDT(
+ new FDSimpleRemoteEPCTransport(C, InFD, OutFD));
+ return std::move(FDT);
+#else
+ return make_error<StringError>("FD-based SimpleRemoteEPC transport requires "
+ "thread support, but llvm was built with "
+ "LLVM_ENABLE_THREADS=Off",
+ inconvertibleErrorCode());
+#endif
+}
+
+// Waits for the listener thread to finish before destruction.
+// NOTE(review): join() is unconditional — this assumes start() was called
+// (i.e. ListenerThread is joinable) before the transport is destroyed;
+// confirm against the creation/start contract.
+FDSimpleRemoteEPCTransport::~FDSimpleRemoteEPCTransport() {
+#if LLVM_ENABLE_THREADS
+ ListenerThread.join();
+#endif
+}
+
+// Spawns the listener thread that drives listenLoop(). Only reachable when
+// built with thread support (Create() fails otherwise).
+Error FDSimpleRemoteEPCTransport::start() {
+#if LLVM_ENABLE_THREADS
+ ListenerThread = std::thread([this]() { listenLoop(); });
+ return Error::success();
+#endif
+ llvm_unreachable("Should not be called with LLVM_ENABLE_THREADS=Off");
+}
+
+// Serializes and writes one message: a fixed little-endian header (total
+// size, opcode, seqno, tag address) followed by the raw argument bytes.
+// Header and payload are written under the mutex so concurrent senders
+// cannot interleave their messages; fails if the transport has disconnected.
+Error FDSimpleRemoteEPCTransport::sendMessage(SimpleRemoteEPCOpcode OpC,
+ uint64_t SeqNo,
+ ExecutorAddr TagAddr,
+ ArrayRef<char> ArgBytes) {
+ char HeaderBuffer[FDMsgHeader::Size];
+
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::MsgSizeOffset)) =
+ FDMsgHeader::Size + ArgBytes.size();
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::OpCOffset)) =
+ static_cast<uint64_t>(OpC);
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::SeqNoOffset)) = SeqNo;
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::TagAddrOffset)) =
+ TagAddr.getValue();
+
+ std::lock_guard<std::mutex> Lock(M);
+ if (Disconnected)
+ return make_error<StringError>("FD-transport disconnected",
+ inconvertibleErrorCode());
+ if (int ErrNo = writeBytes(HeaderBuffer, FDMsgHeader::Size))
+ return errorCodeToError(std::error_code(ErrNo, std::generic_category()));
+ if (int ErrNo = writeBytes(ArgBytes.data(), ArgBytes.size()))
+ return errorCodeToError(std::error_code(ErrNo, std::generic_category()));
+ return Error::success();
+}
+
+// Marks the transport disconnected and closes both file descriptors (closing
+// OutFD only when it differs from InFD). close() is retried until it either
+// succeeds or reports EBADF (already closed).
+void FDSimpleRemoteEPCTransport::disconnect() {
+ if (Disconnected)
+ return; // Return if already disconnected.
+
+ Disconnected = true;
+ bool CloseOutFD = InFD != OutFD;
+
+ // Close InFD.
+ while (close(InFD) == -1) {
+ if (errno == EBADF)
+ break;
+ }
+
+ // Close OutFD.
+ if (CloseOutFD) {
+ while (close(OutFD) == -1) {
+ if (errno == EBADF)
+ break;
+ }
+ }
+}
+
+// Helper producing the error returned when the stream ends mid-message.
+static Error makeUnexpectedEOFError() {
+ return make_error<StringError>("Unexpected end-of-file",
+ inconvertibleErrorCode());
+}
+
+// Reads exactly Size bytes from InFD into Dst, retrying on EAGAIN/EINTR.
+// EOF at a message boundary (no bytes read yet) is reported via *IsEOF when
+// the caller supplied it; EOF mid-read is an error. A read error observed
+// after disconnect() is treated as EOF rather than a failure.
+Error FDSimpleRemoteEPCTransport::readBytes(char *Dst, size_t Size,
+ bool *IsEOF) {
+ assert((Size == 0 || Dst) && "Attempt to read into null.");
+ ssize_t Completed = 0;
+ while (Completed < static_cast<ssize_t>(Size)) {
+ ssize_t Read = ::read(InFD, Dst + Completed, Size - Completed);
+ if (Read <= 0) {
+ auto ErrNo = errno;
+ if (Read == 0) {
+ if (Completed == 0 && IsEOF) {
+ *IsEOF = true;
+ return Error::success();
+ } else
+ return makeUnexpectedEOFError();
+ } else if (ErrNo == EAGAIN || ErrNo == EINTR)
+ continue;
+ else {
+ std::lock_guard<std::mutex> Lock(M);
+ if (Disconnected && IsEOF) { // disconnect called, pretend this is EOF.
+ *IsEOF = true;
+ return Error::success();
+ }
+ return errorCodeToError(
+ std::error_code(ErrNo, std::generic_category()));
+ }
+ }
+ Completed += Read;
+ }
+ return Error::success();
+}
+
+// Writes exactly Size bytes from Src to OutFD, retrying on EAGAIN/EINTR.
+// Returns 0 on success or the errno of the first fatal write failure.
+int FDSimpleRemoteEPCTransport::writeBytes(const char *Src, size_t Size) {
+ assert((Size == 0 || Src) && "Attempt to append from null.");
+ ssize_t Completed = 0;
+ while (Completed < static_cast<ssize_t>(Size)) {
+ ssize_t Written = ::write(OutFD, Src + Completed, Size - Completed);
+ if (Written < 0) {
+ auto ErrNo = errno;
+ if (ErrNo == EAGAIN || ErrNo == EINTR)
+ continue;
+ else
+ return ErrNo;
+ }
+ Completed += Written;
+ }
+ return 0;
+}
+
+// Listener-thread main loop: repeatedly reads a header, validates the
+// declared size, reads the argument payload, and dispatches to the client's
+// handleMessage(). Exits on EOF, on any error (accumulated in Err), or when
+// the client requests EndSession; then closes the FDs and reports the final
+// status via handleDisconnect().
+void FDSimpleRemoteEPCTransport::listenLoop() {
+ Error Err = Error::success();
+ do {
+
+ char HeaderBuffer[FDMsgHeader::Size];
+ // Read the header buffer.
+ {
+ bool IsEOF = false;
+ if (auto Err2 = readBytes(HeaderBuffer, FDMsgHeader::Size, &IsEOF)) {
+ Err = joinErrors(std::move(Err), std::move(Err2));
+ break;
+ }
+ if (IsEOF)
+ break;
+ }
+
+ // Decode header buffer.
+ uint64_t MsgSize;
+ SimpleRemoteEPCOpcode OpC;
+ uint64_t SeqNo;
+ ExecutorAddr TagAddr;
+
+ MsgSize =
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::MsgSizeOffset));
+ OpC = static_cast<SimpleRemoteEPCOpcode>(static_cast<uint64_t>(
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::OpCOffset))));
+ SeqNo =
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::SeqNoOffset));
+ TagAddr.setValue(
+ *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::TagAddrOffset)));
+
+ // MsgSize includes the header itself, so anything smaller is malformed.
+ if (MsgSize < FDMsgHeader::Size) {
+ Err = joinErrors(std::move(Err),
+ make_error<StringError>("Message size too small",
+ inconvertibleErrorCode()));
+ break;
+ }
+
+ // Read the argument bytes.
+ SimpleRemoteEPCArgBytesVector ArgBytes;
+ ArgBytes.resize(MsgSize - FDMsgHeader::Size);
+ if (auto Err2 = readBytes(ArgBytes.data(), ArgBytes.size())) {
+ Err = joinErrors(std::move(Err), std::move(Err2));
+ break;
+ }
+
+ if (auto Action = C.handleMessage(OpC, SeqNo, TagAddr, ArgBytes)) {
+ if (*Action == SimpleRemoteEPCTransportClient::EndSession)
+ break;
+ } else {
+ Err = joinErrors(std::move(Err), Action.takeError());
+ break;
+ }
+ } while (true);
+
+ // Attempt to close FDs, set Disconnected to true so that subsequent
+ // sendMessage calls fail.
+ disconnect();
+
+ // Call up to the client to handle the disconnection.
+ C.handleDisconnect(std::move(Err));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp
new file mode 100644
index 000000000000..a81019cb1dab
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp
@@ -0,0 +1,446 @@
+//===------- SimpleRemoteEPC.cpp -- Simple remote executor control --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h"
+#include "llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+// Debug-build check that disconnect() completed before destruction; the
+// destructor itself does no teardown.
+SimpleRemoteEPC::~SimpleRemoteEPC() {
+#ifndef NDEBUG
+ std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+ assert(Disconnected && "Destroyed without disconnection");
+#endif // NDEBUG
+}
+
+// Opens a dylib in the executor process via the remote dylib manager
+// (mode flags 0) and returns its handle.
+Expected<tpctypes::DylibHandle>
+SimpleRemoteEPC::loadDylib(const char *DylibPath) {
+ return DylibMgr->open(DylibPath, 0);
+}
+
+/// Async helper to chain together calls to DylibMgr::lookupAsync to fulfill
+/// all the requests: each completed lookup appends its addresses to Result
+/// and recursively issues the next request until none remain, at which point
+/// Complete is invoked with the accumulated results (or the first error).
+/// FIXME: The dylib manager should support multiple LookupRequests natively.
+static void
+lookupSymbolsAsyncHelper(EPCGenericDylibManager &DylibMgr,
+ ArrayRef<SimpleRemoteEPC::LookupRequest> Request,
+ std::vector<tpctypes::LookupResult> Result,
+ SimpleRemoteEPC::SymbolLookupCompleteFn Complete) {
+ if (Request.empty())
+ return Complete(std::move(Result));
+
+ auto &Element = Request.front();
+ DylibMgr.lookupAsync(Element.Handle, Element.Symbols,
+ [&DylibMgr, Request, Complete = std::move(Complete),
+ Result = std::move(Result)](auto R) mutable {
+ if (!R)
+ return Complete(R.takeError());
+ Result.push_back({});
+ Result.back().reserve(R->size());
+ for (auto Addr : *R)
+ Result.back().push_back(Addr);
+
+ lookupSymbolsAsyncHelper(
+ DylibMgr, Request.drop_front(), std::move(Result),
+ std::move(Complete));
+ });
+}
+
+// Public entry point: delegates to the recursive helper with an empty
+// accumulator.
+void SimpleRemoteEPC::lookupSymbolsAsync(ArrayRef<LookupRequest> Request,
+ SymbolLookupCompleteFn Complete) {
+ lookupSymbolsAsyncHelper(*DylibMgr, Request, {}, std::move(Complete));
+}
+
+// Thin wrappers invoking the executor-side bootstrap entry points via SPS
+// wrapper calls; each forwards the remote function's integer result or the
+// transport/serialization error.
+Expected<int32_t> SimpleRemoteEPC::runAsMain(ExecutorAddr MainFnAddr,
+ ArrayRef<std::string> Args) {
+ int64_t Result = 0;
+ if (auto Err = callSPSWrapper<rt::SPSRunAsMainSignature>(
+ RunAsMainAddr, Result, MainFnAddr, Args))
+ return std::move(Err);
+ return Result;
+}
+
+Expected<int32_t> SimpleRemoteEPC::runAsVoidFunction(ExecutorAddr VoidFnAddr) {
+ int32_t Result = 0;
+ if (auto Err = callSPSWrapper<rt::SPSRunAsVoidFunctionSignature>(
+ RunAsVoidFunctionAddr, Result, VoidFnAddr))
+ return std::move(Err);
+ return Result;
+}
+
+Expected<int32_t> SimpleRemoteEPC::runAsIntFunction(ExecutorAddr IntFnAddr,
+ int Arg) {
+ int32_t Result = 0;
+ if (auto Err = callSPSWrapper<rt::SPSRunAsIntFunctionSignature>(
+ RunAsIntFunctionAddr, Result, IntFnAddr, Arg))
+ return std::move(Err);
+ return Result;
+}
+
+// Registers OnComplete under a fresh sequence number, then sends a
+// CallWrapper message to the executor. If sending fails, the handler is
+// removed again (racing with handleDisconnect, which may already have failed
+// it) and invoked with an out-of-band "disconnecting" result.
+void SimpleRemoteEPC::callWrapperAsync(ExecutorAddr WrapperFnAddr,
+ IncomingWFRHandler OnComplete,
+ ArrayRef<char> ArgBuffer) {
+ uint64_t SeqNo;
+ {
+ std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+ SeqNo = getNextSeqNo();
+ assert(!PendingCallWrapperResults.count(SeqNo) && "SeqNo already in use");
+ PendingCallWrapperResults[SeqNo] = std::move(OnComplete);
+ }
+
+ if (auto Err = sendMessage(SimpleRemoteEPCOpcode::CallWrapper, SeqNo,
+ WrapperFnAddr, ArgBuffer)) {
+ IncomingWFRHandler H;
+
+ // We just registered OnComplete, but there may be a race between this
+ // thread returning from sendMessage and handleDisconnect being called from
+ // the transport's listener thread. If handleDisconnect gets there first
+ // then it will have failed 'H' for us. If we get there first (or if
+ // handleDisconnect already ran) then we need to take care of it.
+ {
+ std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+ auto I = PendingCallWrapperResults.find(SeqNo);
+ if (I != PendingCallWrapperResults.end()) {
+ H = std::move(I->second);
+ PendingCallWrapperResults.erase(I);
+ }
+ }
+
+ if (H)
+ H(shared::WrapperFunctionResult::createOutOfBandError("disconnecting"));
+
+ getExecutionSession().reportError(std::move(Err));
+ }
+}
+
+// Tears down the transport and dispatcher, then blocks until the listener
+// side signals completion (handleDisconnect sets Disconnected and notifies
+// DisconnectCV). Returns any error accumulated during disconnection.
+Error SimpleRemoteEPC::disconnect() {
+ T->disconnect();
+ D->shutdown();
+ std::unique_lock<std::mutex> Lock(SimpleRemoteEPCMutex);
+ DisconnectCV.wait(Lock, [this] { return Disconnected; });
+ return std::move(DisconnectErr);
+}
+
+// Transport callback: validates the opcode and dispatches to the matching
+// handler (Setup / Hangup / Result / CallWrapper). A Hangup ends the session;
+// everything else continues it. Debug builds log each message and assert the
+// per-opcode SeqNo/TagAddr invariants.
+Expected<SimpleRemoteEPCTransportClient::HandleMessageAction>
+SimpleRemoteEPC::handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
+ ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+
+ LLVM_DEBUG({
+ dbgs() << "SimpleRemoteEPC::handleMessage: opc = ";
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ dbgs() << "Setup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Setup?");
+ assert(!TagAddr && "Non-zero TagAddr for Setup?");
+ break;
+ case SimpleRemoteEPCOpcode::Hangup:
+ dbgs() << "Hangup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
+ assert(!TagAddr && "Non-zero TagAddr for Hangup?");
+ break;
+ case SimpleRemoteEPCOpcode::Result:
+ dbgs() << "Result";
+ assert(!TagAddr && "Non-zero TagAddr for Result?");
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ dbgs() << "CallWrapper";
+ break;
+ }
+ dbgs() << ", seqno = " << SeqNo << ", tag-addr = " << TagAddr
+ << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
+ << " bytes\n";
+ });
+
+ // Reject opcodes beyond the known range before switching on them.
+ using UT = std::underlying_type_t<SimpleRemoteEPCOpcode>;
+ if (static_cast<UT>(OpC) > static_cast<UT>(SimpleRemoteEPCOpcode::LastOpC))
+ return make_error<StringError>("Unexpected opcode",
+ inconvertibleErrorCode());
+
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ if (auto Err = handleSetup(SeqNo, TagAddr, std::move(ArgBytes)))
+ return std::move(Err);
+ break;
+ case SimpleRemoteEPCOpcode::Hangup:
+ T->disconnect();
+ if (auto Err = handleHangup(std::move(ArgBytes)))
+ return std::move(Err);
+ return EndSession;
+ case SimpleRemoteEPCOpcode::Result:
+ if (auto Err = handleResult(SeqNo, TagAddr, std::move(ArgBytes)))
+ return std::move(Err);
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ handleCallWrapper(SeqNo, TagAddr, std::move(ArgBytes));
+ break;
+ }
+ return ContinueSession;
+}
+
+// Transport callback invoked when the connection drops. Fails every pending
+// call-wrapper handler with an out-of-band "disconnecting" result (handlers
+// are swapped out and invoked outside the lock), records Err, and wakes any
+// thread blocked in disconnect().
+void SimpleRemoteEPC::handleDisconnect(Error Err) {
+ LLVM_DEBUG({
+ dbgs() << "SimpleRemoteEPC::handleDisconnect: "
+ << (Err ? "failure" : "success") << "\n";
+ });
+
+ PendingCallWrapperResultsMap TmpPending;
+
+ {
+ std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+ std::swap(TmpPending, PendingCallWrapperResults);
+ }
+
+ for (auto &KV : TmpPending)
+ KV.second(
+ shared::WrapperFunctionResult::createOutOfBandError("disconnecting"));
+
+ std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+ DisconnectErr = joinErrors(std::move(DisconnectErr), std::move(Err));
+ Disconnected = true;
+ DisconnectCV.notify_all();
+}
+
+// Builds the default JITLink memory manager for this EPC by resolving the
+// executor-side SimpleExecutorMemoryManager bootstrap symbols (instance plus
+// reserve/finalize/deallocate wrappers) and wrapping them in an
+// EPCGenericJITLinkMemoryManager.
+Expected<std::unique_ptr<jitlink::JITLinkMemoryManager>>
+SimpleRemoteEPC::createDefaultMemoryManager(SimpleRemoteEPC &SREPC) {
+ EPCGenericJITLinkMemoryManager::SymbolAddrs SAs;
+ if (auto Err = SREPC.getBootstrapSymbols(
+ {{SAs.Allocator, rt::SimpleExecutorMemoryManagerInstanceName},
+ {SAs.Reserve, rt::SimpleExecutorMemoryManagerReserveWrapperName},
+ {SAs.Finalize, rt::SimpleExecutorMemoryManagerFinalizeWrapperName},
+ {SAs.Deallocate,
+ rt::SimpleExecutorMemoryManagerDeallocateWrapperName}}))
+ return std::move(Err);
+
+ return std::make_unique<EPCGenericJITLinkMemoryManager>(SREPC, SAs);
+}
+
+// Default MemoryAccess factory: deliberately returns a null (but non-error)
+// pointer, i.e. no remote memory access is configured by default.
+Expected<std::unique_ptr<ExecutorProcessControl::MemoryAccess>>
+SimpleRemoteEPC::createDefaultMemoryAccess(SimpleRemoteEPC &SREPC) {
+  return nullptr;
+}
+
+// Send one SimpleRemoteEPC message over the transport. Setup messages only
+// flow executor -> controller, so sending one from here is asserted against.
+Error SimpleRemoteEPC::sendMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
+                                   ExecutorAddr TagAddr,
+                                   ArrayRef<char> ArgBytes) {
+  assert(OpC != SimpleRemoteEPCOpcode::Setup &&
+         "SimpleRemoteEPC sending Setup message? That's the wrong direction.");
+
+  // Debug-only logging of the outgoing message; also asserts the per-opcode
+  // invariants on SeqNo / TagAddr.
+  LLVM_DEBUG({
+    dbgs() << "SimpleRemoteEPC::sendMessage: opc = ";
+    switch (OpC) {
+    case SimpleRemoteEPCOpcode::Hangup:
+      dbgs() << "Hangup";
+      assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
+      assert(!TagAddr && "Non-zero TagAddr for Hangup?");
+      break;
+    case SimpleRemoteEPCOpcode::Result:
+      dbgs() << "Result";
+      assert(!TagAddr && "Non-zero TagAddr for Result?");
+      break;
+    case SimpleRemoteEPCOpcode::CallWrapper:
+      dbgs() << "CallWrapper";
+      break;
+    default:
+      llvm_unreachable("Invalid opcode");
+    }
+    dbgs() << ", seqno = " << SeqNo << ", tag-addr = " << TagAddr
+           << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
+           << " bytes\n";
+  });
+  auto Err = T->sendMessage(OpC, SeqNo, TagAddr, ArgBytes);
+  LLVM_DEBUG({
+    if (Err)
+      dbgs() << "  \\--> SimpleRemoteEPC::sendMessage failed\n";
+  });
+  return Err;
+}
+
+// Handle the initial Setup packet from the executor. The payload is handed to
+// the handler that setup() pre-registered under SeqNo 0; deserialization of
+// the executor info happens inside that handler.
+Error SimpleRemoteEPC::handleSetup(uint64_t SeqNo, ExecutorAddr TagAddr,
+                                   SimpleRemoteEPCArgBytesVector ArgBytes) {
+  // The Setup packet must be the very first message: SeqNo 0, no tag address.
+  if (SeqNo != 0)
+    return make_error<StringError>("Setup packet SeqNo not zero",
+                                   inconvertibleErrorCode());
+
+  if (TagAddr)
+    return make_error<StringError>("Setup packet TagAddr not zero",
+                                   inconvertibleErrorCode());
+
+  std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+  auto I = PendingCallWrapperResults.find(0);
+  assert(PendingCallWrapperResults.size() == 1 &&
+         I != PendingCallWrapperResults.end() &&
+         "Setup message handler not correctly set up");
+  auto SetupMsgHandler = std::move(I->second);
+  PendingCallWrapperResults.erase(I);
+
+  // Forward the raw setup bytes; the registered handler deserializes them.
+  auto WFR =
+      shared::WrapperFunctionResult::copyFrom(ArgBytes.data(), ArgBytes.size());
+  SetupMsgHandler(std::move(WFR));
+  return Error::success();
+}
+
+// Perform the SimpleRemoteEPC handshake: register a handler for the Setup
+// packet, start the transport, block until the executor info arrives, then
+// resolve bootstrap symbols and install memory manager / memory access /
+// dylib manager components (using defaults where Setup S leaves them unset).
+Error SimpleRemoteEPC::setup(Setup S) {
+  using namespace SimpleRemoteEPCDefaultBootstrapSymbolNames;
+
+  std::promise<MSVCPExpected<SimpleRemoteEPCExecutorInfo>> EIP;
+  auto EIF = EIP.get_future();
+
+  // Prepare a handler for the setup packet.
+  PendingCallWrapperResults[0] =
+    RunInPlace()(
+      [&](shared::WrapperFunctionResult SetupMsgBytes) {
+        if (const char *ErrMsg = SetupMsgBytes.getOutOfBandError()) {
+          EIP.set_value(
+              make_error<StringError>(ErrMsg, inconvertibleErrorCode()));
+          return;
+        }
+        using SPSSerialize =
+            shared::SPSArgList<shared::SPSSimpleRemoteEPCExecutorInfo>;
+        shared::SPSInputBuffer IB(SetupMsgBytes.data(), SetupMsgBytes.size());
+        SimpleRemoteEPCExecutorInfo EI;
+        if (SPSSerialize::deserialize(IB, EI))
+          EIP.set_value(EI);
+        else
+          EIP.set_value(make_error<StringError>(
+              "Could not deserialize setup message", inconvertibleErrorCode()));
+      });
+
+  // Start the transport.
+  if (auto Err = T->start())
+    return Err;
+
+  // Wait for setup packet to arrive.
+  auto EI = EIF.get();
+  if (!EI) {
+    T->disconnect();
+    return EI.takeError();
+  }
+
+  LLVM_DEBUG({
+    dbgs() << "SimpleRemoteEPC received setup message:\n"
+           << "  Triple: " << EI->TargetTriple << "\n"
+           << "  Page size: " << EI->PageSize << "\n"
+           << "  Bootstrap map" << (EI->BootstrapMap.empty() ? " empty" : ":")
+           << "\n";
+    for (const auto &KV : EI->BootstrapMap)
+      dbgs() << "    " << KV.first() << ": " << KV.second.size()
+             << "-byte SPS encoded buffer\n";
+    dbgs() << "  Bootstrap symbols"
+           << (EI->BootstrapSymbols.empty() ? " empty" : ":") << "\n";
+    for (const auto &KV : EI->BootstrapSymbols)
+      dbgs() << "    " << KV.first() << ": " << KV.second << "\n";
+  });
+  // Record the executor's target description and bootstrap tables.
+  TargetTriple = Triple(EI->TargetTriple);
+  PageSize = EI->PageSize;
+  BootstrapMap = std::move(EI->BootstrapMap);
+  BootstrapSymbols = std::move(EI->BootstrapSymbols);
+
+  // Resolve the well-known executor-side entry points needed for dispatch
+  // and for running main / void / int functions remotely.
+  if (auto Err = getBootstrapSymbols(
+          {{JDI.JITDispatchContext, ExecutorSessionObjectName},
+           {JDI.JITDispatchFunction, DispatchFnName},
+           {RunAsMainAddr, rt::RunAsMainWrapperName},
+           {RunAsVoidFunctionAddr, rt::RunAsVoidFunctionWrapperName},
+           {RunAsIntFunctionAddr, rt::RunAsIntFunctionWrapperName}}))
+    return Err;
+
+  if (auto DM =
+          EPCGenericDylibManager::CreateWithDefaultBootstrapSymbols(*this))
+    DylibMgr = std::make_unique<EPCGenericDylibManager>(std::move(*DM));
+  else
+    return DM.takeError();
+
+  // Set a default CreateMemoryManager if none is specified.
+  if (!S.CreateMemoryManager)
+    S.CreateMemoryManager = createDefaultMemoryManager;
+
+  if (auto MemMgr = S.CreateMemoryManager(*this)) {
+    OwnedMemMgr = std::move(*MemMgr);
+    this->MemMgr = OwnedMemMgr.get();
+  } else
+    return MemMgr.takeError();
+
+  // Set a default CreateMemoryAccess if none is specified.
+  if (!S.CreateMemoryAccess)
+    S.CreateMemoryAccess = createDefaultMemoryAccess;
+
+  if (auto MemAccess = S.CreateMemoryAccess(*this)) {
+    OwnedMemAccess = std::move(*MemAccess);
+    this->MemAccess = OwnedMemAccess.get();
+  } else
+    return MemAccess.takeError();
+
+  return Error::success();
+}
+
+// Handle a Result message: look up the pending handler for SeqNo, release the
+// sequence number for reuse, and deliver the result bytes to the handler.
+Error SimpleRemoteEPC::handleResult(uint64_t SeqNo, ExecutorAddr TagAddr,
+                                    SimpleRemoteEPCArgBytesVector ArgBytes) {
+  IncomingWFRHandler SendResult;
+
+  if (TagAddr)
+    return make_error<StringError>("Unexpected TagAddr in result message",
+                                   inconvertibleErrorCode());
+
+  // Detach the handler under the lock; invoke it outside the lock below.
+  {
+    std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+    auto I = PendingCallWrapperResults.find(SeqNo);
+    if (I == PendingCallWrapperResults.end())
+      return make_error<StringError>("No call for sequence number " +
+                                         Twine(SeqNo),
+                                     inconvertibleErrorCode());
+    SendResult = std::move(I->second);
+    PendingCallWrapperResults.erase(I);
+    releaseSeqNo(SeqNo);
+  }
+
+  auto WFR =
+      shared::WrapperFunctionResult::copyFrom(ArgBytes.data(), ArgBytes.size());
+  SendResult(std::move(WFR));
+  return Error::success();
+}
+
+// Handle an incoming CallWrapper request from the executor: dispatch a task
+// that runs the JIT dispatch handler for TagAddr, then sends the handler's
+// result back to the executor under the same remote sequence number.
+void SimpleRemoteEPC::handleCallWrapper(
+    uint64_t RemoteSeqNo, ExecutorAddr TagAddr,
+    SimpleRemoteEPCArgBytesVector ArgBytes) {
+  assert(ES && "No ExecutionSession attached");
+  // NOTE(review): the task captures `this` — assumes the SimpleRemoteEPC
+  // outlives all dispatched tasks; confirm against the dispatcher's shutdown.
+  D->dispatch(makeGenericNamedTask(
+      [this, RemoteSeqNo, TagAddr, ArgBytes = std::move(ArgBytes)]() {
+        ES->runJITDispatchHandler(
+            [this, RemoteSeqNo](shared::WrapperFunctionResult WFR) {
+              if (auto Err =
+                      sendMessage(SimpleRemoteEPCOpcode::Result, RemoteSeqNo,
+                                  ExecutorAddr(), {WFR.data(), WFR.size()}))
+                getExecutionSession().reportError(std::move(Err));
+            },
+            TagAddr, ArgBytes);
+      },
+      "callWrapper task"));
+}
+
+// Handle a Hangup message: the payload is an SPS-serialized Error describing
+// why the executor hung up. Deserialize it and return it to the caller.
+Error SimpleRemoteEPC::handleHangup(SimpleRemoteEPCArgBytesVector ArgBytes) {
+  using namespace llvm::orc::shared;
+  auto WFR = WrapperFunctionResult::copyFrom(ArgBytes.data(), ArgBytes.size());
+  if (const char *ErrMsg = WFR.getOutOfBandError())
+    return make_error<StringError>(ErrMsg, inconvertibleErrorCode());
+
+  detail::SPSSerializableError Info;
+  SPSInputBuffer IB(WFR.data(), WFR.size());
+  if (!SPSArgList<SPSError>::deserialize(IB, Info))
+    return make_error<StringError>("Could not deserialize hangup info",
+                                   inconvertibleErrorCode());
+  return fromSPSSerializable(std::move(Info));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
new file mode 100644
index 000000000000..8f42de91b5bb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
@@ -0,0 +1,305 @@
+//===-- SpeculateAnalyses.cpp --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+namespace {
+using namespace llvm;
+// Collect the basic blocks of F that contain at least one call. Direct calls
+// always count; indirect calls count only when IndirectCall is true.
+SmallVector<const BasicBlock *, 8> findBBwithCalls(const Function &F,
+                                                   bool IndirectCall = false) {
+  SmallVector<const BasicBlock *, 8> BBs;
+
+  auto findCallInst = [&IndirectCall](const Instruction &I) {
+    if (auto Call = dyn_cast<CallBase>(&I))
+      return Call->isIndirectCall() ? IndirectCall : true;
+    else
+      return false;
+  };
+  // Check the terminator too, so invokes are not missed.
+  for (auto &BB : F)
+    if (findCallInst(*BB.getTerminator()) ||
+        llvm::any_of(BB.instructionsWithoutDebug(), findCallInst))
+      BBs.emplace_back(&BB);
+
+  return BBs;
+}
+} // namespace
+
+// Implementations of Queries shouldn't need to lock the resources
+// such as LLVMContext, each argument (function) has a non-shared LLVMContext
+// Plus, if Queries contain states necessary locking scheme should be provided.
+namespace llvm {
+namespace orc {
+
+// Collect direct calls only: record the names of all functions directly
+// called from BB (via CallInst, or via the terminating InvokeInst) into
+// CallesNames. Indirect calls are ignored.
+void SpeculateQuery::findCalles(const BasicBlock *BB,
+                                DenseSet<StringRef> &CallesNames) {
+  assert(BB != nullptr && "Traversing Null BB to find calls?");
+
+  // stripPointerCasts lets calls through bitcasts still resolve to a Function.
+  auto getCalledFunction = [&CallesNames](const CallBase *Call) {
+    auto CalledValue = Call->getCalledOperand()->stripPointerCasts();
+    if (auto DirectCall = dyn_cast<Function>(CalledValue))
+      CallesNames.insert(DirectCall->getName());
+  };
+  for (auto &I : BB->instructionsWithoutDebug())
+    if (auto CI = dyn_cast<CallInst>(&I))
+      getCalledFunction(CI);
+
+  if (auto II = dyn_cast<InvokeInst>(BB->getTerminator()))
+    getCalledFunction(II);
+}
+
+// Returns true if every block in F has exactly one successor, i.e. the CFG is
+// a straight line with no branching.
+bool SpeculateQuery::isStraightLine(const Function &F) {
+  return llvm::all_of(F, [](const BasicBlock &BB) {
+    return BB.getSingleSuccessor() != nullptr;
+  });
+}
+
+// BlockFreqQuery Implementations
+
+// Decide how many of the hottest blocks to keep, scaling with CFG size:
+// all blocks for tiny CFGs, half for mid-size, three quarters for large.
+size_t BlockFreqQuery::numBBToGet(size_t numBB) {
+  // small CFG
+  if (numBB < 4)
+    return numBB;
+  // mid-size CFG
+  else if (numBB < 20)
+    return (numBB / 2);
+  else
+    return (numBB / 2) + (numBB / 4);
+}
+
+// Query F for speculation candidates: rank the call-containing blocks by
+// block frequency, keep the hottest numBBToGet() of them, and return the set
+// of direct callee names keyed by F's name. Returns nullopt if F has no calls.
+BlockFreqQuery::ResultTy BlockFreqQuery::operator()(Function &F) {
+  DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
+  DenseSet<StringRef> Calles;
+  SmallVector<std::pair<const BasicBlock *, uint64_t>, 8> BBFreqs;
+
+  // A fresh analysis manager per query; F's context is not shared (see the
+  // locking note at the top of this file).
+  PassBuilder PB;
+  FunctionAnalysisManager FAM;
+  PB.registerFunctionAnalyses(FAM);
+
+  auto IBBs = findBBwithCalls(F);
+
+  if (IBBs.empty())
+    return std::nullopt;
+
+  auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
+
+  for (const auto I : IBBs)
+    BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
+
+  assert(IBBs.size() == BBFreqs.size() && "BB Count Mismatch");
+
+  // Sort hottest-first (descending frequency).
+  llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference BBF,
+                         decltype(BBFreqs)::const_reference BBS) {
+    return BBF.second > BBS.second ? true : false;
+  });
+
+  // ignoring number of direct calls in a BB
+  auto Topk = numBBToGet(BBFreqs.size());
+
+  for (size_t i = 0; i < Topk; i++)
+    findCalles(BBFreqs[i].first, Calles);
+
+  assert(!Calles.empty() && "Running Analysis on Function with no calls?");
+
+  CallerAndCalles.insert({F.getName(), std::move(Calles)});
+
+  return CallerAndCalles;
+}
+
+// SequenceBBQuery Implementation
+// Number of hottest blocks to seed the CFG walk with: half of the total,
+// except that a single-block function keeps its one block.
+std::size_t SequenceBBQuery::getHottestBlocks(std::size_t TotalBlocks) {
+  if (TotalBlocks == 1)
+    return TotalBlocks;
+  return TotalBlocks / 2;
+}
+
+// FIXME : find good implementation.
+// Re-emit BBList in F's layout order by scanning F and keeping the blocks
+// that appear in BBList (O(|F| * |BBList|) membership checks).
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::rearrangeBB(const Function &F, const BlockListTy &BBList) {
+  BlockListTy RearrangedBBSet;
+
+  for (auto &Block : F)
+    if (llvm::is_contained(BBList, &Block))
+      RearrangedBBSet.push_back(&Block);
+
+  assert(RearrangedBBSet.size() == BBList.size() &&
+         "BasicBlock missing while rearranging?");
+  return RearrangedBBSet;
+}
+
+// Walk upward (through predecessors) from AtBB toward the entry block,
+// following only hot edges and skipping back-edge sources, recording visited
+// blocks (and whether they contain calls) in VisitedBlocks.
+void SequenceBBQuery::traverseToEntryBlock(const BasicBlock *AtBB,
+                                           const BlockListTy &CallerBlocks,
+                                           const BackEdgesInfoTy &BackEdgesInfo,
+                                           const BranchProbabilityInfo *BPI,
+                                           VisitedBlocksInfoTy &VisitedBlocks) {
+  auto Itr = VisitedBlocks.find(AtBB);
+  if (Itr != VisitedBlocks.end()) { // already visited.
+    if (!Itr->second.Upward)
+      return;
+    // Mark the upward direction as done so we don't re-walk it.
+    Itr->second.Upward = false;
+  } else {
+    // Create hint for newly discovered blocks.
+    WalkDirection BlockHint;
+    BlockHint.Upward = false;
+    // FIXME: Expensive Check
+    if (llvm::is_contained(CallerBlocks, AtBB))
+      BlockHint.CallerBlock = true;
+    VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
+  }
+
+  const_pred_iterator PIt = pred_begin(AtBB), EIt = pred_end(AtBB);
+  // Move this check to top, when we have code setup to launch speculative
+  // compiles for function in entry BB, this triggers the speculative compiles
+  // before running the program.
+  if (PIt == EIt) // No Preds.
+    return;
+
+  DenseSet<const BasicBlock *> PredSkipNodes;
+
+  // Since we are checking for predecessor's backedges, this Block
+  // occurs in second position.
+  for (auto &I : BackEdgesInfo)
+    if (I.second == AtBB)
+      PredSkipNodes.insert(I.first);
+
+  // Skip predecessors which are sources of back-edges.
+  for (; PIt != EIt; ++PIt)
+    // checking EdgeHotness is cheaper
+    if (BPI->isEdgeHot(*PIt, AtBB) && !PredSkipNodes.count(*PIt))
+      traverseToEntryBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
+                           VisitedBlocks);
+}
+
+// Mirror of traverseToEntryBlock: walk downward (through successors) from
+// AtBB toward the exits, following only hot edges and skipping back-edge
+// targets, recording visited blocks in VisitedBlocks.
+void SequenceBBQuery::traverseToExitBlock(const BasicBlock *AtBB,
+                                          const BlockListTy &CallerBlocks,
+                                          const BackEdgesInfoTy &BackEdgesInfo,
+                                          const BranchProbabilityInfo *BPI,
+                                          VisitedBlocksInfoTy &VisitedBlocks) {
+  auto Itr = VisitedBlocks.find(AtBB);
+  if (Itr != VisitedBlocks.end()) { // already visited.
+    if (!Itr->second.Downward)
+      return;
+    // Mark the downward direction as done so we don't re-walk it.
+    Itr->second.Downward = false;
+  } else {
+    // Create hint for newly discovered blocks.
+    WalkDirection BlockHint;
+    BlockHint.Downward = false;
+    // FIXME: Expensive Check
+    if (llvm::is_contained(CallerBlocks, AtBB))
+      BlockHint.CallerBlock = true;
+    VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
+  }
+
+  const_succ_iterator PIt = succ_begin(AtBB), EIt = succ_end(AtBB);
+  if (PIt == EIt) // No succs.
+    return;
+
+  // If there are hot edges, then compute SuccSkipNodes.
+  DenseSet<const BasicBlock *> SuccSkipNodes;
+
+  // Since we are checking for successor's backedges, this Block
+  // occurs in first position.
+  for (auto &I : BackEdgesInfo)
+    if (I.first == AtBB)
+      SuccSkipNodes.insert(I.second);
+
+  for (; PIt != EIt; ++PIt)
+    if (BPI->isEdgeHot(AtBB, *PIt) && !SuccSkipNodes.count(*PIt))
+      traverseToExitBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
+                          VisitedBlocks);
+}
+
+// Get Block frequencies for blocks and take most frequently executed block,
+// walk towards the entry block from those blocks and discover the basic blocks
+// with call.
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::queryCFG(Function &F, const BlockListTy &CallerBlocks) {
+
+  BlockFreqInfoTy BBFreqs;
+  VisitedBlocksInfoTy VisitedBlocks;
+  BackEdgesInfoTy BackEdgesInfo;
+
+  // Fresh per-query analysis manager (F's context is not shared).
+  PassBuilder PB;
+  FunctionAnalysisManager FAM;
+  PB.registerFunctionAnalyses(FAM);
+
+  auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
+
+  llvm::FindFunctionBackedges(F, BackEdgesInfo);
+
+  for (const auto I : CallerBlocks)
+    BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
+
+  // Sort hottest-first.
+  llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference Bbf,
+                         decltype(BBFreqs)::const_reference Bbs) {
+    return Bbf.second > Bbs.second;
+  });
+
+  // Keep only the hottest prefix as seeds for the walks below.
+  ArrayRef<std::pair<const BasicBlock *, uint64_t>> HotBlocksRef(BBFreqs);
+  HotBlocksRef =
+      HotBlocksRef.drop_back(BBFreqs.size() - getHottestBlocks(BBFreqs.size()));
+
+  // NOTE(review): getCachedResult may return null if BranchProbabilityAnalysis
+  // was not already run; the traversals dereference BPI — confirm the cache is
+  // guaranteed populated by BlockFrequencyAnalysis above.
+  BranchProbabilityInfo *BPI =
+      FAM.getCachedResult<BranchProbabilityAnalysis>(F);
+
+  // visit NHotBlocks,
+  // traverse upwards to entry
+  // traverse downwards to end.
+
+  for (auto I : HotBlocksRef) {
+    traverseToEntryBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
+                         VisitedBlocks);
+    traverseToExitBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
+                        VisitedBlocks);
+  }
+
+  // Keep only the visited blocks that actually contain calls.
+  BlockListTy MinCallerBlocks;
+  for (auto &I : VisitedBlocks)
+    if (I.second.CallerBlock)
+      MinCallerBlocks.push_back(std::move(I.first));
+
+  return rearrangeBB(F, MinCallerBlocks);
+}
+
+// Query F for speculation candidates: find call-containing blocks, order them
+// (directly for straight-line CFGs, via the hot-path walk otherwise), and
+// return the direct callee names keyed by F's name. Nullopt if F has no calls.
+SpeculateQuery::ResultTy SequenceBBQuery::operator()(Function &F) {
+  // reduce the number of lists!
+  DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
+  DenseSet<StringRef> Calles;
+  BlockListTy SequencedBlocks;
+  BlockListTy CallerBlocks;
+
+  CallerBlocks = findBBwithCalls(F);
+  if (CallerBlocks.empty())
+    return std::nullopt;
+
+  if (isStraightLine(F))
+    SequencedBlocks = rearrangeBB(F, CallerBlocks);
+  else
+    SequencedBlocks = queryCFG(F, CallerBlocks);
+
+  for (const auto *BB : SequencedBlocks)
+    findCalles(BB, Calles);
+
+  CallerAndCalles.insert({F.getName(), std::move(Calles)});
+  return CallerAndCalles;
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Speculation.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Speculation.cpp
new file mode 100644
index 000000000000..70b536d2feda
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Speculation.cpp
@@ -0,0 +1,142 @@
+//===---------- Speculation.cpp - Utilities for Speculation ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Speculation.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Verifier.h"
+
+namespace llvm {
+
+namespace orc {
+
+// ImplSymbolMap methods
+// Record, under the lock, each alias symbol together with its aliasee and the
+// source JITDylib it lives in. Duplicate tracking of a symbol is asserted
+// against (debug builds only).
+void ImplSymbolMap::trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD) {
+  assert(SrcJD && "Tracking on Null Source .impl dylib");
+  std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
+  for (auto &I : ImplMaps) {
+    auto It = Maps.insert({I.first, {I.second.Aliasee, SrcJD}});
+    // check rationale when independent dylibs have same symbol name?
+    assert(It.second && "ImplSymbols are already tracked for this Symbol?");
+    (void)(It);
+  }
+}
+
+// Trigger Speculative Compiles.
+// Entry point invoked from JIT'd code (via the __orc_speculate_for symbol):
+// Ptr is the Speculator instance, StubId identifies the function to
+// speculatively compile.
+void Speculator::speculateForEntryPoint(Speculator *Ptr, uint64_t StubId) {
+  assert(Ptr && " Null Address Received in orc_speculate_for ");
+  Ptr->speculateFor(ExecutorAddr(StubId));
+}
+
+// Define the two runtime symbols the instrumented IR references: the
+// speculator instance (data) and the speculation entry point (callable).
+Error Speculator::addSpeculationRuntime(JITDylib &JD,
+                                        MangleAndInterner &Mangle) {
+  ExecutorSymbolDef ThisPtr(ExecutorAddr::fromPtr(this),
+                            JITSymbolFlags::Exported);
+  ExecutorSymbolDef SpeculateForEntryPtr(
+      ExecutorAddr::fromPtr(&speculateForEntryPoint), JITSymbolFlags::Exported);
+  return JD.define(absoluteSymbols({
+      {Mangle("__orc_speculator"), ThisPtr},                // Data Symbol
+      {Mangle("__orc_speculate_for"), SpeculateForEntryPtr} // Callable Symbol
+  }));
+}
+
+// If two modules, share the same LLVMContext, different threads must
+// not access them concurrently without locking the associated LLVMContext
+// this implementation follows this contract.
+//
+// Instrument each defined function in TSM with a guarded call to the
+// speculation runtime (__orc_speculate_for), register the query results with
+// the Speculator, then forward the module to the next layer.
+void IRSpeculationLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+                              ThreadSafeModule TSM) {
+
+  assert(TSM && "Speculation Layer received Null Module ?");
+  assert(TSM.getContext().getContext() != nullptr &&
+         "Module with null LLVMContext?");
+
+  // Instrumentation of runtime calls, lock the Module
+  TSM.withModuleDo([this, &R](Module &M) {
+    auto &MContext = M.getContext();
+    // Declarations matching the symbols added by addSpeculationRuntime:
+    // void __orc_speculate_for(Speculator*, uint64_t) and the opaque
+    // __orc_speculator instance.
+    auto SpeculatorVTy = StructType::create(MContext, "Class.Speculator");
+    auto RuntimeCallTy = FunctionType::get(
+        Type::getVoidTy(MContext),
+        {PointerType::getUnqual(MContext), Type::getInt64Ty(MContext)}, false);
+    auto RuntimeCall =
+        Function::Create(RuntimeCallTy, Function::LinkageTypes::ExternalLinkage,
+                         "__orc_speculate_for", &M);
+    auto SpeclAddr = new GlobalVariable(
+        M, SpeculatorVTy, false, GlobalValue::LinkageTypes::ExternalLinkage,
+        nullptr, "__orc_speculator");
+
+    IRBuilder<> Mutator(MContext);
+
+    // QueryAnalysis allowed to transform the IR source, one such example is
+    // Simplify CFG helps the static branch prediction heuristics!
+    for (auto &Fn : M.getFunctionList()) {
+      if (!Fn.isDeclaration()) {
+
+        auto IRNames = QueryAnalysis(Fn);
+        // Instrument and register if Query has result
+        if (IRNames) {
+
+          // Emit globals for each function.
+          // i8 guard: 0 = not yet speculated, 1 = already triggered.
+          auto LoadValueTy = Type::getInt8Ty(MContext);
+          auto SpeculatorGuard = new GlobalVariable(
+              M, LoadValueTy, false, GlobalValue::LinkageTypes::InternalLinkage,
+              ConstantInt::get(LoadValueTy, 0),
+              "__orc_speculate.guard.for." + Fn.getName());
+          SpeculatorGuard->setAlignment(Align(1));
+          SpeculatorGuard->setUnnamedAddr(GlobalValue::UnnamedAddr::Local);
+
+          BasicBlock &ProgramEntry = Fn.getEntryBlock();
+          // Create BasicBlocks before the program's entry basicblock
+          BasicBlock *SpeculateBlock = BasicBlock::Create(
+              MContext, "__orc_speculate.block", &Fn, &ProgramEntry);
+          BasicBlock *SpeculateDecisionBlock = BasicBlock::Create(
+              MContext, "__orc_speculate.decision.block", &Fn, SpeculateBlock);
+
+          assert(SpeculateDecisionBlock == &Fn.getEntryBlock() &&
+                 "SpeculateDecisionBlock not updated?");
+          Mutator.SetInsertPoint(SpeculateDecisionBlock);
+
+          auto LoadGuard =
+              Mutator.CreateLoad(LoadValueTy, SpeculatorGuard, "guard.value");
+          // if just loaded value equal to 0,return true.
+          auto CanSpeculate =
+              Mutator.CreateICmpEQ(LoadGuard, ConstantInt::get(LoadValueTy, 0),
+                                   "compare.to.speculate");
+          Mutator.CreateCondBr(CanSpeculate, SpeculateBlock, &ProgramEntry);
+
+          // Speculate block: call the runtime, set the guard, fall through to
+          // the original entry block.
+          Mutator.SetInsertPoint(SpeculateBlock);
+          auto ImplAddrToUint =
+              Mutator.CreatePtrToInt(&Fn, Type::getInt64Ty(MContext));
+          Mutator.CreateCall(RuntimeCallTy, RuntimeCall,
+                             {SpeclAddr, ImplAddrToUint});
+          Mutator.CreateStore(ConstantInt::get(LoadValueTy, 1),
+                              SpeculatorGuard);
+          Mutator.CreateBr(&ProgramEntry);
+
+          assert(Mutator.GetInsertBlock()->getParent() == &Fn &&
+                 "IR builder association mismatch?");
+          S.registerSymbols(internToJITSymbols(*IRNames),
+                            &R->getTargetJITDylib());
+        }
+      }
+    }
+  });
+
+  assert(!TSM.withModuleDo([](const Module &M) { return verifyModule(M); }) &&
+         "Speculation Instrumentation breaks IR?");
+
+  NextLayer.emit(std::move(R), std::move(TSM));
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp
new file mode 100644
index 000000000000..f5118c0f2bfa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.cpp
@@ -0,0 +1,371 @@
+//===---------- ExecutorSharedMemoryMapperService.cpp -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/WindowsError.h"
+
+#include <sstream>
+
+#if defined(LLVM_ON_UNIX)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#if defined(__MVS__)
+#include "llvm/Support/BLAKE3.h"
+#include <sys/shm.h>
+#endif
+#include <unistd.h>
+#endif
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
+#if defined(_WIN32)
+// Map an orc MemProt combination to the closest Windows page-protection
+// constant; unrecognized combinations fall back to PAGE_NOACCESS.
+static DWORD getWindowsProtectionFlags(MemProt MP) {
+  if (MP == MemProt::Read)
+    return PAGE_READONLY;
+  if (MP == MemProt::Write ||
+      MP == (MemProt::Write | MemProt::Read)) {
+    // Note: PAGE_WRITE is not supported by VirtualProtect
+    return PAGE_READWRITE;
+  }
+  if (MP == (MemProt::Read | MemProt::Exec))
+    return PAGE_EXECUTE_READ;
+  if (MP == (MemProt::Read | MemProt::Write | MemProt::Exec))
+    return PAGE_EXECUTE_READWRITE;
+  if (MP == MemProt::Exec)
+    return PAGE_EXECUTE;
+
+  return PAGE_NOACCESS;
+}
+#endif
+
+// Reserve Size bytes of shared memory and return its executor-side base
+// address plus the shared-memory name the controller can open to map the same
+// region. Platform-specific: SysV shm on z/OS, POSIX shm elsewhere on Unix,
+// file mappings on Windows; unsupported platforms return an error.
+Expected<std::pair<ExecutorAddr, std::string>>
+ExecutorSharedMemoryMapperService::reserve(uint64_t Size) {
+#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
+
+#if defined(LLVM_ON_UNIX)
+
+  // Name is unique per process and per reservation (monotonic counter).
+  std::string SharedMemoryName;
+  {
+    std::stringstream SharedMemoryNameStream;
+    SharedMemoryNameStream << "/jitlink_" << sys::Process::getProcessId() << '_'
+                           << (++SharedMemoryCount);
+    SharedMemoryName = SharedMemoryNameStream.str();
+  }
+
+#if defined(__MVS__)
+  // z/OS: derive a SysV IPC key from a hash of the name.
+  ArrayRef<uint8_t> Data(
+      reinterpret_cast<const uint8_t *>(SharedMemoryName.c_str()),
+      SharedMemoryName.size());
+  auto HashedName = BLAKE3::hash<sizeof(key_t)>(Data);
+  key_t Key = *reinterpret_cast<key_t *>(HashedName.data());
+  int SharedMemoryId =
+      shmget(Key, Size, IPC_CREAT | IPC_EXCL | __IPC_SHAREAS | 0700);
+  if (SharedMemoryId < 0)
+    return errorCodeToError(errnoAsErrorCode());
+
+  void *Addr = shmat(SharedMemoryId, nullptr, 0);
+  if (Addr == reinterpret_cast<void *>(-1))
+    return errorCodeToError(errnoAsErrorCode());
+#else
+  int SharedMemoryFile =
+      shm_open(SharedMemoryName.c_str(), O_RDWR | O_CREAT | O_EXCL, 0700);
+  if (SharedMemoryFile < 0)
+    return errorCodeToError(errnoAsErrorCode());
+
+  // by default size is 0
+  if (ftruncate(SharedMemoryFile, Size) < 0)
+    return errorCodeToError(errnoAsErrorCode());
+
+  // Map with PROT_NONE; initialize() applies real protections per segment.
+  void *Addr = mmap(nullptr, Size, PROT_NONE, MAP_SHARED, SharedMemoryFile, 0);
+  if (Addr == MAP_FAILED)
+    return errorCodeToError(errnoAsErrorCode());
+
+  // The mapping keeps the region alive; the fd is no longer needed.
+  close(SharedMemoryFile);
+#endif
+
+#elif defined(_WIN32)
+
+  std::string SharedMemoryName;
+  {
+    std::stringstream SharedMemoryNameStream;
+    SharedMemoryNameStream << "jitlink_" << sys::Process::getProcessId() << '_'
+                           << (++SharedMemoryCount);
+    SharedMemoryName = SharedMemoryNameStream.str();
+  }
+
+  std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
+                                    SharedMemoryName.end());
+  HANDLE SharedMemoryFile = CreateFileMappingW(
+      INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE, Size >> 32,
+      Size & 0xffffffff, WideSharedMemoryName.c_str());
+  if (!SharedMemoryFile)
+    return errorCodeToError(mapWindowsError(GetLastError()));
+
+  void *Addr = MapViewOfFile(SharedMemoryFile,
+                             FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE, 0, 0, 0);
+  if (!Addr) {
+    CloseHandle(SharedMemoryFile);
+    return errorCodeToError(mapWindowsError(GetLastError()));
+  }
+
+#endif
+
+  // Record the reservation so initialize()/release() can find it later.
+  {
+    std::lock_guard<std::mutex> Lock(Mutex);
+    Reservations[Addr].Size = Size;
+#if defined(_WIN32)
+    Reservations[Addr].SharedMemoryFile = SharedMemoryFile;
+#endif
+  }
+
+  return std::make_pair(ExecutorAddr::fromPtr(Addr),
+                        std::move(SharedMemoryName));
+#else
+  return make_error<StringError>(
+      "SharedMemoryMapper is not supported on this platform yet",
+      inconvertibleErrorCode());
+#endif
+}
+
+// Finalize an allocation inside a reservation: apply each segment's memory
+// protections, run the finalize actions, and record the resulting deallocation
+// actions keyed by the allocation's lowest segment address (which is also the
+// returned allocation key).
+Expected<ExecutorAddr> ExecutorSharedMemoryMapperService::initialize(
+    ExecutorAddr Reservation, tpctypes::SharedMemoryFinalizeRequest &FR) {
+#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
+
+  ExecutorAddr MinAddr(~0ULL);
+
+  // Contents are already in place
+  for (auto &Segment : FR.Segments) {
+    if (Segment.Addr < MinAddr)
+      MinAddr = Segment.Addr;
+
+#if defined(LLVM_ON_UNIX)
+
+#if defined(__MVS__)
+    // TODO Is it possible to change the protection level?
+#else
+    int NativeProt = 0;
+    if ((Segment.RAG.Prot & MemProt::Read) == MemProt::Read)
+      NativeProt |= PROT_READ;
+    if ((Segment.RAG.Prot & MemProt::Write) == MemProt::Write)
+      NativeProt |= PROT_WRITE;
+    if ((Segment.RAG.Prot & MemProt::Exec) == MemProt::Exec)
+      NativeProt |= PROT_EXEC;
+
+    if (mprotect(Segment.Addr.toPtr<void *>(), Segment.Size, NativeProt))
+      return errorCodeToError(errnoAsErrorCode());
+#endif
+
+#elif defined(_WIN32)
+
+    DWORD NativeProt = getWindowsProtectionFlags(Segment.RAG.Prot);
+
+    if (!VirtualProtect(Segment.Addr.toPtr<void *>(), Segment.Size, NativeProt,
+                        &NativeProt))
+      return errorCodeToError(mapWindowsError(GetLastError()));
+
+#endif
+
+    // Executable segments need their icache invalidated after writing.
+    if ((Segment.RAG.Prot & MemProt::Exec) == MemProt::Exec)
+      sys::Memory::InvalidateInstructionCache(Segment.Addr.toPtr<void *>(),
+                                              Segment.Size);
+  }
+
+  // Run finalization actions and get deinitialization action list.
+  auto DeinitializeActions = shared::runFinalizeActions(FR.Actions);
+  if (!DeinitializeActions) {
+    return DeinitializeActions.takeError();
+  }
+
+  {
+    std::lock_guard<std::mutex> Lock(Mutex);
+    Allocations[MinAddr].DeinitializationActions =
+        std::move(*DeinitializeActions);
+    Reservations[Reservation.toPtr<void *>()].Allocations.push_back(MinAddr);
+  }
+
+  return MinAddr;
+
+#else
+  return make_error<StringError>(
+      "SharedMemoryMapper is not supported on this platform yet",
+      inconvertibleErrorCode());
+#endif
+}
+
+// Run deallocation actions for the given allocation bases (in reverse order),
+// unlink each allocation from its owning reservation, and drop its record.
+// All errors are joined and returned together rather than failing fast.
+Error ExecutorSharedMemoryMapperService::deinitialize(
+    const std::vector<ExecutorAddr> &Bases) {
+  Error AllErr = Error::success();
+
+  {
+    std::lock_guard<std::mutex> Lock(Mutex);
+
+    for (auto Base : llvm::reverse(Bases)) {
+      if (Error Err = shared::runDeallocActions(
+              Allocations[Base].DeinitializationActions)) {
+        AllErr = joinErrors(std::move(AllErr), std::move(Err));
+      }
+
+      // Remove the allocation from the allocation list of its reservation
+      for (auto &Reservation : Reservations) {
+        auto AllocationIt = llvm::find(Reservation.second.Allocations, Base);
+        if (AllocationIt != Reservation.second.Allocations.end()) {
+          Reservation.second.Allocations.erase(AllocationIt);
+          break;
+        }
+      }
+
+      Allocations.erase(Base);
+    }
+  }
+
+  return AllErr;
+}
+
+// Release whole reservations: deinitialize their remaining allocations, unmap
+// the shared memory (shmdt / munmap / UnmapViewOfFile per platform), and drop
+// the reservation records. Errors are joined across all bases.
+Error ExecutorSharedMemoryMapperService::release(
+    const std::vector<ExecutorAddr> &Bases) {
+#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
+  Error Err = Error::success();
+
+  for (auto Base : Bases) {
+    std::vector<ExecutorAddr> AllocAddrs;
+    size_t Size;
+
+#if defined(_WIN32)
+    HANDLE SharedMemoryFile;
+#endif
+
+    // Copy what we need out under the lock; the deinit/unmap calls below run
+    // unlocked.
+    {
+      std::lock_guard<std::mutex> Lock(Mutex);
+      auto &R = Reservations[Base.toPtr<void *>()];
+      Size = R.Size;
+
+#if defined(_WIN32)
+      SharedMemoryFile = R.SharedMemoryFile;
+#endif
+
+      AllocAddrs.swap(R.Allocations);
+    }
+
+    // deinitialize sub allocations
+    if (Error E = deinitialize(AllocAddrs))
+      Err = joinErrors(std::move(Err), std::move(E));
+
+#if defined(LLVM_ON_UNIX)
+
+#if defined(__MVS__)
+    (void)Size;
+
+    if (shmdt(Base.toPtr<void *>()) < 0)
+      Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
+#else
+    if (munmap(Base.toPtr<void *>(), Size) != 0)
+      Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
+#endif
+
+#elif defined(_WIN32)
+    (void)Size;
+
+    if (!UnmapViewOfFile(Base.toPtr<void *>()))
+      Err = joinErrors(std::move(Err),
+                       errorCodeToError(mapWindowsError(GetLastError())));
+
+    CloseHandle(SharedMemoryFile);
+
+#endif
+
+    std::lock_guard<std::mutex> Lock(Mutex);
+    Reservations.erase(Base.toPtr<void *>());
+  }
+
+  return Err;
+#else
+  return make_error<StringError>(
+      "SharedMemoryMapper is not supported on this platform yet",
+      inconvertibleErrorCode());
+#endif
+}
+
+// Release every outstanding reservation on shutdown.
+// NOTE(review): Reservations is read here without taking Mutex — presumably
+// shutdown runs after all concurrent users have stopped; confirm with callers.
+Error ExecutorSharedMemoryMapperService::shutdown() {
+  if (Reservations.empty())
+    return Error::success();
+
+  std::vector<ExecutorAddr> ReservationAddrs;
+  ReservationAddrs.reserve(Reservations.size());
+  for (const auto &R : Reservations)
+    ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
+
+  return release(std::move(ReservationAddrs));
+}
+
+// Publish this service's instance pointer and the four wrapper entry points
+// under their well-known bootstrap-symbol names so the controller can find
+// and call them.
+void ExecutorSharedMemoryMapperService::addBootstrapSymbols(
+    StringMap<ExecutorAddr> &M) {
+  M[rt::ExecutorSharedMemoryMapperServiceInstanceName] =
+      ExecutorAddr::fromPtr(this);
+  M[rt::ExecutorSharedMemoryMapperServiceReserveWrapperName] =
+      ExecutorAddr::fromPtr(&reserveWrapper);
+  M[rt::ExecutorSharedMemoryMapperServiceInitializeWrapperName] =
+      ExecutorAddr::fromPtr(&initializeWrapper);
+  M[rt::ExecutorSharedMemoryMapperServiceDeinitializeWrapperName] =
+      ExecutorAddr::fromPtr(&deinitializeWrapper);
+  M[rt::ExecutorSharedMemoryMapperServiceReleaseWrapperName] =
+      ExecutorAddr::fromPtr(&releaseWrapper);
+}
+
+// SPS wrapper: decode a reserve() call from serialized bytes and re-encode
+// the result for the controller.
+llvm::orc::shared::CWrapperFunctionResult
+ExecutorSharedMemoryMapperService::reserveWrapper(const char *ArgData,
+                                                  size_t ArgSize) {
+  return shared::WrapperFunction<
+             rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>::
+      handle(ArgData, ArgSize,
+             shared::makeMethodWrapperHandler(
+                 &ExecutorSharedMemoryMapperService::reserve))
+          .release();
+}
+
+// SPS wrapper: decode an initialize() call from serialized bytes and
+// re-encode the result for the controller.
+llvm::orc::shared::CWrapperFunctionResult
+ExecutorSharedMemoryMapperService::initializeWrapper(const char *ArgData,
+                                                     size_t ArgSize) {
+  return shared::WrapperFunction<
+             rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>::
+      handle(ArgData, ArgSize,
+             shared::makeMethodWrapperHandler(
+                 &ExecutorSharedMemoryMapperService::initialize))
+          .release();
+}
+
+// SPS wrapper: decode a deinitialize() call from serialized bytes and
+// re-encode the result for the controller.
+llvm::orc::shared::CWrapperFunctionResult
+ExecutorSharedMemoryMapperService::deinitializeWrapper(const char *ArgData,
+                                                       size_t ArgSize) {
+  return shared::WrapperFunction<
+             rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>::
+      handle(ArgData, ArgSize,
+             shared::makeMethodWrapperHandler(
+                 &ExecutorSharedMemoryMapperService::deinitialize))
+          .release();
+}
+
+// SPS wrapper: decode a release() call from serialized bytes and re-encode
+// the result for the controller.
+llvm::orc::shared::CWrapperFunctionResult
+ExecutorSharedMemoryMapperService::releaseWrapper(const char *ArgData,
+                                                  size_t ArgSize) {
+  return shared::WrapperFunction<
+             rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>::
+      handle(ArgData, ArgSize,
+             shared::makeMethodWrapperHandler(
+                 &ExecutorSharedMemoryMapperService::release))
+          .release();
+}
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
new file mode 100644
index 000000000000..7529d9cef67e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
@@ -0,0 +1,109 @@
+//===- JITLoaderGDB.h - Register objects via GDB JIT interface -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#include <cstdint>
+#include <mutex>
+#include <utility>
+
+#define DEBUG_TYPE "orc"
+
+// First version as landed in August 2009
+static constexpr uint32_t JitDescriptorVersion = 1;
+
+extern "C" {
+
+// We put information about the JITed function in this global, which the
+// debugger reads. Make sure to specify the version statically, because the
+// debugger checks the version before we can set it during runtime.
+LLVM_ATTRIBUTE_VISIBILITY_DEFAULT
+struct jit_descriptor __jit_debug_descriptor = {JitDescriptorVersion, 0,
+ nullptr, nullptr};
+
+// Debuggers that implement the GDB JIT interface put a special breakpoint in
+// this function.
+LLVM_ATTRIBUTE_VISIBILITY_DEFAULT
+LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
+ // The noinline and the asm prevent calls to this function from being
+ // optimized out.
+#if !defined(_MSC_VER)
+ asm volatile("" ::: "memory");
+#endif
+}
+}
+
+using namespace llvm;
+using namespace llvm::orc;
+
+// Register debug object, return error message or null for success.
+static void appendJITDebugDescriptor(const char *ObjAddr, size_t Size) {
+ LLVM_DEBUG({
+ dbgs() << "Adding debug object to GDB JIT interface "
+ << formatv("([{0:x16} -- {1:x16}])",
+ reinterpret_cast<uintptr_t>(ObjAddr),
+ reinterpret_cast<uintptr_t>(ObjAddr + Size))
+ << "\n";
+ });
+
+ jit_code_entry *E = new jit_code_entry;
+ E->symfile_addr = ObjAddr;
+ E->symfile_size = Size;
+ E->prev_entry = nullptr;
+
+ // Serialize rendezvous with the debugger as well as access to shared data.
+ static std::mutex JITDebugLock;
+ std::lock_guard<std::mutex> Lock(JITDebugLock);
+
+ // Insert this entry at the head of the list.
+ jit_code_entry *NextEntry = __jit_debug_descriptor.first_entry;
+ E->next_entry = NextEntry;
+ if (NextEntry) {
+ NextEntry->prev_entry = E;
+ }
+
+ __jit_debug_descriptor.first_entry = E;
+ __jit_debug_descriptor.relevant_entry = E;
+ __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+}
+
+extern "C" orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderGDBAllocAction(const char *Data, size_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError(SPSExecutorAddrRange, bool)>::handle(
+ Data, Size,
+ [](ExecutorAddrRange R, bool AutoRegisterCode) {
+ appendJITDebugDescriptor(R.Start.toPtr<const char *>(),
+ R.size());
+ // Run into the rendezvous breakpoint.
+ if (AutoRegisterCode)
+ __jit_debug_register_code();
+ return Error::success();
+ })
+ .release();
+}
+
+extern "C" orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderGDBWrapper(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError(SPSExecutorAddrRange, bool)>::handle(
+ Data, Size,
+ [](ExecutorAddrRange R, bool AutoRegisterCode) {
+ appendJITDebugDescriptor(R.Start.toPtr<const char *>(),
+ R.size());
+ // Run into the rendezvous breakpoint.
+ if (AutoRegisterCode)
+ __jit_debug_register_code();
+ return Error::success();
+ })
+ .release();
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp
new file mode 100644
index 000000000000..f7852b0ca62e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.cpp
@@ -0,0 +1,457 @@
+//===------- JITLoaderPerf.cpp - Register profiler objects ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Register objects for access by profilers via the perf JIT interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderPerf.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/PerfSharedStructs.h"
+
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Threading.h"
+
+#include <mutex>
+#include <optional>
+
+#ifdef __linux__
+
+#include <sys/mman.h> // mmap()
+#include <time.h> // clock_gettime(), time(), localtime_r() */
+#include <unistd.h> // for read(), close()
+
+#define DEBUG_TYPE "orc"
+
+// language identifier (XXX: should we generate something better from debug
+// info?)
+#define JIT_LANG "llvm-IR"
+#define LLVM_PERF_JIT_MAGIC \
+ ((uint32_t)'J' << 24 | (uint32_t)'i' << 16 | (uint32_t)'T' << 8 | \
+ (uint32_t)'D')
+#define LLVM_PERF_JIT_VERSION 1
+
+using namespace llvm;
+using namespace llvm::orc;
+
+struct PerfState {
+ // cache lookups
+ uint32_t Pid;
+
+ // base directory for output data
+ std::string JitPath;
+
+ // output data stream, closed via Dumpstream
+ int DumpFd = -1;
+
+ // output data stream
+ std::unique_ptr<raw_fd_ostream> Dumpstream;
+
+ // perf mmap marker
+ void *MarkerAddr = NULL;
+};
+
+// prevent concurrent dumps from messing up the output file
+static std::mutex Mutex;
+static std::optional<PerfState> State;
+
+struct RecHeader {
+ uint32_t Id;
+ uint32_t TotalSize;
+ uint64_t Timestamp;
+};
+
+struct DIR {
+ RecHeader Prefix;
+ uint64_t CodeAddr;
+ uint64_t NrEntry;
+};
+
+struct DIE {
+ uint64_t CodeAddr;
+ uint32_t Line;
+ uint32_t Discrim;
+};
+
+struct CLR {
+ RecHeader Prefix;
+ uint32_t Pid;
+ uint32_t Tid;
+ uint64_t Vma;
+ uint64_t CodeAddr;
+ uint64_t CodeSize;
+ uint64_t CodeIndex;
+};
+
+struct UWR {
+ RecHeader Prefix;
+ uint64_t UnwindDataSize;
+ uint64_t EhFrameHeaderSize;
+ uint64_t MappedSize;
+};
+
+static inline uint64_t timespec_to_ns(const struct timespec *TS) {
+ const uint64_t NanoSecPerSec = 1000000000;
+ return ((uint64_t)TS->tv_sec * NanoSecPerSec) + TS->tv_nsec;
+}
+
+static inline uint64_t perf_get_timestamp() {
+ timespec TS;
+ if (clock_gettime(CLOCK_MONOTONIC, &TS))
+ return 0;
+
+ return timespec_to_ns(&TS);
+}
+
+static void writeDebugRecord(const PerfJITDebugInfoRecord &DebugRecord) {
+ assert(State && "PerfState not initialized");
+ LLVM_DEBUG(dbgs() << "Writing debug record with "
+ << DebugRecord.Entries.size() << " entries\n");
+ [[maybe_unused]] size_t Written = 0;
+ DIR Dir{RecHeader{static_cast<uint32_t>(DebugRecord.Prefix.Id),
+ DebugRecord.Prefix.TotalSize, perf_get_timestamp()},
+ DebugRecord.CodeAddr, DebugRecord.Entries.size()};
+ State->Dumpstream->write(reinterpret_cast<const char *>(&Dir), sizeof(Dir));
+ Written += sizeof(Dir);
+ for (auto &Die : DebugRecord.Entries) {
+ DIE d{Die.Addr, Die.Lineno, Die.Discrim};
+ State->Dumpstream->write(reinterpret_cast<const char *>(&d), sizeof(d));
+ State->Dumpstream->write(Die.Name.data(), Die.Name.size() + 1);
+ Written += sizeof(d) + Die.Name.size() + 1;
+ }
+ LLVM_DEBUG(dbgs() << "wrote " << Written << " bytes of debug info\n");
+}
+
+static void writeCodeRecord(const PerfJITCodeLoadRecord &CodeRecord) {
+ assert(State && "PerfState not initialized");
+ uint32_t Tid = get_threadid();
+ LLVM_DEBUG(dbgs() << "Writing code record with code size "
+ << CodeRecord.CodeSize << " and code index "
+ << CodeRecord.CodeIndex << "\n");
+ CLR Clr{RecHeader{static_cast<uint32_t>(CodeRecord.Prefix.Id),
+ CodeRecord.Prefix.TotalSize, perf_get_timestamp()},
+ State->Pid,
+ Tid,
+ CodeRecord.Vma,
+ CodeRecord.CodeAddr,
+ CodeRecord.CodeSize,
+ CodeRecord.CodeIndex};
+ LLVM_DEBUG(dbgs() << "wrote " << sizeof(Clr) << " bytes of CLR, "
+ << CodeRecord.Name.size() + 1 << " bytes of name, "
+ << CodeRecord.CodeSize << " bytes of code\n");
+ State->Dumpstream->write(reinterpret_cast<const char *>(&Clr), sizeof(Clr));
+ State->Dumpstream->write(CodeRecord.Name.data(), CodeRecord.Name.size() + 1);
+ State->Dumpstream->write((const char *)CodeRecord.CodeAddr,
+ CodeRecord.CodeSize);
+}
+
+static void
+writeUnwindRecord(const PerfJITCodeUnwindingInfoRecord &UnwindRecord) {
+ assert(State && "PerfState not initialized");
+ dbgs() << "Writing unwind record with unwind data size "
+ << UnwindRecord.UnwindDataSize << " and EH frame header size "
+ << UnwindRecord.EHFrameHdrSize << " and mapped size "
+ << UnwindRecord.MappedSize << "\n";
+ UWR Uwr{RecHeader{static_cast<uint32_t>(UnwindRecord.Prefix.Id),
+ UnwindRecord.Prefix.TotalSize, perf_get_timestamp()},
+ UnwindRecord.UnwindDataSize, UnwindRecord.EHFrameHdrSize,
+ UnwindRecord.MappedSize};
+ LLVM_DEBUG(dbgs() << "wrote " << sizeof(Uwr) << " bytes of UWR, "
+ << UnwindRecord.EHFrameHdrSize
+ << " bytes of EH frame header, "
+ << UnwindRecord.UnwindDataSize - UnwindRecord.EHFrameHdrSize
+ << " bytes of EH frame\n");
+ State->Dumpstream->write(reinterpret_cast<const char *>(&Uwr), sizeof(Uwr));
+ if (UnwindRecord.EHFrameHdrAddr)
+ State->Dumpstream->write((const char *)UnwindRecord.EHFrameHdrAddr,
+ UnwindRecord.EHFrameHdrSize);
+ else
+ State->Dumpstream->write(UnwindRecord.EHFrameHdr.data(),
+ UnwindRecord.EHFrameHdrSize);
+ State->Dumpstream->write((const char *)UnwindRecord.EHFrameAddr,
+ UnwindRecord.UnwindDataSize -
+ UnwindRecord.EHFrameHdrSize);
+}
+
+static Error registerJITLoaderPerfImpl(const PerfJITRecordBatch &Batch) {
+ if (!State)
+ return make_error<StringError>("PerfState not initialized",
+ inconvertibleErrorCode());
+
+ // Serialize the batch
+ std::lock_guard<std::mutex> Lock(Mutex);
+ if (Batch.UnwindingRecord.Prefix.TotalSize > 0)
+ writeUnwindRecord(Batch.UnwindingRecord);
+
+ for (const auto &DebugInfo : Batch.DebugInfoRecords)
+ writeDebugRecord(DebugInfo);
+
+ for (const auto &CodeLoad : Batch.CodeLoadRecords)
+ writeCodeRecord(CodeLoad);
+
+ State->Dumpstream->flush();
+
+ return Error::success();
+}
+
+struct Header {
+ uint32_t Magic; // characters "JiTD"
+ uint32_t Version; // header version
+ uint32_t TotalSize; // total size of header
+ uint32_t ElfMach; // elf mach target
+ uint32_t Pad1; // reserved
+ uint32_t Pid;
+ uint64_t Timestamp; // timestamp
+ uint64_t Flags; // flags
+};
+
+static Error OpenMarker(PerfState &State) {
+ // We mmap the jitdump to create an MMAP RECORD in perf.data file. The mmap
+ // is captured either live (perf record running when we mmap) or in deferred
+ // mode, via /proc/PID/maps. The MMAP record is used as a marker of a jitdump
+ // file for more meta data info about the jitted code. Perf report/annotate
+ // detect this special filename and process the jitdump file.
+ //
+ // Mapping must be PROT_EXEC to ensure it is captured by perf record
+ // even when not using -d option.
+ State.MarkerAddr =
+ ::mmap(NULL, sys::Process::getPageSizeEstimate(), PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, State.DumpFd, 0);
+
+ if (State.MarkerAddr == MAP_FAILED)
+ return make_error<llvm::StringError>("could not mmap JIT marker",
+ inconvertibleErrorCode());
+
+ return Error::success();
+}
+
+void CloseMarker(PerfState &State) {
+ if (!State.MarkerAddr)
+ return;
+
+ munmap(State.MarkerAddr, sys::Process::getPageSizeEstimate());
+ State.MarkerAddr = nullptr;
+}
+
+static Expected<Header> FillMachine(PerfState &State) {
+ Header Hdr;
+ Hdr.Magic = LLVM_PERF_JIT_MAGIC;
+ Hdr.Version = LLVM_PERF_JIT_VERSION;
+ Hdr.TotalSize = sizeof(Hdr);
+ Hdr.Pid = State.Pid;
+ Hdr.Timestamp = perf_get_timestamp();
+
+ char Id[16];
+ struct {
+ uint16_t e_type;
+ uint16_t e_machine;
+ } Info;
+
+ size_t RequiredMemory = sizeof(Id) + sizeof(Info);
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
+ MemoryBuffer::getFileSlice("/proc/self/exe", RequiredMemory, 0);
+
+ // This'll not guarantee that enough data was actually read from the
+ // underlying file. Instead the trailing part of the buffer would be
+ // zeroed. Given the ELF signature check below that seems ok though,
+ // it's unlikely that the file ends just after that, and the
+ // consequence would just be that perf wouldn't recognize the
+ // signature.
+ if (!MB)
+ return make_error<llvm::StringError>("could not open /proc/self/exe",
+ MB.getError());
+
+ memcpy(&Id, (*MB)->getBufferStart(), sizeof(Id));
+ memcpy(&Info, (*MB)->getBufferStart() + sizeof(Id), sizeof(Info));
+
+ // check ELF signature
+ if (Id[0] != 0x7f || Id[1] != 'E' || Id[2] != 'L' || Id[3] != 'F')
+ return make_error<llvm::StringError>("invalid ELF signature",
+ inconvertibleErrorCode());
+
+ Hdr.ElfMach = Info.e_machine;
+
+ return Hdr;
+}
+
+static Error InitDebuggingDir(PerfState &State) {
+ time_t Time;
+ struct tm LocalTime;
+ char TimeBuffer[sizeof("YYYYMMDD")];
+ SmallString<64> Path;
+
+ // search for location to dump data to
+ if (const char *BaseDir = getenv("JITDUMPDIR"))
+ Path.append(BaseDir);
+ else if (!sys::path::home_directory(Path))
+ Path = ".";
+
+ // create debug directory
+ Path += "/.debug/jit/";
+ if (auto EC = sys::fs::create_directories(Path)) {
+ std::string ErrStr;
+ raw_string_ostream ErrStream(ErrStr);
+ ErrStream << "could not create jit cache directory " << Path << ": "
+ << EC.message() << "\n";
+ return make_error<StringError>(std::move(ErrStr), inconvertibleErrorCode());
+ }
+
+ // create unique directory for dump data related to this process
+ time(&Time);
+ localtime_r(&Time, &LocalTime);
+ strftime(TimeBuffer, sizeof(TimeBuffer), "%Y%m%d", &LocalTime);
+ Path += JIT_LANG "-jit-";
+ Path += TimeBuffer;
+
+ SmallString<128> UniqueDebugDir;
+
+ using sys::fs::createUniqueDirectory;
+ if (auto EC = createUniqueDirectory(Path, UniqueDebugDir)) {
+ std::string ErrStr;
+ raw_string_ostream ErrStream(ErrStr);
+ ErrStream << "could not create unique jit cache directory "
+ << UniqueDebugDir << ": " << EC.message() << "\n";
+ return make_error<StringError>(std::move(ErrStr), inconvertibleErrorCode());
+ }
+
+ State.JitPath = std::string(UniqueDebugDir);
+
+ return Error::success();
+}
+
+static Error registerJITLoaderPerfStartImpl() {
+ PerfState Tentative;
+ Tentative.Pid = sys::Process::getProcessId();
+ // check if clock-source is supported
+ if (!perf_get_timestamp())
+ return make_error<StringError>("kernel does not support CLOCK_MONOTONIC",
+ inconvertibleErrorCode());
+
+ if (auto Err = InitDebuggingDir(Tentative))
+ return Err;
+
+ std::string Filename;
+ raw_string_ostream FilenameBuf(Filename);
+ FilenameBuf << Tentative.JitPath << "/jit-" << Tentative.Pid << ".dump";
+
+ // Need to open ourselves, because we need to hand the FD to OpenMarker() and
+ // raw_fd_ostream doesn't expose the FD.
+ using sys::fs::openFileForWrite;
+ if (auto EC = openFileForReadWrite(FilenameBuf.str(), Tentative.DumpFd,
+ sys::fs::CD_CreateNew, sys::fs::OF_None)) {
+ std::string ErrStr;
+ raw_string_ostream ErrStream(ErrStr);
+ ErrStream << "could not open JIT dump file " << FilenameBuf.str() << ": "
+ << EC.message() << "\n";
+ return make_error<StringError>(std::move(ErrStr), inconvertibleErrorCode());
+ }
+
+ Tentative.Dumpstream =
+ std::make_unique<raw_fd_ostream>(Tentative.DumpFd, true);
+
+ auto Header = FillMachine(Tentative);
+ if (!Header)
+ return Header.takeError();
+
+ // signal this process emits JIT information
+ if (auto Err = OpenMarker(Tentative))
+ return Err;
+
+ Tentative.Dumpstream->write(reinterpret_cast<const char *>(&Header.get()),
+ sizeof(*Header));
+
+ // Everything initialized, can do profiling now.
+ if (Tentative.Dumpstream->has_error())
+ return make_error<StringError>("could not write JIT dump header",
+ inconvertibleErrorCode());
+
+ State = std::move(Tentative);
+ return Error::success();
+}
+
+static Error registerJITLoaderPerfEndImpl() {
+ if (!State)
+ return make_error<StringError>("PerfState not initialized",
+ inconvertibleErrorCode());
+
+ RecHeader Close;
+ Close.Id = static_cast<uint32_t>(PerfJITRecordType::JIT_CODE_CLOSE);
+ Close.TotalSize = sizeof(Close);
+ Close.Timestamp = perf_get_timestamp();
+ State->Dumpstream->write(reinterpret_cast<const char *>(&Close),
+ sizeof(Close));
+ if (State->MarkerAddr)
+ CloseMarker(*State);
+
+ State.reset();
+ return Error::success();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderPerfImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError(SPSPerfJITRecordBatch)>::handle(
+ Data, Size, registerJITLoaderPerfImpl)
+ .release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderPerfStart(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError()>::handle(Data, Size,
+ registerJITLoaderPerfStartImpl)
+ .release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderPerfEnd(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError()>::handle(Data, Size,
+ registerJITLoaderPerfEndImpl)
+ .release();
+}
+
+#else
+
+using namespace llvm;
+using namespace llvm::orc;
+
+static Error badOS() {
+ using namespace llvm;
+ return llvm::make_error<StringError>(
+ "unsupported OS (perf support is only available on linux!)",
+ inconvertibleErrorCode());
+}
+
+static Error badOSBatch(PerfJITRecordBatch &Batch) { return badOS(); }
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderPerfImpl(const char *Data, uint64_t Size) {
+ using namespace shared;
+ return WrapperFunction<SPSError(SPSPerfJITRecordBatch)>::handle(Data, Size,
+ badOSBatch)
+ .release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderPerfStart(const char *Data, uint64_t Size) {
+ using namespace shared;
+ return WrapperFunction<SPSError()>::handle(Data, Size, badOS).release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerJITLoaderPerfEnd(const char *Data, uint64_t Size) {
+ using namespace shared;
+ return WrapperFunction<SPSError()>::handle(Data, Size, badOS).release();
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderVTune.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderVTune.cpp
new file mode 100644
index 000000000000..57ac991ee37f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderVTune.cpp
@@ -0,0 +1,224 @@
+//===------- JITLoaderVTune.cpp - Register profiler objects -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Register objects for access by profilers via the VTune JIT interface.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderVTune.h"
+#include "llvm/ExecutionEngine/Orc/Shared/VTuneSharedStructs.h"
+#include <map>
+
+#if LLVM_USE_INTEL_JITEVENTS
+#include "IntelJITEventsWrapper.h"
+#include "ittnotify.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+class JITEventWrapper {
+public:
+ static std::unique_ptr<IntelJITEventsWrapper> Wrapper;
+};
+std::unique_ptr<IntelJITEventsWrapper> JITEventWrapper::Wrapper;
+} // namespace
+
+static Error registerJITLoaderVTuneRegisterImpl(const VTuneMethodBatch &MB) {
+ const size_t StringsSize = MB.Strings.size();
+
+ for (const auto &MethodInfo : MB.Methods) {
+ iJIT_Method_Load MethodMessage;
+ memset(&MethodMessage, 0, sizeof(iJIT_Method_Load));
+
+ MethodMessage.method_id = MethodInfo.MethodID;
+ if (MethodInfo.NameSI != 0 && MethodInfo.NameSI < StringsSize) {
+ MethodMessage.method_name =
+ const_cast<char *>(MB.Strings.at(MethodInfo.NameSI).data());
+ } else {
+ MethodMessage.method_name = NULL;
+ }
+ if (MethodInfo.ClassFileSI != 0 && MethodInfo.ClassFileSI < StringsSize) {
+ MethodMessage.class_file_name =
+ const_cast<char *>(MB.Strings.at(MethodInfo.ClassFileSI).data());
+ } else {
+ MethodMessage.class_file_name = NULL;
+ }
+ if (MethodInfo.SourceFileSI != 0 && MethodInfo.SourceFileSI < StringsSize) {
+ MethodMessage.source_file_name =
+ const_cast<char *>(MB.Strings.at(MethodInfo.SourceFileSI).data());
+ } else {
+ MethodMessage.source_file_name = NULL;
+ }
+
+ MethodMessage.method_load_address = MethodInfo.LoadAddr.toPtr<void *>();
+ MethodMessage.method_size = MethodInfo.LoadSize;
+ MethodMessage.class_id = 0;
+
+ MethodMessage.user_data = NULL;
+ MethodMessage.user_data_size = 0;
+ MethodMessage.env = iJDE_JittingAPI;
+
+ std::vector<LineNumberInfo> LineInfo;
+ for (const auto &LInfo : MethodInfo.LineTable) {
+ LineInfo.push_back(LineNumberInfo{LInfo.first, LInfo.second});
+ }
+
+ if (LineInfo.size() == 0) {
+ MethodMessage.line_number_size = 0;
+ MethodMessage.line_number_table = 0;
+ } else {
+ MethodMessage.line_number_size = LineInfo.size();
+ MethodMessage.line_number_table = &*LineInfo.begin();
+ }
+ JITEventWrapper::Wrapper->iJIT_NotifyEvent(
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, &MethodMessage);
+ }
+
+ return Error::success();
+}
+
+static void registerJITLoaderVTuneUnregisterImpl(
+ const std::vector<std::pair<uint64_t, uint64_t>> &UM) {
+ for (auto &Method : UM) {
+ JITEventWrapper::Wrapper->iJIT_NotifyEvent(
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+ const_cast<uint64_t *>(&Method.first));
+ }
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerVTuneImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ if (!JITEventWrapper::Wrapper)
+ JITEventWrapper::Wrapper.reset(new IntelJITEventsWrapper);
+
+ return WrapperFunction<SPSError(SPSVTuneMethodBatch)>::handle(
+ Data, Size, registerJITLoaderVTuneRegisterImpl)
+ .release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_unregisterVTuneImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<void(SPSVTuneUnloadedMethodIDs)>::handle(
+ Data, Size, registerJITLoaderVTuneUnregisterImpl)
+ .release();
+}
+
+// For Testing: following code comes from llvm-jitlistener.cpp in llvm tools
+namespace {
+using SourceLocations = std::vector<std::pair<std::string, unsigned int>>;
+using NativeCodeMap = std::map<uint64_t, SourceLocations>;
+NativeCodeMap ReportedDebugFuncs;
+} // namespace
+
+static int NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) {
+ switch (EventType) {
+ case iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED: {
+ if (!EventSpecificData) {
+ errs() << "Error: The JIT event listener did not provide a event data.";
+ return -1;
+ }
+ iJIT_Method_Load *msg = static_cast<iJIT_Method_Load *>(EventSpecificData);
+
+ ReportedDebugFuncs[msg->method_id];
+
+ outs() << "Method load [" << msg->method_id << "]: " << msg->method_name
+ << ", Size = " << msg->method_size << "\n";
+
+ for (unsigned int i = 0; i < msg->line_number_size; ++i) {
+ if (!msg->line_number_table) {
+ errs() << "A function with a non-zero line count had no line table.";
+ return -1;
+ }
+ std::pair<std::string, unsigned int> loc(
+ std::string(msg->source_file_name),
+ msg->line_number_table[i].LineNumber);
+ ReportedDebugFuncs[msg->method_id].push_back(loc);
+ outs() << " Line info @ " << msg->line_number_table[i].Offset << ": "
+ << msg->source_file_name << ", line "
+ << msg->line_number_table[i].LineNumber << "\n";
+ }
+ outs() << "\n";
+ } break;
+ case iJVM_EVENT_TYPE_METHOD_UNLOAD_START: {
+ if (!EventSpecificData) {
+ errs() << "Error: The JIT event listener did not provide a event data.";
+ return -1;
+ }
+ unsigned int UnloadId =
+ *reinterpret_cast<unsigned int *>(EventSpecificData);
+ assert(1 == ReportedDebugFuncs.erase(UnloadId));
+ outs() << "Method unload [" << UnloadId << "]\n";
+ } break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static iJIT_IsProfilingActiveFlags IsProfilingActive(void) {
+ // for testing, pretend we have an Intel Parallel Amplifier XE 2011
+ // instance attached
+ return iJIT_SAMPLING_ON;
+}
+
+static unsigned int GetNewMethodID(void) {
+ static unsigned int id = 0;
+ return ++id;
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_test_registerVTuneImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ JITEventWrapper::Wrapper.reset(new IntelJITEventsWrapper(
+ NotifyEvent, NULL, NULL, IsProfilingActive, 0, 0, GetNewMethodID));
+ return WrapperFunction<SPSError(SPSVTuneMethodBatch)>::handle(
+ Data, Size, registerJITLoaderVTuneRegisterImpl)
+ .release();
+}
+
+#else
+
+using namespace llvm;
+using namespace llvm::orc;
+
+static Error unsupportedBatch(const VTuneMethodBatch &MB) {
+ return llvm::make_error<StringError>("unsupported for Intel VTune",
+ inconvertibleErrorCode());
+}
+
+static void unsuppported(const std::vector<std::pair<uint64_t, uint64_t>> &UM) {
+
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_registerVTuneImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError(SPSVTuneMethodBatch)>::handle(
+ Data, Size, unsupportedBatch)
+ .release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_unregisterVTuneImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<void(SPSVTuneUnloadedMethodIDs)>::handle(Data, Size,
+ unsuppported)
+ .release();
+}
+
+extern "C" llvm::orc::shared::CWrapperFunctionResult
+llvm_orc_test_registerVTuneImpl(const char *Data, uint64_t Size) {
+ using namespace orc::shared;
+ return WrapperFunction<SPSError(SPSVTuneMethodBatch)>::handle(
+ Data, Size, unsupportedBatch)
+ .release();
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
new file mode 100644
index 000000000000..b38877955282
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
@@ -0,0 +1,108 @@
+//===------------------------ OrcRTBootstrap.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OrcRTBootstrap.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
+template <typename WriteT, typename SPSWriteT>
+static llvm::orc::shared::CWrapperFunctionResult
+writeUIntsWrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<void(SPSSequence<SPSWriteT>)>::handle(
+ ArgData, ArgSize,
+ [](std::vector<WriteT> Ws) {
+ for (auto &W : Ws)
+ *W.Addr.template toPtr<decltype(W.Value) *>() = W.Value;
+ })
+ .release();
+}
+
+static llvm::orc::shared::CWrapperFunctionResult
+writeBuffersWrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<void(SPSSequence<SPSMemoryAccessBufferWrite>)>::handle(
+ ArgData, ArgSize,
+ [](std::vector<tpctypes::BufferWrite> Ws) {
+ for (auto &W : Ws)
+ memcpy(W.Addr.template toPtr<char *>(), W.Buffer.data(),
+ W.Buffer.size());
+ })
+ .release();
+}
+
+static llvm::orc::shared::CWrapperFunctionResult
+runAsMainWrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<rt::SPSRunAsMainSignature>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddr MainAddr,
+ std::vector<std::string> Args) -> int64_t {
+ return runAsMain(MainAddr.toPtr<int (*)(int, char *[])>(), Args);
+ })
+ .release();
+}
+
+static llvm::orc::shared::CWrapperFunctionResult
+runAsVoidFunctionWrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<rt::SPSRunAsVoidFunctionSignature>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddr MainAddr) -> int32_t {
+ return runAsVoidFunction(MainAddr.toPtr<int32_t (*)(void)>());
+ })
+ .release();
+}
+
+static llvm::orc::shared::CWrapperFunctionResult
+runAsIntFunctionWrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<rt::SPSRunAsIntFunctionSignature>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddr MainAddr, int32_t Arg) -> int32_t {
+ return runAsIntFunction(MainAddr.toPtr<int32_t (*)(int32_t)>(),
+ Arg);
+ })
+ .release();
+}
+
+void addTo(StringMap<ExecutorAddr> &M) {
+ M[rt::MemoryWriteUInt8sWrapperName] = ExecutorAddr::fromPtr(
+ &writeUIntsWrapper<tpctypes::UInt8Write,
+ shared::SPSMemoryAccessUInt8Write>);
+ M[rt::MemoryWriteUInt16sWrapperName] = ExecutorAddr::fromPtr(
+ &writeUIntsWrapper<tpctypes::UInt16Write,
+ shared::SPSMemoryAccessUInt16Write>);
+ M[rt::MemoryWriteUInt32sWrapperName] = ExecutorAddr::fromPtr(
+ &writeUIntsWrapper<tpctypes::UInt32Write,
+ shared::SPSMemoryAccessUInt32Write>);
+ M[rt::MemoryWriteUInt64sWrapperName] = ExecutorAddr::fromPtr(
+ &writeUIntsWrapper<tpctypes::UInt64Write,
+ shared::SPSMemoryAccessUInt64Write>);
+ M[rt::MemoryWriteBuffersWrapperName] =
+ ExecutorAddr::fromPtr(&writeBuffersWrapper);
+ M[rt::RegisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper);
+ M[rt::DeregisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper);
+ M[rt::RunAsMainWrapperName] = ExecutorAddr::fromPtr(&runAsMainWrapper);
+ M[rt::RunAsVoidFunctionWrapperName] =
+ ExecutorAddr::fromPtr(&runAsVoidFunctionWrapper);
+ M[rt::RunAsIntFunctionWrapperName] =
+ ExecutorAddr::fromPtr(&runAsIntFunctionWrapper);
+}
+
+} // end namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
new file mode 100644
index 000000000000..92b513d0bb53
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
@@ -0,0 +1,36 @@
+//===----------------------- OrcRTBootstrap.h -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// OrcRTPrelinkImpl provides functions that should be linked into the executor
+// to bootstrap common JIT functionality (e.g. memory allocation and memory
+// access).
+//
+// Call rt_impl::addTo to add these functions to a bootstrap symbols map.
+//
+// FIXME: The functionality in this file should probably be moved to an ORC
+// runtime bootstrap library in compiler-rt.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
+#define LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
+void addTo(StringMap<ExecutorAddr> &M);
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
new file mode 100644
index 000000000000..fdae0e45da65
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
@@ -0,0 +1,183 @@
+//===--------- RegisterEHFrames.cpp - Register EH frame sections ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+#if defined(HAVE_REGISTER_FRAME) && defined(HAVE_DEREGISTER_FRAME) && \
+ !defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
+Error registerFrameWrapper(const void *P) {
+ __register_frame(P);
+ return Error::success();
+}
+
+Error deregisterFrameWrapper(const void *P) {
+ __deregister_frame(P);
+ return Error::success();
+}
+
+#else
+
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MingW runtime.
+static Error registerFrameWrapper(const void *P) {
+ static void((*RegisterFrame)(const void *)) = 0;
+
+ if (!RegisterFrame)
+ *(void **)&RegisterFrame =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+
+ if (RegisterFrame) {
+ RegisterFrame(P);
+ return Error::success();
+ }
+
+ return make_error<StringError>("could not register eh-frame: "
+ "__register_frame function not found",
+ inconvertibleErrorCode());
+}
+
+static Error deregisterFrameWrapper(const void *P) {
+ static void((*DeregisterFrame)(const void *)) = 0;
+
+ if (!DeregisterFrame)
+ *(void **)&DeregisterFrame =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+ "__deregister_frame");
+
+ if (DeregisterFrame) {
+ DeregisterFrame(P);
+ return Error::success();
+ }
+
+ return make_error<StringError>("could not deregister eh-frame: "
+ "__deregister_frame function not found",
+ inconvertibleErrorCode());
+}
+#endif
+
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+
+template <typename HandleFDEFn>
+Error walkLibunwindEHFrameSection(const char *const SectionStart,
+ size_t SectionSize, HandleFDEFn HandleFDE) {
+ const char *CurCFIRecord = SectionStart;
+ const char *End = SectionStart + SectionSize;
+ uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+
+ while (CurCFIRecord != End && Size != 0) {
+ const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
+ if (Size == 0xffffffff)
+ Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
+ else
+ Size += 4;
+ uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);
+
+ LLVM_DEBUG({
+ dbgs() << "Registering eh-frame section:\n";
+ dbgs() << "Processing " << (Offset ? "FDE" : "CIE") << " @"
+ << (void *)CurCFIRecord << ": [";
+ for (unsigned I = 0; I < Size; ++I)
+ dbgs() << format(" 0x%02" PRIx8, *(CurCFIRecord + I));
+ dbgs() << " ]\n";
+ });
+
+ if (Offset != 0)
+ if (auto Err = HandleFDE(CurCFIRecord))
+ return Err;
+
+ CurCFIRecord += Size;
+
+ Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+ }
+
+ return Error::success();
+}
+
+#endif // HAVE_UNW_ADD_DYNAMIC_FDE || __APPLE__
+
+Error registerEHFrameSection(const void *EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+ /* libgcc and libunwind __register_frame behave differently. We use the
+ * presence of __unw_add_dynamic_fde to detect libunwind. */
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+ // With libunwind, __register_frame has to be called for each FDE entry.
+ return walkLibunwindEHFrameSection(
+ static_cast<const char *>(EHFrameSectionAddr), EHFrameSectionSize,
+ registerFrameWrapper);
+#else
+ // With libgcc, __register_frame takes a single argument:
+ // a pointer to the start of the .eh_frame section.
+
+ // How can it find the end? Because crtendS.o is linked
+ // in and it has an .eh_frame section with four zero chars.
+ return registerFrameWrapper(EHFrameSectionAddr);
+#endif
+}
+
+Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+ return walkLibunwindEHFrameSection(
+ static_cast<const char *>(EHFrameSectionAddr), EHFrameSectionSize,
+ deregisterFrameWrapper);
+#else
+ return deregisterFrameWrapper(EHFrameSectionAddr);
+#endif
+}
+
+} // end namespace orc
+} // end namespace llvm
+
+static Error registerEHFrameWrapper(ExecutorAddrRange EHFrame) {
+ return llvm::orc::registerEHFrameSection(EHFrame.Start.toPtr<const void *>(),
+ EHFrame.size());
+}
+
+static Error deregisterEHFrameWrapper(ExecutorAddrRange EHFrame) {
+ return llvm::orc::deregisterEHFrameSection(
+ EHFrame.Start.toPtr<const void *>(), EHFrame.size());
+}
+
+extern "C" orc::shared::CWrapperFunctionResult
+llvm_orc_registerEHFrameSectionWrapper(const char *Data, uint64_t Size) {
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ Data, Size, registerEHFrameWrapper)
+ .release();
+}
+
+extern "C" orc::shared::CWrapperFunctionResult
+llvm_orc_deregisterEHFrameSectionWrapper(const char *Data, uint64_t Size) {
+ return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
+ Data, Size, deregisterEHFrameWrapper)
+ .release();
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp
new file mode 100644
index 000000000000..b7e256a826ca
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp
@@ -0,0 +1,124 @@
+//===--- SimpleExecutorDylibManager.cpp - Executor-side dylib management --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
+SimpleExecutorDylibManager::~SimpleExecutorDylibManager() {
+ assert(Dylibs.empty() && "shutdown not called?");
+}
+
+Expected<tpctypes::DylibHandle>
+SimpleExecutorDylibManager::open(const std::string &Path, uint64_t Mode) {
+ if (Mode != 0)
+ return make_error<StringError>("open: non-zero mode bits not yet supported",
+ inconvertibleErrorCode());
+
+ const char *PathCStr = Path.empty() ? nullptr : Path.c_str();
+ std::string ErrMsg;
+
+ auto DL = sys::DynamicLibrary::getPermanentLibrary(PathCStr, &ErrMsg);
+ if (!DL.isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+
+ std::lock_guard<std::mutex> Lock(M);
+ auto H = ExecutorAddr::fromPtr(DL.getOSSpecificHandle());
+ Dylibs.insert(DL.getOSSpecificHandle());
+ return H;
+}
+
+Expected<std::vector<ExecutorSymbolDef>>
+SimpleExecutorDylibManager::lookup(tpctypes::DylibHandle H,
+ const RemoteSymbolLookupSet &L) {
+ std::vector<ExecutorSymbolDef> Result;
+ auto DL = sys::DynamicLibrary(H.toPtr<void *>());
+
+ for (const auto &E : L) {
+ if (E.Name.empty()) {
+ if (E.Required)
+ return make_error<StringError>("Required address for empty symbol \"\"",
+ inconvertibleErrorCode());
+ else
+ Result.push_back(ExecutorSymbolDef());
+ } else {
+
+ const char *DemangledSymName = E.Name.c_str();
+#ifdef __APPLE__
+ if (E.Name.front() != '_')
+ return make_error<StringError>(Twine("MachO symbol \"") + E.Name +
+ "\" missing leading '_'",
+ inconvertibleErrorCode());
+ ++DemangledSymName;
+#endif
+
+ void *Addr = DL.getAddressOfSymbol(DemangledSymName);
+ if (!Addr && E.Required)
+ return make_error<StringError>(Twine("Missing definition for ") +
+ DemangledSymName,
+ inconvertibleErrorCode());
+
+ // FIXME: determine accurate JITSymbolFlags.
+ Result.push_back({ExecutorAddr::fromPtr(Addr), JITSymbolFlags::Exported});
+ }
+ }
+
+ return Result;
+}
+
+Error SimpleExecutorDylibManager::shutdown() {
+
+ DylibSet DS;
+ {
+ std::lock_guard<std::mutex> Lock(M);
+ std::swap(DS, Dylibs);
+ }
+
+ // There is no removal of dylibs at the moment, so nothing to do here.
+ return Error::success();
+}
+
+void SimpleExecutorDylibManager::addBootstrapSymbols(
+ StringMap<ExecutorAddr> &M) {
+ M[rt::SimpleExecutorDylibManagerInstanceName] = ExecutorAddr::fromPtr(this);
+ M[rt::SimpleExecutorDylibManagerOpenWrapperName] =
+ ExecutorAddr::fromPtr(&openWrapper);
+ M[rt::SimpleExecutorDylibManagerLookupWrapperName] =
+ ExecutorAddr::fromPtr(&lookupWrapper);
+}
+
+llvm::orc::shared::CWrapperFunctionResult
+SimpleExecutorDylibManager::openWrapper(const char *ArgData, size_t ArgSize) {
+ return shared::
+ WrapperFunction<rt::SPSSimpleExecutorDylibManagerOpenSignature>::handle(
+ ArgData, ArgSize,
+ shared::makeMethodWrapperHandler(
+ &SimpleExecutorDylibManager::open))
+ .release();
+}
+
+llvm::orc::shared::CWrapperFunctionResult
+SimpleExecutorDylibManager::lookupWrapper(const char *ArgData, size_t ArgSize) {
+ return shared::
+ WrapperFunction<rt::SPSSimpleExecutorDylibManagerLookupSignature>::handle(
+ ArgData, ArgSize,
+ shared::makeMethodWrapperHandler(
+ &SimpleExecutorDylibManager::lookup))
+ .release();
+}
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
new file mode 100644
index 000000000000..3cdffb8cd061
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
@@ -0,0 +1,262 @@
+//===- SimpleExecutorMemoryManager.cpp - Simple executor-side memory mgmt -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
+SimpleExecutorMemoryManager::~SimpleExecutorMemoryManager() {
+ assert(Allocations.empty() && "shutdown not called?");
+}
+
+Expected<ExecutorAddr> SimpleExecutorMemoryManager::allocate(uint64_t Size) {
+ std::error_code EC;
+ auto MB = sys::Memory::allocateMappedMemory(
+ Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
+ if (EC)
+ return errorCodeToError(EC);
+ std::lock_guard<std::mutex> Lock(M);
+ assert(!Allocations.count(MB.base()) && "Duplicate allocation addr");
+ Allocations[MB.base()].Size = Size;
+ return ExecutorAddr::fromPtr(MB.base());
+}
+
+Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
+ ExecutorAddr Base(~0ULL);
+ std::vector<shared::WrapperFunctionCall> DeallocationActions;
+ size_t SuccessfulFinalizationActions = 0;
+
+ if (FR.Segments.empty()) {
+ // NOTE: Finalizing nothing is currently a no-op. Should it be an error?
+ if (FR.Actions.empty())
+ return Error::success();
+ else
+ return make_error<StringError>("Finalization actions attached to empty "
+ "finalization request",
+ inconvertibleErrorCode());
+ }
+
+ for (auto &Seg : FR.Segments)
+ Base = std::min(Base, Seg.Addr);
+
+ for (auto &ActPair : FR.Actions)
+ if (ActPair.Dealloc)
+ DeallocationActions.push_back(ActPair.Dealloc);
+
+ // Get the Allocation for this finalization.
+ size_t AllocSize = 0;
+ {
+ std::lock_guard<std::mutex> Lock(M);
+ auto I = Allocations.find(Base.toPtr<void *>());
+ if (I == Allocations.end())
+ return make_error<StringError>("Attempt to finalize unrecognized "
+ "allocation " +
+ formatv("{0:x}", Base.getValue()),
+ inconvertibleErrorCode());
+ AllocSize = I->second.Size;
+ I->second.DeallocationActions = std::move(DeallocationActions);
+ }
+ ExecutorAddr AllocEnd = Base + ExecutorAddrDiff(AllocSize);
+
+ // Bail-out function: this will run deallocation actions corresponding to any
+ // completed finalization actions, then deallocate memory.
+ auto BailOut = [&](Error Err) {
+ std::pair<void *, Allocation> AllocToDestroy;
+
+ // Get allocation to destroy.
+ {
+ std::lock_guard<std::mutex> Lock(M);
+ auto I = Allocations.find(Base.toPtr<void *>());
+
+      // Check for missing allocation (effectively a double free).
+ if (I == Allocations.end())
+ return joinErrors(
+ std::move(Err),
+ make_error<StringError>("No allocation entry found "
+ "for " +
+ formatv("{0:x}", Base.getValue()),
+ inconvertibleErrorCode()));
+ AllocToDestroy = std::move(*I);
+ Allocations.erase(I);
+ }
+
+ // Run deallocation actions for all completed finalization actions.
+ while (SuccessfulFinalizationActions)
+ Err =
+ joinErrors(std::move(Err), FR.Actions[--SuccessfulFinalizationActions]
+ .Dealloc.runWithSPSRetErrorMerged());
+
+ // Deallocate memory.
+ sys::MemoryBlock MB(AllocToDestroy.first, AllocToDestroy.second.Size);
+ if (auto EC = sys::Memory::releaseMappedMemory(MB))
+ Err = joinErrors(std::move(Err), errorCodeToError(EC));
+
+ return Err;
+ };
+
+ // Copy content and apply permissions.
+ for (auto &Seg : FR.Segments) {
+
+ // Check segment ranges.
+ if (LLVM_UNLIKELY(Seg.Size < Seg.Content.size()))
+ return BailOut(make_error<StringError>(
+ formatv("Segment {0:x} content size ({1:x} bytes) "
+ "exceeds segment size ({2:x} bytes)",
+ Seg.Addr.getValue(), Seg.Content.size(), Seg.Size),
+ inconvertibleErrorCode()));
+ ExecutorAddr SegEnd = Seg.Addr + ExecutorAddrDiff(Seg.Size);
+ if (LLVM_UNLIKELY(Seg.Addr < Base || SegEnd > AllocEnd))
+ return BailOut(make_error<StringError>(
+ formatv("Segment {0:x} -- {1:x} crosses boundary of "
+ "allocation {2:x} -- {3:x}",
+ Seg.Addr.getValue(), SegEnd.getValue(), Base.getValue(),
+ AllocEnd.getValue()),
+ inconvertibleErrorCode()));
+
+ char *Mem = Seg.Addr.toPtr<char *>();
+ if (!Seg.Content.empty())
+ memcpy(Mem, Seg.Content.data(), Seg.Content.size());
+ memset(Mem + Seg.Content.size(), 0, Seg.Size - Seg.Content.size());
+ assert(Seg.Size <= std::numeric_limits<size_t>::max());
+ if (auto EC = sys::Memory::protectMappedMemory(
+ {Mem, static_cast<size_t>(Seg.Size)},
+ toSysMemoryProtectionFlags(Seg.RAG.Prot)))
+ return BailOut(errorCodeToError(EC));
+ if ((Seg.RAG.Prot & MemProt::Exec) == MemProt::Exec)
+ sys::Memory::InvalidateInstructionCache(Mem, Seg.Size);
+ }
+
+ // Run finalization actions.
+ for (auto &ActPair : FR.Actions) {
+ if (auto Err = ActPair.Finalize.runWithSPSRetErrorMerged())
+ return BailOut(std::move(Err));
+ ++SuccessfulFinalizationActions;
+ }
+
+ return Error::success();
+}
+
+Error SimpleExecutorMemoryManager::deallocate(
+ const std::vector<ExecutorAddr> &Bases) {
+ std::vector<std::pair<void *, Allocation>> AllocPairs;
+ AllocPairs.reserve(Bases.size());
+
+ // Get allocation to destroy.
+ Error Err = Error::success();
+ {
+ std::lock_guard<std::mutex> Lock(M);
+ for (auto &Base : Bases) {
+ auto I = Allocations.find(Base.toPtr<void *>());
+
+      // Check for missing allocation (effectively a double free).
+ if (I != Allocations.end()) {
+ AllocPairs.push_back(std::move(*I));
+ Allocations.erase(I);
+ } else
+ Err = joinErrors(
+ std::move(Err),
+ make_error<StringError>("No allocation entry found "
+ "for " +
+ formatv("{0:x}", Base.getValue()),
+ inconvertibleErrorCode()));
+ }
+ }
+
+ while (!AllocPairs.empty()) {
+ auto &P = AllocPairs.back();
+ Err = joinErrors(std::move(Err), deallocateImpl(P.first, P.second));
+ AllocPairs.pop_back();
+ }
+
+ return Err;
+}
+
+Error SimpleExecutorMemoryManager::shutdown() {
+
+ AllocationsMap AM;
+ {
+ std::lock_guard<std::mutex> Lock(M);
+ AM = std::move(Allocations);
+ }
+
+ Error Err = Error::success();
+ for (auto &KV : AM)
+ Err = joinErrors(std::move(Err), deallocateImpl(KV.first, KV.second));
+ return Err;
+}
+
+void SimpleExecutorMemoryManager::addBootstrapSymbols(
+ StringMap<ExecutorAddr> &M) {
+ M[rt::SimpleExecutorMemoryManagerInstanceName] = ExecutorAddr::fromPtr(this);
+ M[rt::SimpleExecutorMemoryManagerReserveWrapperName] =
+ ExecutorAddr::fromPtr(&reserveWrapper);
+ M[rt::SimpleExecutorMemoryManagerFinalizeWrapperName] =
+ ExecutorAddr::fromPtr(&finalizeWrapper);
+ M[rt::SimpleExecutorMemoryManagerDeallocateWrapperName] =
+ ExecutorAddr::fromPtr(&deallocateWrapper);
+}
+
+Error SimpleExecutorMemoryManager::deallocateImpl(void *Base, Allocation &A) {
+ Error Err = Error::success();
+
+ while (!A.DeallocationActions.empty()) {
+ Err = joinErrors(std::move(Err),
+ A.DeallocationActions.back().runWithSPSRetErrorMerged());
+ A.DeallocationActions.pop_back();
+ }
+
+ sys::MemoryBlock MB(Base, A.Size);
+ if (auto EC = sys::Memory::releaseMappedMemory(MB))
+ Err = joinErrors(std::move(Err), errorCodeToError(EC));
+
+ return Err;
+}
+
+llvm::orc::shared::CWrapperFunctionResult
+SimpleExecutorMemoryManager::reserveWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return shared::WrapperFunction<
+ rt::SPSSimpleExecutorMemoryManagerReserveSignature>::
+ handle(ArgData, ArgSize,
+ shared::makeMethodWrapperHandler(
+ &SimpleExecutorMemoryManager::allocate))
+ .release();
+}
+
+llvm::orc::shared::CWrapperFunctionResult
+SimpleExecutorMemoryManager::finalizeWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return shared::WrapperFunction<
+ rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>::
+ handle(ArgData, ArgSize,
+ shared::makeMethodWrapperHandler(
+ &SimpleExecutorMemoryManager::finalize))
+ .release();
+}
+
+llvm::orc::shared::CWrapperFunctionResult
+SimpleExecutorMemoryManager::deallocateWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return shared::WrapperFunction<
+ rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>::
+ handle(ArgData, ArgSize,
+ shared::makeMethodWrapperHandler(
+ &SimpleExecutorMemoryManager::deallocate))
+ .release();
+}
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp
new file mode 100644
index 000000000000..a585767bf474
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp
@@ -0,0 +1,299 @@
+//===---- SimpleRemoteEPCServer.cpp - EPC over simple abstract channel ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+#include "llvm/TargetParser/Host.h"
+
+#include "OrcRTBootstrap.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+ExecutorBootstrapService::~ExecutorBootstrapService() = default;
+
+SimpleRemoteEPCServer::Dispatcher::~Dispatcher() = default;
+
+#if LLVM_ENABLE_THREADS
+void SimpleRemoteEPCServer::ThreadDispatcher::dispatch(
+ unique_function<void()> Work) {
+ {
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ if (!Running)
+ return;
+ ++Outstanding;
+ }
+
+ std::thread([this, Work = std::move(Work)]() mutable {
+ Work();
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ --Outstanding;
+ OutstandingCV.notify_all();
+ }).detach();
+}
+
+void SimpleRemoteEPCServer::ThreadDispatcher::shutdown() {
+ std::unique_lock<std::mutex> Lock(DispatchMutex);
+ Running = false;
+ OutstandingCV.wait(Lock, [this]() { return Outstanding == 0; });
+}
+#endif
+
+StringMap<ExecutorAddr> SimpleRemoteEPCServer::defaultBootstrapSymbols() {
+ StringMap<ExecutorAddr> DBS;
+ rt_bootstrap::addTo(DBS);
+ return DBS;
+}
+
+Expected<SimpleRemoteEPCTransportClient::HandleMessageAction>
+SimpleRemoteEPCServer::handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
+ ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+
+ LLVM_DEBUG({
+ dbgs() << "SimpleRemoteEPCServer::handleMessage: opc = ";
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ dbgs() << "Setup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Setup?");
+ assert(!TagAddr && "Non-zero TagAddr for Setup?");
+ break;
+ case SimpleRemoteEPCOpcode::Hangup:
+ dbgs() << "Hangup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
+ assert(!TagAddr && "Non-zero TagAddr for Hangup?");
+ break;
+ case SimpleRemoteEPCOpcode::Result:
+ dbgs() << "Result";
+ assert(!TagAddr && "Non-zero TagAddr for Result?");
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ dbgs() << "CallWrapper";
+ break;
+ }
+ dbgs() << ", seqno = " << SeqNo << ", tag-addr = " << TagAddr
+ << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
+ << " bytes\n";
+ });
+
+ using UT = std::underlying_type_t<SimpleRemoteEPCOpcode>;
+ if (static_cast<UT>(OpC) > static_cast<UT>(SimpleRemoteEPCOpcode::LastOpC))
+ return make_error<StringError>("Unexpected opcode",
+ inconvertibleErrorCode());
+
+ // TODO: Clean detach message?
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ return make_error<StringError>("Unexpected Setup opcode",
+ inconvertibleErrorCode());
+ case SimpleRemoteEPCOpcode::Hangup:
+ return SimpleRemoteEPCTransportClient::EndSession;
+ case SimpleRemoteEPCOpcode::Result:
+ if (auto Err = handleResult(SeqNo, TagAddr, std::move(ArgBytes)))
+ return std::move(Err);
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ handleCallWrapper(SeqNo, TagAddr, std::move(ArgBytes));
+ break;
+ }
+ return ContinueSession;
+}
+
+Error SimpleRemoteEPCServer::waitForDisconnect() {
+ std::unique_lock<std::mutex> Lock(ServerStateMutex);
+ ShutdownCV.wait(Lock, [this]() { return RunState == ServerShutDown; });
+ return std::move(ShutdownErr);
+}
+
+void SimpleRemoteEPCServer::handleDisconnect(Error Err) {
+ PendingJITDispatchResultsMap TmpPending;
+
+ {
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ std::swap(TmpPending, PendingJITDispatchResults);
+ RunState = ServerShuttingDown;
+ }
+
+ // Send out-of-band errors to any waiting threads.
+ for (auto &KV : TmpPending)
+ KV.second->set_value(
+ shared::WrapperFunctionResult::createOutOfBandError("disconnecting"));
+
+ // Wait for dispatcher to clear.
+ D->shutdown();
+
+ // Shut down services.
+ while (!Services.empty()) {
+ ShutdownErr =
+ joinErrors(std::move(ShutdownErr), Services.back()->shutdown());
+ Services.pop_back();
+ }
+
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ ShutdownErr = joinErrors(std::move(ShutdownErr), std::move(Err));
+ RunState = ServerShutDown;
+ ShutdownCV.notify_all();
+}
+
+Error SimpleRemoteEPCServer::sendMessage(SimpleRemoteEPCOpcode OpC,
+ uint64_t SeqNo, ExecutorAddr TagAddr,
+ ArrayRef<char> ArgBytes) {
+
+ LLVM_DEBUG({
+ dbgs() << "SimpleRemoteEPCServer::sendMessage: opc = ";
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ dbgs() << "Setup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Setup?");
+ assert(!TagAddr && "Non-zero TagAddr for Setup?");
+ break;
+ case SimpleRemoteEPCOpcode::Hangup:
+ dbgs() << "Hangup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
+ assert(!TagAddr && "Non-zero TagAddr for Hangup?");
+ break;
+ case SimpleRemoteEPCOpcode::Result:
+ dbgs() << "Result";
+ assert(!TagAddr && "Non-zero TagAddr for Result?");
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ dbgs() << "CallWrapper";
+ break;
+ }
+ dbgs() << ", seqno = " << SeqNo << ", tag-addr = " << TagAddr
+ << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
+ << " bytes\n";
+ });
+ auto Err = T->sendMessage(OpC, SeqNo, TagAddr, ArgBytes);
+ LLVM_DEBUG({
+ if (Err)
+ dbgs() << " \\--> SimpleRemoteEPC::sendMessage failed\n";
+ });
+ return Err;
+}
+
+Error SimpleRemoteEPCServer::sendSetupMessage(
+ StringMap<std::vector<char>> BootstrapMap,
+ StringMap<ExecutorAddr> BootstrapSymbols) {
+
+ using namespace SimpleRemoteEPCDefaultBootstrapSymbolNames;
+
+ std::vector<char> SetupPacket;
+ SimpleRemoteEPCExecutorInfo EI;
+ EI.TargetTriple = sys::getProcessTriple();
+ if (auto PageSize = sys::Process::getPageSize())
+ EI.PageSize = *PageSize;
+ else
+ return PageSize.takeError();
+ EI.BootstrapMap = std::move(BootstrapMap);
+ EI.BootstrapSymbols = std::move(BootstrapSymbols);
+
+ assert(!EI.BootstrapSymbols.count(ExecutorSessionObjectName) &&
+ "Dispatch context name should not be set");
+ assert(!EI.BootstrapSymbols.count(DispatchFnName) &&
+ "Dispatch function name should not be set");
+ EI.BootstrapSymbols[ExecutorSessionObjectName] = ExecutorAddr::fromPtr(this);
+ EI.BootstrapSymbols[DispatchFnName] = ExecutorAddr::fromPtr(jitDispatchEntry);
+ EI.BootstrapSymbols[rt::RegisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper);
+ EI.BootstrapSymbols[rt::DeregisterEHFrameSectionWrapperName] =
+ ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper);
+
+ using SPSSerialize =
+ shared::SPSArgList<shared::SPSSimpleRemoteEPCExecutorInfo>;
+ auto SetupPacketBytes =
+ shared::WrapperFunctionResult::allocate(SPSSerialize::size(EI));
+ shared::SPSOutputBuffer OB(SetupPacketBytes.data(), SetupPacketBytes.size());
+ if (!SPSSerialize::serialize(OB, EI))
+ return make_error<StringError>("Could not send setup packet",
+ inconvertibleErrorCode());
+
+ return sendMessage(SimpleRemoteEPCOpcode::Setup, 0, ExecutorAddr(),
+ {SetupPacketBytes.data(), SetupPacketBytes.size()});
+}
+
+Error SimpleRemoteEPCServer::handleResult(
+ uint64_t SeqNo, ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+ std::promise<shared::WrapperFunctionResult> *P = nullptr;
+ {
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ auto I = PendingJITDispatchResults.find(SeqNo);
+ if (I == PendingJITDispatchResults.end())
+ return make_error<StringError>("No call for sequence number " +
+ Twine(SeqNo),
+ inconvertibleErrorCode());
+ P = I->second;
+ PendingJITDispatchResults.erase(I);
+ releaseSeqNo(SeqNo);
+ }
+ auto R = shared::WrapperFunctionResult::allocate(ArgBytes.size());
+ memcpy(R.data(), ArgBytes.data(), ArgBytes.size());
+ P->set_value(std::move(R));
+ return Error::success();
+}
+
+void SimpleRemoteEPCServer::handleCallWrapper(
+ uint64_t RemoteSeqNo, ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+ D->dispatch([this, RemoteSeqNo, TagAddr, ArgBytes = std::move(ArgBytes)]() {
+ using WrapperFnTy =
+ shared::CWrapperFunctionResult (*)(const char *, size_t);
+ auto *Fn = TagAddr.toPtr<WrapperFnTy>();
+ shared::WrapperFunctionResult ResultBytes(
+ Fn(ArgBytes.data(), ArgBytes.size()));
+ if (auto Err = sendMessage(SimpleRemoteEPCOpcode::Result, RemoteSeqNo,
+ ExecutorAddr(),
+ {ResultBytes.data(), ResultBytes.size()}))
+ ReportError(std::move(Err));
+ });
+}
+
+shared::WrapperFunctionResult
+SimpleRemoteEPCServer::doJITDispatch(const void *FnTag, const char *ArgData,
+ size_t ArgSize) {
+ uint64_t SeqNo;
+ std::promise<shared::WrapperFunctionResult> ResultP;
+ auto ResultF = ResultP.get_future();
+ {
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ if (RunState != ServerRunning)
+ return shared::WrapperFunctionResult::createOutOfBandError(
+ "jit_dispatch not available (EPC server shut down)");
+
+ SeqNo = getNextSeqNo();
+ assert(!PendingJITDispatchResults.count(SeqNo) && "SeqNo already in use");
+ PendingJITDispatchResults[SeqNo] = &ResultP;
+ }
+
+ if (auto Err = sendMessage(SimpleRemoteEPCOpcode::CallWrapper, SeqNo,
+ ExecutorAddr::fromPtr(FnTag), {ArgData, ArgSize}))
+ ReportError(std::move(Err));
+
+ return ResultF.get();
+}
+
+shared::CWrapperFunctionResult
+SimpleRemoteEPCServer::jitDispatchEntry(void *DispatchCtx, const void *FnTag,
+ const char *ArgData, size_t ArgSize) {
+ return reinterpret_cast<SimpleRemoteEPCServer *>(DispatchCtx)
+ ->doJITDispatch(FnTag, ArgData, ArgSize)
+ .release();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp
new file mode 100644
index 000000000000..7546b3f8d0fa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp
@@ -0,0 +1,47 @@
+//===--- TargetExecutionUtils.cpp - Execution utils for target processes --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+int runAsMain(int (*Main)(int, char *[]), ArrayRef<std::string> Args,
+ std::optional<StringRef> ProgramName) {
+ std::vector<std::unique_ptr<char[]>> ArgVStorage;
+ std::vector<char *> ArgV;
+
+ ArgVStorage.reserve(Args.size() + (ProgramName ? 1 : 0));
+ ArgV.reserve(Args.size() + 1 + (ProgramName ? 1 : 0));
+
+ if (ProgramName) {
+ ArgVStorage.push_back(std::make_unique<char[]>(ProgramName->size() + 1));
+ llvm::copy(*ProgramName, &ArgVStorage.back()[0]);
+ ArgVStorage.back()[ProgramName->size()] = '\0';
+ ArgV.push_back(ArgVStorage.back().get());
+ }
+
+ for (const auto &Arg : Args) {
+ ArgVStorage.push_back(std::make_unique<char[]>(Arg.size() + 1));
+ llvm::copy(Arg, &ArgVStorage.back()[0]);
+ ArgVStorage.back()[Arg.size()] = '\0';
+ ArgV.push_back(ArgVStorage.back().get());
+ }
+ ArgV.push_back(nullptr);
+
+ return Main(Args.size() + !!ProgramName, ArgV.data());
+}
+
+int runAsVoidFunction(int (*Func)(void)) { return Func(); }
+
+int runAsIntFunction(int (*Func)(int), int Arg) { return Func(Arg); }
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TaskDispatch.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TaskDispatch.cpp
new file mode 100644
index 000000000000..4ac2a4209185
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/TaskDispatch.cpp
@@ -0,0 +1,85 @@
+//===------------ TaskDispatch.cpp - ORC task dispatch utils --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TaskDispatch.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+
+namespace llvm {
+namespace orc {
+
+char Task::ID = 0;
+char GenericNamedTask::ID = 0;
+const char *GenericNamedTask::DefaultDescription = "Generic Task";
+
+void Task::anchor() {}
+TaskDispatcher::~TaskDispatcher() = default;
+
+void InPlaceTaskDispatcher::dispatch(std::unique_ptr<Task> T) { T->run(); }
+
+void InPlaceTaskDispatcher::shutdown() {}
+
+#if LLVM_ENABLE_THREADS
+void DynamicThreadPoolTaskDispatcher::dispatch(std::unique_ptr<Task> T) {
+ bool IsMaterializationTask = isa<MaterializationTask>(*T);
+
+ {
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+
+ if (IsMaterializationTask) {
+
+ // If this is a materialization task and there are too many running
+ // already then queue this one up and return early.
+ if (MaxMaterializationThreads &&
+ NumMaterializationThreads == *MaxMaterializationThreads) {
+ MaterializationTaskQueue.push_back(std::move(T));
+ return;
+ }
+
+ // Otherwise record that we have a materialization task running.
+ ++NumMaterializationThreads;
+ }
+
+ ++Outstanding;
+ }
+
+ std::thread([this, T = std::move(T), IsMaterializationTask]() mutable {
+ while (true) {
+
+ // Run the task.
+ T->run();
+
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ if (!MaterializationTaskQueue.empty()) {
+ // If there are any materialization tasks running then steal that work.
+ T = std::move(MaterializationTaskQueue.front());
+ MaterializationTaskQueue.pop_front();
+ if (!IsMaterializationTask) {
+ ++NumMaterializationThreads;
+ IsMaterializationTask = true;
+ }
+ } else {
+ // Otherwise decrement work counters.
+ if (IsMaterializationTask)
+ --NumMaterializationThreads;
+ --Outstanding;
+ OutstandingCV.notify_all();
+ return;
+ }
+ }
+ }).detach();
+}
+
+void DynamicThreadPoolTaskDispatcher::shutdown() {
+ std::unique_lock<std::mutex> Lock(DispatchMutex);
+ Running = false;
+ OutstandingCV.wait(Lock, [this]() { return Outstanding == 0; });
+}
+#endif
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
new file mode 100644
index 000000000000..2e128dd23744
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
@@ -0,0 +1,64 @@
+//===-- ThreadSafeModule.cpp - Thread safe Module, Context, Utilities -----===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+
+namespace llvm {
+namespace orc {
+
+ThreadSafeModule cloneToNewContext(const ThreadSafeModule &TSM,
+ GVPredicate ShouldCloneDef,
+ GVModifier UpdateClonedDefSource) {
+ assert(TSM && "Can not clone null module");
+
+ if (!ShouldCloneDef)
+ ShouldCloneDef = [](const GlobalValue &) { return true; };
+
+ return TSM.withModuleDo([&](Module &M) {
+ SmallVector<char, 1> ClonedModuleBuffer;
+
+ {
+ std::set<GlobalValue *> ClonedDefsInSrc;
+ ValueToValueMapTy VMap;
+ auto Tmp = CloneModule(M, VMap, [&](const GlobalValue *GV) {
+ if (ShouldCloneDef(*GV)) {
+ ClonedDefsInSrc.insert(const_cast<GlobalValue *>(GV));
+ return true;
+ }
+ return false;
+ });
+
+ if (UpdateClonedDefSource)
+ for (auto *GV : ClonedDefsInSrc)
+ UpdateClonedDefSource(*GV);
+
+ BitcodeWriter BCWriter(ClonedModuleBuffer);
+
+ BCWriter.writeModule(*Tmp);
+ BCWriter.writeSymtab();
+ BCWriter.writeStrtab();
+ }
+
+ MemoryBufferRef ClonedModuleBufferRef(
+ StringRef(ClonedModuleBuffer.data(), ClonedModuleBuffer.size()),
+ "cloned module buffer");
+ ThreadSafeContext NewTSCtx(std::make_unique<LLVMContext>());
+
+ auto ClonedModule = cantFail(
+ parseBitcodeFile(ClonedModuleBufferRef, *NewTSCtx.getContext()));
+ ClonedModule->setModuleIdentifier(M.getName());
+ return ThreadSafeModule(std::move(ClonedModule), std::move(NewTSCtx));
+ });
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
new file mode 100644
index 000000000000..e2b5ce49ba2e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
@@ -0,0 +1,505 @@
+//===-- PerfJITEventListener.cpp - Tell Linux's perf about JITted code ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object that tells perf about JITted
+// functions, including source line information.
+//
+// Documentation for perf jit integration is available at:
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/perf/Documentation/jitdump-specification.txt
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/perf/Documentation/jit-interface.txt
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
+#include <mutex>
+
+#include <sys/mman.h> // mmap()
+#include <time.h>      // clock_gettime(), time(), localtime_r()
+#include <unistd.h> // for read(), close()
+
+using namespace llvm;
+using namespace llvm::object;
+typedef DILineInfoSpecifier::FileLineInfoKind FileLineInfoKind;
+
+namespace {
+
+// language identifier (XXX: should we generate something better from debug
+// info?)
+#define JIT_LANG "llvm-IR"
+#define LLVM_PERF_JIT_MAGIC \
+ ((uint32_t)'J' << 24 | (uint32_t)'i' << 16 | (uint32_t)'T' << 8 | \
+ (uint32_t)'D')
+#define LLVM_PERF_JIT_VERSION 1
+
+// bit 0: set if the jitdump file is using an architecture-specific timestamp
+// clock source
+#define JITDUMP_FLAGS_ARCH_TIMESTAMP (1ULL << 0)
+
+struct LLVMPerfJitHeader;
+
+class PerfJITEventListener : public JITEventListener {
+public:
+ PerfJITEventListener();
+ ~PerfJITEventListener() {
+ if (MarkerAddr)
+ CloseMarker();
+ }
+
+ void notifyObjectLoaded(ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+ void notifyFreeingObject(ObjectKey K) override;
+
+private:
+ bool InitDebuggingDir();
+ bool OpenMarker();
+ void CloseMarker();
+ static bool FillMachine(LLVMPerfJitHeader &hdr);
+
+ void NotifyCode(Expected<llvm::StringRef> &Symbol, uint64_t CodeAddr,
+ uint64_t CodeSize);
+ void NotifyDebug(uint64_t CodeAddr, DILineInfoTable Lines);
+
+ // cache lookups
+ sys::Process::Pid Pid;
+
+ // base directory for output data
+ std::string JitPath;
+
+ // output data stream, closed via Dumpstream
+ int DumpFd = -1;
+
+ // output data stream
+ std::unique_ptr<raw_fd_ostream> Dumpstream;
+
+ // prevent concurrent dumps from messing up the output file
+ sys::Mutex Mutex;
+
+ // perf mmap marker
+ void *MarkerAddr = NULL;
+
+ // perf support ready
+ bool SuccessfullyInitialized = false;
+
+ // identifier for functions, primarily to identify when moving them around
+ uint64_t CodeGeneration = 1;
+};
+
+// The following are POD struct definitions from the perf jit specification
+
+enum LLVMPerfJitRecordType {
+ JIT_CODE_LOAD = 0,
+ JIT_CODE_MOVE = 1, // not emitted, code isn't moved
+ JIT_CODE_DEBUG_INFO = 2,
+ JIT_CODE_CLOSE = 3, // not emitted, unnecessary
+ JIT_CODE_UNWINDING_INFO = 4, // not emitted
+
+ JIT_CODE_MAX
+};
+
+struct LLVMPerfJitHeader {
+ uint32_t Magic; // characters "JiTD"
+ uint32_t Version; // header version
+ uint32_t TotalSize; // total size of header
+ uint32_t ElfMach; // elf mach target
+ uint32_t Pad1; // reserved
+ uint32_t Pid;
+ uint64_t Timestamp; // timestamp
+ uint64_t Flags; // flags
+};
+
+// record prefix (mandatory in each record)
+struct LLVMPerfJitRecordPrefix {
+ uint32_t Id; // record type identifier
+ uint32_t TotalSize;
+ uint64_t Timestamp;
+};
+
+struct LLVMPerfJitRecordCodeLoad {
+ LLVMPerfJitRecordPrefix Prefix;
+
+ uint32_t Pid;
+ uint32_t Tid;
+ uint64_t Vma;
+ uint64_t CodeAddr;
+ uint64_t CodeSize;
+ uint64_t CodeIndex;
+};
+
+struct LLVMPerfJitDebugEntry {
+ uint64_t Addr;
+ int Lineno; // source line number starting at 1
+ int Discrim; // column discriminator, 0 is default
+ // followed by null terminated filename, \xff\0 if same as previous entry
+};
+
+struct LLVMPerfJitRecordDebugInfo {
+ LLVMPerfJitRecordPrefix Prefix;
+
+ uint64_t CodeAddr;
+ uint64_t NrEntry;
+ // followed by NrEntry LLVMPerfJitDebugEntry records
+};
+
+static inline uint64_t timespec_to_ns(const struct timespec *ts) {
+ const uint64_t NanoSecPerSec = 1000000000;
+ return ((uint64_t)ts->tv_sec * NanoSecPerSec) + ts->tv_nsec;
+}
+
+static inline uint64_t perf_get_timestamp(void) {
+ struct timespec ts;
+ int ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts);
+ if (ret)
+ return 0;
+
+ return timespec_to_ns(&ts);
+}
+
+PerfJITEventListener::PerfJITEventListener()
+ : Pid(sys::Process::getProcessId()) {
+ // check if clock-source is supported
+ if (!perf_get_timestamp()) {
+ errs() << "kernel does not support CLOCK_MONOTONIC\n";
+ return;
+ }
+
+ if (!InitDebuggingDir()) {
+ errs() << "could not initialize debugging directory\n";
+ return;
+ }
+
+ std::string Filename;
+ raw_string_ostream FilenameBuf(Filename);
+ FilenameBuf << JitPath << "/jit-" << Pid << ".dump";
+
+ // Need to open ourselves, because we need to hand the FD to OpenMarker() and
+ // raw_fd_ostream doesn't expose the FD.
+ using sys::fs::openFileForWrite;
+ if (auto EC =
+ openFileForReadWrite(FilenameBuf.str(), DumpFd,
+ sys::fs::CD_CreateNew, sys::fs::OF_None)) {
+ errs() << "could not open JIT dump file " << FilenameBuf.str() << ": "
+ << EC.message() << "\n";
+ return;
+ }
+
+ Dumpstream = std::make_unique<raw_fd_ostream>(DumpFd, true);
+
+ LLVMPerfJitHeader Header = {0, 0, 0, 0, 0, 0, 0, 0};
+ if (!FillMachine(Header))
+ return;
+
+ // signal this process emits JIT information
+ if (!OpenMarker())
+ return;
+
+ // emit dumpstream header
+ Header.Magic = LLVM_PERF_JIT_MAGIC;
+ Header.Version = LLVM_PERF_JIT_VERSION;
+ Header.TotalSize = sizeof(Header);
+ Header.Pid = Pid;
+ Header.Timestamp = perf_get_timestamp();
+ Dumpstream->write(reinterpret_cast<const char *>(&Header), sizeof(Header));
+
+ // Everything initialized, can do profiling now.
+ if (!Dumpstream->has_error())
+ SuccessfullyInitialized = true;
+}
+
+void PerfJITEventListener::notifyObjectLoaded(
+ ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+
+ if (!SuccessfullyInitialized)
+ return;
+
+ OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+ const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
+
+ // Get the address of the object image for use as a unique identifier
+ std::unique_ptr<DIContext> Context = DWARFContext::create(DebugObj);
+
+ // Use symbol info to iterate over functions in the object.
+ for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
+ SymbolRef Sym = P.first;
+ std::string SourceFileName;
+
+ Expected<SymbolRef::Type> SymTypeOrErr = Sym.getType();
+ if (!SymTypeOrErr) {
+      // There's not much we can do with errors here
+ consumeError(SymTypeOrErr.takeError());
+ continue;
+ }
+ SymbolRef::Type SymType = *SymTypeOrErr;
+ if (SymType != SymbolRef::ST_Function)
+ continue;
+
+ Expected<StringRef> Name = Sym.getName();
+ if (!Name) {
+ consumeError(Name.takeError());
+ continue;
+ }
+
+ Expected<uint64_t> AddrOrErr = Sym.getAddress();
+ if (!AddrOrErr) {
+ consumeError(AddrOrErr.takeError());
+ continue;
+ }
+ uint64_t Size = P.second;
+ object::SectionedAddress Address;
+ Address.Address = *AddrOrErr;
+
+ uint64_t SectionIndex = object::SectionedAddress::UndefSection;
+ if (auto SectOrErr = Sym.getSection())
+ if (*SectOrErr != Obj.section_end())
+ SectionIndex = SectOrErr.get()->getIndex();
+
+ // According to spec debugging info has to come before loading the
+ // corresponding code load.
+ DILineInfoTable Lines = Context->getLineInfoForAddressRange(
+ {*AddrOrErr, SectionIndex}, Size, FileLineInfoKind::AbsoluteFilePath);
+
+ NotifyDebug(*AddrOrErr, Lines);
+ NotifyCode(Name, *AddrOrErr, Size);
+ }
+
+ // avoid races with writes
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ Dumpstream->flush();
+}
+
+void PerfJITEventListener::notifyFreeingObject(ObjectKey K) {
+  // perf currently doesn't have an interface for unloading, but munmap()ing
+  // the code section is enough to retire the mapping, so that's ok.
+}
+
+bool PerfJITEventListener::InitDebuggingDir() {
+ time_t Time;
+ struct tm LocalTime;
+ char TimeBuffer[sizeof("YYYYMMDD")];
+ SmallString<64> Path;
+
+ // search for location to dump data to
+ if (const char *BaseDir = getenv("JITDUMPDIR"))
+ Path.append(BaseDir);
+ else if (!sys::path::home_directory(Path))
+ Path = ".";
+
+ // create debug directory
+ Path += "/.debug/jit/";
+ if (auto EC = sys::fs::create_directories(Path)) {
+ errs() << "could not create jit cache directory " << Path << ": "
+ << EC.message() << "\n";
+ return false;
+ }
+
+ // create unique directory for dump data related to this process
+ time(&Time);
+ localtime_r(&Time, &LocalTime);
+ strftime(TimeBuffer, sizeof(TimeBuffer), "%Y%m%d", &LocalTime);
+ Path += JIT_LANG "-jit-";
+ Path += TimeBuffer;
+
+ SmallString<128> UniqueDebugDir;
+
+ using sys::fs::createUniqueDirectory;
+ if (auto EC = createUniqueDirectory(Path, UniqueDebugDir)) {
+ errs() << "could not create unique jit cache directory " << UniqueDebugDir
+ << ": " << EC.message() << "\n";
+ return false;
+ }
+
+ JitPath = std::string(UniqueDebugDir.str());
+
+ return true;
+}
+
+bool PerfJITEventListener::OpenMarker() {
+ // We mmap the jitdump to create an MMAP RECORD in perf.data file. The mmap
+ // is captured either live (perf record running when we mmap) or in deferred
+ // mode, via /proc/PID/maps. The MMAP record is used as a marker of a jitdump
+ // file for more meta data info about the jitted code. Perf report/annotate
+ // detect this special filename and process the jitdump file.
+ //
+ // Mapping must be PROT_EXEC to ensure it is captured by perf record
+ // even when not using -d option.
+ MarkerAddr = ::mmap(NULL, sys::Process::getPageSizeEstimate(),
+ PROT_READ | PROT_EXEC, MAP_PRIVATE, DumpFd, 0);
+
+ if (MarkerAddr == MAP_FAILED) {
+ errs() << "could not mmap JIT marker\n";
+ return false;
+ }
+ return true;
+}
+
+void PerfJITEventListener::CloseMarker() {
+ if (!MarkerAddr)
+ return;
+
+ munmap(MarkerAddr, sys::Process::getPageSizeEstimate());
+ MarkerAddr = nullptr;
+}
+
+bool PerfJITEventListener::FillMachine(LLVMPerfJitHeader &hdr) {
+ char id[16];
+ struct {
+ uint16_t e_type;
+ uint16_t e_machine;
+ } info;
+
+ size_t RequiredMemory = sizeof(id) + sizeof(info);
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
+ MemoryBuffer::getFileSlice("/proc/self/exe",
+ RequiredMemory,
+ 0);
+
+ // This'll not guarantee that enough data was actually read from the
+ // underlying file. Instead the trailing part of the buffer would be
+ // zeroed. Given the ELF signature check below that seems ok though,
+ // it's unlikely that the file ends just after that, and the
+ // consequence would just be that perf wouldn't recognize the
+ // signature.
+ if (auto EC = MB.getError()) {
+ errs() << "could not open /proc/self/exe: " << EC.message() << "\n";
+ return false;
+ }
+
+ memcpy(&id, (*MB)->getBufferStart(), sizeof(id));
+ memcpy(&info, (*MB)->getBufferStart() + sizeof(id), sizeof(info));
+
+ // check ELF signature
+ if (id[0] != 0x7f || id[1] != 'E' || id[2] != 'L' || id[3] != 'F') {
+ errs() << "invalid elf signature\n";
+ return false;
+ }
+
+ hdr.ElfMach = info.e_machine;
+
+ return true;
+}
+
+void PerfJITEventListener::NotifyCode(Expected<llvm::StringRef> &Symbol,
+ uint64_t CodeAddr, uint64_t CodeSize) {
+ assert(SuccessfullyInitialized);
+
+ // 0 length functions can't have samples.
+ if (CodeSize == 0)
+ return;
+
+ LLVMPerfJitRecordCodeLoad rec;
+ rec.Prefix.Id = JIT_CODE_LOAD;
+ rec.Prefix.TotalSize = sizeof(rec) + // debug record itself
+ Symbol->size() + 1 + // symbol name
+ CodeSize; // and code
+ rec.Prefix.Timestamp = perf_get_timestamp();
+
+ rec.CodeSize = CodeSize;
+ rec.Vma = CodeAddr;
+ rec.CodeAddr = CodeAddr;
+ rec.Pid = Pid;
+ rec.Tid = get_threadid();
+
+ // avoid interspersing output
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ rec.CodeIndex = CodeGeneration++; // under lock!
+
+ Dumpstream->write(reinterpret_cast<const char *>(&rec), sizeof(rec));
+ Dumpstream->write(Symbol->data(), Symbol->size() + 1);
+ Dumpstream->write(reinterpret_cast<const char *>(CodeAddr), CodeSize);
+}
+
+void PerfJITEventListener::NotifyDebug(uint64_t CodeAddr,
+ DILineInfoTable Lines) {
+ assert(SuccessfullyInitialized);
+
+ // Didn't get useful debug info.
+ if (Lines.empty())
+ return;
+
+ LLVMPerfJitRecordDebugInfo rec;
+ rec.Prefix.Id = JIT_CODE_DEBUG_INFO;
+ rec.Prefix.TotalSize = sizeof(rec); // will be increased further
+ rec.Prefix.Timestamp = perf_get_timestamp();
+ rec.CodeAddr = CodeAddr;
+ rec.NrEntry = Lines.size();
+
+ // compute total size of record (variable due to filenames)
+ DILineInfoTable::iterator Begin = Lines.begin();
+ DILineInfoTable::iterator End = Lines.end();
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ DILineInfo &line = It->second;
+ rec.Prefix.TotalSize += sizeof(LLVMPerfJitDebugEntry);
+ rec.Prefix.TotalSize += line.FileName.size() + 1;
+ }
+
+ // The debug_entry describes the source line information. It is defined as
+ // follows in order:
+ // * uint64_t code_addr: address of function for which the debug information
+ // is generated
+ // * uint32_t line : source file line number (starting at 1)
+ // * uint32_t discrim : column discriminator, 0 is default
+ // * char name[n] : source file name in ASCII, including null termination
+
+ // avoid interspersing output
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ Dumpstream->write(reinterpret_cast<const char *>(&rec), sizeof(rec));
+
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ LLVMPerfJitDebugEntry LineInfo;
+ DILineInfo &Line = It->second;
+
+ LineInfo.Addr = It->first;
+ // The function re-created by perf is preceded by a elf
+ // header. Need to adjust for that, otherwise the results are
+ // wrong.
+ LineInfo.Addr += 0x40;
+ LineInfo.Lineno = Line.Line;
+ LineInfo.Discrim = Line.Discriminator;
+
+ Dumpstream->write(reinterpret_cast<const char *>(&LineInfo),
+ sizeof(LineInfo));
+ Dumpstream->write(Line.FileName.c_str(), Line.FileName.size() + 1);
+ }
+}
+
+} // end anonymous namespace
+
+namespace llvm {
+JITEventListener *JITEventListener::createPerfJITEventListener() {
+ // There should be only a single event listener per process, otherwise perf
+ // gets confused.
+ static PerfJITEventListener PerfListener;
+ return &PerfListener;
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void)
+{
+ return wrap(JITEventListener::createPerfJITEventListener());
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
new file mode 100644
index 000000000000..c153b4464568
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
@@ -0,0 +1,169 @@
+//===----------- JITSymbol.cpp - JITSymbol class implementation -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// JITSymbol class implementation plus helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Object/ObjectFile.h"
+
+using namespace llvm;
+
+JITSymbolFlags llvm::JITSymbolFlags::fromGlobalValue(const GlobalValue &GV) {
+ assert(GV.hasName() && "Can't get flags for anonymous symbol");
+
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+ if (GV.hasWeakLinkage() || GV.hasLinkOnceLinkage())
+ Flags |= JITSymbolFlags::Weak;
+ if (GV.hasCommonLinkage())
+ Flags |= JITSymbolFlags::Common;
+ if (!GV.hasLocalLinkage() && !GV.hasHiddenVisibility())
+ Flags |= JITSymbolFlags::Exported;
+
+ if (isa<Function>(GV))
+ Flags |= JITSymbolFlags::Callable;
+ else if (isa<GlobalAlias>(GV) &&
+ isa<Function>(cast<GlobalAlias>(GV).getAliasee()))
+ Flags |= JITSymbolFlags::Callable;
+
+ // Check for a linker-private-global-prefix on the symbol name, in which
+ // case it must be marked as non-exported.
+ if (auto *M = GV.getParent()) {
+ const auto &DL = M->getDataLayout();
+ StringRef LPGP = DL.getLinkerPrivateGlobalPrefix();
+ if (!LPGP.empty() && GV.getName().front() == '\01' &&
+ GV.getName().substr(1).starts_with(LPGP))
+ Flags &= ~JITSymbolFlags::Exported;
+ }
+
+ return Flags;
+}
+
+JITSymbolFlags llvm::JITSymbolFlags::fromSummary(GlobalValueSummary *S) {
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+ auto L = S->linkage();
+ if (GlobalValue::isWeakLinkage(L) || GlobalValue::isLinkOnceLinkage(L))
+ Flags |= JITSymbolFlags::Weak;
+ if (GlobalValue::isCommonLinkage(L))
+ Flags |= JITSymbolFlags::Common;
+ if (GlobalValue::isExternalLinkage(L) || GlobalValue::isExternalWeakLinkage(L))
+ Flags |= JITSymbolFlags::Exported;
+
+ if (isa<FunctionSummary>(S))
+ Flags |= JITSymbolFlags::Callable;
+
+ return Flags;
+}
+
+Expected<JITSymbolFlags>
+llvm::JITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+ Expected<uint32_t> SymbolFlagsOrErr = Symbol.getFlags();
+ if (!SymbolFlagsOrErr)
+ // TODO: Test this error.
+ return SymbolFlagsOrErr.takeError();
+
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+ if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Weak)
+ Flags |= JITSymbolFlags::Weak;
+ if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Common)
+ Flags |= JITSymbolFlags::Common;
+ if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Exported)
+ Flags |= JITSymbolFlags::Exported;
+
+ auto SymbolType = Symbol.getType();
+ if (!SymbolType)
+ return SymbolType.takeError();
+
+ if (*SymbolType == object::SymbolRef::ST_Function)
+ Flags |= JITSymbolFlags::Callable;
+
+ return Flags;
+}
+
+ARMJITSymbolFlags
+llvm::ARMJITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+ Expected<uint32_t> SymbolFlagsOrErr = Symbol.getFlags();
+ if (!SymbolFlagsOrErr)
+ // TODO: Actually report errors helpfully.
+ report_fatal_error(SymbolFlagsOrErr.takeError());
+ ARMJITSymbolFlags Flags;
+ if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Thumb)
+ Flags |= ARMJITSymbolFlags::Thumb;
+ return Flags;
+}
+
+/// Performs lookup by, for each symbol, first calling
+/// findSymbolInLogicalDylib and if that fails calling
+/// findSymbol.
+void LegacyJITSymbolResolver::lookup(const LookupSet &Symbols,
+ OnResolvedFunction OnResolved) {
+ JITSymbolResolver::LookupResult Result;
+ for (auto &Symbol : Symbols) {
+ std::string SymName = Symbol.str();
+ if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+ else {
+ OnResolved(AddrOrErr.takeError());
+ return;
+ }
+ } else if (auto Err = Sym.takeError()) {
+ OnResolved(std::move(Err));
+ return;
+ } else {
+      // findSymbolInLogicalDylib failed. Let's try findSymbol.
+ if (auto Sym = findSymbol(SymName)) {
+ if (auto AddrOrErr = Sym.getAddress())
+ Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+ else {
+ OnResolved(AddrOrErr.takeError());
+ return;
+ }
+ } else if (auto Err = Sym.takeError()) {
+ OnResolved(std::move(Err));
+ return;
+ } else {
+ OnResolved(make_error<StringError>("Symbol not found: " + Symbol,
+ inconvertibleErrorCode()));
+ return;
+ }
+ }
+ }
+
+ OnResolved(std::move(Result));
+}
+
+/// Performs flags lookup by calling findSymbolInLogicalDylib and
+/// returning the flags value for that symbol.
+Expected<JITSymbolResolver::LookupSet>
+LegacyJITSymbolResolver::getResponsibilitySet(const LookupSet &Symbols) {
+ JITSymbolResolver::LookupSet Result;
+
+ for (auto &Symbol : Symbols) {
+ std::string SymName = Symbol.str();
+ if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+ // If there's an existing def but it is not strong, then the caller is
+ // responsible for it.
+ if (!Sym.getFlags().isStrong())
+ Result.insert(Symbol);
+ } else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ else {
+ // If there is no existing definition then the caller is responsible for
+ // it.
+ Result.insert(Symbol);
+ }
+ }
+
+ return std::move(Result);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
new file mode 100644
index 000000000000..fd11450b635b
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
@@ -0,0 +1,295 @@
+//===-- RTDyldMemoryManager.cpp - Memory manager for MC-JIT -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdlib>
+
+#ifdef __linux__
+ // These includes used by RTDyldMemoryManager::getPointerToNamedFunction()
+ // for Glibc trickery. See comments in this function for more information.
+ #ifdef HAVE_SYS_STAT_H
+ #include <sys/stat.h>
+ #endif
+ #include <fcntl.h>
+ #include <unistd.h>
+#endif
+
+namespace llvm {
+
+RTDyldMemoryManager::~RTDyldMemoryManager() = default;
+
+#if defined(HAVE_REGISTER_FRAME) && defined(HAVE_DEREGISTER_FRAME) && \
+ !defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+extern "C" void __register_frame(void *);
+extern "C" void __deregister_frame(void *);
+#else
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MingW runtime.
+static void __register_frame(void *p) {
+ static bool Searched = false;
+ static void((*rf)(void *)) = 0;
+
+ if (!Searched) {
+ Searched = true;
+ *(void **)&rf =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+ }
+ if (rf)
+ rf(p);
+}
+
+static void __deregister_frame(void *p) {
+ static bool Searched = false;
+ static void((*df)(void *)) = 0;
+
+ if (!Searched) {
+ Searched = true;
+ *(void **)&df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+ "__deregister_frame");
+ }
+ if (df)
+ df(p);
+}
+#endif
+
+/* libgcc and libunwind __register_frame behave differently. We use the presence
+ * of __unw_add_dynamic_fde to detect libunwind. */
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+
+static const char *processFDE(const char *Entry, bool isDeregister) {
+ const char *P = Entry;
+ uint32_t Length = *((const uint32_t *)P);
+ P += 4;
+ uint32_t Offset = *((const uint32_t *)P);
+ if (Offset != 0) {
+ if (isDeregister)
+ __deregister_frame(const_cast<char *>(Entry));
+ else
+ __register_frame(const_cast<char *>(Entry));
+ }
+ return P + Length;
+}
+
+// This implementation handles frame registration for local targets.
+// Memory managers for remote targets should re-implement this function
+// and use the LoadAddr parameter.
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+  // On OS X, __register_frame takes a single FDE as an argument.
+ // See http://lists.llvm.org/pipermail/llvm-dev/2013-April/061737.html
+ // and projects/libunwind/src/UnwindLevel1-gcc-ext.c.
+ const char *P = (const char *)Addr;
+ const char *End = P + Size;
+ while (P != End)
+ P = processFDE(P, false);
+}
+
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+ const char *P = (const char *)Addr;
+ const char *End = P + Size;
+ while (P != End)
+ P = processFDE(P, true);
+}
+
+#else
+
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+ // On Linux __register_frame takes a single argument:
+ // a pointer to the start of the .eh_frame section.
+
+ // How can it find the end? Because crtendS.o is linked
+ // in and it has an .eh_frame section with four zero chars.
+ __register_frame(Addr);
+}
+
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+ size_t Size) {
+ __deregister_frame(Addr);
+}
+
+#endif
+
+void RTDyldMemoryManager::registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) {
+ registerEHFramesInProcess(Addr, Size);
+ EHFrames.push_back({Addr, Size});
+}
+
+void RTDyldMemoryManager::deregisterEHFrames() {
+ for (auto &Frame : EHFrames)
+ deregisterEHFramesInProcess(Frame.Addr, Frame.Size);
+ EHFrames.clear();
+}
+
+static int jit_noop() {
+ return 0;
+}
+
+// ARM math functions are statically linked on Android from libgcc.a, but not
+// available at runtime for dynamic linking. On Linux these are usually placed
+// in libgcc_s.so so can be found by normal dynamic lookup.
+#if defined(__BIONIC__) && defined(__arm__)
+// List of functions which are statically linked on Android and can be generated
+// by LLVM. This is done as a nested macro which is used once to declare the
+// imported functions with ARM_MATH_DECL and once to compare them to the
+// user-requested symbol in getSymbolAddress with ARM_MATH_CHECK. The test
+// assumes that all functions start with __aeabi_ and getSymbolAddress must be
+// modified if that changes.
+#define ARM_MATH_IMPORTS(PP) \
+ PP(__aeabi_d2f) \
+ PP(__aeabi_d2iz) \
+ PP(__aeabi_d2lz) \
+ PP(__aeabi_d2uiz) \
+ PP(__aeabi_d2ulz) \
+ PP(__aeabi_dadd) \
+ PP(__aeabi_dcmpeq) \
+ PP(__aeabi_dcmpge) \
+ PP(__aeabi_dcmpgt) \
+ PP(__aeabi_dcmple) \
+ PP(__aeabi_dcmplt) \
+ PP(__aeabi_dcmpun) \
+ PP(__aeabi_ddiv) \
+ PP(__aeabi_dmul) \
+ PP(__aeabi_dsub) \
+ PP(__aeabi_f2d) \
+ PP(__aeabi_f2iz) \
+ PP(__aeabi_f2lz) \
+ PP(__aeabi_f2uiz) \
+ PP(__aeabi_f2ulz) \
+ PP(__aeabi_fadd) \
+ PP(__aeabi_fcmpeq) \
+ PP(__aeabi_fcmpge) \
+ PP(__aeabi_fcmpgt) \
+ PP(__aeabi_fcmple) \
+ PP(__aeabi_fcmplt) \
+ PP(__aeabi_fcmpun) \
+ PP(__aeabi_fdiv) \
+ PP(__aeabi_fmul) \
+ PP(__aeabi_fsub) \
+ PP(__aeabi_i2d) \
+ PP(__aeabi_i2f) \
+ PP(__aeabi_idiv) \
+ PP(__aeabi_idivmod) \
+ PP(__aeabi_l2d) \
+ PP(__aeabi_l2f) \
+ PP(__aeabi_lasr) \
+ PP(__aeabi_ldivmod) \
+ PP(__aeabi_llsl) \
+ PP(__aeabi_llsr) \
+ PP(__aeabi_lmul) \
+ PP(__aeabi_ui2d) \
+ PP(__aeabi_ui2f) \
+ PP(__aeabi_uidiv) \
+ PP(__aeabi_uidivmod) \
+ PP(__aeabi_ul2d) \
+ PP(__aeabi_ul2f) \
+ PP(__aeabi_uldivmod)
+
+// Declare statically linked math functions on ARM. The function declarations
+// here do not have the correct prototypes for each function in
+// ARM_MATH_IMPORTS, but it doesn't matter because only the symbol addresses are
+// needed. In particular the __aeabi_*divmod functions do not have calling
+// conventions which match any C prototype.
+#define ARM_MATH_DECL(name) extern "C" void name();
+ARM_MATH_IMPORTS(ARM_MATH_DECL)
+#undef ARM_MATH_DECL
+#endif
+
+#if defined(__linux__) && defined(__GLIBC__) && \
+ (defined(__i386__) || defined(__x86_64__))
+extern "C" LLVM_ATTRIBUTE_WEAK void __morestack();
+#endif
+
+// Resolve Name within the host process: first against a small table of
+// symbols that need special handling (glibc stubs, __morestack, Android ARM
+// math helpers, __main), then via dynamic lookup. Returns 0 when the symbol
+// cannot be found.
+uint64_t
+RTDyldMemoryManager::getSymbolAddressInProcess(const std::string &Name) {
+ // This implementation assumes that the host program is the target.
+ // Clients generating code for a remote target should implement their own
+ // memory manager.
+#if defined(__linux__) && defined(__GLIBC__)
+ //===--------------------------------------------------------------------===//
+ // Function stubs that are invoked instead of certain library calls
+ //
+ // Force the following functions to be linked in to anything that uses the
+ // JIT. This is a hack designed to work around the all-too-clever Glibc
+ // strategy of making these functions work differently when inlined vs. when
+ // not inlined, and hiding their real definitions in a separate archive file
+ // that the dynamic linker can't see. For more info, search for
+ // 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+ if (Name == "stat") return (uint64_t)&stat;
+ if (Name == "fstat") return (uint64_t)&fstat;
+ if (Name == "lstat") return (uint64_t)&lstat;
+ if (Name == "stat64") return (uint64_t)&stat64;
+ if (Name == "fstat64") return (uint64_t)&fstat64;
+ if (Name == "lstat64") return (uint64_t)&lstat64;
+ if (Name == "atexit") return (uint64_t)&atexit;
+ if (Name == "mknod") return (uint64_t)&mknod;
+
+#if defined(__i386__) || defined(__x86_64__)
+ // __morestack lives in libgcc, a static library.
+ // The &__morestack check guards against the weak symbol being absent.
+ if (&__morestack && Name == "__morestack")
+ return (uint64_t)&__morestack;
+#endif
+#endif // __linux__ && __GLIBC__
+
+ // See ARM_MATH_IMPORTS definition for explanation
+#if defined(__BIONIC__) && defined(__arm__)
+ if (Name.compare(0, 8, "__aeabi_") == 0) {
+ // Check if the user has requested any of the functions listed in
+ // ARM_MATH_IMPORTS, and if so redirect to the statically linked symbol.
+#define ARM_MATH_CHECK(fn) if (Name == #fn) return (uint64_t)&fn;
+ ARM_MATH_IMPORTS(ARM_MATH_CHECK)
+#undef ARM_MATH_CHECK
+ }
+#endif
+
+ // We should not invoke parent's ctors/dtors from generated main()!
+ // On Mingw and Cygwin, the symbol __main is resolved to
+ // callee's(eg. tools/lli) one, to invoke wrong duplicated ctors
+ // (and register wrong callee's dtors with atexit(3)).
+ // We expect ExecutionEngine::runStaticConstructorsDestructors()
+ // is called before ExecutionEngine::runFunctionAsMain() is called.
+ if (Name == "__main") return (uint64_t)&jit_noop;
+
+ const char *NameStr = Name.c_str();
+
+ // DynamicLibrary::SearchForAddressOfSymbol expects an unmangled 'C' symbol
+ // name so if we're on Darwin, strip the leading '_' off.
+#ifdef __APPLE__
+ if (NameStr[0] == '_')
+ ++NameStr;
+#endif
+
+ return (uint64_t)sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+}
+
+// Resolve Name through getSymbolAddress(). If resolution fails and
+// AbortOnFailure is set, terminate with a fatal error; otherwise a failed
+// lookup yields nullptr.
+void *RTDyldMemoryManager::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+ uint64_t Addr = getSymbolAddress(Name);
+
+ if (!Addr && AbortOnFailure)
+ report_fatal_error(Twine("Program used external function '") + Name +
+ "' which could not be resolved!");
+
+ return (void*)Addr;
+}
+
+void RTDyldMemoryManager::anchor() {}
+void MCJITMemoryManager::anchor() {}
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
new file mode 100644
index 000000000000..7eb7da0138c9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -0,0 +1,1477 @@
+//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "RuntimeDyldCOFF.h"
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldImpl.h"
+#include "RuntimeDyldMachO.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/MathExtras.h"
+#include <mutex>
+
+#include <future>
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+// Error code values for the "runtimedyld" std::error_category below.
+enum RuntimeDyldErrorCode {
+ GenericRTDyldError = 1
+};
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class RuntimeDyldErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "runtimedyld"; }
+
+ std::string message(int Condition) const override {
+ switch (static_cast<RuntimeDyldErrorCode>(Condition)) {
+ case GenericRTDyldError: return "Generic RuntimeDyld error";
+ }
+ llvm_unreachable("Unrecognized RuntimeDyldErrorCode");
+ }
+};
+
+} // end anonymous namespace
+
+char RuntimeDyldError::ID = 0;
+
+// Print the stored message; part of the ErrorInfo interface.
+void RuntimeDyldError::log(raw_ostream &OS) const {
+ OS << ErrMsg << "\n";
+}
+
+// Collapse every RuntimeDyldError onto the single generic error code; this
+// is the transitional std::error_code interop described in the FIXME above
+// on RuntimeDyldErrorCategory.
+std::error_code RuntimeDyldError::convertToErrorCode() const {
+ static RuntimeDyldErrorCategory RTDyldErrorCategory;
+ return std::error_code(GenericRTDyldError, RTDyldErrorCategory);
+}
+
+// Empty out-of-line virtual destructor as the key function.
+RuntimeDyldImpl::~RuntimeDyldImpl() = default;
+
+// Pin LoadedObjectInfo's vtables to this file.
+void RuntimeDyld::LoadedObjectInfo::anchor() {}
+
+namespace llvm {
+
+// EH-frame registration is format-specific, so the base implementation
+// registers nothing; subclasses override it. Deregistration always
+// forwards to the memory manager so any frames a subclass registered
+// through it are released.
+void RuntimeDyldImpl::registerEHFrames() {}
+
+void RuntimeDyldImpl::deregisterEHFrames() {
+ MemMgr.deregisterEHFrames();
+}
+
+#ifndef NDEBUG
+// Debug helper: hex-dump the contents of section S to dbgs(), 16 bytes per
+// row, each row labelled with its 16-byte-aligned load address. State is a
+// caller-supplied tag (e.g. "before relocations") shown in the banner.
+static void dumpSectionMemory(const SectionEntry &S, StringRef State) {
+ dbgs() << "----- Contents of section " << S.getName() << " " << State
+ << " -----";
+
+ if (S.getAddress() == nullptr) {
+ dbgs() << "\n <section not emitted>\n";
+ return;
+ }
+
+ const unsigned ColsPerRow = 16;
+
+ uint8_t *DataAddr = S.getAddress();
+ uint64_t LoadAddr = S.getLoadAddress();
+
+ // If the load address is not row-aligned, pad the first row so the byte
+ // columns line up with subsequent rows.
+ unsigned StartPadding = LoadAddr & (ColsPerRow - 1);
+ unsigned BytesRemaining = S.getSize();
+
+ if (StartPadding) {
+ dbgs() << "\n" << format("0x%016" PRIx64,
+ LoadAddr & ~(uint64_t)(ColsPerRow - 1)) << ":";
+ while (StartPadding--)
+ dbgs() << " ";
+ }
+
+ while (BytesRemaining > 0) {
+ // Start a new labelled row at every 16-byte boundary.
+ if ((LoadAddr & (ColsPerRow - 1)) == 0)
+ dbgs() << "\n" << format("0x%016" PRIx64, LoadAddr) << ":";
+
+ dbgs() << " " << format("%02x", *DataAddr);
+
+ ++DataAddr;
+ ++LoadAddr;
+ --BytesRemaining;
+ }
+
+ dbgs() << "\n";
+}
+#endif
+
+// Resolve the relocations for all symbols we currently know about.
+// External-symbol failures are recorded in HasError/ErrorStr rather than
+// propagated; local relocations are then applied unconditionally.
+void RuntimeDyldImpl::resolveRelocations() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Print out the sections prior to relocation.
+ LLVM_DEBUG({
+ for (SectionEntry &S : Sections)
+ dumpSectionMemory(S, "before relocations");
+ });
+
+ // First, resolve relocations associated with external symbols.
+ if (auto Err = resolveExternalSymbols()) {
+ HasError = true;
+ ErrorStr = toString(std::move(Err));
+ }
+
+ // Then apply all queued section-internal relocations.
+ resolveLocalRelocations();
+
+ // Print out sections after relocation.
+ LLVM_DEBUG({
+ for (SectionEntry &S : Sections)
+ dumpSectionMemory(S, "after relocations");
+ });
+}
+
+// Apply every queued relocation using the final load address of the section
+// the relocated value refers to, then drop the queue.
+void RuntimeDyldImpl::resolveLocalRelocations() {
+ // Iterate over all outstanding relocations
+ for (const auto &Rel : Relocations) {
+ // The Section here (Sections[i]) refers to the section in which the
+ // symbol for the relocation is located. The SectionID in the relocation
+ // entry provides the section to which the relocation will be applied.
+ unsigned Idx = Rel.first;
+ uint64_t Addr = getSectionLoadAddress(Idx);
+ LLVM_DEBUG(dbgs() << "Resolving relocations Section #" << Idx << "\t"
+ << format("%p", (uintptr_t)Addr) << "\n");
+ resolveRelocationList(Rel.second, Addr);
+ }
+ Relocations.clear();
+}
+
+// Reassign the target load address of the section whose local (host-side)
+// buffer is LocalAddress. It is a programming error to pass an address that
+// does not match any emitted section.
+void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ for (unsigned i = 0, e = Sections.size(); i != e; ++i) {
+ if (Sections[i].getAddress() == LocalAddress) {
+ reassignSectionAddress(i, TargetAddress);
+ return;
+ }
+ }
+ llvm_unreachable("Attempting to remap address of unknown section!");
+}
+
+// Compute Sym's offset from the start of section Sec, storing it in Result.
+// Fails only if the symbol's address cannot be read from the object.
+static Error getOffset(const SymbolRef &Sym, SectionRef Sec,
+ uint64_t &Result) {
+ Expected<uint64_t> AddressOrErr = Sym.getAddress();
+ if (!AddressOrErr)
+ return AddressOrErr.takeError();
+ Result = *AddressOrErr - Sec.getAddress();
+ return Error::success();
+}
+
+// Core object-loading routine shared by all object formats: records target
+// info, optionally pre-reserves memory, builds the global symbol table
+// (resolving weak/common symbols against stronger definitions elsewhere),
+// emits sections, queues relocations, and returns the map from object
+// sections to their assigned section IDs.
+Expected<RuntimeDyldImpl::ObjSectionToIDMap>
+RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // Save information about our target
+ Arch = (Triple::ArchType)Obj.getArch();
+ IsTargetLittleEndian = Obj.isLittleEndian();
+ setMipsABI(Obj);
+
+ // Compute the memory size required to load all sections to be loaded
+ // and pass this information to the memory manager
+ if (MemMgr.needsToReserveAllocationSpace()) {
+ uint64_t CodeSize = 0, RODataSize = 0, RWDataSize = 0;
+ Align CodeAlign, RODataAlign, RWDataAlign;
+ if (auto Err = computeTotalAllocSize(Obj, CodeSize, CodeAlign, RODataSize,
+ RODataAlign, RWDataSize, RWDataAlign))
+ return std::move(Err);
+ MemMgr.reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign,
+ RWDataSize, RWDataAlign);
+ }
+
+ // Used sections from the object file
+ ObjSectionToIDMap LocalSections;
+
+ // Common symbols requiring allocation, with their sizes and alignments
+ CommonSymbolList CommonSymbolsToAllocate;
+
+ uint64_t CommonSize = 0;
+ uint32_t CommonAlign = 0;
+
+ // First, collect all weak and common symbols. We need to know if stronger
+ // definitions occur elsewhere.
+ JITSymbolResolver::LookupSet ResponsibilitySet;
+ {
+ JITSymbolResolver::LookupSet Symbols;
+ for (auto &Sym : Obj.symbols()) {
+ Expected<uint32_t> FlagsOrErr = Sym.getFlags();
+ if (!FlagsOrErr)
+ // TODO: Test this error.
+ return FlagsOrErr.takeError();
+ if ((*FlagsOrErr & SymbolRef::SF_Common) ||
+ (*FlagsOrErr & SymbolRef::SF_Weak)) {
+ // Get symbol name.
+ if (auto NameOrErr = Sym.getName())
+ Symbols.insert(*NameOrErr);
+ else
+ return NameOrErr.takeError();
+ }
+ }
+
+ // Ask the resolver which of these weak/common symbols this instance
+ // should provide the definition for.
+ if (auto ResultOrErr = Resolver.getResponsibilitySet(Symbols))
+ ResponsibilitySet = std::move(*ResultOrErr);
+ else
+ return ResultOrErr.takeError();
+ }
+
+ // Parse symbols
+ LLVM_DEBUG(dbgs() << "Parse symbols:\n");
+ for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+ ++I) {
+ Expected<uint32_t> FlagsOrErr = I->getFlags();
+ if (!FlagsOrErr)
+ // TODO: Test this error.
+ return FlagsOrErr.takeError();
+
+ // Skip undefined symbols.
+ if (*FlagsOrErr & SymbolRef::SF_Undefined)
+ continue;
+
+ // Get the symbol type.
+ object::SymbolRef::Type SymType;
+ if (auto SymTypeOrErr = I->getType())
+ SymType = *SymTypeOrErr;
+ else
+ return SymTypeOrErr.takeError();
+
+ // Get symbol name.
+ StringRef Name;
+ if (auto NameOrErr = I->getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+
+ // Compute JIT symbol flags.
+ auto JITSymFlags = getJITSymbolFlags(*I);
+ if (!JITSymFlags)
+ return JITSymFlags.takeError();
+
+ // If this is a weak definition, check to see if there's a strong one.
+ // If there is, skip this symbol (we won't be providing it: the strong
+ // definition will). If there's no strong definition, make this definition
+ // strong.
+ if (JITSymFlags->isWeak() || JITSymFlags->isCommon()) {
+ // First check whether there's already a definition in this instance.
+ if (GlobalSymbolTable.count(Name))
+ continue;
+
+ // If we're not responsible for this symbol, skip it.
+ if (!ResponsibilitySet.count(Name))
+ continue;
+
+ // Otherwise update the flags on the symbol to make this definition
+ // strong.
+ if (JITSymFlags->isWeak())
+ *JITSymFlags &= ~JITSymbolFlags::Weak;
+ if (JITSymFlags->isCommon()) {
+ *JITSymFlags &= ~JITSymbolFlags::Common;
+ uint32_t Align = I->getAlignment();
+ uint64_t Size = I->getCommonSize();
+ if (!CommonAlign)
+ CommonAlign = Align;
+ CommonSize = alignTo(CommonSize, Align) + Size;
+ CommonSymbolsToAllocate.push_back(*I);
+ }
+ }
+
+ if (*FlagsOrErr & SymbolRef::SF_Absolute &&
+ SymType != object::SymbolRef::ST_File) {
+ uint64_t Addr = 0;
+ if (auto AddrOrErr = I->getAddress())
+ Addr = *AddrOrErr;
+ else
+ return AddrOrErr.takeError();
+
+ unsigned SectionID = AbsoluteSymbolSection;
+
+ LLVM_DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", (uintptr_t)Addr)
+ << " flags: " << *FlagsOrErr << "\n");
+ // Skip absolute symbol relocations.
+ if (!Name.empty()) {
+ auto Result = GlobalSymbolTable.insert_or_assign(
+ Name, SymbolTableEntry(SectionID, Addr, *JITSymFlags));
+ processNewSymbol(*I, Result.first->getValue());
+ }
+ } else if (SymType == object::SymbolRef::ST_Function ||
+ SymType == object::SymbolRef::ST_Data ||
+ SymType == object::SymbolRef::ST_Unknown ||
+ SymType == object::SymbolRef::ST_Other) {
+
+ section_iterator SI = Obj.section_end();
+ if (auto SIOrErr = I->getSection())
+ SI = *SIOrErr;
+ else
+ return SIOrErr.takeError();
+
+ if (SI == Obj.section_end())
+ continue;
+
+ // Get symbol offset.
+ uint64_t SectOffset;
+ if (auto Err = getOffset(*I, *SI, SectOffset))
+ return std::move(Err);
+
+ bool IsCode = SI->isText();
+ unsigned SectionID;
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+ SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+
+ LLVM_DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", (uintptr_t)SectOffset)
+ << " flags: " << *FlagsOrErr << "\n");
+ // Skip absolute symbol relocations.
+ if (!Name.empty()) {
+ auto Result = GlobalSymbolTable.insert_or_assign(
+ Name, SymbolTableEntry(SectionID, SectOffset, *JITSymFlags));
+ processNewSymbol(*I, Result.first->getValue());
+ }
+ }
+ }
+
+ // Allocate common symbols
+ if (auto Err = emitCommonSymbols(Obj, CommonSymbolsToAllocate, CommonSize,
+ CommonAlign))
+ return std::move(Err);
+
+ // Parse and process relocations
+ LLVM_DEBUG(dbgs() << "Parse relocations:\n");
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+ StubMap Stubs;
+
+ Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+ if (!RelSecOrErr)
+ return RelSecOrErr.takeError();
+
+ section_iterator RelocatedSection = *RelSecOrErr;
+ if (RelocatedSection == SE)
+ continue;
+
+ relocation_iterator I = SI->relocation_begin();
+ relocation_iterator E = SI->relocation_end();
+
+ if (I == E && !ProcessAllSections)
+ continue;
+
+ bool IsCode = RelocatedSection->isText();
+ unsigned SectionID = 0;
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, *RelocatedSection, IsCode,
+ LocalSections))
+ SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+
+ LLVM_DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+
+ for (; I != E;)
+ if (auto IOrErr = processRelocationRef(SectionID, I, Obj, LocalSections, Stubs))
+ I = *IOrErr;
+ else
+ return IOrErr.takeError();
+
+ // If there is a NotifyStubEmitted callback set, call it to register any
+ // stubs created for this section.
+ if (NotifyStubEmitted) {
+ StringRef FileName = Obj.getFileName();
+ StringRef SectionName = Sections[SectionID].getName();
+ for (auto &KV : Stubs) {
+
+ auto &VR = KV.first;
+ uint64_t StubAddr = KV.second;
+
+ // If this is a named stub, just call NotifyStubEmitted.
+ if (VR.SymbolName) {
+ NotifyStubEmitted(FileName, SectionName, VR.SymbolName, SectionID,
+ StubAddr);
+ continue;
+ }
+
+ // Otherwise we will have to try a reverse lookup on the global symbol table.
+ for (auto &GSTMapEntry : GlobalSymbolTable) {
+ StringRef SymbolName = GSTMapEntry.first();
+ auto &GSTEntry = GSTMapEntry.second;
+ if (GSTEntry.getSectionID() == VR.SectionID &&
+ GSTEntry.getOffset() == VR.Offset) {
+ NotifyStubEmitted(FileName, SectionName, SymbolName, SectionID,
+ StubAddr);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Process remaining sections
+ if (ProcessAllSections) {
+ LLVM_DEBUG(dbgs() << "Process remaining sections:\n");
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+
+ /* Ignore already loaded sections */
+ if (LocalSections.find(*SI) != LocalSections.end())
+ continue;
+
+ bool IsCode = SI->isText();
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+ LLVM_DEBUG(dbgs() << "\tSectionID: " << (*SectionIDOrErr) << "\n");
+ else
+ return SectionIDOrErr.takeError();
+ }
+ }
+
+ // Give the subclasses a chance to tie-up any loose ends.
+ if (auto Err = finalizeLoad(Obj, LocalSections))
+ return std::move(Err);
+
+// for (auto E : LocalSections)
+// llvm::dbgs() << "Added: " << E.first.getRawDataRefImpl() << " -> " << E.second << "\n";
+
+ return LocalSections;
+}
+
+// A helper method for computeTotalAllocSize.
+// Computes the memory size required to allocate sections with the given sizes,
+// assuming that all sections are allocated with the given alignment
+// (an upper bound: each section is individually padded up to Alignment).
+static uint64_t
+computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
+ Align Alignment) {
+ uint64_t TotalSize = 0;
+ for (uint64_t SectionSize : SectionSizes)
+ TotalSize += alignTo(SectionSize, Alignment);
+ return TotalSize;
+}
+
+// Return true if Section must be loaded into memory for the program to run:
+// SHF_ALLOC sections for ELF, non-discardable sections with content for
+// COFF, and (conservatively) every section for MachO.
+static bool isRequiredForExecution(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return ELFSectionRef(Section).getFlags() & ELF::SHF_ALLOC;
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj)) {
+ const coff_section *CoffSection = COFFObj->getCOFFSection(Section);
+ // Avoid loading zero-sized COFF sections.
+ // In PE files, VirtualSize gives the section size, and SizeOfRawData
+ // may be zero for sections with content. In Obj files, SizeOfRawData
+ // gives the section size, and VirtualSize is always zero. Hence
+ // the need to check for both cases below.
+ bool HasContent =
+ (CoffSection->VirtualSize > 0) || (CoffSection->SizeOfRawData > 0);
+ bool IsDiscardable =
+ CoffSection->Characteristics &
+ (COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
+ return HasContent && !IsDiscardable;
+ }
+
+ assert(isa<MachOObjectFile>(Obj));
+ return true;
+}
+
+// Return true if Section holds read-only data: non-writable, non-executable
+// for ELF; initialized+readable but not writable for COFF. MachO sections
+// are conservatively treated as not read-only.
+static bool isReadOnlyData(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return !(ELFSectionRef(Section).getFlags() &
+ (ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
+ return ((COFFObj->getCOFFSection(Section)->Characteristics &
+ (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ
+ | COFF::IMAGE_SCN_MEM_WRITE))
+ ==
+ (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+ | COFF::IMAGE_SCN_MEM_READ));
+
+ assert(isa<MachOObjectFile>(Obj));
+ return false;
+}
+
+// Return true if Section has no backing bytes in the object file and must be
+// zero-filled at load time (ELF SHT_NOBITS / COFF uninitialized data /
+// MachO zerofill section types).
+static bool isZeroInit(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return ELFSectionRef(Section).getType() == ELF::SHT_NOBITS;
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
+ return COFFObj->getCOFFSection(Section)->Characteristics &
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+
+ auto *MachO = cast<MachOObjectFile>(Obj);
+ unsigned SectionType = MachO->getSectionType(Section);
+ return SectionType == MachO::S_ZEROFILL ||
+ SectionType == MachO::S_GB_ZEROFILL;
+}
+
+// Return true if Section is thread-local storage. Only the ELF SHF_TLS flag
+// is recognized here; other formats report false.
+static bool isTLS(const SectionRef Section) {
+ const ObjectFile *Obj = Section.getObject();
+ if (isa<object::ELFObjectFileBase>(Obj))
+ return ELFSectionRef(Section).getFlags() & ELF::SHF_TLS;
+ return false;
+}
+
+// Compute an upper bound of the memory size that is required to load all
+// sections. Results are returned through the out-parameters: total size and
+// max alignment for each of the code, read-only data, and read-write data
+// groups (GOT entries and common symbols are folded into the RW group).
+Error RuntimeDyldImpl::computeTotalAllocSize(
+ const ObjectFile &Obj, uint64_t &CodeSize, Align &CodeAlign,
+ uint64_t &RODataSize, Align &RODataAlign, uint64_t &RWDataSize,
+ Align &RWDataAlign) {
+ // Compute the size of all sections required for execution
+ std::vector<uint64_t> CodeSectionSizes;
+ std::vector<uint64_t> ROSectionSizes;
+ std::vector<uint64_t> RWSectionSizes;
+
+ // Collect sizes of all sections to be loaded;
+ // also determine the max alignment of all sections
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+ const SectionRef &Section = *SI;
+
+ bool IsRequired = isRequiredForExecution(Section) || ProcessAllSections;
+
+ // Consider only the sections that are required to be loaded for execution
+ if (IsRequired) {
+ uint64_t DataSize = Section.getSize();
+ Align Alignment = Section.getAlignment();
+ bool IsCode = Section.isText();
+ bool IsReadOnly = isReadOnlyData(Section);
+ bool IsTLS = isTLS(Section);
+
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef Name = *NameOrErr;
+
+ uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+ // Mirror the padding emitSection will add: .eh_frame terminator bytes
+ // and worst-case slack for aligning the stub buffer.
+ uint64_t PaddingSize = 0;
+ if (Name == ".eh_frame")
+ PaddingSize += 4;
+ if (StubBufSize != 0)
+ PaddingSize += getStubAlignment().value() - 1;
+
+ uint64_t SectionSize = DataSize + PaddingSize + StubBufSize;
+
+ // The .eh_frame section (at least on Linux) needs an extra four bytes
+ // padded
+ // with zeroes added at the end. For MachO objects, this section has a
+ // slightly different name, so this won't have any effect for MachO
+ // objects.
+ if (Name == ".eh_frame")
+ SectionSize += 4;
+
+ if (!SectionSize)
+ SectionSize = 1;
+
+ if (IsCode) {
+ CodeAlign = std::max(CodeAlign, Alignment);
+ CodeSectionSizes.push_back(SectionSize);
+ } else if (IsReadOnly) {
+ RODataAlign = std::max(RODataAlign, Alignment);
+ ROSectionSizes.push_back(SectionSize);
+ } else if (!IsTLS) {
+ RWDataAlign = std::max(RWDataAlign, Alignment);
+ RWSectionSizes.push_back(SectionSize);
+ }
+ }
+ }
+
+ // Compute Global Offset Table size. If it is not zero we
+ // also update alignment, which is equal to a size of a
+ // single GOT entry.
+ if (unsigned GotSize = computeGOTSize(Obj)) {
+ RWSectionSizes.push_back(GotSize);
+ RWDataAlign = std::max(RWDataAlign, Align(getGOTEntrySize()));
+ }
+
+ // Compute the size of all common symbols
+ uint64_t CommonSize = 0;
+ Align CommonAlign;
+ for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+ ++I) {
+ Expected<uint32_t> FlagsOrErr = I->getFlags();
+ if (!FlagsOrErr)
+ // TODO: Test this error.
+ return FlagsOrErr.takeError();
+ if (*FlagsOrErr & SymbolRef::SF_Common) {
+ // Add the common symbols to a list. We'll allocate them all below.
+ uint64_t Size = I->getCommonSize();
+ Align Alignment = Align(I->getAlignment());
+ // If this is the first common symbol, use its alignment as the alignment
+ // for the common symbols section.
+ if (CommonSize == 0)
+ CommonAlign = Alignment;
+ CommonSize = alignTo(CommonSize, Alignment) + Size;
+ }
+ }
+ if (CommonSize != 0) {
+ RWSectionSizes.push_back(CommonSize);
+ RWDataAlign = std::max(RWDataAlign, CommonAlign);
+ }
+
+ if (!CodeSectionSizes.empty()) {
+ // Add 64 bytes for a potential IFunc resolver stub
+ CodeSectionSizes.push_back(64);
+ }
+
+ // Compute the required allocation space for each different type of sections
+ // (code, read-only data, read-write data) assuming that all sections are
+ // allocated with the max alignment. Note that we cannot compute with the
+ // individual alignments of the sections, because then the required size
+ // depends on the order, in which the sections are allocated.
+ CodeSize = computeAllocationSizeForSections(CodeSectionSizes, CodeAlign);
+ RODataSize = computeAllocationSizeForSections(ROSectionSizes, RODataAlign);
+ RWDataSize = computeAllocationSizeForSections(RWSectionSizes, RWDataAlign);
+
+ return Error::success();
+}
+
+// compute GOT size
+// One GOT entry is reserved for every relocation in Obj that needs one;
+// returns 0 on targets without GOT support (getGOTEntrySize() == 0).
+unsigned RuntimeDyldImpl::computeGOTSize(const ObjectFile &Obj) {
+ size_t GotEntrySize = getGOTEntrySize();
+ if (!GotEntrySize)
+ return 0;
+
+ size_t GotSize = 0;
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+
+ for (const RelocationRef &Reloc : SI->relocations())
+ if (relocationNeedsGot(Reloc))
+ GotSize += GotEntrySize;
+ }
+
+ return GotSize;
+}
+
+// compute stub buffer size for the given section
+// Reserves one maximum-size stub for every relocation targeting Section
+// that needs one, plus slack to align the stub buffer after the section's
+// own data; returns 0 when stubs are disallowed or the target has none.
+unsigned RuntimeDyldImpl::computeSectionStubBufSize(const ObjectFile &Obj,
+ const SectionRef &Section) {
+ if (!MemMgr.allowStubAllocation()) {
+ return 0;
+ }
+
+ unsigned StubSize = getMaxStubSize();
+ if (StubSize == 0) {
+ return 0;
+ }
+ // FIXME: this is an inefficient way to handle this. We should compute the
+ // necessary section allocation size in loadObject by walking all the sections
+ // once.
+ unsigned StubBufSize = 0;
+ for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+ SI != SE; ++SI) {
+
+ Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+ if (!RelSecOrErr)
+ report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
+
+ // Only count relocations whose target section is the one we're sizing.
+ section_iterator RelSecI = *RelSecOrErr;
+ if (!(RelSecI == Section))
+ continue;
+
+ for (const RelocationRef &Reloc : SI->relocations())
+ if (relocationNeedsStub(Reloc))
+ StubBufSize += StubSize;
+ }
+
+ // Get section data size and alignment
+ uint64_t DataSize = Section.getSize();
+ Align Alignment = Section.getAlignment();
+
+ // Add stubbuf size alignment
+ Align StubAlignment = getStubAlignment();
+ Align EndAlignment = commonAlignment(Alignment, DataSize);
+ if (StubAlignment > EndAlignment)
+ StubBufSize += StubAlignment.value() - EndAlignment.value();
+ return StubBufSize;
+}
+
+// Read Size bytes (Size <= 8) from Src into a uint64_t, honoring the
+// target's endianness rather than the host's.
+uint64_t RuntimeDyldImpl::readBytesUnaligned(uint8_t *Src,
+ unsigned Size) const {
+ uint64_t Result = 0;
+ if (IsTargetLittleEndian) {
+ // Walk backwards so the last (most significant) byte is folded in first.
+ Src += Size - 1;
+ while (Size--)
+ Result = (Result << 8) | *Src--;
+ } else
+ while (Size--)
+ Result = (Result << 8) | *Src++;
+
+ return Result;
+}
+
+// Write the low Size bytes of Value to Dst in the target's byte order;
+// inverse of readBytesUnaligned.
+void RuntimeDyldImpl::writeBytesUnaligned(uint64_t Value, uint8_t *Dst,
+ unsigned Size) const {
+ if (IsTargetLittleEndian) {
+ while (Size--) {
+ *Dst++ = Value & 0xFF;
+ Value >>= 8;
+ }
+ } else {
+ // Big-endian: emit least significant byte at the highest address.
+ Dst += Size - 1;
+ while (Size--) {
+ *Dst-- = Value & 0xFF;
+ Value >>= 8;
+ }
+ }
+}
+
+// Derive JIT symbol flags from an object symbol. Virtual so format-specific
+// subclasses can apply their own flag rules.
+Expected<JITSymbolFlags>
+RuntimeDyldImpl::getJITSymbolFlags(const SymbolRef &SR) {
+ return JITSymbolFlags::fromObjectSymbol(SR);
+}
+
+// Allocate a single zero-initialized "<common symbols>" data section of
+// CommonSize bytes and lay out each symbol in SymbolsToAllocate inside it,
+// respecting per-symbol alignment, then publish the symbols in the global
+// symbol table. CommonSize/CommonAlign were precomputed by loadObjectImpl.
+Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
+ CommonSymbolList &SymbolsToAllocate,
+ uint64_t CommonSize,
+ uint32_t CommonAlign) {
+ if (SymbolsToAllocate.empty())
+ return Error::success();
+
+ // Allocate memory for the section
+ unsigned SectionID = Sections.size();
+ uint8_t *Addr = MemMgr.allocateDataSection(CommonSize, CommonAlign, SectionID,
+ "<common symbols>", false);
+ if (!Addr)
+ report_fatal_error("Unable to allocate memory for common symbols!");
+ uint64_t Offset = 0;
+ Sections.push_back(
+ SectionEntry("<common symbols>", Addr, CommonSize, CommonSize, 0));
+ memset(Addr, 0, CommonSize);
+
+ LLVM_DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
+ << " new addr: " << format("%p", Addr)
+ << " DataSize: " << CommonSize << "\n");
+
+ // Assign the address of each symbol
+ for (auto &Sym : SymbolsToAllocate) {
+ uint32_t Alignment = Sym.getAlignment();
+ uint64_t Size = Sym.getCommonSize();
+ StringRef Name;
+ if (auto NameOrErr = Sym.getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+ if (Alignment) {
+ // This symbol has an alignment requirement.
+ uint64_t AlignOffset =
+ offsetToAlignment((uint64_t)Addr, Align(Alignment));
+ Addr += AlignOffset;
+ Offset += AlignOffset;
+ }
+ auto JITSymFlags = getJITSymbolFlags(Sym);
+
+ if (!JITSymFlags)
+ return JITSymFlags.takeError();
+
+ LLVM_DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+ << format("%p", Addr) << "\n");
+ if (!Name.empty()) // Skip absolute symbol relocations.
+ GlobalSymbolTable[Name] =
+ SymbolTableEntry(SectionID, Offset, std::move(*JITSymFlags));
+ Offset += Size;
+ Addr += Size;
+ }
+
+ return Error::success();
+}
+
+// Allocate memory for one object section (TLS, code, or data as
+// appropriate), copy or zero-fill its contents, append padding for
+// .eh_frame terminators and stub alignment, and record it in Sections.
+// Returns the new section's ID. Sections not required for execution are
+// recorded with a null address unless ProcessAllSections is set.
+Expected<unsigned>
+RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode) {
+ StringRef data;
+ Align Alignment = Section.getAlignment();
+
+ unsigned PaddingSize = 0;
+ unsigned StubBufSize = 0;
+ bool IsRequired = isRequiredForExecution(Section);
+ bool IsVirtual = Section.isVirtual();
+ bool IsZeroInit = isZeroInit(Section);
+ bool IsReadOnly = isReadOnlyData(Section);
+ bool IsTLS = isTLS(Section);
+ uint64_t DataSize = Section.getSize();
+
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef Name = *NameOrErr;
+
+ StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+ // The .eh_frame section (at least on Linux) needs an extra four bytes padded
+ // with zeroes added at the end. For MachO objects, this section has a
+ // slightly different name, so this won't have any effect for MachO objects.
+ if (Name == ".eh_frame")
+ PaddingSize = 4;
+
+ uintptr_t Allocate;
+ unsigned SectionID = Sections.size();
+ uint8_t *Addr;
+ uint64_t LoadAddress = 0;
+ const char *pData = nullptr;
+
+ // If this section contains any bits (i.e. isn't a virtual or bss section),
+ // grab a reference to them.
+ if (!IsVirtual && !IsZeroInit) {
+ // In either case, set the location of the unrelocated section in memory,
+ // since we still process relocations for it even if we're not applying them.
+ if (Expected<StringRef> E = Section.getContents())
+ data = *E;
+ else
+ return E.takeError();
+ pData = data.data();
+ }
+
+ // If there are any stubs then the section alignment needs to be at least as
+ // high as stub alignment or padding calculations may be incorrect when the
+ // section is remapped.
+ if (StubBufSize != 0) {
+ Alignment = std::max(Alignment, getStubAlignment());
+ PaddingSize += getStubAlignment().value() - 1;
+ }
+
+ // Some sections, such as debug info, don't need to be loaded for execution.
+ // Process those only if explicitly requested.
+ if (IsRequired || ProcessAllSections) {
+ Allocate = DataSize + PaddingSize + StubBufSize;
+ if (!Allocate)
+ Allocate = 1;
+ if (IsTLS) {
+ auto TLSSection = MemMgr.allocateTLSSection(Allocate, Alignment.value(),
+ SectionID, Name);
+ Addr = TLSSection.InitializationImage;
+ LoadAddress = TLSSection.Offset;
+ } else if (IsCode) {
+ Addr = MemMgr.allocateCodeSection(Allocate, Alignment.value(), SectionID,
+ Name);
+ } else {
+ Addr = MemMgr.allocateDataSection(Allocate, Alignment.value(), SectionID,
+ Name, IsReadOnly);
+ }
+ if (!Addr)
+ report_fatal_error("Unable to allocate section memory!");
+
+ // Zero-initialize or copy the data from the image
+ if (IsZeroInit || IsVirtual)
+ memset(Addr, 0, DataSize);
+ else
+ memcpy(Addr, pData, DataSize);
+
+ // Fill in any extra bytes we allocated for padding
+ if (PaddingSize != 0) {
+ memset(Addr + DataSize, 0, PaddingSize);
+ // Update the DataSize variable to include padding.
+ DataSize += PaddingSize;
+
+ // Align DataSize to stub alignment if we have any stubs (PaddingSize will
+ // have been increased above to account for this).
+ if (StubBufSize > 0)
+ DataSize &= -(uint64_t)getStubAlignment().value();
+ }
+
+ LLVM_DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: "
+ << Name << " obj addr: " << format("%p", pData)
+ << " new addr: " << format("%p", Addr) << " DataSize: "
+ << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
+ } else {
+ // Even if we didn't load the section, we need to record an entry for it
+ // to handle later processing (and by 'handle' I mean don't do anything
+ // with these sections).
+ Allocate = 0;
+ Addr = nullptr;
+ LLVM_DEBUG(
+ dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+ << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
+ }
+
+ Sections.push_back(
+ SectionEntry(Name, Addr, DataSize, Allocate, (uintptr_t)pData));
+
+ // The load address of a TLS section is not equal to the address of its
+ // initialization image
+ if (IsTLS)
+ Sections.back().setLoadAddress(LoadAddress);
+ // Debug info sections are linked as if their load address was zero
+ if (!IsRequired)
+ Sections.back().setLoadAddress(0);
+
+ return SectionID;
+}
+
+Expected<unsigned>
+RuntimeDyldImpl::findOrEmitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode,
+ ObjSectionToIDMap &LocalSections) {
+
+ unsigned SectionID = 0;
+ ObjSectionToIDMap::iterator i = LocalSections.find(Section);
+ if (i != LocalSections.end())
+ SectionID = i->second;
+ else {
+ if (auto SectionIDOrErr = emitSection(Obj, Section, IsCode))
+ SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ LocalSections[Section] = SectionID;
+ }
+ return SectionID;
+}
+
+void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
+ unsigned SectionID) {
+ Relocations[SectionID].push_back(RE);
+}
+
+void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
+ StringRef SymbolName) {
+ // Relocation by symbol. If the symbol is found in the global symbol table,
+ // create an appropriate section relocation. Otherwise, add it to
+ // ExternalSymbolRelocations.
+ RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
+ if (Loc == GlobalSymbolTable.end()) {
+ ExternalSymbolRelocations[SymbolName].push_back(RE);
+ } else {
+ assert(!SymbolName.empty() &&
+ "Empty symbol should not be in GlobalSymbolTable");
+ // Copy the RE since we want to modify its addend.
+ RelocationEntry RECopy = RE;
+ const auto &SymInfo = Loc->second;
+ RECopy.Addend += SymInfo.getOffset();
+ Relocations[SymInfo.getSectionID()].push_back(RECopy);
+ }
+}
+
// Emit a far-branch stub at Addr for the current target architecture.
// The target address of the stub is not known yet; the per-target
// relocation-resolution code patches it in later. Returns the address that
// relocations against the stub should reference (for most targets this is
// Addr itself; for ARM it is the address of the literal pool slot).
uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
                                             unsigned AbiVariant) {
  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
      Arch == Triple::aarch64_32) {
    // This stub has to be able to access the full address space,
    // since symbol lookup won't necessarily find a handy, in-range,
    // PLT stub for functions which could be anywhere.
    // Stub can use ip0 (== x16) to calculate address
    writeBytesUnaligned(0xd2e00010, Addr, 4); // movz ip0, #:abs_g3:<addr>
    writeBytesUnaligned(0xf2c00010, Addr+4, 4); // movk ip0, #:abs_g2_nc:<addr>
    writeBytesUnaligned(0xf2a00010, Addr+8, 4); // movk ip0, #:abs_g1_nc:<addr>
    writeBytesUnaligned(0xf2800010, Addr+12, 4); // movk ip0, #:abs_g0_nc:<addr>
    writeBytesUnaligned(0xd61f0200, Addr+16, 4); // br ip0

    return Addr;
  } else if (Arch == Triple::arm || Arch == Triple::armeb) {
    // TODO: There is only ARM far stub now. We should add the Thumb stub,
    // and stubs for branches Thumb - ARM and ARM - Thumb.
    // The 64-bit... sorry: the branch loads its own target from the literal
    // word immediately following the instruction, so return Addr + 4 (the
    // slot the resolver must fill in).
    writeBytesUnaligned(0xe51ff004, Addr, 4); // ldr pc, [pc, #-4]
    return Addr + 4;
  } else if (IsMipsO32ABI || IsMipsN32ABI) {
    // 32-bit MIPS: load the target into t9 in two halves, then jump.
    // 0:   3c190000        lui t9,%hi(addr).
    // 4:   27390000        addiu t9,t9,%lo(addr).
    // 8:   03200008        jr t9.
    // c:   00000000        nop.
    const unsigned LuiT9Instr = 0x3c190000, AdduiT9Instr = 0x27390000;
    const unsigned NopInstr = 0x0;
    unsigned JrT9Instr = 0x03200008;
    // R6 removed the classic 'jr' encoding; use the R6 encoding there.
    if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_32R6 ||
        (AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
      JrT9Instr = 0x03200009;

    writeBytesUnaligned(LuiT9Instr, Addr, 4);
    writeBytesUnaligned(AdduiT9Instr, Addr + 4, 4);
    writeBytesUnaligned(JrT9Instr, Addr + 8, 4);
    writeBytesUnaligned(NopInstr, Addr + 12, 4);
    return Addr;
  } else if (IsMipsN64ABI) {
    // 64-bit MIPS: build the full 64-bit address in t9 16 bits at a time.
    // 0:   3c190000        lui t9,%highest(addr).
    // 4:   67390000        daddiu t9,t9,%higher(addr).
    // 8:   0019CC38        dsll t9,t9,16.
    // c:   67390000        daddiu t9,t9,%hi(addr).
    // 10:  0019CC38        dsll t9,t9,16.
    // 14:  67390000        daddiu t9,t9,%lo(addr).
    // 18:  03200008        jr t9.
    // 1c:  00000000        nop.
    const unsigned LuiT9Instr = 0x3c190000, DaddiuT9Instr = 0x67390000,
                   DsllT9Instr = 0x19CC38;
    const unsigned NopInstr = 0x0;
    unsigned JrT9Instr = 0x03200008;
    if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
      JrT9Instr = 0x03200009;

    writeBytesUnaligned(LuiT9Instr, Addr, 4);
    writeBytesUnaligned(DaddiuT9Instr, Addr + 4, 4);
    writeBytesUnaligned(DsllT9Instr, Addr + 8, 4);
    writeBytesUnaligned(DaddiuT9Instr, Addr + 12, 4);
    writeBytesUnaligned(DsllT9Instr, Addr + 16, 4);
    writeBytesUnaligned(DaddiuT9Instr, Addr + 20, 4);
    writeBytesUnaligned(JrT9Instr, Addr + 24, 4);
    writeBytesUnaligned(NopInstr, Addr + 28, 4);
    return Addr;
  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
    // Depending on which version of the ELF ABI is in use, we need to
    // generate one of two variants of the stub. They both start with
    // the same sequence to load the target address into r12.
    writeInt32BE(Addr,    0x3D800000); // lis   r12, highest(addr)
    writeInt32BE(Addr+4,  0x618C0000); // ori   r12, higher(addr)
    writeInt32BE(Addr+8,  0x798C07C6); // sldi  r12, r12, 32
    writeInt32BE(Addr+12, 0x658C0000); // oris  r12, r12, h(addr)
    writeInt32BE(Addr+16, 0x618C0000); // ori   r12, r12, l(addr)
    if (AbiVariant == 2) {
      // PowerPC64 stub ELFv2 ABI: The address points to the function itself.
      // The address is already in r12 as required by the ABI.  Branch to it.
      writeInt32BE(Addr+20, 0xF8410018); // std   r2,  24(r1)
      writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
      writeInt32BE(Addr+28, 0x4E800420); // bctr
    } else {
      // PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
      // Load the function address on r11 and sets it to control register. Also
      // loads the function TOC in r2 and environment pointer to r11.
      writeInt32BE(Addr+20, 0xF8410028); // std   r2,  40(r1)
      writeInt32BE(Addr+24, 0xE96C0000); // ld    r11, 0(r12)
      writeInt32BE(Addr+28, 0xE84C0008); // ld    r2,  0(r12)
      writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
      writeInt32BE(Addr+36, 0xE96C0010); // ld    r11, 16(r2)
      writeInt32BE(Addr+40, 0x4E800420); // bctr
    }
    return Addr;
  } else if (Arch == Triple::systemz) {
    writeInt16BE(Addr,    0xC418);     // lgrl %r1,.+8
    writeInt16BE(Addr+2,  0x0000);
    writeInt16BE(Addr+4,  0x0004);
    writeInt16BE(Addr+6,  0x07F1);     // bcr 15,%r1 (i.e. br %r1)
    // 8-byte address stored at Addr + 8
    return Addr;
  } else if (Arch == Triple::x86_64) {
    *Addr      = 0xFF; // jmp
    *(Addr+1)  = 0x25; // rip
    // 32-bit PC-relative address of the GOT entry will be stored at Addr+2
  } else if (Arch == Triple::x86) {
    *Addr      = 0xE9; // 32-bit pc-relative jump.
  }
  return Addr;
}
+
+// Assign an address to a symbol name and resolve all the relocations
+// associated with it.
+void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
+ uint64_t Addr) {
+ // The address to use for relocation resolution is not
+ // the address of the local section buffer. We must be doing
+ // a remote execution environment of some sort. Relocations can't
+ // be applied until all the sections have been moved. The client must
+ // trigger this with a call to MCJIT::finalize() or
+ // RuntimeDyld::resolveRelocations().
+ //
+ // Addr is a uint64_t because we can't assume the pointer width
+ // of the target is the same as that of the host. Just use a generic
+ // "big enough" type.
+ LLVM_DEBUG(
+ dbgs() << "Reassigning address for section " << SectionID << " ("
+ << Sections[SectionID].getName() << "): "
+ << format("0x%016" PRIx64, Sections[SectionID].getLoadAddress())
+ << " -> " << format("0x%016" PRIx64, Addr) << "\n");
+ Sections[SectionID].setLoadAddress(Addr);
+}
+
+void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
+ uint64_t Value) {
+ for (const RelocationEntry &RE : Relocs) {
+ // Ignore relocations for sections that were not loaded
+ if (RE.SectionID != AbsoluteSymbolSection &&
+ Sections[RE.SectionID].getAddress() == nullptr)
+ continue;
+ resolveRelocation(RE, Value);
+ }
+}
+
+void RuntimeDyldImpl::applyExternalSymbolRelocations(
+ const StringMap<JITEvaluatedSymbol> ExternalSymbolMap) {
+ for (auto &RelocKV : ExternalSymbolRelocations) {
+ StringRef Name = RelocKV.first();
+ RelocationList &Relocs = RelocKV.second;
+ if (Name.size() == 0) {
+ // This is an absolute symbol, use an address of zero.
+ LLVM_DEBUG(dbgs() << "Resolving absolute relocations."
+ << "\n");
+ resolveRelocationList(Relocs, 0);
+ } else {
+ uint64_t Addr = 0;
+ JITSymbolFlags Flags;
+ RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(Name);
+ if (Loc == GlobalSymbolTable.end()) {
+ auto RRI = ExternalSymbolMap.find(Name);
+ assert(RRI != ExternalSymbolMap.end() && "No result for symbol");
+ Addr = RRI->second.getAddress();
+ Flags = RRI->second.getFlags();
+ } else {
+ // We found the symbol in our global table. It was probably in a
+ // Module that we loaded previously.
+ const auto &SymInfo = Loc->second;
+ Addr = getSectionLoadAddress(SymInfo.getSectionID()) +
+ SymInfo.getOffset();
+ Flags = SymInfo.getFlags();
+ }
+
+ // FIXME: Implement error handling that doesn't kill the host program!
+ if (!Addr && !Resolver.allowsZeroSymbols())
+ report_fatal_error(Twine("Program used external function '") + Name +
+ "' which could not be resolved!");
+
+ // If Resolver returned UINT64_MAX, the client wants to handle this symbol
+ // manually and we shouldn't resolve its relocations.
+ if (Addr != UINT64_MAX) {
+
+ // Tweak the address based on the symbol flags if necessary.
+ // For example, this is used by RuntimeDyldMachOARM to toggle the low bit
+ // if the target symbol is Thumb.
+ Addr = modifyAddressBasedOnFlags(Addr, Flags);
+
+ LLVM_DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+ << format("0x%lx", Addr) << "\n");
+ resolveRelocationList(Relocs, Addr);
+ }
+ }
+ }
+ ExternalSymbolRelocations.clear();
+}
+
// Look up every still-unresolved external symbol via the Resolver, then apply
// the corresponding relocations. Returns any lookup error to the caller.
Error RuntimeDyldImpl::resolveExternalSymbols() {
  StringMap<JITEvaluatedSymbol> ExternalSymbolMap;

  // Resolution can trigger emission of more symbols, so iterate until
  // we've resolved *everything*.
  {
    JITSymbolResolver::LookupSet ResolvedSymbols;

    while (true) {
      JITSymbolResolver::LookupSet NewSymbols;

      // Collect the symbols we have not yet asked the resolver about:
      // skip absolute symbols (empty name), symbols we define ourselves,
      // and symbols resolved on a previous iteration.
      for (auto &RelocKV : ExternalSymbolRelocations) {
        StringRef Name = RelocKV.first();
        if (!Name.empty() && !GlobalSymbolTable.count(Name) &&
            !ResolvedSymbols.count(Name))
          NewSymbols.insert(Name);
      }

      if (NewSymbols.empty())
        break;

// MSVC's std::promise cannot hold a move-only Expected directly; use the
// wrapper type from Support/MSVCErrorWorkarounds.h on that toolchain.
#ifdef _MSC_VER
      using ExpectedLookupResult =
          MSVCPExpected<JITSymbolResolver::LookupResult>;
#else
      using ExpectedLookupResult = Expected<JITSymbolResolver::LookupResult>;
#endif

      // Bridge the asynchronous lookup callback into a synchronous wait via
      // a promise/future pair.
      auto NewSymbolsP = std::make_shared<std::promise<ExpectedLookupResult>>();
      auto NewSymbolsF = NewSymbolsP->get_future();
      Resolver.lookup(NewSymbols,
                      [=](Expected<JITSymbolResolver::LookupResult> Result) {
                        NewSymbolsP->set_value(std::move(Result));
                      });

      auto NewResolverResults = NewSymbolsF.get();

      if (!NewResolverResults)
        return NewResolverResults.takeError();

      assert(NewResolverResults->size() == NewSymbols.size() &&
             "Should have errored on unresolved symbols");

      // Record this batch so the next iteration only asks about symbols
      // introduced since.
      for (auto &RRKV : *NewResolverResults) {
        assert(!ResolvedSymbols.count(RRKV.first) && "Redundant resolution?");
        ExternalSymbolMap.insert(RRKV);
        ResolvedSymbols.insert(RRKV.first);
      }
    }
  }

  applyExternalSymbolRelocations(ExternalSymbolMap);

  return Error::success();
}
+
// Asynchronously resolve external symbols, apply all relocations, register
// EH frames and finalize memory, then hand the object (and any error) to
// OnEmitted. Takes ownership of the RuntimeDyldImpl instance: it is moved
// into a shared_ptr so both this frame and the resolver continuation keep it
// alive until resolution completes.
void RuntimeDyldImpl::finalizeAsync(
    std::unique_ptr<RuntimeDyldImpl> This,
    unique_function<void(object::OwningBinary<object::ObjectFile>,
                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
        OnEmitted,
    object::OwningBinary<object::ObjectFile> O,
    std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info) {

  auto SharedThis = std::shared_ptr<RuntimeDyldImpl>(std::move(This));
  // Runs after (possibly asynchronous) symbol lookup finishes; performs the
  // remaining finalization steps and invokes OnEmitted exactly once.
  auto PostResolveContinuation =
      [SharedThis, OnEmitted = std::move(OnEmitted), O = std::move(O),
       Info = std::move(Info)](
          Expected<JITSymbolResolver::LookupResult> Result) mutable {
        if (!Result) {
          OnEmitted(std::move(O), std::move(Info), Result.takeError());
          return;
        }

        /// Copy the result into a StringMap, where the keys are held by value.
        StringMap<JITEvaluatedSymbol> Resolved;
        for (auto &KV : *Result)
          Resolved[KV.first] = KV.second;

        SharedThis->applyExternalSymbolRelocations(Resolved);
        SharedThis->resolveLocalRelocations();
        SharedThis->registerEHFrames();
        std::string ErrMsg;
        // finalizeMemory returns true on failure and fills in ErrMsg.
        if (SharedThis->MemMgr.finalizeMemory(&ErrMsg))
          OnEmitted(std::move(O), std::move(Info),
                    make_error<StringError>(std::move(ErrMsg),
                                            inconvertibleErrorCode()));
        else
          OnEmitted(std::move(O), std::move(Info), Error::success());
      };

  // Gather the external symbols that still need resolving. Absolute-symbol
  // relocations (empty name) are handled by applyExternalSymbolRelocations.
  JITSymbolResolver::LookupSet Symbols;

  for (auto &RelocKV : SharedThis->ExternalSymbolRelocations) {
    StringRef Name = RelocKV.first();
    if (Name.empty()) // Skip absolute symbol relocations.
      continue;
    assert(!SharedThis->GlobalSymbolTable.count(Name) &&
           "Name already processed. RuntimeDyld instances can not be re-used "
           "when finalizing with finalizeAsync.");
    Symbols.insert(Name);
  }

  if (!Symbols.empty()) {
    SharedThis->Resolver.lookup(Symbols, std::move(PostResolveContinuation));
  } else
    // Nothing to look up: run the continuation immediately with an empty
    // result set.
    PostResolveContinuation(std::map<StringRef, JITEvaluatedSymbol>());
}
+
+//===----------------------------------------------------------------------===//
+// RuntimeDyld class implementation
+
+uint64_t RuntimeDyld::LoadedObjectInfo::getSectionLoadAddress(
+ const object::SectionRef &Sec) const {
+
+ auto I = ObjSecToIDMap.find(Sec);
+ if (I != ObjSecToIDMap.end())
+ return RTDyld.Sections[I->second].getLoadAddress();
+
+ return 0;
+}
+
// Default implementation: TLS allocation is optional for memory managers,
// so the base class aborts. Subclasses that support TLS override this.
RuntimeDyld::MemoryManager::TLSSection
RuntimeDyld::MemoryManager::allocateTLSSection(uintptr_t Size,
                                               unsigned Alignment,
                                               unsigned SectionID,
                                               StringRef SectionName) {
  report_fatal_error("allocation of TLS not implemented");
}
+
// Out-of-line virtual method anchors: give each class a key function so its
// vtable is emitted in this translation unit only.
void RuntimeDyld::MemoryManager::anchor() {}
void JITSymbolResolver::anchor() {}
void LegacyJITSymbolResolver::anchor() {}
+
// Construct a RuntimeDyld facade. The format-specific implementation (Dyld)
// is created lazily on the first loadObject() call.
RuntimeDyld::RuntimeDyld(RuntimeDyld::MemoryManager &MemMgr,
                         JITSymbolResolver &Resolver)
    : MemMgr(MemMgr), Resolver(Resolver) {
  // FIXME: There's a potential issue lurking here if a single instance of
  // RuntimeDyld is used to load multiple objects.  The current implementation
  // associates a single memory manager with a RuntimeDyld instance.  Even
  // though the public class spawns a new 'impl' instance for each load,
  // they share a single memory manager.  This can become a problem when page
  // permissions are applied.
  Dyld = nullptr;
  ProcessAllSections = false;
}
+
// Defined out-of-line so ~unique_ptr<RuntimeDyldImpl> sees the complete type.
RuntimeDyld::~RuntimeDyld() = default;
+
+static std::unique_ptr<RuntimeDyldCOFF>
+createRuntimeDyldCOFF(
+ Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver, bool ProcessAllSections,
+ RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+ std::unique_ptr<RuntimeDyldCOFF> Dyld =
+ RuntimeDyldCOFF::create(Arch, MM, Resolver);
+ Dyld->setProcessAllSections(ProcessAllSections);
+ Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+ return Dyld;
+}
+
+static std::unique_ptr<RuntimeDyldELF>
+createRuntimeDyldELF(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver, bool ProcessAllSections,
+ RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+ std::unique_ptr<RuntimeDyldELF> Dyld =
+ RuntimeDyldELF::create(Arch, MM, Resolver);
+ Dyld->setProcessAllSections(ProcessAllSections);
+ Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+ return Dyld;
+}
+
+static std::unique_ptr<RuntimeDyldMachO>
+createRuntimeDyldMachO(
+ Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver,
+ bool ProcessAllSections,
+ RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+ std::unique_ptr<RuntimeDyldMachO> Dyld =
+ RuntimeDyldMachO::create(Arch, MM, Resolver);
+ Dyld->setProcessAllSections(ProcessAllSections);
+ Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+ return Dyld;
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyld::loadObject(const ObjectFile &Obj) {
+ if (!Dyld) {
+ if (Obj.isELF())
+ Dyld =
+ createRuntimeDyldELF(static_cast<Triple::ArchType>(Obj.getArch()),
+ MemMgr, Resolver, ProcessAllSections,
+ std::move(NotifyStubEmitted));
+ else if (Obj.isMachO())
+ Dyld = createRuntimeDyldMachO(
+ static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
+ ProcessAllSections, std::move(NotifyStubEmitted));
+ else if (Obj.isCOFF())
+ Dyld = createRuntimeDyldCOFF(
+ static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
+ ProcessAllSections, std::move(NotifyStubEmitted));
+ else
+ report_fatal_error("Incompatible object format!");
+ }
+
+ if (!Dyld->isCompatibleFile(Obj))
+ report_fatal_error("Incompatible object format!");
+
+ auto LoadedObjInfo = Dyld->loadObject(Obj);
+ MemMgr.notifyObjectLoaded(*this, Obj);
+ return LoadedObjInfo;
+}
+
+void *RuntimeDyld::getSymbolLocalAddress(StringRef Name) const {
+ if (!Dyld)
+ return nullptr;
+ return Dyld->getSymbolLocalAddress(Name);
+}
+
// Return the SectionID of the section defining Name. Requires that an object
// has already been loaded (Dyld is non-null).
unsigned RuntimeDyld::getSymbolSectionID(StringRef Name) const {
  assert(Dyld && "No RuntimeDyld instance attached");
  return Dyld->getSymbolSectionID(Name);
}
+
// Look up Name in the loaded objects' symbol tables; returns a null symbol
// if nothing has been loaded yet.
JITEvaluatedSymbol RuntimeDyld::getSymbol(StringRef Name) const {
  if (!Dyld)
    return nullptr;
  return Dyld->getSymbol(Name);
}
+
+std::map<StringRef, JITEvaluatedSymbol> RuntimeDyld::getSymbolTable() const {
+ if (!Dyld)
+ return std::map<StringRef, JITEvaluatedSymbol>();
+ return Dyld->getSymbolTable();
+}
+
// Forward to the implementation; callers must have loaded an object first.
void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
+
// Forward to the implementation: set the target load address for SectionID.
void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
  Dyld->reassignSectionAddress(SectionID, Addr);
}
+
// Forward to the implementation: map a host-local section buffer to its
// target load address.
void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
                                    uint64_t TargetAddress) {
  Dyld->mapSectionAddress(LocalAddress, TargetAddress);
}
+
// Forward to the implementation; callers must have loaded an object first.
bool RuntimeDyld::hasError() { return Dyld->hasError(); }
+
// Forward to the implementation; callers must have loaded an object first.
StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
+
+void RuntimeDyld::finalizeWithMemoryManagerLocking() {
+ bool MemoryFinalizationLocked = MemMgr.FinalizationLocked;
+ MemMgr.FinalizationLocked = true;
+ resolveRelocations();
+ registerEHFrames();
+ if (!MemoryFinalizationLocked) {
+ MemMgr.finalizeMemory();
+ MemMgr.FinalizationLocked = false;
+ }
+}
+
// Return the loaded bytes of the section with the given SectionID.
StringRef RuntimeDyld::getSectionContent(unsigned SectionID) const {
  assert(Dyld && "No Dyld instance attached");
  return Dyld->getSectionContent(SectionID);
}
+
// Return the target load address of the section with the given SectionID.
uint64_t RuntimeDyld::getSectionLoadAddress(unsigned SectionID) const {
  assert(Dyld && "No Dyld instance attached");
  return Dyld->getSectionLoadAddress(SectionID);
}
+
// Register exception-handling frames with the runtime; no-op before the
// first object is loaded.
void RuntimeDyld::registerEHFrames() {
  if (Dyld)
    Dyld->registerEHFrames();
}
+
// Deregister previously registered exception-handling frames; no-op before
// the first object is loaded.
void RuntimeDyld::deregisterEHFrames() {
  if (Dyld)
    Dyld->deregisterEHFrames();
}
+// FIXME: Kill this with fire once we have a new JIT linker: this is only here
+// so that we can re-use RuntimeDyld's implementation without twisting the
+// interface any further for ORC's purposes.
// Load O with a fresh RuntimeDyld, notify OnLoaded once relocatable info is
// available, then finalize asynchronously; OnEmitted receives the object,
// its LoadedObjectInfo, and any error, exactly once on every path.
void jitLinkForORC(
    object::OwningBinary<object::ObjectFile> O,
    RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver,
    bool ProcessAllSections,
    unique_function<Error(const object::ObjectFile &Obj,
                          RuntimeDyld::LoadedObjectInfo &LoadedObj,
                          std::map<StringRef, JITEvaluatedSymbol>)>
        OnLoaded,
    unique_function<void(object::OwningBinary<object::ObjectFile>,
                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
        OnEmitted) {

  RuntimeDyld RTDyld(MemMgr, Resolver);
  RTDyld.setProcessAllSections(ProcessAllSections);

  auto Info = RTDyld.loadObject(*O.getBinary());

  if (RTDyld.hasError()) {
    OnEmitted(std::move(O), std::move(Info),
              make_error<StringError>(RTDyld.getErrorString(),
                                      inconvertibleErrorCode()));
    return;
  }

  if (auto Err = OnLoaded(*O.getBinary(), *Info, RTDyld.getSymbolTable())) {
    OnEmitted(std::move(O), std::move(Info), std::move(Err));
    return;
  }

  // Hand ownership of the impl to finalizeAsync; it keeps the impl alive
  // until symbol resolution and memory finalization complete.
  RuntimeDyldImpl::finalizeAsync(std::move(RTDyld.Dyld), std::move(OnEmitted),
                                 std::move(O), std::move(Info));
}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
new file mode 100644
index 000000000000..25a2d8780fb5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
@@ -0,0 +1,122 @@
+//===-- RuntimeDyldCOFF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of COFF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldCOFF.h"
+#include "Targets/RuntimeDyldCOFFAArch64.h"
+#include "Targets/RuntimeDyldCOFFI386.h"
+#include "Targets/RuntimeDyldCOFFThumb.h"
+#include "Targets/RuntimeDyldCOFFX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
// LoadedObjectInfo for COFF objects. COFF debug info lives in the object
// itself, so no separate debug object is produced.
class LoadedCOFFObjectInfo final
    : public LoadedObjectInfoHelper<LoadedCOFFObjectInfo,
                                    RuntimeDyld::LoadedObjectInfo> {
public:
  LoadedCOFFObjectInfo(
      RuntimeDyldImpl &RTDyld,
      RuntimeDyld::LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}

  // Returns an empty binary: there is no relocated copy to hand to debuggers.
  OwningBinary<ObjectFile>
  getObjectForDebug(const ObjectFile &Obj) const override {
    return OwningBinary<ObjectFile>();
  }
};
+}
+
+namespace llvm {
+
+std::unique_ptr<RuntimeDyldCOFF>
+llvm::RuntimeDyldCOFF::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default: llvm_unreachable("Unsupported target for RuntimeDyldCOFF.");
+ case Triple::x86:
+ return std::make_unique<RuntimeDyldCOFFI386>(MemMgr, Resolver);
+ case Triple::thumb:
+ return std::make_unique<RuntimeDyldCOFFThumb>(MemMgr, Resolver);
+ case Triple::x86_64:
+ return std::make_unique<RuntimeDyldCOFFX86_64>(MemMgr, Resolver);
+ case Triple::aarch64:
+ return std::make_unique<RuntimeDyldCOFFAArch64>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldCOFF::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O)) {
+ return std::make_unique<LoadedCOFFObjectInfo>(*this, *ObjSectionToIDOrErr);
+ } else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
// Return the symbol's offset within its section.
uint64_t RuntimeDyldCOFF::getSymbolOffset(const SymbolRef &Sym) {
  // The value in a relocatable COFF object is the offset.
  return cantFail(Sym.getValue());
}
+
// Return the offset (within section SectionID's stub area) of a
// pointer-sized DLL-import entry for Name, creating the entry and its
// relocation on first use. Entries are cached in Stubs so repeated requests
// for the same symbol share one slot.
uint64_t RuntimeDyldCOFF::getDLLImportOffset(unsigned SectionID, StubMap &Stubs,
                                             StringRef Name,
                                             bool SetSectionIDMinus1) {
  LLVM_DEBUG(dbgs() << "Getting DLLImport entry for " << Name << "... ");
  assert(Name.starts_with(getImportSymbolPrefix()) &&
         "Not a DLLImport symbol?");
  RelocationValueRef Reloc;
  Reloc.SymbolName = Name.data();
  // Reuse an existing entry for this symbol if we already made one.
  auto I = Stubs.find(Reloc);
  if (I != Stubs.end()) {
    LLVM_DEBUG(dbgs() << format("{0:x8}", I->second) << "\n");
    return I->second;
  }

  assert(SectionID < Sections.size() && "SectionID out of range");
  auto &Sec = Sections[SectionID];
  // Carve a pointer-aligned, pointer-sized slot out of the stub area and
  // advance the stub offset past it.
  auto EntryOffset = alignTo(Sec.getStubOffset(), PointerSize);
  Sec.advanceStubOffset(EntryOffset + PointerSize - Sec.getStubOffset());
  Stubs[Reloc] = EntryOffset;

  // Emit a pointer-sized relocation that will fill the slot with the
  // resolved address of the imported symbol (minus the "__imp_" prefix).
  RelocationEntry RE(SectionID, EntryOffset, PointerReloc, 0, false,
                     Log2_64(PointerSize));
  // Hack to tell I386/Thumb resolveRelocation that this isn't section relative.
  if (SetSectionIDMinus1)
    RE.Sections.SectionA = -1;
  addRelocationForSymbol(RE, Name.drop_front(getImportSymbolPrefix().size()));

  LLVM_DEBUG({
    dbgs() << "Creating entry at "
           << formatv("{0:x16} + {1:x8} ( {2:x16} )", Sec.getLoadAddress(),
                      EntryOffset, Sec.getLoadAddress() + EntryOffset)
           << "\n";
  });
  return EntryOffset;
}
+
// Only COFF objects can be loaded by this dyld.
bool RuntimeDyldCOFF::isCompatibleFile(const object::ObjectFile &Obj) const {
  return Obj.isCOFF();
}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
new file mode 100644
index 000000000000..41ee06c15448
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
@@ -0,0 +1,61 @@
+//===-- RuntimeDyldCOFF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIME_DYLD_COFF_H
+#define LLVM_RUNTIME_DYLD_COFF_H
+
+#include "RuntimeDyldImpl.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+// Common base class for COFF dynamic linker support.
+// Concrete subclasses for each target can be found in ./Targets.
// Common base class for COFF dynamic linker support.
// Concrete subclasses for each target can be found in ./Targets.
class RuntimeDyldCOFF : public RuntimeDyldImpl {

public:
  std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
  loadObject(const object::ObjectFile &Obj) override;
  bool isCompatibleFile(const object::ObjectFile &Obj) const override;

  // Factory: builds the target-specific subclass for Arch.
  static std::unique_ptr<RuntimeDyldCOFF>
  create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
         JITSymbolResolver &Resolver);

protected:
  // PointerSize is the target pointer width in bytes (4 or 8); PointerReloc
  // is the relocation type subclasses use for pointer-sized entries
  // (presumably an IMAGE_REL_* value -- set by each target subclass).
  RuntimeDyldCOFF(RuntimeDyld::MemoryManager &MemMgr,
                  JITSymbolResolver &Resolver, unsigned PointerSize,
                  uint32_t PointerReloc)
      : RuntimeDyldImpl(MemMgr, Resolver), PointerSize(PointerSize),
        PointerReloc(PointerReloc) {
    assert((PointerSize == 4 || PointerSize == 8) && "Unexpected pointer size");
  }

  uint64_t getSymbolOffset(const SymbolRef &Sym);
  // Get (creating on first use) the stub-area offset of a pointer-sized
  // DLL-import entry for Name.
  uint64_t getDLLImportOffset(unsigned SectionID, StubMap &Stubs,
                              StringRef Name, bool SetSectionIDMinus1 = false);

  // Prefix that marks a symbol as a DLL import in COFF objects.
  static constexpr StringRef getImportSymbolPrefix() { return "__imp_"; }

private:
  unsigned PointerSize;
  uint32_t PointerReloc;
};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
new file mode 100644
index 000000000000..b98d455cea37
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -0,0 +1,1061 @@
+//===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include <cctype>
+#include <memory>
+#include <utility>
+
+#define DEBUG_TYPE "rtdyld"
+
+using namespace llvm;
+
+namespace {
+struct TargetInfo {
+ const Target *TheTarget;
+ std::unique_ptr<MCSubtargetInfo> STI;
+ std::unique_ptr<MCRegisterInfo> MRI;
+ std::unique_ptr<MCAsmInfo> MAI;
+ std::unique_ptr<MCContext> Ctx;
+ std::unique_ptr<MCDisassembler> Disassembler;
+ std::unique_ptr<MCInstrInfo> MII;
+ std::unique_ptr<MCInstPrinter> InstPrinter;
+};
+} // anonymous namespace
+
+namespace llvm {
+
+// Helper class that implements the language evaluated by RuntimeDyldChecker.
+class RuntimeDyldCheckerExprEval {
+public:
+ RuntimeDyldCheckerExprEval(const RuntimeDyldCheckerImpl &Checker,
+ raw_ostream &ErrStream)
+ : Checker(Checker) {}
+
+ bool evaluate(StringRef Expr) const {
+ // Expect equality expression of the form 'LHS = RHS'.
+ Expr = Expr.trim();
+ size_t EQIdx = Expr.find('=');
+
+ ParseContext OutsideLoad(false);
+
+ // Evaluate LHS.
+ StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
+ StringRef RemainingExpr;
+ EvalResult LHSResult;
+ std::tie(LHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(LHSExpr, OutsideLoad), OutsideLoad);
+ if (LHSResult.hasError())
+ return handleError(Expr, LHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
+
+ // Evaluate RHS.
+ StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RHSExpr, OutsideLoad), OutsideLoad);
+ if (RHSResult.hasError())
+ return handleError(Expr, RHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
+
+ if (LHSResult.getValue() != RHSResult.getValue()) {
+ Checker.ErrStream << "Expression '" << Expr << "' is false: "
+ << format("0x%" PRIx64, LHSResult.getValue())
+ << " != " << format("0x%" PRIx64, RHSResult.getValue())
+ << "\n";
+ return false;
+ }
+ return true;
+ }
+
+private:
+ // RuntimeDyldCheckerExprEval requires some context when parsing exprs. In
+ // particular, it needs to know whether a symbol is being evaluated in the
+ // context of a load, in which case we want the linker's local address for
+ // the symbol, or outside of a load, in which case we want the symbol's
+ // address in the remote target.
+
+ struct ParseContext {
+ bool IsInsideLoad;
+ ParseContext(bool IsInsideLoad) : IsInsideLoad(IsInsideLoad) {}
+ };
+
+ const RuntimeDyldCheckerImpl &Checker;
+
+ enum class BinOpToken : unsigned {
+ Invalid,
+ Add,
+ Sub,
+ BitwiseAnd,
+ BitwiseOr,
+ ShiftLeft,
+ ShiftRight
+ };
+
+ class EvalResult {
+ public:
+ EvalResult() : Value(0) {}
+ EvalResult(uint64_t Value) : Value(Value) {}
+ EvalResult(std::string ErrorMsg)
+ : Value(0), ErrorMsg(std::move(ErrorMsg)) {}
+ uint64_t getValue() const { return Value; }
+ bool hasError() const { return ErrorMsg != ""; }
+ const std::string &getErrorMsg() const { return ErrorMsg; }
+
+ private:
+ uint64_t Value;
+ std::string ErrorMsg;
+ };
+
+ StringRef getTokenForError(StringRef Expr) const {
+ if (Expr.empty())
+ return "";
+
+ StringRef Token, Remaining;
+ if (isalpha(Expr[0]))
+ std::tie(Token, Remaining) = parseSymbol(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(Token, Remaining) = parseNumberString(Expr);
+ else {
+ unsigned TokLen = 1;
+ if (Expr.starts_with("<<") || Expr.starts_with(">>"))
+ TokLen = 2;
+ Token = Expr.substr(0, TokLen);
+ }
+ return Token;
+ }
+
+ EvalResult unexpectedToken(StringRef TokenStart, StringRef SubExpr,
+ StringRef ErrText) const {
+ std::string ErrorMsg("Encountered unexpected token '");
+ ErrorMsg += getTokenForError(TokenStart);
+ if (SubExpr != "") {
+ ErrorMsg += "' while parsing subexpression '";
+ ErrorMsg += SubExpr;
+ }
+ ErrorMsg += "'";
+ if (ErrText != "") {
+ ErrorMsg += " ";
+ ErrorMsg += ErrText;
+ }
+ return EvalResult(std::move(ErrorMsg));
+ }
+
+ bool handleError(StringRef Expr, const EvalResult &R) const {
+ assert(R.hasError() && "Not an error result.");
+ Checker.ErrStream << "Error evaluating expression '" << Expr
+ << "': " << R.getErrorMsg() << "\n";
+ return false;
+ }
+
+ std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
+ if (Expr.empty())
+ return std::make_pair(BinOpToken::Invalid, "");
+
+ // Handle the two 2-character tokens.
+ if (Expr.starts_with("<<"))
+ return std::make_pair(BinOpToken::ShiftLeft, Expr.substr(2).ltrim());
+ if (Expr.starts_with(">>"))
+ return std::make_pair(BinOpToken::ShiftRight, Expr.substr(2).ltrim());
+
+ // Handle one-character tokens.
+ BinOpToken Op;
+ switch (Expr[0]) {
+ default:
+ return std::make_pair(BinOpToken::Invalid, Expr);
+ case '+':
+ Op = BinOpToken::Add;
+ break;
+ case '-':
+ Op = BinOpToken::Sub;
+ break;
+ case '&':
+ Op = BinOpToken::BitwiseAnd;
+ break;
+ case '|':
+ Op = BinOpToken::BitwiseOr;
+ break;
+ }
+
+ return std::make_pair(Op, Expr.substr(1).ltrim());
+ }
+
+ EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
+ const EvalResult &RHSResult) const {
+ switch (Op) {
+ default:
+ llvm_unreachable("Tried to evaluate unrecognized operation.");
+ case BinOpToken::Add:
+ return EvalResult(LHSResult.getValue() + RHSResult.getValue());
+ case BinOpToken::Sub:
+ return EvalResult(LHSResult.getValue() - RHSResult.getValue());
+ case BinOpToken::BitwiseAnd:
+ return EvalResult(LHSResult.getValue() & RHSResult.getValue());
+ case BinOpToken::BitwiseOr:
+ return EvalResult(LHSResult.getValue() | RHSResult.getValue());
+ case BinOpToken::ShiftLeft:
+ return EvalResult(LHSResult.getValue() << RHSResult.getValue());
+ case BinOpToken::ShiftRight:
+ return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
+ }
+ }
+
+ // Parse a symbol and return a (string, string) pair representing the symbol
+ // name and expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
+ size_t FirstNonSymbol = Expr.find_first_not_of("0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ ":_.$");
+ return std::make_pair(Expr.substr(0, FirstNonSymbol),
+ Expr.substr(FirstNonSymbol).ltrim());
+ }
+
+ // Evaluate a call to decode_operand. Decode the instruction operand at the
+ // given symbol and get the value of the requested operand.
+ // Returns an error if the instruction cannot be decoded, or the requested
+ // operand is not an immediate.
+ // On success, returns a pair containing the value of the operand, plus
+ // the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
+ if (!Expr.starts_with("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(
+ EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
+ "");
+
+ // if there is an offset number expr
+ int64_t Offset = 0;
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+ switch (BinOp) {
+ case BinOpToken::Add: {
+ EvalResult Number;
+ std::tie(Number, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ Offset = Number.getValue();
+ break;
+ }
+ case BinOpToken::Invalid:
+ break;
+ default:
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected '+' for offset or ',' if no offset"),
+ "");
+ }
+
+ if (!RemainingExpr.starts_with(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OpIdxExpr;
+ std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (OpIdxExpr.hasError())
+ return std::make_pair(OpIdxExpr, "");
+
+ if (!RemainingExpr.starts_with(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size, Offset))
+ return std::make_pair(
+ EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
+ "");
+
+ unsigned OpIdx = OpIdxExpr.getValue();
+
+ auto printInst = [this](StringRef Symbol, MCInst Inst,
+ raw_string_ostream &ErrMsgStream) {
+ auto TT = Checker.getTripleForSymbol(Checker.getTargetFlag(Symbol));
+ auto TI = getTargetInfo(TT, Checker.getCPU(), Checker.getFeatures());
+ if (auto E = TI.takeError()) {
+ errs() << "Error obtaining instruction printer: "
+ << toString(std::move(E)) << "\n";
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+ Inst.dump_pretty(ErrMsgStream, TI->InstPrinter.get());
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ };
+
+ if (OpIdx >= Inst.getNumOperands()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
+ << "' for instruction '" << Symbol
+ << "'. Instruction has only "
+ << format("%i", Inst.getNumOperands())
+ << " operands.\nInstruction is:\n ";
+
+ return printInst(Symbol, Inst, ErrMsgStream);
+ }
+
+ const MCOperand &Op = Inst.getOperand(OpIdx);
+ if (!Op.isImm()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Operand '" << format("%i", OpIdx) << "' of instruction '"
+ << Symbol << "' is not an immediate.\nInstruction is:\n ";
+
+ return printInst(Symbol, Inst, ErrMsgStream);
+ }
+
+ return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
+ }
+
+ // Evaluate a call to next_pc.
+ // Decode the instruction at the given symbol and return the following program
+ // counter.
+ // Returns an error if the instruction cannot be decoded.
+ // On success, returns a pair containing the next PC, plus of the
+ // expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr,
+ ParseContext PCtx) const {
+ if (!Expr.starts_with("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(
+ EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.starts_with(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t InstSize;
+ if (!decodeInst(Symbol, Inst, InstSize, 0))
+ return std::make_pair(
+ EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
+ "");
+
+ uint64_t SymbolAddr = PCtx.IsInsideLoad
+ ? Checker.getSymbolLocalAddr(Symbol)
+ : Checker.getSymbolRemoteAddr(Symbol);
+
+ // ARM PC offset is 8 instead of 4, because it accounts for an additional
+ // prefetch instruction that increments PC even though it is implicit.
+ auto TT = Checker.getTripleForSymbol(Checker.getTargetFlag(Symbol));
+ uint64_t PCOffset = TT.getArch() == Triple::ArchType::arm ? 4 : 0;
+
+ uint64_t NextPC = SymbolAddr + InstSize + PCOffset;
+
+ return std::make_pair(EvalResult(NextPC), RemainingExpr);
+ }
+
+ // Evaluate a call to stub_addr/got_addr.
+ // Look up and return the address of the stub for the given
+ // (<file name>, <section name>, <symbol name>) tuple.
+ // On success, returns a pair containing the stub address, plus the expression
+ // remaining to be evaluated.
+ std::pair<EvalResult, StringRef>
+ evalStubOrGOTAddr(StringRef Expr, ParseContext PCtx, bool IsStubAddr) const {
+ if (!Expr.starts_with("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Handle file-name specially, as it may contain characters that aren't
+ // legal for symbols.
+ StringRef StubContainerName;
+ size_t ComaIdx = RemainingExpr.find(',');
+ StubContainerName = RemainingExpr.substr(0, ComaIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(ComaIdx).ltrim();
+
+ if (!RemainingExpr.starts_with(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ // Parse optional parameter to filter by stub kind
+ StringRef KindNameFilter;
+ if (RemainingExpr.starts_with(",")) {
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ size_t ClosingBracket = RemainingExpr.find(")");
+ KindNameFilter = RemainingExpr.substr(0, ClosingBracket);
+ RemainingExpr = RemainingExpr.substr(ClosingBracket);
+ }
+
+ if (!RemainingExpr.starts_with(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ uint64_t StubAddr;
+ std::string ErrorMsg;
+ std::tie(StubAddr, ErrorMsg) =
+ Checker.getStubOrGOTAddrFor(StubContainerName, Symbol, KindNameFilter,
+ PCtx.IsInsideLoad, IsStubAddr);
+
+ if (ErrorMsg != "")
+ return std::make_pair(EvalResult(ErrorMsg), "");
+
+ return std::make_pair(EvalResult(StubAddr), RemainingExpr);
+ }
+
+ std::pair<EvalResult, StringRef> evalSectionAddr(StringRef Expr,
+ ParseContext PCtx) const {
+ if (!Expr.starts_with("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Handle file-name specially, as it may contain characters that aren't
+ // legal for symbols.
+ StringRef FileName;
+ size_t ComaIdx = RemainingExpr.find(',');
+ FileName = RemainingExpr.substr(0, ComaIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(ComaIdx).ltrim();
+
+ if (!RemainingExpr.starts_with(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ StringRef SectionName;
+ size_t CloseParensIdx = RemainingExpr.find(')');
+ SectionName = RemainingExpr.substr(0, CloseParensIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(CloseParensIdx).ltrim();
+
+ if (!RemainingExpr.starts_with(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ uint64_t StubAddr;
+ std::string ErrorMsg;
+ std::tie(StubAddr, ErrorMsg) = Checker.getSectionAddr(
+ FileName, SectionName, PCtx.IsInsideLoad);
+
+ if (ErrorMsg != "")
+ return std::make_pair(EvalResult(ErrorMsg), "");
+
+ return std::make_pair(EvalResult(StubAddr), RemainingExpr);
+ }
+
+ // Evaluate an identifier expr, which may be a symbol, or a call to
+ // one of the builtin functions: get_insn_opcode or get_insn_length.
+ // Return the result, plus the expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ StringRef Symbol;
+ StringRef RemainingExpr;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);
+
+ // Check for builtin function calls.
+ if (Symbol == "decode_operand")
+ return evalDecodeOperand(RemainingExpr);
+ else if (Symbol == "next_pc")
+ return evalNextPC(RemainingExpr, PCtx);
+ else if (Symbol == "stub_addr")
+ return evalStubOrGOTAddr(RemainingExpr, PCtx, true);
+ else if (Symbol == "got_addr")
+ return evalStubOrGOTAddr(RemainingExpr, PCtx, false);
+ else if (Symbol == "section_addr")
+ return evalSectionAddr(RemainingExpr, PCtx);
+
+ if (!Checker.isSymbolValid(Symbol)) {
+ std::string ErrMsg("No known address for symbol '");
+ ErrMsg += Symbol;
+ ErrMsg += "'";
+ if (Symbol.starts_with("L"))
+ ErrMsg += " (this appears to be an assembler local label - "
+ " perhaps drop the 'L'?)";
+
+ return std::make_pair(EvalResult(ErrMsg), "");
+ }
+
+ // The value for the symbol depends on the context we're evaluating in:
+ // Inside a load this is the address in the linker's memory, outside a
+ // load it's the address in the target processes memory.
+ uint64_t Value = PCtx.IsInsideLoad ? Checker.getSymbolLocalAddr(Symbol)
+ : Checker.getSymbolRemoteAddr(Symbol);
+
+ // Looks like a plain symbol reference.
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Parse a number (hexadecimal or decimal) and return a (string, string)
+ // pair representing the number and the expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
+ size_t FirstNonDigit = StringRef::npos;
+ if (Expr.starts_with("0x")) {
+ FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ } else {
+ FirstNonDigit = Expr.find_first_not_of("0123456789");
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ }
+ return std::make_pair(Expr.substr(0, FirstNonDigit),
+ Expr.substr(FirstNonDigit));
+ }
+
+ // Evaluate a constant numeric expression (hexadecimal or decimal) and
+ // return a pair containing the result, and the expression remaining to be
+ // evaluated.
+ std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
+ StringRef ValueStr;
+ StringRef RemainingExpr;
+ std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
+
+ if (ValueStr.empty() || !isdigit(ValueStr[0]))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected number"), "");
+ uint64_t Value;
+ ValueStr.getAsInteger(0, Value);
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Evaluate an expression of the form "(<expr>)" and return a pair
+ // containing the result of evaluating <expr>, plus the expression
+ // remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ assert(Expr.starts_with("(") && "Not a parenthesized expression");
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim(), PCtx), PCtx);
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, "");
+ if (!RemainingExpr.starts_with(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate an expression in one of the following forms:
+ // *{<number>}<expr>
+ // Return a pair containing the result, plus the expression remaining to be
+ // parsed.
+ std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
+ assert(Expr.starts_with("*") && "Not a load expression");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Parse read size.
+ if (!RemainingExpr.starts_with("{"))
+ return std::make_pair(EvalResult("Expected '{' following '*'."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ EvalResult ReadSizeExpr;
+ std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (ReadSizeExpr.hasError())
+ return std::make_pair(ReadSizeExpr, RemainingExpr);
+ uint64_t ReadSize = ReadSizeExpr.getValue();
+ if (ReadSize < 1 || ReadSize > 8)
+ return std::make_pair(EvalResult("Invalid size for dereference."), "");
+ if (!RemainingExpr.starts_with("}"))
+ return std::make_pair(EvalResult("Missing '}' for dereference."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ // Evaluate the expression representing the load address.
+ ParseContext LoadCtx(true);
+ EvalResult LoadAddrExprResult;
+ std::tie(LoadAddrExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RemainingExpr, LoadCtx), LoadCtx);
+
+ if (LoadAddrExprResult.hasError())
+ return std::make_pair(LoadAddrExprResult, "");
+
+ uint64_t LoadAddr = LoadAddrExprResult.getValue();
+
+ // If there is no error but the content pointer is null then this is a
+ // zero-fill symbol/section.
+ if (LoadAddr == 0)
+ return std::make_pair(0, RemainingExpr);
+
+ return std::make_pair(
+ EvalResult(Checker.readMemoryAtAddr(LoadAddr, ReadSize)),
+ RemainingExpr);
+ }
+
+ // Evaluate a "simple" expression. This is any expression that _isn't_ an
+ // un-parenthesized binary expression.
+ //
+ // "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
+ //
+ // Returns a pair containing the result of the evaluation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+
+ if (Expr.empty())
+ return std::make_pair(EvalResult("Unexpected end of expression"), "");
+
+ if (Expr[0] == '(')
+ std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr, PCtx);
+ else if (Expr[0] == '*')
+ std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
+ else if (isalpha(Expr[0]) || Expr[0] == '_')
+ std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr, PCtx);
+ else if (isdigit(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
+ else
+ return std::make_pair(
+ unexpectedToken(Expr, Expr,
+ "expected '(', '*', identifier, or number"), "");
+
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, RemainingExpr);
+
+ // Evaluate bit-slice if present.
+ if (RemainingExpr.starts_with("["))
+ std::tie(SubExprResult, RemainingExpr) =
+ evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));
+
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate a bit-slice of an expression.
+ // A bit-slice has the form "<expr>[high:low]". The result of evaluating a
+ // slice is the bits between high and low (inclusive) in the original
+ // expression, right shifted so that the "low" bit is in position 0 in the
+ // result.
+ // Returns a pair containing the result of the slice operation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef>
+ evalSliceExpr(const std::pair<EvalResult, StringRef> &Ctx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) = Ctx;
+
+ assert(RemainingExpr.starts_with("[") && "Not a slice expr.");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult HighBitExpr;
+ std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (HighBitExpr.hasError())
+ return std::make_pair(HighBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.starts_with(":"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ':'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult LowBitExpr;
+ std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (LowBitExpr.hasError())
+ return std::make_pair(LowBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.starts_with("]"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ']'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ unsigned HighBit = HighBitExpr.getValue();
+ unsigned LowBit = LowBitExpr.getValue();
+ uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
+ uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
+ return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
+ }
+
+ // Evaluate a "complex" expression.
+ // Takes an already evaluated subexpression and checks for the presence of a
+ // binary operator, computing the result of the binary operation if one is
+ // found. Used to make arithmetic expressions left-associative.
+ // Returns a pair containing the ultimate result of evaluating the
+ // expression, plus the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef>
+ evalComplexExpr(const std::pair<EvalResult, StringRef> &LHSAndRemaining,
+ ParseContext PCtx) const {
+ EvalResult LHSResult;
+ StringRef RemainingExpr;
+ std::tie(LHSResult, RemainingExpr) = LHSAndRemaining;
+
+ // If there was an error, or there's nothing left to evaluate, return the
+ // result.
+ if (LHSResult.hasError() || RemainingExpr == "")
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // Otherwise check if this is a binary expression.
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+
+ // If this isn't a recognized expression just return.
+ if (BinOp == BinOpToken::Invalid)
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr, PCtx);
+
+ // If there was an error evaluating the RHS, return it.
+ if (RHSResult.hasError())
+ return std::make_pair(RHSResult, RemainingExpr);
+
+ // This is a binary expression - evaluate and try to continue as a
+ // complex expr.
+ EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));
+
+ return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr), PCtx);
+ }
+
+ bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size,
+ int64_t Offset) const {
+ auto TT = Checker.getTripleForSymbol(Checker.getTargetFlag(Symbol));
+ auto TI = getTargetInfo(TT, Checker.getCPU(), Checker.getFeatures());
+
+ if (auto E = TI.takeError()) {
+ errs() << "Error obtaining disassembler: " << toString(std::move(E))
+ << "\n";
+ return false;
+ }
+
+ StringRef SymbolMem = Checker.getSymbolContent(Symbol);
+ ArrayRef<uint8_t> SymbolBytes(SymbolMem.bytes_begin() + Offset,
+ SymbolMem.size() - Offset);
+
+ MCDisassembler::DecodeStatus S =
+ TI->Disassembler->getInstruction(Inst, Size, SymbolBytes, 0, nulls());
+
+ return (S == MCDisassembler::Success);
+ }
+
+ Expected<TargetInfo> getTargetInfo(const Triple &TT, const StringRef &CPU,
+ const SubtargetFeatures &TF) const {
+
+ auto TripleName = TT.str();
+ std::string ErrorStr;
+ const Target *TheTarget =
+ TargetRegistry::lookupTarget(TripleName, ErrorStr);
+ if (!TheTarget)
+ return make_error<StringError>("Error accessing target '" + TripleName +
+ "': " + ErrorStr,
+ inconvertibleErrorCode());
+
+ std::unique_ptr<MCSubtargetInfo> STI(
+ TheTarget->createMCSubtargetInfo(TripleName, CPU, TF.getString()));
+ if (!STI)
+ return make_error<StringError>("Unable to create subtarget for " +
+ TripleName,
+ inconvertibleErrorCode());
+
+ std::unique_ptr<MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
+ if (!MRI)
+ return make_error<StringError>("Unable to create target register info "
+ "for " +
+ TripleName,
+ inconvertibleErrorCode());
+
+ MCTargetOptions MCOptions;
+ std::unique_ptr<MCAsmInfo> MAI(
+ TheTarget->createMCAsmInfo(*MRI, TripleName, MCOptions));
+ if (!MAI)
+ return make_error<StringError>("Unable to create target asm info " +
+ TripleName,
+ inconvertibleErrorCode());
+
+ auto Ctx = std::make_unique<MCContext>(Triple(TripleName), MAI.get(),
+ MRI.get(), STI.get());
+
+ std::unique_ptr<MCDisassembler> Disassembler(
+ TheTarget->createMCDisassembler(*STI, *Ctx));
+ if (!Disassembler)
+ return make_error<StringError>("Unable to create disassembler for " +
+ TripleName,
+ inconvertibleErrorCode());
+
+ std::unique_ptr<MCInstrInfo> MII(TheTarget->createMCInstrInfo());
+ if (!MII)
+ return make_error<StringError>("Unable to create instruction info for" +
+ TripleName,
+ inconvertibleErrorCode());
+
+ std::unique_ptr<MCInstPrinter> InstPrinter(TheTarget->createMCInstPrinter(
+ Triple(TripleName), 0, *MAI, *MII, *MRI));
+ if (!InstPrinter)
+ return make_error<StringError>(
+ "Unable to create instruction printer for" + TripleName,
+ inconvertibleErrorCode());
+
+ return TargetInfo({TheTarget, std::move(STI), std::move(MRI),
+ std::move(MAI), std::move(Ctx), std::move(Disassembler),
+ std::move(MII), std::move(InstPrinter)});
+ }
+};
+} // namespace llvm
+
+RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(
+ IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo, llvm::endianness Endianness, Triple TT,
+ StringRef CPU, SubtargetFeatures TF, raw_ostream &ErrStream)
+ : IsSymbolValid(std::move(IsSymbolValid)),
+ GetSymbolInfo(std::move(GetSymbolInfo)),
+ GetSectionInfo(std::move(GetSectionInfo)),
+ GetStubInfo(std::move(GetStubInfo)), GetGOTInfo(std::move(GetGOTInfo)),
+ Endianness(Endianness), TT(std::move(TT)), CPU(std::move(CPU)),
+ TF(std::move(TF)), ErrStream(ErrStream) {}
+
+bool RuntimeDyldCheckerImpl::check(StringRef CheckExpr) const {
+ CheckExpr = CheckExpr.trim();
+ LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+ << "'...\n");
+ RuntimeDyldCheckerExprEval P(*this, ErrStream);
+ bool Result = P.evaluate(CheckExpr);
+ (void)Result;
+ LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+ << (Result ? "passed" : "FAILED") << ".\n");
+ return Result;
+}
+
+bool RuntimeDyldCheckerImpl::checkAllRulesInBuffer(StringRef RulePrefix,
+ MemoryBuffer *MemBuf) const {
+ bool DidAllTestsPass = true;
+ unsigned NumRules = 0;
+
+ std::string CheckExpr;
+ const char *LineStart = MemBuf->getBufferStart();
+
+ // Eat whitespace.
+ while (LineStart != MemBuf->getBufferEnd() && isSpace(*LineStart))
+ ++LineStart;
+
+ while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
+ const char *LineEnd = LineStart;
+ while (LineEnd != MemBuf->getBufferEnd() && *LineEnd != '\r' &&
+ *LineEnd != '\n')
+ ++LineEnd;
+
+ StringRef Line(LineStart, LineEnd - LineStart);
+ if (Line.starts_with(RulePrefix))
+ CheckExpr += Line.substr(RulePrefix.size()).str();
+
+ // If there's a check expr string...
+ if (!CheckExpr.empty()) {
+ // ... and it's complete then run it, otherwise remove the trailer '\'.
+ if (CheckExpr.back() != '\\') {
+ DidAllTestsPass &= check(CheckExpr);
+ CheckExpr.clear();
+ ++NumRules;
+ } else
+ CheckExpr.pop_back();
+ }
+
+ // Eat whitespace.
+ LineStart = LineEnd;
+ while (LineStart != MemBuf->getBufferEnd() && isSpace(*LineStart))
+ ++LineStart;
+ }
+ return DidAllTestsPass && (NumRules != 0);
+}
+
+bool RuntimeDyldCheckerImpl::isSymbolValid(StringRef Symbol) const {
+ return IsSymbolValid(Symbol);
+}
+
+uint64_t RuntimeDyldCheckerImpl::getSymbolLocalAddr(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return 0;
+ }
+
+ if (SymInfo->isZeroFill())
+ return 0;
+
+ return static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(SymInfo->getContent().data()));
+}
+
+uint64_t RuntimeDyldCheckerImpl::getSymbolRemoteAddr(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return 0;
+ }
+
+ return SymInfo->getTargetAddress();
+}
+
+uint64_t RuntimeDyldCheckerImpl::readMemoryAtAddr(uint64_t SrcAddr,
+ unsigned Size) const {
+ uintptr_t PtrSizedAddr = static_cast<uintptr_t>(SrcAddr);
+ assert(PtrSizedAddr == SrcAddr && "Linker memory pointer out-of-range.");
+ void *Ptr = reinterpret_cast<void*>(PtrSizedAddr);
+
+ switch (Size) {
+ case 1:
+ return support::endian::read<uint8_t>(Ptr, Endianness);
+ case 2:
+ return support::endian::read<uint16_t>(Ptr, Endianness);
+ case 4:
+ return support::endian::read<uint32_t>(Ptr, Endianness);
+ case 8:
+ return support::endian::read<uint64_t>(Ptr, Endianness);
+ }
+ llvm_unreachable("Unsupported read size");
+}
+
+StringRef RuntimeDyldCheckerImpl::getSymbolContent(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return StringRef();
+ }
+ return {SymInfo->getContent().data(), SymInfo->getContent().size()};
+}
+
+TargetFlagsType RuntimeDyldCheckerImpl::getTargetFlag(StringRef Symbol) const {
+ auto SymInfo = GetSymbolInfo(Symbol);
+ if (!SymInfo) {
+ logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+ return TargetFlagsType{};
+ }
+ return SymInfo->getTargetFlags();
+}
+
+Triple
+RuntimeDyldCheckerImpl::getTripleForSymbol(TargetFlagsType Flag) const {
+ Triple TheTriple = TT;
+
+ switch (TT.getArch()) {
+ case Triple::ArchType::arm:
+ if (~Flag & 0x1)
+ return TT;
+ TheTriple.setArchName((Twine("thumb") + TT.getArchName().substr(3)).str());
+ return TheTriple;
+ case Triple::ArchType::thumb:
+ if (Flag & 0x1)
+ return TT;
+ TheTriple.setArchName((Twine("arm") + TT.getArchName().substr(5)).str());
+ return TheTriple;
+
+ default:
+ return TT;
+ }
+}
+
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getSectionAddr(
+ StringRef FileName, StringRef SectionName, bool IsInsideLoad) const {
+
+ auto SecInfo = GetSectionInfo(FileName, SectionName);
+ if (!SecInfo) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ logAllUnhandledErrors(SecInfo.takeError(), ErrMsgStream,
+ "RTDyldChecker: ");
+ }
+ return std::make_pair(0, std::move(ErrMsg));
+ }
+
+ // If this address is being looked up in "load" mode, return the content
+ // pointer, otherwise return the target address.
+
+ uint64_t Addr = 0;
+
+ if (IsInsideLoad) {
+ if (SecInfo->isZeroFill())
+ Addr = 0;
+ else
+ Addr = pointerToJITTargetAddress(SecInfo->getContent().data());
+ } else
+ Addr = SecInfo->getTargetAddress();
+
+ return std::make_pair(Addr, "");
+}
+
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getStubOrGOTAddrFor(
+ StringRef StubContainerName, StringRef SymbolName, StringRef StubKindFilter,
+ bool IsInsideLoad, bool IsStubAddr) const {
+
+ assert((StubKindFilter.empty() || IsStubAddr) &&
+ "Kind name filter only supported for stubs");
+ auto StubInfo =
+ IsStubAddr ? GetStubInfo(StubContainerName, SymbolName, StubKindFilter)
+ : GetGOTInfo(StubContainerName, SymbolName);
+
+ if (!StubInfo) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ logAllUnhandledErrors(StubInfo.takeError(), ErrMsgStream,
+ "RTDyldChecker: ");
+ }
+ return std::make_pair((uint64_t)0, std::move(ErrMsg));
+ }
+
+ uint64_t Addr = 0;
+
+ if (IsInsideLoad) {
+ if (StubInfo->isZeroFill())
+ return std::make_pair((uint64_t)0, "Detected zero-filled stub/GOT entry");
+ Addr = pointerToJITTargetAddress(StubInfo->getContent().data());
+ } else
+ Addr = StubInfo->getTargetAddress();
+
+ return std::make_pair(Addr, "");
+}
+
+RuntimeDyldChecker::RuntimeDyldChecker(
+ IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo, llvm::endianness Endianness, Triple TT,
+ StringRef CPU, SubtargetFeatures TF, raw_ostream &ErrStream)
+ : Impl(::std::make_unique<RuntimeDyldCheckerImpl>(
+ std::move(IsSymbolValid), std::move(GetSymbolInfo),
+ std::move(GetSectionInfo), std::move(GetStubInfo),
+ std::move(GetGOTInfo), Endianness, std::move(TT), std::move(CPU),
+ std::move(TF), ErrStream)) {}
+
+RuntimeDyldChecker::~RuntimeDyldChecker() = default;
+
+bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
+ return Impl->check(CheckExpr);
+}
+
+bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
+ MemoryBuffer *MemBuf) const {
+ return Impl->checkAllRulesInBuffer(RulePrefix, MemBuf);
+}
+
+std::pair<uint64_t, std::string>
+RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
+ bool LocalAddress) {
+ return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
new file mode 100644
index 000000000000..bda554e9e5b6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -0,0 +1,85 @@
+//===-- RuntimeDyldCheckerImpl.h -- RuntimeDyld test framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+
+#include "RuntimeDyldImpl.h"
+
+namespace llvm {
+
+/// Holds target-specific properties for a symbol.
+using TargetFlagsType = uint8_t;
+
+class RuntimeDyldCheckerImpl {
+ friend class RuntimeDyldChecker;
+ friend class RuntimeDyldCheckerExprEval;
+
+ using IsSymbolValidFunction =
+ RuntimeDyldChecker::IsSymbolValidFunction;
+ using GetSymbolInfoFunction = RuntimeDyldChecker::GetSymbolInfoFunction;
+ using GetSectionInfoFunction = RuntimeDyldChecker::GetSectionInfoFunction;
+ using GetStubInfoFunction = RuntimeDyldChecker::GetStubInfoFunction;
+ using GetGOTInfoFunction = RuntimeDyldChecker::GetGOTInfoFunction;
+
+public:
+ RuntimeDyldCheckerImpl(IsSymbolValidFunction IsSymbolValid,
+ GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo,
+ GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo,
+ llvm::endianness Endianness, Triple TT, StringRef CPU,
+ SubtargetFeatures TF, llvm::raw_ostream &ErrStream);
+
+ bool check(StringRef CheckExpr) const;
+ bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+private:
+
+ // StubMap typedefs.
+
+ Expected<JITSymbolResolver::LookupResult>
+ lookup(const JITSymbolResolver::LookupSet &Symbols) const;
+
+ bool isSymbolValid(StringRef Symbol) const;
+ uint64_t getSymbolLocalAddr(StringRef Symbol) const;
+ uint64_t getSymbolRemoteAddr(StringRef Symbol) const;
+ uint64_t readMemoryAtAddr(uint64_t Addr, unsigned Size) const;
+
+ StringRef getSymbolContent(StringRef Symbol) const;
+
+ TargetFlagsType getTargetFlag(StringRef Symbol) const;
+ Triple getTripleForSymbol(TargetFlagsType Flag) const;
+ StringRef getCPU() const { return CPU; }
+ SubtargetFeatures getFeatures() const { return TF; }
+
+ std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+ StringRef SectionName,
+ bool IsInsideLoad) const;
+
+ std::pair<uint64_t, std::string>
+ getStubOrGOTAddrFor(StringRef StubContainerName, StringRef Symbol,
+ StringRef StubKindFilter, bool IsInsideLoad,
+ bool IsStubAddr) const;
+
+ std::optional<uint64_t> getSectionLoadAddress(void *LocalAddr) const;
+
+ IsSymbolValidFunction IsSymbolValid;
+ GetSymbolInfoFunction GetSymbolInfo;
+ GetSectionInfoFunction GetSectionInfo;
+ GetStubInfoFunction GetStubInfo;
+ GetGOTInfoFunction GetGOTInfo;
+ llvm::endianness Endianness;
+ Triple TT;
+ std::string CPU;
+ SubtargetFeatures TF;
+ llvm::raw_ostream &ErrStream;
+};
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
new file mode 100644
index 000000000000..736d9a3e056f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -0,0 +1,2572 @@
+//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of ELF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "Targets/RuntimeDyldELFMips.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+
+#define DEBUG_TYPE "dyld"
+
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+static void or32AArch64Imm(void *L, uint64_t Imm) {
+ or32le(L, (Imm & 0xFFF) << 10);
+}
+
+template <class T> static void write(bool isBE, void *P, T V) {
+ isBE ? write<T, llvm::endianness::big>(P, V)
+ : write<T, llvm::endianness::little>(P, V);
+}
+
+static void write32AArch64Addr(void *L, uint64_t Imm) {
+ uint32_t ImmLo = (Imm & 0x3) << 29;
+ uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+ uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+ write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+}
+
+// Return bits [Start, End] of Val, shifted right by Start bits.
+// For instance, getBits(0xF0, 4, 8) returns 0xF.
+static uint64_t getBits(uint64_t Val, int Start, int End) {
+ uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
+ return (Val >> Start) & Mask;
+}
+
+namespace {
+
+template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+ typedef typename ELFT::uint addr_type;
+
+ DyldELFObject(ELFObjectFile<ELFT> &&Obj);
+
+public:
+ static Expected<std::unique_ptr<DyldELFObject>>
+ create(MemoryBufferRef Wrapper);
+
+ void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
+
+ void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
+
+ // Methods for type inquiry through isa, cast and dyn_cast
+ static bool classof(const Binary *v) {
+ return (isa<ELFObjectFile<ELFT>>(v) &&
+ classof(cast<ELFObjectFile<ELFT>>(v)));
+ }
+ static bool classof(const ELFObjectFile<ELFT> *v) {
+ return v->isDyldType();
+ }
+};
+
+
+
+// The MemoryBuffer passed into this constructor is just a wrapper around the
+// actual memory. Ultimately, the Binary parent class will take ownership of
+// this MemoryBuffer object but not the underlying memory.
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
+ : ELFObjectFile<ELFT>(std::move(Obj)) {
+ this->isDyldELFObject = true;
+}
+
+template <class ELFT>
+Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
+ auto Obj = ELFObjectFile<ELFT>::create(Wrapper);
+ if (auto E = Obj.takeError())
+ return std::move(E);
+ std::unique_ptr<DyldELFObject<ELFT>> Ret(
+ new DyldELFObject<ELFT>(std::move(*Obj)));
+ return std::move(Ret);
+}
+
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
+ uint64_t Addr) {
+ DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+ Elf_Shdr *shdr =
+ const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+ // This assumes the address passed in matches the target address bitness
+ // The template-based type cast handles everything else.
+ shdr->sh_addr = static_cast<addr_type>(Addr);
+}
+
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
+ uint64_t Addr) {
+
+ Elf_Sym *sym = const_cast<Elf_Sym *>(
+ ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
+
+ // This assumes the address passed in matches the target address bitness
+ // The template-based type cast handles everything else.
+ sym->st_value = static_cast<addr_type>(Addr);
+}
+
+class LoadedELFObjectInfo final
+ : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
+public:
+ LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+ OwningBinary<ObjectFile>
+ getObjectForDebug(const ObjectFile &Obj) const override;
+};
+
+template <typename ELFT>
+static Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
+ const LoadedELFObjectInfo &L) {
+ typedef typename ELFT::Shdr Elf_Shdr;
+ typedef typename ELFT::uint addr_type;
+
+ Expected<std::unique_ptr<DyldELFObject<ELFT>>> ObjOrErr =
+ DyldELFObject<ELFT>::create(Buffer);
+ if (Error E = ObjOrErr.takeError())
+ return std::move(E);
+
+ std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
+
+ // Iterate over all sections in the object.
+ auto SI = SourceObject.section_begin();
+ for (const auto &Sec : Obj->sections()) {
+ Expected<StringRef> NameOrErr = Sec.getName();
+ if (!NameOrErr) {
+ consumeError(NameOrErr.takeError());
+ continue;
+ }
+
+ if (*NameOrErr != "") {
+ DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+ Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
+ reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+ if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
+ // This assumes that the address passed in matches the target address
+ // bitness. The template-based type cast handles everything else.
+ shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
+ }
+ }
+ ++SI;
+ }
+
+ return std::move(Obj);
+}
+
+static OwningBinary<ObjectFile>
+createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
+ assert(Obj.isELF() && "Not an ELF object file.");
+
+ std::unique_ptr<MemoryBuffer> Buffer =
+ MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
+
+ Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
+ handleAllErrors(DebugObj.takeError());
+ if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
+ else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
+ else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
+ else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
+ DebugObj =
+ createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
+ else
+ llvm_unreachable("Unexpected ELF format");
+
+ handleAllErrors(DebugObj.takeError());
+ return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
+}
+
+OwningBinary<ObjectFile>
+LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
+ return createELFDebugObject(Obj, *this);
+}
+
+} // anonymous namespace
+
+namespace llvm {
+
+RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
+RuntimeDyldELF::~RuntimeDyldELF() = default;
+
+void RuntimeDyldELF::registerEHFrames() {
+ for (SID EHFrameSID : UnregisteredEHFrameSections) {
+ uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+ uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+ size_t EHFrameSize = Sections[EHFrameSID].getSize();
+ MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+ }
+ UnregisteredEHFrameSections.clear();
+}
+
+std::unique_ptr<RuntimeDyldELF>
+llvm::RuntimeDyldELF::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default:
+ return std::make_unique<RuntimeDyldELF>(MemMgr, Resolver);
+ case Triple::mips:
+ case Triple::mipsel:
+ case Triple::mips64:
+ case Triple::mips64el:
+ return std::make_unique<RuntimeDyldELFMips>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+ return std::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
+ else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
+void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset) {
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_X86_64_NONE:
+ break;
+ case ELF::R_X86_64_8: {
+ Value += Addend;
+ assert((int64_t)Value <= INT8_MAX && (int64_t)Value >= INT8_MIN);
+ uint8_t TruncatedAddr = (Value & 0xFF);
+ *Section.getAddressWithOffset(Offset) = TruncatedAddr;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_X86_64_16: {
+ Value += Addend;
+ assert((int64_t)Value <= INT16_MAX && (int64_t)Value >= INT16_MIN);
+ uint16_t TruncatedAddr = (Value & 0xFFFF);
+ support::ulittle16_t::ref(Section.getAddressWithOffset(Offset)) =
+ TruncatedAddr;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_X86_64_64: {
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+ Value + Addend;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S: {
+ Value += Addend;
+ assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
+ (Type == ELF::R_X86_64_32S &&
+ ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
+ uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ TruncatedAddr;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_X86_64_PC8: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t RealOffset = Value + Addend - FinalAddress;
+ assert(isInt<8>(RealOffset));
+ int8_t TruncOffset = (RealOffset & 0xFF);
+ Section.getAddress()[Offset] = TruncOffset;
+ break;
+ }
+ case ELF::R_X86_64_PC32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t RealOffset = Value + Addend - FinalAddress;
+ assert(isInt<32>(RealOffset));
+ int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ TruncOffset;
+ break;
+ }
+ case ELF::R_X86_64_PC64: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ int64_t RealOffset = Value + Addend - FinalAddress;
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+ RealOffset;
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
+ << format("%p\n", FinalAddress));
+ break;
+ }
+ case ELF::R_X86_64_GOTOFF64: {
+ // Compute Value - GOTBase.
+ uint64_t GOTBase = 0;
+ for (const auto &Section : Sections) {
+ if (Section.getName() == ".got") {
+ GOTBase = Section.getLoadAddressWithOffset(0);
+ break;
+ }
+ }
+ assert(GOTBase != 0 && "missing GOT");
+ int64_t GOTOffset = Value - GOTBase + Addend;
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
+ break;
+ }
+ case ELF::R_X86_64_DTPMOD64: {
+ // We only have one DSO, so the module id is always 1.
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 1;
+ break;
+ }
+ case ELF::R_X86_64_DTPOFF64:
+ case ELF::R_X86_64_TPOFF64: {
+ // DTPOFF64 should resolve to the offset in the TLS block, TPOFF64 to the
+ // offset in the *initial* TLS block. Since we are statically linking, all
+ // TLS blocks already exist in the initial block, so resolve both
+ // relocations equally.
+ support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+ Value + Addend;
+ break;
+ }
+ case ELF::R_X86_64_DTPOFF32:
+ case ELF::R_X86_64_TPOFF32: {
+ // As for the (D)TPOFF64 relocations above, both DTPOFF32 and TPOFF32 can
+ // be resolved equally.
+ int64_t RealValue = Value + Addend;
+ assert(RealValue >= INT32_MIN && RealValue <= INT32_MAX);
+ int32_t TruncValue = RealValue;
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ TruncValue;
+ break;
+ }
+ }
+}
+
+void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ switch (Type) {
+ case ELF::R_386_32: {
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ Value + Addend;
+ break;
+ }
+ // Handle R_386_PLT32 like R_386_PC32 since it should be able to
+ // reach any 32 bit address.
+ case ELF::R_386_PLT32:
+ case ELF::R_386_PC32: {
+ uint32_t FinalAddress =
+ Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+ uint32_t RealOffset = Value + Addend - FinalAddress;
+ support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+ RealOffset;
+ break;
+ }
+ default:
+ // There are other relocation types, but it appears these are the
+ // only ones currently used by the LLVM ELF object writer
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ // Data should use target endian. Code should always use little endian.
+ bool isBE = Arch == Triple::aarch64_be;
+
+ LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend) << "\n");
+
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_AARCH64_NONE:
+ break;
+ case ELF::R_AARCH64_ABS16: {
+ uint64_t Result = Value + Addend;
+ assert(Result == static_cast<uint64_t>(llvm::SignExtend64(Result, 16)) ||
+ (Result >> 16) == 0);
+ write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
+ break;
+ }
+ case ELF::R_AARCH64_ABS32: {
+ uint64_t Result = Value + Addend;
+ assert(Result == static_cast<uint64_t>(llvm::SignExtend64(Result, 32)) ||
+ (Result >> 32) == 0);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+ break;
+ }
+ case ELF::R_AARCH64_ABS64:
+ write(isBE, TargetPtr, Value + Addend);
+ break;
+ case ELF::R_AARCH64_PLT32: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ static_cast<int64_t>(Result) <= INT32_MAX);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result));
+ break;
+ }
+ case ELF::R_AARCH64_PREL16: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT16_MIN &&
+ static_cast<int64_t>(Result) <= UINT16_MAX);
+ write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
+ break;
+ }
+ case ELF::R_AARCH64_PREL32: {
+ uint64_t Result = Value + Addend - FinalAddress;
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ static_cast<int64_t>(Result) <= UINT32_MAX);
+ write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+ break;
+ }
+ case ELF::R_AARCH64_PREL64:
+ write(isBE, TargetPtr, Value + Addend - FinalAddress);
+ break;
+ case ELF::R_AARCH64_CONDBR19: {
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ assert(isInt<21>(BranchImm));
+ *TargetPtr &= 0xff00001fU;
+ // Immediate:20:2 goes in bits 23:5 of Bcc, CBZ, CBNZ
+ or32le(TargetPtr, (BranchImm & 0x001FFFFC) << 3);
+ break;
+ }
+ case ELF::R_AARCH64_TSTBR14: {
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ assert(isInt<16>(BranchImm));
+
+ uint32_t RawInstr = *(support::little32_t *)TargetPtr;
+ *(support::little32_t *)TargetPtr = RawInstr & 0xfff8001fU;
+
+ // Immediate:15:2 goes in bits 18:5 of TBZ, TBNZ
+ or32le(TargetPtr, (BranchImm & 0x0000FFFC) << 3);
+ break;
+ }
+ case ELF::R_AARCH64_CALL26: // fallthrough
+ case ELF::R_AARCH64_JUMP26: {
+ // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
+ // calculation.
+ uint64_t BranchImm = Value + Addend - FinalAddress;
+
+ // "Check that -2^27 <= result < 2^27".
+ assert(isInt<28>(BranchImm));
+ or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
+ break;
+ }
+ case ELF::R_AARCH64_MOVW_UABS_G3:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G2_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G1_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
+ break;
+ case ELF::R_AARCH64_MOVW_UABS_G0_NC:
+ or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
+ break;
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+ // Operation: Page(S+A) - Page(P)
+ uint64_t Result =
+ ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+ // Check that -2^32 <= X < 2^32
+ assert(isInt<33>(Result) && "overflow check failed for relocation");
+
+ // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
+ // from bits 32:12 of X.
+ write32AArch64Addr(TargetPtr, Result >> 12);
+ break;
+ }
+ case ELF::R_AARCH64_ADD_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:0 of X
+ or32AArch64Imm(TargetPtr, Value + Addend);
+ break;
+ case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:0 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
+ break;
+ case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:1 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
+ break;
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:2 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
+ break;
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:3 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
+ break;
+ case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
+ // Operation: S + A
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:4 of X
+ or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
+ break;
+ case ELF::R_AARCH64_LD_PREL_LO19: {
+ // Operation: S + A - P
+ uint64_t Result = Value + Addend - FinalAddress;
+
+ // "Check that -2^20 <= result < 2^20".
+ assert(isInt<21>(Result));
+
+ *TargetPtr &= 0xff00001fU;
+ // Immediate goes in bits 23:5 of LD imm instruction, taken
+ // from bits 20:2 of X
+ *TargetPtr |= ((Result & 0xffc) << (5 - 2));
+ break;
+ }
+ case ELF::R_AARCH64_ADR_PREL_LO21: {
+ // Operation: S + A - P
+ uint64_t Result = Value + Addend - FinalAddress;
+
+ // "Check that -2^20 <= result < 2^20".
+ assert(isInt<21>(Result));
+
+ *TargetPtr &= 0x9f00001fU;
+ // Immediate goes in bits 23:5, 30:29 of ADR imm instruction, taken
+ // from bits 20:0 of X
+ *TargetPtr |= ((Result & 0xffc) << (5 - 2));
+ *TargetPtr |= (Result & 0x3) << 29;
+ break;
+ }
+ }
+}
+
+void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ // TODO: Add Thumb relocations.
+ uint32_t *TargetPtr =
+ reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%x", Value)
+ << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+
+ case ELF::R_ARM_NONE:
+ break;
+ // Write a 31bit signed offset
+ case ELF::R_ARM_PREL31:
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
+ ((Value - FinalAddress) & ~0x80000000);
+ break;
+ case ELF::R_ARM_TARGET1:
+ case ELF::R_ARM_ABS32:
+ support::ulittle32_t::ref{TargetPtr} = Value;
+ break;
+ // Write first 16 bit of 32 bit value to the mov instruction.
+ // Last 4 bit should be shifted.
+ case ELF::R_ARM_MOVW_ABS_NC:
+ case ELF::R_ARM_MOVT_ABS:
+ if (Type == ELF::R_ARM_MOVW_ABS_NC)
+ Value = Value & 0xFFFF;
+ else if (Type == ELF::R_ARM_MOVT_ABS)
+ Value = (Value >> 16) & 0xFFFF;
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
+ (((Value >> 12) & 0xF) << 16);
+ break;
+ // Write 24 bit relative value to the branch instruction.
+ case ELF::R_ARM_PC24: // Fall through.
+ case ELF::R_ARM_CALL: // Fall through.
+ case ELF::R_ARM_JUMP24:
+ int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
+ RelValue = (RelValue & 0x03FFFFFC) >> 2;
+ assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
+ support::ulittle32_t::ref{TargetPtr} =
+ (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
+ break;
+ }
+}
+
+void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
+ if (Arch == Triple::UnknownArch ||
+ Triple::getArchTypePrefix(Arch) != "mips") {
+ IsMipsO32ABI = false;
+ IsMipsN32ABI = false;
+ IsMipsN64ABI = false;
+ return;
+ }
+ if (auto *E = dyn_cast<ELFObjectFileBase>(&Obj)) {
+ unsigned AbiVariant = E->getPlatformFlags();
+ IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
+ IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
+ }
+ IsMipsN64ABI = Obj.getFileFormatName() == "elf64-mips";
+}
+
+// Return the .TOC. section and offset.
+Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Set a default SectionID in case we do not find a TOC section below.
+ // This may happen for references to the TOC base (sym@toc, .opd
+ // relocation) without a .toc directive. In this case just use the
+ // first section (which is usually the .opd) since the code won't
+ // reference the .toc base directly.
+ Rel.SymbolName = nullptr;
+ Rel.SectionID = 0;
+
+ // The TOC consists of sections .got, .toc, .tocbss, .plt in that
+ // order. The TOC starts where the first of these sections starts.
+ for (auto &Section : Obj.sections()) {
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef SectionName = *NameOrErr;
+
+ if (SectionName == ".got"
+ || SectionName == ".toc"
+ || SectionName == ".tocbss"
+ || SectionName == ".plt") {
+ if (auto SectionIDOrErr =
+ findOrEmitSection(Obj, Section, false, LocalSections))
+ Rel.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ break;
+ }
+ }
+
+ // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
+ // thus permitting a full 64 Kbyte segment.
+ Rel.Addend = 0x8000;
+
+ return Error::success();
+}
+
+// Returns the section and offset associated with the OPD entry referenced
+// by Symbol.
+Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Get the ELF symbol value (st_value) to compare with Relocation offset in
+ // .opd entries
+ for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
+ si != se; ++si) {
+
+ Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
+ if (!RelSecOrErr)
+ report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
+
+ section_iterator RelSecI = *RelSecOrErr;
+ if (RelSecI == Obj.section_end())
+ continue;
+
+ Expected<StringRef> NameOrErr = RelSecI->getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+ StringRef RelSectionName = *NameOrErr;
+
+ if (RelSectionName != ".opd")
+ continue;
+
+ for (elf_relocation_iterator i = si->relocation_begin(),
+ e = si->relocation_end();
+ i != e;) {
+ // The R_PPC64_ADDR64 relocation indicates the first field
+ // of a .opd entry
+ uint64_t TypeFunc = i->getType();
+ if (TypeFunc != ELF::R_PPC64_ADDR64) {
+ ++i;
+ continue;
+ }
+
+ uint64_t TargetSymbolOffset = i->getOffset();
+ symbol_iterator TargetSymbol = i->getSymbol();
+ int64_t Addend;
+ if (auto AddendOrErr = i->getAddend())
+ Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+
+ ++i;
+ if (i == e)
+ break;
+
+ // Just check if following relocation is a R_PPC64_TOC
+ uint64_t TypeTOC = i->getType();
+ if (TypeTOC != ELF::R_PPC64_TOC)
+ continue;
+
+ // Finally compares the Symbol value and the target symbol offset
+ // to check if this .opd entry refers to the symbol the relocation
+ // points to.
+ if (Rel.Addend != (int64_t)TargetSymbolOffset)
+ continue;
+
+ section_iterator TSI = Obj.section_end();
+ if (auto TSIOrErr = TargetSymbol->getSection())
+ TSI = *TSIOrErr;
+ else
+ return TSIOrErr.takeError();
+ assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
+
+ bool IsCode = TSI->isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
+ LocalSections))
+ Rel.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Rel.Addend = (intptr_t)Addend;
+ return Error::success();
+ }
+ }
+ llvm_unreachable("Attempting to get address of ODP entry!");
+}
+
+// Relocation masks following the #lo(value), #hi(value), #ha(value),
+// #higher(value), #highera(value), #highest(value), and #highesta(value)
+// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
+// document.
+
// #lo(value): the least-significant 16 bits of the value.
static inline uint16_t applyPPClo(uint64_t value) {
  return static_cast<uint16_t>(value & 0xffffu);
}
+
// #hi(value): bits 16..31 of the value.
static inline uint16_t applyPPChi(uint64_t value) {
  return static_cast<uint16_t>((value >> 16) & 0xffffu);
}
+
// #ha(value): bits 16..31 adjusted by the carry out of the low half, so
// that (#ha << 16) plus the sign-extended #lo reproduces the value.
static inline uint16_t applyPPCha(uint64_t value) {
  return static_cast<uint16_t>(((value + 0x8000u) >> 16) & 0xffffu);
}
+
// #higher(value): bits 32..47 of the value.
static inline uint16_t applyPPChigher(uint64_t value) {
  return static_cast<uint16_t>((value >> 32) & 0xffffu);
}
+
// #highera(value): bits 32..47 adjusted by the carry out of the low 32 bits.
static inline uint16_t applyPPChighera(uint64_t value) {
  return static_cast<uint16_t>(((value + 0x8000u) >> 32) & 0xffffu);
}
+
// #highest(value): bits 48..63 of the value.
static inline uint16_t applyPPChighest(uint64_t value) {
  return static_cast<uint16_t>((value >> 48) & 0xffffu);
}
+
// #highesta(value): bits 48..63 adjusted by the carry out of the low 48 bits.
static inline uint16_t applyPPChighesta(uint64_t value) {
  return static_cast<uint16_t>(((value + 0x8000u) >> 48) & 0xffffu);
}
+
+void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_PPC_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+ break;
+ case ELF::R_PPC_ADDR16_HI:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC_ADDR16_HA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ }
+}
+
// Applies a 64-bit PowerPC relocation of the given Type at Section+Offset.
// Value is the resolved target address; Addend is the relocation addend.
// ADDR* types write an absolute field; REL* types write a field relative to
// the load address of the relocation site.
void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
                                            uint64_t Offset, uint64_t Value,
                                            uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
  switch (Type) {
  default:
    report_fatal_error("Relocation type not implemented yet!");
    break;
  case ELF::R_PPC64_ADDR16:
    // NOTE(review): only the low 16 bits are written; overflow beyond 16
    // bits is not diagnosed here.
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_DS:
    // _DS forms: the two low bits of the field are reserved and cleared.
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_LO:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_LO_DS:
    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
    break;
  case ELF::R_PPC64_ADDR16_HI:
  case ELF::R_PPC64_ADDR16_HIGH:
    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HA:
  case ELF::R_PPC64_ADDR16_HIGHA:
    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHER:
    writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHERA:
    writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHEST:
    writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR16_HIGHESTA:
    writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
    break;
  case ELF::R_PPC64_ADDR14: {
    assert(((Value + Addend) & 3) == 0);
    // Preserve the AA/LK bits in the branch instruction
    uint8_t aalk = *(LocalAddress + 3);
    writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
  } break;
  case ELF::R_PPC64_REL16_LO: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPClo(Delta));
  } break;
  case ELF::R_PPC64_REL16_HI: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPChi(Delta));
  } break;
  case ELF::R_PPC64_REL16_HA: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt16BE(LocalAddress, applyPPCha(Delta));
  } break;
  case ELF::R_PPC64_ADDR32: {
    // 32-bit absolute value; must fit in a signed 32-bit field.
    int64_t Result = static_cast<int64_t>(Value + Addend);
    if (SignExtend64<32>(Result) != Result)
      llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
    writeInt32BE(LocalAddress, Result);
  } break;
  case ELF::R_PPC64_REL24: {
    // 24-bit (word-aligned, so effectively 26-bit) PC-relative branch.
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
    if (SignExtend64<26>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL24 overflow");
    // We preserve bits other than LI field, i.e. PO and AA/LK fields.
    uint32_t Inst = readBytesUnaligned(LocalAddress, 4);
    writeInt32BE(LocalAddress, (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
  } break;
  case ELF::R_PPC64_REL32: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
    if (SignExtend64<32>(delta) != delta)
      llvm_unreachable("Relocation R_PPC64_REL32 overflow");
    writeInt32BE(LocalAddress, delta);
  } break;
  case ELF::R_PPC64_REL64: {
    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
    uint64_t Delta = Value - FinalAddress + Addend;
    writeInt64BE(LocalAddress, Delta);
  } break;
  case ELF::R_PPC64_ADDR64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}
+
// Applies a SystemZ (s390x) relocation of the given Type at Section+Offset.
// Value is the resolved target address; Addend is the relocation addend.
// The *DBL types store a halfword-scaled (divided by 2) PC-relative offset.
void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
                                              uint64_t Offset, uint64_t Value,
                                              uint32_t Type, int64_t Addend) {
  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
  switch (Type) {
  default:
    report_fatal_error("Relocation type not implemented yet!");
    break;
  case ELF::R_390_PC16DBL:
  case ELF::R_390_PLT16DBL: {
    // PC-relative, stored as Delta/2 in a 16-bit field.
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
    writeInt16BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC32DBL:
  case ELF::R_390_PLT32DBL: {
    // PC-relative, stored as Delta/2 in a 32-bit field.
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
    writeInt32BE(LocalAddress, Delta / 2);
    break;
  }
  case ELF::R_390_PC16: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
    assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
    writeInt16BE(LocalAddress, Delta);
    break;
  }
  case ELF::R_390_PC32: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
    writeInt32BE(LocalAddress, Delta);
    break;
  }
  case ELF::R_390_PC64: {
    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
    writeInt64BE(LocalAddress, Delta);
    break;
  }
  // Absolute 8/16/32/64-bit relocations.
  case ELF::R_390_8:
    *LocalAddress = (uint8_t)(Value + Addend);
    break;
  case ELF::R_390_16:
    writeInt16BE(LocalAddress, Value + Addend);
    break;
  case ELF::R_390_32:
    writeInt32BE(LocalAddress, Value + Addend);
    break;
  case ELF::R_390_64:
    writeInt64BE(LocalAddress, Value + Addend);
    break;
  }
}
+
+void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ bool isBE = Arch == Triple::bpfeb;
+
+ switch (Type) {
+ default:
+ report_fatal_error("Relocation type not implemented yet!");
+ break;
+ case ELF::R_BPF_NONE:
+ case ELF::R_BPF_64_64:
+ case ELF::R_BPF_64_32:
+ case ELF::R_BPF_64_NODYLD32:
+ break;
+ case ELF::R_BPF_64_ABS64: {
+ write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ case ELF::R_BPF_64_ABS32: {
+ Value += Addend;
+ assert(Value <= UINT32_MAX);
+ write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
+ break;
+ }
+ }
+}
+
+// The target location for the relocation is described by RE.SectionID and
+// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+// SectionEntry has three members describing its location.
+// SectionEntry::Address is the address at which the section has been loaded
+// into memory in the current (host) process. SectionEntry::LoadAddress is the
+// address that the section will have in the target process.
+// SectionEntry::ObjAddress is the address of the bits for this section in the
+// original emitted object image (also in the current address space).
+//
+// Relocations will be applied as if the section were loaded at
+// SectionEntry::LoadAddress, but they will be applied at an address based
+// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
+// Target memory contents if they are required for value calculations.
+//
+// The Value parameter here is the load address of the symbol for the
+// relocation to be applied. For relocations which refer to symbols in the
+// current object Value will be the LoadAddress of the section in which
+// the symbol resides (RE.Addend provides additional information about the
+// symbol location). For external symbols, Value will be the address of the
+// symbol in the target address space.
+void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+}
+
+void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID) {
+ switch (Arch) {
+ case Triple::x86_64:
+ resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
+ break;
+ case Triple::x86:
+ resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::arm: // Fall through.
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::ppc: // Fall through.
+ case Triple::ppcle:
+ resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::ppc64: // Fall through.
+ case Triple::ppc64le:
+ resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::systemz:
+ resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
+ break;
+ case Triple::bpfel:
+ case Triple::bpfeb:
+ resolveBPFRelocation(Section, Offset, Value, Type, Addend);
+ break;
+ default:
+ llvm_unreachable("Unsupported CPU type!");
+ }
+}
+
+void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const {
+ return (void *)(Sections[SectionID].getObjAddress() + Offset);
+}
+
+void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+}
+
+uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
+ bool IsLocal) const {
+ switch (RelType) {
+ case ELF::R_MICROMIPS_GOT16:
+ if (IsLocal)
+ return ELF::R_MICROMIPS_LO16;
+ break;
+ case ELF::R_MICROMIPS_HI16:
+ return ELF::R_MICROMIPS_LO16;
+ case ELF::R_MIPS_GOT16:
+ if (IsLocal)
+ return ELF::R_MIPS_LO16;
+ break;
+ case ELF::R_MIPS_HI16:
+ return ELF::R_MIPS_LO16;
+ case ELF::R_MIPS_PCHI16:
+ return ELF::R_MIPS_PCLO16;
+ default:
+ break;
+ }
+ return ELF::R_MIPS_NONE;
+}
+
// Sometimes we don't need to create thunk for a branch.
// This typically happens when branch target is located
// in the same object file. In such case target is either
// a weak symbol or symbol in a different executable section.
// This function checks if branch target is located in the
// same object file and if distance between source and target
// fits R_AARCH64_CALL26 relocation. If both conditions are
// met, it emits direct jump to the target and returns true.
// Otherwise false is returned and thunk is created.
bool RuntimeDyldELF::resolveAArch64ShortBranch(
    unsigned SectionID, relocation_iterator RelI,
    const RelocationValueRef &Value) {
  uint64_t TargetOffset;
  unsigned TargetSectionID;
  if (Value.SymbolName) {
    auto Loc = GlobalSymbolTable.find(Value.SymbolName);

    // Don't create direct branch for external symbols.
    if (Loc == GlobalSymbolTable.end())
      return false;

    const auto &SymInfo = Loc->second;

    TargetSectionID = SymInfo.getSectionID();
    TargetOffset = SymInfo.getOffset();
  } else {
    // Section-relative target: the offset is carried in Value.Addend below.
    TargetSectionID = Value.SectionID;
    TargetOffset = 0;
  }

  // We don't actually know the load addresses at this point, so if the
  // branch is cross-section, we don't know exactly how far away it is.
  if (TargetSectionID != SectionID)
    return false;

  uint64_t SourceOffset = RelI->getOffset();

  // R_AARCH64_CALL26 requires immediate to be in range -2^27 <= imm < 2^27
  // If distance between source and target is out of range then we should
  // create thunk.
  if (!isInt<28>(TargetOffset + Value.Addend - SourceOffset))
    return false;

  // In range: record an ordinary relocation so the branch is patched to
  // jump directly to the target (no stub needed).
  RelocationEntry RE(SectionID, SourceOffset, RelI->getType(), Value.Addend);
  if (Value.SymbolName)
    addRelocationForSymbol(RE, Value.SymbolName);
  else
    addRelocationForSection(RE, Value.SectionID);

  return true;
}
+
// Resolves an AArch64 branch relocation: reuse an existing stub for the
// target if one exists, otherwise try a direct short branch, otherwise emit
// a new stub that materializes the full 64-bit target address.
void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
                                          const RelocationValueRef &Value,
                                          relocation_iterator RelI,
                                          StubMap &Stubs) {

  LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
  SectionEntry &Section = Sections[SectionID];

  uint64_t Offset = RelI->getOffset();
  unsigned RelType = RelI->getType();
  // Look for an existing stub.
  StubMap::const_iterator i = Stubs.find(Value);
  if (i != Stubs.end()) {
    // Point the branch at the stub that was already emitted for this value.
    resolveRelocation(Section, Offset,
                      Section.getLoadAddressWithOffset(i->second), RelType, 0);
    LLVM_DEBUG(dbgs() << " Stub function found\n");
  } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
    // Create a new stub function.
    LLVM_DEBUG(dbgs() << " Create a new stub function\n");
    Stubs[Value] = Section.getStubOffset();
    uint8_t *StubTargetAddr = createStubFunction(
        Section.getAddressWithOffset(Section.getStubOffset()));

    // The stub loads the 64-bit address via a movz + three movk
    // instructions; attach one MOVW_UABS relocation per 4-byte instruction
    // (G3 down to G0).
    RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
                              ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
    RelocationEntry REmovk_g2(SectionID,
                              StubTargetAddr - Section.getAddress() + 4,
                              ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
    RelocationEntry REmovk_g1(SectionID,
                              StubTargetAddr - Section.getAddress() + 8,
                              ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
    RelocationEntry REmovk_g0(SectionID,
                              StubTargetAddr - Section.getAddress() + 12,
                              ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);

    if (Value.SymbolName) {
      addRelocationForSymbol(REmovz_g3, Value.SymbolName);
      addRelocationForSymbol(REmovk_g2, Value.SymbolName);
      addRelocationForSymbol(REmovk_g1, Value.SymbolName);
      addRelocationForSymbol(REmovk_g0, Value.SymbolName);
    } else {
      addRelocationForSection(REmovz_g3, Value.SectionID);
      addRelocationForSection(REmovk_g2, Value.SectionID);
      addRelocationForSection(REmovk_g1, Value.SectionID);
      addRelocationForSection(REmovk_g0, Value.SectionID);
    }
    // Finally, point the original branch at the new stub.
    resolveRelocation(Section, Offset,
                      Section.getLoadAddressWithOffset(Section.getStubOffset()),
                      RelType, 0);
    Section.advanceStubOffset(getMaxStubSize());
  }
}
+
+Expected<relocation_iterator>
+RuntimeDyldELF::processRelocationRef(
+ unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
+ ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
+ const auto &Obj = cast<ELFObjectFileBase>(O);
+ uint64_t RelType = RelI->getType();
+ int64_t Addend = 0;
+ if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
+ Addend = *AddendOrErr;
+ else
+ consumeError(AddendOrErr.takeError());
+ elf_symbol_iterator Symbol = RelI->getSymbol();
+
+ // Obtain the symbol name which is referenced in the relocation
+ StringRef TargetName;
+ if (Symbol != Obj.symbol_end()) {
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+ }
+ LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
+ RelocationValueRef Value;
+ // First search for the symbol in the local symbol table
+ SymbolRef::Type SymType = SymbolRef::ST_Unknown;
+
+ // Search for the symbol in the global symbol table
+ RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
+ if (Symbol != Obj.symbol_end()) {
+ gsi = GlobalSymbolTable.find(TargetName.data());
+ Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ SymType = *SymTypeOrErr;
+ }
+ if (gsi != GlobalSymbolTable.end()) {
+ const auto &SymInfo = gsi->second;
+ Value.SectionID = SymInfo.getSectionID();
+ Value.Offset = SymInfo.getOffset();
+ Value.Addend = SymInfo.getOffset() + Addend;
+ } else {
+ switch (SymType) {
+ case SymbolRef::ST_Debug: {
+ // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
+ // and can be changed by another developers. Maybe best way is add
+ // a new symbol type ST_Section to SymbolRef and use it.
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SectionOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ section_iterator si = *SectionOrErr;
+ if (si == Obj.section_end())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ bool isCode = si->isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
+ ObjSectionToID))
+ Value.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Data:
+ case SymbolRef::ST_Function:
+ case SymbolRef::ST_Other:
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+
+ // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
+ // will manifest here as a NULL symbol name.
+ // We can set this as a valid (but empty) symbol name, and rely
+ // on addRelocationForSymbol to handle this.
+ if (!Value.SymbolName)
+ Value.SymbolName = "";
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
+ }
+ }
+
+ uint64_t Offset = RelI->getOffset();
+
+ LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
+ << "\n");
+ if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be)) {
+ if ((RelType == ELF::R_AARCH64_CALL26 ||
+ RelType == ELF::R_AARCH64_JUMP26) &&
+ MemMgr.allowStubAllocation()) {
+ resolveAArch64Branch(SectionID, Value, RelI, Stubs);
+ } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
+ // Create new GOT entry or find existing one. If GOT entry is
+ // to be created, then we also emit ABS64 relocation for it.
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_ADR_PREL_PG_HI21);
+
+ } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_LDST64_ABS_LO12_NC);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (Arch == Triple::arm) {
+ if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24) {
+ // This is an ARM branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(Section, Offset,
+ Section.getLoadAddressWithOffset(i->second), RelType,
+ 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()));
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_ARM_ABS32, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ resolveRelocation(
+ Section, Offset,
+ Section.getLoadAddressWithOffset(Section.getStubOffset()), RelType,
+ 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
+ if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
+ RelType == ELF::R_ARM_ABS32) {
+ Value.Addend += *Placeholder;
+ } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
+ // See ELF for ARM documentation
+ Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsO32ABI) {
+ uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
+ computePlaceholderAddress(SectionID, Offset));
+ uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
+ if (RelType == ELF::R_MIPS_26) {
+ // This is an Mips branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Extract the addend from the instruction.
+ // We shift up by two since the Value will be down shifted again
+ // when applying the relocation.
+ uint32_t Addend = (Opcode & 0x03ffffff) << 2;
+
+ Value.Addend += Addend;
+
+ // Look up for existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
+ int64_t Addend = (Opcode & 0x0000ffff) << 16;
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ PendingRelocs.push_back(std::make_pair(Value, RE));
+ } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
+ int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
+ for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
+ const RelocationValueRef &MatchingValue = I->first;
+ RelocationEntry &Reloc = I->second;
+ if (MatchingValue == Value &&
+ RelType == getMatchingLoRelocation(Reloc.RelType) &&
+ SectionID == Reloc.SectionID) {
+ Reloc.Addend += Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(Reloc, Value.SymbolName);
+ else
+ addRelocationForSection(Reloc, Value.SectionID);
+ I = PendingRelocs.erase(I);
+ } else
+ ++I;
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else {
+ if (RelType == ELF::R_MIPS_32)
+ Value.Addend += Opcode;
+ else if (RelType == ELF::R_MIPS_PC16)
+ Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC19_S2)
+ Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC21_S2)
+ Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC26_S2)
+ Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsN32ABI || IsMipsN64ABI) {
+ uint32_t r_type = RelType & 0xff;
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
+ || r_type == ELF::R_MIPS_GOT_DISP) {
+ StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
+ if (i != GOTSymbolOffsets.end())
+ RE.SymOffset = i->second;
+ else {
+ RE.SymOffset = allocateGOTEntries(1);
+ GOTSymbolOffsets[TargetName] = RE.SymOffset;
+ }
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_MIPS_26) {
+ // This is an Mips branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look up for existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ if (IsMipsN32ABI) {
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ } else {
+ // Creating Highest, Higher, Hi and Lo relocations for the filled stub
+ // instructions.
+ RelocationEntry REHighest(SectionID,
+ StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HIGHEST, Value.Addend);
+ RelocationEntry REHigher(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_HIGHER, Value.Addend);
+ RelocationEntry REHi(SectionID,
+ StubTargetAddr - Section.getAddress() + 12,
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 20,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHighest, Value.SymbolName);
+ addRelocationForSymbol(REHigher, Value.SymbolName);
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHighest, Value.SectionID);
+ addRelocationForSection(REHigher, Value.SectionID);
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+
+ } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+ if (RelType == ELF::R_PPC64_REL24) {
+ // Determine ABI variant in use for this object.
+ unsigned AbiVariant = Obj.getPlatformFlags();
+ AbiVariant &= ELF::EF_PPC64_ABI;
+ // A PPC branch relocation will need a stub function if the target is
+ // an external symbol (either Value.SymbolName is set, or SymType is
+ // Symbol::ST_Unknown) or if the target address is not within the
+ // signed 24-bits branch address.
+ SectionEntry &Section = Sections[SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(Offset);
+ bool RangeOverflow = false;
+ bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
+ if (!IsExtern) {
+ if (AbiVariant != 2) {
+ // In the ELFv1 ABI, a function call may point to the .opd entry,
+ // so the final symbol value is calculated based on the relocation
+ // values in the .opd section.
+ if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else {
+ // In the ELFv2 ABI, a function symbol may provide a local entry
+ // point, which must be used for direct calls.
+ if (Value.SectionID == SectionID){
+ uint8_t SymOther = Symbol->getOther();
+ Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
+ }
+ }
+ uint8_t *RelocTarget =
+ Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
+ int64_t delta = static_cast<int64_t>(Target - RelocTarget);
+ // If it is within 26-bits branch range, just set the branch target
+ if (SignExtend64<26>(delta) != delta) {
+ RangeOverflow = true;
+ } else if ((AbiVariant != 2) ||
+ (AbiVariant == 2 && Value.SectionID == SectionID)) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
+ RangeOverflow) {
+ // It is an external symbol (either Value.SymbolName is set, or
+ // SymType is SymbolRef::ST_Unknown) or out of range.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ // Symbol function stub already created, just relocate to it
+ resolveRelocation(Section, Offset,
+ Section.getLoadAddressWithOffset(i->second),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()),
+ AbiVariant);
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_PPC64_ADDR64, Value.Addend);
+
+ // Generates the 64-bits address loads as exemplified in section
+ // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
+ // apply to the low part of the instructions, so we have to update
+ // the offset according to the target endianness.
+ uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
+ if (!IsTargetLittleEndian)
+ StubRelocOffset += 2;
+
+ RelocationEntry REhst(SectionID, StubRelocOffset + 0,
+ ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
+ RelocationEntry REhr(SectionID, StubRelocOffset + 4,
+ ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
+ RelocationEntry REh(SectionID, StubRelocOffset + 12,
+ ELF::R_PPC64_ADDR16_HI, Value.Addend);
+ RelocationEntry REl(SectionID, StubRelocOffset + 16,
+ ELF::R_PPC64_ADDR16_LO, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REhst, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
+ } else {
+ addRelocationForSection(REhst, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
+ }
+
+ resolveRelocation(
+ Section, Offset,
+ Section.getLoadAddressWithOffset(Section.getStubOffset()),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
+ // Restore the TOC for external calls
+ if (AbiVariant == 2)
+ writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
+ else
+ writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
+ }
+ }
+ } else if (RelType == ELF::R_PPC64_TOC16 ||
+ RelType == ELF::R_PPC64_TOC16_DS ||
+ RelType == ELF::R_PPC64_TOC16_LO ||
+ RelType == ELF::R_PPC64_TOC16_LO_DS ||
+ RelType == ELF::R_PPC64_TOC16_HI ||
+ RelType == ELF::R_PPC64_TOC16_HA) {
+ // These relocations are supposed to subtract the TOC address from
+ // the final value. This does not fit cleanly into the RuntimeDyld
+ // scheme, since there may be *two* sections involved in determining
+ // the relocation value (the section of the symbol referred to by the
+ // relocation, and the TOC section associated with the current module).
+ //
+ // Fortunately, these relocations are currently only ever generated
+ // referring to symbols that themselves reside in the TOC, which means
+ // that the two sections are actually the same. Thus they cancel out
+ // and we can immediately resolve the relocation right now.
+ switch (RelType) {
+ case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
+ case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
+ case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
+ case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
+ case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
+ case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
+ default: llvm_unreachable("Wrong relocation type.");
+ }
+
+ RelocationValueRef TOCValue;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
+ return std::move(Err);
+ if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
+ llvm_unreachable("Unsupported TOC relocation.");
+ Value.Addend -= TOCValue.Addend;
+ resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
+ } else {
+ // There are two ways to refer to the TOC address directly: either
+ // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
+ // ignored), or via any relocation that refers to the magic ".TOC."
+ // symbols (in which case the addend is respected).
+ if (RelType == ELF::R_PPC64_TOC) {
+ RelType = ELF::R_PPC64_ADDR64;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else if (TargetName == ".TOC.") {
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ Value.Addend += Addend;
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ } else if (Arch == Triple::systemz &&
+ (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
+ // Create function stubs for both PLT and GOT references, regardless of
+ // whether the GOT reference is to data or code. The stub contains the
+ // full address of the symbol, as needed by GOT references, and the
+ // executable part only adds an overhead of 8 bytes.
+ //
+ // We could try to conserve space by allocating the code and data
+ // parts of the stub separately. However, as things stand, we allocate
+ // a stub for every relocation, so using a GOT in JIT code should be
+ // no less space efficient than using an explicit constant pool.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ StubAddress =
+ alignTo(BaseAddress + Section.getStubOffset(), getStubAlignment());
+ unsigned StubOffset = StubAddress - BaseAddress;
+
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+ RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
+ Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+
+ if (RelType == ELF::R_390_GOTENT)
+ resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
+ Addend);
+ else
+ resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
+ } else if (Arch == Triple::x86_64) {
+ if (RelType == ELF::R_X86_64_PLT32) {
+ // The way the PLT relocations normally work is that the linker allocates
+ // the
+ // PLT and this relocation makes a PC-relative call into the PLT. The PLT
+ // entry will then jump to an address provided by the GOT. On first call,
+ // the
+ // GOT address will point back into PLT code that resolves the symbol. After
+ // the first call, the GOT entry points to the actual function.
+ //
+ // For local functions we're ignoring all of that here and just replacing
+ // the PLT32 relocation type with PC32, which will translate the relocation
+ // into a PC-relative call directly to the function. For external symbols we
+ // can't be sure the function will be within 2^32 bytes of the call site, so
+ // we need to create a stub, which calls into the GOT. This case is
+ // equivalent to the usual PLT implementation except that we use the stub
+ // mechanism in RuntimeDyld (which puts stubs at the end of the section)
+ // rather than allocating a PLT section.
+ if (Value.SymbolName && MemMgr.allowStubAllocation()) {
+ // This is a call to an external function.
+ // Look for an existing stub.
+ SectionEntry *Section = &Sections[SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section->getAddress()) + i->second;
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function (equivalent to a PLT entry).
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section->getAddress());
+ StubAddress = alignTo(BaseAddress + Section->getStubOffset(),
+ getStubAlignment());
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+
+ // Bump our stub offset counter
+ Section->advanceStubOffset(getMaxStubSize());
+
+ // Allocate a GOT Entry
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ // This potentially creates a new Section which potentially
+ // invalidates the Section pointer, so reload it.
+ Section = &Sections[SectionID];
+
+ // The load of the GOT address has an addend of -4
+ resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ addRelocationForSymbol(
+ computeGOTOffsetRE(GOTOffset, 0, ELF::R_X86_64_64),
+ Value.SymbolName);
+ }
+
+ // Make the target call a call into the stub table.
+ resolveRelocation(*Section, Offset, StubAddress, ELF::R_X86_64_PC32,
+ Addend);
+ } else {
+ Value.Addend += support::ulittle32_t::ref(
+ computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, ELF::R_X86_64_PC32, Value);
+ }
+ } else if (RelType == ELF::R_X86_64_GOTPCREL ||
+ RelType == ELF::R_X86_64_GOTPCRELX ||
+ RelType == ELF::R_X86_64_REX_GOTPCRELX) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOT64) {
+ // Fill in a 64-bit GOT offset.
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveRelocation(Sections[SectionID], Offset, GOTOffset,
+ ELF::R_X86_64_64, 0);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOTPC32) {
+ // Materialize the address of the base of the GOT relative to the PC.
+ // This doesn't create a GOT entry, but it does mean we need a GOT
+ // section.
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC32);
+ } else if (RelType == ELF::R_X86_64_GOTPC64) {
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC64);
+ } else if (RelType == ELF::R_X86_64_GOTOFF64) {
+ // GOTOFF relocations ultimately require a section difference relocation.
+ (void)allocateGOTEntries(0);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC32) {
+ Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC64) {
+ Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_GOTTPOFF) {
+ processX86_64GOTTPOFFRelocation(SectionID, Offset, Value, Addend);
+ } else if (RelType == ELF::R_X86_64_TLSGD ||
+ RelType == ELF::R_X86_64_TLSLD) {
+ // The next relocation must be the relocation for __tls_get_addr.
+ ++RelI;
+ auto &GetAddrRelocation = *RelI;
+ processX86_64TLSRelocation(SectionID, Offset, RelType, Value, Addend,
+ GetAddrRelocation);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else {
+ if (Arch == Triple::x86) {
+ Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ return ++RelI;
+}
+
+// Optimize an R_X86_64_GOTTPOFF (Initial Exec TLS) relocation.  Since the
+// JIT links statically, the thread-pointer-relative offset is known at link
+// time, so the GOT-indirect load can usually be rewritten in place into a
+// direct TPOFF32 computation.  If the surrounding bytes match neither known
+// compiler-emitted sequence, fall back to a real GOT entry holding a
+// TPOFF64 value.
+void RuntimeDyldELF::processX86_64GOTTPOFFRelocation(unsigned SectionID,
+                                                     uint64_t Offset,
+                                                     RelocationValueRef Value,
+                                                     int64_t Addend) {
+  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
+  // to replace the GOTTPOFF relocation with a TPOFF relocation. The spec
+  // only mentions one optimization even though there are two different
+  // code sequences for the Initial Exec TLS Model. We match the code to
+  // find out which one was used.
+
+  // A possible TLS code sequence and its replacement
+  struct CodeSequence {
+    // The expected code sequence
+    ArrayRef<uint8_t> ExpectedCodeSequence;
+    // The negative offset of the GOTTPOFF relocation to the beginning of
+    // the sequence
+    uint64_t TLSSequenceOffset;
+    // The new code sequence
+    ArrayRef<uint8_t> NewCodeSequence;
+    // The offset of the new TPOFF relocation
+    uint64_t TpoffRelocationOffset;
+  };
+
+  std::array<CodeSequence, 2> CodeSequences;
+
+  // Initial Exec Code Model Sequence
+  {
+    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
+        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+        0x00, // mov %fs:0, %rax
+        0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // add x@gotpoff(%rip),
+                                                 // %rax
+    };
+    CodeSequences[0].ExpectedCodeSequence =
+        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
+    CodeSequences[0].TLSSequenceOffset = 12;
+
+    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
+        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0, %rax
+        0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax), %rax
+    };
+    CodeSequences[0].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
+    CodeSequences[0].TpoffRelocationOffset = 12;
+  }
+
+  // Initial Exec Code Model Sequence, II
+  {
+    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
+        0x48, 0x8b, 0x05, 0x00, 0x00, 0x00, 0x00, // mov x@gotpoff(%rip), %rax
+        0x64, 0x48, 0x8b, 0x00, 0x00, 0x00, 0x00 // mov %fs:(%rax), %rax
+    };
+    CodeSequences[1].ExpectedCodeSequence =
+        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
+    CodeSequences[1].TLSSequenceOffset = 3;
+
+    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
+        0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00, // 6 byte nop
+        0x64, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:x@tpoff, %rax
+    };
+    CodeSequences[1].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
+    CodeSequences[1].TpoffRelocationOffset = 10;
+  }
+
+  // Try each known sequence in turn and patch in place on the first match.
+  bool Resolved = false;
+  auto &Section = Sections[SectionID];
+  for (const auto &C : CodeSequences) {
+    assert(C.ExpectedCodeSequence.size() == C.NewCodeSequence.size() &&
+           "Old and new code sequences must have the same size");
+
+    if (Offset < C.TLSSequenceOffset ||
+        (Offset - C.TLSSequenceOffset + C.NewCodeSequence.size()) >
+            Section.getSize()) {
+      // This can't be a matching sequence as it doesn't fit in the current
+      // section
+      continue;
+    }
+
+    auto TLSSequenceStartOffset = Offset - C.TLSSequenceOffset;
+    auto *TLSSequence = Section.getAddressWithOffset(TLSSequenceStartOffset);
+    if (ArrayRef<uint8_t>(TLSSequence, C.ExpectedCodeSequence.size()) !=
+        C.ExpectedCodeSequence) {
+      continue;
+    }
+
+    memcpy(TLSSequence, C.NewCodeSequence.data(), C.NewCodeSequence.size());
+
+    // The original GOTTPOFF relocation has an addend as it is PC relative,
+    // so it needs to be corrected. The TPOFF32 relocation is used as an
+    // absolute value (which is an offset from %fs:0), so remove the addend
+    // again.
+    RelocationEntry RE(SectionID,
+                       TLSSequenceStartOffset + C.TpoffRelocationOffset,
+                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
+
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+
+    Resolved = true;
+    break;
+  }
+
+  if (!Resolved) {
+    // The GOTTPOFF relocation was not used in one of the sequences
+    // described in the spec, so we can't optimize it to a TPOFF
+    // relocation.
+    uint64_t GOTOffset = allocateGOTEntries(1);
+    resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+                               ELF::R_X86_64_PC32);
+    RelocationEntry RE =
+        computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_TPOFF64);
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+  }
+}
+
+// Relax an R_X86_64_TLSGD / R_X86_64_TLSLD (General/Local Dynamic TLS)
+// relocation to the Local Exec form.  GetAddrRelocation is the relocation
+// for the __tls_get_addr call that follows; its type reveals the code model
+// (32-bit PLT/GOT relocation => small, PLTOFF64 => large).  Terminates via
+// report_fatal_error if the bytes around the relocation do not match one of
+// the known compiler-emitted sequences.
+void RuntimeDyldELF::processX86_64TLSRelocation(
+    unsigned SectionID, uint64_t Offset, uint64_t RelType,
+    RelocationValueRef Value, int64_t Addend,
+    const RelocationRef &GetAddrRelocation) {
+  // Since we are statically linking and have no additional DSOs, we can resolve
+  // the relocation directly without using __tls_get_addr.
+  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
+  // to replace it with the Local Exec relocation variant.
+
+  // Find out whether the code was compiled with the large or small memory
+  // model. For this we look at the next relocation which is the relocation
+  // for the __tls_get_addr function. If it's a 32 bit relocation, it's the
+  // small code model, with a 64 bit relocation it's the large code model.
+  bool IsSmallCodeModel;
+  // Is the relocation for the __tls_get_addr a PC-relative GOT relocation?
+  bool IsGOTPCRel = false;
+
+  switch (GetAddrRelocation.getType()) {
+  case ELF::R_X86_64_GOTPCREL:
+  case ELF::R_X86_64_REX_GOTPCRELX:
+  case ELF::R_X86_64_GOTPCRELX:
+    IsGOTPCRel = true;
+    [[fallthrough]];
+  case ELF::R_X86_64_PLT32:
+    IsSmallCodeModel = true;
+    break;
+  case ELF::R_X86_64_PLTOFF64:
+    IsSmallCodeModel = false;
+    break;
+  default:
+    report_fatal_error(
+        "invalid TLS relocations for General/Local Dynamic TLS Model: "
+        "expected PLT or GOT relocation for __tls_get_addr function");
+  }
+
+  // The negative offset to the start of the TLS code sequence relative to
+  // the offset of the TLSGD/TLSLD relocation
+  uint64_t TLSSequenceOffset;
+  // The expected start of the code sequence
+  ArrayRef<uint8_t> ExpectedCodeSequence;
+  // The new TLS code sequence that will replace the existing code
+  ArrayRef<uint8_t> NewCodeSequence;
+
+  if (RelType == ELF::R_X86_64_TLSGD) {
+    // The offset of the new TPOFF32 relocation (offset starting from the
+    // beginning of the whole TLS sequence)
+    uint64_t TpoffRelocOffset;
+
+    if (IsSmallCodeModel) {
+      if (!IsGOTPCRel) {
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x66, // data16 (no-op prefix)
+            0x48, 0x8d, 0x3d, 0x00, 0x00,
+            0x00, 0x00, // lea <disp32>(%rip), %rdi
+            0x66, 0x66, // two data16 prefixes
+            0x48, // rex64 (no-op prefix)
+            0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 4;
+      } else {
+        // This code sequence is not described in the TLS spec but gcc
+        // generates it sometimes.
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x66, // data16 (no-op prefix)
+            0x48, 0x8d, 0x3d, 0x00, 0x00,
+            0x00, 0x00, // lea <disp32>(%rip), %rdi
+            0x66, // data16 prefix (no-op prefix)
+            0x48, // rex64 (no-op prefix)
+            0xff, 0x15, 0x00, 0x00, 0x00,
+            0x00 // call *__tls_get_addr@gotpcrel(%rip)
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 4;
+      }
+
+      // The replacement code for the small code model. It's the same for
+      // both sequences.
+      static const std::initializer_list<uint8_t> SmallSequence = {
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+          0x00, // mov %fs:0, %rax
+          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),
+                                                   // %rax
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      TpoffRelocOffset = 12;
+    } else {
+      static const std::initializer_list<uint8_t> CodeSequence = {
+          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
+                                                    // %rdi
+          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+          0x00, // movabs $__tls_get_addr@pltoff, %rax
+          0x48, 0x01, 0xd8, // add %rbx, %rax
+          0xff, 0xd0 // call *%rax
+      };
+      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+      TLSSequenceOffset = 3;
+
+      // The replacement code for the large code model
+      static const std::initializer_list<uint8_t> LargeSequence = {
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+          0x00, // mov %fs:0, %rax
+          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, // lea x@tpoff(%rax),
+                                                    // %rax
+          0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 // nopw 0x0(%rax,%rax,1)
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
+      TpoffRelocOffset = 12;
+    }
+
+    // The TLSGD/TLSLD relocations are PC-relative, so they have an addend.
+    // The new TPOFF32 relocations is used as an absolute offset from
+    // %fs:0, so remove the TLSGD/TLSLD addend again.
+    RelocationEntry RE(SectionID, Offset - TLSSequenceOffset + TpoffRelocOffset,
+                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+  } else if (RelType == ELF::R_X86_64_TLSLD) {
+    // TLSLD needs no TPOFF relocation at all: the whole call collapses to a
+    // "mov %fs:0, %rax" (plus padding), so only the bytes are rewritten.
+    if (IsSmallCodeModel) {
+      if (!IsGOTPCRel) {
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
+            0x00, 0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 3;
+
+        // The replacement code for the small code model
+        static const std::initializer_list<uint8_t> SmallSequence = {
+            0x66, 0x66, 0x66, // three data16 prefixes (no-op)
+            0x64, 0x48, 0x8b, 0x04, 0x25,
+            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
+        };
+        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      } else {
+        // This code sequence is not described in the TLS spec but gcc
+        // generates it sometimes.
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x48, 0x8d, 0x3d, 0x00,
+            0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
+            0xff, 0x15, 0x00, 0x00,
+            0x00, 0x00 // call
+                       // *__tls_get_addr@gotpcrel(%rip)
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 3;
+
+        // The replacement is code is just like above but it needs to be
+        // one byte longer.
+        static const std::initializer_list<uint8_t> SmallSequence = {
+            0x0f, 0x1f, 0x40, 0x00, // 4 byte nop
+            0x64, 0x48, 0x8b, 0x04, 0x25,
+            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
+        };
+        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      }
+    } else {
+      // This is the same sequence as for the TLSGD sequence with the large
+      // memory model above
+      static const std::initializer_list<uint8_t> CodeSequence = {
+          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
+                                                    // %rdi
+          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+          0x48, // movabs $__tls_get_addr@pltoff, %rax
+          0x01, 0xd8, // add %rbx, %rax
+          0xff, 0xd0 // call *%rax
+      };
+      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+      TLSSequenceOffset = 3;
+
+      // The replacement code for the large code model
+      static const std::initializer_list<uint8_t> LargeSequence = {
+          0x66, 0x66, 0x66, // three data16 prefixes (no-op)
+          0x66, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
+          0x00, // 10 byte nop
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
+    }
+  } else {
+    llvm_unreachable("both TLS relocations handled above");
+  }
+
+  assert(ExpectedCodeSequence.size() == NewCodeSequence.size() &&
+         "Old and new code sequences must have the same size");
+
+  // Verify the sequence fits inside the section before touching any bytes.
+  auto &Section = Sections[SectionID];
+  if (Offset < TLSSequenceOffset ||
+      (Offset - TLSSequenceOffset + NewCodeSequence.size()) >
+          Section.getSize()) {
+    report_fatal_error("unexpected end of section in TLS sequence");
+  }
+
+  auto *TLSSequence = Section.getAddressWithOffset(Offset - TLSSequenceOffset);
+  if (ArrayRef<uint8_t>(TLSSequence, ExpectedCodeSequence.size()) !=
+      ExpectedCodeSequence) {
+    report_fatal_error(
+        "invalid TLS sequence for Global/Local Dynamic TLS Model");
+  }
+
+  memcpy(TLSSequence, NewCodeSequence.data(), NewCodeSequence.size());
+}
+
+// Returns the size in bytes of one GOT entry for the current target
+// architecture.  We don't use the GOT for every one of these targets, but
+// listing them all here is essentially free.
+size_t RuntimeDyldELF::getGOTEntrySize() {
+  switch (Arch) {
+  case Triple::x86_64:
+  case Triple::aarch64:
+  case Triple::aarch64_be:
+  case Triple::ppc64:
+  case Triple::ppc64le:
+  case Triple::systemz:
+    return sizeof(uint64_t);
+  case Triple::x86:
+  case Triple::arm:
+  case Triple::thumb:
+    return sizeof(uint32_t);
+  case Triple::mips:
+  case Triple::mipsel:
+  case Triple::mips64:
+  case Triple::mips64el:
+    // On MIPS the entry width follows the ABI, not just the CPU.
+    if (IsMipsO32ABI || IsMipsN32ABI)
+      return sizeof(uint32_t);
+    if (IsMipsN64ABI)
+      return sizeof(uint64_t);
+    llvm_unreachable("Mips ABI not handled");
+  default:
+    llvm_unreachable("Unsupported CPU type!");
+  }
+}
+
+// Reserves `no` consecutive entries in the (per-object) GOT and returns the
+// byte offset of the first reserved entry within the GOT section.
+uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
+  // Lazily reserve a section id for the GOT the first time an entry is
+  // requested.  The backing memory is allocated later, in finalizeLoad(),
+  // once the total number of entries is known.
+  if (GOTSectionID == 0) {
+    GOTSectionID = Sections.size();
+    Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
+  }
+  const uint64_t EntrySize = getGOTEntrySize();
+  uint64_t FirstEntryOffset = CurrentGOTIndex * EntrySize;
+  CurrentGOTIndex += no;
+  return FirstEntryOffset;
+}
+
+// Returns the GOT offset for `Value`, allocating a fresh entry (and the
+// relocation that will fill it) the first time the value is seen.
+uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
+                                             unsigned GOTRelType) {
+  // One GOT slot per distinct relocation value; reuse it on later lookups.
+  auto [It, Inserted] = GOTOffsetMap.insert({Value, 0});
+  if (!Inserted)
+    return It->second;
+
+  // First time we see this value: grab a slot and record the relocation
+  // that writes the symbol's address into it.
+  uint64_t GOTOffset = allocateGOTEntries(1);
+  RelocationEntry RE = computeGOTOffsetRE(GOTOffset, Value.Offset, GOTRelType);
+  if (Value.SymbolName)
+    addRelocationForSymbol(RE, Value.SymbolName);
+  else
+    addRelocationForSection(RE, Value.SectionID);
+
+  It->second = GOTOffset;
+  return GOTOffset;
+}
+
+// Records a relocation of kind `Type` at (SectionID, Offset) against the GOT
+// section, so the site is patched with the GOT entry's address once the GOT
+// itself has been placed in memory.
+void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
+                                                uint64_t Offset,
+                                                uint64_t GOTOffset,
+                                                uint32_t Type) {
+  addRelocationForSection(RelocationEntry(SectionID, Offset, Type, GOTOffset),
+                          GOTSectionID);
+}
+
+// Builds the relocation that writes the target symbol's value (at
+// SymbolOffset) into the GOT slot at GOTOffset within the GOT section.
+RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
+                                                   uint64_t SymbolOffset,
+                                                   uint32_t Type) {
+  RelocationEntry Entry(GOTSectionID, GOTOffset, Type, SymbolOffset);
+  return Entry;
+}
+
+// Hook invoked for every new symbol table entry.  For indirect (GNU ifunc)
+// symbols, reserves a stub slot and redirects the symbol to that stub; the
+// stub section itself is materialized later in finalizeLoad().
+void RuntimeDyldELF::processNewSymbol(const SymbolRef &ObjSymbol,
+                                      SymbolTableEntry &Symbol) {
+  // This should never return an error as `processNewSymbol` wouldn't have
+  // been called if getFlags() had returned an error before.
+  auto ObjSymbolFlags = cantFail(ObjSymbol.getFlags());
+
+  // Only indirect (ifunc) symbols need special treatment.
+  if (!(ObjSymbolFlags & SymbolRef::SF_Indirect))
+    return;
+
+  if (IFuncStubSectionID == 0) {
+    // Create a dummy section for the ifunc stubs; it will actually be
+    // allocated in finalizeLoad().
+    IFuncStubSectionID = Sections.size();
+    Sections.push_back(
+        SectionEntry(".text.__llvm_IFuncStubs", nullptr, 0, 0, 0));
+    // The first 64 bytes are reserved for the IFunc resolver.
+    IFuncStubOffset = 64;
+  }
+
+  IFuncStubs.push_back(IFuncStub{IFuncStubOffset, Symbol});
+  // Redirect the symbol to the ifunc stub instead of the resolver function.
+  Symbol = SymbolTableEntry(IFuncStubSectionID, IFuncStubOffset,
+                            Symbol.getFlags());
+  IFuncStubOffset += getMaxIFuncStubSize();
+}
+
+// Finishes loading one object file: checks that no MIPS HI16/LO16 pairs are
+// left dangling, materializes the IFunc stub section and the GOT, records
+// the .eh_frame section for later registration, and resets the per-object
+// GOT/IFunc bookkeeping.  Ordering matters: IFunc stub creation may itself
+// allocate GOT entries, so it must run before the GOT is sized.
+Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
+                                   ObjSectionToIDMap &SectionMap) {
+  if (IsMipsO32ABI)
+    if (!PendingRelocs.empty())
+      return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
+
+  // Create the IFunc stubs if necessary. This must be done before processing
+  // the GOT entries, as the IFunc stubs may create some.
+  if (IFuncStubSectionID != 0) {
+    uint8_t *IFuncStubsAddr = MemMgr.allocateCodeSection(
+        IFuncStubOffset, 1, IFuncStubSectionID, ".text.__llvm_IFuncStubs");
+    if (!IFuncStubsAddr)
+      return make_error<RuntimeDyldError>(
+          "Unable to allocate memory for IFunc stubs!");
+    Sections[IFuncStubSectionID] =
+        SectionEntry(".text.__llvm_IFuncStubs", IFuncStubsAddr, IFuncStubOffset,
+                     IFuncStubOffset, 0);
+
+    // The resolver trampoline occupies the reserved first 64 bytes.
+    createIFuncResolver(IFuncStubsAddr);
+
+    LLVM_DEBUG(dbgs() << "Creating IFunc stubs SectionID: "
+                      << IFuncStubSectionID << " Addr: "
+                      << Sections[IFuncStubSectionID].getAddress() << '\n');
+    for (auto &IFuncStub : IFuncStubs) {
+      auto &Symbol = IFuncStub.OriginalSymbol;
+      LLVM_DEBUG(dbgs() << "\tSectionID: " << Symbol.getSectionID()
+                        << " Offset: " << format("%p", Symbol.getOffset())
+                        << " IFuncStubOffset: "
+                        << format("%p\n", IFuncStub.StubOffset));
+      createIFuncStub(IFuncStubSectionID, 0, IFuncStub.StubOffset,
+                      Symbol.getSectionID(), Symbol.getOffset());
+    }
+
+    // Reset the per-object IFunc state for the next object.
+    IFuncStubSectionID = 0;
+    IFuncStubOffset = 0;
+    IFuncStubs.clear();
+  }
+
+  // If necessary, allocate the global offset table
+  if (GOTSectionID != 0) {
+    // Allocate memory for the section
+    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
+    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
+                                               GOTSectionID, ".got", false);
+    if (!Addr)
+      return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
+
+    Sections[GOTSectionID] =
+        SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
+
+    // For now, initialize all GOT entries to zero. We'll fill them in as
+    // needed when GOT-based relocations are applied.
+    memset(Addr, 0, TotalSize);
+    if (IsMipsN32ABI || IsMipsN64ABI) {
+      // To correctly resolve Mips GOT relocations, we need a mapping from
+      // object's sections to GOTs.
+      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+           SI != SE; ++SI) {
+        if (SI->relocation_begin() != SI->relocation_end()) {
+          Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+          if (!RelSecOrErr)
+            return make_error<RuntimeDyldError>(
+                toString(RelSecOrErr.takeError()));
+
+          section_iterator RelocatedSection = *RelSecOrErr;
+          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
+          assert(i != SectionMap.end());
+          SectionToGOTMap[i->second] = GOTSectionID;
+        }
+      }
+      GOTSymbolOffsets.clear();
+    }
+  }
+
+  // Look for and record the EH frame section.
+  ObjSectionToIDMap::iterator i, e;
+  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
+    const SectionRef &Section = i->first;
+
+    StringRef Name;
+    Expected<StringRef> NameOrErr = Section.getName();
+    if (NameOrErr)
+      Name = *NameOrErr;
+    else
+      consumeError(NameOrErr.takeError());
+
+    if (Name == ".eh_frame") {
+      UnregisteredEHFrameSections.push_back(i->second);
+      break;
+    }
+  }
+
+  // Reset the per-object GOT state for the next object.
+  GOTOffsetMap.clear();
+  GOTSectionID = 0;
+  CurrentGOTIndex = 0;
+
+  return Error::success();
+}
+
+// This dynamic loader handles ELF objects only.
+bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
+  return Obj.isELF();
+}
+
+// Emits the shared IFunc resolver trampoline at `Addr` (the reserved first
+// 64 bytes of the IFunc stub section).  Only x86_64 is supported; any other
+// architecture aborts with report_fatal_error.
+void RuntimeDyldELF::createIFuncResolver(uint8_t *Addr) const {
+  if (Arch == Triple::x86_64) {
+    // The address of the GOT1 entry is in %r11, the GOT2 entry is in %r11+8
+    // (see createIFuncStub() for details)
+    // The following code first saves all registers that contain the original
+    // function arguments as those registers are not saved by the resolver
+    // function. %r11 is saved as well so that the GOT2 entry can be updated
+    // afterwards. Then it calls the actual IFunc resolver function whose
+    // address is stored in GOT2. After the resolver function returns, all
+    // saved registers are restored and the return value is written to GOT1.
+    // Finally, jump to the now resolved function.
+    // clang-format off
+    const uint8_t StubCode[] = {
+        0x57,                   // push %rdi
+        0x56,                   // push %rsi
+        0x52,                   // push %rdx
+        0x51,                   // push %rcx
+        0x41, 0x50,             // push %r8
+        0x41, 0x51,             // push %r9
+        0x41, 0x53,             // push %r11
+        0x41, 0xff, 0x53, 0x08, // call *0x8(%r11)
+        0x41, 0x5b,             // pop %r11
+        0x41, 0x59,             // pop %r9
+        0x41, 0x58,             // pop %r8
+        0x59,                   // pop %rcx
+        0x5a,                   // pop %rdx
+        0x5e,                   // pop %rsi
+        0x5f,                   // pop %rdi
+        0x49, 0x89, 0x03,       // mov %rax,(%r11)
+        0xff, 0xe0              // jmp *%rax
+    };
+    // clang-format on
+    static_assert(sizeof(StubCode) <= 64,
+                  "maximum size of the IFunc resolver is 64B");
+    memcpy(Addr, StubCode, sizeof(StubCode));
+  } else {
+    report_fatal_error(
+        "IFunc resolver is not supported for target architecture");
+  }
+}
+
+void RuntimeDyldELF::createIFuncStub(unsigned IFuncStubSectionID,
+                                     uint64_t IFuncResolverOffset,
+                                     uint64_t IFuncStubOffset,
+                                     unsigned IFuncSectionID,
+                                     uint64_t IFuncOffset) {
+  auto &IFuncStubSection = Sections[IFuncStubSectionID];
+  auto *Addr = IFuncStubSection.getAddressWithOffset(IFuncStubOffset);
+
+  if (Arch == Triple::x86_64) {
+    // The first instruction loads a PC-relative address into %r11 which is a
+    // GOT entry for this stub. This initially contains the address to the
+    // IFunc resolver. We can use %r11 here as it's caller saved but not used
+    // to pass any arguments. In fact, x86_64 ABI even suggests using %r11 for
+    // code in the PLT. The IFunc resolver will use %r11 to update the GOT
+    // entry.
+    //
+    // The next instruction just jumps to the address contained in the GOT
+    // entry. As mentioned above, we do this two-step jump by first setting
+    // %r11 so that the IFunc resolver has access to it.
+    //
+    // The IFunc resolver of course also needs to know the actual address of
+    // the actual IFunc resolver function. This will be stored in a GOT entry
+    // right next to the first one for this stub. So, the IFunc resolver will
+    // be able to call it with %r11+8.
+    //
+    // In total, two adjacent GOT entries (+relocation) and one additional
+    // relocation are required:
+    // GOT1: Address of the IFunc resolver.
+    // GOT2: Address of the IFunc resolver function.
+    // IFuncStubOffset+3: 32-bit PC-relative address of GOT1.
+    uint64_t GOT1 = allocateGOTEntries(2);
+    uint64_t GOT2 = GOT1 + getGOTEntrySize();
+
+    // GOT1 initially holds the resolver trampoline's address (it lives in the
+    // stub section); GOT2 holds the address of the IFunc's resolver function.
+    RelocationEntry RE1(GOTSectionID, GOT1, ELF::R_X86_64_64,
+                        IFuncResolverOffset, {});
+    addRelocationForSection(RE1, IFuncStubSectionID);
+    RelocationEntry RE2(GOTSectionID, GOT2, ELF::R_X86_64_64, IFuncOffset, {});
+    addRelocationForSection(RE2, IFuncSectionID);
+
+    const uint8_t StubCode[] = {
+        0x4c, 0x8d, 0x1d, 0x00, 0x00, 0x00, 0x00, // leaq 0x0(%rip),%r11
+        0x41, 0xff, 0x23                          // jmpq *(%r11)
+    };
+    assert(sizeof(StubCode) <= getMaxIFuncStubSize() &&
+           "IFunc stub size must not exceed getMaxIFuncStubSize()");
+    memcpy(Addr, StubCode, sizeof(StubCode));
+
+    // The PC-relative value starts 4 bytes from the end of the leaq
+    // instruction, so the addend is -4.
+    resolveGOTOffsetRelocation(IFuncStubSectionID, IFuncStubOffset + 3,
+                               GOT1 - 4, ELF::R_X86_64_PC32);
+  } else {
+    report_fatal_error("IFunc stub is not supported for target architecture");
+  }
+}
+
+unsigned RuntimeDyldELF::getMaxIFuncStubSize() const {
+  if (Arch == Triple::x86_64) {
+    // 10 bytes: 7-byte leaq + 3-byte jmpq (see StubCode in createIFuncStub()).
+    return 10;
+  }
+  // IFunc stubs are not supported on other architectures.
+  return 0;
+}
+
+bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
+  // Only the GOT-referencing relocation types of AArch64 and x86-64 require
+  // a GOT entry to be allocated; everything else resolves directly.
+  unsigned RelTy = R.getType();
+  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+    return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
+           RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
+
+  if (Arch == Triple::x86_64)
+    return RelTy == ELF::R_X86_64_GOTPCREL ||
+           RelTy == ELF::R_X86_64_GOTPCRELX ||
+           RelTy == ELF::R_X86_64_GOT64 ||
+           RelTy == ELF::R_X86_64_REX_GOTPCRELX;
+  return false;
+}
+
+bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
+  // Answering "true" here only over-reserves stub space, so default to the
+  // conservative answer for anything we haven't explicitly classified.
+  if (Arch != Triple::x86_64)
+    return true; // Conservative answer
+
+  switch (R.getType()) {
+  default:
+    return true; // Conservative answer
+
+
+  case ELF::R_X86_64_GOTPCREL:
+  case ELF::R_X86_64_GOTPCRELX:
+  case ELF::R_X86_64_REX_GOTPCRELX:
+  case ELF::R_X86_64_GOTPC64:
+  case ELF::R_X86_64_GOT64:
+  case ELF::R_X86_64_GOTOFF64:
+  case ELF::R_X86_64_PC32:
+  case ELF::R_X86_64_PC64:
+  case ELF::R_X86_64_64:
+    // We know that these relocation types won't need a stub function. This
+    // list can be extended as needed.
+    return false;
+  }
+}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
new file mode 100644
index 000000000000..b73d2af8c0c4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -0,0 +1,236 @@
+//===-- RuntimeDyldELF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace llvm;
+
+namespace llvm {
+namespace object {
+class ELFObjectFileBase;
+}
+
+/// ELF-specific implementation of the MC-JIT runtime dynamic linker.
+/// Resolves ELF relocations for the supported targets (x86, x86-64, AArch64,
+/// ARM, MIPS, PPC32/64, SystemZ, BPF), manages a synthetic GOT section, and
+/// creates IFunc resolver stubs (x86-64 only).
+class RuntimeDyldELF : public RuntimeDyldImpl {
+
+  // Dispatch a relocation at (Section, Offset) against the already-computed
+  // target address Value to the per-architecture resolver below.
+  void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
+                         uint64_t Value, uint32_t Type, int64_t Addend,
+                         uint64_t SymOffset = 0, SID SectionID = 0);
+
+  void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
+                               uint64_t Value, uint32_t Type, int64_t Addend,
+                               uint64_t SymOffset);
+
+  void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
+                            uint32_t Value, uint32_t Type, int32_t Addend);
+
+  void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
+                                uint64_t Value, uint32_t Type, int64_t Addend);
+
+  bool resolveAArch64ShortBranch(unsigned SectionID, relocation_iterator RelI,
+                                 const RelocationValueRef &Value);
+
+  void resolveAArch64Branch(unsigned SectionID, const RelocationValueRef &Value,
+                            relocation_iterator RelI, StubMap &Stubs);
+
+  void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
+                            uint32_t Value, uint32_t Type, int32_t Addend);
+
+  void resolvePPC32Relocation(const SectionEntry &Section, uint64_t Offset,
+                              uint64_t Value, uint32_t Type, int64_t Addend);
+
+  void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
+                              uint64_t Value, uint32_t Type, int64_t Addend);
+
+  void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
+                                uint64_t Value, uint32_t Type, int64_t Addend);
+
+  void resolveBPFRelocation(const SectionEntry &Section, uint64_t Offset,
+                            uint64_t Value, uint32_t Type, int64_t Addend);
+
+  // Per-architecture far-branch stub sizes, in bytes.
+  unsigned getMaxStubSize() const override {
+    if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+      return 20; // movz; movk; movk; movk; br
+    if (Arch == Triple::arm || Arch == Triple::thumb)
+      return 8; // 32-bit instruction and 32-bit address
+    else if (IsMipsO32ABI || IsMipsN32ABI)
+      return 16;
+    else if (IsMipsN64ABI)
+      return 32;
+    else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le)
+      return 44;
+    else if (Arch == Triple::x86_64)
+      return 6; // 2-byte jmp instruction + 32-bit relative address
+    else if (Arch == Triple::systemz)
+      return 16;
+    else
+      return 0;
+  }
+
+  Align getStubAlignment() override {
+    if (Arch == Triple::systemz)
+      return Align(8);
+    else
+      return Align(1);
+  }
+
+  void setMipsABI(const ObjectFile &Obj) override;
+
+  Error findPPC64TOCSection(const object::ELFObjectFileBase &Obj,
+                            ObjSectionToIDMap &LocalSections,
+                            RelocationValueRef &Rel);
+  Error findOPDEntrySection(const object::ELFObjectFileBase &Obj,
+                            ObjSectionToIDMap &LocalSections,
+                            RelocationValueRef &Rel);
+
+protected:
+  size_t getGOTEntrySize() override;
+
+private:
+  SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+  // Allocate \p no consecutive GOT entries and return the offset of the
+  // first newly allocated entry within the GOT section.
+  uint64_t allocateGOTEntries(unsigned no);
+
+  // Find GOT entry corresponding to relocation or create new one.
+  uint64_t findOrAllocGOTEntry(const RelocationValueRef &Value,
+                               unsigned GOTRelType);
+
+  // Resolve the relative address of GOTOffset in Section ID and place
+  // it at the given Offset
+  void resolveGOTOffsetRelocation(unsigned SectionID, uint64_t Offset,
+                                  uint64_t GOTOffset, uint32_t Type);
+
+  // For a GOT entry referenced from SectionID, compute a relocation entry
+  // that will place the final resolved value in the GOT slot
+  RelocationEntry computeGOTOffsetRE(uint64_t GOTOffset, uint64_t SymbolOffset,
+                                     unsigned Type);
+
+  // Compute the address in memory where we can find the placeholder
+  void *computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const;
+
+  // Split out common case for creating the RelocationEntry for when the
+  // relocation requires no particular advanced processing.
+  void processSimpleRelocation(unsigned SectionID, uint64_t Offset,
+                               unsigned RelType, RelocationValueRef Value);
+
+  // Return matching *LO16 relocation (Mips specific)
+  uint32_t getMatchingLoRelocation(uint32_t RelType,
+                                   bool IsLocal = false) const;
+
+  // The tentative ID for the GOT section
+  unsigned GOTSectionID;
+
+  // Records the current number of allocated slots in the GOT
+  // (This would be equivalent to GOTEntries.size() were it not for relocations
+  // that consume more than one slot)
+  unsigned CurrentGOTIndex;
+
+protected:
+  // A map from section to a GOT section that has entries for section's GOT
+  // relocations. (Mips64 specific)
+  DenseMap<SID, SID> SectionToGOTMap;
+
+private:
+  // A map to avoid duplicate got entries (Mips64 specific)
+  StringMap<uint64_t> GOTSymbolOffsets;
+
+  // *HI16 relocations will be added for resolving when we find matching
+  // *LO16 part. (Mips specific)
+  SmallVector<std::pair<RelocationValueRef, RelocationEntry>, 8> PendingRelocs;
+
+  // When a module is loaded we save the SectionID of the EH frame section
+  // in a table until we receive a request to register all unregistered
+  // EH frame sections with the memory manager.
+  SmallVector<SID, 2> UnregisteredEHFrameSections;
+
+  // Map between GOT relocation value and corresponding GOT offset
+  std::map<RelocationValueRef, uint64_t> GOTOffsetMap;
+
+  /// The ID of the current IFunc stub section
+  unsigned IFuncStubSectionID = 0;
+  /// The current offset into the IFunc stub section
+  uint64_t IFuncStubOffset = 0;
+
+  /// A IFunc stub and its original symbol
+  struct IFuncStub {
+    /// The offset of this stub in the IFunc stub section
+    uint64_t StubOffset;
+    /// The symbol table entry of the original symbol
+    SymbolTableEntry OriginalSymbol;
+  };
+
+  /// The IFunc stubs
+  SmallVector<IFuncStub, 2> IFuncStubs;
+
+  /// Create the code for the IFunc resolver at the given address. This code
+  /// works together with the stubs created in createIFuncStub() to call the
+  /// resolver function and then jump to the real function address.
+  /// It must not be larger than 64B.
+  void createIFuncResolver(uint8_t *Addr) const;
+  /// Create the code for an IFunc stub for the IFunc that is defined in
+  /// section IFuncSectionID at offset IFuncOffset. The IFunc resolver created
+  /// by createIFuncResolver() is defined in the section IFuncStubSectionID at
+  /// offset IFuncResolverOffset. The code should be written into the section
+  /// with the id IFuncStubSectionID at the offset IFuncStubOffset.
+  void createIFuncStub(unsigned IFuncStubSectionID,
+                       uint64_t IFuncResolverOffset, uint64_t IFuncStubOffset,
+                       unsigned IFuncSectionID, uint64_t IFuncOffset);
+  /// Return the maximum size of a stub created by createIFuncStub()
+  unsigned getMaxIFuncStubSize() const;
+
+  void processNewSymbol(const SymbolRef &ObjSymbol,
+                        SymbolTableEntry &Entry) override;
+  bool relocationNeedsGot(const RelocationRef &R) const override;
+  bool relocationNeedsStub(const RelocationRef &R) const override;
+
+  // Process a GOTTPOFF TLS relocation for x86-64
+  // NOLINTNEXTLINE(readability-identifier-naming)
+  void processX86_64GOTTPOFFRelocation(unsigned SectionID, uint64_t Offset,
+                                       RelocationValueRef Value,
+                                       int64_t Addend);
+  // Process a TLSLD/TLSGD relocation for x86-64
+  // NOLINTNEXTLINE(readability-identifier-naming)
+  void processX86_64TLSRelocation(unsigned SectionID, uint64_t Offset,
+                                  uint64_t RelType, RelocationValueRef Value,
+                                  int64_t Addend,
+                                  const RelocationRef &GetAddrRelocation);
+
+public:
+  RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+                 JITSymbolResolver &Resolver);
+  ~RuntimeDyldELF() override;
+
+  static std::unique_ptr<RuntimeDyldELF>
+  create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
+         JITSymbolResolver &Resolver);
+
+  std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+  loadObject(const object::ObjectFile &O) override;
+
+  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+  Expected<relocation_iterator>
+  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+                       const ObjectFile &Obj,
+                       ObjSectionToIDMap &ObjSectionToID,
+                       StubMap &Stubs) override;
+  bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+  void registerEHFrames() override;
+  Error finalizeLoad(const ObjectFile &Obj,
+                     ObjSectionToIDMap &SectionMap) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
new file mode 100644
index 000000000000..e09c632842d6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -0,0 +1,594 @@
+//===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the implementations of runtime dynamic linker facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
+#include <deque>
+#include <map>
+#include <system_error>
+#include <unordered_map>
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+
+#define UNIMPLEMENTED_RELOC(RelType) \
+ case RelType: \
+ return make_error<RuntimeDyldError>("Unimplemented relocation: " #RelType)
+
+/// SectionEntry - represents a section emitted into memory by the dynamic
+/// linker.
+/// SectionEntry - represents a section emitted into memory by the dynamic
+/// linker.
+class SectionEntry {
+  /// Name - section name.
+  std::string Name;
+
+  /// Address - address in the linker's memory where the section resides.
+  uint8_t *Address;
+
+  /// Size - section size. Doesn't include the stubs.
+  size_t Size;
+
+  /// LoadAddress - the address of the section in the target process's memory.
+  /// Used for situations in which JIT-ed code is being executed in the address
+  /// space of a separate process. If the code executes in the same address
+  /// space where it was JIT-ed, this just equals Address.
+  uint64_t LoadAddress;
+
+  /// StubOffset - used for architectures with stub functions for far
+  /// relocations (like ARM). Starts at Size, i.e. stubs are appended after
+  /// the section's own contents.
+  uintptr_t StubOffset;
+
+  /// The total amount of space allocated for this section. This includes the
+  /// section size and the maximum amount of space that the stubs can occupy.
+  size_t AllocationSize;
+
+  /// ObjAddress - address of the section in the in-memory object file. Used
+  /// for calculating relocations in some object formats (like MachO).
+  uintptr_t ObjAddress;
+
+public:
+  SectionEntry(StringRef name, uint8_t *address, size_t size,
+               size_t allocationSize, uintptr_t objAddress)
+      : Name(std::string(name)), Address(address), Size(size),
+        LoadAddress(reinterpret_cast<uintptr_t>(address)), StubOffset(size),
+        AllocationSize(allocationSize), ObjAddress(objAddress) {
+    // AllocationSize is used only in asserts, prevent an "unused private field"
+    // warning:
+    (void)AllocationSize;
+  }
+
+  StringRef getName() const { return Name; }
+
+  uint8_t *getAddress() const { return Address; }
+
+  /// Return the address of this section with an offset.
+  uint8_t *getAddressWithOffset(unsigned OffsetBytes) const {
+    assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+    return Address + OffsetBytes;
+  }
+
+  size_t getSize() const { return Size; }
+
+  uint64_t getLoadAddress() const { return LoadAddress; }
+  void setLoadAddress(uint64_t LA) { LoadAddress = LA; }
+
+  /// Return the load address of this section with an offset.
+  uint64_t getLoadAddressWithOffset(unsigned OffsetBytes) const {
+    assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+    return LoadAddress + OffsetBytes;
+  }
+
+  uintptr_t getStubOffset() const { return StubOffset; }
+
+  /// Reserve StubSize more bytes for stubs; must stay within AllocationSize.
+  void advanceStubOffset(unsigned StubSize) {
+    StubOffset += StubSize;
+    assert(StubOffset <= AllocationSize && "Not enough space allocated!");
+  }
+
+  uintptr_t getObjAddress() const { return ObjAddress; }
+};
+
+/// RelocationEntry - used to represent relocations internally in the dynamic
+/// linker.
+/// RelocationEntry - used to represent relocations internally in the dynamic
+/// linker.
+class RelocationEntry {
+public:
+  /// Offset - offset into the section.
+  uint64_t Offset;
+
+  /// Addend - the relocation addend encoded in the instruction itself. Also
+  /// used to make a relocation section relative instead of symbol relative.
+  int64_t Addend;
+
+  /// SectionID - the section this relocation points to.
+  unsigned SectionID;
+
+  /// RelType - relocation type.
+  uint32_t RelType;
+
+  /// Pair of section IDs for section-difference relocations (the addend
+  /// carries the pre-computed offset difference; see the last two ctors).
+  struct SectionPair {
+    uint32_t SectionA;
+    uint32_t SectionB;
+  };
+
+  /// SymOffset - Section offset of the relocation entry's symbol (used for GOT
+  /// lookup). Overlaps with Sections: a given entry uses one or the other,
+  /// depending on which constructor built it.
+  union {
+    uint64_t SymOffset;
+    SectionPair Sections;
+  };
+
+  /// The size of this relocation (MachO specific).
+  unsigned Size;
+
+  /// True if this is a PCRel relocation (MachO specific).
+  bool IsPCRel : 1;
+
+  // ARM (MachO and COFF) specific.
+  bool IsTargetThumbFunc : 1;
+
+  // Basic entry: section-relative relocation with an addend.
+  RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
+      : Offset(offset), Addend(addend), SectionID(id), RelType(type),
+        SymOffset(0), Size(0), IsPCRel(false), IsTargetThumbFunc(false) {}
+
+  // Entry that records the symbol's section offset (for GOT lookup).
+  RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+                  uint64_t symoffset)
+      : Offset(offset), Addend(addend), SectionID(id), RelType(type),
+        SymOffset(symoffset), Size(0), IsPCRel(false),
+        IsTargetThumbFunc(false) {}
+
+  // MachO-style entry carrying PC-relativity and relocation size.
+  RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+                  bool IsPCRel, unsigned Size)
+      : Offset(offset), Addend(addend), SectionID(id), RelType(type),
+        SymOffset(0), Size(Size), IsPCRel(IsPCRel), IsTargetThumbFunc(false) {}
+
+  // Section-difference entry: the addend is folded together with
+  // (SectionAOffset - SectionBOffset) up front.
+  RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+                  unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+                  uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
+      : Offset(offset), Addend(SectionAOffset - SectionBOffset + addend),
+        SectionID(id), RelType(type), Size(Size), IsPCRel(IsPCRel),
+        IsTargetThumbFunc(false) {
+    Sections.SectionA = SectionA;
+    Sections.SectionB = SectionB;
+  }
+
+  // Section-difference entry with an explicit Thumb-target flag (ARM).
+  RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+                  unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+                  uint64_t SectionBOffset, bool IsPCRel, unsigned Size,
+                  bool IsTargetThumbFunc)
+      : Offset(offset), Addend(SectionAOffset - SectionBOffset + addend),
+        SectionID(id), RelType(type), Size(Size), IsPCRel(IsPCRel),
+        IsTargetThumbFunc(IsTargetThumbFunc) {
+    Sections.SectionA = SectionA;
+    Sections.SectionB = SectionB;
+  }
+};
+
+/// Identifies a relocation target value (either a section+offset or a named
+/// symbol, plus addend). Ordered/comparable so it can key std::map-based
+/// stub and GOT caches.
+class RelocationValueRef {
+public:
+  unsigned SectionID = 0;
+  uint64_t Offset = 0;
+  int64_t Addend = 0;
+  const char *SymbolName = nullptr;
+  bool IsStubThumb = false;
+
+  inline bool operator==(const RelocationValueRef &Other) const {
+    return SectionID == Other.SectionID && Offset == Other.Offset &&
+           Addend == Other.Addend && SymbolName == Other.SymbolName &&
+           IsStubThumb == Other.IsStubThumb;
+  }
+  // Lexicographic order; note SymbolName is compared by pointer identity,
+  // not by string contents.
+  inline bool operator<(const RelocationValueRef &Other) const {
+    if (SectionID != Other.SectionID)
+      return SectionID < Other.SectionID;
+    if (Offset != Other.Offset)
+      return Offset < Other.Offset;
+    if (Addend != Other.Addend)
+      return Addend < Other.Addend;
+    if (IsStubThumb != Other.IsStubThumb)
+      return IsStubThumb < Other.IsStubThumb;
+    return SymbolName < Other.SymbolName;
+  }
+};
+
+/// Symbol info for RuntimeDyld.
+class SymbolTableEntry {
+public:
+ SymbolTableEntry() = default;
+
+ SymbolTableEntry(unsigned SectionID, uint64_t Offset, JITSymbolFlags Flags)
+ : Offset(Offset), SectionID(SectionID), Flags(Flags) {}
+
+ unsigned getSectionID() const { return SectionID; }
+ uint64_t getOffset() const { return Offset; }
+ void setOffset(uint64_t NewOffset) { Offset = NewOffset; }
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ uint64_t Offset = 0;
+ unsigned SectionID = 0;
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+};
+
+typedef StringMap<SymbolTableEntry> RTDyldSymbolTable;
+
+/// Format-agnostic base class for the runtime dynamic linker. Owns the
+/// emitted sections, the global symbol table and the pending relocation
+/// lists; format-specific subclasses (ELF, MachO, COFF) supply relocation
+/// parsing and resolution.
+class RuntimeDyldImpl {
+  friend class RuntimeDyld::LoadedObjectInfo;
+protected:
+  // Sentinel SectionID for absolute symbols (no backing section).
+  static const unsigned AbsoluteSymbolSection = ~0U;
+
+  // The MemoryManager to load objects into.
+  RuntimeDyld::MemoryManager &MemMgr;
+
+  // The symbol resolver to use for external symbols.
+  JITSymbolResolver &Resolver;
+
+  // A list of all sections emitted by the dynamic linker. These sections are
+  // referenced in the code by means of their index in this list - SectionID.
+  // Because references may be kept while the list grows, use a container that
+  // guarantees reference stability.
+  typedef std::deque<SectionEntry> SectionList;
+  SectionList Sections;
+
+  typedef unsigned SID; // Type for SectionIDs
+#define RTDYLD_INVALID_SECTION_ID ((RuntimeDyldImpl::SID)(-1))
+
+  // Keep a map of sections from object file to the SectionID which
+  // references it.
+  typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
+
+  // A global symbol table for symbols from all loaded modules.
+  RTDyldSymbolTable GlobalSymbolTable;
+
+  // Keep a map of common symbols to their info pairs
+  typedef std::vector<SymbolRef> CommonSymbolList;
+
+  // For each symbol, keep a list of relocations based on it. Anytime
+  // its address is reassigned (the JIT re-compiled the function, e.g.),
+  // the relocations get re-resolved.
+  // The symbol (or section) the relocation is sourced from is the Key
+  // in the relocation list where it's stored.
+  typedef SmallVector<RelocationEntry, 64> RelocationList;
+  // Relocations to sections already loaded. Indexed by SectionID which is the
+  // source of the address. The target where the address will be written is
+  // SectionID/Offset in the relocation itself.
+  std::unordered_map<unsigned, RelocationList> Relocations;
+
+  // Relocations to external symbols that are not yet resolved. Symbols are
+  // external when they aren't found in the global symbol table of all loaded
+  // modules. This map is indexed by symbol name.
+  StringMap<RelocationList> ExternalSymbolRelocations;
+
+
+  typedef std::map<RelocationValueRef, uintptr_t> StubMap;
+
+  Triple::ArchType Arch;
+  bool IsTargetLittleEndian;
+  bool IsMipsO32ABI;
+  bool IsMipsN32ABI;
+  bool IsMipsN64ABI;
+
+  // True if all sections should be passed to the memory manager, false if only
+  // sections containing relocations should be. Defaults to 'false'.
+  bool ProcessAllSections;
+
+  // This mutex prevents simultaneously loading objects from two different
+  // threads. This keeps us from having to protect individual data structures
+  // and guarantees that section allocation requests to the memory manager
+  // won't be interleaved between modules. It is also used in mapSectionAddress
+  // and resolveRelocations to protect write access to internal data structures.
+  //
+  // loadObject may be called on the same thread during the handling of
+  // processRelocations, and that's OK. The handling of the relocation lists
+  // is written in such a way as to work correctly if new elements are added to
+  // the end of the list while the list is being processed.
+  sys::Mutex lock;
+
+  using NotifyStubEmittedFunction =
+    RuntimeDyld::NotifyStubEmittedFunction;
+  NotifyStubEmittedFunction NotifyStubEmitted;
+
+  virtual unsigned getMaxStubSize() const = 0;
+  virtual Align getStubAlignment() = 0;
+
+  // Sticky error state; see hasError()/getErrorString()/clearError() below.
+  bool HasError;
+  std::string ErrorStr;
+
+  // Endian-aware helpers: write Value in the target's byte order.
+  void writeInt16BE(uint8_t *Addr, uint16_t Value) {
+    llvm::support::endian::write<uint16_t>(Addr, Value,
+                                           IsTargetLittleEndian
+                                               ? llvm::endianness::little
+                                               : llvm::endianness::big);
+  }
+
+  void writeInt32BE(uint8_t *Addr, uint32_t Value) {
+    llvm::support::endian::write<uint32_t>(Addr, Value,
+                                           IsTargetLittleEndian
+                                               ? llvm::endianness::little
+                                               : llvm::endianness::big);
+  }
+
+  void writeInt64BE(uint8_t *Addr, uint64_t Value) {
+    llvm::support::endian::write<uint64_t>(Addr, Value,
+                                           IsTargetLittleEndian
+                                               ? llvm::endianness::little
+                                               : llvm::endianness::big);
+  }
+
+  // Default: not a MIPS object. Overridden by the ELF linker.
+  virtual void setMipsABI(const ObjectFile &Obj) {
+    IsMipsO32ABI = false;
+    IsMipsN32ABI = false;
+    IsMipsN64ABI = false;
+  }
+
+  /// Endian-aware read. Read the least significant Size bytes from Src.
+  uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const;
+
+  /// Endian-aware write. Write the least significant Size bytes from Value to
+  /// Dst.
+  void writeBytesUnaligned(uint64_t Value, uint8_t *Dst, unsigned Size) const;
+
+  /// Generate JITSymbolFlags from a libObject symbol.
+  virtual Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &Sym);
+
+  /// Modify the given target address based on the given symbol flags.
+  /// This can be used by subclasses to tweak addresses based on symbol flags,
+  /// For example: the MachO/ARM target uses it to set the low bit if the target
+  /// is a thumb symbol.
+  virtual uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+                                             JITSymbolFlags Flags) const {
+    return Addr;
+  }
+
+  /// Given the common symbols discovered in the object file, emit a
+  /// new section for them and update the symbol mappings in the object and
+  /// symbol table.
+  Error emitCommonSymbols(const ObjectFile &Obj,
+                          CommonSymbolList &CommonSymbols, uint64_t CommonSize,
+                          uint32_t CommonAlign);
+
+  /// Emits section data from the object file to the MemoryManager.
+  /// \param IsCode if it's true then allocateCodeSection() will be
+  ///        used for emits, else allocateDataSection() will be used.
+  /// \return SectionID.
+  Expected<unsigned> emitSection(const ObjectFile &Obj,
+                                 const SectionRef &Section,
+                                 bool IsCode);
+
+  /// Find Section in LocalSections. If the section is not found - emit
+  /// it and store in LocalSections.
+  /// \param IsCode if it's true then allocateCodeSection() will be
+  ///        used for emits, else allocateDataSection() will be used.
+  /// \return SectionID.
+  Expected<unsigned> findOrEmitSection(const ObjectFile &Obj,
+                                       const SectionRef &Section, bool IsCode,
+                                       ObjSectionToIDMap &LocalSections);
+
+  // Add a relocation entry that uses the given section.
+  void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
+
+  // Add a relocation entry that uses the given symbol. This symbol may
+  // be found in the global symbol table, or it may be external.
+  void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
+
+  /// Emits long jump instruction to Addr.
+  /// \return Pointer to the memory area for emitting target address.
+  uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
+
+  /// Resolves relocations from Relocs list with address from Value.
+  void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
+
+  /// An object file specific relocation resolver.
+  /// \param RE The relocation to be resolved
+  /// \param Value Target symbol address to apply the relocation action
+  virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
+
+  /// Parses one or more object file relocations (some object files use
+  /// relocation pairs) and stores it to Relocations or SymbolRelocations
+  /// (this depends on the object file type).
+  /// \return Iterator to the next relocation that needs to be parsed.
+  virtual Expected<relocation_iterator>
+  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+                       const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID,
+                       StubMap &Stubs) = 0;
+
+  void applyExternalSymbolRelocations(
+      const StringMap<JITEvaluatedSymbol> ExternalSymbolMap);
+
+  /// Resolve relocations to external symbols.
+  Error resolveExternalSymbols();
+
+  // Compute an upper bound of the memory that is required to load all
+  // sections
+  Error computeTotalAllocSize(const ObjectFile &Obj, uint64_t &CodeSize,
+                              Align &CodeAlign, uint64_t &RODataSize,
+                              Align &RODataAlign, uint64_t &RWDataSize,
+                              Align &RWDataAlign);
+
+  // Compute GOT size
+  unsigned computeGOTSize(const ObjectFile &Obj);
+
+  // Compute the stub buffer size required for a section
+  unsigned computeSectionStubBufSize(const ObjectFile &Obj,
+                                     const SectionRef &Section);
+
+  // Implementation of the generic part of the loadObject algorithm.
+  Expected<ObjSectionToIDMap> loadObjectImpl(const object::ObjectFile &Obj);
+
+  // Return size of Global Offset Table (GOT) entry
+  virtual size_t getGOTEntrySize() { return 0; }
+
+  // Hook for the subclasses to do further processing when a symbol is added to
+  // the global symbol table. This function may modify the symbol table entry.
+  virtual void processNewSymbol(const SymbolRef &ObjSymbol,
+                                SymbolTableEntry &Entry) {}
+
+  // Return true if the relocation R may require allocating a GOT entry.
+  virtual bool relocationNeedsGot(const RelocationRef &R) const {
+    return false;
+  }
+
+  // Return true if the relocation R may require allocating a stub.
+  virtual bool relocationNeedsStub(const RelocationRef &R) const {
+    return true; // Conservative answer
+  }
+
+public:
+  RuntimeDyldImpl(RuntimeDyld::MemoryManager &MemMgr,
+                  JITSymbolResolver &Resolver)
+    : MemMgr(MemMgr), Resolver(Resolver),
+      ProcessAllSections(false), HasError(false) {
+  }
+
+  virtual ~RuntimeDyldImpl();
+
+  void setProcessAllSections(bool ProcessAllSections) {
+    this->ProcessAllSections = ProcessAllSections;
+  }
+
+  virtual std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+  loadObject(const object::ObjectFile &Obj) = 0;
+
+  uint64_t getSectionLoadAddress(unsigned SectionID) const {
+    if (SectionID == AbsoluteSymbolSection)
+      return 0;
+    else
+      return Sections[SectionID].getLoadAddress();
+  }
+
+  uint8_t *getSectionAddress(unsigned SectionID) const {
+    if (SectionID == AbsoluteSymbolSection)
+      return nullptr;
+    else
+      return Sections[SectionID].getAddress();
+  }
+
+  // Returns the section's bytes including the space reserved for stubs.
+  StringRef getSectionContent(unsigned SectionID) const {
+    if (SectionID == AbsoluteSymbolSection)
+      return {};
+    else
+      return StringRef(
+          reinterpret_cast<char *>(Sections[SectionID].getAddress()),
+          Sections[SectionID].getStubOffset() + getMaxStubSize());
+  }
+
+  uint8_t* getSymbolLocalAddress(StringRef Name) const {
+    // FIXME: Just look up as a function for now. Overly simple of course.
+    // Work in progress.
+    RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
+    if (pos == GlobalSymbolTable.end())
+      return nullptr;
+    const auto &SymInfo = pos->second;
+    // Absolute symbols do not have a local address.
+    if (SymInfo.getSectionID() == AbsoluteSymbolSection)
+      return nullptr;
+    return getSectionAddress(SymInfo.getSectionID()) + SymInfo.getOffset();
+  }
+
+  // Returns ~0U when the symbol is not in the global symbol table.
+  unsigned getSymbolSectionID(StringRef Name) const {
+    auto GSTItr = GlobalSymbolTable.find(Name);
+    if (GSTItr == GlobalSymbolTable.end())
+      return ~0U;
+    return GSTItr->second.getSectionID();
+  }
+
+  JITEvaluatedSymbol getSymbol(StringRef Name) const {
+    // FIXME: Just look up as a function for now. Overly simple of course.
+    // Work in progress.
+    RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
+    if (pos == GlobalSymbolTable.end())
+      return nullptr;
+    const auto &SymEntry = pos->second;
+    uint64_t SectionAddr = 0;
+    if (SymEntry.getSectionID() != AbsoluteSymbolSection)
+      SectionAddr = getSectionLoadAddress(SymEntry.getSectionID());
+    uint64_t TargetAddr = SectionAddr + SymEntry.getOffset();
+
+    // FIXME: Have getSymbol should return the actual address and the client
+    //        modify it based on the flags. This will require clients to be
+    //        aware of the target architecture, which we should build
+    //        infrastructure for.
+    TargetAddr = modifyAddressBasedOnFlags(TargetAddr, SymEntry.getFlags());
+    return JITEvaluatedSymbol(TargetAddr, SymEntry.getFlags());
+  }
+
+  std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const {
+    std::map<StringRef, JITEvaluatedSymbol> Result;
+
+    for (const auto &KV : GlobalSymbolTable) {
+      auto SectionID = KV.second.getSectionID();
+      uint64_t SectionAddr = getSectionLoadAddress(SectionID);
+      Result[KV.first()] =
+          JITEvaluatedSymbol(SectionAddr + KV.second.getOffset(),
+                             KV.second.getFlags());
+    }
+
+    return Result;
+  }
+
+  void resolveRelocations();
+
+  void resolveLocalRelocations();
+
+  static void finalizeAsync(
+      std::unique_ptr<RuntimeDyldImpl> This,
+      unique_function<void(object::OwningBinary<object::ObjectFile>,
+                           std::unique_ptr<RuntimeDyld::LoadedObjectInfo>,
+                           Error)>
+          OnEmitted,
+      object::OwningBinary<object::ObjectFile> O,
+      std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info);
+
+  void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+  void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+  // Is the linker in an error state?
+  bool hasError() { return HasError; }
+
+  // Mark the error condition as handled and continue.
+  void clearError() { HasError = false; }
+
+  // Get the error message.
+  StringRef getErrorString() { return ErrorStr; }
+
+  virtual bool isCompatibleFile(const ObjectFile &Obj) const = 0;
+
+  void setNotifyStubEmitted(NotifyStubEmittedFunction NotifyStubEmitted) {
+    this->NotifyStubEmitted = std::move(NotifyStubEmitted);
+  }
+
+  virtual void registerEHFrames();
+
+  void deregisterEHFrames();
+
+  // Format-specific post-load fixups; the default does nothing.
+  virtual Error finalizeLoad(const ObjectFile &ObjImg,
+                             ObjSectionToIDMap &SectionMap) {
+    return Error::success();
+  }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
new file mode 100644
index 000000000000..9ca76602ea18
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -0,0 +1,382 @@
+//===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldMachO.h"
+#include "Targets/RuntimeDyldMachOAArch64.h"
+#include "Targets/RuntimeDyldMachOARM.h"
+#include "Targets/RuntimeDyldMachOI386.h"
+#include "Targets/RuntimeDyldMachOX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+class LoadedMachOObjectInfo final
+ : public LoadedObjectInfoHelper<LoadedMachOObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
+public:
+ LoadedMachOObjectInfo(RuntimeDyldImpl &RTDyld,
+ ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+ OwningBinary<ObjectFile>
+ getObjectForDebug(const ObjectFile &Obj) const override {
+ return OwningBinary<ObjectFile>();
+ }
+};
+
+}
+
+namespace llvm {
+
+int64_t RuntimeDyldMachO::memcpyAddend(const RelocationEntry &RE) const {
+ unsigned NumBytes = 1 << RE.Size;
+ uint8_t *Src = Sections[RE.SectionID].getAddress() + RE.Offset;
+
+ return static_cast<int64_t>(readBytesUnaligned(Src, NumBytes));
+}
+
+Expected<relocation_iterator>
+RuntimeDyldMachO::processScatteredVANILLA(
+ unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+ bool TargetIsLocalThumbFunc) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = Obj.getAnyRelocationType(RE);
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+ unsigned SymbolBaseAddr = Obj.getScatteredRelocationValue(RE);
+ section_iterator TargetSI = getSectionByAddress(Obj, SymbolBaseAddr);
+ assert(TargetSI != Obj.section_end() && "Can't find section for symbol");
+ uint64_t SectionBaseAddr = TargetSI->getAddress();
+ SectionRef TargetSection = *TargetSI;
+ bool IsCode = TargetSection.isText();
+ uint32_t TargetSectionID = ~0U;
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+
+ Addend -= SectionBaseAddr;
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+ R.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+ addRelocationForSection(R, TargetSectionID);
+
+ return ++RelI;
+}
+
+
+Expected<RelocationValueRef>
+RuntimeDyldMachO::getRelocationValueRef(
+ const ObjectFile &BaseTObj, const relocation_iterator &RI,
+ const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID) {
+
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseTObj);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+ RelocationValueRef Value;
+
+ bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
+ if (IsExternal) {
+ symbol_iterator Symbol = RI->getSymbol();
+ StringRef TargetName;
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+ RTDyldSymbolTable::const_iterator SI =
+ GlobalSymbolTable.find(TargetName.data());
+ if (SI != GlobalSymbolTable.end()) {
+ const auto &SymInfo = SI->second;
+ Value.SectionID = SymInfo.getSectionID();
+ Value.Offset = SymInfo.getOffset() + RE.Addend;
+ } else {
+ Value.SymbolName = TargetName.data();
+ Value.Offset = RE.Addend;
+ }
+ } else {
+ SectionRef Sec = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = Sec.isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, Sec, IsCode,
+ ObjSectionToID))
+ Value.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ uint64_t Addr = Sec.getAddress();
+ Value.Offset = RE.Addend - Addr;
+ }
+
+ return Value;
+}
+
+void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
+ const relocation_iterator &RI,
+ unsigned OffsetToNextPC) {
+ auto &O = *cast<MachOObjectFile>(RI->getObject());
+ section_iterator SecI = O.getRelocationRelocatedSection(RI);
+ Value.Offset += RI->getOffset() + OffsetToNextPC + SecI->getAddress();
+}
+
+void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
+ uint64_t Value) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddress() + RE.Offset;
+ uint64_t FinalAddress = Section.getLoadAddress() + RE.Offset;
+
+ dbgs() << "resolveRelocation Section: " << RE.SectionID
+ << " LocalAddress: " << format("%p", LocalAddress)
+ << " FinalAddress: " << format("0x%016" PRIx64, FinalAddress)
+ << " Value: " << format("0x%016" PRIx64, Value) << " Addend: " << RE.Addend
+ << " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
+ << " Size: " << (1 << RE.Size) << "\n";
+}
+
+section_iterator
+RuntimeDyldMachO::getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr) {
+ section_iterator SI = Obj.section_begin();
+ section_iterator SE = Obj.section_end();
+
+ for (; SI != SE; ++SI) {
+ uint64_t SAddr = SI->getAddress();
+ uint64_t SSize = SI->getSize();
+ if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+ return SI;
+ }
+
+ return SE;
+}
+
+
+// Populate __pointers section.
+Error RuntimeDyldMachO::populateIndirectSymbolPointersSection(
+ const MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "Pointer table section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+ uint32_t PTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ const unsigned PTEntrySize = 4;
+ unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+ unsigned PTEntryOffset = 0;
+
+ assert((PTSectionSize % PTEntrySize) == 0 &&
+ "Pointers section does not contain a whole number of stubs?");
+
+ LLVM_DEBUG(dbgs() << "Populating pointer table section "
+ << Sections[PTSectionID].getName() << ", Section ID "
+ << PTSectionID << ", " << NumPTEntries << " entries, "
+ << PTEntrySize << " bytes each:\n");
+
+ for (unsigned i = 0; i < NumPTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ if (auto IndirectSymbolNameOrErr = SI->getName())
+ IndirectSymbolName = *IndirectSymbolNameOrErr;
+ else
+ return IndirectSymbolNameOrErr.takeError();
+ LLVM_DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
+ << ", PT offset: " << PTEntryOffset << "\n");
+ RelocationEntry RE(PTSectionID, PTEntryOffset,
+ MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ PTEntryOffset += PTEntrySize;
+ }
+ return Error::success();
+}
+
+bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile &Obj) const {
+ return Obj.isMachO();
+}
+
+template <typename Impl>
+Error
+RuntimeDyldMachOCRTPBase<Impl>::finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) {
+ unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
+
+ for (const auto &Section : Obj.sections()) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ // Force emission of the __text, __eh_frame, and __gcc_except_tab sections
+ // if they're present. Otherwise call down to the impl to handle other
+ // sections that have already been emitted.
+ if (Name == "__text") {
+ if (auto TextSIDOrErr = findOrEmitSection(Obj, Section, true, SectionMap))
+ TextSID = *TextSIDOrErr;
+ else
+ return TextSIDOrErr.takeError();
+ } else if (Name == "__eh_frame") {
+ if (auto EHFrameSIDOrErr = findOrEmitSection(Obj, Section, false,
+ SectionMap))
+ EHFrameSID = *EHFrameSIDOrErr;
+ else
+ return EHFrameSIDOrErr.takeError();
+ } else if (Name == "__gcc_except_tab") {
+ if (auto ExceptTabSIDOrErr = findOrEmitSection(Obj, Section, true,
+ SectionMap))
+ ExceptTabSID = *ExceptTabSIDOrErr;
+ else
+ return ExceptTabSIDOrErr.takeError();
+ } else {
+ auto I = SectionMap.find(Section);
+ if (I != SectionMap.end())
+ if (auto Err = impl().finalizeSection(Obj, I->second, Section))
+ return Err;
+ }
+ }
+ UnregisteredEHFrameSections.push_back(
+ EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
+
+ return Error::success();
+}
+
+template <typename Impl>
+unsigned char *RuntimeDyldMachOCRTPBase<Impl>::processFDE(uint8_t *P,
+ int64_t DeltaForText,
+ int64_t DeltaForEH) {
+ typedef typename Impl::TargetPtrT TargetPtrT;
+
+ LLVM_DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+ << ", Delta for EH: " << DeltaForEH << "\n");
+ uint32_t Length = readBytesUnaligned(P, 4);
+ P += 4;
+ uint8_t *Ret = P + Length;
+ uint32_t Offset = readBytesUnaligned(P, 4);
+ if (Offset == 0) // is a CIE
+ return Ret;
+
+ P += 4;
+ TargetPtrT FDELocation = readBytesUnaligned(P, sizeof(TargetPtrT));
+ TargetPtrT NewLocation = FDELocation - DeltaForText;
+ writeBytesUnaligned(NewLocation, P, sizeof(TargetPtrT));
+
+ P += sizeof(TargetPtrT);
+
+ // Skip the FDE address range
+ P += sizeof(TargetPtrT);
+
+ uint8_t Augmentationsize = *P;
+ P += 1;
+ if (Augmentationsize != 0) {
+ TargetPtrT LSDA = readBytesUnaligned(P, sizeof(TargetPtrT));
+ TargetPtrT NewLSDA = LSDA - DeltaForEH;
+ writeBytesUnaligned(NewLSDA, P, sizeof(TargetPtrT));
+ }
+
+ return Ret;
+}
+
+static int64_t computeDelta(SectionEntry *A, SectionEntry *B) {
+ int64_t ObjDistance = static_cast<int64_t>(A->getObjAddress()) -
+ static_cast<int64_t>(B->getObjAddress());
+ int64_t MemDistance = A->getLoadAddress() - B->getLoadAddress();
+ return ObjDistance - MemDistance;
+}
+
+template <typename Impl>
+void RuntimeDyldMachOCRTPBase<Impl>::registerEHFrames() {
+
+ for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
+ EHFrameRelatedSections &SectionInfo = UnregisteredEHFrameSections[i];
+ if (SectionInfo.EHFrameSID == RTDYLD_INVALID_SECTION_ID ||
+ SectionInfo.TextSID == RTDYLD_INVALID_SECTION_ID)
+ continue;
+ SectionEntry *Text = &Sections[SectionInfo.TextSID];
+ SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
+ SectionEntry *ExceptTab = nullptr;
+ if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
+ ExceptTab = &Sections[SectionInfo.ExceptTabSID];
+
+ int64_t DeltaForText = computeDelta(Text, EHFrame);
+ int64_t DeltaForEH = 0;
+ if (ExceptTab)
+ DeltaForEH = computeDelta(ExceptTab, EHFrame);
+
+ uint8_t *P = EHFrame->getAddress();
+ uint8_t *End = P + EHFrame->getSize();
+ while (P != End) {
+ P = processFDE(P, DeltaForText, DeltaForEH);
+ }
+
+ MemMgr.registerEHFrames(EHFrame->getAddress(), EHFrame->getLoadAddress(),
+ EHFrame->getSize());
+ }
+ UnregisteredEHFrameSections.clear();
+}
+
+std::unique_ptr<RuntimeDyldMachO>
+RuntimeDyldMachO::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default:
+ llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
+ break;
+ case Triple::arm:
+ return std::make_unique<RuntimeDyldMachOARM>(MemMgr, Resolver);
+ case Triple::aarch64:
+ return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+ case Triple::aarch64_32:
+ return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+ case Triple::x86:
+ return std::make_unique<RuntimeDyldMachOI386>(MemMgr, Resolver);
+ case Triple::x86_64:
+ return std::make_unique<RuntimeDyldMachOX86_64>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldMachO::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+ return std::make_unique<LoadedMachOObjectInfo>(*this,
+ *ObjSectionToIDOrErr);
+ else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
new file mode 100644
index 000000000000..650e7b79fbb8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -0,0 +1,167 @@
+//===-- RuntimeDyldMachO.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Support/Format.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+class RuntimeDyldMachO : public RuntimeDyldImpl {
+protected:
+ struct SectionOffsetPair {
+ unsigned SectionID;
+ uint64_t Offset;
+ };
+
+ struct EHFrameRelatedSections {
+ EHFrameRelatedSections()
+ : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
+ TextSID(RTDYLD_INVALID_SECTION_ID),
+ ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
+
+ EHFrameRelatedSections(SID EH, SID T, SID Ex)
+ : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
+ SID EHFrameSID;
+ SID TextSID;
+ SID ExceptTabSID;
+ };
+
+ // When a module is loaded we save the SectionID of the EH frame section
+ // in a table until we receive a request to register all unregistered
+ // EH frame sections with the memory manager.
+ SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
+
+ RuntimeDyldMachO(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldImpl(MemMgr, Resolver) {}
+
+ /// This convenience method uses memcpy to extract a contiguous addend (the
+ /// addend size and offset are taken from the corresponding fields of the RE).
+ int64_t memcpyAddend(const RelocationEntry &RE) const;
+
+ /// Given a relocation_iterator for a non-scattered relocation, construct a
+ /// RelocationEntry and fill in the common fields. The 'Addend' field is *not*
+ /// filled in, since immediate encodings are highly target/opcode specific.
+ /// For targets/opcodes with simple, contiguous immediates (e.g. X86) the
+ /// memcpyAddend method can be used to read the immediate.
+ RelocationEntry getRelocationEntry(unsigned SectionID,
+ const ObjectFile &BaseTObj,
+ const relocation_iterator &RI) const {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseTObj);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+ unsigned Size = Obj.getAnyRelocationLength(RelInfo);
+ uint64_t Offset = RI->getOffset();
+ MachO::RelocationInfoType RelType =
+ static_cast<MachO::RelocationInfoType>(Obj.getAnyRelocationType(RelInfo));
+
+ return RelocationEntry(SectionID, Offset, RelType, 0, IsPCRel, Size);
+ }
+
+ /// Process a scattered vanilla relocation.
+ Expected<relocation_iterator>
+ processScatteredVANILLA(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+ bool TargetIsLocalThumbFunc = false);
+
+ /// Construct a RelocationValueRef representing the relocation target.
+ /// For Symbols in known sections, this will return a RelocationValueRef
+ /// representing a (SectionID, Offset) pair.
+ /// For Symbols whose section is not known, this will return a
+ /// (SymbolName, Offset) pair, where the Offset is taken from the instruction
+ /// immediate (held in RE.Addend).
+ /// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
+ /// should be done by the caller where appropriate by calling makePCRel on
+ /// the RelocationValueRef.
+ Expected<RelocationValueRef>
+ getRelocationValueRef(const ObjectFile &BaseTObj,
+ const relocation_iterator &RI,
+ const RelocationEntry &RE,
+ ObjSectionToIDMap &ObjSectionToID);
+
+ /// Make the RelocationValueRef addend PC-relative.
+ void makeValueAddendPCRel(RelocationValueRef &Value,
+ const relocation_iterator &RI,
+ unsigned OffsetToNextPC);
+
+ /// Dump information about the relocation entry (RE) and resolved value.
+ void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
+
+ // Return a section iterator for the section containing the given address.
+ static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr);
+
+
+ // Populate __pointers section.
+ Error populateIndirectSymbolPointersSection(const MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID);
+
+public:
+
+ /// Create a RuntimeDyldMachO instance for the given target architecture.
+ static std::unique_ptr<RuntimeDyldMachO>
+ create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &O) override;
+
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+};
+
+/// RuntimeDyldMachOTarget - Templated base class for generic MachO linker
+/// algorithms and data structures.
+///
+/// Concrete, target specific sub-classes can be accessed via the impl()
+/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
+/// Recurring Template Idiom). Concrete subclasses for each target
+/// can be found in ./Targets.
+template <typename Impl>
+class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
+private:
+ Impl &impl() { return static_cast<Impl &>(*this); }
+ const Impl &impl() const { return static_cast<const Impl &>(*this); }
+
+ unsigned char *processFDE(uint8_t *P, int64_t DeltaForText,
+ int64_t DeltaForEH);
+
+public:
+ RuntimeDyldMachOCRTPBase(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachO(MemMgr, Resolver) {}
+
+ Error finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override;
+ void registerEHFrames() override;
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
new file mode 100644
index 000000000000..66c9753a72fd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
@@ -0,0 +1,377 @@
+//===-- RuntimeDyldCOFFAArch64.h --- COFF/AArch64 specific code ---*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF AArch64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFAARCH64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm::support::endian;
+
+namespace llvm {
+
+// This relocation type is used for handling long branch instruction
+// through the Stub.
+enum InternalRelocationType : unsigned {
+ INTERNAL_REL_ARM64_LONG_BRANCH26 = 0x111,
+};
+
+static void add16(uint8_t *p, int16_t v) { write16le(p, read16le(p) + v); }
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+static void write32AArch64Imm(uint8_t *T, uint64_t imm, uint32_t rangeLimit) {
+ uint32_t orig = read32le(T);
+ orig &= ~(0xFFF << 10);
+ write32le(T, orig | ((imm & (0xFFF >> rangeLimit)) << 10));
+}
+
+static void write32AArch64Ldr(uint8_t *T, uint64_t imm) {
+ uint32_t orig = read32le(T);
+ uint32_t size = orig >> 30;
+ // 0x04000000 indicates SIMD/FP registers
+ // 0x00800000 indicates 128 bit
+ if ((orig & 0x04800000) == 0x04800000)
+ size += 4;
+ if ((imm & ((1 << size) - 1)) != 0)
+ assert(0 && "misaligned ldr/str offset");
+ write32AArch64Imm(T, imm >> size, size);
+}
+
+static void write32AArch64Addr(void *T, uint64_t s, uint64_t p, int shift) {
+ uint64_t Imm = (s >> shift) - (p >> shift);
+ uint32_t ImmLo = (Imm & 0x3) << 29;
+ uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+ uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+ write32le(T, (read32le(T) & ~Mask) | ImmLo | ImmHi);
+}
+
+class RuntimeDyldCOFFAArch64 : public RuntimeDyldCOFF {
+
+private:
+ // When a module is loaded we save the SectionID of the unwind
+ // sections in a table until we receive a request to register all
+ // unregisteredEH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+ SmallVector<SID, 2> RegisteredEHFrameSections;
+ uint64_t ImageBase;
+
+ // Fake an __ImageBase pointer by returning the section with the lowest adress
+ uint64_t getImageBase() {
+ if (!ImageBase) {
+ ImageBase = std::numeric_limits<uint64_t>::max();
+ for (const SectionEntry &Section : Sections)
+ // The Sections list may contain sections that weren't loaded for
+ // whatever reason: they may be debug sections, and ProcessAllSections
+ // is false, or they may be sections that contain 0 bytes. If the
+ // section isn't loaded, the load address will be 0, and it should not
+ // be included in the ImageBase calculation.
+ if (Section.getLoadAddress() != 0)
+ ImageBase = std::min(ImageBase, Section.getLoadAddress());
+ }
+ return ImageBase;
+ }
+
+public:
+ RuntimeDyldCOFFAArch64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_ARM64_ADDR64),
+ ImageBase(0) {}
+
+ Align getStubAlignment() override { return Align(8); }
+
+ unsigned getMaxStubSize() const override { return 20; }
+
+ std::tuple<uint64_t, uint64_t, uint64_t>
+ generateRelocationStub(unsigned SectionID, StringRef TargetName,
+ uint64_t Offset, uint64_t RelType, uint64_t Addend,
+ StubMap &Stubs) {
+ uintptr_t StubOffset;
+ SectionEntry &Section = Sections[SectionID];
+
+ RelocationValueRef OriginalRelValueRef;
+ OriginalRelValueRef.SectionID = SectionID;
+ OriginalRelValueRef.Offset = Offset;
+ OriginalRelValueRef.Addend = Addend;
+ OriginalRelValueRef.SymbolName = TargetName.data();
+
+ auto Stub = Stubs.find(OriginalRelValueRef);
+ if (Stub == Stubs.end()) {
+ LLVM_DEBUG(dbgs() << " Create a new stub function for "
+ << TargetName.data() << "\n");
+
+ StubOffset = Section.getStubOffset();
+ Stubs[OriginalRelValueRef] = StubOffset;
+ createStubFunction(Section.getAddressWithOffset(StubOffset));
+ Section.advanceStubOffset(getMaxStubSize());
+ } else {
+ LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+ << "\n");
+ StubOffset = Stub->second;
+ }
+
+ // Resolve original relocation to stub function.
+ const RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));
+
+ // adjust relocation info so resolution writes to the stub function
+ // Here an internal relocation type is used for resolving long branch via
+ // stub instruction.
+ Addend = 0;
+ Offset = StubOffset;
+ RelType = INTERNAL_REL_ARM64_LONG_BRANCH26;
+
+ return std::make_tuple(Offset, RelType, Addend);
+ }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID, object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ // If there is no section, this must be an external reference.
+ bool IsExtern = Section == Obj.section_end();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ unsigned TargetSectionID = -1;
+ uint64_t TargetOffset = -1;
+
+ if (TargetName.starts_with(getImportSymbolPrefix())) {
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr = findOrEmitSection(
+ Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_ARM64_ADDR32:
+ case COFF::IMAGE_REL_ARM64_ADDR32NB:
+ case COFF::IMAGE_REL_ARM64_REL32:
+ case COFF::IMAGE_REL_ARM64_SECREL:
+ Addend = read32le(Displacement);
+ break;
+ case COFF::IMAGE_REL_ARM64_BRANCH26: {
+ uint32_t orig = read32le(Displacement);
+ Addend = (orig & 0x03FFFFFF) << 2;
+
+ if (IsExtern)
+ std::tie(Offset, RelType, Addend) = generateRelocationStub(
+ SectionID, TargetName, Offset, RelType, Addend, Stubs);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_BRANCH19: {
+ uint32_t orig = read32le(Displacement);
+ Addend = (orig & 0x00FFFFE0) >> 3;
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_BRANCH14: {
+ uint32_t orig = read32le(Displacement);
+ Addend = (orig & 0x000FFFE0) >> 3;
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_REL21:
+ case COFF::IMAGE_REL_ARM64_PAGEBASE_REL21: {
+ uint32_t orig = read32le(Displacement);
+ Addend = ((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L:
+ case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A: {
+ uint32_t orig = read32le(Displacement);
+ Addend = ((orig >> 10) & 0xFFF);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_ADDR64: {
+ Addend = read64le(Displacement);
+ break;
+ }
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+#endif
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ }
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM64_ABSOLUTE: {
+ // This relocation is ignored.
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_PAGEBASE_REL21: {
+ // The page base of the target, for ADRP instruction.
+ Value += RE.Addend;
+ write32AArch64Addr(Target, Value, FinalAddress, 12);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_REL21: {
+ // The 12-bit relative displacement to the target, for instruction ADR
+ Value += RE.Addend;
+ write32AArch64Addr(Target, Value, FinalAddress, 0);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A: {
+ // The 12-bit page offset of the target,
+ // for instructions ADD/ADDS (immediate) with zero shift.
+ Value += RE.Addend;
+ write32AArch64Imm(Target, Value & 0xFFF, 0);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L: {
+ // The 12-bit page offset of the target,
+ // for instruction LDR (indexed, unsigned immediate).
+ Value += RE.Addend;
+ write32AArch64Ldr(Target, Value & 0xFFF);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_ADDR32: {
+ // The 32-bit VA of the target.
+ uint32_t VA = Value + RE.Addend;
+ write32le(Target, VA);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_ADDR32NB: {
+ // The target's 32-bit RVA.
+ uint64_t RVA = Value + RE.Addend - getImageBase();
+ write32le(Target, RVA);
+ break;
+ }
+ case INTERNAL_REL_ARM64_LONG_BRANCH26: {
+ // Encode the immadiate value for generated Stub instruction (MOVZ)
+ or32le(Target + 12, ((Value + RE.Addend) & 0xFFFF) << 5);
+ or32le(Target + 8, ((Value + RE.Addend) & 0xFFFF0000) >> 11);
+ or32le(Target + 4, ((Value + RE.Addend) & 0xFFFF00000000) >> 27);
+ or32le(Target + 0, ((Value + RE.Addend) & 0xFFFF000000000000) >> 43);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_BRANCH26: {
+ // The 26-bit relative displacement to the target, for B and BL
+ // instructions.
+ uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
+ assert(isInt<28>(PCRelVal) && "Branch target is out of range.");
+ write32le(Target, (read32le(Target) & ~(0x03FFFFFF)) |
+ (PCRelVal & 0x0FFFFFFC) >> 2);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_BRANCH19: {
+ // The 19-bit offset to the relocation target,
+ // for conditional B instruction.
+ uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
+ assert(isInt<21>(PCRelVal) && "Branch target is out of range.");
+ write32le(Target, (read32le(Target) & ~(0x00FFFFE0)) |
+ (PCRelVal & 0x001FFFFC) << 3);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_BRANCH14: {
+ // The 14-bit offset to the relocation target,
+ // for instructions TBZ and TBNZ.
+ uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
+ assert(isInt<16>(PCRelVal) && "Branch target is out of range.");
+ write32le(Target, (read32le(Target) & ~(0x000FFFE0)) |
+ (PCRelVal & 0x0000FFFC) << 3);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_ADDR64: {
+ // The 64-bit VA of the relocation target.
+ write64le(Target, Value + RE.Addend);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_SECTION: {
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ add16(Target, RE.SectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_SECREL: {
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "Relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "Relocation underflow");
+ write32le(Target, RE.Addend);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM64_REL32: {
+ // The 32-bit relative address from the byte following the relocation.
+ uint64_t Result = Value - FinalAddress - 4;
+ write32le(Target, Result + RE.Addend);
+ break;
+ }
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
new file mode 100644
index 000000000000..0d5afc289b8c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
@@ -0,0 +1,228 @@
+//===--- RuntimeDyldCOFFI386.h --- COFF/X86_64 specific code ---*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFI386 : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFI386(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 4, COFF::IMAGE_REL_I386_DIR32) {}
+
+ unsigned getMaxStubSize() const override {
+ return 8; // 2-byte jmp instruction + 32-bit relative address + 2 byte pad
+ }
+
+ Align getStubAlignment() override { return Align(1); }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+ bool IsExtern = Section == Obj.section_end();
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ unsigned TargetSectionID = -1;
+ uint64_t TargetOffset = -1;
+ if (TargetName.starts_with(getImportSymbolPrefix())) {
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName, true);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr = findOrEmitSection(
+ Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ if (RelType != COFF::IMAGE_REL_I386_SECTION)
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_I386_DIR32:
+ case COFF::IMAGE_REL_I386_DIR32NB:
+ case COFF::IMAGE_REL_I386_SECREL:
+ case COFF::IMAGE_REL_I386_REL32: {
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ }
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_I386_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_I386_DIR32:
+ case COFF::IMAGE_REL_I386_DIR32NB:
+ case COFF::IMAGE_REL_I386_REL32: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECREL: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ case COFF::IMAGE_REL_I386_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_I386_DIR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(
+ RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_DIR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend) -
+ Sections[0].getLoadAddress();
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_REL32: {
+ // 32-bit relative displacement to the target.
+ uint64_t Result = RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddress();
+ Result = Result - Section.getLoadAddress() + RE.Addend - 4 - RE.Offset;
+ assert(static_cast<int64_t>(Result) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_REL32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_I386_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECREL Value: "
+ << RE.Addend << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+}
+
+#endif
+
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
new file mode 100644
index 000000000000..c079d8896c1d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
@@ -0,0 +1,348 @@
+//===--- RuntimeDyldCOFFThumb.h --- COFF/Thumb specific code ---*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF thumb support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+static bool isThumbFunc(object::symbol_iterator Symbol,
+ const object::ObjectFile &Obj,
+ object::section_iterator Section) {
+ Expected<object::SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+
+ if (*SymTypeOrErr != object::SymbolRef::ST_Function)
+ return false;
+
+ // We check the IMAGE_SCN_MEM_16BIT flag in the section of the symbol to tell
+ // if it's thumb or not
+ return cast<object::COFFObjectFile>(Obj)
+ .getCOFFSection(*Section)
+ ->Characteristics &
+ COFF::IMAGE_SCN_MEM_16BIT;
+}
+
+class RuntimeDyldCOFFThumb : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFThumb(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 4, COFF::IMAGE_REL_ARM_ADDR32) {}
+
+ unsigned getMaxStubSize() const override {
+ return 16; // 8-byte load instructions, 4-byte jump, 4-byte padding
+ }
+
+ Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
+
+ auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
+
+ if (!Flags) {
+ return Flags.takeError();
+ }
+ auto SectionIterOrErr = SR.getSection();
+ if (!SectionIterOrErr) {
+ return SectionIterOrErr.takeError();
+ }
+ SectionRef Sec = *SectionIterOrErr.get();
+ const object::COFFObjectFile *COFFObjPtr =
+ cast<object::COFFObjectFile>(Sec.getObject());
+ const coff_section *CoffSec = COFFObjPtr->getCOFFSection(Sec);
+ bool isThumb = CoffSec->Characteristics & COFF::IMAGE_SCN_MEM_16BIT;
+
+ Flags->getTargetFlags() = isThumb;
+
+ return Flags;
+ }
+
+ Align getStubAlignment() override { return Align(1); }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_ARM_ADDR32:
+ case COFF::IMAGE_REL_ARM_ADDR32NB:
+ case COFF::IMAGE_REL_ARM_SECREL:
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ bool IsExtern = Section == Obj.section_end();
+ unsigned TargetSectionID = -1;
+ uint64_t TargetOffset = -1;
+
+ if (TargetName.starts_with(getImportSymbolPrefix())) {
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName, true);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ if (RelType != COFF::IMAGE_REL_ARM_SECTION)
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+
+ // We need to find out if the relocation is relative to a thumb function
+ // so that we include the ISA selection bit when resolve the relocation
+ bool IsTargetThumbFunc = isThumbFunc(Symbol, Obj, Section);
+
+ switch (RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECREL: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T:
+ case COFF::IMAGE_REL_ARM_BRANCH24T:
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ RelocationEntry RE = RelocationEntry(SectionID, Offset, RelType,
+ TargetOffset + Addend, true, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ }
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+ int ISASelectionBit = RE.IsTargetThumbFunc ? 1 : 0;
+
+ switch (RE.RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ Result |= ISASelectionBit;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result = Sections[RE.Sections.SectionA].getLoadAddress() -
+ Sections[0].getLoadAddress() + RE.Addend;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ Result |= ISASelectionBit;
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECREL Value: " << RE.Addend
+ << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ // 32-bit VA of the target applied to a contiguous MOVW+MOVT pair.
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_MOV32T"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+
+ // MOVW(T3): |11110|i|10|0|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm32 = zext imm4:i:imm3:imm8
+ // MOVT(T1): |11110|i|10|1|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm16 = imm4:i:imm3:imm8
+
+ auto EncodeImmediate = [](uint8_t *Bytes, uint16_t Immediate) {
+ Bytes[0] |= ((Immediate & 0xf000) >> 12);
+ Bytes[1] |= ((Immediate & 0x0800) >> 11);
+ Bytes[2] |= ((Immediate & 0x00ff) >> 0);
+ Bytes[3] |= (((Immediate & 0x0700) >> 8) << 4);
+ };
+
+ EncodeImmediate(&Target[0],
+ (static_cast<uint32_t>(Result) >> 00) | ISASelectionBit);
+ EncodeImmediate(&Target[4], static_cast<uint32_t>(Result) >> 16);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T: {
+ // The most significant 20-bits of the signed 21-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH20T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH24T: {
+ // The most significant 24-bits of the signed 25-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH24T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ // The most significant 24-bits of the signed 25-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BLX23T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+}
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
new file mode 100644
index 000000000000..984a8d765c84
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
@@ -0,0 +1,322 @@
+//===-- RuntimeDyldCOFFX86_64.h --- COFF/X86_64 specific code ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86_x64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFF86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFF86_64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFX86_64 : public RuntimeDyldCOFF {
+
+private:
+ // When a module is loaded we save the SectionID of the unwind
+ // sections in a table until we receive a request to register all
+ // unregisteredEH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+ SmallVector<SID, 2> RegisteredEHFrameSections;
+ uint64_t ImageBase;
+
+ // Fake an __ImageBase pointer by returning the section with the lowest adress
+ uint64_t getImageBase() {
+ if (!ImageBase) {
+ ImageBase = std::numeric_limits<uint64_t>::max();
+ for (const SectionEntry &Section : Sections)
+ // The Sections list may contain sections that weren't loaded for
+ // whatever reason: they may be debug sections, and ProcessAllSections
+ // is false, or they may be sections that contain 0 bytes. If the
+ // section isn't loaded, the load address will be 0, and it should not
+ // be included in the ImageBase calculation.
+ if (Section.getLoadAddress() != 0)
+ ImageBase = std::min(ImageBase, Section.getLoadAddress());
+ }
+ return ImageBase;
+ }
+
+ void write32BitOffset(uint8_t *Target, int64_t Addend, uint64_t Delta) {
+ uint64_t Result = Addend + Delta;
+ assert(Result <= UINT32_MAX && "Relocation overflow");
+ writeBytesUnaligned(Result, Target, 4);
+ }
+
+public:
+ RuntimeDyldCOFFX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_AMD64_ADDR64),
+ ImageBase(0) {}
+
+ Align getStubAlignment() override { return Align(1); }
+
+ // 2-byte jmp instruction + 32-bit relative address + 64-bit absolute jump
+ unsigned getMaxStubSize() const override { return 14; }
+
+ // The target location for the relocation is described by RE.SectionID and
+ // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+ // SectionEntry has three members describing its location.
+ // SectionEntry::Address is the address at which the section has been loaded
+ // into memory in the current (host) process. SectionEntry::LoadAddress is
+ // the address that the section will have in the target process.
+ // SectionEntry::ObjAddress is the address of the bits for this section in the
+ // original emitted object image (also in the current address space).
+ //
+ // Relocations will be applied as if the section were loaded at
+ // SectionEntry::LoadAddress, but they will be applied at an address based
+ // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer
+ // to Target memory contents if they are required for value calculations.
+ //
+ // The Value parameter here is the load address of the symbol for the
+ // relocation to be applied. For relocations which refer to symbols in the
+ // current object Value will be the LoadAddress of the section in which
+ // the symbol resides (RE.Addend provides additional information about the
+ // symbol location). For external symbols, Value will be the address of the
+ // symbol in the target address space.
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ // Delta is the distance from the start of the reloc to the end of the
+ // instruction with the reloc.
+ uint64_t Delta = 4 + (RE.RelType - COFF::IMAGE_REL_AMD64_REL32);
+ Value -= FinalAddress + Delta;
+ uint64_t Result = Value + RE.Addend;
+ assert(((int64_t)Result <= INT32_MAX) && "Relocation overflow");
+ assert(((int64_t)Result >= INT32_MIN) && "Relocation underflow");
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ // ADDR32NB requires an offset less than 2GB from 'ImageBase'.
+ // The MemoryManager can make sure this is always true by forcing the
+ // memory layout to be: CodeSection < ReadOnlySection < ReadWriteSection.
+ const uint64_t ImageBase = getImageBase();
+ if (Value < ImageBase || ((Value - ImageBase) > UINT32_MAX))
+ report_fatal_error("IMAGE_REL_AMD64_ADDR32NB relocation requires an "
+ "ordered section layout");
+ else {
+ write32BitOffset(Target, RE.Addend, Value - ImageBase);
+ }
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ writeBytesUnaligned(Value + RE.Addend, Target, 8);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_SECREL: {
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX && "Relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN && "Relocation underflow");
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_SECTION: {
+ assert(static_cast<int16_t>(RE.SectionID) <= INT16_MAX && "Relocation overflow");
+ assert(static_cast<int16_t>(RE.SectionID) >= INT16_MIN && "Relocation underflow");
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ }
+ }
+
+ std::tuple<uint64_t, uint64_t, uint64_t>
+ generateRelocationStub(unsigned SectionID, StringRef TargetName,
+ uint64_t Offset, uint64_t RelType, uint64_t Addend,
+ StubMap &Stubs) {
+ uintptr_t StubOffset;
+ SectionEntry &Section = Sections[SectionID];
+
+ RelocationValueRef OriginalRelValueRef;
+ OriginalRelValueRef.SectionID = SectionID;
+ OriginalRelValueRef.Offset = Offset;
+ OriginalRelValueRef.Addend = Addend;
+ OriginalRelValueRef.SymbolName = TargetName.data();
+
+ auto Stub = Stubs.find(OriginalRelValueRef);
+ if (Stub == Stubs.end()) {
+ LLVM_DEBUG(dbgs() << " Create a new stub function for "
+ << TargetName.data() << "\n");
+
+ StubOffset = Section.getStubOffset();
+ Stubs[OriginalRelValueRef] = StubOffset;
+ createStubFunction(Section.getAddressWithOffset(StubOffset));
+ Section.advanceStubOffset(getMaxStubSize());
+ } else {
+ LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+ << "\n");
+ StubOffset = Stub->second;
+ }
+
+ // FIXME: If RelType == COFF::IMAGE_REL_AMD64_ADDR32NB we should be able
+ // to ignore the __ImageBase requirement and just forward to the stub
+ // directly as an offset of this section:
+ // write32BitOffset(Section.getAddressWithOffset(Offset), 0, StubOffset);
+ // .xdata exception handler's aren't having this though.
+
+ // Resolve original relocation to stub function.
+ const RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));
+
+ // adjust relocation info so resolution writes to the stub function
+ Addend = 0;
+ Offset = StubOffset + 6;
+ RelType = COFF::IMAGE_REL_AMD64_ADDR64;
+
+ return std::make_tuple(Offset, RelType, Addend);
+ }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ // If possible, find the symbol referred to in the relocation,
+ // and the section that contains it.
+ object::symbol_iterator Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+ auto SectionOrError = Symbol->getSection();
+ if (!SectionOrError)
+ return SectionOrError.takeError();
+ object::section_iterator SecI = *SectionOrError;
+ // If there is no section, this must be an external reference.
+ bool IsExtern = SecI == Obj.section_end();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+ uint64_t Addend = 0;
+ SectionEntry &Section = Sections[SectionID];
+ uintptr_t ObjTarget = Section.getObjAddress() + Offset;
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+
+ StringRef TargetName = *TargetNameOrErr;
+ unsigned TargetSectionID = 0;
+ uint64_t TargetOffset = 0;
+
+ if (TargetName.starts_with(getImportSymbolPrefix())) {
+ assert(IsExtern && "DLLImport not marked extern?");
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *SecI, SecI->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ switch (RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5:
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 4);
+
+ if (IsExtern)
+ std::tie(Offset, RelType, Addend) = generateRelocationStub(
+ SectionID, TargetName, Offset, RelType, Addend, Stubs);
+
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 8);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelType << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void registerEHFrames() override {
+ for (auto const &EHFrameSID : UnregisteredEHFrameSections) {
+ uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+ uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+ size_t EHFrameSize = Sections[EHFrameSID].getSize();
+ MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+ RegisteredEHFrameSections.push_back(EHFrameSID);
+ }
+ UnregisteredEHFrameSections.clear();
+ }
+
+ Error finalizeLoad(const object::ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override {
+ // Look for and record the EH frame section IDs.
+ for (const auto &SectionPair : SectionMap) {
+ const object::SectionRef &Section = SectionPair.first;
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+
+ // Note unwind info is stored in .pdata but often points to .xdata
+ // with an IMAGE_REL_AMD64_ADDR32NB relocation. Using a memory manager
+ // that keeps sections ordered in relation to __ImageBase is necessary.
+ if ((*NameOrErr) == ".pdata")
+ UnregisteredEHFrameSections.push_back(SectionPair.second);
+ }
+ return Error::success();
+ }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
new file mode 100644
index 000000000000..17cbe612fb43
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
@@ -0,0 +1,320 @@
+//===-- RuntimeDyldELFMips.cpp ---- ELF/Mips specific code. -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELFMips.h"
+#include "llvm/BinaryFormat/ELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+void RuntimeDyldELFMips::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ if (IsMipsO32ABI)
+ resolveMIPSO32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend);
+ else if (IsMipsN32ABI) {
+ resolveMIPSN32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ } else if (IsMipsN64ABI)
+ resolveMIPSN64Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ else
+ llvm_unreachable("Mips ABI not handled");
+}
+
+uint64_t RuntimeDyldELFMips::evaluateRelocation(const RelocationEntry &RE,
+ uint64_t Value,
+ uint64_t Addend) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ Value = evaluateMIPS64Relocation(Section, RE.Offset, Value, RE.RelType,
+ Addend, RE.SymOffset, RE.SectionID);
+ return Value;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+void RuntimeDyldELFMips::applyRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ applyMIPSRelocation(Section.getAddressWithOffset(RE.Offset), Value,
+ RE.RelType);
+ return;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+int64_t
+RuntimeDyldELFMips::evaluateMIPS32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS32Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ return Value;
+ case ELF::R_MIPS_32:
+ return Value;
+ case ELF::R_MIPS_26:
+ return Value >> 2;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return (Value + 0x8000) >> 16;
+ case ELF::R_MIPS_LO16:
+ return Value;
+ case ELF::R_MIPS_PC32: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ case ELF::R_MIPS_PC16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - (FinalAddress & ~0x3)) >> 2;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress + 0x8000) >> 16;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ }
+}
+
+int64_t RuntimeDyldELFMips::evaluateMIPS64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend)
+ << " Offset: " << format("%llx" PRIx64, Offset)
+ << " SID: " << format("%d", SectionID)
+ << " SymOffset: " << format("%x", SymOffset) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+ break;
+ case ELF::R_MIPS_JALR:
+ case ELF::R_MIPS_NONE:
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_64:
+ return Value + Addend;
+ case ELF::R_MIPS_26:
+ return ((Value + Addend) >> 2) & 0x3ffffff;
+ case ELF::R_MIPS_GPREL16: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_SUB:
+ return Value - Addend;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return ((Value + Addend + 0x8000) >> 16) & 0xffff;
+ case ELF::R_MIPS_LO16:
+ return (Value + Addend) & 0xffff;
+ case ELF::R_MIPS_HIGHER:
+ return ((Value + Addend + 0x80008000) >> 32) & 0xffff;
+ case ELF::R_MIPS_HIGHEST:
+ return ((Value + Addend + 0x800080008000) >> 48) & 0xffff;
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE: {
+ uint8_t *LocalGOTAddr =
+ getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
+ uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, getGOTEntrySize());
+
+ Value += Addend;
+ if (Type == ELF::R_MIPS_GOT_PAGE)
+ Value = (Value + 0x8000) & ~0xffff;
+
+ if (GOTEntry)
+ assert(GOTEntry == Value &&
+ "GOT entry has two different addresses.");
+ else
+ writeBytesUnaligned(Value, LocalGOTAddr, getGOTEntrySize());
+
+ return (SymOffset - 0x7ff0) & 0xffff;
+ }
+ case ELF::R_MIPS_GOT_OFST: {
+ int64_t page = (Value + Addend + 0x8000) & ~0xffff;
+ return (Value + Addend - page) & 0xffff;
+ }
+ case ELF::R_MIPS_GPREL32: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_PC16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
+ }
+ case ELF::R_MIPS_PC32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value + Addend - FinalAddress;
+ }
+ case ELF::R_MIPS_PC18_S3: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value + Addend - FinalAddress) & 0xffff;
+ }
+ }
+ return 0;
+}
+
+void RuntimeDyldELFMips::applyMIPSRelocation(uint8_t *TargetPtr, int64_t Value,
+ uint32_t Type) {
+ uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ break;
+ case ELF::R_MIPS_GPREL16:
+ case ELF::R_MIPS_HI16:
+ case ELF::R_MIPS_LO16:
+ case ELF::R_MIPS_HIGHER:
+ case ELF::R_MIPS_HIGHEST:
+ case ELF::R_MIPS_PC16:
+ case ELF::R_MIPS_PCHI16:
+ case ELF::R_MIPS_PCLO16:
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE:
+ case ELF::R_MIPS_GOT_OFST:
+ Insn = (Insn & 0xffff0000) | (Value & 0x0000ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC18_S3:
+ Insn = (Insn & 0xfffc0000) | (Value & 0x0003ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC19_S2:
+ Insn = (Insn & 0xfff80000) | (Value & 0x0007ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC21_S2:
+ Insn = (Insn & 0xffe00000) | (Value & 0x001fffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_26:
+ case ELF::R_MIPS_PC26_S2:
+ Insn = (Insn & 0xfc000000) | (Value & 0x03ffffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_GPREL32:
+ case ELF::R_MIPS_PC32:
+ writeBytesUnaligned(Value & 0xffffffff, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_64:
+ case ELF::R_MIPS_SUB:
+ writeBytesUnaligned(Value, TargetPtr, 8);
+ break;
+ }
+}
+
+void RuntimeDyldELFMips::resolveMIPSN32Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ int64_t CalculatedValue = evaluateMIPS64Relocation(
+ Section, Offset, Value, Type, Addend, SymOffset, SectionID);
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ Type);
+}
+
+void RuntimeDyldELFMips::resolveMIPSN64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ uint32_t r_type = Type & 0xff;
+ uint32_t r_type2 = (Type >> 8) & 0xff;
+ uint32_t r_type3 = (Type >> 16) & 0xff;
+
+ // RelType records which relocation type is currently being
+ // applied to the target location.
+ uint32_t RelType = r_type;
+ int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
+ RelType, Addend,
+ SymOffset, SectionID);
+ if (r_type2 != ELF::R_MIPS_NONE) {
+ RelType = r_type2;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ if (r_type3 != ELF::R_MIPS_NONE) {
+ RelType = r_type3;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ RelType);
+}
+
+void RuntimeDyldELFMips::resolveMIPSO32Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint32_t Value, uint32_t Type,
+ int32_t Addend) {
+ uint8_t *TargetPtr = Section.getAddressWithOffset(Offset);
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveMIPSO32Relocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset) << " FinalAddress: "
+ << format("%p", Section.getLoadAddressWithOffset(Offset))
+ << " Value: " << format("%x", Value) << " Type: "
+ << format("%x", Type) << " Addend: " << format("%x", Addend)
+ << " SymOffset: " << format("%x", Offset) << "\n");
+
+ Value = evaluateMIPS32Relocation(Section, Offset, Value, Type);
+
+ applyMIPSRelocation(TargetPtr, Value, Type);
+}
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
new file mode 100644
index 000000000000..f03acb41d670
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
@@ -0,0 +1,66 @@
+//===-- RuntimeDyldELFMips.h ---- ELF/Mips specific code. -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+
+#include "../RuntimeDyldELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldELFMips : public RuntimeDyldELF {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldELFMips(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldELF(MM, Resolver) {}
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+
+protected:
+ void resolveMIPSO32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+ void resolveMIPSN32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+ void resolveMIPSN64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+private:
+ /// An object file specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address to apply the relocation action
+ uint64_t evaluateRelocation(const RelocationEntry &RE, uint64_t Value,
+ uint64_t Addend);
+
+ /// An object file specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address to apply the relocation action
+ void applyRelocation(const RelocationEntry &RE, uint64_t Value);
+
+ int64_t evaluateMIPS32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type);
+ int64_t evaluateMIPS64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+ void applyMIPSRelocation(uint8_t *TargetPtr, int64_t CalculatedValue,
+ uint32_t Type);
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 000000000000..701cc3a88149
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,541 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ Align getStubAlignment() override { return Align(8); }
+
+ /// Extract the addend encoded in the instruction / memory location.
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ unsigned NumBytes = 1 << RE.Size;
+ int64_t Addend = 0;
+ // Verify that the relocation has the correct size and alignment.
+ switch (RE.RelType) {
+ default: {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Unsupported relocation type: "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ if (NumBytes != 4 && NumBytes != 8) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Invalid relocation size for relocation "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
+ else
+ Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ // Verify that the relocation points to a B/BL instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Get the 26 bit addend encoded in the branch instruction and sign-extend
+ // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
+ // (<< 2).
+ Addend = (*p & 0x03FFFFFF) << 2;
+ Addend = SignExtend64(Addend, 28);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Get the 21 bit addend encoded in the adrp instruction and sign-extend
+ // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
+ // therefore implicit (<< 12).
+ Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
+ Addend = SignExtend64(Addend, 33);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ (void)p;
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ [[fallthrough]];
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Get the 12 bit addend encoded in the instruction.
+ Addend = (*p & 0x003FFC00) >> 10;
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ if (ImplicitShift == 0) {
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000)
+ ImplicitShift = 4;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend <<= ImplicitShift;
+ break;
+ }
+ }
+ return Addend;
+ }
+
+ /// Encode the addend into the instruction / memory location.
+ void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ MachO::RelocationInfoType RelType, int64_t Addend) const {
+ // Verify that the relocation has the correct alignment.
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
+ else
+ *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ // Verify that the relocation points to the expected branch instruction.
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Verify addend value.
+ assert((Addend & 0x3) == 0 && "Branch target is not aligned");
+ assert(isInt<28>(Addend) && "Branch target is out of range.");
+
+ // Encode the addend as 26 bit immediate in the branch instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Check that the addend fits into 21 bits (+ 12 lower bits).
+ assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
+ assert(isInt<33>(Addend) && "Invalid page reloc value.");
+
+ // Encode the addend into the instruction.
+ uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
+ uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ (void)p;
+ [[fallthrough]];
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction and verify alignment.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Addend & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+ case 1:
+ assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+ break;
+ case 2:
+ assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+ break;
+ case 3:
+ assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+ break;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend >>= ImplicitShift;
+ assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
+
+ // Encode the addend into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
+ break;
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return make_error<RuntimeDyldError>("Scattered relocations not supported "
+ "for MachO AArch64");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit addend to 64 bits.
+ ExplicitAddend = SignExtend64(RawAddend, 24);
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ bool Valid =
+ (RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel);
+ if (!Valid)
+ return make_error<StringError>("ARM64_RELOC_POINTER_TO_GOT supports "
+ "32-bit pc-rel or 64-bit absolute only",
+ inconvertibleErrorCode());
+ }
+
+ if (auto Addend = decodeAddend(RE))
+ RE.Addend = *Addend;
+ else
+ return Addend.takeError();
+
+ assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
+ "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
+ if (ExplicitAddend)
+ RE.Addend = ExplicitAddend;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ // We'll take care of the offset in processGOTRelocation.
+ Value.Offset = 0;
+ } else if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12 ||
+ RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ MachO::RelocationInfoType RelType =
+ static_cast<MachO::RelocationInfoType>(RE.RelType);
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: {
+ assert(((RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel)) &&
+ "ARM64_RELOC_POINTER_TO_GOT only supports 32-bit pc-rel or 64-bit "
+ "absolute");
+ // Addend is the GOT entry address and RE.Offset the target of the
+ // relocation.
+ uint64_t Result =
+ RE.IsPCRel ? (RE.Addend - RE.Offset) : (Value + RE.Addend);
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Result);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not yet implemented!");
+ case MachO::ARM64_RELOC_ADDEND:
+ llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
+ "processRelocationRef!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert((RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT &&
+ (RE.Size == 2 || RE.Size == 3)) ||
+ RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ int64_t Offset;
+ if (i != Stubs.end())
+ Offset = static_cast<int64_t>(i->second);
+ else {
+ // FIXME: There must be a better way to do this than to check and fix the
+ // alignment every time!!!
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment().value();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(isAligned(getStubAlignment(), StubAddress) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ Offset = static_cast<int64_t>(StubOffset);
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
+ RE.IsPCRel, RE.Size);
+ addRelocationForSection(TargetRE, RE.SectionID);
+ }
+
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+ auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+ unsigned SectionBID = SubtrahendI->second.getSectionID();
+ uint64_t SectionBOffset = SubtrahendI->second.getOffset();
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ ++RelI;
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+ auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+ unsigned SectionAID = MinuendI->second.getSectionID();
+ uint64_t SectionAOffset = MinuendI->second.getOffset();
+
+ RelocationEntry R(SectionID, Offset, MachO::ARM64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+ SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+ false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ static const char *getRelocName(uint32_t RelocType) {
+ switch (RelocType) {
+ case MachO::ARM64_RELOC_UNSIGNED: return "ARM64_RELOC_UNSIGNED";
+ case MachO::ARM64_RELOC_SUBTRACTOR: return "ARM64_RELOC_SUBTRACTOR";
+ case MachO::ARM64_RELOC_BRANCH26: return "ARM64_RELOC_BRANCH26";
+ case MachO::ARM64_RELOC_PAGE21: return "ARM64_RELOC_PAGE21";
+ case MachO::ARM64_RELOC_PAGEOFF12: return "ARM64_RELOC_PAGEOFF12";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: return "ARM64_RELOC_GOT_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: return "ARM64_RELOC_GOT_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: return "ARM64_RELOC_POINTER_TO_GOT";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21: return "ARM64_RELOC_TLVP_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12: return "ARM64_RELOC_TLVP_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_ADDEND: return "ARM64_RELOC_ADDEND";
+ }
+ return "Unrecognized arm64 addend";
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 000000000000..79b558eb7796
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,431 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+private:
+ typedef RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> ParentT;
+
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOARM(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ Align getStubAlignment() override { return Align(4); }
+
+ Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
+ auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
+ if (!Flags)
+ return Flags.takeError();
+ Flags->getTargetFlags() = ARMJITSymbolFlags::fromObjectSymbol(SR);
+ return Flags;
+ }
+
+ uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+ JITSymbolFlags Flags) const override {
+ if (Flags.getTargetFlags() & ARMJITSymbolFlags::Thumb)
+ Addr |= 0x1;
+ return Addr;
+ }
+
+ bool isAddrTargetThumb(unsigned SectionID, uint64_t Offset) {
+ auto TargetObjAddr = Sections[SectionID].getObjAddress() + Offset;
+ for (auto &KV : GlobalSymbolTable) {
+ auto &Entry = KV.second;
+ auto SymbolObjAddr =
+ Sections[Entry.getSectionID()].getObjAddress() + Entry.getOffset();
+ if (TargetObjAddr == SymbolObjAddr)
+ return (Entry.getFlags().getTargetFlags() & ARMJITSymbolFlags::Thumb);
+ }
+ return false;
+ }
+
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ default:
+ return memcpyAddend(RE);
+ case MachO::ARM_RELOC_BR24: {
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ Temp &= 0x00ffffff; // Mask out the opcode.
+ // Now we've got the shifted immediate, shift by 2, sign extend and ret.
+ return SignExtend32<26>(Temp << 2);
+ }
+
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ // This is a pair of instructions whose operands combine to provide 22
+ // bits of displacement:
+ // Encoding for high bits 1111 0XXX XXXX XXXX
+ // Encoding for low bits 1111 1XXX XXXX XXXX
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ if ((HighInsn & 0xf800) != 0xf000)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 high bits)",
+ inconvertibleErrorCode());
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ if ((LowInsn & 0xf800) != 0xf800)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 low bits)",
+ inconvertibleErrorCode());
+
+ return SignExtend64<23>(((HighInsn & 0x7ff) << 12) |
+ ((LowInsn & 0x7ff) << 1));
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ // Set to true for thumb functions in this (or previous) TUs.
+ // Will be used to set the TargetIsThumbFunc member on the relocation entry.
+ bool TargetIsLocalThumbFunc = false;
+ if (Obj.getPlainRelocationExternal(RelInfo)) {
+ auto Symbol = RelI->getSymbol();
+ StringRef TargetName;
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+
+ // If the target is external but the value doesn't have a name then we've
+ // converted the value to a section/offset pair, but we still need to set
+      // the IsTargetThumbFunc bit, so look the value up in the global symbol table.
+ auto EntryItr = GlobalSymbolTable.find(TargetName);
+ if (EntryItr != GlobalSymbolTable.end()) {
+ TargetIsLocalThumbFunc =
+ EntryItr->second.getFlags().getTargetFlags() &
+ ARMJITSymbolFlags::Thumb;
+ }
+ }
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::ARM_RELOC_HALF_SECTDIFF)
+ return processHALFSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID,
+ TargetIsLocalThumbFunc);
+ else
+ return ++RelI;
+ }
+
+ // Validate the relocation type.
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_LOCAL_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_THUMB_32BIT_BRANCH);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_HALF);
+ default:
+ if (RelType > MachO::ARM_RELOC_HALF_SECTDIFF)
+ return make_error<RuntimeDyldError>(("MachO ARM relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ if (auto AddendOrErr = decodeAddend(RE))
+ RE.Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+ RE.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // If this is a branch from a thumb function (BR22) then make sure we mark
+ // the value as being a thumb stub: we don't want to mix it up with an ARM
+ // stub targeting the same function.
+ if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ Value.IsStubThumb = true;
+
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI,
+ (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8);
+
+ // If this is a non-external branch target check whether Value points to a
+ // thumb func.
+ if (!Value.SymbolName && (RelType == MachO::ARM_RELOC_BR24 ||
+ RelType == MachO::ARM_THUMB_RELOC_BR22))
+ RE.IsTargetThumbFunc = isAddrTargetThumb(Value.SectionID, Value.Offset);
+
+ if (RE.RelType == MachO::ARM_RELOC_BR24 ||
+ RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+      // (4 bytes in Thumb mode, 8 bytes in ARM mode).
+ Value -= (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8;
+ }
+
+ switch (RE.RelType) {
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ Value += RE.Addend;
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ assert((HighInsn & 0xf800) == 0xf000 &&
+ "Unrecognized thumb branch encoding (BR22 high bits)");
+ HighInsn = (HighInsn & 0xf800) | ((Value >> 12) & 0x7ff);
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ assert((LowInsn & 0xf800) == 0xf800 &&
+ "Unrecognized thumb branch encoding (BR22 low bits)");
+ LowInsn = (LowInsn & 0xf800) | ((Value >> 1) & 0x7ff);
+
+ writeBytesUnaligned(HighInsn, LocalAddress, 2);
+ writeBytesUnaligned(LowInsn, LocalAddress + 2, 2);
+ break;
+ }
+
+ case MachO::ARM_RELOC_VANILLA:
+ if (RE.IsTargetThumbFunc)
+ Value |= 0x01;
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ Value += RE.Addend;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ writeBytesUnaligned((Temp & ~0xffffff) | FinalValue, LocalAddress, 4);
+
+ break;
+ }
+ case MachO::ARM_RELOC_HALF_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected HALFSECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ if (RE.Size & 0x1) // :upper16:
+ Value = (Value >> 16);
+
+ bool IsThumb = RE.Size & 0x2;
+
+ Value &= 0xffff;
+
+ uint32_t Insn = readBytesUnaligned(LocalAddress, 4);
+
+ if (IsThumb)
+ Insn = (Insn & 0x8f00fbf0) | ((Value & 0xf000) >> 12) |
+ ((Value & 0x0800) >> 1) | ((Value & 0x0700) << 20) |
+ ((Value & 0x00ff) << 16);
+ else
+ Insn = (Insn & 0xfff0f000) | ((Value & 0xf000) << 4) | (Value & 0x0fff);
+ writeBytesUnaligned(Insn, LocalAddress, 4);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Invalid relocation type");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__nl_symbol_ptr")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+ // This is an ARM branch relocation, need to use a stub function.
+    // Look for an existing stub.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ // Create a new stub function.
+ assert(Section.getStubOffset() % 4 == 0 && "Misaligned stub");
+ Stubs[Value] = Section.getStubOffset();
+ uint32_t StubOpcode = 0;
+ if (RE.RelType == MachO::ARM_RELOC_BR24)
+ StubOpcode = 0xe51ff004; // ldr pc, [pc, #-4]
+ else if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ StubOpcode = 0xf000f8df; // ldr pc, [pc]
+ else
+ llvm_unreachable("Unrecognized relocation");
+ Addr = Section.getAddressWithOffset(Section.getStubOffset());
+ writeBytesUnaligned(StubOpcode, Addr, 4);
+ uint8_t *StubTargetAddr = Addr + 4;
+ RelocationEntry StubRE(
+ RE.SectionID, StubTargetAddr - Section.getAddress(),
+ MachO::GENERIC_RELOC_VANILLA, Value.Offset, false, 2);
+ StubRE.IsTargetThumbFunc = RE.IsTargetThumbFunc;
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processHALFSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseTObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &MachO =
+ static_cast<const MachOObjectFile&>(BaseTObj);
+ MachO::any_relocation_info RE =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+
+ // For a half-diff relocation the length bits actually record whether this
+ // is a movw/movt, and whether this is arm or thumb.
+ // Bit 0 indicates movw (b0 == 0) or movt (b0 == 1).
+ // Bit 1 indicates arm (b1 == 0) or thumb (b1 == 1).
+ unsigned HalfDiffKindBits = MachO.getAnyRelocationLength(RE);
+ bool IsThumb = HalfDiffKindBits & 0x2;
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO.getAnyRelocationType(RE);
+ bool IsPCRel = MachO.getAnyRelocationPCRel(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ int64_t Immediate = readBytesUnaligned(LocalAddress, 4); // Copy the whole instruction out.
+
+ if (IsThumb)
+ Immediate = ((Immediate & 0x0000000f) << 12) |
+ ((Immediate & 0x00000400) << 1) |
+ ((Immediate & 0x70000000) >> 20) |
+ ((Immediate & 0x00ff0000) >> 16);
+ else
+ Immediate = ((Immediate >> 4) & 0xf000) | (Immediate & 0xfff);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t AddrA = MachO.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(MachO, AddrA);
+ assert(SAI != MachO.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(MachO, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = MachO.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(MachO, AddrB);
+ assert(SBI != MachO.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(MachO, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
+ uint32_t OtherHalf = MachO.getAnyRelocationAddress(RE2) & 0xffff;
+ unsigned Shift = (HalfDiffKindBits & 0x1) ? 16 : 0;
+ uint32_t FullImmVal = (Immediate << Shift) | (OtherHalf << (16 - Shift));
+ int64_t Addend = FullImmVal - (AddrA - AddrB);
+
+ // addend = Encoded - Expected
+ // = Encoded - (AddrA - AddrB)
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ HalfDiffKindBits);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 000000000000..a983e22671b2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,250 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOI386
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOI386(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 0; }
+
+ Align getStubAlignment() override { return Align(1); }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+ RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+ return processSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID);
+ return make_error<RuntimeDyldError>(("Unhandled I386 scattered relocation "
+ "type: " + Twine(RelType)).str());
+ }
+
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_TLV);
+ default:
+ if (RelType > MachO::GENERIC_RELOC_TLV)
+ return make_error<RuntimeDyldError>(("MachO I386 relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ RE.Addend = memcpyAddend(RE);
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // Addends for external, PC-rel relocations on i386 point back to the zero
+ // offset. Calculate the final offset from the relocation target instead.
+ // This allows us to use the same logic for both external and internal
+ // relocations in resolveI386RelocationRef.
+ // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ // if (IsExtern && RE.IsPCRel) {
+ // uint64_t RelocAddr = 0;
+ // RelI->getAddress(RelocAddr);
+ // Value.Addend += RelocAddr + 4;
+ // }
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+ }
+
+ switch (RE.RelType) {
+ case MachO::GENERIC_RELOC_VANILLA:
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::GENERIC_RELOC_SECTDIFF:
+ case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__jump_table")
+ return populateJumpTable(cast<MachOObjectFile>(Obj), Section, SectionID);
+ else if (Name == "__pointers")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+ Expected<relocation_iterator>
+ processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = Obj.getAnyRelocationType(RE);
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ uint64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ uint32_t AddrA = Obj.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(Obj, AddrA);
+ assert(SAI != Obj.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = Obj.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(Obj, AddrB);
+ assert(SBI != Obj.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
+ // Compute the addend 'C' from the original expression 'A - B + C'.
+ Addend -= AddrA - AddrB;
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset,
+ IsPCRel, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ // Populate stubs in __jump_table section.
+ Error populateJumpTable(const MachOObjectFile &Obj,
+ const SectionRef &JTSection,
+ unsigned JTSectionID) {
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+ uint32_t JTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ unsigned JTEntrySize = Sec32.reserved2;
+ unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+ uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+ unsigned JTEntryOffset = 0;
+
+ if (JTSectionSize % JTEntrySize != 0)
+ return make_error<RuntimeDyldError>("Jump-table section does not contain "
+ "a whole number of stubs?");
+
+ for (unsigned i = 0; i < NumJTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ Expected<StringRef> IndirectSymbolName = SI->getName();
+ if (!IndirectSymbolName)
+ return IndirectSymbolName.takeError();
+ uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+ createStubFunction(JTEntryAddr);
+ RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+ MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+ addRelocationForSymbol(RE, *IndirectSymbolName);
+ JTEntryOffset += JTEntrySize;
+ }
+
+ return Error::success();
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 000000000000..bd0d72f9e117
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,238 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOX86_64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ Align getStubAlignment() override { return Align(8); }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (RelType == MachO::X86_64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on X86_64");
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ RE.Addend = memcpyAddend(RE);
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::X86_64_RELOC_TLV);
+ default:
+ if (RelType > MachO::X86_64_RELOC_TLV)
+ return make_error<RuntimeDyldError>(("MachO X86_64 relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+ RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ // FIXME: It seems this value needs to be adjusted by 4 for an effective
+ // PC address. Is that expected? Only for branches, perhaps?
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress + 4;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::X86_64_RELOC_SIGNED_1:
+ case MachO::X86_64_RELOC_SIGNED_2:
+ case MachO::X86_64_RELOC_SIGNED_4:
+ case MachO::X86_64_RELOC_SIGNED:
+ case MachO::X86_64_RELOC_UNSIGNED:
+ case MachO::X86_64_RELOC_BRANCH:
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::X86_64_RELOC_SUBTRACTOR: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ SectionEntry &Section = Sections[RE.SectionID];
+ assert(RE.IsPCRel);
+ assert(RE.Size == 2);
+ Value.Offset -= RE.Addend;
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *GOTEntry = Section.getAddressWithOffset(Section.getStubOffset());
+ RelocationEntry GOTRE(RE.SectionID, Section.getStubOffset(),
+ MachO::X86_64_RELOC_UNSIGNED, Value.Offset, false,
+ 3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(8);
+ Addr = GOTEntry;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+ MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const MachOObjectFile &BaseObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObj);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ unsigned SectionBID = ~0U;
+ uint64_t SectionBOffset = 0;
+
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ bool AIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+
+ if (AIsExternal) {
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+ auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+ SectionBID = SubtrahendI->second.getSectionID();
+ SectionBOffset = SubtrahendI->second.getOffset();
+ } else {
+ SectionRef SecB = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = SecB.isText();
+ Expected<unsigned> SectionBIDOrErr =
+ findOrEmitSection(Obj, SecB, IsCode, ObjSectionToID);
+ if (!SectionBIDOrErr)
+ return SectionBIDOrErr.takeError();
+ SectionBID = *SectionBIDOrErr;
+ Addend += SecB.getAddress();
+ }
+
+ ++RelI;
+
+ unsigned SectionAID = ~0U;
+ uint64_t SectionAOffset = 0;
+
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ bool BIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+ if (BIsExternal) {
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+ auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+ SectionAID = MinuendI->second.getSectionID();
+ SectionAOffset = MinuendI->second.getOffset();
+ } else {
+ SectionRef SecA = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = SecA.isText();
+ Expected<unsigned> SectionAIDOrErr =
+ findOrEmitSection(Obj, SecA, IsCode, ObjSectionToID);
+ if (!SectionAIDOrErr)
+ return SectionAIDOrErr.takeError();
+ SectionAID = *SectionAIDOrErr;
+ Addend -= SecA.getAddress();
+ }
+
+ RelocationEntry R(SectionID, Offset, MachO::X86_64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+ SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+ false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
new file mode 100644
index 000000000000..436888730bfb
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -0,0 +1,276 @@
+//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the section-based memory manager used by the MCJIT
+// execution engine and RuntimeDyld
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+
+uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName,
+ bool IsReadOnly) {
+ if (IsReadOnly)
+ return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
+ Size, Alignment);
+ return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
+ Alignment);
+}
+
+uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) {
+ return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
+ Alignment);
+}
+
// Allocate Size bytes with the given Alignment from the memory group
// matching Purpose. The block is handed out read-write; final permissions
// are applied later by finalizeMemory(). Returns nullptr on mapping failure.
uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  // A zero alignment means "don't care"; default to 16 bytes.
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  // Worst-case block size: the payload rounded up to Alignment, plus one
  // extra alignment unit so an aligned start address always fits inside.
  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  // Select the memory group matching the requested purpose.
  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        // (it needs its permissions applied at finalize time).
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one.
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        // This free block is the tail of an already-pending block; grow that
        // pending block so it also covers the new allocation.
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block.
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // been initialized.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory so the destructor can release it.
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  // NOTE(review): EndOfBlock - Addr - Size is uintptr_t narrowed to unsigned;
  // harmless unless a single surplus ever exceeds 4 GiB — confirm acceptable.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}
+
+bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
+ // FIXME: Should in-progress permissions be reverted if an error occurs?
+ std::error_code ec;
+
+ // Make code memory executable.
+ ec = applyMemoryGroupPermissions(CodeMem,
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ if (ec) {
+ if (ErrMsg) {
+ *ErrMsg = ec.message();
+ }
+ return true;
+ }
+
+ // Make read-only data memory read-only.
+ ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
+ if (ec) {
+ if (ErrMsg) {
+ *ErrMsg = ec.message();
+ }
+ return true;
+ }
+
+ // Read-write data memory already has the correct permissions
+
+ // Some platforms with separate data cache and instruction cache require
+ // explicit cache flush, otherwise JIT code manipulations (like resolved
+ // relocations) will get to the data cache but not to the instruction cache.
+ invalidateInstructionCache();
+
+ return false;
+}
+
+static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
+ static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+ size_t StartOverlap =
+ (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
+
+ size_t TrimmedSize = M.allocatedSize();
+ TrimmedSize -= StartOverlap;
+ TrimmedSize -= TrimmedSize % PageSize;
+
+ sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
+ TrimmedSize);
+
+ assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
+ assert((Trimmed.allocatedSize() % PageSize) == 0);
+ assert(M.base() <= Trimmed.base() &&
+ Trimmed.allocatedSize() <= M.allocatedSize());
+
+ return Trimmed;
+}
+
+std::error_code
+SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions) {
+ for (sys::MemoryBlock &MB : MemGroup.PendingMem)
+ if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
+ return EC;
+
+ MemGroup.PendingMem.clear();
+
+ // Now go through free blocks and trim any of them that don't span the entire
+ // page because one of the pending blocks may have overlapped it.
+ for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+ FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
+ // We cleared the PendingMem list, so all these pointers are now invalid
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+ }
+
+ // Remove all blocks which are now empty
+ erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
+ return FreeMB.Free.allocatedSize() == 0;
+ });
+
+ return std::error_code();
+}
+
+void SectionMemoryManager::invalidateInstructionCache() {
+ for (sys::MemoryBlock &Block : CodeMem.PendingMem)
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
+}
+
// Release every memory region this manager ever mapped, across all three
// memory groups, via the (owned or borrowed) MemoryMapper.
SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper->releaseMappedMemory(Block);
  }
}
+
// Out-of-line defaulted destructor anchors MemoryMapper's vtable in this TU.
SectionMemoryManager::MemoryMapper::~MemoryMapper() = default;
+
// Out-of-line virtual method to pin SectionMemoryManager's vtable here.
void SectionMemoryManager::anchor() {}
+
+namespace {
+// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
+// into sys::Memory.
+class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
+public:
+ sys::MemoryBlock
+ allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
+ size_t NumBytes, const sys::MemoryBlock *const NearBlock,
+ unsigned Flags, std::error_code &EC) override {
+ return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
+ }
+
+ std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
+ unsigned Flags) override {
+ return sys::Memory::protectMappedMemory(Block, Flags);
+ }
+
+ std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
+ return sys::Memory::releaseMappedMemory(M);
+ }
+};
+} // namespace
+
+SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM)
+ : MMapper(UnownedMM), OwnedMMapper(nullptr) {
+ if (!MMapper) {
+ OwnedMMapper = std::make_unique<DefaultMMapper>();
+ MMapper = OwnedMMapper.get();
+ }
+}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/TargetSelect.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/TargetSelect.cpp
new file mode 100644
index 000000000000..72fb16fbf203
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -0,0 +1,95 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetRegistry for the appropriate target to use, and
+// allows the user to specify a specific one on the commandline with -march=x,
+// -mcpu=y, and -mattr=a,-b,+c. Clients should initialize targets prior to
+// calling selectTarget().
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace llvm;
+
+TargetMachine *EngineBuilder::selectTarget() {
+ Triple TT;
+
+ // MCJIT can generate code for remote targets, but the old JIT and Interpreter
+ // must use the host architecture.
+ if (WhichEngine != EngineKind::Interpreter && M)
+ TT.setTriple(M->getTargetTriple());
+
+ return selectTarget(TT, MArch, MCPU, MAttrs);
+}
+
+/// selectTarget - Pick a target either via -march or by guessing the native
+/// arch. Add any CPU features specified via -mcpu or -mattr.
+TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) {
+ Triple TheTriple(TargetTriple);
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getProcessTriple());
+
+ // Adjust the triple to match what the user requested.
+ const Target *TheTarget = nullptr;
+ if (!MArch.empty()) {
+ auto I = find_if(TargetRegistry::targets(),
+ [&](const Target &T) { return MArch == T.getName(); });
+
+ if (I == TargetRegistry::targets().end()) {
+ if (ErrorStr)
+ *ErrorStr = "No available targets are compatible with this -march, "
+ "see -version for the available targets.\n";
+ return nullptr;
+ }
+
+ TheTarget = &*I;
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // requested/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ std::string Error;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
+ if (!TheTarget) {
+ if (ErrorStr)
+ *ErrorStr = Error;
+ return nullptr;
+ }
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (!MAttrs.empty()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ // Allocate a target...
+ TargetMachine *Target =
+ TheTarget->createTargetMachine(TheTriple.getTriple(), MCPU, FeaturesStr,
+ Options, RelocModel, CMModel, OptLevel,
+ /*JIT*/ true);
+ Target->Options.EmulatedTLS = EmulatedTLS;
+
+ assert(Target && "Could not allocate target machine!");
+ return Target;
+}