Diffstat (limited to 'lib/ExecutionEngine')
20 files changed, 7981 insertions, 0 deletions
diff --git a/lib/ExecutionEngine/CMakeLists.txt b/lib/ExecutionEngine/CMakeLists.txt new file mode 100644 index 0000000000000..e26b98fb9d436 --- /dev/null +++ b/lib/ExecutionEngine/CMakeLists.txt @@ -0,0 +1,4 @@ +add_partially_linked_object(LLVMExecutionEngine + ExecutionEngine.cpp + ExecutionEngineBindings.cpp + ) diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp new file mode 100644 index 0000000000000..29a05bbbdb64c --- /dev/null +++ b/lib/ExecutionEngine/ExecutionEngine.cpp @@ -0,0 +1,1010 @@ +//===-- ExecutionEngine.cpp - Common Implementation shared by EEs ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the common interface used by the various execution engine +// subclasses. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "jit" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/ModuleProvider.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Config/alloca.h" +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MutexGuard.h" +#include "llvm/System/DynamicLibrary.h" +#include "llvm/System/Host.h" +#include "llvm/Target/TargetData.h" +#include <cmath> +#include <cstring> +using namespace llvm; + +STATISTIC(NumInitBytes, "Number of bytes of global vars initialized"); +STATISTIC(NumGlobals , "Number of global vars initialized"); + +ExecutionEngine::EECtorFn ExecutionEngine::JITCtor = 0; +ExecutionEngine::EECtorFn ExecutionEngine::InterpCtor = 0; +ExecutionEngine::EERegisterFn ExecutionEngine::ExceptionTableRegister = 0; + + +ExecutionEngine::ExecutionEngine(ModuleProvider *P) : LazyFunctionCreator(0) { + LazyCompilationDisabled = false; + GVCompilationDisabled = false; + SymbolSearchingDisabled = false; + DlsymStubsEnabled = false; + Modules.push_back(P); + assert(P && "ModuleProvider is null?"); +} + +ExecutionEngine::~ExecutionEngine() { + clearAllGlobalMappings(); + for (unsigned i = 0, e = Modules.size(); i != e; ++i) + delete Modules[i]; +} + +char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) { + const Type *ElTy = GV->getType()->getElementType(); + size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy); + return new char[GVSize]; +} + +/// removeModuleProvider - Remove a ModuleProvider from the list of modules. +/// Relases the Module from the ModuleProvider, materializing it in the +/// process, and returns the materialized Module. +Module* ExecutionEngine::removeModuleProvider(ModuleProvider *P, + std::string *ErrInfo) { + for(SmallVector<ModuleProvider *, 1>::iterator I = Modules.begin(), + E = Modules.end(); I != E; ++I) { + ModuleProvider *MP = *I; + if (MP == P) { + Modules.erase(I); + clearGlobalMappingsFromModule(MP->getModule()); + return MP->releaseModule(ErrInfo); + } + } + return NULL; +} + +/// deleteModuleProvider - Remove a ModuleProvider from the list of modules, +/// and deletes the ModuleProvider and owned Module. Avoids materializing +/// the underlying module. 
+void ExecutionEngine::deleteModuleProvider(ModuleProvider *P, + std::string *ErrInfo) { + for(SmallVector<ModuleProvider *, 1>::iterator I = Modules.begin(), + E = Modules.end(); I != E; ++I) { + ModuleProvider *MP = *I; + if (MP == P) { + Modules.erase(I); + clearGlobalMappingsFromModule(MP->getModule()); + delete MP; + return; + } + } +} + +/// FindFunctionNamed - Search all of the active modules to find the one that +/// defines FnName. This is very slow operation and shouldn't be used for +/// general code. +Function *ExecutionEngine::FindFunctionNamed(const char *FnName) { + for (unsigned i = 0, e = Modules.size(); i != e; ++i) { + if (Function *F = Modules[i]->getModule()->getFunction(FnName)) + return F; + } + return 0; +} + + +/// addGlobalMapping - Tell the execution engine that the specified global is +/// at the specified location. This is used internally as functions are JIT'd +/// and as global variables are laid out in memory. It can and should also be +/// used by clients of the EE that want to have an LLVM global overlay +/// existing data in memory. +void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) { + MutexGuard locked(lock); + + DOUT << "JIT: Map \'" << GV->getNameStart() << "\' to [" << Addr << "]\n"; + void *&CurVal = state.getGlobalAddressMap(locked)[GV]; + assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!"); + CurVal = Addr; + + // If we are using the reverse mapping, add it too + if (!state.getGlobalAddressReverseMap(locked).empty()) { + const GlobalValue *&V = state.getGlobalAddressReverseMap(locked)[Addr]; + assert((V == 0 || GV == 0) && "GlobalMapping already established!"); + V = GV; + } +} + +/// clearAllGlobalMappings - Clear all global mappings and start over again +/// use in dynamic compilation scenarios when you want to move globals +void ExecutionEngine::clearAllGlobalMappings() { + MutexGuard locked(lock); + + state.getGlobalAddressMap(locked).clear(); + state.getGlobalAddressReverseMap(locked).clear(); +} + +/// clearGlobalMappingsFromModule - Clear all global mappings that came from a +/// particular module, because it has been removed from the JIT. +void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) { + MutexGuard locked(lock); + + for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI) { + state.getGlobalAddressMap(locked).erase(FI); + state.getGlobalAddressReverseMap(locked).erase(FI); + } + for (Module::global_iterator GI = M->global_begin(), GE = M->global_end(); + GI != GE; ++GI) { + state.getGlobalAddressMap(locked).erase(GI); + state.getGlobalAddressReverseMap(locked).erase(GI); + } +} + +/// updateGlobalMapping - Replace an existing mapping for GV with a new +/// address. This updates both maps as required. If "Addr" is null, the +/// entry for the global is removed from the mappings. +void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) { + MutexGuard locked(lock); + + std::map<const GlobalValue*, void *> &Map = state.getGlobalAddressMap(locked); + + // Deleting from the mapping? 
+ if (Addr == 0) { + std::map<const GlobalValue*, void *>::iterator I = Map.find(GV); + void *OldVal; + if (I == Map.end()) + OldVal = 0; + else { + OldVal = I->second; + Map.erase(I); + } + + if (!state.getGlobalAddressReverseMap(locked).empty()) + state.getGlobalAddressReverseMap(locked).erase(Addr); + return OldVal; + } + + void *&CurVal = Map[GV]; + void *OldVal = CurVal; + + if (CurVal && !state.getGlobalAddressReverseMap(locked).empty()) + state.getGlobalAddressReverseMap(locked).erase(CurVal); + CurVal = Addr; + + // If we are using the reverse mapping, add it too + if (!state.getGlobalAddressReverseMap(locked).empty()) { + const GlobalValue *&V = state.getGlobalAddressReverseMap(locked)[Addr]; + assert((V == 0 || GV == 0) && "GlobalMapping already established!"); + V = GV; + } + return OldVal; +} + +/// getPointerToGlobalIfAvailable - This returns the address of the specified +/// global value if it is has already been codegen'd, otherwise it returns null. +/// +void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) { + MutexGuard locked(lock); + + std::map<const GlobalValue*, void*>::iterator I = + state.getGlobalAddressMap(locked).find(GV); + return I != state.getGlobalAddressMap(locked).end() ? I->second : 0; +} + +/// getGlobalValueAtAddress - Return the LLVM global value object that starts +/// at the specified address. +/// +const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) { + MutexGuard locked(lock); + + // If we haven't computed the reverse mapping yet, do so first. + if (state.getGlobalAddressReverseMap(locked).empty()) { + for (std::map<const GlobalValue*, void *>::iterator + I = state.getGlobalAddressMap(locked).begin(), + E = state.getGlobalAddressMap(locked).end(); I != E; ++I) + state.getGlobalAddressReverseMap(locked).insert(std::make_pair(I->second, + I->first)); + } + + std::map<void *, const GlobalValue*>::iterator I = + state.getGlobalAddressReverseMap(locked).find(Addr); + return I != state.getGlobalAddressReverseMap(locked).end() ? I->second : 0; +} + +// CreateArgv - Turn a vector of strings into a nice argv style array of +// pointers to null terminated strings. +// +static void *CreateArgv(ExecutionEngine *EE, + const std::vector<std::string> &InputArgv) { + unsigned PtrSize = EE->getTargetData()->getPointerSize(); + char *Result = new char[(InputArgv.size()+1)*PtrSize]; + + DOUT << "JIT: ARGV = " << (void*)Result << "\n"; + const Type *SBytePtr = PointerType::getUnqual(Type::Int8Ty); + + for (unsigned i = 0; i != InputArgv.size(); ++i) { + unsigned Size = InputArgv[i].size()+1; + char *Dest = new char[Size]; + DOUT << "JIT: ARGV[" << i << "] = " << (void*)Dest << "\n"; + + std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest); + Dest[Size-1] = 0; + + // Endian safe: Result[i] = (PointerTy)Dest; + EE->StoreValueToMemory(PTOGV(Dest), (GenericValue*)(Result+i*PtrSize), + SBytePtr); + } + + // Null terminate it + EE->StoreValueToMemory(PTOGV(0), + (GenericValue*)(Result+InputArgv.size()*PtrSize), + SBytePtr); + return Result; +} + + +/// runStaticConstructorsDestructors - This method is used to execute all of +/// the static constructors or destructors for a module, depending on the +/// value of isDtors. +void ExecutionEngine::runStaticConstructorsDestructors(Module *module, bool isDtors) { + const char *Name = isDtors ? "llvm.global_dtors" : "llvm.global_ctors"; + + // Execute global ctors/dtors for each module in the program. 
+ + GlobalVariable *GV = module->getNamedGlobal(Name); + + // If this global has internal linkage, or if it has a use, then it must be + // an old-style (llvmgcc3) static ctor with __main linked in and in use. If + // this is the case, don't execute any of the global ctors, __main will do + // it. + if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return; + + // Should be an array of '{ int, void ()* }' structs. The first value is + // the init priority, which we ignore. + ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer()); + if (!InitList) return; + for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) + if (ConstantStruct *CS = + dyn_cast<ConstantStruct>(InitList->getOperand(i))) { + if (CS->getNumOperands() != 2) return; // Not array of 2-element structs. + + Constant *FP = CS->getOperand(1); + if (FP->isNullValue()) + break; // Found a null terminator, exit. + + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP)) + if (CE->isCast()) + FP = CE->getOperand(0); + if (Function *F = dyn_cast<Function>(FP)) { + // Execute the ctor/dtor function! + runFunction(F, std::vector<GenericValue>()); + } + } +} + +/// runStaticConstructorsDestructors - This method is used to execute all of +/// the static constructors or destructors for a program, depending on the +/// value of isDtors. +void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) { + // Execute global ctors/dtors for each module in the program. + for (unsigned m = 0, e = Modules.size(); m != e; ++m) + runStaticConstructorsDestructors(Modules[m]->getModule(), isDtors); +} + +#ifndef NDEBUG +/// isTargetNullPtr - Return whether the target pointer stored at Loc is null. +static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) { + unsigned PtrSize = EE->getTargetData()->getPointerSize(); + for (unsigned i = 0; i < PtrSize; ++i) + if (*(i + (uint8_t*)Loc)) + return false; + return true; +} +#endif + +/// runFunctionAsMain - This is a helper function which wraps runFunction to +/// handle the common task of starting up main with the specified argc, argv, +/// and envp parameters. +int ExecutionEngine::runFunctionAsMain(Function *Fn, + const std::vector<std::string> &argv, + const char * const * envp) { + std::vector<GenericValue> GVArgs; + GenericValue GVArgc; + GVArgc.IntVal = APInt(32, argv.size()); + + // Check main() type + unsigned NumArgs = Fn->getFunctionType()->getNumParams(); + const FunctionType *FTy = Fn->getFunctionType(); + const Type* PPInt8Ty = + PointerType::getUnqual(PointerType::getUnqual(Type::Int8Ty)); + switch (NumArgs) { + case 3: + if (FTy->getParamType(2) != PPInt8Ty) { + cerr << "Invalid type for third argument of main() supplied\n"; + abort(); + } + // FALLS THROUGH + case 2: + if (FTy->getParamType(1) != PPInt8Ty) { + cerr << "Invalid type for second argument of main() supplied\n"; + abort(); + } + // FALLS THROUGH + case 1: + if (FTy->getParamType(0) != Type::Int32Ty) { + cerr << "Invalid type for first argument of main() supplied\n"; + abort(); + } + // FALLS THROUGH + case 0: + if (!isa<IntegerType>(FTy->getReturnType()) && + FTy->getReturnType() != Type::VoidTy) { + cerr << "Invalid return type of main() supplied\n"; + abort(); + } + break; + default: + cerr << "Invalid number of arguments of main() supplied\n"; + abort(); + } + + if (NumArgs) { + GVArgs.push_back(GVArgc); // Arg #0 = argc. + if (NumArgs > 1) { + GVArgs.push_back(PTOGV(CreateArgv(this, argv))); // Arg #1 = argv. 
+ assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) && + "argv[0] was null after CreateArgv"); + if (NumArgs > 2) { + std::vector<std::string> EnvVars; + for (unsigned i = 0; envp[i]; ++i) + EnvVars.push_back(envp[i]); + GVArgs.push_back(PTOGV(CreateArgv(this, EnvVars))); // Arg #2 = envp. + } + } + } + return runFunction(Fn, GVArgs).IntVal.getZExtValue(); +} + +/// If possible, create a JIT, unless the caller specifically requests an +/// Interpreter or there's an error. If even an Interpreter cannot be created, +/// NULL is returned. +/// +ExecutionEngine *ExecutionEngine::create(ModuleProvider *MP, + bool ForceInterpreter, + std::string *ErrorStr, + CodeGenOpt::Level OptLevel) { + ExecutionEngine *EE = 0; + + // Make sure we can resolve symbols in the program as well. The zero arg + // to the function tells DynamicLibrary to load the program, not a library. + if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr)) + return 0; + + // Unless the interpreter was explicitly selected, try making a JIT. + if (!ForceInterpreter && JITCtor) + EE = JITCtor(MP, ErrorStr, OptLevel); + + // If we can't make a JIT, make an interpreter instead. + if (EE == 0 && InterpCtor) + EE = InterpCtor(MP, ErrorStr, OptLevel); + + return EE; +} + +ExecutionEngine *ExecutionEngine::create(Module *M) { + return create(new ExistingModuleProvider(M)); +} + +/// getPointerToGlobal - This returns the address of the specified global +/// value. This may involve code generation if it's a function. +/// +void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) { + if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV))) + return getPointerToFunction(F); + + MutexGuard locked(lock); + void *p = state.getGlobalAddressMap(locked)[GV]; + if (p) + return p; + + // Global variable might have been added since interpreter started. + if (GlobalVariable *GVar = + const_cast<GlobalVariable *>(dyn_cast<GlobalVariable>(GV))) + EmitGlobalVariable(GVar); + else + assert(0 && "Global hasn't had an address allocated yet!"); + return state.getGlobalAddressMap(locked)[GV]; +} + +/// This function converts a Constant* into a GenericValue. The interesting +/// part is if C is a ConstantExpr. +/// @brief Get a GenericValue for a Constant* +GenericValue ExecutionEngine::getConstantValue(const Constant *C) { + // If its undefined, return the garbage. 
+ if (isa<UndefValue>(C)) + return GenericValue(); + + // If the value is a ConstantExpr + if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { + Constant *Op0 = CE->getOperand(0); + switch (CE->getOpcode()) { + case Instruction::GetElementPtr: { + // Compute the index + GenericValue Result = getConstantValue(Op0); + SmallVector<Value*, 8> Indices(CE->op_begin()+1, CE->op_end()); + uint64_t Offset = + TD->getIndexedOffset(Op0->getType(), &Indices[0], Indices.size()); + + char* tmp = (char*) Result.PointerVal; + Result = PTOGV(tmp + Offset); + return Result; + } + case Instruction::Trunc: { + GenericValue GV = getConstantValue(Op0); + uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth(); + GV.IntVal = GV.IntVal.trunc(BitWidth); + return GV; + } + case Instruction::ZExt: { + GenericValue GV = getConstantValue(Op0); + uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth(); + GV.IntVal = GV.IntVal.zext(BitWidth); + return GV; + } + case Instruction::SExt: { + GenericValue GV = getConstantValue(Op0); + uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth(); + GV.IntVal = GV.IntVal.sext(BitWidth); + return GV; + } + case Instruction::FPTrunc: { + // FIXME long double + GenericValue GV = getConstantValue(Op0); + GV.FloatVal = float(GV.DoubleVal); + return GV; + } + case Instruction::FPExt:{ + // FIXME long double + GenericValue GV = getConstantValue(Op0); + GV.DoubleVal = double(GV.FloatVal); + return GV; + } + case Instruction::UIToFP: { + GenericValue GV = getConstantValue(Op0); + if (CE->getType() == Type::FloatTy) + GV.FloatVal = float(GV.IntVal.roundToDouble()); + else if (CE->getType() == Type::DoubleTy) + GV.DoubleVal = GV.IntVal.roundToDouble(); + else if (CE->getType() == Type::X86_FP80Ty) { + const uint64_t zero[] = {0, 0}; + APFloat apf = APFloat(APInt(80, 2, zero)); + (void)apf.convertFromAPInt(GV.IntVal, + false, + APFloat::rmNearestTiesToEven); + GV.IntVal = apf.bitcastToAPInt(); + } + return GV; + } + case Instruction::SIToFP: { + GenericValue GV = getConstantValue(Op0); + if (CE->getType() == Type::FloatTy) + GV.FloatVal = float(GV.IntVal.signedRoundToDouble()); + else if (CE->getType() == Type::DoubleTy) + GV.DoubleVal = GV.IntVal.signedRoundToDouble(); + else if (CE->getType() == Type::X86_FP80Ty) { + const uint64_t zero[] = { 0, 0}; + APFloat apf = APFloat(APInt(80, 2, zero)); + (void)apf.convertFromAPInt(GV.IntVal, + true, + APFloat::rmNearestTiesToEven); + GV.IntVal = apf.bitcastToAPInt(); + } + return GV; + } + case Instruction::FPToUI: // double->APInt conversion handles sign + case Instruction::FPToSI: { + GenericValue GV = getConstantValue(Op0); + uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth(); + if (Op0->getType() == Type::FloatTy) + GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth); + else if (Op0->getType() == Type::DoubleTy) + GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth); + else if (Op0->getType() == Type::X86_FP80Ty) { + APFloat apf = APFloat(GV.IntVal); + uint64_t v; + bool ignored; + (void)apf.convertToInteger(&v, BitWidth, + CE->getOpcode()==Instruction::FPToSI, + APFloat::rmTowardZero, &ignored); + GV.IntVal = v; // endian? 
+ } + return GV; + } + case Instruction::PtrToInt: { + GenericValue GV = getConstantValue(Op0); + uint32_t PtrWidth = TD->getPointerSizeInBits(); + GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal)); + return GV; + } + case Instruction::IntToPtr: { + GenericValue GV = getConstantValue(Op0); + uint32_t PtrWidth = TD->getPointerSizeInBits(); + if (PtrWidth != GV.IntVal.getBitWidth()) + GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth); + assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width"); + GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue())); + return GV; + } + case Instruction::BitCast: { + GenericValue GV = getConstantValue(Op0); + const Type* DestTy = CE->getType(); + switch (Op0->getType()->getTypeID()) { + default: assert(0 && "Invalid bitcast operand"); + case Type::IntegerTyID: + assert(DestTy->isFloatingPoint() && "invalid bitcast"); + if (DestTy == Type::FloatTy) + GV.FloatVal = GV.IntVal.bitsToFloat(); + else if (DestTy == Type::DoubleTy) + GV.DoubleVal = GV.IntVal.bitsToDouble(); + break; + case Type::FloatTyID: + assert(DestTy == Type::Int32Ty && "Invalid bitcast"); + GV.IntVal.floatToBits(GV.FloatVal); + break; + case Type::DoubleTyID: + assert(DestTy == Type::Int64Ty && "Invalid bitcast"); + GV.IntVal.doubleToBits(GV.DoubleVal); + break; + case Type::PointerTyID: + assert(isa<PointerType>(DestTy) && "Invalid bitcast"); + break; // getConstantValue(Op0) above already converted it + } + return GV; + } + case Instruction::Add: + case Instruction::Sub: + case Instruction::Mul: + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: { + GenericValue LHS = getConstantValue(Op0); + GenericValue RHS = getConstantValue(CE->getOperand(1)); + GenericValue GV; + switch (CE->getOperand(0)->getType()->getTypeID()) { + default: assert(0 && "Bad add type!"); abort(); + case Type::IntegerTyID: + switch (CE->getOpcode()) { + default: assert(0 && "Invalid integer opcode"); + case Instruction::Add: GV.IntVal = LHS.IntVal + RHS.IntVal; break; + case Instruction::Sub: GV.IntVal = LHS.IntVal - RHS.IntVal; break; + case Instruction::Mul: GV.IntVal = LHS.IntVal * RHS.IntVal; break; + case Instruction::UDiv:GV.IntVal = LHS.IntVal.udiv(RHS.IntVal); break; + case Instruction::SDiv:GV.IntVal = LHS.IntVal.sdiv(RHS.IntVal); break; + case Instruction::URem:GV.IntVal = LHS.IntVal.urem(RHS.IntVal); break; + case Instruction::SRem:GV.IntVal = LHS.IntVal.srem(RHS.IntVal); break; + case Instruction::And: GV.IntVal = LHS.IntVal & RHS.IntVal; break; + case Instruction::Or: GV.IntVal = LHS.IntVal | RHS.IntVal; break; + case Instruction::Xor: GV.IntVal = LHS.IntVal ^ RHS.IntVal; break; + } + break; + case Type::FloatTyID: + switch (CE->getOpcode()) { + default: assert(0 && "Invalid float opcode"); abort(); + case Instruction::Add: + GV.FloatVal = LHS.FloatVal + RHS.FloatVal; break; + case Instruction::Sub: + GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break; + case Instruction::Mul: + GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break; + case Instruction::FDiv: + GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break; + case Instruction::FRem: + GV.FloatVal = ::fmodf(LHS.FloatVal,RHS.FloatVal); break; + } + break; + case Type::DoubleTyID: + switch (CE->getOpcode()) { + default: assert(0 && "Invalid double opcode"); abort(); + case Instruction::Add: + GV.DoubleVal = LHS.DoubleVal + RHS.DoubleVal; break; + case Instruction::Sub: + GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break; + case 
Instruction::Mul: + GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break; + case Instruction::FDiv: + GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break; + case Instruction::FRem: + GV.DoubleVal = ::fmod(LHS.DoubleVal,RHS.DoubleVal); break; + } + break; + case Type::X86_FP80TyID: + case Type::PPC_FP128TyID: + case Type::FP128TyID: { + APFloat apfLHS = APFloat(LHS.IntVal); + switch (CE->getOpcode()) { + default: assert(0 && "Invalid long double opcode"); abort(); + case Instruction::Add: + apfLHS.add(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven); + GV.IntVal = apfLHS.bitcastToAPInt(); + break; + case Instruction::Sub: + apfLHS.subtract(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven); + GV.IntVal = apfLHS.bitcastToAPInt(); + break; + case Instruction::Mul: + apfLHS.multiply(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven); + GV.IntVal = apfLHS.bitcastToAPInt(); + break; + case Instruction::FDiv: + apfLHS.divide(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven); + GV.IntVal = apfLHS.bitcastToAPInt(); + break; + case Instruction::FRem: + apfLHS.mod(APFloat(RHS.IntVal), APFloat::rmNearestTiesToEven); + GV.IntVal = apfLHS.bitcastToAPInt(); + break; + } + } + break; + } + return GV; + } + default: + break; + } + cerr << "ConstantExpr not handled: " << *CE << "\n"; + abort(); + } + + GenericValue Result; + switch (C->getType()->getTypeID()) { + case Type::FloatTyID: + Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat(); + break; + case Type::DoubleTyID: + Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble(); + break; + case Type::X86_FP80TyID: + case Type::FP128TyID: + case Type::PPC_FP128TyID: + Result.IntVal = cast <ConstantFP>(C)->getValueAPF().bitcastToAPInt(); + break; + case Type::IntegerTyID: + Result.IntVal = cast<ConstantInt>(C)->getValue(); + break; + case Type::PointerTyID: + if (isa<ConstantPointerNull>(C)) + Result.PointerVal = 0; + else if (const Function *F = dyn_cast<Function>(C)) + Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F))); + else if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C)) + Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV))); + else + assert(0 && "Unknown constant pointer type!"); + break; + default: + cerr << "ERROR: Constant unimplemented for type: " << *C->getType() << "\n"; + abort(); + } + return Result; +} + +/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst +/// with the integer held in IntVal. +static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, + unsigned StoreBytes) { + assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!"); + uint8_t *Src = (uint8_t *)IntVal.getRawData(); + + if (sys::isLittleEndianHost()) + // Little-endian host - the source is ordered from LSB to MSB. Order the + // destination from LSB to MSB: Do a straight copy. + memcpy(Dst, Src, StoreBytes); + else { + // Big-endian host - the source is an array of 64 bit words ordered from + // LSW to MSW. Each word is ordered from MSB to LSB. Order the destination + // from MSB to LSB: Reverse the word order, but not the bytes in a word. + while (StoreBytes > sizeof(uint64_t)) { + StoreBytes -= sizeof(uint64_t); + // May not be aligned so use memcpy. + memcpy(Dst + StoreBytes, Src, sizeof(uint64_t)); + Src += sizeof(uint64_t); + } + + memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes); + } +} + +/// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr. 
Ptr +/// is the address of the memory at which to store Val, cast to GenericValue *. +/// It is not a pointer to a GenericValue containing the address at which to +/// store Val. +void ExecutionEngine::StoreValueToMemory(const GenericValue &Val, + GenericValue *Ptr, const Type *Ty) { + const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty); + + switch (Ty->getTypeID()) { + case Type::IntegerTyID: + StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes); + break; + case Type::FloatTyID: + *((float*)Ptr) = Val.FloatVal; + break; + case Type::DoubleTyID: + *((double*)Ptr) = Val.DoubleVal; + break; + case Type::X86_FP80TyID: + memcpy(Ptr, Val.IntVal.getRawData(), 10); + break; + case Type::PointerTyID: + // Ensure 64 bit target pointers are fully initialized on 32 bit hosts. + if (StoreBytes != sizeof(PointerTy)) + memset(Ptr, 0, StoreBytes); + + *((PointerTy*)Ptr) = Val.PointerVal; + break; + default: + cerr << "Cannot store value of type " << *Ty << "!\n"; + } + + if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian()) + // Host and target are different endian - reverse the stored bytes. + std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr); +} + +/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting +/// from Src into IntVal, which is assumed to be wide enough and to hold zero. +static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) { + assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!"); + uint8_t *Dst = (uint8_t *)IntVal.getRawData(); + + if (sys::isLittleEndianHost()) + // Little-endian host - the destination must be ordered from LSB to MSB. + // The source is ordered from LSB to MSB: Do a straight copy. + memcpy(Dst, Src, LoadBytes); + else { + // Big-endian - the destination is an array of 64 bit words ordered from + // LSW to MSW. Each word must be ordered from MSB to LSB. The source is + // ordered from MSB to LSB: Reverse the word order, but not the bytes in + // a word. + while (LoadBytes > sizeof(uint64_t)) { + LoadBytes -= sizeof(uint64_t); + // May not be aligned so use memcpy. + memcpy(Dst, Src + LoadBytes, sizeof(uint64_t)); + Dst += sizeof(uint64_t); + } + + memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes); + } +} + +/// FIXME: document +/// +void ExecutionEngine::LoadValueFromMemory(GenericValue &Result, + GenericValue *Ptr, + const Type *Ty) { + const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty); + + if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian()) { + // Host and target are different endian - reverse copy the stored + // bytes into a buffer, and load from that. + uint8_t *Src = (uint8_t*)Ptr; + uint8_t *Buf = (uint8_t*)alloca(LoadBytes); + std::reverse_copy(Src, Src + LoadBytes, Buf); + Ptr = (GenericValue*)Buf; + } + + switch (Ty->getTypeID()) { + case Type::IntegerTyID: + // An APInt with all words initially zero. + Result.IntVal = APInt(cast<IntegerType>(Ty)->getBitWidth(), 0); + LoadIntFromMemory(Result.IntVal, (uint8_t*)Ptr, LoadBytes); + break; + case Type::FloatTyID: + Result.FloatVal = *((float*)Ptr); + break; + case Type::DoubleTyID: + Result.DoubleVal = *((double*)Ptr); + break; + case Type::PointerTyID: + Result.PointerVal = *((PointerTy*)Ptr); + break; + case Type::X86_FP80TyID: { + // This is endian dependent, but it will only work on x86 anyway. + // FIXME: Will not trap if loading a signaling NaN. 
+ uint64_t y[2]; + memcpy(y, Ptr, 10); + Result.IntVal = APInt(80, 2, y); + break; + } + default: + cerr << "Cannot load value of type " << *Ty << "!\n"; + abort(); + } +} + +// InitializeMemory - Recursive function to apply a Constant value into the +// specified memory location... +// +void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) { + DOUT << "JIT: Initializing " << Addr << " "; + DEBUG(Init->dump()); + if (isa<UndefValue>(Init)) { + return; + } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) { + unsigned ElementSize = + getTargetData()->getTypeAllocSize(CP->getType()->getElementType()); + for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i) + InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize); + return; + } else if (isa<ConstantAggregateZero>(Init)) { + memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType())); + return; + } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) { + unsigned ElementSize = + getTargetData()->getTypeAllocSize(CPA->getType()->getElementType()); + for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i) + InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize); + return; + } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) { + const StructLayout *SL = + getTargetData()->getStructLayout(cast<StructType>(CPS->getType())); + for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i) + InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i)); + return; + } else if (Init->getType()->isFirstClassType()) { + GenericValue Val = getConstantValue(Init); + StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType()); + return; + } + + cerr << "Bad Type: " << *Init->getType() << "\n"; + assert(0 && "Unknown constant type to initialize memory with!"); +} + +/// EmitGlobals - Emit all of the global variables to memory, storing their +/// addresses into GlobalAddress. This must make sure to copy the contents of +/// their initializers into the memory. +/// +void ExecutionEngine::emitGlobals() { + + // Loop over all of the global variables in the program, allocating the memory + // to hold them. If there is more than one module, do a prepass over globals + // to figure out how the different modules should link together. + // + std::map<std::pair<std::string, const Type*>, + const GlobalValue*> LinkedGlobalsMap; + + if (Modules.size() != 1) { + for (unsigned m = 0, e = Modules.size(); m != e; ++m) { + Module &M = *Modules[m]->getModule(); + for (Module::const_global_iterator I = M.global_begin(), + E = M.global_end(); I != E; ++I) { + const GlobalValue *GV = I; + if (GV->hasLocalLinkage() || GV->isDeclaration() || + GV->hasAppendingLinkage() || !GV->hasName()) + continue;// Ignore external globals and globals with internal linkage. + + const GlobalValue *&GVEntry = + LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())]; + + // If this is the first time we've seen this global, it is the canonical + // version. + if (!GVEntry) { + GVEntry = GV; + continue; + } + + // If the existing global is strong, never replace it. + if (GVEntry->hasExternalLinkage() || + GVEntry->hasDLLImportLinkage() || + GVEntry->hasDLLExportLinkage()) + continue; + + // Otherwise, we know it's linkonce/weak, replace it if this is a strong + // symbol. FIXME is this right for common? 
+ if (GV->hasExternalLinkage() || GVEntry->hasExternalWeakLinkage()) + GVEntry = GV; + } + } + } + + std::vector<const GlobalValue*> NonCanonicalGlobals; + for (unsigned m = 0, e = Modules.size(); m != e; ++m) { + Module &M = *Modules[m]->getModule(); + for (Module::const_global_iterator I = M.global_begin(), E = M.global_end(); + I != E; ++I) { + // In the multi-module case, see what this global maps to. + if (!LinkedGlobalsMap.empty()) { + if (const GlobalValue *GVEntry = + LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())]) { + // If something else is the canonical global, ignore this one. + if (GVEntry != &*I) { + NonCanonicalGlobals.push_back(I); + continue; + } + } + } + + if (!I->isDeclaration()) { + addGlobalMapping(I, getMemoryForGV(I)); + } else { + // External variable reference. Try to use the dynamic loader to + // get a pointer to it. + if (void *SymAddr = + sys::DynamicLibrary::SearchForAddressOfSymbol(I->getName().c_str())) + addGlobalMapping(I, SymAddr); + else { + cerr << "Could not resolve external global address: " + << I->getName() << "\n"; + abort(); + } + } + } + + // If there are multiple modules, map the non-canonical globals to their + // canonical location. + if (!NonCanonicalGlobals.empty()) { + for (unsigned i = 0, e = NonCanonicalGlobals.size(); i != e; ++i) { + const GlobalValue *GV = NonCanonicalGlobals[i]; + const GlobalValue *CGV = + LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())]; + void *Ptr = getPointerToGlobalIfAvailable(CGV); + assert(Ptr && "Canonical global wasn't codegen'd!"); + addGlobalMapping(GV, Ptr); + } + } + + // Now that all of the globals are set up in memory, loop through them all + // and initialize their contents. + for (Module::const_global_iterator I = M.global_begin(), E = M.global_end(); + I != E; ++I) { + if (!I->isDeclaration()) { + if (!LinkedGlobalsMap.empty()) { + if (const GlobalValue *GVEntry = + LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())]) + if (GVEntry != &*I) // Not the canonical variable. + continue; + } + EmitGlobalVariable(I); + } + } + } +} + +// EmitGlobalVariable - This method emits the specified global variable to the +// address specified in GlobalAddresses, or allocates new memory if it's not +// already in the map. +void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) { + void *GA = getPointerToGlobalIfAvailable(GV); + + if (GA == 0) { + // If it's not already specified, allocate memory for the global. + GA = getMemoryForGV(GV); + addGlobalMapping(GV, GA); + } + + // Don't initialize if it's thread local, let the client do it. + if (!GV->isThreadLocal()) + InitializeMemory(GV->getInitializer(), GA); + + const Type *ElTy = GV->getType()->getElementType(); + size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy); + NumInitBytes += (unsigned)GVSize; + ++NumGlobals; +} diff --git a/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/lib/ExecutionEngine/ExecutionEngineBindings.cpp new file mode 100644 index 0000000000000..83397a586d537 --- /dev/null +++ b/lib/ExecutionEngine/ExecutionEngineBindings.cpp @@ -0,0 +1,206 @@ +//===-- ExecutionEngineBindings.cpp - C bindings for EEs ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the C bindings for the ExecutionEngine library. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "jit" +#include "llvm-c/ExecutionEngine.h" +#include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include <cstring> + +using namespace llvm; + +/*===-- Operations on generic values --------------------------------------===*/ + +LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty, + unsigned long long N, + int IsSigned) { + GenericValue *GenVal = new GenericValue(); + GenVal->IntVal = APInt(unwrap<IntegerType>(Ty)->getBitWidth(), N, IsSigned); + return wrap(GenVal); +} + +LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P) { + GenericValue *GenVal = new GenericValue(); + GenVal->PointerVal = P; + return wrap(GenVal); +} + +LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef TyRef, double N) { + GenericValue *GenVal = new GenericValue(); + switch (unwrap(TyRef)->getTypeID()) { + case Type::FloatTyID: + GenVal->FloatVal = N; + break; + case Type::DoubleTyID: + GenVal->DoubleVal = N; + break; + default: + assert(0 && "LLVMGenericValueToFloat supports only float and double."); + break; + } + return wrap(GenVal); +} + +unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef) { + return unwrap(GenValRef)->IntVal.getBitWidth(); +} + +unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenValRef, + int IsSigned) { + GenericValue *GenVal = unwrap(GenValRef); + if (IsSigned) + return GenVal->IntVal.getSExtValue(); + else + return GenVal->IntVal.getZExtValue(); +} + +void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal) { + return unwrap(GenVal)->PointerVal; +} + +double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) { + switch (unwrap(TyRef)->getTypeID()) { + case Type::FloatTyID: + return unwrap(GenVal)->FloatVal; + case Type::DoubleTyID: + return unwrap(GenVal)->DoubleVal; + default: + assert(0 && "LLVMGenericValueToFloat supports only float and double."); + break; + } + return 0; // Not reached +} + +void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) { + delete unwrap(GenVal); +} + +/*===-- Operations on execution engines -----------------------------------===*/ + +int LLVMCreateExecutionEngine(LLVMExecutionEngineRef *OutEE, + LLVMModuleProviderRef MP, + char **OutError) { + std::string Error; + if (ExecutionEngine *EE = ExecutionEngine::create(unwrap(MP), false, &Error)){ + *OutEE = wrap(EE); + return 0; + } + *OutError = strdup(Error.c_str()); + return 1; +} + +int LLVMCreateInterpreter(LLVMExecutionEngineRef *OutInterp, + LLVMModuleProviderRef MP, + char **OutError) { + std::string Error; + if (ExecutionEngine *Interp = + ExecutionEngine::create(unwrap(MP), true, &Error)) { + *OutInterp = wrap(Interp); + return 0; + } + *OutError = strdup(Error.c_str()); + return 1; +} + +int LLVMCreateJITCompiler(LLVMExecutionEngineRef *OutJIT, + LLVMModuleProviderRef MP, + unsigned OptLevel, + char **OutError) { + std::string Error; + if (ExecutionEngine *JIT = + ExecutionEngine::createJIT(unwrap(MP), &Error, 0, + (CodeGenOpt::Level)OptLevel)) { + *OutJIT = wrap(JIT); + return 0; + } + *OutError = strdup(Error.c_str()); + return 1; +} + +void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE) { + delete unwrap(EE); +} + +void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE) { + unwrap(EE)->runStaticConstructorsDestructors(false); +} + +void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) { + unwrap(EE)->runStaticConstructorsDestructors(true); +} + +int 
LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F, + unsigned ArgC, const char * const *ArgV, + const char * const *EnvP) { + std::vector<std::string> ArgVec; + for (unsigned I = 0; I != ArgC; ++I) + ArgVec.push_back(ArgV[I]); + + return unwrap(EE)->runFunctionAsMain(unwrap<Function>(F), ArgVec, EnvP); +} + +LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F, + unsigned NumArgs, + LLVMGenericValueRef *Args) { + std::vector<GenericValue> ArgVec; + ArgVec.reserve(NumArgs); + for (unsigned I = 0; I != NumArgs; ++I) + ArgVec.push_back(*unwrap(Args[I])); + + GenericValue *Result = new GenericValue(); + *Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec); + return wrap(Result); +} + +void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) { + unwrap(EE)->freeMachineCodeForFunction(unwrap<Function>(F)); +} + +void LLVMAddModuleProvider(LLVMExecutionEngineRef EE, LLVMModuleProviderRef MP){ + unwrap(EE)->addModuleProvider(unwrap(MP)); +} + +int LLVMRemoveModuleProvider(LLVMExecutionEngineRef EE, + LLVMModuleProviderRef MP, + LLVMModuleRef *OutMod, char **OutError) { + std::string Error; + if (Module *Gone = unwrap(EE)->removeModuleProvider(unwrap(MP), &Error)) { + *OutMod = wrap(Gone); + return 0; + } + if (OutError) + *OutError = strdup(Error.c_str()); + return 1; +} + +int LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name, + LLVMValueRef *OutFn) { + if (Function *F = unwrap(EE)->FindFunctionNamed(Name)) { + *OutFn = wrap(F); + return 0; + } + return 1; +} + +LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) { + return wrap(unwrap(EE)->getTargetData()); +} + +void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global, + void* Addr) { + unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr); +} + +void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) { + return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global)); +} diff --git a/lib/ExecutionEngine/Interpreter/CMakeLists.txt b/lib/ExecutionEngine/Interpreter/CMakeLists.txt new file mode 100644 index 0000000000000..626e804e78e68 --- /dev/null +++ b/lib/ExecutionEngine/Interpreter/CMakeLists.txt @@ -0,0 +1,5 @@ +add_partially_linked_object(LLVMInterpreter + Execution.cpp + ExternalFunctions.cpp + Interpreter.cpp + ) diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp new file mode 100644 index 0000000000000..765fed248f988 --- /dev/null +++ b/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -0,0 +1,1382 @@ +//===-- Execution.cpp - Implement code to simulate the program ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the actual instruction interpreter. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "interpreter" +#include "Interpreter.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Instructions.h" +#include "llvm/CodeGen/IntrinsicLowering.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include <algorithm> +#include <cmath> +#include <cstring> +using namespace llvm; + +STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed"); +static Interpreter *TheEE = 0; + +static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden, + cl::desc("make the interpreter print every volatile load and store")); + +//===----------------------------------------------------------------------===// +// Various Helper Functions +//===----------------------------------------------------------------------===// + +static inline uint64_t doSignExtension(uint64_t Val, const IntegerType* ITy) { + // Determine if the value is signed or not + bool isSigned = (Val & (1 << (ITy->getBitWidth()-1))) != 0; + // If its signed, extend the sign bits + if (isSigned) + Val |= ~ITy->getBitMask(); + return Val; +} + +static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) { + SF.Values[V] = Val; +} + +void Interpreter::initializeExecutionEngine() { + TheEE = this; +} + +//===----------------------------------------------------------------------===// +// Binary Instruction Implementations +//===----------------------------------------------------------------------===// + +#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \ + case Type::TY##TyID: \ + Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \ + break + +#define IMPLEMENT_INTEGER_BINOP1(OP, TY) \ + case Type::IntegerTyID: { \ + Dest.IntVal = Src1.IntVal OP Src2.IntVal; \ + break; \ + } + + +static void executeAddInst(GenericValue &Dest, GenericValue Src1, + GenericValue Src2, const Type *Ty) { + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_BINOP1(+, Ty); + IMPLEMENT_BINARY_OPERATOR(+, Float); + IMPLEMENT_BINARY_OPERATOR(+, Double); + default: + cerr << "Unhandled type for Add instruction: " << *Ty << "\n"; + abort(); + } +} + +static void executeSubInst(GenericValue &Dest, GenericValue Src1, + GenericValue Src2, const Type *Ty) { + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_BINOP1(-, Ty); + IMPLEMENT_BINARY_OPERATOR(-, Float); + IMPLEMENT_BINARY_OPERATOR(-, Double); + default: + cerr << "Unhandled type for Sub instruction: " << *Ty << "\n"; + abort(); + } +} + +static void executeMulInst(GenericValue &Dest, GenericValue Src1, + GenericValue Src2, const Type *Ty) { + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_BINOP1(*, Ty); + IMPLEMENT_BINARY_OPERATOR(*, Float); + IMPLEMENT_BINARY_OPERATOR(*, Double); + default: + cerr << "Unhandled type for Mul instruction: " << *Ty << "\n"; + abort(); + } +} + +static void executeFDivInst(GenericValue &Dest, GenericValue Src1, + GenericValue Src2, const Type *Ty) { + switch (Ty->getTypeID()) { + IMPLEMENT_BINARY_OPERATOR(/, Float); + IMPLEMENT_BINARY_OPERATOR(/, Double); + default: + cerr << "Unhandled type for FDiv instruction: " << *Ty << "\n"; + abort(); + } +} + +static void executeFRemInst(GenericValue &Dest, GenericValue Src1, + GenericValue Src2, const Type *Ty) { + switch (Ty->getTypeID()) { + case Type::FloatTyID: + Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal); + 
break; + case Type::DoubleTyID: + Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal); + break; + default: + cerr << "Unhandled type for Rem instruction: " << *Ty << "\n"; + abort(); + } +} + +#define IMPLEMENT_INTEGER_ICMP(OP, TY) \ + case Type::IntegerTyID: \ + Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \ + break; + +// Handle pointers specially because they must be compared with only as much +// width as the host has. We _do not_ want to be comparing 64 bit values when +// running on a 32-bit target, otherwise the upper 32 bits might mess up +// comparisons if they contain garbage. +#define IMPLEMENT_POINTER_ICMP(OP) \ + case Type::PointerTyID: \ + Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \ + (void*)(intptr_t)Src2.PointerVal); \ + break; + +static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(eq,Ty); + IMPLEMENT_POINTER_ICMP(==); + default: + cerr << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(ne,Ty); + IMPLEMENT_POINTER_ICMP(!=); + default: + cerr << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(ult,Ty); + IMPLEMENT_POINTER_ICMP(<); + default: + cerr << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(slt,Ty); + IMPLEMENT_POINTER_ICMP(<); + default: + cerr << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(ugt,Ty); + IMPLEMENT_POINTER_ICMP(>); + default: + cerr << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(sgt,Ty); + IMPLEMENT_POINTER_ICMP(>); + default: + cerr << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(ule,Ty); + IMPLEMENT_POINTER_ICMP(<=); + default: + cerr << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(sle,Ty); + IMPLEMENT_POINTER_ICMP(<=); + default: + cerr << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(uge,Ty); + 
IMPLEMENT_POINTER_ICMP(>=); + default: + cerr << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_INTEGER_ICMP(sge,Ty); + IMPLEMENT_POINTER_ICMP(>=); + default: + cerr << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +void Interpreter::visitICmpInst(ICmpInst &I) { + ExecutionContext &SF = ECStack.back(); + const Type *Ty = I.getOperand(0)->getType(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue R; // Result + + switch (I.getPredicate()) { + case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break; + case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break; + case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break; + case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break; + case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break; + case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break; + case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break; + case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break; + case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break; + case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break; + default: + cerr << "Don't know how to handle this ICmp predicate!\n-->" << I; + abort(); + } + + SetValue(&I, R, SF); +} + +#define IMPLEMENT_FCMP(OP, TY) \ + case Type::TY##TyID: \ + Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \ + break + +static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_FCMP(==, Float); + IMPLEMENT_FCMP(==, Double); + default: + cerr << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_FCMP(!=, Float); + IMPLEMENT_FCMP(!=, Double); + + default: + cerr << "Unhandled type for FCmp NE instruction: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_FCMP(<=, Float); + IMPLEMENT_FCMP(<=, Double); + default: + cerr << "Unhandled type for FCmp LE instruction: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_FCMP(>=, Float); + IMPLEMENT_FCMP(>=, Double); + default: + cerr << "Unhandled type for FCmp GE instruction: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_FCMP(<, Float); + IMPLEMENT_FCMP(<, Double); + default: + cerr << "Unhandled type for FCmp LT instruction: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + switch (Ty->getTypeID()) { + IMPLEMENT_FCMP(>, Float); + IMPLEMENT_FCMP(>, Double); + default: + cerr 
<< "Unhandled type for FCmp GT instruction: " << *Ty << "\n"; + abort(); + } + return Dest; +} + +#define IMPLEMENT_UNORDERED(TY, X,Y) \ + if (TY == Type::FloatTy) { \ + if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \ + Dest.IntVal = APInt(1,true); \ + return Dest; \ + } \ + } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \ + Dest.IntVal = APInt(1,true); \ + return Dest; \ + } + + +static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + IMPLEMENT_UNORDERED(Ty, Src1, Src2) + return executeFCMP_OEQ(Src1, Src2, Ty); +} + +static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + IMPLEMENT_UNORDERED(Ty, Src1, Src2) + return executeFCMP_ONE(Src1, Src2, Ty); +} + +static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + IMPLEMENT_UNORDERED(Ty, Src1, Src2) + return executeFCMP_OLE(Src1, Src2, Ty); +} + +static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + IMPLEMENT_UNORDERED(Ty, Src1, Src2) + return executeFCMP_OGE(Src1, Src2, Ty); +} + +static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + IMPLEMENT_UNORDERED(Ty, Src1, Src2) + return executeFCMP_OLT(Src1, Src2, Ty); +} + +static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + IMPLEMENT_UNORDERED(Ty, Src1, Src2) + return executeFCMP_OGT(Src1, Src2, Ty); +} + +static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + if (Ty == Type::FloatTy) + Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal && + Src2.FloatVal == Src2.FloatVal)); + else + Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal && + Src2.DoubleVal == Src2.DoubleVal)); + return Dest; +} + +static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2, + const Type *Ty) { + GenericValue Dest; + if (Ty == Type::FloatTy) + Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal || + Src2.FloatVal != Src2.FloatVal)); + else + Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal || + Src2.DoubleVal != Src2.DoubleVal)); + return Dest; +} + +void Interpreter::visitFCmpInst(FCmpInst &I) { + ExecutionContext &SF = ECStack.back(); + const Type *Ty = I.getOperand(0)->getType(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue R; // Result + + switch (I.getPredicate()) { + case FCmpInst::FCMP_FALSE: R.IntVal = APInt(1,false); break; + case FCmpInst::FCMP_TRUE: R.IntVal = APInt(1,true); break; + case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break; + case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break; + case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break; + case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break; + case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break; + case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break; + case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break; + case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break; + case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break; + case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break; + case FCmpInst::FCMP_ULE: R = 
executeFCMP_ULE(Src1, Src2, Ty); break; + case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break; + case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break; + case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break; + default: + cerr << "Don't know how to handle this FCmp predicate!\n-->" << I; + abort(); + } + + SetValue(&I, R, SF); +} + +static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1, + GenericValue Src2, const Type *Ty) { + GenericValue Result; + switch (predicate) { + case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty); + case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty); + case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty); + case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty); + case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty); + case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty); + case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty); + case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty); + case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty); + case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty); + case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty); + case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty); + case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty); + case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty); + case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty); + case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty); + case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty); + case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty); + case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty); + case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty); + case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty); + case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty); + case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty); + case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty); + case FCmpInst::FCMP_FALSE: { + GenericValue Result; + Result.IntVal = APInt(1, false); + return Result; + } + case FCmpInst::FCMP_TRUE: { + GenericValue Result; + Result.IntVal = APInt(1, true); + return Result; + } + default: + cerr << "Unhandled Cmp predicate\n"; + abort(); + } +} + +void Interpreter::visitBinaryOperator(BinaryOperator &I) { + ExecutionContext &SF = ECStack.back(); + const Type *Ty = I.getOperand(0)->getType(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue R; // Result + + switch (I.getOpcode()) { + case Instruction::Add: executeAddInst (R, Src1, Src2, Ty); break; + case Instruction::Sub: executeSubInst (R, Src1, Src2, Ty); break; + case Instruction::Mul: executeMulInst (R, Src1, Src2, Ty); break; + case Instruction::FDiv: executeFDivInst (R, Src1, Src2, Ty); break; + case Instruction::FRem: executeFRemInst (R, Src1, Src2, Ty); break; + case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break; + case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break; + case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break; + case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break; + case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break; + case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break; + case 
Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break; + default: + cerr << "Don't know how to handle this binary operator!\n-->" << I; + abort(); + } + + SetValue(&I, R, SF); +} + +static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2, + GenericValue Src3) { + return Src1.IntVal == 0 ? Src3 : Src2; +} + +void Interpreter::visitSelectInst(SelectInst &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue Src3 = getOperandValue(I.getOperand(2), SF); + GenericValue R = executeSelectInst(Src1, Src2, Src3); + SetValue(&I, R, SF); +} + + +//===----------------------------------------------------------------------===// +// Terminator Instruction Implementations +//===----------------------------------------------------------------------===// + +void Interpreter::exitCalled(GenericValue GV) { + // runAtExitHandlers() assumes there are no stack frames, but + // if exit() was called, then it had a stack frame. Blow away + // the stack before interpreting atexit handlers. + ECStack.clear (); + runAtExitHandlers (); + exit (GV.IntVal.zextOrTrunc(32).getZExtValue()); +} + +/// Pop the last stack frame off of ECStack and then copy the result +/// back into the result variable if we are not returning void. The +/// result variable may be the ExitValue, or the Value of the calling +/// CallInst if there was a previous stack frame. This method may +/// invalidate any ECStack iterators you have. This method also takes +/// care of switching to the normal destination BB, if we are returning +/// from an invoke. +/// +void Interpreter::popStackAndReturnValueToCaller (const Type *RetTy, + GenericValue Result) { + // Pop the current stack frame. + ECStack.pop_back(); + + if (ECStack.empty()) { // Finished main. Put result into exit code... + if (RetTy && RetTy->isInteger()) { // Nonvoid return type? + ExitValue = Result; // Capture the exit value of the program + } else { + memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped)); + } + } else { + // If we have a previous stack frame, and we have a previous call, + // fill in the return value... + ExecutionContext &CallingSF = ECStack.back(); + if (Instruction *I = CallingSF.Caller.getInstruction()) { + if (CallingSF.Caller.getType() != Type::VoidTy) // Save result... + SetValue(I, Result, CallingSF); + if (InvokeInst *II = dyn_cast<InvokeInst> (I)) + SwitchToNewBasicBlock (II->getNormalDest (), CallingSF); + CallingSF.Caller = CallSite(); // We returned from the call... + } + } +} + +void Interpreter::visitReturnInst(ReturnInst &I) { + ExecutionContext &SF = ECStack.back(); + const Type *RetTy = Type::VoidTy; + GenericValue Result; + + // Save away the return value... 
(if we are not 'ret void') + if (I.getNumOperands()) { + RetTy = I.getReturnValue()->getType(); + Result = getOperandValue(I.getReturnValue(), SF); + } + + popStackAndReturnValueToCaller(RetTy, Result); +} + +void Interpreter::visitUnwindInst(UnwindInst &I) { + // Unwind stack + Instruction *Inst; + do { + ECStack.pop_back (); + if (ECStack.empty ()) + abort (); + Inst = ECStack.back ().Caller.getInstruction (); + } while (!(Inst && isa<InvokeInst> (Inst))); + + // Return from invoke + ExecutionContext &InvokingSF = ECStack.back (); + InvokingSF.Caller = CallSite (); + + // Go to exceptional destination BB of invoke instruction + SwitchToNewBasicBlock(cast<InvokeInst>(Inst)->getUnwindDest(), InvokingSF); +} + +void Interpreter::visitUnreachableInst(UnreachableInst &I) { + cerr << "ERROR: Program executed an 'unreachable' instruction!\n"; + abort(); +} + +void Interpreter::visitBranchInst(BranchInst &I) { + ExecutionContext &SF = ECStack.back(); + BasicBlock *Dest; + + Dest = I.getSuccessor(0); // Uncond branches have a fixed dest... + if (!I.isUnconditional()) { + Value *Cond = I.getCondition(); + if (getOperandValue(Cond, SF).IntVal == 0) // If false cond... + Dest = I.getSuccessor(1); + } + SwitchToNewBasicBlock(Dest, SF); +} + +void Interpreter::visitSwitchInst(SwitchInst &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue CondVal = getOperandValue(I.getOperand(0), SF); + const Type *ElTy = I.getOperand(0)->getType(); + + // Check to see if any of the cases match... + BasicBlock *Dest = 0; + for (unsigned i = 2, e = I.getNumOperands(); i != e; i += 2) + if (executeICMP_EQ(CondVal, getOperandValue(I.getOperand(i), SF), ElTy) + .IntVal != 0) { + Dest = cast<BasicBlock>(I.getOperand(i+1)); + break; + } + + if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default + SwitchToNewBasicBlock(Dest, SF); +} + +// SwitchToNewBasicBlock - This method is used to jump to a new basic block. +// This function handles the actual updating of block and instruction iterators +// as well as execution of all of the PHI nodes in the destination block. +// +// This method does this because all of the PHI nodes must be executed +// atomically, reading their inputs before any of the results are updated. Not +// doing this can cause problems if the PHI nodes depend on other PHI nodes for +// their inputs. If the input PHI node is updated before it is read, incorrect +// results can happen. Thus we use a two phase approach. +// +void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){ + BasicBlock *PrevBB = SF.CurBB; // Remember where we came from... + SF.CurBB = Dest; // Update CurBB to branch destination + SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr... + + if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do + + // Loop over all of the PHI nodes in the current block, reading their inputs. + std::vector<GenericValue> ResultValues; + + for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) { + // Search for the value corresponding to this previous bb... + int i = PN->getBasicBlockIndex(PrevBB); + assert(i != -1 && "PHINode doesn't contain entry for predecessor??"); + Value *IncomingValue = PN->getIncomingValue(i); + + // Save the incoming value for this PHI node... + ResultValues.push_back(getOperandValue(IncomingValue, SF)); + } + + // Now loop over all of the PHI nodes setting their values... 
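+  // (Illustration of why the copy is taken first: given mutually dependent
+  // PHI nodes in a hypothetical block, e.g.
+  //   %x = phi i32 [ 0, %entry ], [ %y, %loop ]
+  //   %y = phi i32 [ 1, %entry ], [ %x, %loop ]
+  // updating %x before %y's incoming value is read would hand %y the new %x
+  // rather than the value %x held on entry to the block.)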
+ SF.CurInst = SF.CurBB->begin(); + for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) { + PHINode *PN = cast<PHINode>(SF.CurInst); + SetValue(PN, ResultValues[i], SF); + } +} + +//===----------------------------------------------------------------------===// +// Memory Instruction Implementations +//===----------------------------------------------------------------------===// + +void Interpreter::visitAllocationInst(AllocationInst &I) { + ExecutionContext &SF = ECStack.back(); + + const Type *Ty = I.getType()->getElementType(); // Type to be allocated + + // Get the number of elements being allocated by the array... + unsigned NumElements = + getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue(); + + unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty); + + // Avoid malloc-ing zero bytes, use max()... + unsigned MemToAlloc = std::max(1U, NumElements * TypeSize); + + // Allocate enough memory to hold the type... + void *Memory = malloc(MemToAlloc); + + DOUT << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x " + << NumElements << " (Total: " << MemToAlloc << ") at " + << uintptr_t(Memory) << '\n'; + + GenericValue Result = PTOGV(Memory); + assert(Result.PointerVal != 0 && "Null pointer returned by malloc!"); + SetValue(&I, Result, SF); + + if (I.getOpcode() == Instruction::Alloca) + ECStack.back().Allocas.add(Memory); +} + +void Interpreter::visitFreeInst(FreeInst &I) { + ExecutionContext &SF = ECStack.back(); + assert(isa<PointerType>(I.getOperand(0)->getType()) && "Freeing nonptr?"); + GenericValue Value = getOperandValue(I.getOperand(0), SF); + // TODO: Check to make sure memory is allocated + free(GVTOP(Value)); // Free memory +} + +// getElementOffset - The workhorse for getelementptr. +// +GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I, + gep_type_iterator E, + ExecutionContext &SF) { + assert(isa<PointerType>(Ptr->getType()) && + "Cannot getElementOffset of a nonpointer type!"); + + uint64_t Total = 0; + + for (; I != E; ++I) { + if (const StructType *STy = dyn_cast<StructType>(*I)) { + const StructLayout *SLO = TD.getStructLayout(STy); + + const ConstantInt *CPU = cast<ConstantInt>(I.getOperand()); + unsigned Index = unsigned(CPU->getZExtValue()); + + Total += SLO->getElementOffset(Index); + } else { + const SequentialType *ST = cast<SequentialType>(*I); + // Get the index number for the array... which must be long type... 
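+      // (Worked example with hypothetical values, assuming a 4-byte i32:
+      // for "getelementptr [10 x i32]* %p, i64 0, i64 3" the array step
+      // below contributes 3 * 4 = 12 bytes to Total.)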
+ GenericValue IdxGV = getOperandValue(I.getOperand(), SF); + + int64_t Idx; + unsigned BitWidth = + cast<IntegerType>(I.getOperand()->getType())->getBitWidth(); + if (BitWidth == 32) + Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue(); + else { + assert(BitWidth == 64 && "Invalid index type for getelementptr"); + Idx = (int64_t)IdxGV.IntVal.getZExtValue(); + } + Total += TD.getTypeAllocSize(ST->getElementType())*Idx; + } + } + + GenericValue Result; + Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total; + DOUT << "GEP Index " << Total << " bytes.\n"; + return Result; +} + +void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, TheEE->executeGEPOperation(I.getPointerOperand(), + gep_type_begin(I), gep_type_end(I), SF), SF); +} + +void Interpreter::visitLoadInst(LoadInst &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue SRC = getOperandValue(I.getPointerOperand(), SF); + GenericValue *Ptr = (GenericValue*)GVTOP(SRC); + GenericValue Result; + LoadValueFromMemory(Result, Ptr, I.getType()); + SetValue(&I, Result, SF); + if (I.isVolatile() && PrintVolatile) + cerr << "Volatile load " << I; +} + +void Interpreter::visitStoreInst(StoreInst &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue Val = getOperandValue(I.getOperand(0), SF); + GenericValue SRC = getOperandValue(I.getPointerOperand(), SF); + StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC), + I.getOperand(0)->getType()); + if (I.isVolatile() && PrintVolatile) + cerr << "Volatile store: " << I; +} + +//===----------------------------------------------------------------------===// +// Miscellaneous Instruction Implementations +//===----------------------------------------------------------------------===// + +void Interpreter::visitCallSite(CallSite CS) { + ExecutionContext &SF = ECStack.back(); + + // Check to see if this is an intrinsic function call... + Function *F = CS.getCalledFunction(); + if (F && F->isDeclaration ()) + switch (F->getIntrinsicID()) { + case Intrinsic::not_intrinsic: + break; + case Intrinsic::vastart: { // va_start + GenericValue ArgIndex; + ArgIndex.UIntPairVal.first = ECStack.size() - 1; + ArgIndex.UIntPairVal.second = 0; + SetValue(CS.getInstruction(), ArgIndex, SF); + return; + } + case Intrinsic::vaend: // va_end is a noop for the interpreter + return; + case Intrinsic::vacopy: // va_copy: dest = src + SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF); + return; + default: + // If it is an unknown intrinsic function, use the intrinsic lowering + // class to transform it into hopefully tasty LLVM code. + // + BasicBlock::iterator me(CS.getInstruction()); + BasicBlock *Parent = CS.getInstruction()->getParent(); + bool atBegin(Parent->begin() == me); + if (!atBegin) + --me; + IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction())); + + // Restore the CurInst pointer to the first instruction newly inserted, if + // any. + if (atBegin) { + SF.CurInst = Parent->begin(); + } else { + SF.CurInst = me; + ++SF.CurInst; + } + return; + } + + + SF.Caller = CS; + std::vector<GenericValue> ArgVals; + const unsigned NumArgs = SF.Caller.arg_size(); + ArgVals.reserve(NumArgs); + uint16_t pNum = 1; + for (CallSite::arg_iterator i = SF.Caller.arg_begin(), + e = SF.Caller.arg_end(); i != e; ++i, ++pNum) { + Value *V = *i; + ArgVals.push_back(getOperandValue(V, SF)); + // Promote all integral types whose size is < sizeof(i32) into i32. 
+ // We do this by zero or sign extending the value as appropriate + // according to the parameter attributes + const Type *Ty = V->getType(); + if (Ty->isInteger() && (ArgVals.back().IntVal.getBitWidth() < 32)) { + if (CS.paramHasAttr(pNum, Attribute::ZExt)) + ArgVals.back().IntVal = ArgVals.back().IntVal.zext(32); + else if (CS.paramHasAttr(pNum, Attribute::SExt)) + ArgVals.back().IntVal = ArgVals.back().IntVal.sext(32); + } + } + + // To handle indirect calls, we must get the pointer value from the argument + // and treat it as a function pointer. + GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF); + callFunction((Function*)GVTOP(SRC), ArgVals); +} + +void Interpreter::visitShl(BinaryOperator &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue Dest; + if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth()) + Dest.IntVal = Src1.IntVal.shl(Src2.IntVal.getZExtValue()); + else + Dest.IntVal = Src1.IntVal; + + SetValue(&I, Dest, SF); +} + +void Interpreter::visitLShr(BinaryOperator &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue Dest; + if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth()) + Dest.IntVal = Src1.IntVal.lshr(Src2.IntVal.getZExtValue()); + else + Dest.IntVal = Src1.IntVal; + + SetValue(&I, Dest, SF); +} + +void Interpreter::visitAShr(BinaryOperator &I) { + ExecutionContext &SF = ECStack.back(); + GenericValue Src1 = getOperandValue(I.getOperand(0), SF); + GenericValue Src2 = getOperandValue(I.getOperand(1), SF); + GenericValue Dest; + if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth()) + Dest.IntVal = Src1.IntVal.ashr(Src2.IntVal.getZExtValue()); + else + Dest.IntVal = Src1.IntVal; + + SetValue(&I, Dest, SF); +} + +GenericValue Interpreter::executeTruncInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + const IntegerType *DITy = cast<IntegerType>(DstTy); + unsigned DBitWidth = DITy->getBitWidth(); + Dest.IntVal = Src.IntVal.trunc(DBitWidth); + return Dest; +} + +GenericValue Interpreter::executeSExtInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + const IntegerType *DITy = cast<IntegerType>(DstTy); + unsigned DBitWidth = DITy->getBitWidth(); + Dest.IntVal = Src.IntVal.sext(DBitWidth); + return Dest; +} + +GenericValue Interpreter::executeZExtInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + const IntegerType *DITy = cast<IntegerType>(DstTy); + unsigned DBitWidth = DITy->getBitWidth(); + Dest.IntVal = Src.IntVal.zext(DBitWidth); + return Dest; +} + +GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(SrcVal->getType() == Type::DoubleTy && DstTy == Type::FloatTy && + "Invalid FPTrunc instruction"); + Dest.FloatVal = (float) Src.DoubleVal; + return Dest; +} + +GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(SrcVal->getType() == Type::FloatTy && DstTy == Type::DoubleTy && + "Invalid FPTrunc instruction"); + Dest.DoubleVal = (double) 
Src.FloatVal; + return Dest; +} + +GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + const Type *SrcTy = SrcVal->getType(); + uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth(); + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(SrcTy->isFloatingPoint() && "Invalid FPToUI instruction"); + + if (SrcTy->getTypeID() == Type::FloatTyID) + Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth); + else + Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth); + return Dest; +} + +GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + const Type *SrcTy = SrcVal->getType(); + uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth(); + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(SrcTy->isFloatingPoint() && "Invalid FPToSI instruction"); + + if (SrcTy->getTypeID() == Type::FloatTyID) + Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth); + else + Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth); + return Dest; +} + +GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(DstTy->isFloatingPoint() && "Invalid UIToFP instruction"); + + if (DstTy->getTypeID() == Type::FloatTyID) + Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal); + else + Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal); + return Dest; +} + +GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(DstTy->isFloatingPoint() && "Invalid SIToFP instruction"); + + if (DstTy->getTypeID() == Type::FloatTyID) + Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal); + else + Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal); + return Dest; + +} + +GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth(); + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(isa<PointerType>(SrcVal->getType()) && "Invalid PtrToInt instruction"); + + Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal); + return Dest; +} + +GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + assert(isa<PointerType>(DstTy) && "Invalid PtrToInt instruction"); + + uint32_t PtrSize = TD.getPointerSizeInBits(); + if (PtrSize != Src.IntVal.getBitWidth()) + Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize); + + Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue())); + return Dest; +} + +GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF) { + + const Type *SrcTy = SrcVal->getType(); + GenericValue Dest, Src = getOperandValue(SrcVal, SF); + if (isa<PointerType>(DstTy)) { + assert(isa<PointerType>(SrcTy) && "Invalid BitCast"); + Dest.PointerVal = Src.PointerVal; + } else if (DstTy->isInteger()) { + if (SrcTy == Type::FloatTy) { + Dest.IntVal.zext(sizeof(Src.FloatVal) * CHAR_BIT); + Dest.IntVal.floatToBits(Src.FloatVal); + } else if (SrcTy == Type::DoubleTy) { + Dest.IntVal.zext(sizeof(Src.DoubleVal) * CHAR_BIT); + Dest.IntVal.doubleToBits(Src.DoubleVal); + } else if (SrcTy->isInteger()) { + Dest.IntVal = Src.IntVal; + } else + 
assert(0 && "Invalid BitCast"); + } else if (DstTy == Type::FloatTy) { + if (SrcTy->isInteger()) + Dest.FloatVal = Src.IntVal.bitsToFloat(); + else + Dest.FloatVal = Src.FloatVal; + } else if (DstTy == Type::DoubleTy) { + if (SrcTy->isInteger()) + Dest.DoubleVal = Src.IntVal.bitsToDouble(); + else + Dest.DoubleVal = Src.DoubleVal; + } else + assert(0 && "Invalid Bitcast"); + + return Dest; +} + +void Interpreter::visitTruncInst(TruncInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitSExtInst(SExtInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitZExtInst(ZExtInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitFPTruncInst(FPTruncInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitFPExtInst(FPExtInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitUIToFPInst(UIToFPInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitSIToFPInst(SIToFPInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitFPToUIInst(FPToUIInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitFPToSIInst(FPToSIInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitPtrToIntInst(PtrToIntInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitIntToPtrInst(IntToPtrInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF); +} + +void Interpreter::visitBitCastInst(BitCastInst &I) { + ExecutionContext &SF = ECStack.back(); + SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF); +} + +#define IMPLEMENT_VAARG(TY) \ + case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break + +void Interpreter::visitVAArgInst(VAArgInst &I) { + ExecutionContext &SF = ECStack.back(); + + // Get the incoming valist parameter. LLI treats the valist as a + // (ec-stack-depth var-arg-index) pair. + GenericValue VAList = getOperandValue(I.getOperand(0), SF); + GenericValue Dest; + GenericValue Src = ECStack[VAList.UIntPairVal.first] + .VarArgs[VAList.UIntPairVal.second]; + const Type *Ty = I.getType(); + switch (Ty->getTypeID()) { + case Type::IntegerTyID: Dest.IntVal = Src.IntVal; + IMPLEMENT_VAARG(Pointer); + IMPLEMENT_VAARG(Float); + IMPLEMENT_VAARG(Double); + default: + cerr << "Unhandled dest type for vaarg instruction: " << *Ty << "\n"; + abort(); + } + + // Set the Value of this Instruction. + SetValue(&I, Dest, SF); + + // Move the pointer to the next vararg. 
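+  // (Note on the representation: VAList is this frame's copy of the
+  // (stack-depth, vararg-index) pair created by the va_start handling in
+  // visitCallSite; stepping .second moves to the next slot in the indexed
+  // frame's VarArgs vector.)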
+ ++VAList.UIntPairVal.second; +} + +GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE, + ExecutionContext &SF) { + switch (CE->getOpcode()) { + case Instruction::Trunc: + return executeTruncInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::ZExt: + return executeZExtInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::SExt: + return executeSExtInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::FPTrunc: + return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::FPExt: + return executeFPExtInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::UIToFP: + return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::SIToFP: + return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::FPToUI: + return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::FPToSI: + return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::PtrToInt: + return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::IntToPtr: + return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::BitCast: + return executeBitCastInst(CE->getOperand(0), CE->getType(), SF); + case Instruction::GetElementPtr: + return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE), + gep_type_end(CE), SF); + case Instruction::FCmp: + case Instruction::ICmp: + return executeCmpInst(CE->getPredicate(), + getOperandValue(CE->getOperand(0), SF), + getOperandValue(CE->getOperand(1), SF), + CE->getOperand(0)->getType()); + case Instruction::Select: + return executeSelectInst(getOperandValue(CE->getOperand(0), SF), + getOperandValue(CE->getOperand(1), SF), + getOperandValue(CE->getOperand(2), SF)); + default : + break; + } + + // The cases below here require a GenericValue parameter for the result + // so we initialize one, compute it and then return it. 
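+  // (For instance, a constant expression over a hypothetical global @g such
+  // as "add (i32 ptrtoint (i32* @g to i32), i32 4)" falls through the switch
+  // above: the ptrtoint operand is folded recursively via getOperandValue,
+  // and the outer add is handled by the Add case below.)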
+ GenericValue Op0 = getOperandValue(CE->getOperand(0), SF); + GenericValue Op1 = getOperandValue(CE->getOperand(1), SF); + GenericValue Dest; + const Type * Ty = CE->getOperand(0)->getType(); + switch (CE->getOpcode()) { + case Instruction::Add: executeAddInst (Dest, Op0, Op1, Ty); break; + case Instruction::Sub: executeSubInst (Dest, Op0, Op1, Ty); break; + case Instruction::Mul: executeMulInst (Dest, Op0, Op1, Ty); break; + case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break; + case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break; + case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break; + case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break; + case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break; + case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break; + case Instruction::And: Dest.IntVal = Op0.IntVal.And(Op1.IntVal); break; + case Instruction::Or: Dest.IntVal = Op0.IntVal.Or(Op1.IntVal); break; + case Instruction::Xor: Dest.IntVal = Op0.IntVal.Xor(Op1.IntVal); break; + case Instruction::Shl: + Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue()); + break; + case Instruction::LShr: + Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue()); + break; + case Instruction::AShr: + Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue()); + break; + default: + cerr << "Unhandled ConstantExpr: " << *CE << "\n"; + abort(); + return GenericValue(); + } + return Dest; +} + +GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) { + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { + return getConstantExprValue(CE, SF); + } else if (Constant *CPV = dyn_cast<Constant>(V)) { + return getConstantValue(CPV); + } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + return PTOGV(getPointerToGlobal(GV)); + } else { + return SF.Values[V]; + } +} + +//===----------------------------------------------------------------------===// +// Dispatch and Execution Code +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// callFunction - Execute the specified function... +// +void Interpreter::callFunction(Function *F, + const std::vector<GenericValue> &ArgVals) { + assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 || + ECStack.back().Caller.arg_size() == ArgVals.size()) && + "Incorrect number of arguments passed into function call!"); + // Make a new stack frame... and fill it in. + ECStack.push_back(ExecutionContext()); + ExecutionContext &StackFrame = ECStack.back(); + StackFrame.CurFunction = F; + + // Special handling for external functions. + if (F->isDeclaration()) { + GenericValue Result = callExternalFunction (F, ArgVals); + // Simulate a 'ret' instruction of the appropriate type. + popStackAndReturnValueToCaller (F->getReturnType (), Result); + return; + } + + // Get pointers to first LLVM BB & Instruction in function. + StackFrame.CurBB = F->begin(); + StackFrame.CurInst = StackFrame.CurBB->begin(); + + // Run through the function arguments and initialize their values... + assert((ArgVals.size() == F->arg_size() || + (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&& + "Invalid number of values passed to function invocation!"); + + // Handle non-varargs arguments... 
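+  // (Example, assuming a hypothetical varargs definition such as
+  // "define i32 @sum(i32 %n, ...)": the first incoming GenericValue binds to
+  // %n in the loop below, and the VarArgs.assign that follows captures the
+  // remaining values for later retrieval by va_arg.)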
+ unsigned i = 0; + for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); + AI != E; ++AI, ++i) + SetValue(AI, ArgVals[i], StackFrame); + + // Handle varargs arguments... + StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end()); +} + + +void Interpreter::run() { + while (!ECStack.empty()) { + // Interpret a single instruction & increment the "PC". + ExecutionContext &SF = ECStack.back(); // Current stack frame + Instruction &I = *SF.CurInst++; // Increment before execute + + // Track the number of dynamic instructions executed. + ++NumDynamicInsts; + + DOUT << "About to interpret: " << I; + visit(I); // Dispatch to one of the visit* methods... +#if 0 + // This is not safe, as visiting the instruction could lower it and free I. +#ifndef NDEBUG + if (!isa<CallInst>(I) && !isa<InvokeInst>(I) && + I.getType() != Type::VoidTy) { + DOUT << " --> "; + const GenericValue &Val = SF.Values[&I]; + switch (I.getType()->getTypeID()) { + default: assert(0 && "Invalid GenericValue Type"); + case Type::VoidTyID: DOUT << "void"; break; + case Type::FloatTyID: DOUT << "float " << Val.FloatVal; break; + case Type::DoubleTyID: DOUT << "double " << Val.DoubleVal; break; + case Type::PointerTyID: DOUT << "void* " << intptr_t(Val.PointerVal); + break; + case Type::IntegerTyID: + DOUT << "i" << Val.IntVal.getBitWidth() << " " + << Val.IntVal.toStringUnsigned(10) + << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n"; + break; + } + } +#endif +#endif + } +} diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp new file mode 100644 index 0000000000000..160f1ba9f6c51 --- /dev/null +++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp @@ -0,0 +1,542 @@ +//===-- ExternalFunctions.cpp - Implement External Functions --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains both code to deal with invoking "external" functions, but +// also contains code that implements "exported" external functions. +// +// There are currently two mechanisms for handling external functions in the +// Interpreter. The first is to implement lle_* wrapper functions that are +// specific to well-known library functions which manually translate the +// arguments from GenericValues and make the call. If such a wrapper does +// not exist, and libffi is available, then the Interpreter will attempt to +// invoke the function using libffi, after finding its address. 
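+// For example, a program's call to atexit() can be dispatched to the wrapper
+// defined below as lle_X_atexit, which unpacks its single GenericValue
+// argument and registers the handler with the interpreter.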
+// +//===----------------------------------------------------------------------===// + +#include "Interpreter.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Config/config.h" // Detect libffi +#include "llvm/Support/Streams.h" +#include "llvm/System/DynamicLibrary.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Support/ManagedStatic.h" +#include <csignal> +#include <cstdio> +#include <map> +#include <cmath> +#include <cstring> + +#ifdef HAVE_FFI_CALL +#ifdef HAVE_FFI_H +#include <ffi.h> +#define USE_LIBFFI +#elif HAVE_FFI_FFI_H +#include <ffi/ffi.h> +#define USE_LIBFFI +#endif +#endif + +using namespace llvm; + +typedef GenericValue (*ExFunc)(const FunctionType *, + const std::vector<GenericValue> &); +static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions; +static std::map<std::string, ExFunc> FuncNames; + +#ifdef USE_LIBFFI +typedef void (*RawFunc)(void); +static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions; +#endif + +static Interpreter *TheInterpreter; + +static char getTypeID(const Type *Ty) { + switch (Ty->getTypeID()) { + case Type::VoidTyID: return 'V'; + case Type::IntegerTyID: + switch (cast<IntegerType>(Ty)->getBitWidth()) { + case 1: return 'o'; + case 8: return 'B'; + case 16: return 'S'; + case 32: return 'I'; + case 64: return 'L'; + default: return 'N'; + } + case Type::FloatTyID: return 'F'; + case Type::DoubleTyID: return 'D'; + case Type::PointerTyID: return 'P'; + case Type::FunctionTyID:return 'M'; + case Type::StructTyID: return 'T'; + case Type::ArrayTyID: return 'A'; + case Type::OpaqueTyID: return 'O'; + default: return 'U'; + } +} + +// Try to find address of external function given a Function object. +// Please note, that interpreter doesn't know how to assemble a +// real call in general case (this is JIT job), that's why it assumes, +// that all external functions has the same (and pretty "general") signature. +// The typical example of such functions are "lle_X_" ones. +static ExFunc lookupFunction(const Function *F) { + // Function not found, look it up... start by figuring out what the + // composite function name should be. + std::string ExtName = "lle_"; + const FunctionType *FT = F->getFunctionType(); + for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i) + ExtName += getTypeID(FT->getContainedType(i)); + ExtName += "_" + F->getName(); + + ExFunc FnPtr = FuncNames[ExtName]; + if (FnPtr == 0) + FnPtr = FuncNames["lle_X_"+F->getName()]; + if (FnPtr == 0) // Try calling a generic function... if it exists... + FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol( + ("lle_X_"+F->getName()).c_str()); + if (FnPtr != 0) + ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later + return FnPtr; +} + +#ifdef USE_LIBFFI +static ffi_type *ffiTypeFor(const Type *Ty) { + switch (Ty->getTypeID()) { + case Type::VoidTyID: return &ffi_type_void; + case Type::IntegerTyID: + switch (cast<IntegerType>(Ty)->getBitWidth()) { + case 8: return &ffi_type_sint8; + case 16: return &ffi_type_sint16; + case 32: return &ffi_type_sint32; + case 64: return &ffi_type_sint64; + } + case Type::FloatTyID: return &ffi_type_float; + case Type::DoubleTyID: return &ffi_type_double; + case Type::PointerTyID: return &ffi_type_pointer; + default: break; + } + // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc. 
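+  // (Reaching this point means the type has no single primitive ffi_type
+  // equivalent, for example a struct or array passed by value, so the
+  // interpreter aborts rather than guess at an ABI.)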
+ cerr << "Type could not be mapped for use with libffi.\n"; + abort(); + return NULL; +} + +static void *ffiValueFor(const Type *Ty, const GenericValue &AV, + void *ArgDataPtr) { + switch (Ty->getTypeID()) { + case Type::IntegerTyID: + switch (cast<IntegerType>(Ty)->getBitWidth()) { + case 8: { + int8_t *I8Ptr = (int8_t *) ArgDataPtr; + *I8Ptr = (int8_t) AV.IntVal.getZExtValue(); + return ArgDataPtr; + } + case 16: { + int16_t *I16Ptr = (int16_t *) ArgDataPtr; + *I16Ptr = (int16_t) AV.IntVal.getZExtValue(); + return ArgDataPtr; + } + case 32: { + int32_t *I32Ptr = (int32_t *) ArgDataPtr; + *I32Ptr = (int32_t) AV.IntVal.getZExtValue(); + return ArgDataPtr; + } + case 64: { + int64_t *I64Ptr = (int64_t *) ArgDataPtr; + *I64Ptr = (int64_t) AV.IntVal.getZExtValue(); + return ArgDataPtr; + } + } + case Type::FloatTyID: { + float *FloatPtr = (float *) ArgDataPtr; + *FloatPtr = AV.DoubleVal; + return ArgDataPtr; + } + case Type::DoubleTyID: { + double *DoublePtr = (double *) ArgDataPtr; + *DoublePtr = AV.DoubleVal; + return ArgDataPtr; + } + case Type::PointerTyID: { + void **PtrPtr = (void **) ArgDataPtr; + *PtrPtr = GVTOP(AV); + return ArgDataPtr; + } + default: break; + } + // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc. + cerr << "Type value could not be mapped for use with libffi.\n"; + abort(); + return NULL; +} + +static bool ffiInvoke(RawFunc Fn, Function *F, + const std::vector<GenericValue> &ArgVals, + const TargetData *TD, GenericValue &Result) { + ffi_cif cif; + const FunctionType *FTy = F->getFunctionType(); + const unsigned NumArgs = F->arg_size(); + + // TODO: We don't have type information about the remaining arguments, because + // this information is never passed into ExecutionEngine::runFunction(). + if (ArgVals.size() > NumArgs && F->isVarArg()) { + cerr << "Calling external var arg function '" << F->getName() + << "' is not supported by the Interpreter.\n"; + abort(); + } + + unsigned ArgBytes = 0; + + std::vector<ffi_type*> args(NumArgs); + for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end(); + A != E; ++A) { + const unsigned ArgNo = A->getArgNo(); + const Type *ArgTy = FTy->getParamType(ArgNo); + args[ArgNo] = ffiTypeFor(ArgTy); + ArgBytes += TD->getTypeStoreSize(ArgTy); + } + + uint8_t *ArgData = (uint8_t*) alloca(ArgBytes); + uint8_t *ArgDataPtr = ArgData; + std::vector<void*> values(NumArgs); + for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end(); + A != E; ++A) { + const unsigned ArgNo = A->getArgNo(); + const Type *ArgTy = FTy->getParamType(ArgNo); + values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr); + ArgDataPtr += TD->getTypeStoreSize(ArgTy); + } + + const Type *RetTy = FTy->getReturnType(); + ffi_type *rtype = ffiTypeFor(RetTy); + + if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, &args[0]) == FFI_OK) { + void *ret = NULL; + if (RetTy->getTypeID() != Type::VoidTyID) + ret = alloca(TD->getTypeStoreSize(RetTy)); + ffi_call(&cif, Fn, ret, &values[0]); + switch (RetTy->getTypeID()) { + case Type::IntegerTyID: + switch (cast<IntegerType>(RetTy)->getBitWidth()) { + case 8: Result.IntVal = APInt(8 , *(int8_t *) ret); break; + case 16: Result.IntVal = APInt(16, *(int16_t*) ret); break; + case 32: Result.IntVal = APInt(32, *(int32_t*) ret); break; + case 64: Result.IntVal = APInt(64, *(int64_t*) ret); break; + } + break; + case Type::FloatTyID: Result.FloatVal = *(float *) ret; break; + case Type::DoubleTyID: Result.DoubleVal = *(double*) ret; break; + case Type::PointerTyID: 
Result.PointerVal = *(void **) ret; break; + default: break; + } + return true; + } + + return false; +} +#endif // USE_LIBFFI + +GenericValue Interpreter::callExternalFunction(Function *F, + const std::vector<GenericValue> &ArgVals) { + TheInterpreter = this; + + // Do a lookup to see if the function is in our cache... this should just be a + // deferred annotation! + std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F); + if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F) + : FI->second) + return Fn(F->getFunctionType(), ArgVals); + +#ifdef USE_LIBFFI + std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F); + RawFunc RawFn; + if (RF == RawFunctions->end()) { + RawFn = (RawFunc)(intptr_t) + sys::DynamicLibrary::SearchForAddressOfSymbol(F->getName()); + if (RawFn != 0) + RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later + } else { + RawFn = RF->second; + } + + GenericValue Result; + if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result)) + return Result; +#endif // USE_LIBFFI + + cerr << "Tried to execute an unknown external function: " + << F->getType()->getDescription() << " " << F->getName() << "\n"; + if (F->getName() != "__main") + abort(); + return GenericValue(); +} + + +//===----------------------------------------------------------------------===// +// Functions "exported" to the running application... +// +extern "C" { // Don't add C++ manglings to llvm mangling :) + +// void atexit(Function*) +GenericValue lle_X_atexit(const FunctionType *FT, + const std::vector<GenericValue> &Args) { + assert(Args.size() == 1); + TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0])); + GenericValue GV; + GV.IntVal = 0; + return GV; +} + +// void exit(int) +GenericValue lle_X_exit(const FunctionType *FT, + const std::vector<GenericValue> &Args) { + TheInterpreter->exitCalled(Args[0]); + return GenericValue(); +} + +// void abort(void) +GenericValue lle_X_abort(const FunctionType *FT, + const std::vector<GenericValue> &Args) { + raise (SIGABRT); + return GenericValue(); +} + +// int sprintf(char *, const char *, ...) - a very rough implementation to make +// output useful. +GenericValue lle_X_sprintf(const FunctionType *FT, + const std::vector<GenericValue> &Args) { + char *OutputBuffer = (char *)GVTOP(Args[0]); + const char *FmtStr = (const char *)GVTOP(Args[1]); + unsigned ArgNo = 2; + + // printf should return # chars printed. This is completely incorrect, but + // close enough for now. + GenericValue GV; + GV.IntVal = APInt(32, strlen(FmtStr)); + while (1) { + switch (*FmtStr) { + case 0: return GV; // Null terminator... 
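+    // (The cases below copy ordinary characters through one at a time and
+    // re-render each '%' specifier with the host sprintf; e.g. "%d" consumes
+    // the next GenericValue and formats its zero-extended IntVal.)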
+ default: // Normal nonspecial character + sprintf(OutputBuffer++, "%c", *FmtStr++); + break; + case '\\': { // Handle escape codes + sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1)); + FmtStr += 2; OutputBuffer += 2; + break; + } + case '%': { // Handle format specifiers + char FmtBuf[100] = "", Buffer[1000] = ""; + char *FB = FmtBuf; + *FB++ = *FmtStr++; + char Last = *FB++ = *FmtStr++; + unsigned HowLong = 0; + while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' && + Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' && + Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' && + Last != 'p' && Last != 's' && Last != '%') { + if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's + Last = *FB++ = *FmtStr++; + } + *FB = 0; + + switch (Last) { + case '%': + strcpy(Buffer, "%"); break; + case 'c': + sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue())); + break; + case 'd': case 'i': + case 'u': case 'o': + case 'x': case 'X': + if (HowLong >= 1) { + if (HowLong == 1 && + TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 && + sizeof(long) < sizeof(int64_t)) { + // Make sure we use %lld with a 64 bit argument because we might be + // compiling LLI on a 32 bit compiler. + unsigned Size = strlen(FmtBuf); + FmtBuf[Size] = FmtBuf[Size-1]; + FmtBuf[Size+1] = 0; + FmtBuf[Size-1] = 'l'; + } + sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue()); + } else + sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue())); + break; + case 'e': case 'E': case 'g': case 'G': case 'f': + sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break; + case 'p': + sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break; + case 's': + sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break; + default: cerr << "<unknown printf code '" << *FmtStr << "'!>"; + ArgNo++; break; + } + strcpy(OutputBuffer, Buffer); + OutputBuffer += strlen(Buffer); + } + break; + } + } + return GV; +} + +// int printf(const char *, ...) - a very rough implementation to make output +// useful. +GenericValue lle_X_printf(const FunctionType *FT, + const std::vector<GenericValue> &Args) { + char Buffer[10000]; + std::vector<GenericValue> NewArgs; + NewArgs.push_back(PTOGV((void*)&Buffer[0])); + NewArgs.insert(NewArgs.end(), Args.begin(), Args.end()); + GenericValue GV = lle_X_sprintf(FT, NewArgs); + cout << Buffer; + return GV; +} + +static void ByteswapSCANFResults(const char *Fmt, void *Arg0, void *Arg1, + void *Arg2, void *Arg3, void *Arg4, void *Arg5, + void *Arg6, void *Arg7, void *Arg8) { + void *Args[] = { Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7, Arg8, 0 }; + + // Loop over the format string, munging read values as appropriate (performs + // byteswaps as necessary). + unsigned ArgNo = 0; + while (*Fmt) { + if (*Fmt++ == '%') { + // Read any flag characters that may be present... + bool Suppress = false; + bool Half = false; + bool Long = false; + bool LongLong = false; // long long or long double + + while (1) { + switch (*Fmt++) { + case '*': Suppress = true; break; + case 'a': /*Allocate = true;*/ break; // We don't need to track this + case 'h': Half = true; break; + case 'l': Long = true; break; + case 'q': + case 'L': LongLong = true; break; + default: + if (Fmt[-1] > '9' || Fmt[-1] < '0') // Ignore field width specs + goto Out; + } + } + Out: + + // Read the conversion character + if (!Suppress && Fmt[-1] != '%') { // Nothing to do? 
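+      // (Sketch of the decoding below for a hypothetical format "%d %lf":
+      // 'd' with no length flags yields a 4-byte Int32Ty slot, while 'f'
+      // with Long set by the preceding 'l' yields an 8-byte DoubleTy slot;
+      // each sized slot is then re-stored through the interpreter so any
+      // required byte swapping is applied.)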
+ unsigned Size = 0; + const Type *Ty = 0; + + switch (Fmt[-1]) { + case 'i': case 'o': case 'u': case 'x': case 'X': case 'n': case 'p': + case 'd': + if (Long || LongLong) { + Size = 8; Ty = Type::Int64Ty; + } else if (Half) { + Size = 4; Ty = Type::Int16Ty; + } else { + Size = 4; Ty = Type::Int32Ty; + } + break; + + case 'e': case 'g': case 'E': + case 'f': + if (Long || LongLong) { + Size = 8; Ty = Type::DoubleTy; + } else { + Size = 4; Ty = Type::FloatTy; + } + break; + + case 's': case 'c': case '[': // No byteswap needed + Size = 1; + Ty = Type::Int8Ty; + break; + + default: break; + } + + if (Size) { + GenericValue GV; + void *Arg = Args[ArgNo++]; + memcpy(&GV, Arg, Size); + TheInterpreter->StoreValueToMemory(GV, (GenericValue*)Arg, Ty); + } + } + } + } +} + +// int sscanf(const char *format, ...); +GenericValue lle_X_sscanf(const FunctionType *FT, + const std::vector<GenericValue> &args) { + assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!"); + + char *Args[10]; + for (unsigned i = 0; i < args.size(); ++i) + Args[i] = (char*)GVTOP(args[i]); + + GenericValue GV; + GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4], + Args[5], Args[6], Args[7], Args[8], Args[9])); + ByteswapSCANFResults(Args[1], Args[2], Args[3], Args[4], + Args[5], Args[6], Args[7], Args[8], Args[9], 0); + return GV; +} + +// int scanf(const char *format, ...); +GenericValue lle_X_scanf(const FunctionType *FT, + const std::vector<GenericValue> &args) { + assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!"); + + char *Args[10]; + for (unsigned i = 0; i < args.size(); ++i) + Args[i] = (char*)GVTOP(args[i]); + + GenericValue GV; + GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4], + Args[5], Args[6], Args[7], Args[8], Args[9])); + ByteswapSCANFResults(Args[0], Args[1], Args[2], Args[3], Args[4], + Args[5], Args[6], Args[7], Args[8], Args[9]); + return GV; +} + +// int fprintf(FILE *, const char *, ...) - a very rough implementation to make +// output useful. +GenericValue lle_X_fprintf(const FunctionType *FT, + const std::vector<GenericValue> &Args) { + assert(Args.size() >= 2); + char Buffer[10000]; + std::vector<GenericValue> NewArgs; + NewArgs.push_back(PTOGV(Buffer)); + NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end()); + GenericValue GV = lle_X_sprintf(FT, NewArgs); + + fputs(Buffer, (FILE *) GVTOP(Args[0])); + return GV; +} + +} // End extern "C" + + +void Interpreter::initializeExternalFunctions() { + FuncNames["lle_X_atexit"] = lle_X_atexit; + FuncNames["lle_X_exit"] = lle_X_exit; + FuncNames["lle_X_abort"] = lle_X_abort; + + FuncNames["lle_X_printf"] = lle_X_printf; + FuncNames["lle_X_sprintf"] = lle_X_sprintf; + FuncNames["lle_X_sscanf"] = lle_X_sscanf; + FuncNames["lle_X_scanf"] = lle_X_scanf; + FuncNames["lle_X_fprintf"] = lle_X_fprintf; +} + diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/lib/ExecutionEngine/Interpreter/Interpreter.cpp new file mode 100644 index 0000000000000..ded65d5467017 --- /dev/null +++ b/lib/ExecutionEngine/Interpreter/Interpreter.cpp @@ -0,0 +1,104 @@ +//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the top-level functionality for the LLVM interpreter. 
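+// (Top-level here means creation, the runFunction entry point, and atexit
+// handling; the per-instruction visit methods live in Execution.cpp.)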
+// This interpreter is designed to be a very simple, portable, inefficient +// interpreter. +// +//===----------------------------------------------------------------------===// + +#include "Interpreter.h" +#include "llvm/CodeGen/IntrinsicLowering.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/ModuleProvider.h" +#include <cstring> +using namespace llvm; + +namespace { + +static struct RegisterInterp { + RegisterInterp() { Interpreter::Register(); } +} InterpRegistrator; + +} + +namespace llvm { + void LinkInInterpreter() { + } +} + +/// create - Create a new interpreter object. This can never fail. +/// +ExecutionEngine *Interpreter::create(ModuleProvider *MP, std::string* ErrStr, + CodeGenOpt::Level OptLevel /*unused*/) { + // Tell this ModuleProvide to materialize and release the module + if (!MP->materializeModule(ErrStr)) + // We got an error, just return 0 + return 0; + + return new Interpreter(MP); +} + +//===----------------------------------------------------------------------===// +// Interpreter ctor - Initialize stuff +// +Interpreter::Interpreter(ModuleProvider *M) + : ExecutionEngine(M), TD(M->getModule()) { + + memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped)); + setTargetData(&TD); + // Initialize the "backend" + initializeExecutionEngine(); + initializeExternalFunctions(); + emitGlobals(); + + IL = new IntrinsicLowering(TD); +} + +Interpreter::~Interpreter() { + delete IL; +} + +void Interpreter::runAtExitHandlers () { + while (!AtExitHandlers.empty()) { + callFunction(AtExitHandlers.back(), std::vector<GenericValue>()); + AtExitHandlers.pop_back(); + run(); + } +} + +/// run - Start execution with the specified function and arguments. +/// +GenericValue +Interpreter::runFunction(Function *F, + const std::vector<GenericValue> &ArgValues) { + assert (F && "Function *F was null at entry to run()"); + + // Try extra hard not to pass extra args to a function that isn't + // expecting them. C programmers frequently bend the rules and + // declare main() with fewer parameters than it actually gets + // passed, and the interpreter barfs if you pass a function more + // parameters than it is declared to take. This does not attempt to + // take into account gratuitous differences in declared types, + // though. + std::vector<GenericValue> ActualArgs; + const unsigned ArgCount = F->getFunctionType()->getNumParams(); + for (unsigned i = 0; i < ArgCount; ++i) + ActualArgs.push_back(ArgValues[i]); + + // Set up the function call. + callFunction(F, ActualArgs); + + // Start executing the function. + run(); + + return ExitValue; +} + diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h new file mode 100644 index 0000000000000..8a285ecb82c00 --- /dev/null +++ b/lib/ExecutionEngine/Interpreter/Interpreter.h @@ -0,0 +1,241 @@ +//===-- Interpreter.h ------------------------------------------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This header file defines the interpreter structure +// +//===----------------------------------------------------------------------===// + +#ifndef LLI_INTERPRETER_H +#define LLI_INTERPRETER_H + +#include "llvm/Function.h" +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/Support/InstVisitor.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class IntrinsicLowering; +struct FunctionInfo; +template<typename T> class generic_gep_type_iterator; +class ConstantExpr; +typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator; + + +// AllocaHolder - Object to track all of the blocks of memory allocated by +// alloca. When the function returns, this object is popped off the execution +// stack, which causes the dtor to be run, which frees all the alloca'd memory. +// +class AllocaHolder { + friend class AllocaHolderHandle; + std::vector<void*> Allocations; + unsigned RefCnt; +public: + AllocaHolder() : RefCnt(0) {} + void add(void *mem) { Allocations.push_back(mem); } + ~AllocaHolder() { + for (unsigned i = 0; i < Allocations.size(); ++i) + free(Allocations[i]); + } +}; + +// AllocaHolderHandle gives AllocaHolder value semantics so we can stick it into +// a vector... +// +class AllocaHolderHandle { + AllocaHolder *H; +public: + AllocaHolderHandle() : H(new AllocaHolder()) { H->RefCnt++; } + AllocaHolderHandle(const AllocaHolderHandle &AH) : H(AH.H) { H->RefCnt++; } + ~AllocaHolderHandle() { if (--H->RefCnt == 0) delete H; } + + void add(void *mem) { H->add(mem); } +}; + +typedef std::vector<GenericValue> ValuePlaneTy; + +// ExecutionContext struct - This struct represents one stack frame currently +// executing. +// +struct ExecutionContext { + Function *CurFunction;// The currently executing function + BasicBlock *CurBB; // The currently executing BB + BasicBlock::iterator CurInst; // The next instruction to execute + std::map<Value *, GenericValue> Values; // LLVM values used in this invocation + std::vector<GenericValue> VarArgs; // Values passed through an ellipsis + CallSite Caller; // Holds the call that called subframes. + // NULL if main func or debugger invoked fn + AllocaHolderHandle Allocas; // Track memory allocated by alloca +}; + +// Interpreter - This class represents the entirety of the interpreter. +// +class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> { + GenericValue ExitValue; // The return value of the called function + TargetData TD; + IntrinsicLowering *IL; + + // The runtime stack of executing code. The top of the stack is the current + // function record. + std::vector<ExecutionContext> ECStack; + + // AtExitHandlers - List of functions to call when the program exits, + // registered with the atexit() library function. + std::vector<Function*> AtExitHandlers; + +public: + explicit Interpreter(ModuleProvider *M); + ~Interpreter(); + + /// runAtExitHandlers - Run any functions registered by the program's calls to + /// atexit(3), which we intercept and store in AtExitHandlers. + /// + void runAtExitHandlers(); + + static void Register() { + InterpCtor = create; + } + + /// create - Create an interpreter ExecutionEngine. This can never fail. 
+ /// + static ExecutionEngine *create(ModuleProvider *M, std::string *ErrorStr = 0, + CodeGenOpt::Level = CodeGenOpt::Default); + + /// run - Start execution with the specified function and arguments. + /// + virtual GenericValue runFunction(Function *F, + const std::vector<GenericValue> &ArgValues); + + /// recompileAndRelinkFunction - For the interpreter, functions are always + /// up-to-date. + /// + virtual void *recompileAndRelinkFunction(Function *F) { + return getPointerToFunction(F); + } + + /// freeMachineCodeForFunction - The interpreter does not generate any code. + /// + void freeMachineCodeForFunction(Function *F) { } + + // Methods used to execute code: + // Place a call on the stack + void callFunction(Function *F, const std::vector<GenericValue> &ArgVals); + void run(); // Execute instructions until nothing left to do + + // Opcode Implementations + void visitReturnInst(ReturnInst &I); + void visitBranchInst(BranchInst &I); + void visitSwitchInst(SwitchInst &I); + + void visitBinaryOperator(BinaryOperator &I); + void visitICmpInst(ICmpInst &I); + void visitFCmpInst(FCmpInst &I); + void visitAllocationInst(AllocationInst &I); + void visitFreeInst(FreeInst &I); + void visitLoadInst(LoadInst &I); + void visitStoreInst(StoreInst &I); + void visitGetElementPtrInst(GetElementPtrInst &I); + void visitPHINode(PHINode &PN) { assert(0 && "PHI nodes already handled!"); } + void visitTruncInst(TruncInst &I); + void visitZExtInst(ZExtInst &I); + void visitSExtInst(SExtInst &I); + void visitFPTruncInst(FPTruncInst &I); + void visitFPExtInst(FPExtInst &I); + void visitUIToFPInst(UIToFPInst &I); + void visitSIToFPInst(SIToFPInst &I); + void visitFPToUIInst(FPToUIInst &I); + void visitFPToSIInst(FPToSIInst &I); + void visitPtrToIntInst(PtrToIntInst &I); + void visitIntToPtrInst(IntToPtrInst &I); + void visitBitCastInst(BitCastInst &I); + void visitSelectInst(SelectInst &I); + + + void visitCallSite(CallSite CS); + void visitCallInst(CallInst &I) { visitCallSite (CallSite (&I)); } + void visitInvokeInst(InvokeInst &I) { visitCallSite (CallSite (&I)); } + void visitUnwindInst(UnwindInst &I); + void visitUnreachableInst(UnreachableInst &I); + + void visitShl(BinaryOperator &I); + void visitLShr(BinaryOperator &I); + void visitAShr(BinaryOperator &I); + + void visitVAArgInst(VAArgInst &I); + void visitInstruction(Instruction &I) { + cerr << I; + assert(0 && "Instruction not interpretable yet!"); + } + + GenericValue callExternalFunction(Function *F, + const std::vector<GenericValue> &ArgVals); + void exitCalled(GenericValue GV); + + void addAtExitHandler(Function *F) { + AtExitHandlers.push_back(F); + } + + GenericValue *getFirstVarArg () { + return &(ECStack.back ().VarArgs[0]); + } + + //FIXME: private: +public: + GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I, + gep_type_iterator E, ExecutionContext &SF); + +private: // Helper functions + // SwitchToNewBasicBlock - Start execution in a new basic block and run any + // PHI nodes in the top of the block. This is used for intraprocedural + // control flow. 
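  // (Editor's aside, not part of the patch: run(), declared above, is defined
  // elsewhere in the interpreter sources, not in this hunk; in spirit it is
  // the fetch/dispatch loop
  //
  //   while (!ECStack.empty()) {
  //     ExecutionContext &SF = ECStack.back();  // innermost frame
  //     Instruction &I = *SF.CurInst++;         // fetch, then advance
  //     visit(I);  // InstVisitor routes to the visit* methods above
  //   }
  //
  // so each visit* method implements exactly one opcode.)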
+ // + void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF); + + void *getPointerToFunction(Function *F) { return (void*)F; } + + void initializeExecutionEngine(); + void initializeExternalFunctions(); + GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF); + GenericValue getOperandValue(Value *V, ExecutionContext &SF); + GenericValue executeTruncInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeSExtInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeZExtInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeFPTruncInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeFPExtInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeFPToUIInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeFPToSIInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeUIToFPInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeSIToFPInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executePtrToIntInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeIntToPtrInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeBitCastInst(Value *SrcVal, const Type *DstTy, + ExecutionContext &SF); + GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal, + const Type *Ty, ExecutionContext &SF); + void popStackAndReturnValueToCaller(const Type *RetTy, GenericValue Result); + +}; + +} // End llvm namespace + +#endif diff --git a/lib/ExecutionEngine/Interpreter/Makefile b/lib/ExecutionEngine/Interpreter/Makefile new file mode 100644 index 0000000000000..5f937c3ad6f29 --- /dev/null +++ b/lib/ExecutionEngine/Interpreter/Makefile @@ -0,0 +1,12 @@ +##===- lib/ExecutionEngine/Interpreter/Makefile ------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## +LEVEL = ../../.. +LIBRARYNAME = LLVMInterpreter + +include $(LEVEL)/Makefile.common diff --git a/lib/ExecutionEngine/JIT/CMakeLists.txt b/lib/ExecutionEngine/JIT/CMakeLists.txt new file mode 100644 index 0000000000000..d7980d077282a --- /dev/null +++ b/lib/ExecutionEngine/JIT/CMakeLists.txt @@ -0,0 +1,11 @@ +# TODO: Support other architectures. See Makefile. +add_definitions(-DENABLE_X86_JIT) + +add_partially_linked_object(LLVMJIT + Intercept.cpp + JIT.cpp + JITDwarfEmitter.cpp + JITEmitter.cpp + JITMemoryManager.cpp + TargetSelect.cpp + ) diff --git a/lib/ExecutionEngine/JIT/Intercept.cpp b/lib/ExecutionEngine/JIT/Intercept.cpp new file mode 100644 index 0000000000000..3dcc4626a1fa4 --- /dev/null +++ b/lib/ExecutionEngine/JIT/Intercept.cpp @@ -0,0 +1,148 @@ +//===-- Intercept.cpp - System function interception routines -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// If a function call occurs to an external function, the JIT is designed to use +// the dynamic loader interface to find a function to call. 
This is useful for +// calling system calls and library functions that are not available in LLVM. +// Some system calls, however, need to be handled specially. For this reason, +// we intercept some of them here and use our own stubs to handle them. +// +//===----------------------------------------------------------------------===// + +#include "JIT.h" +#include "llvm/Support/Streams.h" +#include "llvm/System/DynamicLibrary.h" +#include "llvm/Config/config.h" +using namespace llvm; + +// AtExitHandlers - List of functions to call when the program exits, +// registered with the atexit() library function. +static std::vector<void (*)()> AtExitHandlers; + +/// runAtExitHandlers - Run any functions registered by the program's +/// calls to atexit(3), which we intercept and store in +/// AtExitHandlers. +/// +static void runAtExitHandlers() { + while (!AtExitHandlers.empty()) { + void (*Fn)() = AtExitHandlers.back(); + AtExitHandlers.pop_back(); + Fn(); + } +} + +//===----------------------------------------------------------------------===// +// Function stubs that are invoked instead of certain library calls +//===----------------------------------------------------------------------===// + +// Force the following functions to be linked in to anything that uses the +// JIT. This is a hack designed to work around the all-too-clever Glibc +// strategy of making these functions work differently when inlined vs. when +// not inlined, and hiding their real definitions in a separate archive file +// that the dynamic linker can't see. For more info, search for +// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274. +#if defined(__linux__) +#if defined(HAVE_SYS_STAT_H) +#include <sys/stat.h> +#endif +#include <fcntl.h> +/* stat functions are redirecting to __xstat with a version number. On x86-64 + * linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make 'stat' + * available as an exported symbol, so we have to add it explicitly. + */ +class StatSymbols { +public: + StatSymbols() { + sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat); + sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat); + sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat); + sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64); + sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64); + sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64); + sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64); + sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64); + sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64); + sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit); + sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod); + } +}; +static StatSymbols initStatSymbols; +#endif // __linux__ + +// jit_exit - Used to intercept the "exit" library call. +static void jit_exit(int Status) { + runAtExitHandlers(); // Run atexit handlers... + exit(Status); +} + +// jit_atexit - Used to intercept the "atexit" library call. +static int jit_atexit(void (*Fn)(void)) { + AtExitHandlers.push_back(Fn); // Take note of atexit handler... + return 0; // Always successful +} + +//===----------------------------------------------------------------------===// +// +/// getPointerToNamedFunction - This method returns the address of the specified +/// function by using the dynamic loader interface. As such it is only useful +/// for resolving library symbols, not code generated symbols. 
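// (Editor's aside, not part of the patch: the effect of the stubs above, as a
// sketch; `MyHandler' is a hypothetical handler registered by JIT'd code. A
// lookup of "atexit" resolves to jit_atexit, so the handler is queued in
// AtExitHandlers rather than in the host process's real atexit list:
//
//   void *A = TheJIT.getPointerToNamedFunction("atexit"); // == &jit_atexit
//   int (*AtExit)(void (*)()) = (int (*)(void (*)()))(intptr_t)A;
//   AtExit(MyHandler);  // queued; jit_exit() will run it before exit()
// )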
+///
+void *JIT::getPointerToNamedFunction(const std::string &Name,
+                                     bool AbortOnFailure) {
+  if (!isSymbolSearchingDisabled()) {
+    // Check to see if this is one of the functions we want to intercept.  Note,
+    // we cast to intptr_t here to silence a -pedantic warning that complains
+    // about casting a function pointer to a normal pointer.
+    if (Name == "exit") return (void*)(intptr_t)&jit_exit;
+    if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
+
+    const char *NameStr = Name.c_str();
+    // If this is an asm specifier, skip the sentinel.
+    if (NameStr[0] == 1) ++NameStr;
+
+    // If it's an external function, look it up in the process image...
+    void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+    if (Ptr) return Ptr;
+
+    // If it wasn't found and if it starts with an underscore ('_') character,
+    // and has an asm specifier, try again without the underscore.
+    if (Name[0] == 1 && NameStr[0] == '_') {
+      Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+      if (Ptr) return Ptr;
+    }
+
+    // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf.  These
+    // are references to hidden visibility symbols that dlsym cannot resolve.
+    // If we have one of these, strip off $LDBLStub and try again.
+#if defined(__APPLE__) && defined(__ppc__)
+    if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
+        memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
+      // First try turning $LDBLStub into $LDBL128.  If that fails, strip it off.
+      // This mirrors logic in libSystemStubs.a.
+      std::string Prefix = std::string(Name.begin(), Name.end()-9);
+      if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128", false))
+        return Ptr;
+      if (void *Ptr = getPointerToNamedFunction(Prefix, false))
+        return Ptr;
+    }
+#endif
+  }
+
+  // If a LazyFunctionCreator is installed, use it to get/create the function.
+  if (LazyFunctionCreator)
+    if (void *RP = LazyFunctionCreator(Name))
+      return RP;
+
+  if (AbortOnFailure) {
+    cerr << "ERROR: Program used external function '" << Name
+         << "' which could not be resolved!\n";
+    abort();
+  }
+  return 0;
+}
diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp
new file mode 100644
index 0000000000000..f8ae8844616dc
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JIT.cpp
@@ -0,0 +1,708 @@
+//===-- JIT.cpp - LLVM Just in Time Compiler ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool implements a just-in-time compiler for LLVM, allowing direct
+// execution of LLVM bitcode in an efficient manner.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Instructions.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/CodeGen/MachineCodeInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetJITInfo.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Config/config.h"
+
+using namespace llvm;
+
+#ifdef __APPLE__
+// Apple gcc defaults to -fuse-cxa-atexit (i.e.
calls __cxa_atexit instead
+// of atexit). It passes the address of linker generated symbol __dso_handle
+// to the function.
+// This configuration change happened at version 5330.
+# include <AvailabilityMacros.h>
+# if defined(MAC_OS_X_VERSION_10_4) && \
+     ((MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_4) || \
+      (MAC_OS_X_VERSION_MIN_REQUIRED == MAC_OS_X_VERSION_10_4 && \
+       __APPLE_CC__ >= 5330))
+#  ifndef HAVE___DSO_HANDLE
+#   define HAVE___DSO_HANDLE 1
+#  endif
+# endif
+#endif
+
+#if HAVE___DSO_HANDLE
+extern void *__dso_handle __attribute__ ((__visibility__ ("hidden")));
+#endif
+
+namespace {
+
+static struct RegisterJIT {
+  RegisterJIT() { JIT::Register(); }
+} JITRegistrator;
+
+}
+
+namespace llvm {
+  void LinkInJIT() {
+  }
+}
+
+
+#if defined(__GNUC__) && !defined(__ARM_EABI__)
+
+// libgcc defines the __register_frame function to dynamically register new
+// dwarf frames for exception handling.  This functionality is not portable
+// across compilers and is only provided by GCC.  We use the __register_frame
+// function here so that code generated by the JIT cooperates with the unwinding
+// runtime of libgcc.  When JITting with exception handling enabled, LLVM
+// generates dwarf frames and registers them with libgcc via __register_frame.
+//
+// The __register_frame function works with Linux.
+//
+// Unfortunately, this functionality was added to libgcc only after darwin's
+// libgcc unwinding library had been written.  The darwin code overwrites the
+// value updated by __register_frame with a value fetched with "keymgr".
+// "keymgr" is an obsolete functionality, which should be rewritten some day.
+// In the meantime, since "keymgr" is on all libgccs shipped with apple-gcc, we
+// need a workaround in LLVM which uses the "keymgr" to dynamically modify the
+// values of an opaque key, used by libgcc to find dwarf tables.
+
+extern "C" void __register_frame(void*);
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED <= 1050
+# define USE_KEYMGR 1
+#else
+# define USE_KEYMGR 0
+#endif
+
+#if USE_KEYMGR
+
+namespace {
+
+// LibgccObject - This is the structure defined in libgcc.  There is no #include
+// provided for this structure, so we also define it here.  libgcc calls it
+// "struct object".  The structure is undocumented in libgcc.
+struct LibgccObject {
+  void *unused1;
+  void *unused2;
+  void *unused3;
+
+  /// frame - Pointer to the exception table.
+  void *frame;
+
+  /// encoding - The encoding of the object?
+  union {
+    struct {
+      unsigned long sorted : 1;
+      unsigned long from_array : 1;
+      unsigned long mixed_encoding : 1;
+      unsigned long encoding : 8;
+      unsigned long count : 21;
+    } b;
+    size_t i;
+  } encoding;
+
+  /// fde_end - libgcc defines this field only if some macro is defined.  We
+  /// include this field even if it may not be there, to make libgcc happy.
+  char *fde_end;
+
+  /// next - At least we know it's a chained list!
+  struct LibgccObject *next;
+};
+
+// "keymgr" stuff.  Apparently, all frame tables are stored there.
+extern "C" void _keymgr_set_and_unlock_processwide_ptr(int, void *);
+extern "C" void *_keymgr_get_and_lock_processwide_ptr(int);
+#define KEYMGR_GCC3_DW2_OBJ_LIST        302     /* Dwarf2 object list  */
+
+/// LibgccObjectInfo - libgcc defines this struct as km_object_info.  It
+/// probably contains all dwarf tables that are loaded.
+struct LibgccObjectInfo {
+
+  /// seenObjects - LibgccObjects already parsed by the unwinding runtime.
+  ///
+  struct LibgccObject* seenObjects;
+
+  /// unseenObjects - LibgccObjects not parsed yet by the unwinding runtime.
+  ///
+  struct LibgccObject* unseenObjects;
+
+  unsigned unused[2];
+};
+
+/// DarwinRegisterFrame - Since __register_frame does not work with darwin's
+/// libgcc, we provide our own function, which "tricks" libgcc by modifying the
+/// "Dwarf2 object list" key.
+void DarwinRegisterFrame(void* FrameBegin) {
+  // Get the key.
+  LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
+    _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
+  assert(LOI && "This should be preallocated by the runtime");
+
+  // Allocate a new LibgccObject to represent this frame.  Deallocation of this
+  // object may be impossible: since darwin code in libgcc was written after
+  // the ability to dynamically register frames, things may crash if we
+  // deallocate it.
+  struct LibgccObject* ob = (struct LibgccObject*)
+    malloc(sizeof(struct LibgccObject));
+
+  // Match libgcc's choices for these field values.
+  ob->unused1 = (void *)-1;
+  ob->unused2 = 0;
+  ob->unused3 = 0;
+  ob->frame = FrameBegin;
+  ob->encoding.i = 0;
+  ob->encoding.b.encoding = llvm::dwarf::DW_EH_PE_omit;
+
+  // Put the info in both places, as libgcc uses either the first or the second
+  // field.  Note that we rely on having two pointers here.  If fde_end was a
+  // char, things would get complicated.
+  ob->fde_end = (char*)LOI->unseenObjects;
+  ob->next = LOI->unseenObjects;
+
+  // Update the key's unseenObjects list.
+  LOI->unseenObjects = ob;
+
+  // Finally update the "key".  Apparently, libgcc requires it.
+  _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST,
+                                         LOI);
+
+}
+
+}
+#endif // __APPLE__
+#endif // __GNUC__
+
+/// createJIT - This is the factory method for creating a JIT for the current
+/// machine; it does not fall back to the interpreter.  This takes ownership
+/// of the module provider.
+ExecutionEngine *ExecutionEngine::createJIT(ModuleProvider *MP,
+                                            std::string *ErrorStr,
+                                            JITMemoryManager *JMM,
+                                            CodeGenOpt::Level OptLevel) {
+  ExecutionEngine *EE = JIT::createJIT(MP, ErrorStr, JMM, OptLevel);
+  if (!EE) return 0;
+
+  // Make sure we can resolve symbols in the program as well.  The zero arg
+  // to the function tells DynamicLibrary to load the program, not a library.
+  sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr);
+  return EE;
+}
+
+JIT::JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji,
+         JITMemoryManager *JMM, CodeGenOpt::Level OptLevel)
+  : ExecutionEngine(MP), TM(tm), TJI(tji) {
+  setTargetData(TM.getTargetData());
+
+  jitstate = new JITState(MP);
+
+  // Initialize JCE
+  JCE = createEmitter(*this, JMM);
+
+  // Add target data
+  MutexGuard locked(lock);
+  FunctionPassManager &PM = jitstate->getPM(locked);
+  PM.add(new TargetData(*TM.getTargetData()));
+
+  // Turn the machine code intermediate representation into bytes in memory that
+  // may be executed.
+  if (TM.addPassesToEmitMachineCode(PM, *JCE, OptLevel)) {
+    cerr << "Target does not support machine code emission!\n";
+    abort();
+  }
+
+  // Register routine for informing unwinding runtime about new EH frames
+#if defined(__GNUC__) && !defined(__ARM_EABI__)
+#if USE_KEYMGR
+  struct LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
+    _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
+
+  // The key is created on demand, and libgcc creates it the first time an
+  // exception occurs.  Since we need the key to register frames, we create
+  // it now.
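  // (Editor's aside, not part of the patch: InstallExceptionTableRegister,
  // used just below, simply stores the callback in the execution engine's
  // ExceptionTableRegister hook; the JIT emitter later invokes it once per
  // emitted function, so the unwinder -- __register_frame's runtime, or
  // keymgr's object list on older Darwin -- can find the dwarf tables for
  // JIT'd frames.)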
+  if (!LOI)
+    LOI = (LibgccObjectInfo*)calloc(sizeof(struct LibgccObjectInfo), 1);
+  _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, LOI);
+  InstallExceptionTableRegister(DarwinRegisterFrame);
+#else
+  InstallExceptionTableRegister(__register_frame);
+#endif // __APPLE__
+#endif // __GNUC__
+
+  // Initialize passes.
+  PM.doInitialization();
+}
+
+JIT::~JIT() {
+  delete jitstate;
+  delete JCE;
+  delete &TM;
+}
+
+/// addModuleProvider - Add a new ModuleProvider to the JIT.  If we previously
+/// removed the last ModuleProvider, we need to re-initialize jitstate with a
+/// valid ModuleProvider.
+void JIT::addModuleProvider(ModuleProvider *MP) {
+  MutexGuard locked(lock);
+
+  if (Modules.empty()) {
+    assert(!jitstate && "jitstate should be NULL if Modules vector is empty!");
+
+    jitstate = new JITState(MP);
+
+    FunctionPassManager &PM = jitstate->getPM(locked);
+    PM.add(new TargetData(*TM.getTargetData()));
+
+    // Turn the machine code intermediate representation into bytes in memory
+    // that may be executed.
+    if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+      cerr << "Target does not support machine code emission!\n";
+      abort();
+    }
+
+    // Initialize passes.
+    PM.doInitialization();
+  }
+
+  ExecutionEngine::addModuleProvider(MP);
+}
+
+/// removeModuleProvider - If we are removing the last ModuleProvider,
+/// invalidate the jitstate since the PassManager it contains references a
+/// released ModuleProvider.
+Module *JIT::removeModuleProvider(ModuleProvider *MP, std::string *E) {
+  Module *result = ExecutionEngine::removeModuleProvider(MP, E);
+
+  MutexGuard locked(lock);
+
+  if (jitstate->getMP() == MP) {
+    delete jitstate;
+    jitstate = 0;
+  }
+
+  if (!jitstate && !Modules.empty()) {
+    jitstate = new JITState(Modules[0]);
+
+    FunctionPassManager &PM = jitstate->getPM(locked);
+    PM.add(new TargetData(*TM.getTargetData()));
+
+    // Turn the machine code intermediate representation into bytes in memory
+    // that may be executed.
+    if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+      cerr << "Target does not support machine code emission!\n";
+      abort();
+    }
+
+    // Initialize passes.
+    PM.doInitialization();
+  }
+  return result;
+}
+
+/// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
+/// and deletes the ModuleProvider and owned Module.  Avoids materializing
+/// the underlying module.
+void JIT::deleteModuleProvider(ModuleProvider *MP, std::string *E) {
+  ExecutionEngine::deleteModuleProvider(MP, E);
+
+  MutexGuard locked(lock);
+
+  if (jitstate->getMP() == MP) {
+    delete jitstate;
+    jitstate = 0;
+  }
+
+  if (!jitstate && !Modules.empty()) {
+    jitstate = new JITState(Modules[0]);
+
+    FunctionPassManager &PM = jitstate->getPM(locked);
+    PM.add(new TargetData(*TM.getTargetData()));
+
+    // Turn the machine code intermediate representation into bytes in memory
+    // that may be executed.
+    if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+      cerr << "Target does not support machine code emission!\n";
+      abort();
+    }
+
+    // Initialize passes.
+    PM.doInitialization();
+  }
+}
+
+/// run - Start execution with the specified function and arguments.
+/// +GenericValue JIT::runFunction(Function *F, + const std::vector<GenericValue> &ArgValues) { + assert(F && "Function *F was null at entry to run()"); + + void *FPtr = getPointerToFunction(F); + assert(FPtr && "Pointer to fn's code was null after getPointerToFunction"); + const FunctionType *FTy = F->getFunctionType(); + const Type *RetTy = FTy->getReturnType(); + + assert((FTy->getNumParams() == ArgValues.size() || + (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) && + "Wrong number of arguments passed into function!"); + assert(FTy->getNumParams() == ArgValues.size() && + "This doesn't support passing arguments through varargs (yet)!"); + + // Handle some common cases first. These cases correspond to common `main' + // prototypes. + if (RetTy == Type::Int32Ty || RetTy == Type::VoidTy) { + switch (ArgValues.size()) { + case 3: + if (FTy->getParamType(0) == Type::Int32Ty && + isa<PointerType>(FTy->getParamType(1)) && + isa<PointerType>(FTy->getParamType(2))) { + int (*PF)(int, char **, const char **) = + (int(*)(int, char **, const char **))(intptr_t)FPtr; + + // Call the function. + GenericValue rv; + rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(), + (char **)GVTOP(ArgValues[1]), + (const char **)GVTOP(ArgValues[2]))); + return rv; + } + break; + case 2: + if (FTy->getParamType(0) == Type::Int32Ty && + isa<PointerType>(FTy->getParamType(1))) { + int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr; + + // Call the function. + GenericValue rv; + rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(), + (char **)GVTOP(ArgValues[1]))); + return rv; + } + break; + case 1: + if (FTy->getNumParams() == 1 && + FTy->getParamType(0) == Type::Int32Ty) { + GenericValue rv; + int (*PF)(int) = (int(*)(int))(intptr_t)FPtr; + rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue())); + return rv; + } + break; + } + } + + // Handle cases where no arguments are passed first. + if (ArgValues.empty()) { + GenericValue rv; + switch (RetTy->getTypeID()) { + default: assert(0 && "Unknown return type for function call!"); + case Type::IntegerTyID: { + unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth(); + if (BitWidth == 1) + rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)()); + else if (BitWidth <= 8) + rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)()); + else if (BitWidth <= 16) + rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)()); + else if (BitWidth <= 32) + rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)()); + else if (BitWidth <= 64) + rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)()); + else + assert(0 && "Integer types > 64 bits not supported"); + return rv; + } + case Type::VoidTyID: + rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)()); + return rv; + case Type::FloatTyID: + rv.FloatVal = ((float(*)())(intptr_t)FPtr)(); + return rv; + case Type::DoubleTyID: + rv.DoubleVal = ((double(*)())(intptr_t)FPtr)(); + return rv; + case Type::X86_FP80TyID: + case Type::FP128TyID: + case Type::PPC_FP128TyID: + assert(0 && "long double not supported yet"); + return rv; + case Type::PointerTyID: + return PTOGV(((void*(*)())(intptr_t)FPtr)()); + } + } + + // Okay, this is not one of our quick and easy cases. Because we don't have a + // full FFI, we have to codegen a nullary stub function that just calls the + // function we are interested in, passing in constants for all of the + // arguments. Make this function and return. + + // First, create the function. 
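  // (Editor's aside, not part of the patch: for a call like runFunction(F,
  // ArgValues) where F is "int f(int)" and ArgValues holds the i32 constant 7,
  // the code below synthesizes, in F's module, roughly:
  //
  //     define internal i32 @0() {
  //       %r = tail call i32 @f(i32 7)
  //       ret i32 %r
  //     }
  //
  // and then re-enters runFunction on the stub, which now hits the
  // zero-argument fast path above and calls the JIT'd stub directly.)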
+  FunctionType *STy = FunctionType::get(RetTy, std::vector<const Type*>(), false);
+  Function *Stub = Function::Create(STy, Function::InternalLinkage, "",
+                                    F->getParent());
+
+  // Insert a basic block.
+  BasicBlock *StubBB = BasicBlock::Create("", Stub);
+
+  // Convert all of the GenericValue arguments over to constants.  Note that we
+  // currently don't support varargs.
+  SmallVector<Value*, 8> Args;
+  for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
+    Constant *C = 0;
+    const Type *ArgTy = FTy->getParamType(i);
+    const GenericValue &AV = ArgValues[i];
+    switch (ArgTy->getTypeID()) {
+    default: assert(0 && "Unknown argument type for function call!");
+    case Type::IntegerTyID:
+      C = ConstantInt::get(AV.IntVal);
+      break;
+    case Type::FloatTyID:
+      C = ConstantFP::get(APFloat(AV.FloatVal));
+      break;
+    case Type::DoubleTyID:
+      C = ConstantFP::get(APFloat(AV.DoubleVal));
+      break;
+    case Type::PPC_FP128TyID:
+    case Type::X86_FP80TyID:
+    case Type::FP128TyID:
+      C = ConstantFP::get(APFloat(AV.IntVal));
+      break;
+    case Type::PointerTyID:
+      void *ArgPtr = GVTOP(AV);
+      if (sizeof(void*) == 4)
+        C = ConstantInt::get(Type::Int32Ty, (int)(intptr_t)ArgPtr);
+      else
+        C = ConstantInt::get(Type::Int64Ty, (intptr_t)ArgPtr);
+      C = ConstantExpr::getIntToPtr(C, ArgTy);  // Cast the integer to pointer
+      break;
+    }
+    Args.push_back(C);
+  }
+
+  CallInst *TheCall = CallInst::Create(F, Args.begin(), Args.end(),
+                                       "", StubBB);
+  TheCall->setCallingConv(F->getCallingConv());
+  TheCall->setTailCall();
+  if (TheCall->getType() != Type::VoidTy)
+    ReturnInst::Create(TheCall, StubBB);    // Return result of the call.
+  else
+    ReturnInst::Create(StubBB);             // Just return void.
+
+  // Finally, return the value returned by our nullary stub function.
+  return runFunction(Stub, std::vector<GenericValue>());
+}
+
+/// runJITOnFunction - Run the FunctionPassManager full of
+/// just-in-time compilation passes on F, hopefully filling in
+/// GlobalAddress[F] with the address of F's machine code.
+///
+void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
+  MutexGuard locked(lock);
+
+  registerMachineCodeInfo(MCI);
+
+  runJITOnFunctionUnlocked(F, locked);
+
+  registerMachineCodeInfo(0);
+}
+
+void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
+  static bool isAlreadyCodeGenerating = false;
+  assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
+
+  // JIT the function
+  isAlreadyCodeGenerating = true;
+  jitstate->getPM(locked).run(*F);
+  isAlreadyCodeGenerating = false;
+
+  // If the function referred to another function that had not yet been
+  // read from bitcode, but we are jitting non-lazily, emit it now.
+  while (!jitstate->getPendingFunctions(locked).empty()) {
+    Function *PF = jitstate->getPendingFunctions(locked).back();
+    jitstate->getPendingFunctions(locked).pop_back();
+
+    // JIT the function
+    isAlreadyCodeGenerating = true;
+    jitstate->getPM(locked).run(*PF);
+    isAlreadyCodeGenerating = false;
+
+    // Now that the function has been jitted, ask the JITEmitter to rewrite
+    // the stub with real address of the function.
+    updateFunctionStub(PF);
+  }
+
+  // If the JIT is configured to emit info so that dlsym can be used to
+  // rewrite stubs to external globals, do so now.
+  if (areDlsymStubsEnabled() && isLazyCompilationDisabled())
+    updateDlsymStubTable();
+}
+
+/// getPointerToFunction - This method is used to get the address of the
+/// specified function, compiling it if necessary.
+/// +void *JIT::getPointerToFunction(Function *F) { + + if (void *Addr = getPointerToGlobalIfAvailable(F)) + return Addr; // Check if function already code gen'd + + MutexGuard locked(lock); + + // Make sure we read in the function if it exists in this Module. + if (F->hasNotBeenReadFromBitcode()) { + // Determine the module provider this function is provided by. + Module *M = F->getParent(); + ModuleProvider *MP = 0; + for (unsigned i = 0, e = Modules.size(); i != e; ++i) { + if (Modules[i]->getModule() == M) { + MP = Modules[i]; + break; + } + } + assert(MP && "Function isn't in a module we know about!"); + + std::string ErrorMsg; + if (MP->materializeFunction(F, &ErrorMsg)) { + cerr << "Error reading function '" << F->getName() + << "' from bitcode file: " << ErrorMsg << "\n"; + abort(); + } + + // Now retry to get the address. + if (void *Addr = getPointerToGlobalIfAvailable(F)) + return Addr; + } + + if (F->isDeclaration()) { + bool AbortOnFailure = + !areDlsymStubsEnabled() && !F->hasExternalWeakLinkage(); + void *Addr = getPointerToNamedFunction(F->getName(), AbortOnFailure); + addGlobalMapping(F, Addr); + return Addr; + } + + runJITOnFunctionUnlocked(F, locked); + + void *Addr = getPointerToGlobalIfAvailable(F); + assert(Addr && "Code generation didn't add function to GlobalAddress table!"); + return Addr; +} + +/// getOrEmitGlobalVariable - Return the address of the specified global +/// variable, possibly emitting it to memory if needed. This is used by the +/// Emitter. +void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) { + MutexGuard locked(lock); + + void *Ptr = getPointerToGlobalIfAvailable(GV); + if (Ptr) return Ptr; + + // If the global is external, just remember the address. + if (GV->isDeclaration()) { +#if HAVE___DSO_HANDLE + if (GV->getName() == "__dso_handle") + return (void*)&__dso_handle; +#endif + Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(GV->getName().c_str()); + if (Ptr == 0 && !areDlsymStubsEnabled()) { + cerr << "Could not resolve external global address: " + << GV->getName() << "\n"; + abort(); + } + addGlobalMapping(GV, Ptr); + } else { + // GlobalVariable's which are not "constant" will cause trouble in a server + // situation. It's returned in the same block of memory as code which may + // not be writable. + if (isGVCompilationDisabled() && !GV->isConstant()) { + cerr << "Compilation of non-internal GlobalValue is disabled!\n"; + abort(); + } + // If the global hasn't been emitted to memory yet, allocate space and + // emit it into memory. It goes in the same array as the generated + // code, jump tables, etc. + const Type *GlobalType = GV->getType()->getElementType(); + size_t S = getTargetData()->getTypeAllocSize(GlobalType); + size_t A = getTargetData()->getPreferredAlignment(GV); + if (GV->isThreadLocal()) { + MutexGuard locked(lock); + Ptr = TJI.allocateThreadLocalMemory(S); + } else if (TJI.allocateSeparateGVMemory()) { + if (A <= 8) { + Ptr = malloc(S); + } else { + // Allocate S+A bytes of memory, then use an aligned pointer within that + // space. + Ptr = malloc(S+A); + unsigned MisAligned = ((intptr_t)Ptr & (A-1)); + Ptr = (char*)Ptr + (MisAligned ? (A-MisAligned) : 0); + } + } else { + Ptr = JCE->allocateSpace(S, A); + } + addGlobalMapping(GV, Ptr); + EmitGlobalVariable(GV); + } + return Ptr; +} + +/// recompileAndRelinkFunction - This method is used to force a function +/// which has already been compiled, to be compiled again, possibly +/// after it has been modified. 
Then the entry to the old copy is overwritten +/// with a branch to the new copy. If there was no old copy, this acts +/// just like JIT::getPointerToFunction(). +/// +void *JIT::recompileAndRelinkFunction(Function *F) { + void *OldAddr = getPointerToGlobalIfAvailable(F); + + // If it's not already compiled there is no reason to patch it up. + if (OldAddr == 0) { return getPointerToFunction(F); } + + // Delete the old function mapping. + addGlobalMapping(F, 0); + + // Recodegen the function + runJITOnFunction(F); + + // Update state, forward the old function to the new function. + void *Addr = getPointerToGlobalIfAvailable(F); + assert(Addr && "Code generation didn't add function to GlobalAddress table!"); + TJI.replaceMachineCodeForFunction(OldAddr, Addr); + return Addr; +} + +/// getMemoryForGV - This method abstracts memory allocation of global +/// variable so that the JIT can allocate thread local variables depending +/// on the target. +/// +char* JIT::getMemoryForGV(const GlobalVariable* GV) { + const Type *ElTy = GV->getType()->getElementType(); + size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy); + if (GV->isThreadLocal()) { + MutexGuard locked(lock); + return TJI.allocateThreadLocalMemory(GVSize); + } else { + return new char[GVSize]; + } +} + +void JIT::addPendingFunction(Function *F) { + MutexGuard locked(lock); + jitstate->getPendingFunctions(locked).push_back(F); +} diff --git a/lib/ExecutionEngine/JIT/JIT.h b/lib/ExecutionEngine/JIT/JIT.h new file mode 100644 index 0000000000000..3ccb2dd8126be --- /dev/null +++ b/lib/ExecutionEngine/JIT/JIT.h @@ -0,0 +1,176 @@ +//===-- JIT.h - Class definition for the JIT --------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the top-level JIT data structure. +// +//===----------------------------------------------------------------------===// + +#ifndef JIT_H +#define JIT_H + +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include "llvm/PassManager.h" + +namespace llvm { + +class Function; +class TargetMachine; +class TargetJITInfo; +class MachineCodeEmitter; +class MachineCodeInfo; + +class JITState { +private: + FunctionPassManager PM; // Passes to compile a function + ModuleProvider *MP; // ModuleProvider used to create the PM + + /// PendingFunctions - Functions which have not been code generated yet, but + /// were called from a function being code generated. + std::vector<Function*> PendingFunctions; + +public: + explicit JITState(ModuleProvider *MP) : PM(MP), MP(MP) {} + + FunctionPassManager &getPM(const MutexGuard &L) { + return PM; + } + + ModuleProvider *getMP() const { return MP; } + std::vector<Function*> &getPendingFunctions(const MutexGuard &L) { + return PendingFunctions; + } +}; + + +class JIT : public ExecutionEngine { + TargetMachine &TM; // The current target we are compiling to + TargetJITInfo &TJI; // The JITInfo for the target we are compiling to + JITCodeEmitter *JCE; // JCE object + + JITState *jitstate; + + JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji, + JITMemoryManager *JMM, CodeGenOpt::Level OptLevel); +public: + ~JIT(); + + static void Register() { + JITCtor = create; + } + + /// getJITInfo - Return the target JIT information structure. 
+  ///
+  TargetJITInfo &getJITInfo() const { return TJI; }
+
+  /// create - Create and return a new JIT compiler if there is one available
+  /// for the current target.  Otherwise, return null.
+  ///
+  static ExecutionEngine *create(ModuleProvider *MP, std::string *Err,
+                                 CodeGenOpt::Level OptLevel =
+                                   CodeGenOpt::Default) {
+    return createJIT(MP, Err, 0, OptLevel);
+  }
+
+  virtual void addModuleProvider(ModuleProvider *MP);
+
+  /// removeModuleProvider - Remove a ModuleProvider from the list of modules.
+  /// Releases the Module from the ModuleProvider, materializing it in the
+  /// process, and returns the materialized Module.
+  virtual Module *removeModuleProvider(ModuleProvider *MP,
+                                       std::string *ErrInfo = 0);
+
+  /// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
+  /// and deletes the ModuleProvider and owned Module.  Avoids materializing
+  /// the underlying module.
+  virtual void deleteModuleProvider(ModuleProvider *P, std::string *ErrInfo = 0);
+
+  /// runFunction - Start execution with the specified function and arguments.
+  ///
+  virtual GenericValue runFunction(Function *F,
+                                   const std::vector<GenericValue> &ArgValues);
+
+  /// getPointerToNamedFunction - This method returns the address of the
+  /// specified function by using the dlsym function call.  As such it is only
+  /// useful for resolving library symbols, not code generated symbols.
+  ///
+  /// If AbortOnFailure is false and no function with the given name is
+  /// found, this function silently returns a null pointer.  Otherwise,
+  /// it prints a message to stderr and aborts.
+  ///
+  void *getPointerToNamedFunction(const std::string &Name,
+                                  bool AbortOnFailure = true);
+
+  // CompilationCallback - Invoked the first time that a call site is found,
+  // which causes lazy compilation of the target function.
+  //
+  static void CompilationCallback();
+
+  /// getPointerToFunction - This returns the address of the specified function,
+  /// compiling it if necessary.
+  ///
+  void *getPointerToFunction(Function *F);
+
+  /// getOrEmitGlobalVariable - Return the address of the specified global
+  /// variable, possibly emitting it to memory if needed.  This is used by the
+  /// Emitter.
+  void *getOrEmitGlobalVariable(const GlobalVariable *GV);
+
+  /// getPointerToFunctionOrStub - If the specified function has been
+  /// code-gen'd, return a pointer to the function.  If not, compile it, or use
+  /// a stub to implement lazy compilation if available.
+  ///
+  void *getPointerToFunctionOrStub(Function *F);
+
+  /// recompileAndRelinkFunction - This method is used to force a function
+  /// which has already been compiled, to be compiled again, possibly
+  /// after it has been modified.  Then the entry to the old copy is overwritten
+  /// with a branch to the new copy.  If there was no old copy, this acts
+  /// just like JIT::getPointerToFunction().
+  ///
+  void *recompileAndRelinkFunction(Function *F);
+
+  /// freeMachineCodeForFunction - deallocate memory used to code-generate this
+  /// Function.
+  ///
+  void freeMachineCodeForFunction(Function *F);
+
+  /// addPendingFunction - While jitting non-lazily, a called but non-codegen'd
+  /// function was encountered.  Add it to a pending list to be processed after
+  /// the current function.
+  ///
+  void addPendingFunction(Function *F);
+
+  /// getCodeEmitter - Return the code emitter this JIT is emitting into.
+ JITCodeEmitter *getCodeEmitter() const { return JCE; } + + static ExecutionEngine *createJIT(ModuleProvider *MP, std::string *Err, + JITMemoryManager *JMM, + CodeGenOpt::Level OptLevel); + + + // Run the JIT on F and return information about the generated code + void runJITOnFunction(Function *F, MachineCodeInfo *MCI = 0); + +private: + static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM); + void registerMachineCodeInfo(MachineCodeInfo *MCI); + void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked); + void updateFunctionStub(Function *F); + void updateDlsymStubTable(); + +protected: + + /// getMemoryforGV - Allocate memory for a global variable. + virtual char* getMemoryForGV(const GlobalVariable* GV); + +}; + +} // End llvm namespace + +#endif diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp new file mode 100644 index 0000000000000..e101ef371ed04 --- /dev/null +++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp @@ -0,0 +1,1056 @@ +//===----- JITDwarfEmitter.cpp - Write dwarf tables into memory -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a JITDwarfEmitter object that is used by the JIT to +// write dwarf tables to memory. +// +//===----------------------------------------------------------------------===// + +#include "JIT.h" +#include "JITDwarfEmitter.h" +#include "llvm/Function.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/CodeGen/JITCodeEmitter.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineLocation.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/ExecutionEngine/JITMemoryManager.h" +#include "llvm/Target/TargetAsmInfo.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetFrameInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegisterInfo.h" + +using namespace llvm; + +JITDwarfEmitter::JITDwarfEmitter(JIT& theJit) : Jit(theJit) {} + + +unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F, + JITCodeEmitter& jce, + unsigned char* StartFunction, + unsigned char* EndFunction) { + const TargetMachine& TM = F.getTarget(); + TD = TM.getTargetData(); + needsIndirectEncoding = TM.getTargetAsmInfo()->getNeedsIndirectEncoding(); + stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection(); + RI = TM.getRegisterInfo(); + JCE = &jce; + + unsigned char* ExceptionTable = EmitExceptionTable(&F, StartFunction, + EndFunction); + + unsigned char* Result = 0; + unsigned char* EHFramePtr = 0; + + const std::vector<Function *> Personalities = MMI->getPersonalities(); + EHFramePtr = EmitCommonEHFrame(Personalities[MMI->getPersonalityIndex()]); + + Result = EmitEHFrame(Personalities[MMI->getPersonalityIndex()], EHFramePtr, + StartFunction, EndFunction, ExceptionTable); + + return Result; +} + + +void +JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr, + const std::vector<MachineMove> &Moves) const { + unsigned PointerSize = TD->getPointerSize(); + int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ? 
PointerSize : -PointerSize;
+  bool IsLocal = false;
+  unsigned BaseLabelID = 0;
+
+  for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+    const MachineMove &Move = Moves[i];
+    unsigned LabelID = Move.getLabelID();
+
+    if (LabelID) {
+      LabelID = MMI->MappedLabel(LabelID);
+
+      // Throw out move if the label is invalid.
+      if (!LabelID) continue;
+    }
+
+    intptr_t LabelPtr = 0;
+    if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+
+    const MachineLocation &Dst = Move.getDestination();
+    const MachineLocation &Src = Move.getSource();
+
+    // Advance row if new location.
+    if (BaseLabelPtr && LabelID && (BaseLabelID != LabelID || !IsLocal)) {
+      JCE->emitByte(dwarf::DW_CFA_advance_loc4);
+      JCE->emitInt32(LabelPtr - BaseLabelPtr);
+
+      BaseLabelID = LabelID;
+      BaseLabelPtr = LabelPtr;
+      IsLocal = true;
+    }
+
+    // If advancing cfa.
+    if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+      if (!Src.isReg()) {
+        if (Src.getReg() == MachineLocation::VirtualFP) {
+          JCE->emitByte(dwarf::DW_CFA_def_cfa_offset);
+        } else {
+          JCE->emitByte(dwarf::DW_CFA_def_cfa);
+          JCE->emitULEB128Bytes(RI->getDwarfRegNum(Src.getReg(), true));
+        }
+
+        int Offset = -Src.getOffset();
+
+        JCE->emitULEB128Bytes(Offset);
+      } else {
+        assert(0 && "Machine move not supported yet.");
+      }
+    } else if (Src.isReg() &&
+               Src.getReg() == MachineLocation::VirtualFP) {
+      if (Dst.isReg()) {
+        JCE->emitByte(dwarf::DW_CFA_def_cfa_register);
+        JCE->emitULEB128Bytes(RI->getDwarfRegNum(Dst.getReg(), true));
+      } else {
+        assert(0 && "Machine move not supported yet.");
+      }
+    } else {
+      unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
+      int Offset = Dst.getOffset() / stackGrowth;
+
+      if (Offset < 0) {
+        JCE->emitByte(dwarf::DW_CFA_offset_extended_sf);
+        JCE->emitULEB128Bytes(Reg);
+        JCE->emitSLEB128Bytes(Offset);
+      } else if (Reg < 64) {
+        JCE->emitByte(dwarf::DW_CFA_offset + Reg);
+        JCE->emitULEB128Bytes(Offset);
+      } else {
+        JCE->emitByte(dwarf::DW_CFA_offset_extended);
+        JCE->emitULEB128Bytes(Reg);
+        JCE->emitULEB128Bytes(Offset);
+      }
+    }
+  }
+}
+
+/// SharedTypeIds - How many leading type ids two landing pads have in common.
+static unsigned SharedTypeIds(const LandingPadInfo *L,
+                              const LandingPadInfo *R) {
+  const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
+  unsigned LSize = LIds.size(), RSize = RIds.size();
+  unsigned MinSize = LSize < RSize ? LSize : RSize;
+  unsigned Count = 0;
+
+  for (; Count != MinSize; ++Count)
+    if (LIds[Count] != RIds[Count])
+      return Count;
+
+  return Count;
+}
+
+
+/// PadLT - Order landing pads lexicographically by type id.
+static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
+  const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
+  unsigned LSize = LIds.size(), RSize = RIds.size();
+  unsigned MinSize = LSize < RSize ? LSize : RSize;
+
+  for (unsigned i = 0; i != MinSize; ++i)
+    if (LIds[i] != RIds[i])
+      return LIds[i] < RIds[i];
+
+  return LSize < RSize;
+}
+
+namespace {
+
+struct KeyInfo {
+  static inline unsigned getEmptyKey() { return -1U; }
+  static inline unsigned getTombstoneKey() { return -2U; }
+  static unsigned getHashValue(const unsigned &Key) { return Key; }
+  static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
+  static bool isPod() { return true; }
+};
+
+/// ActionEntry - Structure describing an entry in the actions table.
+struct ActionEntry {
+  int ValueForTypeID; // The value to write - may not be equal to the type id.
+ int NextAction; + struct ActionEntry *Previous; +}; + +/// PadRange - Structure holding a try-range and the associated landing pad. +struct PadRange { + // The index of the landing pad. + unsigned PadIndex; + // The index of the begin and end labels in the landing pad's label lists. + unsigned RangeIndex; +}; + +typedef DenseMap<unsigned, PadRange, KeyInfo> RangeMapType; + +/// CallSiteEntry - Structure describing an entry in the call-site table. +struct CallSiteEntry { + unsigned BeginLabel; // zero indicates the start of the function. + unsigned EndLabel; // zero indicates the end of the function. + unsigned PadLabel; // zero indicates that there is no landing pad. + unsigned Action; +}; + +} + +unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF, + unsigned char* StartFunction, + unsigned char* EndFunction) const { + // Map all labels and get rid of any dead landing pads. + MMI->TidyLandingPads(); + + const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos(); + const std::vector<unsigned> &FilterIds = MMI->getFilterIds(); + const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads(); + if (PadInfos.empty()) return 0; + + // Sort the landing pads in order of their type ids. This is used to fold + // duplicate actions. + SmallVector<const LandingPadInfo *, 64> LandingPads; + LandingPads.reserve(PadInfos.size()); + for (unsigned i = 0, N = PadInfos.size(); i != N; ++i) + LandingPads.push_back(&PadInfos[i]); + std::sort(LandingPads.begin(), LandingPads.end(), PadLT); + + // Negative type ids index into FilterIds, positive type ids index into + // TypeInfos. The value written for a positive type id is just the type + // id itself. For a negative type id, however, the value written is the + // (negative) byte offset of the corresponding FilterIds entry. The byte + // offset is usually equal to the type id, because the FilterIds entries + // are written using a variable width encoding which outputs one byte per + // entry as long as the value written is not too large, but can differ. + // This kind of complication does not occur for positive type ids because + // type infos are output using a fixed width encoding. + // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i]. + SmallVector<int, 16> FilterOffsets; + FilterOffsets.reserve(FilterIds.size()); + int Offset = -1; + for(std::vector<unsigned>::const_iterator I = FilterIds.begin(), + E = FilterIds.end(); I != E; ++I) { + FilterOffsets.push_back(Offset); + Offset -= TargetAsmInfo::getULEB128Size(*I); + } + + // Compute the actions table and gather the first action index for each + // landing pad site. + SmallVector<ActionEntry, 32> Actions; + SmallVector<unsigned, 64> FirstActions; + FirstActions.reserve(LandingPads.size()); + + int FirstAction = 0; + unsigned SizeActions = 0; + for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) { + const LandingPadInfo *LP = LandingPads[i]; + const std::vector<int> &TypeIds = LP->TypeIds; + const unsigned NumShared = i ? 
SharedTypeIds(LP, LandingPads[i-1]) : 0;
+    unsigned SizeSiteActions = 0;
+
+    if (NumShared < TypeIds.size()) {
+      unsigned SizeAction = 0;
+      ActionEntry *PrevAction = 0;
+
+      if (NumShared) {
+        const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
+        assert(Actions.size());
+        PrevAction = &Actions.back();
+        SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
+          TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+        for (unsigned j = NumShared; j != SizePrevIds; ++j) {
+          SizeAction -= TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+          SizeAction += -PrevAction->NextAction;
+          PrevAction = PrevAction->Previous;
+        }
+      }
+
+      // Compute the actions.
+      for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
+        int TypeID = TypeIds[I];
+        assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
+        int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
+        unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);
+
+        int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
+        SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
+        SizeSiteActions += SizeAction;
+
+        ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
+        Actions.push_back(Action);
+
+        PrevAction = &Actions.back();
+      }
+
+      // Record the first action of the landing pad site.
+      FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
+    } // else identical - re-use previous FirstAction
+
+    FirstActions.push_back(FirstAction);
+
+    // Compute this site's contribution to size.
+    SizeActions += SizeSiteActions;
+  }
+
+  // Compute the call-site table.  Entries must be ordered by address.
+  SmallVector<CallSiteEntry, 64> CallSites;
+
+  RangeMapType PadMap;
+  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+    const LandingPadInfo *LandingPad = LandingPads[i];
+    for (unsigned j = 0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
+      unsigned BeginLabel = LandingPad->BeginLabels[j];
+      assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
+      PadRange P = { i, j };
+      PadMap[BeginLabel] = P;
+    }
+  }
+
+  bool MayThrow = false;
+  unsigned LastLabel = 0;
+  for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+       I != E; ++I) {
+    for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
+         MI != E; ++MI) {
+      if (!MI->isLabel()) {
+        MayThrow |= MI->getDesc().isCall();
+        continue;
+      }
+
+      unsigned BeginLabel = MI->getOperand(0).getImm();
+      assert(BeginLabel && "Invalid label!");
+
+      if (BeginLabel == LastLabel)
+        MayThrow = false;
+
+      RangeMapType::iterator L = PadMap.find(BeginLabel);
+
+      if (L == PadMap.end())
+        continue;
+
+      PadRange P = L->second;
+      const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
+
+      assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
+             "Inconsistent landing pad map!");
+
+      // If some instruction between the previous try-range and this one may
+      // throw, create a call-site entry with no landing pad for the region
+      // between the try-ranges.
+      if (MayThrow) {
+        CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
+        CallSites.push_back(Site);
+      }
+
+      LastLabel = LandingPad->EndLabels[P.RangeIndex];
+      CallSiteEntry Site = {BeginLabel, LastLabel,
+                            LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
+
+      assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
+             "Invalid landing pad!");
+
+      // Try to merge with the previous call-site.
+ if (CallSites.size()) { + CallSiteEntry &Prev = CallSites.back(); + if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) { + // Extend the range of the previous entry. + Prev.EndLabel = Site.EndLabel; + continue; + } + } + + // Otherwise, create a new call-site. + CallSites.push_back(Site); + } + } + // If some instruction between the previous try-range and the end of the + // function may throw, create a call-site entry with no landing pad for the + // region following the try-range. + if (MayThrow) { + CallSiteEntry Site = {LastLabel, 0, 0, 0}; + CallSites.push_back(Site); + } + + // Final tallies. + unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start. + sizeof(int32_t) + // Site length. + sizeof(int32_t)); // Landing pad. + for (unsigned i = 0, e = CallSites.size(); i < e; ++i) + SizeSites += TargetAsmInfo::getULEB128Size(CallSites[i].Action); + + unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize(); + + unsigned TypeOffset = sizeof(int8_t) + // Call site format + // Call-site table length + TargetAsmInfo::getULEB128Size(SizeSites) + + SizeSites + SizeActions + SizeTypes; + + unsigned TotalSize = sizeof(int8_t) + // LPStart format + sizeof(int8_t) + // TType format + TargetAsmInfo::getULEB128Size(TypeOffset) + // TType base offset + TypeOffset; + + unsigned SizeAlign = (4 - TotalSize) & 3; + + // Begin the exception table. + JCE->emitAlignment(4); + for (unsigned i = 0; i != SizeAlign; ++i) { + JCE->emitByte(0); + // Asm->EOL("Padding"); + } + + unsigned char* DwarfExceptionTable = (unsigned char*)JCE->getCurrentPCValue(); + + // Emit the header. + JCE->emitByte(dwarf::DW_EH_PE_omit); + // Asm->EOL("LPStart format (DW_EH_PE_omit)"); + JCE->emitByte(dwarf::DW_EH_PE_absptr); + // Asm->EOL("TType format (DW_EH_PE_absptr)"); + JCE->emitULEB128Bytes(TypeOffset); + // Asm->EOL("TType base offset"); + JCE->emitByte(dwarf::DW_EH_PE_udata4); + // Asm->EOL("Call site format (DW_EH_PE_udata4)"); + JCE->emitULEB128Bytes(SizeSites); + // Asm->EOL("Call-site table length"); + + // Emit the landing pad site information. + for (unsigned i = 0; i < CallSites.size(); ++i) { + CallSiteEntry &S = CallSites[i]; + intptr_t BeginLabelPtr = 0; + intptr_t EndLabelPtr = 0; + + if (!S.BeginLabel) { + BeginLabelPtr = (intptr_t)StartFunction; + JCE->emitInt32(0); + } else { + BeginLabelPtr = JCE->getLabelAddress(S.BeginLabel); + JCE->emitInt32(BeginLabelPtr - (intptr_t)StartFunction); + } + + // Asm->EOL("Region start"); + + if (!S.EndLabel) { + EndLabelPtr = (intptr_t)EndFunction; + JCE->emitInt32((intptr_t)EndFunction - BeginLabelPtr); + } else { + EndLabelPtr = JCE->getLabelAddress(S.EndLabel); + JCE->emitInt32(EndLabelPtr - BeginLabelPtr); + } + //Asm->EOL("Region length"); + + if (!S.PadLabel) { + JCE->emitInt32(0); + } else { + unsigned PadLabelPtr = JCE->getLabelAddress(S.PadLabel); + JCE->emitInt32(PadLabelPtr - (intptr_t)StartFunction); + } + // Asm->EOL("Landing pad"); + + JCE->emitULEB128Bytes(S.Action); + // Asm->EOL("Action"); + } + + // Emit the actions. + for (unsigned I = 0, N = Actions.size(); I != N; ++I) { + ActionEntry &Action = Actions[I]; + + JCE->emitSLEB128Bytes(Action.ValueForTypeID); + //Asm->EOL("TypeInfo index"); + JCE->emitSLEB128Bytes(Action.NextAction); + //Asm->EOL("Next action"); + } + + // Emit the type ids. 
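  // (Editor's aside, not part of the patch: the loop below walks TypeInfos in
  // reverse because the LSDA's type table grows downward from its base -- the
  // personality routine indexes entry N at (TTBase - N * pointer-size), so
  // TypeInfos[0] must be the entry emitted closest to the base.)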
+  for (unsigned M = TypeInfos.size(); M; --M) {
+    GlobalVariable *GV = TypeInfos[M - 1];
+
+    if (GV) {
+      if (TD->getPointerSize() == sizeof(int32_t)) {
+        JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
+      } else {
+        JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
+      }
+    } else {
+      if (TD->getPointerSize() == sizeof(int32_t))
+        JCE->emitInt32(0);
+      else
+        JCE->emitInt64(0);
+    }
+    // Asm->EOL("TypeInfo");
+  }
+
+  // Emit the filter typeids.
+  for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
+    unsigned TypeID = FilterIds[j];
+    JCE->emitULEB128Bytes(TypeID);
+    // Asm->EOL("Filter TypeInfo index");
+  }
+
+  JCE->emitAlignment(4);
+
+  return DwarfExceptionTable;
+}
+
+unsigned char*
+JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
+  unsigned PointerSize = TD->getPointerSize();
+  int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+          PointerSize : -PointerSize;
+
+  unsigned char* StartCommonPtr = (unsigned char*)JCE->getCurrentPCValue();
+  // EH Common Frame header
+  JCE->allocateSpace(4, 0);
+  unsigned char* FrameCommonBeginPtr = (unsigned char*)JCE->getCurrentPCValue();
+  JCE->emitInt32((int)0);
+  JCE->emitByte(dwarf::DW_CIE_VERSION);
+  JCE->emitString(Personality ? "zPLR" : "zR");
+  JCE->emitULEB128Bytes(1);
+  JCE->emitSLEB128Bytes(stackGrowth);
+  JCE->emitByte(RI->getDwarfRegNum(RI->getRARegister(), true));
+
+  if (Personality) {
+    // Augmentation Size: 3 small ULEBs of one byte each, and the personality
+    // function whose size is PointerSize.
+    JCE->emitULEB128Bytes(3 + PointerSize);
+
+    // We set the encoding of the personality as direct encoding because we use
+    // the function pointer. The encoding is not relative because the current
+    // PC value may be bigger than the personality function pointer.
+    if (PointerSize == 4) {
+      JCE->emitByte(dwarf::DW_EH_PE_sdata4);
+      JCE->emitInt32(((intptr_t)Jit.getPointerToGlobal(Personality)));
+    } else {
+      JCE->emitByte(dwarf::DW_EH_PE_sdata8);
+      JCE->emitInt64(((intptr_t)Jit.getPointerToGlobal(Personality)));
+    }
+
+    JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+    JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+
+  } else {
+    JCE->emitULEB128Bytes(1);
+    JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+  }
+
+  std::vector<MachineMove> Moves;
+  RI->getInitialFrameState(Moves);
+  EmitFrameMoves(0, Moves);
+  JCE->emitAlignment(PointerSize);
+
+  JCE->emitInt32At((uintptr_t*)StartCommonPtr,
+                   (uintptr_t)((unsigned char*)JCE->getCurrentPCValue() -
+                               FrameCommonBeginPtr));
+
+  return StartCommonPtr;
+}
+
+
+unsigned char*
+JITDwarfEmitter::EmitEHFrame(const Function* Personality,
+                             unsigned char* StartCommonPtr,
+                             unsigned char* StartFunction,
+                             unsigned char* EndFunction,
+                             unsigned char* ExceptionTable) const {
+  unsigned PointerSize = TD->getPointerSize();
+
+  // EH frame header.
+  unsigned char* StartEHPtr = (unsigned char*)JCE->getCurrentPCValue();
+  JCE->allocateSpace(4, 0);
+  unsigned char* FrameBeginPtr = (unsigned char*)JCE->getCurrentPCValue();
+  // FDE CIE Offset
+  JCE->emitInt32(FrameBeginPtr - StartCommonPtr);
+  JCE->emitInt32(StartFunction - (unsigned char*)JCE->getCurrentPCValue());
+  JCE->emitInt32(EndFunction - StartFunction);
+
+  // If there is a personality and landing pads then point to the language
+  // specific data area in the exception table.
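+  // Illustration: the CIE above advertised a 'z' augmentation, so each FDE
+  // must carry a ULEB128 augmentation-data length followed by the data; here
+  // that data is just the 4-byte LSDA pointer, hence the fixed length of 4
+  // emitted below when a personality function is present.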
+ if (MMI->getPersonalityIndex()) { + JCE->emitULEB128Bytes(4); + + if (!MMI->getLandingPads().empty()) { + JCE->emitInt32(ExceptionTable - (unsigned char*)JCE->getCurrentPCValue()); + } else { + JCE->emitInt32((int)0); + } + } else { + JCE->emitULEB128Bytes(0); + } + + // Indicate locations of function specific callee saved registers in + // frame. + EmitFrameMoves((intptr_t)StartFunction, MMI->getFrameMoves()); + + JCE->emitAlignment(PointerSize); + + // Indicate the size of the table + JCE->emitInt32At((uintptr_t*)StartEHPtr, + (uintptr_t)((unsigned char*)JCE->getCurrentPCValue() - + StartEHPtr)); + + // Double zeroes for the unwind runtime + if (PointerSize == 8) { + JCE->emitInt64(0); + JCE->emitInt64(0); + } else { + JCE->emitInt32(0); + JCE->emitInt32(0); + } + + + return StartEHPtr; +} + +unsigned JITDwarfEmitter::GetDwarfTableSizeInBytes(MachineFunction& F, + JITCodeEmitter& jce, + unsigned char* StartFunction, + unsigned char* EndFunction) { + const TargetMachine& TM = F.getTarget(); + TD = TM.getTargetData(); + needsIndirectEncoding = TM.getTargetAsmInfo()->getNeedsIndirectEncoding(); + stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection(); + RI = TM.getRegisterInfo(); + JCE = &jce; + unsigned FinalSize = 0; + + FinalSize += GetExceptionTableSizeInBytes(&F); + + const std::vector<Function *> Personalities = MMI->getPersonalities(); + FinalSize += + GetCommonEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()]); + + FinalSize += GetEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()], + StartFunction); + + return FinalSize; +} + +/// RoundUpToAlign - Add the specified alignment to FinalSize and returns +/// the new value. +static unsigned RoundUpToAlign(unsigned FinalSize, unsigned Alignment) { + if (Alignment == 0) Alignment = 1; + // Since we do not know where the buffer will be allocated, be pessimistic. + return FinalSize + Alignment; +} + +unsigned +JITDwarfEmitter::GetEHFrameSizeInBytes(const Function* Personality, + unsigned char* StartFunction) const { + unsigned PointerSize = TD->getPointerSize(); + unsigned FinalSize = 0; + // EH frame header. + FinalSize += PointerSize; + // FDE CIE Offset + FinalSize += 3 * PointerSize; + // If there is a personality and landing pads then point to the language + // specific data area in the exception table. + if (MMI->getPersonalityIndex()) { + FinalSize += TargetAsmInfo::getULEB128Size(4); + FinalSize += PointerSize; + } else { + FinalSize += TargetAsmInfo::getULEB128Size(0); + } + + // Indicate locations of function specific callee saved registers in + // frame. + FinalSize += GetFrameMovesSizeInBytes((intptr_t)StartFunction, + MMI->getFrameMoves()); + + FinalSize = RoundUpToAlign(FinalSize, 4); + + // Double zeroes for the unwind runtime + FinalSize += 2 * PointerSize; + + return FinalSize; +} + +unsigned JITDwarfEmitter::GetCommonEHFrameSizeInBytes(const Function* Personality) + const { + + unsigned PointerSize = TD->getPointerSize(); + int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ? + PointerSize : -PointerSize; + unsigned FinalSize = 0; + // EH Common Frame header + FinalSize += PointerSize; + FinalSize += 4; + FinalSize += 1; + FinalSize += Personality ? 
5 : 3; // "zPLR" or "zR"
+  FinalSize += TargetAsmInfo::getULEB128Size(1);
+  FinalSize += TargetAsmInfo::getSLEB128Size(stackGrowth);
+  FinalSize += 1;
+
+  if (Personality) {
+    FinalSize += TargetAsmInfo::getULEB128Size(7);
+
+    // Encoding
+    FinalSize += 1;
+    // Personality
+    FinalSize += PointerSize;
+
+    FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+    FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+
+  } else {
+    FinalSize += TargetAsmInfo::getULEB128Size(1);
+    FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+  }
+
+  std::vector<MachineMove> Moves;
+  RI->getInitialFrameState(Moves);
+  FinalSize += GetFrameMovesSizeInBytes(0, Moves);
+  FinalSize = RoundUpToAlign(FinalSize, 4);
+  return FinalSize;
+}
+
+unsigned
+JITDwarfEmitter::GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
+                                  const std::vector<MachineMove> &Moves) const {
+  unsigned PointerSize = TD->getPointerSize();
+  int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+          PointerSize : -PointerSize;
+  bool IsLocal = BaseLabelPtr;
+  unsigned FinalSize = 0;
+
+  for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+    const MachineMove &Move = Moves[i];
+    unsigned LabelID = Move.getLabelID();
+
+    if (LabelID) {
+      LabelID = MMI->MappedLabel(LabelID);
+
+      // Throw out move if the label is invalid.
+      if (!LabelID) continue;
+    }
+
+    intptr_t LabelPtr = 0;
+    if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+
+    const MachineLocation &Dst = Move.getDestination();
+    const MachineLocation &Src = Move.getSource();
+
+    // Advance row if new location.
+    if (BaseLabelPtr && LabelID && (BaseLabelPtr != LabelPtr || !IsLocal)) {
+      FinalSize++;
+      FinalSize += PointerSize;
+      BaseLabelPtr = LabelPtr;
+      IsLocal = true;
+    }
+
+    // If advancing cfa.
+    if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+      if (!Src.isReg()) {
+        if (Src.getReg() == MachineLocation::VirtualFP) {
+          ++FinalSize;
+        } else {
+          ++FinalSize;
+          unsigned RegNum = RI->getDwarfRegNum(Src.getReg(), true);
+          FinalSize += TargetAsmInfo::getULEB128Size(RegNum);
+        }
+
+        int Offset = -Src.getOffset();
+
+        FinalSize += TargetAsmInfo::getULEB128Size(Offset);
+      } else {
+        assert(0 && "Machine move not supported yet.");
+      }
+    } else if (Src.isReg() &&
+               Src.getReg() == MachineLocation::VirtualFP) {
+      if (Dst.isReg()) {
+        ++FinalSize;
+        unsigned RegNum = RI->getDwarfRegNum(Dst.getReg(), true);
+        FinalSize += TargetAsmInfo::getULEB128Size(RegNum);
+      } else {
+        assert(0 && "Machine move not supported yet.");
+      }
+    } else {
+      unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
+      int Offset = Dst.getOffset() / stackGrowth;
+
+      if (Offset < 0) {
+        ++FinalSize;
+        FinalSize += TargetAsmInfo::getULEB128Size(Reg);
+        FinalSize += TargetAsmInfo::getSLEB128Size(Offset);
+      } else if (Reg < 64) {
+        ++FinalSize;
+        FinalSize += TargetAsmInfo::getULEB128Size(Offset);
+      } else {
+        ++FinalSize;
+        FinalSize += TargetAsmInfo::getULEB128Size(Reg);
+        FinalSize += TargetAsmInfo::getULEB128Size(Offset);
+      }
+    }
+  }
+  return FinalSize;
+}
+
+unsigned
+JITDwarfEmitter::GetExceptionTableSizeInBytes(MachineFunction* MF) const {
+  unsigned FinalSize = 0;
+
+  // Map all labels and get rid of any dead landing pads.
+  MMI->TidyLandingPads();
+
+  const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
+  const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
+  const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+  if (PadInfos.empty()) return 0;
+
+  // Sort the landing pads in order of their type ids. This is used to fold
+  // duplicate actions.
+  SmallVector<const LandingPadInfo *, 64> LandingPads;
+  LandingPads.reserve(PadInfos.size());
+  for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
+    LandingPads.push_back(&PadInfos[i]);
+  std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
+
+  // Negative type ids index into FilterIds, positive type ids index into
+  // TypeInfos. The value written for a positive type id is just the type
+  // id itself. For a negative type id, however, the value written is the
+  // (negative) byte offset of the corresponding FilterIds entry. The byte
+  // offset is usually equal to the type id, because the FilterIds entries
+  // are written using a variable width encoding which outputs one byte per
+  // entry as long as the value written is not too large, but can differ.
+  // This kind of complication does not occur for positive type ids because
+  // type infos are output using a fixed width encoding.
+  // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
+  SmallVector<int, 16> FilterOffsets;
+  FilterOffsets.reserve(FilterIds.size());
+  int Offset = -1;
+  for (std::vector<unsigned>::const_iterator I = FilterIds.begin(),
+       E = FilterIds.end(); I != E; ++I) {
+    FilterOffsets.push_back(Offset);
+    Offset -= TargetAsmInfo::getULEB128Size(*I);
+  }
+
+  // Compute the actions table and gather the first action index for each
+  // landing pad site.
+  SmallVector<ActionEntry, 32> Actions;
+  SmallVector<unsigned, 64> FirstActions;
+  FirstActions.reserve(LandingPads.size());
+
+  int FirstAction = 0;
+  unsigned SizeActions = 0;
+  for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+    const LandingPadInfo *LP = LandingPads[i];
+    const std::vector<int> &TypeIds = LP->TypeIds;
+    const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
+    unsigned SizeSiteActions = 0;
+
+    if (NumShared < TypeIds.size()) {
+      unsigned SizeAction = 0;
+      ActionEntry *PrevAction = 0;
+
+      if (NumShared) {
+        const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
+        assert(Actions.size());
+        PrevAction = &Actions.back();
+        SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
+          TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+        for (unsigned j = NumShared; j != SizePrevIds; ++j) {
+          SizeAction -= TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+          SizeAction += -PrevAction->NextAction;
+          PrevAction = PrevAction->Previous;
+        }
+      }
+
+      // Compute the actions.
+      for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
+        int TypeID = TypeIds[I];
+        assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
+        int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
+        unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);
+
+        int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
+        SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
+        SizeSiteActions += SizeAction;
+
+        ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
+        Actions.push_back(Action);
+
+        PrevAction = &Actions.back();
+      }
+
+      // Record the first action of the landing pad site.
+      FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
+    } // else identical - re-use previous FirstAction
+
+    FirstActions.push_back(FirstAction);
+
+    // Compute this site's contribution to size.
+    SizeActions += SizeSiteActions;
+  }
+
+  // Compute the call-site table. Entries must be ordered by address.
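+  // Note: this sizing pass deliberately mirrors the call-site scan in
+  // EmitExceptionTable statement for statement; any drift between the two
+  // would make the size estimate wrong and could overrun the buffer the
+  // memory manager hands out.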
+ SmallVector<CallSiteEntry, 64> CallSites; + + RangeMapType PadMap; + for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) { + const LandingPadInfo *LandingPad = LandingPads[i]; + for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) { + unsigned BeginLabel = LandingPad->BeginLabels[j]; + assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!"); + PadRange P = { i, j }; + PadMap[BeginLabel] = P; + } + } + + bool MayThrow = false; + unsigned LastLabel = 0; + for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); + I != E; ++I) { + for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end(); + MI != E; ++MI) { + if (!MI->isLabel()) { + MayThrow |= MI->getDesc().isCall(); + continue; + } + + unsigned BeginLabel = MI->getOperand(0).getImm(); + assert(BeginLabel && "Invalid label!"); + + if (BeginLabel == LastLabel) + MayThrow = false; + + RangeMapType::iterator L = PadMap.find(BeginLabel); + + if (L == PadMap.end()) + continue; + + PadRange P = L->second; + const LandingPadInfo *LandingPad = LandingPads[P.PadIndex]; + + assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] && + "Inconsistent landing pad map!"); + + // If some instruction between the previous try-range and this one may + // throw, create a call-site entry with no landing pad for the region + // between the try-ranges. + if (MayThrow) { + CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0}; + CallSites.push_back(Site); + } + + LastLabel = LandingPad->EndLabels[P.RangeIndex]; + CallSiteEntry Site = {BeginLabel, LastLabel, + LandingPad->LandingPadLabel, FirstActions[P.PadIndex]}; + + assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel && + "Invalid landing pad!"); + + // Try to merge with the previous call-site. + if (CallSites.size()) { + CallSiteEntry &Prev = CallSites.back(); + if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) { + // Extend the range of the previous entry. + Prev.EndLabel = Site.EndLabel; + continue; + } + } + + // Otherwise, create a new call-site. + CallSites.push_back(Site); + } + } + // If some instruction between the previous try-range and the end of the + // function may throw, create a call-site entry with no landing pad for the + // region following the try-range. + if (MayThrow) { + CallSiteEntry Site = {LastLabel, 0, 0, 0}; + CallSites.push_back(Site); + } + + // Final tallies. + unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start. + sizeof(int32_t) + // Site length. + sizeof(int32_t)); // Landing pad. + for (unsigned i = 0, e = CallSites.size(); i < e; ++i) + SizeSites += TargetAsmInfo::getULEB128Size(CallSites[i].Action); + + unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize(); + + unsigned TypeOffset = sizeof(int8_t) + // Call site format + // Call-site table length + TargetAsmInfo::getULEB128Size(SizeSites) + + SizeSites + SizeActions + SizeTypes; + + unsigned TotalSize = sizeof(int8_t) + // LPStart format + sizeof(int8_t) + // TType format + TargetAsmInfo::getULEB128Size(TypeOffset) + // TType base offset + TypeOffset; + + unsigned SizeAlign = (4 - TotalSize) & 3; + + // Begin the exception table. + FinalSize = RoundUpToAlign(FinalSize, 4); + for (unsigned i = 0; i != SizeAlign; ++i) { + ++FinalSize; + } + + unsigned PointerSize = TD->getPointerSize(); + + // Emit the header. 
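+  // The five header fields are counted as one byte apiece below; the two
+  // ULEB128 fields among them (TType base offset, call-site table length)
+  // are thus assumed to encode in a single byte each, i.e. to be under 128.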
+ ++FinalSize; + // Asm->EOL("LPStart format (DW_EH_PE_omit)"); + ++FinalSize; + // Asm->EOL("TType format (DW_EH_PE_absptr)"); + ++FinalSize; + // Asm->EOL("TType base offset"); + ++FinalSize; + // Asm->EOL("Call site format (DW_EH_PE_udata4)"); + ++FinalSize; + // Asm->EOL("Call-site table length"); + + // Emit the landing pad site information. + for (unsigned i = 0; i < CallSites.size(); ++i) { + CallSiteEntry &S = CallSites[i]; + + // Asm->EOL("Region start"); + FinalSize += PointerSize; + + //Asm->EOL("Region length"); + FinalSize += PointerSize; + + // Asm->EOL("Landing pad"); + FinalSize += PointerSize; + + FinalSize += TargetAsmInfo::getULEB128Size(S.Action); + // Asm->EOL("Action"); + } + + // Emit the actions. + for (unsigned I = 0, N = Actions.size(); I != N; ++I) { + ActionEntry &Action = Actions[I]; + + //Asm->EOL("TypeInfo index"); + FinalSize += TargetAsmInfo::getSLEB128Size(Action.ValueForTypeID); + //Asm->EOL("Next action"); + FinalSize += TargetAsmInfo::getSLEB128Size(Action.NextAction); + } + + // Emit the type ids. + for (unsigned M = TypeInfos.size(); M; --M) { + // Asm->EOL("TypeInfo"); + FinalSize += PointerSize; + } + + // Emit the filter typeids. + for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) { + unsigned TypeID = FilterIds[j]; + FinalSize += TargetAsmInfo::getULEB128Size(TypeID); + //Asm->EOL("Filter TypeInfo index"); + } + + FinalSize = RoundUpToAlign(FinalSize, 4); + + return FinalSize; +} diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h new file mode 100644 index 0000000000000..9120ed44e6a60 --- /dev/null +++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h @@ -0,0 +1,87 @@ +//===------ JITDwarfEmitter.h - Write dwarf tables into memory ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a JITDwarfEmitter object that is used by the JIT to +// write dwarf tables to memory. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H +#define LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H + +namespace llvm { + +class Function; +class JITCodeEmitter; +class MachineFunction; +class MachineModuleInfo; +class MachineMove; +class TargetData; +class TargetMachine; +class TargetRegisterInfo; + +class JITDwarfEmitter { + const TargetData* TD; + JITCodeEmitter* JCE; + const TargetRegisterInfo* RI; + MachineModuleInfo* MMI; + JIT& Jit; + bool needsIndirectEncoding; + bool stackGrowthDirection; + + unsigned char* EmitExceptionTable(MachineFunction* MF, + unsigned char* StartFunction, + unsigned char* EndFunction) const; + + void EmitFrameMoves(intptr_t BaseLabelPtr, + const std::vector<MachineMove> &Moves) const; + + unsigned char* EmitCommonEHFrame(const Function* Personality) const; + + unsigned char* EmitEHFrame(const Function* Personality, + unsigned char* StartBufferPtr, + unsigned char* StartFunction, + unsigned char* EndFunction, + unsigned char* ExceptionTable) const; + + unsigned GetExceptionTableSizeInBytes(MachineFunction* MF) const; + + unsigned + GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr, + const std::vector<MachineMove> &Moves) const; + + unsigned GetCommonEHFrameSizeInBytes(const Function* Personality) const; + + unsigned GetEHFrameSizeInBytes(const Function* Personality, + unsigned char* StartFunction) const; + +public: + + JITDwarfEmitter(JIT& jit); + + unsigned char* EmitDwarfTable(MachineFunction& F, + JITCodeEmitter& JCE, + unsigned char* StartFunction, + unsigned char* EndFunction); + + + unsigned GetDwarfTableSizeInBytes(MachineFunction& F, + JITCodeEmitter& JCE, + unsigned char* StartFunction, + unsigned char* EndFunction); + + void setModuleInfo(MachineModuleInfo* Info) { + MMI = Info; + } +}; + + +} // end namespace llvm + +#endif // LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp new file mode 100644 index 0000000000000..d3b0820c5f05f --- /dev/null +++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp @@ -0,0 +1,1615 @@ +//===-- JITEmitter.cpp - Write machine code to executable memory ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a MachineCodeEmitter object that is used by the JIT to +// write machine code to memory and remember where relocatable values are. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "jit" +#include "JIT.h" +#include "JITDwarfEmitter.h" +#include "llvm/Constants.h" +#include "llvm/Module.h" +#include "llvm/DerivedTypes.h" +#include "llvm/CodeGen/JITCodeEmitter.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRelocation.h" +#include "llvm/ExecutionEngine/JITMemoryManager.h" +#include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/CodeGen/MachineCodeInfo.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetJITInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MutexGuard.h" +#include "llvm/Support/ValueHandle.h" +#include "llvm/System/Disassembler.h" +#include "llvm/System/Memory.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include <algorithm> +#ifndef NDEBUG +#include <iomanip> +#endif +using namespace llvm; + +STATISTIC(NumBytes, "Number of bytes of machine code compiled"); +STATISTIC(NumRelos, "Number of relocations applied"); +static JIT *TheJIT = 0; + + +//===----------------------------------------------------------------------===// +// JIT lazy compilation code. +// +namespace { + class JITResolverState { + public: + typedef std::map<AssertingVH<Function>, void*> FunctionToStubMapTy; + typedef std::map<void*, Function*> StubToFunctionMapTy; + typedef std::map<AssertingVH<GlobalValue>, void*> GlobalToIndirectSymMapTy; + private: + /// FunctionToStubMap - Keep track of the stub created for a particular + /// function so that we can reuse them if necessary. + FunctionToStubMapTy FunctionToStubMap; + + /// StubToFunctionMap - Keep track of the function that each stub + /// corresponds to. + StubToFunctionMapTy StubToFunctionMap; + + /// GlobalToIndirectSymMap - Keep track of the indirect symbol created for a + /// particular GlobalVariable so that we can reuse them if necessary. + GlobalToIndirectSymMapTy GlobalToIndirectSymMap; + + public: + FunctionToStubMapTy& getFunctionToStubMap(const MutexGuard& locked) { + assert(locked.holds(TheJIT->lock)); + return FunctionToStubMap; + } + + StubToFunctionMapTy& getStubToFunctionMap(const MutexGuard& locked) { + assert(locked.holds(TheJIT->lock)); + return StubToFunctionMap; + } + + GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& locked) { + assert(locked.holds(TheJIT->lock)); + return GlobalToIndirectSymMap; + } + }; + + /// JITResolver - Keep track of, and resolve, call sites for functions that + /// have not yet been compiled. + class JITResolver { + typedef JITResolverState::FunctionToStubMapTy FunctionToStubMapTy; + typedef JITResolverState::StubToFunctionMapTy StubToFunctionMapTy; + typedef JITResolverState::GlobalToIndirectSymMapTy GlobalToIndirectSymMapTy; + + /// LazyResolverFn - The target lazy resolver function that we actually + /// rewrite instructions to use. + TargetJITInfo::LazyResolverFn LazyResolverFn; + + JITResolverState state; + + /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for + /// external functions. 
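+    /// (Keyed by the callee's resolved address rather than a Function*,
+    /// since external functions have no IR body to key on.)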
+    std::map<void*, void*> ExternalFnToStubMap;
+
+    /// revGOTMap - map addresses to indexes in the GOT
+    std::map<void*, unsigned> revGOTMap;
+    unsigned nextGOTIndex;
+
+    static JITResolver *TheJITResolver;
+  public:
+    explicit JITResolver(JIT &jit) : nextGOTIndex(0) {
+      TheJIT = &jit;
+
+      LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
+      assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
+      TheJITResolver = this;
+    }
+
+    ~JITResolver() {
+      TheJITResolver = 0;
+    }
+
+    /// getFunctionStubIfAvailable - This returns a pointer to a function stub
+    /// if it has already been created.
+    void *getFunctionStubIfAvailable(Function *F);
+
+    /// getFunctionStub - This returns a pointer to a function stub, creating
+    /// one on demand as needed.
+    void *getFunctionStub(Function *F);
+
+    /// getExternalFunctionStub - Return a stub for the function at the
+    /// specified address, created lazily on demand.
+    void *getExternalFunctionStub(void *FnAddr);
+
+    /// getGlobalValueIndirectSym - Return an indirect symbol containing the
+    /// specified GV address.
+    void *getGlobalValueIndirectSym(GlobalValue *V, void *GVAddress);
+
+    /// AddCallbackAtLocation - If the target is capable of rewriting an
+    /// instruction without the use of a stub, record the location of the use
+    /// so we know which function is being used at the location.
+    void *AddCallbackAtLocation(Function *F, void *Location) {
+      MutexGuard locked(TheJIT->lock);
+      // Record which function is being called from this location.
+      state.getStubToFunctionMap(locked)[Location] = F;
+      return (void*)(intptr_t)LazyResolverFn;
+    }
+
+    void getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
+                           SmallVectorImpl<void*> &Ptrs);
+
+    GlobalValue *invalidateStub(void *Stub);
+
+    /// getGOTIndexForAddr - Return a new or existing index in the GOT for
+    /// an address. This function only manages slots, it does not manage the
+    /// contents of the slots or the memory associated with the GOT.
+    unsigned getGOTIndexForAddr(void *addr);
+
+    /// JITCompilerFn - This function is called to resolve a stub to a compiled
+    /// address. If the LLVM Function corresponding to the stub has not yet
+    /// been compiled, this function compiles it first.
+    static void *JITCompilerFn(void *Stub);
+  };
+}
+
+JITResolver *JITResolver::TheJITResolver = 0;
+
+/// getFunctionStubIfAvailable - This returns a pointer to a function stub
+/// if it has already been created.
+void *JITResolver::getFunctionStubIfAvailable(Function *F) {
+  MutexGuard locked(TheJIT->lock);
+
+  // If we already have a stub for this function, recycle it.
+  void *&Stub = state.getFunctionToStubMap(locked)[F];
+  return Stub;
+}
+
+/// getFunctionStub - This returns a pointer to a function stub, creating
+/// one on demand as needed.
+void *JITResolver::getFunctionStub(Function *F) {
+  MutexGuard locked(TheJIT->lock);
+
+  // If we already have a stub for this function, recycle it.
+  void *&Stub = state.getFunctionToStubMap(locked)[F];
+  if (Stub) return Stub;
+
+  // Call the lazy resolver function unless we are JIT'ing non-lazily, in which
+  // case we must resolve the symbol now.
+  void *Actual = TheJIT->isLazyCompilationDisabled()
+    ? (void *)0 : (void *)(intptr_t)LazyResolverFn;
+
+  // If this is an external declaration, attempt to resolve the address now
+  // to place in the stub.
+ if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) { + Actual = TheJIT->getPointerToFunction(F); + + // If we resolved the symbol to a null address (eg. a weak external) + // don't emit a stub. Return a null pointer to the application. If dlsym + // stubs are enabled, not being able to resolve the address is not + // meaningful. + if (!Actual && !TheJIT->areDlsymStubsEnabled()) return 0; + } + + // Codegen a new stub, calling the lazy resolver or the actual address of the + // external function, if it was resolved. + Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual, + *TheJIT->getCodeEmitter()); + + if (Actual != (void*)(intptr_t)LazyResolverFn) { + // If we are getting the stub for an external function, we really want the + // address of the stub in the GlobalAddressMap for the JIT, not the address + // of the external function. + TheJIT->updateGlobalMapping(F, Stub); + } + + DOUT << "JIT: Stub emitted at [" << Stub << "] for function '" + << F->getName() << "'\n"; + + // Finally, keep track of the stub-to-Function mapping so that the + // JITCompilerFn knows which function to compile! + state.getStubToFunctionMap(locked)[Stub] = F; + + // If we are JIT'ing non-lazily but need to call a function that does not + // exist yet, add it to the JIT's work list so that we can fill in the stub + // address later. + if (!Actual && TheJIT->isLazyCompilationDisabled()) + if (!F->isDeclaration() || F->hasNotBeenReadFromBitcode()) + TheJIT->addPendingFunction(F); + + return Stub; +} + +/// getGlobalValueIndirectSym - Return a lazy pointer containing the specified +/// GV address. +void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) { + MutexGuard locked(TheJIT->lock); + + // If we already have a stub for this global variable, recycle it. + void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV]; + if (IndirectSym) return IndirectSym; + + // Otherwise, codegen a new indirect symbol. + IndirectSym = TheJIT->getJITInfo().emitGlobalValueIndirectSym(GV, GVAddress, + *TheJIT->getCodeEmitter()); + + DOUT << "JIT: Indirect symbol emitted at [" << IndirectSym << "] for GV '" + << GV->getName() << "'\n"; + + return IndirectSym; +} + +/// getExternalFunctionStub - Return a stub for the function at the +/// specified address, created lazily on demand. +void *JITResolver::getExternalFunctionStub(void *FnAddr) { + // If we already have a stub for this function, recycle it. 
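+  // (std::map::operator[] default-constructs a null entry on first use, so
+  // the reference below can be tested and then filled in place.)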
+ void *&Stub = ExternalFnToStubMap[FnAddr]; + if (Stub) return Stub; + + Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, + *TheJIT->getCodeEmitter()); + + DOUT << "JIT: Stub emitted at [" << Stub + << "] for external function at '" << FnAddr << "'\n"; + return Stub; +} + +unsigned JITResolver::getGOTIndexForAddr(void* addr) { + unsigned idx = revGOTMap[addr]; + if (!idx) { + idx = ++nextGOTIndex; + revGOTMap[addr] = idx; + DOUT << "JIT: Adding GOT entry " << idx << " for addr [" << addr << "]\n"; + } + return idx; +} + +void JITResolver::getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs, + SmallVectorImpl<void*> &Ptrs) { + MutexGuard locked(TheJIT->lock); + + FunctionToStubMapTy &FM = state.getFunctionToStubMap(locked); + GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked); + + for (FunctionToStubMapTy::iterator i = FM.begin(), e = FM.end(); i != e; ++i){ + Function *F = i->first; + if (F->isDeclaration() && F->hasExternalLinkage()) { + GVs.push_back(i->first); + Ptrs.push_back(i->second); + } + } + for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end(); + i != e; ++i) { + GVs.push_back(i->first); + Ptrs.push_back(i->second); + } +} + +GlobalValue *JITResolver::invalidateStub(void *Stub) { + MutexGuard locked(TheJIT->lock); + + FunctionToStubMapTy &FM = state.getFunctionToStubMap(locked); + StubToFunctionMapTy &SM = state.getStubToFunctionMap(locked); + GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked); + + // Look up the cheap way first, to see if it's a function stub we are + // invalidating. If so, remove it from both the forward and reverse maps. + if (SM.find(Stub) != SM.end()) { + Function *F = SM[Stub]; + SM.erase(Stub); + FM.erase(F); + return F; + } + + // Otherwise, it might be an indirect symbol stub. Find it and remove it. + for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end(); + i != e; ++i) { + if (i->second != Stub) + continue; + GlobalValue *GV = i->first; + GM.erase(i); + return GV; + } + + // Lastly, check to see if it's in the ExternalFnToStubMap. + for (std::map<void *, void *>::iterator i = ExternalFnToStubMap.begin(), + e = ExternalFnToStubMap.end(); i != e; ++i) { + if (i->second != Stub) + continue; + ExternalFnToStubMap.erase(i); + break; + } + + return 0; +} + +/// JITCompilerFn - This function is called when a lazy compilation stub has +/// been entered. It looks up which function this stub corresponds to, compiles +/// it if necessary, then returns the resultant function pointer. +void *JITResolver::JITCompilerFn(void *Stub) { + JITResolver &JR = *TheJITResolver; + + Function* F = 0; + void* ActualPtr = 0; + + { + // Only lock for getting the Function. The call getPointerToFunction made + // in this function might trigger function materializing, which requires + // JIT lock to be unlocked. + MutexGuard locked(TheJIT->lock); + + // The address given to us for the stub may not be exactly right, it might be + // a little bit after the stub. As such, use upper_bound to find it. + StubToFunctionMapTy::iterator I = + JR.state.getStubToFunctionMap(locked).upper_bound(Stub); + assert(I != JR.state.getStubToFunctionMap(locked).begin() && + "This is not a known stub!"); + F = (--I)->second; + ActualPtr = I->first; + } + + // If we have already code generated the function, just return the address. + void *Result = TheJIT->getPointerToGlobalIfAvailable(F); + + if (!Result) { + // Otherwise we don't have it, do lazy compilation now. 
+ + // If lazy compilation is disabled, emit a useful error message and abort. + if (TheJIT->isLazyCompilationDisabled()) { + cerr << "LLVM JIT requested to do lazy compilation of function '" + << F->getName() << "' when lazy compiles are disabled!\n"; + abort(); + } + + // We might like to remove the stub from the StubToFunction map. + // We can't do that! Multiple threads could be stuck, waiting to acquire the + // lock above. As soon as the 1st function finishes compiling the function, + // the next one will be released, and needs to be able to find the function + // it needs to call. + //JR.state.getStubToFunctionMap(locked).erase(I); + + DOUT << "JIT: Lazily resolving function '" << F->getName() + << "' In stub ptr = " << Stub << " actual ptr = " + << ActualPtr << "\n"; + + Result = TheJIT->getPointerToFunction(F); + } + + // Reacquire the lock to erase the stub in the map. + MutexGuard locked(TheJIT->lock); + + // We don't need to reuse this stub in the future, as F is now compiled. + JR.state.getFunctionToStubMap(locked).erase(F); + + // FIXME: We could rewrite all references to this stub if we knew them. + + // What we will do is set the compiled function address to map to the + // same GOT entry as the stub so that later clients may update the GOT + // if they see it still using the stub address. + // Note: this is done so the Resolver doesn't have to manage GOT memory + // Do this without allocating map space if the target isn't using a GOT + if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end()) + JR.revGOTMap[Result] = JR.revGOTMap[Stub]; + + return Result; +} + +//===----------------------------------------------------------------------===// +// Function Index Support + +// On MacOS we generate an index of currently JIT'd functions so that +// performance tools can determine a symbol name and accurate code range for a +// PC value. Because performance tools are generally asynchronous, the code +// below is written with the hope that it could be interrupted at any time and +// have useful answers. However, we don't go crazy with atomic operations, we +// just do a "reasonable effort". +#ifdef __APPLE__ +#define ENABLE_JIT_SYMBOL_TABLE 0 +#endif + +/// JitSymbolEntry - Each function that is JIT compiled results in one of these +/// being added to an array of symbols. This indicates the name of the function +/// as well as the address range it occupies. This allows the client to map +/// from a PC value to the name of the function. +struct JitSymbolEntry { + const char *FnName; // FnName - a strdup'd string. + void *FnStart; + intptr_t FnSize; +}; + + +struct JitSymbolTable { + /// NextPtr - This forms a linked list of JitSymbolTable entries. This + /// pointer is not used right now, but might be used in the future. Consider + /// it reserved for future use. + JitSymbolTable *NextPtr; + + /// Symbols - This is an array of JitSymbolEntry entries. Only the first + /// 'NumSymbols' symbols are valid. + JitSymbolEntry *Symbols; + + /// NumSymbols - This indicates the number entries in the Symbols array that + /// are valid. + unsigned NumSymbols; + + /// NumAllocated - This indicates the amount of space we have in the Symbols + /// array. This is a private field that should not be read by external tools. 
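+  /// (Grown geometrically by AddFunctionToSymbolTable below, starting at 64
+  /// slots.)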
+  unsigned NumAllocated;
+};
+
+#if ENABLE_JIT_SYMBOL_TABLE
+JitSymbolTable *__jitSymbolTable;
+#endif
+
+static void AddFunctionToSymbolTable(const char *FnName,
+                                     void *FnStart, intptr_t FnSize) {
+  assert(FnName != 0 && FnStart != 0 && "Bad symbol to add");
+  JitSymbolTable **SymTabPtrPtr = 0;
+#if !ENABLE_JIT_SYMBOL_TABLE
+  return;
+#else
+  SymTabPtrPtr = &__jitSymbolTable;
+#endif
+
+  // If this is the first entry in the symbol table, add the JitSymbolTable
+  // index.
+  if (*SymTabPtrPtr == 0) {
+    JitSymbolTable *New = new JitSymbolTable();
+    New->NextPtr = 0;
+    New->Symbols = 0;
+    New->NumSymbols = 0;
+    New->NumAllocated = 0;
+    *SymTabPtrPtr = New;
+  }
+
+  JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
+
+  // If we don't have space in the table, reallocate it.
+  if (SymTabPtr->NumSymbols >= SymTabPtr->NumAllocated) {
+    unsigned NewSize = std::max(64U, SymTabPtr->NumAllocated*2);
+    JitSymbolEntry *NewSymbols = new JitSymbolEntry[NewSize];
+    JitSymbolEntry *OldSymbols = SymTabPtr->Symbols;
+
+    // Copy the old entries over.
+    memcpy(NewSymbols, OldSymbols, SymTabPtr->NumSymbols*sizeof(OldSymbols[0]));
+
+    // Swap the new symbols in, delete the old ones.
+    SymTabPtr->Symbols = NewSymbols;
+    SymTabPtr->NumAllocated = NewSize;
+    delete [] OldSymbols;
+  }
+
+  // Otherwise, we have enough space, just tack it onto the end of the array.
+  JitSymbolEntry &Entry = SymTabPtr->Symbols[SymTabPtr->NumSymbols];
+  Entry.FnName = strdup(FnName);
+  Entry.FnStart = FnStart;
+  Entry.FnSize = FnSize;
+  ++SymTabPtr->NumSymbols;
+}
+
+static void RemoveFunctionFromSymbolTable(void *FnStart) {
+  assert(FnStart && "Invalid function pointer");
+  JitSymbolTable **SymTabPtrPtr = 0;
+#if !ENABLE_JIT_SYMBOL_TABLE
+  return;
+#else
+  SymTabPtrPtr = &__jitSymbolTable;
+#endif
+
+  JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
+  JitSymbolEntry *Symbols = SymTabPtr->Symbols;
+
+  // Scan the table to find its index. The table is not sorted, so do a linear
+  // scan.
+  unsigned Index;
+  for (Index = 0; Symbols[Index].FnStart != FnStart; ++Index)
+    assert(Index != SymTabPtr->NumSymbols && "Didn't find function!");
+
+  // Once we have an index, we know to nuke this entry, overwrite it with the
+  // entry at the end of the array, making the last entry redundant.
+  const char *OldName = Symbols[Index].FnName;
+  Symbols[Index] = Symbols[SymTabPtr->NumSymbols-1];
+  free((void*)OldName);
+
+  // Drop the number of symbols in the table.
+  --SymTabPtr->NumSymbols;
+
+  // Finally, if we deleted the final symbol, deallocate the table itself.
+  if (SymTabPtr->NumSymbols != 0)
+    return;
+
+  *SymTabPtrPtr = 0;
+  delete [] Symbols;
+  delete SymTabPtr;
+}
+
+//===----------------------------------------------------------------------===//
+// JITEmitter code.
+//
+namespace {
+  /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
+  /// used to output functions to memory for execution.
+  class JITEmitter : public JITCodeEmitter {
+    JITMemoryManager *MemMgr;
+
+    // When outputting a function stub in the context of some other function,
+    // we save BufferBegin/BufferEnd/CurBufferPtr here.
+    uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
+
+    /// Relocations - These are the relocations that the function needs, as
+    /// emitted.
+    std::vector<MachineRelocation> Relocations;
+
+    /// MBBLocations - This vector is a mapping from MBB ID's to their address.
+    /// It is filled in by the StartMachineBasicBlock callback and queried by
+    /// the getMachineBasicBlockAddress callback.
+    std::vector<uintptr_t> MBBLocations;
+
+    /// ConstantPool - The constant pool for the current function.
+    ///
+    MachineConstantPool *ConstantPool;
+
+    /// ConstantPoolBase - A pointer to the first entry in the constant pool.
+    ///
+    void *ConstantPoolBase;
+
+    /// ConstPoolAddresses - Addresses of individual constant pool entries.
+    ///
+    SmallVector<uintptr_t, 8> ConstPoolAddresses;
+
+    /// JumpTable - The jump tables for the current function.
+    ///
+    MachineJumpTableInfo *JumpTable;
+
+    /// JumpTableBase - A pointer to the first entry in the jump table.
+    ///
+    void *JumpTableBase;
+
+    /// Resolver - This contains info about the currently resolved functions.
+    JITResolver Resolver;
+
+    /// DE - The dwarf emitter for the jit.
+    JITDwarfEmitter *DE;
+
+    /// LabelLocations - This vector is a mapping from Label ID's to their
+    /// address.
+    std::vector<uintptr_t> LabelLocations;
+
+    /// MMI - Machine module info for exception information.
+    MachineModuleInfo* MMI;
+
+    // GVSet - a set to keep track of which globals have been seen
+    SmallPtrSet<const GlobalVariable*, 8> GVSet;
+
+    // CurFn - The llvm function being emitted. Only valid during
+    // finishFunction().
+    const Function *CurFn;
+
+    // CurFnStubUses - For a given Function, a vector of stubs that it
+    // references. This facilitates the JIT detecting that a stub is no
+    // longer used, so that it may be deallocated.
+    DenseMap<const Function *, SmallVector<void*, 1> > CurFnStubUses;
+
+    // StubFnRefs - For a given pointer to a stub, a set of Functions which
+    // reference the stub. When the count of a stub's references drops to
+    // zero, the stub is unused.
+    DenseMap<void *, SmallPtrSet<const Function*, 1> > StubFnRefs;
+
+    // ExtFnStubs - A map of external function names to stubs which have
+    // entries in the JITResolver's ExternalFnToStubMap.
+    StringMap<void *> ExtFnStubs;
+
+    // MCI - A pointer to a MachineCodeInfo object to update with information.
+    MachineCodeInfo *MCI;
+
+  public:
+    JITEmitter(JIT &jit, JITMemoryManager *JMM)
+      : Resolver(jit), CurFn(0), MCI(0) {
+      MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
+      if (jit.getJITInfo().needsGOT()) {
+        MemMgr->AllocateGOT();
+        DOUT << "JIT is managing a GOT\n";
+      }
+
+      if (ExceptionHandling) DE = new JITDwarfEmitter(jit);
+    }
+    ~JITEmitter() {
+      delete MemMgr;
+      if (ExceptionHandling) delete DE;
+    }
+
+    /// classof - Methods for support type inquiry through isa, cast, and
+    /// dyn_cast:
+    ///
+    static inline bool classof(const JITEmitter*) { return true; }
+    static inline bool classof(const MachineCodeEmitter*) { return true; }
+
+    JITResolver &getJITResolver() { return Resolver; }
+
+    virtual void startFunction(MachineFunction &F);
+    virtual bool finishFunction(MachineFunction &F);
+
+    void emitConstantPool(MachineConstantPool *MCP);
+    void initJumpTableInfo(MachineJumpTableInfo *MJTI);
+    void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
+
+    virtual void startGVStub(const GlobalValue* GV, unsigned StubSize,
+                             unsigned Alignment = 1);
+    virtual void startGVStub(const GlobalValue* GV, void *Buffer,
+                             unsigned StubSize);
+    virtual void* finishGVStub(const GlobalValue *GV);
+
+    /// allocateSpace - Reserves space in the current block if any, or
+    /// allocates a new one of the given size.
+ virtual void *allocateSpace(uintptr_t Size, unsigned Alignment); + + virtual void addRelocation(const MachineRelocation &MR) { + Relocations.push_back(MR); + } + + virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) { + if (MBBLocations.size() <= (unsigned)MBB->getNumber()) + MBBLocations.resize((MBB->getNumber()+1)*2); + MBBLocations[MBB->getNumber()] = getCurrentPCValue(); + DOUT << "JIT: Emitting BB" << MBB->getNumber() << " at [" + << (void*) getCurrentPCValue() << "]\n"; + } + + virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const; + virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const; + + virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const { + assert(MBBLocations.size() > (unsigned)MBB->getNumber() && + MBBLocations[MBB->getNumber()] && "MBB not emitted!"); + return MBBLocations[MBB->getNumber()]; + } + + /// deallocateMemForFunction - Deallocate all memory for the specified + /// function body. + void deallocateMemForFunction(Function *F); + + /// AddStubToCurrentFunction - Mark the current function being JIT'd as + /// using the stub at the specified address. Allows + /// deallocateMemForFunction to also remove stubs no longer referenced. + void AddStubToCurrentFunction(void *Stub); + + /// getExternalFnStubs - Accessor for the JIT to find stubs emitted for + /// MachineRelocations that reference external functions by name. + const StringMap<void*> &getExternalFnStubs() const { return ExtFnStubs; } + + virtual void emitLabel(uint64_t LabelID) { + if (LabelLocations.size() <= LabelID) + LabelLocations.resize((LabelID+1)*2); + LabelLocations[LabelID] = getCurrentPCValue(); + } + + virtual uintptr_t getLabelAddress(uint64_t LabelID) const { + assert(LabelLocations.size() > (unsigned)LabelID && + LabelLocations[LabelID] && "Label not emitted!"); + return LabelLocations[LabelID]; + } + + virtual void setModuleInfo(MachineModuleInfo* Info) { + MMI = Info; + if (ExceptionHandling) DE->setModuleInfo(Info); + } + + void setMemoryExecutable(void) { + MemMgr->setMemoryExecutable(); + } + + JITMemoryManager *getMemMgr(void) const { return MemMgr; } + + void setMachineCodeInfo(MachineCodeInfo *mci) { + MCI = mci; + } + + private: + void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub); + void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference, + bool NoNeedStub); + unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size); + unsigned addSizeOfGlobalsInConstantVal(const Constant *C, unsigned Size); + unsigned addSizeOfGlobalsInInitializer(const Constant *Init, unsigned Size); + unsigned GetSizeOfGlobalsInBytes(MachineFunction &MF); + }; +} + +void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference, + bool DoesntNeedStub) { + if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) + return TheJIT->getOrEmitGlobalVariable(GV); + + if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) + return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false)); + + // If we have already compiled the function, return a pointer to its body. + Function *F = cast<Function>(V); + void *ResultPtr; + if (!DoesntNeedStub && !TheJIT->isLazyCompilationDisabled()) { + // Return the function stub if it's already created. 
+ ResultPtr = Resolver.getFunctionStubIfAvailable(F); + if (ResultPtr) + AddStubToCurrentFunction(ResultPtr); + } else { + ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F); + } + if (ResultPtr) return ResultPtr; + + // If this is an external function pointer, we can force the JIT to + // 'compile' it, which really just adds it to the map. In dlsym mode, + // external functions are forced through a stub, regardless of reloc type. + if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode() && + DoesntNeedStub && !TheJIT->areDlsymStubsEnabled()) + return TheJIT->getPointerToFunction(F); + + // Okay, the function has not been compiled yet, if the target callback + // mechanism is capable of rewriting the instruction directly, prefer to do + // that instead of emitting a stub. This uses the lazy resolver, so is not + // legal if lazy compilation is disabled. + if (DoesntNeedStub && !TheJIT->isLazyCompilationDisabled()) + return Resolver.AddCallbackAtLocation(F, Reference); + + // Otherwise, we have to emit a stub. + void *StubAddr = Resolver.getFunctionStub(F); + + // Add the stub to the current function's list of referenced stubs, so we can + // deallocate them if the current function is ever freed. It's possible to + // return null from getFunctionStub in the case of a weak extern that fails + // to resolve. + if (StubAddr) + AddStubToCurrentFunction(StubAddr); + + return StubAddr; +} + +void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference, + bool NoNeedStub) { + // Make sure GV is emitted first, and create a stub containing the fully + // resolved address. + void *GVAddress = getPointerToGlobal(V, Reference, true); + void *StubAddr = Resolver.getGlobalValueIndirectSym(V, GVAddress); + + // Add the stub to the current function's list of referenced stubs, so we can + // deallocate them if the current function is ever freed. + AddStubToCurrentFunction(StubAddr); + + return StubAddr; +} + +void JITEmitter::AddStubToCurrentFunction(void *StubAddr) { + if (!TheJIT->areDlsymStubsEnabled()) + return; + + assert(CurFn && "Stub added to current function, but current function is 0!"); + + SmallVectorImpl<void*> &StubsUsed = CurFnStubUses[CurFn]; + StubsUsed.push_back(StubAddr); + + SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[StubAddr]; + FnRefs.insert(CurFn); +} + +static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP, + const TargetData *TD) { + const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); + if (Constants.empty()) return 0; + + unsigned Size = 0; + for (unsigned i = 0, e = Constants.size(); i != e; ++i) { + MachineConstantPoolEntry CPE = Constants[i]; + unsigned AlignMask = CPE.getAlignment() - 1; + Size = (Size + AlignMask) & ~AlignMask; + const Type *Ty = CPE.getType(); + Size += TD->getTypeAllocSize(Ty); + } + return Size; +} + +static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI) { + const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); + if (JT.empty()) return 0; + + unsigned NumEntries = 0; + for (unsigned i = 0, e = JT.size(); i != e; ++i) + NumEntries += JT[i].MBBs.size(); + + unsigned EntrySize = MJTI->getEntrySize(); + + return NumEntries * EntrySize; +} + +static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) { + if (Alignment == 0) Alignment = 1; + // Since we do not know where the buffer will be allocated, be pessimistic. 
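+  // e.g. a 16-byte alignment request reserves a full 16 extra bytes, one
+  // more than the 15-byte worst case an arbitrarily placed buffer could need.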
+  return Size + Alignment;
+}
+
+/// addSizeOfGlobal - add the size of the global (plus any alignment padding)
+/// into the running total Size.
+
+unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
+  const Type *ElTy = GV->getType()->getElementType();
+  size_t GVSize = (size_t)TheJIT->getTargetData()->getTypeAllocSize(ElTy);
+  size_t GVAlign =
+    (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
+  DOUT << "JIT: Adding in size " << GVSize << " alignment " << GVAlign;
+  DEBUG(GV->dump());
+  // Assume code section ends with worst possible alignment, so first
+  // variable needs maximal padding.
+  if (Size == 0)
+    Size = 1;
+  Size = ((Size+GVAlign-1)/GVAlign)*GVAlign;
+  Size += GVSize;
+  return Size;
+}
+
+/// addSizeOfGlobalsInConstantVal - find any globals that we haven't seen yet
+/// but are referenced from the constant; put them in GVSet and add their
+/// size into the running total Size.
+
+unsigned JITEmitter::addSizeOfGlobalsInConstantVal(const Constant *C,
+                                                   unsigned Size) {
+  // An undef value references no globals; return Size unchanged.
+  if (isa<UndefValue>(C))
+    return Size;
+
+  // If the value is a ConstantExpr, examine its operands.
+  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+    Constant *Op0 = CE->getOperand(0);
+    switch (CE->getOpcode()) {
+    case Instruction::GetElementPtr:
+    case Instruction::Trunc:
+    case Instruction::ZExt:
+    case Instruction::SExt:
+    case Instruction::FPTrunc:
+    case Instruction::FPExt:
+    case Instruction::UIToFP:
+    case Instruction::SIToFP:
+    case Instruction::FPToUI:
+    case Instruction::FPToSI:
+    case Instruction::PtrToInt:
+    case Instruction::IntToPtr:
+    case Instruction::BitCast: {
+      Size = addSizeOfGlobalsInConstantVal(Op0, Size);
+      break;
+    }
+    case Instruction::Add:
+    case Instruction::Sub:
+    case Instruction::Mul:
+    case Instruction::UDiv:
+    case Instruction::SDiv:
+    case Instruction::URem:
+    case Instruction::SRem:
+    case Instruction::And:
+    case Instruction::Or:
+    case Instruction::Xor: {
+      Size = addSizeOfGlobalsInConstantVal(Op0, Size);
+      Size = addSizeOfGlobalsInConstantVal(CE->getOperand(1), Size);
+      break;
+    }
+    default: {
+      cerr << "ConstantExpr not handled: " << *CE << "\n";
+      abort();
+    }
+    }
+  }
+
+  if (C->getType()->getTypeID() == Type::PointerTyID)
+    if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C))
+      if (GVSet.insert(GV))
+        Size = addSizeOfGlobal(GV, Size);
+
+  return Size;
+}
+
+/// addSizeOfGlobalsInInitializer - handle any globals that we haven't seen yet
+/// but are referenced from the given initializer.
+
+unsigned JITEmitter::addSizeOfGlobalsInInitializer(const Constant *Init,
+                                                   unsigned Size) {
+  if (!isa<UndefValue>(Init) &&
+      !isa<ConstantVector>(Init) &&
+      !isa<ConstantAggregateZero>(Init) &&
+      !isa<ConstantArray>(Init) &&
+      !isa<ConstantStruct>(Init) &&
+      Init->getType()->isFirstClassType())
+    Size = addSizeOfGlobalsInConstantVal(Init, Size);
+  return Size;
+}
+
+/// GetSizeOfGlobalsInBytes - walk the code for the function, looking for
+/// globals; then walk the initializers of those globals looking for more.
+/// If their size has not been considered yet, add it into the running total
+/// Size.
+
+unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
+  unsigned Size = 0;
+  GVSet.clear();
+
+  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+       MBB != E; ++MBB) {
+    for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+         I != E; ++I) {
+      const TargetInstrDesc &Desc = I->getDesc();
+      const MachineInstr &MI = *I;
+      unsigned NumOps = Desc.getNumOperands();
+      for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
+        const MachineOperand &MO = MI.getOperand(CurOp);
+        if (MO.isGlobal()) {
+          GlobalValue* V = MO.getGlobal();
+          const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
+          if (!GV)
+            continue;
+          // If seen in previous function, it will have an entry here.
+          if (TheJIT->getPointerToGlobalIfAvailable(GV))
+            continue;
+          // If seen earlier in this function, it will have an entry here.
+          // FIXME: it should be possible to combine these tables, by
+          // assuming the addresses of the new globals in this module
+          // start at 0 (or something) and adjusting them after codegen
+          // is complete. Another possibility is to grab a marker bit in GV.
+          if (GVSet.insert(GV))
+            // A variable as yet unseen. Add in its size.
+            Size = addSizeOfGlobal(GV, Size);
+        }
+      }
+    }
+  }
+  DOUT << "JIT: About to look through initializers\n";
+  // Look for more globals that are referenced only from initializers.
+  // GVSet.end is computed each time because the set can grow as we go.
+  for (SmallPtrSet<const GlobalVariable *, 8>::iterator I = GVSet.begin();
+       I != GVSet.end(); ++I) {
+    const GlobalVariable* GV = *I;
+    if (GV->hasInitializer())
+      Size = addSizeOfGlobalsInInitializer(GV->getInitializer(), Size);
+  }
+
+  return Size;
+}
+
+void JITEmitter::startFunction(MachineFunction &F) {
+  DOUT << "JIT: Starting CodeGen of Function "
+       << F.getFunction()->getName() << "\n";
+
+  uintptr_t ActualSize = 0;
+  // Set the memory writable, if it's not already
+  MemMgr->setMemoryWritable();
+  if (MemMgr->NeedsExactSize()) {
+    DOUT << "JIT: ExactSize\n";
+    const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
+    MachineJumpTableInfo *MJTI = F.getJumpTableInfo();
+    MachineConstantPool *MCP = F.getConstantPool();
+
+    // Ensure the constant pool/jump table info is at least 16-byte aligned.
+    ActualSize = RoundUpToAlign(ActualSize, 16);
+
+    // Add the alignment of the constant pool
+    ActualSize = RoundUpToAlign(ActualSize, MCP->getConstantPoolAlignment());
+
+    // Add the constant pool size
+    ActualSize += GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+
+    // Add the alignment of the jump table info
+    ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment());
+
+    // Add the jump table size
+    ActualSize += GetJumpTableSizeInBytes(MJTI);
+
+    // Add the alignment for the function
+    ActualSize = RoundUpToAlign(ActualSize,
+                                std::max(F.getFunction()->getAlignment(), 8U));
+
+    // Add the function size
+    ActualSize += TII->GetFunctionSizeInBytes(F);
+
+    DOUT << "JIT: ActualSize before globals " << ActualSize << "\n";
+    // Add the size of the globals that will be allocated after this function.
+    // These are all the ones referenced from this function that were not
+    // previously allocated.
+    ActualSize += GetSizeOfGlobalsInBytes(F);
+    DOUT << "JIT: ActualSize after globals " << ActualSize << "\n";
+  }
+
+  BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
+                                                         ActualSize);
+  BufferEnd = BufferBegin+ActualSize;
+
+  // Ensure the constant pool/jump table info is at least 16-byte aligned.
+ emitAlignment(16); + + emitConstantPool(F.getConstantPool()); + initJumpTableInfo(F.getJumpTableInfo()); + + // About to start emitting the machine code for the function. + emitAlignment(std::max(F.getFunction()->getAlignment(), 8U)); + TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr); + + MBBLocations.clear(); +} + +bool JITEmitter::finishFunction(MachineFunction &F) { + if (CurBufferPtr == BufferEnd) { + // FIXME: Allocate more space, then try again. + cerr << "JIT: Ran out of space for generated machine code!\n"; + abort(); + } + + emitJumpTableInfo(F.getJumpTableInfo()); + + // FnStart is the start of the text, not the start of the constant pool and + // other per-function data. + uint8_t *FnStart = + (uint8_t *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction()); + + // FnEnd is the end of the function's machine code. + uint8_t *FnEnd = CurBufferPtr; + + if (!Relocations.empty()) { + CurFn = F.getFunction(); + NumRelos += Relocations.size(); + + // Resolve the relocations to concrete pointers. + for (unsigned i = 0, e = Relocations.size(); i != e; ++i) { + MachineRelocation &MR = Relocations[i]; + void *ResultPtr = 0; + if (!MR.letTargetResolve()) { + if (MR.isExternalSymbol()) { + ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(), + false); + DOUT << "JIT: Map \'" << MR.getExternalSymbol() << "\' to [" + << ResultPtr << "]\n"; + + // If the target REALLY wants a stub for this function, emit it now. + if (!MR.doesntNeedStub()) { + if (!TheJIT->areDlsymStubsEnabled()) { + ResultPtr = Resolver.getExternalFunctionStub(ResultPtr); + } else { + void *&Stub = ExtFnStubs[MR.getExternalSymbol()]; + if (!Stub) { + Stub = Resolver.getExternalFunctionStub((void *)&Stub); + AddStubToCurrentFunction(Stub); + } + ResultPtr = Stub; + } + } + } else if (MR.isGlobalValue()) { + ResultPtr = getPointerToGlobal(MR.getGlobalValue(), + BufferBegin+MR.getMachineCodeOffset(), + MR.doesntNeedStub()); + } else if (MR.isIndirectSymbol()) { + ResultPtr = getPointerToGVIndirectSym(MR.getGlobalValue(), + BufferBegin+MR.getMachineCodeOffset(), + MR.doesntNeedStub()); + } else if (MR.isBasicBlock()) { + ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock()); + } else if (MR.isConstantPoolIndex()) { + ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex()); + } else { + assert(MR.isJumpTableIndex()); + ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex()); + } + + MR.setResultPointer(ResultPtr); + } + + // if we are managing the GOT and the relocation wants an index, + // give it one + if (MR.isGOTRelative() && MemMgr->isManagingGOT()) { + unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr); + MR.setGOTIndex(idx); + if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) { + DOUT << "JIT: GOT was out of date for " << ResultPtr + << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] + << "\n"; + ((void**)MemMgr->getGOTBase())[idx] = ResultPtr; + } + } + } + + CurFn = 0; + TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0], + Relocations.size(), MemMgr->getGOTBase()); + } + + // Update the GOT entry for F to point to the new code. 
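+  // The GOT managed here is a flat array of void* slots owned by the memory
+  // manager; the resolver hands out stable indices for addresses. In outline
+  // (a sketch of the update performed just below, not a separate API):
+  //
+  //   void **GOT = (void**)MemMgr->getGOTBase();
+  //   unsigned idx = Resolver.getGOTIndexForAddr(Target);
+  //   if (GOT[idx] != Target)      // entry went stale
+  //     GOT[idx] = Target;         // repoint it at the new code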
+ if (MemMgr->isManagingGOT()) { + unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin); + if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) { + DOUT << "JIT: GOT was out of date for " << (void*)BufferBegin + << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n"; + ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin; + } + } + + // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for + // global variables that were referenced in the relocations. + MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr); + + if (CurBufferPtr == BufferEnd) { + // FIXME: Allocate more space, then try again. + cerr << "JIT: Ran out of space for generated machine code!\n"; + abort(); + } + + BufferBegin = CurBufferPtr = 0; + NumBytes += FnEnd-FnStart; + + // Invalidate the icache if necessary. + sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart); + + // Add it to the JIT symbol table if the host wants it. + AddFunctionToSymbolTable(F.getFunction()->getNameStart(), + FnStart, FnEnd-FnStart); + + DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart + << "] Function: " << F.getFunction()->getName() + << ": " << (FnEnd-FnStart) << " bytes of text, " + << Relocations.size() << " relocations\n"; + + if (MCI) { + MCI->setAddress(FnStart); + MCI->setSize(FnEnd-FnStart); + } + + Relocations.clear(); + ConstPoolAddresses.clear(); + + // Mark code region readable and executable if it's not so already. + MemMgr->setMemoryExecutable(); + +#ifndef NDEBUG + { + if (sys::hasDisassembler()) { + DOUT << "JIT: Disassembled code:\n"; + DOUT << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart); + } else { + DOUT << "JIT: Binary code:\n"; + DOUT << std::hex; + uint8_t* q = FnStart; + for (int i = 0; q < FnEnd; q += 4, ++i) { + if (i == 4) + i = 0; + if (i == 0) + DOUT << "JIT: " << std::setw(8) << std::setfill('0') + << (long)(q - FnStart) << ": "; + bool Done = false; + for (int j = 3; j >= 0; --j) { + if (q + j >= FnEnd) + Done = true; + else + DOUT << std::setw(2) << std::setfill('0') << (unsigned short)q[j]; + } + if (Done) + break; + DOUT << ' '; + if (i == 3) + DOUT << '\n'; + } + DOUT << std::dec; + DOUT<< '\n'; + } + } +#endif + if (ExceptionHandling) { + uintptr_t ActualSize = 0; + SavedBufferBegin = BufferBegin; + SavedBufferEnd = BufferEnd; + SavedCurBufferPtr = CurBufferPtr; + + if (MemMgr->NeedsExactSize()) { + ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd); + } + + BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(), + ActualSize); + BufferEnd = BufferBegin+ActualSize; + uint8_t* FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd); + MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr, + FrameRegister); + BufferBegin = SavedBufferBegin; + BufferEnd = SavedBufferEnd; + CurBufferPtr = SavedCurBufferPtr; + + TheJIT->RegisterTable(FrameRegister); + } + + if (MMI) + MMI->EndFunction(); + + return false; +} + +/// deallocateMemForFunction - Deallocate all memory for the specified +/// function body. Also drop any references the function has to stubs. +void JITEmitter::deallocateMemForFunction(Function *F) { + MemMgr->deallocateMemForFunction(F); + + // If the function did not reference any stubs, return. + if (CurFnStubUses.find(F) == CurFnStubUses.end()) + return; + + // For each referenced stub, erase the reference to this function, and then + // erase the list of referenced stubs. 
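+  // Two maps back this scheme: CurFnStubUses maps a Function to the stubs its
+  // code refers to, while StubFnRefs maps each stub back to the set of
+  // functions using it. A stub may be retired only once that set drains,
+  // which is effectively reference counting keyed by set membership.
+  // (Illustrative invariant, not asserted by this code:
+  //    for each stub S in CurFnStubUses[F], StubFnRefs[S].count(F) != 0.)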
+ SmallVectorImpl<void *> &StubList = CurFnStubUses[F]; + for (unsigned i = 0, e = StubList.size(); i != e; ++i) { + void *Stub = StubList[i]; + + // If we already invalidated this stub for this function, continue. + if (StubFnRefs.count(Stub) == 0) + continue; + + SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[Stub]; + FnRefs.erase(F); + + // If this function was the last reference to the stub, invalidate the stub + // in the JITResolver. Were there a memory manager deallocateStub routine, + // we could call that at this point too. + if (FnRefs.empty()) { + DOUT << "\nJIT: Invalidated Stub at [" << Stub << "]\n"; + StubFnRefs.erase(Stub); + + // Invalidate the stub. If it is a GV stub, update the JIT's global + // mapping for that GV to zero, otherwise, search the string map of + // external function names to stubs and remove the entry for this stub. + GlobalValue *GV = Resolver.invalidateStub(Stub); + if (GV) { + TheJIT->updateGlobalMapping(GV, 0); + } else { + for (StringMapIterator<void*> i = ExtFnStubs.begin(), + e = ExtFnStubs.end(); i != e; ++i) { + if (i->second == Stub) { + ExtFnStubs.erase(i); + break; + } + } + } + } + } + CurFnStubUses.erase(F); +} + + +void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) { + if (BufferBegin) + return JITCodeEmitter::allocateSpace(Size, Alignment); + + // create a new memory block if there is no active one. + // care must be taken so that BufferBegin is invalidated when a + // block is trimmed + BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment); + BufferEnd = BufferBegin+Size; + return CurBufferPtr; +} + +void JITEmitter::emitConstantPool(MachineConstantPool *MCP) { + if (TheJIT->getJITInfo().hasCustomConstantPool()) + return; + + const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); + if (Constants.empty()) return; + + unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData()); + unsigned Align = MCP->getConstantPoolAlignment(); + ConstantPoolBase = allocateSpace(Size, Align); + ConstantPool = MCP; + + if (ConstantPoolBase == 0) return; // Buffer overflow. + + DOUT << "JIT: Emitted constant pool at [" << ConstantPoolBase + << "] (size: " << Size << ", alignment: " << Align << ")\n"; + + // Initialize the memory for all of the constant pool entries. + unsigned Offset = 0; + for (unsigned i = 0, e = Constants.size(); i != e; ++i) { + MachineConstantPoolEntry CPE = Constants[i]; + unsigned AlignMask = CPE.getAlignment() - 1; + Offset = (Offset + AlignMask) & ~AlignMask; + + uintptr_t CAddr = (uintptr_t)ConstantPoolBase + Offset; + ConstPoolAddresses.push_back(CAddr); + if (CPE.isMachineConstantPoolEntry()) { + // FIXME: add support to lower machine constant pool values into bytes! 
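+      // (Aside on the Offset bump a few lines above: with a power-of-two
+      // entry alignment A, Offset = (Offset + (A-1)) & ~(A-1) rounds up;
+      // e.g. Offset = 5 with A = 8 gives (5+7) & ~7 = 8.)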
+ cerr << "Initialize memory with machine specific constant pool entry" + << " has not been implemented!\n"; + abort(); + } + TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr); + DOUT << "JIT: CP" << i << " at [0x" + << std::hex << CAddr << std::dec << "]\n"; + + const Type *Ty = CPE.Val.ConstVal->getType(); + Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty); + } +} + +void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) { + if (TheJIT->getJITInfo().hasCustomJumpTables()) + return; + + const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); + if (JT.empty()) return; + + unsigned NumEntries = 0; + for (unsigned i = 0, e = JT.size(); i != e; ++i) + NumEntries += JT[i].MBBs.size(); + + unsigned EntrySize = MJTI->getEntrySize(); + + // Just allocate space for all the jump tables now. We will fix up the actual + // MBB entries in the tables after we emit the code for each block, since then + // we will know the final locations of the MBBs in memory. + JumpTable = MJTI; + JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment()); +} + +void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { + if (TheJIT->getJITInfo().hasCustomJumpTables()) + return; + + const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); + if (JT.empty() || JumpTableBase == 0) return; + + if (TargetMachine::getRelocationModel() == Reloc::PIC_) { + assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?"); + // For each jump table, place the offset from the beginning of the table + // to the target address. + int *SlotPtr = (int*)JumpTableBase; + + for (unsigned i = 0, e = JT.size(); i != e; ++i) { + const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs; + // Store the offset of the basic block for this jump table slot in the + // memory we allocated for the jump table in 'initJumpTableInfo' + uintptr_t Base = (uintptr_t)SlotPtr; + for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) { + uintptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]); + *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base); + } + } + } else { + assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?"); + + // For each jump table, map each target in the jump table to the address of + // an emitted MachineBasicBlock. 
+    intptr_t *SlotPtr = (intptr_t*)JumpTableBase;
+
+    for (unsigned i = 0, e = JT.size(); i != e; ++i) {
+      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
+      // Store the address of the basic block for this jump table slot in the
+      // memory we allocated for the jump table in 'initJumpTableInfo'
+      for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
+        *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
+    }
+  }
+}
+
+void JITEmitter::startGVStub(const GlobalValue* GV, unsigned StubSize,
+                             unsigned Alignment) {
+  SavedBufferBegin = BufferBegin;
+  SavedBufferEnd = BufferEnd;
+  SavedCurBufferPtr = CurBufferPtr;
+
+  BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment);
+  BufferEnd = BufferBegin+StubSize+1;
+}
+
+void JITEmitter::startGVStub(const GlobalValue* GV, void *Buffer,
+                             unsigned StubSize) {
+  SavedBufferBegin = BufferBegin;
+  SavedBufferEnd = BufferEnd;
+  SavedCurBufferPtr = CurBufferPtr;
+
+  BufferBegin = CurBufferPtr = (uint8_t *)Buffer;
+  BufferEnd = BufferBegin+StubSize+1;
+}
+
+void *JITEmitter::finishGVStub(const GlobalValue* GV) {
+  NumBytes += getCurrentPCOffset();
+  std::swap(SavedBufferBegin, BufferBegin);
+  BufferEnd = SavedBufferEnd;
+  CurBufferPtr = SavedCurBufferPtr;
+  return SavedBufferBegin;
+}
+
+// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
+// in the constant pool that was last emitted with the 'emitConstantPool'
+// method.
+//
+uintptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
+  assert(ConstantNum < ConstantPool->getConstants().size() &&
+         "Invalid ConstantPoolIndex!");
+  return ConstPoolAddresses[ConstantNum];
+}
+
+// getJumpTableEntryAddress - Return the address of the JumpTable with index
+// 'Index' in the jump table that was last initialized with
+// 'initJumpTableInfo'.
+//
+uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
+  const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
+  assert(Index < JT.size() && "Invalid jump table index!");
+
+  unsigned Offset = 0;
+  unsigned EntrySize = JumpTable->getEntrySize();
+
+  for (unsigned i = 0; i < Index; ++i)
+    Offset += JT[i].MBBs.size();
+
+  Offset *= EntrySize;
+
+  return (uintptr_t)((char *)JumpTableBase + Offset);
+}
+
+//===----------------------------------------------------------------------===//
+//  Public interface to this file
+//===----------------------------------------------------------------------===//
+
+JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM) {
+  return new JITEmitter(jit, JMM);
+}
+
+// getPointerToNamedFunction - This function is used as a global wrapper to
+// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
+// bugpoint is debugging the JIT. In that scenario, we are loading an .so and
+// need to resolve function(s) that are being mis-codegenerated, so we need to
+// resolve their addresses at runtime, and this is the way to do it.
+extern "C" {
+  void *getPointerToNamedFunction(const char *Name) {
+    if (Function *F = TheJIT->FindFunctionNamed(Name))
+      return TheJIT->getPointerToFunction(F);
+    return TheJIT->getPointerToNamedFunction(Name);
+  }
+}
+
+// getPointerToFunctionOrStub - If the specified function has been
+// code-gen'd, return a pointer to the function. If not, compile it, or use
+// a stub to implement lazy compilation if available.
+//
+void *JIT::getPointerToFunctionOrStub(Function *F) {
+  // If we have already code generated the function, just return the address.
+ if (void *Addr = getPointerToGlobalIfAvailable(F)) + return Addr; + + // Get a stub if the target supports it. + assert(isa<JITEmitter>(JCE) && "Unexpected MCE?"); + JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); + return JE->getJITResolver().getFunctionStub(F); +} + +void JIT::registerMachineCodeInfo(MachineCodeInfo *mc) { + assert(isa<JITEmitter>(JCE) && "Unexpected MCE?"); + JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); + + JE->setMachineCodeInfo(mc); +} + +void JIT::updateFunctionStub(Function *F) { + // Get the empty stub we generated earlier. + assert(isa<JITEmitter>(JCE) && "Unexpected MCE?"); + JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); + void *Stub = JE->getJITResolver().getFunctionStub(F); + + // Tell the target jit info to rewrite the stub at the specified address, + // rather than creating a new one. + void *Addr = getPointerToGlobalIfAvailable(F); + getJITInfo().emitFunctionStubAtAddr(F, Addr, Stub, *getCodeEmitter()); +} + +/// updateDlsymStubTable - Emit the data necessary to relocate the stubs +/// that were emitted during code generation. +/// +void JIT::updateDlsymStubTable() { + assert(isa<JITEmitter>(JCE) && "Unexpected MCE?"); + JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); + + SmallVector<GlobalValue*, 8> GVs; + SmallVector<void*, 8> Ptrs; + const StringMap<void *> &ExtFns = JE->getExternalFnStubs(); + + JE->getJITResolver().getRelocatableGVs(GVs, Ptrs); + + unsigned nStubs = GVs.size() + ExtFns.size(); + + // If there are no relocatable stubs, return. + if (nStubs == 0) + return; + + // If there are no new relocatable stubs, return. + void *CurTable = JE->getMemMgr()->getDlsymTable(); + if (CurTable && (*(unsigned *)CurTable == nStubs)) + return; + + // Calculate the size of the stub info + unsigned offset = 4 + 4 * nStubs + sizeof(intptr_t) * nStubs; + + SmallVector<unsigned, 8> Offsets; + for (unsigned i = 0; i != GVs.size(); ++i) { + Offsets.push_back(offset); + offset += GVs[i]->getName().length() + 1; + } + for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end(); + i != e; ++i) { + Offsets.push_back(offset); + offset += strlen(i->first()) + 1; + } + + // Allocate space for the new "stub", which contains the dlsym table. + JE->startGVStub(0, offset, 4); + + // Emit the number of records + JE->emitInt32(nStubs); + + // Emit the string offsets + for (unsigned i = 0; i != nStubs; ++i) + JE->emitInt32(Offsets[i]); + + // Emit the pointers. Verify that they are at least 2-byte aligned, and set + // the low bit to 0 == GV, 1 == Function, so that the client code doing the + // relocation can write the relocated pointer at the appropriate place in + // the stub. + for (unsigned i = 0; i != GVs.size(); ++i) { + intptr_t Ptr = (intptr_t)Ptrs[i]; + assert((Ptr & 1) == 0 && "Stub pointers must be at least 2-byte aligned!"); + + if (isa<Function>(GVs[i])) + Ptr |= (intptr_t)1; + + if (sizeof(Ptr) == 8) + JE->emitInt64(Ptr); + else + JE->emitInt32(Ptr); + } + for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end(); + i != e; ++i) { + intptr_t Ptr = (intptr_t)i->second | 1; + + if (sizeof(Ptr) == 8) + JE->emitInt64(Ptr); + else + JE->emitInt32(Ptr); + } + + // Emit the strings. + for (unsigned i = 0; i != GVs.size(); ++i) + JE->emitString(GVs[i]->getName()); + for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end(); + i != e; ++i) + JE->emitString(i->first()); + + // Tell the JIT memory manager where it is. The JIT Memory Manager will + // deallocate space for the old one, if one existed. 
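+  // The finished table, read back by the client, therefore has this layout
+  // (a reading aid; no struct with this shape is declared in this code):
+  //
+  //   uint32_t  nStubs;
+  //   uint32_t  strOffset[nStubs];   // byte offsets of the names below
+  //   intptr_t  ptr[nStubs];         // low bit: 0 = GV, 1 = Function
+  //   char      names[];             // NUL-terminated strings
+  //
+  // which is why the first name lands at byte
+  // 4 + 4*nStubs + sizeof(intptr_t)*nStubs, matching 'offset' above.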
+ JE->getMemMgr()->SetDlsymTable(JE->finishGVStub(0)); +} + +/// freeMachineCodeForFunction - release machine code memory for given Function. +/// +void JIT::freeMachineCodeForFunction(Function *F) { + + // Delete translation for this from the ExecutionEngine, so it will get + // retranslated next time it is used. + void *OldPtr = updateGlobalMapping(F, 0); + + if (OldPtr) + RemoveFunctionFromSymbolTable(OldPtr); + + // Free the actual memory for the function body and related stuff. + assert(isa<JITEmitter>(JCE) && "Unexpected MCE?"); + cast<JITEmitter>(JCE)->deallocateMemForFunction(F); +} + diff --git a/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/lib/ExecutionEngine/JIT/JITMemoryManager.cpp new file mode 100644 index 0000000000000..70ccdccb8049c --- /dev/null +++ b/lib/ExecutionEngine/JIT/JITMemoryManager.cpp @@ -0,0 +1,541 @@ +//===-- JITMemoryManager.cpp - Memory Allocator for JIT'd code ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the DefaultJITMemoryManager class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/GlobalValue.h" +#include "llvm/ExecutionEngine/JITMemoryManager.h" +#include "llvm/Support/Compiler.h" +#include "llvm/System/Memory.h" +#include <map> +#include <vector> +#include <cassert> +#include <climits> +#include <cstdio> +#include <cstdlib> +#include <cstring> +using namespace llvm; + + +JITMemoryManager::~JITMemoryManager() {} + +//===----------------------------------------------------------------------===// +// Memory Block Implementation. +//===----------------------------------------------------------------------===// + +namespace { + /// MemoryRangeHeader - For a range of memory, this is the header that we put + /// on the block of memory. It is carefully crafted to be one word of memory. + /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader + /// which starts with this. + struct FreeRangeHeader; + struct MemoryRangeHeader { + /// ThisAllocated - This is true if this block is currently allocated. If + /// not, this can be converted to a FreeRangeHeader. + unsigned ThisAllocated : 1; + + /// PrevAllocated - Keep track of whether the block immediately before us is + /// allocated. If not, the word immediately before this header is the size + /// of the previous block. + unsigned PrevAllocated : 1; + + /// BlockSize - This is the size in bytes of this memory block, + /// including this header. + uintptr_t BlockSize : (sizeof(intptr_t)*CHAR_BIT - 2); + + + /// getBlockAfter - Return the memory block immediately after this one. + /// + MemoryRangeHeader &getBlockAfter() const { + return *(MemoryRangeHeader*)((char*)this+BlockSize); + } + + /// getFreeBlockBefore - If the block before this one is free, return it, + /// otherwise return null. + FreeRangeHeader *getFreeBlockBefore() const { + if (PrevAllocated) return 0; + intptr_t PrevSize = ((intptr_t *)this)[-1]; + return (FreeRangeHeader*)((char*)this-PrevSize); + } + + /// FreeBlock - Turn an allocated block into a free block, adjusting + /// bits in the object headers, and adding an end of region memory block. 
+ FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList); + + /// TrimAllocationToSize - If this allocated block is significantly larger + /// than NewSize, split it into two pieces (where the former is NewSize + /// bytes, including the header), and add the new block to the free list. + FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList, + uint64_t NewSize); + }; + + /// FreeRangeHeader - For a memory block that isn't already allocated, this + /// keeps track of the current block and has a pointer to the next free block. + /// Free blocks are kept on a circularly linked list. + struct FreeRangeHeader : public MemoryRangeHeader { + FreeRangeHeader *Prev; + FreeRangeHeader *Next; + + /// getMinBlockSize - Get the minimum size for a memory block. Blocks + /// smaller than this size cannot be created. + static unsigned getMinBlockSize() { + return sizeof(FreeRangeHeader)+sizeof(intptr_t); + } + + /// SetEndOfBlockSizeMarker - The word at the end of every free block is + /// known to be the size of the free block. Set it for this block. + void SetEndOfBlockSizeMarker() { + void *EndOfBlock = (char*)this + BlockSize; + ((intptr_t *)EndOfBlock)[-1] = BlockSize; + } + + FreeRangeHeader *RemoveFromFreeList() { + assert(Next->Prev == this && Prev->Next == this && "Freelist broken!"); + Next->Prev = Prev; + return Prev->Next = Next; + } + + void AddToFreeList(FreeRangeHeader *FreeList) { + Next = FreeList; + Prev = FreeList->Prev; + Prev->Next = this; + Next->Prev = this; + } + + /// GrowBlock - The block after this block just got deallocated. Merge it + /// into the current block. + void GrowBlock(uintptr_t NewSize); + + /// AllocateBlock - Mark this entire block allocated, updating freelists + /// etc. This returns a pointer to the circular free-list. + FreeRangeHeader *AllocateBlock(); + }; +} + + +/// AllocateBlock - Mark this entire block allocated, updating freelists +/// etc. This returns a pointer to the circular free-list. +FreeRangeHeader *FreeRangeHeader::AllocateBlock() { + assert(!ThisAllocated && !getBlockAfter().PrevAllocated && + "Cannot allocate an allocated block!"); + // Mark this block allocated. + ThisAllocated = 1; + getBlockAfter().PrevAllocated = 1; + + // Remove it from the free list. + return RemoveFromFreeList(); +} + +/// FreeBlock - Turn an allocated block into a free block, adjusting +/// bits in the object headers, and adding an end of region memory block. +/// If possible, coalesce this block with neighboring blocks. Return the +/// FreeRangeHeader to allocate from. +FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) { + MemoryRangeHeader *FollowingBlock = &getBlockAfter(); + assert(ThisAllocated && "This block is already allocated!"); + assert(FollowingBlock->PrevAllocated && "Flags out of sync!"); + + FreeRangeHeader *FreeListToReturn = FreeList; + + // If the block after this one is free, merge it into this block. + if (!FollowingBlock->ThisAllocated) { + FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock; + // "FreeList" always needs to be a valid free block. If we're about to + // coalesce with it, update our notion of what the free list is. + if (&FollowingFreeBlock == FreeList) { + FreeList = FollowingFreeBlock.Next; + FreeListToReturn = 0; + assert(&FollowingFreeBlock != FreeList && "No tombstone block?"); + } + FollowingFreeBlock.RemoveFromFreeList(); + + // Include the following block into this one. 
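+    // Coalescing illustrated: freeing a 64-byte block whose successor is a
+    // 32-byte free block yields a single 96-byte range under this block's
+    // header, after which the successor's header simply ceases to exist as
+    // far as the allocator is concerned.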
+    BlockSize += FollowingFreeBlock.BlockSize;
+    FollowingBlock = &FollowingFreeBlock.getBlockAfter();
+
+    // Tell the block after the block we are coalescing that this block is
+    // allocated.
+    FollowingBlock->PrevAllocated = 1;
+  }
+
+  assert(FollowingBlock->ThisAllocated && "Missed coalescing?");
+
+  if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
+    PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
+    return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
+  }
+
+  // Otherwise, mark this block free.
+  FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
+  FollowingBlock->PrevAllocated = 0;
+  FreeBlock.ThisAllocated = 0;
+
+  // Link this into the linked list of free blocks.
+  FreeBlock.AddToFreeList(FreeList);
+
+  // Add a marker at the end of the block, indicating the size of this free
+  // block.
+  FreeBlock.SetEndOfBlockSizeMarker();
+  return FreeListToReturn ? FreeListToReturn : &FreeBlock;
+}
+
+/// GrowBlock - The block after this block just got deallocated. Merge it
+/// into the current block.
+void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
+  assert(NewSize > BlockSize && "Not growing block?");
+  BlockSize = NewSize;
+  SetEndOfBlockSizeMarker();
+  getBlockAfter().PrevAllocated = 0;
+}
+
+/// TrimAllocationToSize - If this allocated block is significantly larger
+/// than NewSize, split it into two pieces (where the former is NewSize
+/// bytes, including the header), and add the new block to the free list.
+FreeRangeHeader *MemoryRangeHeader::
+TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
+  assert(ThisAllocated && getBlockAfter().PrevAllocated &&
+         "Cannot deallocate part of an allocated block!");
+
+  // Don't allow blocks to be trimmed below the minimum required size.
+  NewSize = std::max<uint64_t>(FreeRangeHeader::getMinBlockSize(), NewSize);
+
+  // Round up size for alignment of header.
+  unsigned HeaderAlign = __alignof(FreeRangeHeader);
+  NewSize = (NewSize + (HeaderAlign-1)) & ~(HeaderAlign-1);
+
+  // Size is now the size of the block we will remove from the start of the
+  // current block.
+  assert(NewSize <= BlockSize &&
+         "Allocating more space from this block than exists!");
+
+  // If splitting this block will cause the remainder to be too small, do not
+  // split the block.
+  if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
+    return FreeList;
+
+  // Otherwise, we splice the required number of bytes out of this block, form
+  // a new block immediately after it, then mark this block allocated.
+  MemoryRangeHeader &FormerNextBlock = getBlockAfter();
+
+  // Change the size of this block.
+  BlockSize = NewSize;
+
+  // Get the new block we just sliced out and turn it into a free block.
+  FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
+  NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
+  NewNextBlock.ThisAllocated = 0;
+  NewNextBlock.PrevAllocated = 1;
+  NewNextBlock.SetEndOfBlockSizeMarker();
+  FormerNextBlock.PrevAllocated = 0;
+  NewNextBlock.AddToFreeList(FreeList);
+  return &NewNextBlock;
+}
+
+//===----------------------------------------------------------------------===//
+//  DefaultJITMemoryManager Implementation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+  /// DefaultJITMemoryManager - Manage memory for the JIT code generation.
+  /// This splits a large block of MAP_NORESERVE'd memory into two
+  /// sections, one for function stubs, one for the functions themselves.  We
+  /// have to do this because we may need to emit a function stub while in the
+  /// middle of emitting a function, and we don't know how large the function
+  /// we are emitting is.
+  class VISIBILITY_HIDDEN DefaultJITMemoryManager : public JITMemoryManager {
+    std::vector<sys::MemoryBlock> Blocks; // Memory blocks allocated by the JIT
+    FreeRangeHeader *FreeMemoryList;      // Circular list of free blocks.
+
+    // When emitting code into a memory block, this is the block.
+    MemoryRangeHeader *CurBlock;
+
+    uint8_t *CurStubPtr, *StubBase;
+    uint8_t *GOTBase;      // Target-specific reserved memory
+    void *DlsymTable;      // Stub external symbol information
+
+    // Centralize memory block allocation.
+    sys::MemoryBlock getNewMemoryBlock(unsigned size);
+
+    std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
+    std::map<const Function*, MemoryRangeHeader*> TableBlocks;
+  public:
+    DefaultJITMemoryManager();
+    ~DefaultJITMemoryManager();
+
+    void AllocateGOT();
+    void SetDlsymTable(void *);
+
+    uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
+                          unsigned Alignment);
+
+    /// startFunctionBody - When a function starts, allocate a block of free
+    /// executable memory, returning a pointer to it and its actual size.
+    uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
+
+      FreeRangeHeader* candidateBlock = FreeMemoryList;
+      FreeRangeHeader* head = FreeMemoryList;
+      FreeRangeHeader* iter = head->Next;
+
+      uintptr_t largest = candidateBlock->BlockSize;
+
+      // Search for the largest free block.
+      while (iter != head) {
+        if (iter->BlockSize > largest) {
+          largest = iter->BlockSize;
+          candidateBlock = iter;
+        }
+        iter = iter->Next;
+      }
+
+      // Select this candidate block for allocation.
+      CurBlock = candidateBlock;
+
+      // Allocate the entire memory block.
+      FreeMemoryList = candidateBlock->AllocateBlock();
+      ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
+      return (uint8_t *)(CurBlock+1);
+    }
+
+    /// endFunctionBody - The function F is now allocated, and takes the memory
+    /// in the range [FunctionStart,FunctionEnd).
+    void endFunctionBody(const Function *F, uint8_t *FunctionStart,
+                         uint8_t *FunctionEnd) {
+      assert(FunctionEnd > FunctionStart);
+      assert(FunctionStart == (uint8_t *)(CurBlock+1) &&
+             "Mismatched function start/end!");
+
+      uintptr_t BlockSize = FunctionEnd - (uint8_t *)CurBlock;
+      FunctionBlocks[F] = CurBlock;
+
+      // Release the memory at the end of this block that isn't needed.
+      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList,
+                                                      BlockSize);
+    }
+
+    /// allocateSpace - Allocate a memory block of the given size.
+    uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
+      CurBlock = FreeMemoryList;
+      FreeMemoryList = FreeMemoryList->AllocateBlock();
+
+      // Skip past the block header; (uint8_t *)CurBlock+1 would only
+      // advance one byte and hand out memory overlapping the header.
+      uint8_t *result = (uint8_t *)(CurBlock+1);
+
+      if (Alignment == 0) Alignment = 1;
+      result = (uint8_t*)(((intptr_t)result+Alignment-1) &
+               ~(intptr_t)(Alignment-1));
+
+      uintptr_t BlockSize = result + Size - (uint8_t *)CurBlock;
+      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList,
+                                                      BlockSize);
+
+      return result;
+    }
+
+    /// startExceptionTable - Use startFunctionBody to allocate memory for the
+    /// function's exception table.
+    uint8_t* startExceptionTable(const Function* F, uintptr_t &ActualSize) {
+      return startFunctionBody(F, ActualSize);
+    }
+
+    /// endExceptionTable - The exception table of F is now allocated,
+    /// and takes the memory in the range [TableStart,TableEnd).
+ void endExceptionTable(const Function *F, uint8_t *TableStart, + uint8_t *TableEnd, uint8_t* FrameRegister) { + assert(TableEnd > TableStart); + assert(TableStart == (uint8_t *)(CurBlock+1) && + "Mismatched table start/end!"); + + uintptr_t BlockSize = TableEnd - (uint8_t *)CurBlock; + TableBlocks[F] = CurBlock; + + // Release the memory at the end of this block that isn't needed. + FreeMemoryList =CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize); + } + + uint8_t *getGOTBase() const { + return GOTBase; + } + + void *getDlsymTable() const { + return DlsymTable; + } + + /// deallocateMemForFunction - Deallocate all memory for the specified + /// function body. + void deallocateMemForFunction(const Function *F) { + std::map<const Function*, MemoryRangeHeader*>::iterator + I = FunctionBlocks.find(F); + if (I == FunctionBlocks.end()) return; + + // Find the block that is allocated for this function. + MemoryRangeHeader *MemRange = I->second; + assert(MemRange->ThisAllocated && "Block isn't allocated!"); + + // Fill the buffer with garbage! +#ifndef NDEBUG + memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange)); +#endif + + // Free the memory. + FreeMemoryList = MemRange->FreeBlock(FreeMemoryList); + + // Finally, remove this entry from FunctionBlocks. + FunctionBlocks.erase(I); + + I = TableBlocks.find(F); + if (I == TableBlocks.end()) return; + + // Find the block that is allocated for this function. + MemRange = I->second; + assert(MemRange->ThisAllocated && "Block isn't allocated!"); + + // Fill the buffer with garbage! +#ifndef NDEBUG + memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange)); +#endif + + // Free the memory. + FreeMemoryList = MemRange->FreeBlock(FreeMemoryList); + + // Finally, remove this entry from TableBlocks. + TableBlocks.erase(I); + } + + /// setMemoryWritable - When code generation is in progress, + /// the code pages may need permissions changed. + void setMemoryWritable(void) + { + for (unsigned i = 0, e = Blocks.size(); i != e; ++i) + sys::Memory::setWritable(Blocks[i]); + } + /// setMemoryExecutable - When code generation is done and we're ready to + /// start execution, the code pages may need permissions changed. + void setMemoryExecutable(void) + { + for (unsigned i = 0, e = Blocks.size(); i != e; ++i) + sys::Memory::setExecutable(Blocks[i]); + } + }; +} + +DefaultJITMemoryManager::DefaultJITMemoryManager() { + // Allocate a 16M block of memory for functions. +#if defined(__APPLE__) && defined(__arm__) + sys::MemoryBlock MemBlock = getNewMemoryBlock(4 << 20); +#else + sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20); +#endif + + uint8_t *MemBase = static_cast<uint8_t*>(MemBlock.base()); + + // Allocate stubs backwards from the base, allocate functions forward + // from the base. + StubBase = MemBase; + CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards. + + // We set up the memory chunk with 4 mem regions, like this: + // [ START + // [ Free #0 ] -> Large space to allocate functions from. + // [ Allocated #1 ] -> Tiny space to separate regions. + // [ Free #2 ] -> Tiny space so there is always at least 1 free block. + // [ Allocated #3 ] -> Tiny space to prevent looking past end of block. + // END ] + // + // The last three blocks are never deallocated or touched. + + // Add MemoryRangeHeader to the end of the memory region, indicating that + // the space after the block of memory is allocated. This is block #3. 
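+  // Numerically, for the 16M case: Mem3 occupies the last
+  // sizeof(MemoryRangeHeader) bytes of the mapping, Mem2 sits just below it
+  // with the minimum block size, Mem1 just below that, and Mem0 spans from
+  // MemBase+512K (past the stub area) up to Mem1.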
+ MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1; + Mem3->ThisAllocated = 1; + Mem3->PrevAllocated = 0; + Mem3->BlockSize = 0; + + /// Add a tiny free region so that the free list always has one entry. + FreeRangeHeader *Mem2 = + (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize()); + Mem2->ThisAllocated = 0; + Mem2->PrevAllocated = 1; + Mem2->BlockSize = FreeRangeHeader::getMinBlockSize(); + Mem2->SetEndOfBlockSizeMarker(); + Mem2->Prev = Mem2; // Mem2 *is* the free list for now. + Mem2->Next = Mem2; + + /// Add a tiny allocated region so that Mem2 is never coalesced away. + MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1; + Mem1->ThisAllocated = 1; + Mem1->PrevAllocated = 0; + Mem1->BlockSize = (char*)Mem2 - (char*)Mem1; + + // Add a FreeRangeHeader to the start of the function body region, indicating + // that the space is free. Mark the previous block allocated so we never look + // at it. + FreeRangeHeader *Mem0 = (FreeRangeHeader*)CurStubPtr; + Mem0->ThisAllocated = 0; + Mem0->PrevAllocated = 1; + Mem0->BlockSize = (char*)Mem1-(char*)Mem0; + Mem0->SetEndOfBlockSizeMarker(); + Mem0->AddToFreeList(Mem2); + + // Start out with the freelist pointing to Mem0. + FreeMemoryList = Mem0; + + GOTBase = NULL; + DlsymTable = NULL; +} + +void DefaultJITMemoryManager::AllocateGOT() { + assert(GOTBase == 0 && "Cannot allocate the got multiple times"); + GOTBase = new uint8_t[sizeof(void*) * 8192]; + HasGOT = true; +} + +void DefaultJITMemoryManager::SetDlsymTable(void *ptr) { + DlsymTable = ptr; +} + +DefaultJITMemoryManager::~DefaultJITMemoryManager() { + for (unsigned i = 0, e = Blocks.size(); i != e; ++i) + sys::Memory::ReleaseRWX(Blocks[i]); + + delete[] GOTBase; + Blocks.clear(); +} + +uint8_t *DefaultJITMemoryManager::allocateStub(const GlobalValue* F, + unsigned StubSize, + unsigned Alignment) { + CurStubPtr -= StubSize; + CurStubPtr = (uint8_t*)(((intptr_t)CurStubPtr) & + ~(intptr_t)(Alignment-1)); + if (CurStubPtr < StubBase) { + // FIXME: allocate a new block + fprintf(stderr, "JIT ran out of memory for function stubs!\n"); + abort(); + } + return CurStubPtr; +} + +sys::MemoryBlock DefaultJITMemoryManager::getNewMemoryBlock(unsigned size) { + // Allocate a new block close to the last one. + const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.front(); + std::string ErrMsg; + sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld, &ErrMsg); + if (B.base() == 0) { + fprintf(stderr, + "Allocation failed when allocating new memory in the JIT\n%s\n", + ErrMsg.c_str()); + abort(); + } + Blocks.push_back(B); + return B; +} + + +JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() { + return new DefaultJITMemoryManager(); +} diff --git a/lib/ExecutionEngine/JIT/Makefile b/lib/ExecutionEngine/JIT/Makefile new file mode 100644 index 0000000000000..e2c9c61e88faa --- /dev/null +++ b/lib/ExecutionEngine/JIT/Makefile @@ -0,0 +1,37 @@ +##===- lib/ExecutionEngine/JIT/Makefile --------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## +LEVEL = ../../.. 
+LIBRARYNAME = LLVMJIT
+
+# Get the $(ARCH) setting
+include $(LEVEL)/Makefile.config
+
+# Enable the X86 JIT if compiling on X86
+ifeq ($(ARCH), x86)
+  ENABLE_X86_JIT = 1
+endif
+
+# This flag can also be used on the command line to force inclusion
+# of the X86 JIT on non-X86 hosts
+ifdef ENABLE_X86_JIT
+  CPPFLAGS += -DENABLE_X86_JIT
+endif
+
+# Enable the Sparc JIT if compiling on Sparc
+ifeq ($(ARCH), Sparc)
+  ENABLE_SPARC_JIT = 1
+endif
+
+# This flag can also be used on the command line to force inclusion
+# of the Sparc JIT on non-Sparc hosts
+ifdef ENABLE_SPARC_JIT
+  CPPFLAGS += -DENABLE_SPARC_JIT
+endif
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/ExecutionEngine/JIT/TargetSelect.cpp b/lib/ExecutionEngine/JIT/TargetSelect.cpp
new file mode 100644
index 0000000000000..0f208193075b8
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/TargetSelect.cpp
@@ -0,0 +1,83 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetMachineRegistry for the appropriate JIT to use, and
+// allows the user to specify a specific one on the commandline with -march=x.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Module.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/Support/RegistryParser.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetMachineRegistry.h"
+using namespace llvm;
+
+static cl::opt<const TargetMachineRegistry::entry*, false,
+               RegistryParser<TargetMachine> >
+MArch("march", cl::desc("Architecture to generate assembly for:"));
+
+static cl::opt<std::string>
+MCPU("mcpu",
+     cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+     cl::value_desc("cpu-name"),
+     cl::init(""));
+
+static cl::list<std::string>
+MAttrs("mattr",
+       cl::CommaSeparated,
+       cl::desc("Target specific attributes (-mattr=help for details)"),
+       cl::value_desc("a1,+a2,-a3,..."));
+
+/// createJIT - Create and return a new JIT compiler if there is one
+/// available for the current target.  Otherwise, return null.
+///
+ExecutionEngine *JIT::createJIT(ModuleProvider *MP, std::string *ErrorStr,
+                                JITMemoryManager *JMM,
+                                CodeGenOpt::Level OptLevel) {
+  const TargetMachineRegistry::entry *TheArch = MArch;
+  if (TheArch == 0) {
+    std::string Error;
+    TheArch = TargetMachineRegistry::getClosestTargetForJIT(Error);
+    if (TheArch == 0) {
+      if (ErrorStr)
+        *ErrorStr = Error;
+      return 0;
+    }
+  } else if (TheArch->JITMatchQualityFn() == 0) {
+    cerr << "WARNING: This target JIT is not designed for the host you are"
+         << " running.  If bad things happen, please choose a different "
+         << "-march switch.\n";
+  }
+
+  // Package up features to be passed to target/subtarget
+  std::string FeaturesStr;
+  if (!MCPU.empty() || !MAttrs.empty()) {
+    SubtargetFeatures Features;
+    Features.setCPU(MCPU);
+    for (unsigned i = 0; i != MAttrs.size(); ++i)
+      Features.AddFeature(MAttrs[i]);
+    FeaturesStr = Features.getString();
+  }
+
+  // Allocate a target...
+  TargetMachine *Target = TheArch->CtorFn(*MP->getModule(), FeaturesStr);
+  assert(Target && "Could not allocate target machine!");
+
+  // If the target supports JIT code generation, return a new JIT now.
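+  // (How a client typically reaches this point, sketched; the exact factory
+  // signature lives in the ExecutionEngine headers of this tree:
+  //
+  //    std::string Err;
+  //    ExecutionEngine *EE =
+  //      ExecutionEngine::create(MP, /*ForceInterpreter=*/false, &Err);
+  //    if (!EE) cerr << "JIT creation failed: " << Err << "\n";
+  // )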
+  if (TargetJITInfo *TJ = Target->getJITInfo())
+    return new JIT(MP, *Target, *TJ, JMM, OptLevel);
+
+  if (ErrorStr)
+    *ErrorStr = "target does not support JIT code generation";
+  return 0;
+}
diff --git a/lib/ExecutionEngine/Makefile b/lib/ExecutionEngine/Makefile
new file mode 100644
index 0000000000000..e0e050e89728b
--- /dev/null
+++ b/lib/ExecutionEngine/Makefile
@@ -0,0 +1,13 @@
+##===- lib/ExecutionEngine/Makefile ------------------------*- Makefile -*-===##
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../..
+LIBRARYNAME = LLVMExecutionEngine
+PARALLEL_DIRS = Interpreter JIT
+
+include $(LEVEL)/Makefile.common