Diffstat (limited to 'llvm/lib/ExecutionEngine/Interpreter/Execution.cpp')
-rw-r--r-- | llvm/lib/ExecutionEngine/Interpreter/Execution.cpp | 172
1 file changed, 84 insertions, 88 deletions
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 51f31d3d5d8f..62e1ea6e0f0a 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -169,13 +169,14 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
       Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
       break;
 
-#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                        \
-   case Type::VectorTyID: {                                          \
-     assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());   \
-     Dest.AggregateVal.resize( Src1.AggregateVal.size() );           \
-     for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)            \
-       Dest.AggregateVal[_i].IntVal = APInt(1,                       \
-         Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
+#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                                  \
+  case Type::FixedVectorTyID:                                                  \
+  case Type::ScalableVectorTyID: {                                             \
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());              \
+    Dest.AggregateVal.resize(Src1.AggregateVal.size());                        \
+    for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++)                 \
+      Dest.AggregateVal[_i].IntVal = APInt(                                    \
+          1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));   \
   } break;
 
 // Handle pointers specially because they must be compared with only as much
@@ -367,12 +368,13 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
     Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
   break;
 
-#define IMPLEMENT_VECTOR_FCMP(OP)                                   \
-  case Type::VectorTyID:                                            \
-    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {      \
-      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                           \
-    } else {                                                        \
-        IMPLEMENT_VECTOR_FCMP_T(OP, Double);                        \
+#define IMPLEMENT_VECTOR_FCMP(OP)                                              \
+  case Type::FixedVectorTyID:                                                  \
+  case Type::ScalableVectorTyID:                                               \
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {                 \
+      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                                      \
+    } else {                                                                   \
+      IMPLEMENT_VECTOR_FCMP_T(OP, Double);                                     \
     }
 
 static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
@@ -902,13 +904,13 @@ void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
     // If we have a previous stack frame, and we have a previous call,
     // fill in the return value...
     ExecutionContext &CallingSF = ECStack.back();
-    if (Instruction *I = CallingSF.Caller.getInstruction()) {
+    if (CallingSF.Caller) {
       // Save result...
-      if (!CallingSF.Caller.getType()->isVoidTy())
-        SetValue(I, Result, CallingSF);
-      if (InvokeInst *II = dyn_cast<InvokeInst> (I))
+      if (!CallingSF.Caller->getType()->isVoidTy())
+        SetValue(CallingSF.Caller, Result, CallingSF);
+      if (InvokeInst *II = dyn_cast<InvokeInst>(CallingSF.Caller))
         SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
-      CallingSF.Caller = CallSite();   // We returned from the call...
+      CallingSF.Caller = nullptr;      // We returned from the call...
     }
   }
 }
@@ -1113,64 +1115,59 @@ void Interpreter::visitStoreInst(StoreInst &I) {
 //                 Miscellaneous Instruction Implementations
 //===----------------------------------------------------------------------===//
 
-void Interpreter::visitCallSite(CallSite CS) {
+void Interpreter::visitVAStartInst(VAStartInst &I) {
   ExecutionContext &SF = ECStack.back();
+  GenericValue ArgIndex;
+  ArgIndex.UIntPairVal.first = ECStack.size() - 1;
+  ArgIndex.UIntPairVal.second = 0;
+  SetValue(&I, ArgIndex, SF);
+}
 
-  // Check to see if this is an intrinsic function call...
-  Function *F = CS.getCalledFunction();
-  if (F && F->isDeclaration())
-    switch (F->getIntrinsicID()) {
-    case Intrinsic::not_intrinsic:
-      break;
-    case Intrinsic::vastart: { // va_start
-      GenericValue ArgIndex;
-      ArgIndex.UIntPairVal.first = ECStack.size() - 1;
-      ArgIndex.UIntPairVal.second = 0;
-      SetValue(CS.getInstruction(), ArgIndex, SF);
-      return;
-    }
-    case Intrinsic::vaend:    // va_end is a noop for the interpreter
-      return;
-    case Intrinsic::vacopy:   // va_copy: dest = src
-      SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
-      return;
-    default:
-      // If it is an unknown intrinsic function, use the intrinsic lowering
-      // class to transform it into hopefully tasty LLVM code.
-      //
-      BasicBlock::iterator me(CS.getInstruction());
-      BasicBlock *Parent = CS.getInstruction()->getParent();
-      bool atBegin(Parent->begin() == me);
-      if (!atBegin)
-        --me;
-      IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
-
-      // Restore the CurInst pointer to the first instruction newly inserted, if
-      // any.
-      if (atBegin) {
-        SF.CurInst = Parent->begin();
-      } else {
-        SF.CurInst = me;
-        ++SF.CurInst;
-      }
-      return;
-    }
+void Interpreter::visitVAEndInst(VAEndInst &I) {
+  // va_end is a noop for the interpreter
+}
+
+void Interpreter::visitVACopyInst(VACopyInst &I) {
+  ExecutionContext &SF = ECStack.back();
+  SetValue(&I, getOperandValue(*I.arg_begin(), SF), SF);
+}
+
+void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
+  ExecutionContext &SF = ECStack.back();
+
+  // If it is an unknown intrinsic function, use the intrinsic lowering
+  // class to transform it into hopefully tasty LLVM code.
+  //
+  BasicBlock::iterator Me(&I);
+  BasicBlock *Parent = I.getParent();
+  bool atBegin(Parent->begin() == Me);
+  if (!atBegin)
+    --Me;
+  IL->LowerIntrinsicCall(&I);
+
+  // Restore the CurInst pointer to the first instruction newly inserted, if
+  // any.
+  if (atBegin) {
+    SF.CurInst = Parent->begin();
+  } else {
+    SF.CurInst = Me;
+    ++SF.CurInst;
+  }
+}
 
+void Interpreter::visitCallBase(CallBase &I) {
+  ExecutionContext &SF = ECStack.back();
 
-  SF.Caller = CS;
+  SF.Caller = &I;
   std::vector<GenericValue> ArgVals;
-  const unsigned NumArgs = SF.Caller.arg_size();
+  const unsigned NumArgs = SF.Caller->arg_size();
   ArgVals.reserve(NumArgs);
-  uint16_t pNum = 1;
-  for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
-         e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
-    Value *V = *i;
+  for (Value *V : SF.Caller->args())
     ArgVals.push_back(getOperandValue(V, SF));
-  }
 
   // To handle indirect calls, we must get the pointer value from the argument
   // and treat it as a function pointer.
-  GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
+  GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF);
   callFunction((Function*)GVTOP(SRC), ArgVals);
 }
 
@@ -1332,7 +1329,7 @@ GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
                                              ExecutionContext &SF) {
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+  if (isa<VectorType>(SrcVal->getType())) {
     assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
            DstTy->getScalarType()->isFloatTy() &&
            "Invalid FPTrunc instruction");
@@ -1355,7 +1352,7 @@ GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+  if (isa<VectorType>(SrcVal->getType())) {
     assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
            DstTy->getScalarType()->isDoubleTy() &&
            "Invalid FPExt instruction");
@@ -1378,7 +1375,7 @@ GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
   Type *SrcTy = SrcVal->getType();
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if (SrcTy->getTypeID() == Type::VectorTyID) {
+  if (isa<VectorType>(SrcTy)) {
     Type *DstVecTy = DstTy->getScalarType();
     Type *SrcVecTy = SrcTy->getScalarType();
     uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
@@ -1416,7 +1413,7 @@ GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
   Type *SrcTy = SrcVal->getType();
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if (SrcTy->getTypeID() == Type::VectorTyID) {
+  if (isa<VectorType>(SrcTy)) {
     Type *DstVecTy = DstTy->getScalarType();
     Type *SrcVecTy = SrcTy->getScalarType();
     uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
@@ -1452,7 +1449,7 @@ GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+  if (isa<VectorType>(SrcVal->getType())) {
     Type *DstVecTy = DstTy->getScalarType();
     unsigned size = Src.AggregateVal.size();
     // the sizes of src and dst vectors must be equal
@@ -1484,7 +1481,7 @@ GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
+  if (isa<VectorType>(SrcVal->getType())) {
     Type *DstVecTy = DstTy->getScalarType();
     unsigned size = Src.AggregateVal.size();
     // the sizes of src and dst vectors must be equal
@@ -1545,8 +1542,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
   Type *SrcTy = SrcVal->getType();
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 
-  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
-      (DstTy->getTypeID() == Type::VectorTyID)) {
+  if (isa<VectorType>(SrcTy) || isa<VectorType>(DstTy)) {
     // vector src bitcast to vector dst or vector src bitcast to scalar dst or
     // scalar src bitcast to vector dst
     bool isLittleEndian = getDataLayout().isLittleEndian();
@@ -1558,7 +1554,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
     unsigned SrcNum;
     unsigned DstNum;
 
-    if (SrcTy->getTypeID() == Type::VectorTyID) {
+    if (isa<VectorType>(SrcTy)) {
       SrcElemTy = SrcTy->getScalarType();
       SrcBitSize = SrcTy->getScalarSizeInBits();
       SrcNum = Src.AggregateVal.size();
@@ -1571,7 +1567,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
       SrcVec.AggregateVal.push_back(Src);
     }
 
-    if (DstTy->getTypeID() == Type::VectorTyID) {
+    if (isa<VectorType>(DstTy)) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
@@ -1644,7 +1640,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
     }
 
     // convert result from integer to specified type
-    if (DstTy->getTypeID() == Type::VectorTyID) {
+    if (isa<VectorType>(DstTy)) {
       if (DstElemTy->isDoubleTy()) {
         Dest.AggregateVal.resize(DstNum);
         for (unsigned i = 0; i < DstNum; i++)
@@ -1667,8 +1663,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
         Dest.IntVal = TempDst.AggregateVal[0].IntVal;
       }
     }
-  } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
-           //      (DstTy->getTypeID() == Type::VectorTyID))
+  } else { // if (isa<VectorType>(SrcTy)) || isa<VectorType>(DstTy))
 
     // scalar src bitcast to scalar dst
     if (DstTy->isPointerTy()) {
@@ -1868,7 +1863,6 @@ void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
 
   GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
-  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
   GenericValue Dest;
 
   // There is no need to check types of src1 and src2, because the compiled
@@ -1878,7 +1872,7 @@ void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
   Type *TyContained = Ty->getElementType();
   unsigned src1Size  = (unsigned)Src1.AggregateVal.size();
   unsigned src2Size  = (unsigned)Src2.AggregateVal.size();
-  unsigned src3Size  = (unsigned)Src3.AggregateVal.size();
+  unsigned src3Size = I.getShuffleMask().size();
 
   Dest.AggregateVal.resize(src3Size);
 
@@ -1888,7 +1882,7 @@ void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
     break;
   case Type::IntegerTyID:
     for( unsigned i=0; i<src3Size; i++) {
-      unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
+      unsigned j = std::max(0, I.getMaskValue(i));
       if(j < src1Size)
         Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
       else if(j < src1Size + src2Size)
@@ -1904,7 +1898,7 @@ void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
     break;
   case Type::FloatTyID:
     for( unsigned i=0; i<src3Size; i++) {
-      unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
+      unsigned j = std::max(0, I.getMaskValue(i));
       if(j < src1Size)
         Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
       else if(j < src1Size + src2Size)
@@ -1915,7 +1909,7 @@ void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
     break;
   case Type::DoubleTyID:
     for( unsigned i=0; i<src3Size; i++) {
-      unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
+      unsigned j = std::max(0, I.getMaskValue(i));
       if(j < src1Size)
         Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
       else if(j < src1Size + src2Size)
@@ -1960,7 +1954,8 @@ void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
     break;
   case Type::ArrayTyID:
   case Type::StructTyID:
-  case Type::VectorTyID:
+  case Type::FixedVectorTyID:
+  case Type::ScalableVectorTyID:
     Dest.AggregateVal = pSrc->AggregateVal;
     break;
   case Type::PointerTyID:
@@ -2007,7 +2002,8 @@ void Interpreter::visitInsertValueInst(InsertValueInst &I) {
     break;
   case Type::ArrayTyID:
   case Type::StructTyID:
-  case Type::VectorTyID:
+  case Type::FixedVectorTyID:
+  case Type::ScalableVectorTyID:
     pDest->AggregateVal = Src2.AggregateVal;
     break;
   case Type::PointerTyID:
@@ -2121,8 +2117,8 @@ GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
 // callFunction - Execute the specified function...
 //
 void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
-  assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
-          ECStack.back().Caller.arg_size() == ArgVals.size()) &&
+  assert((ECStack.empty() || !ECStack.back().Caller ||
+          ECStack.back().Caller->arg_size() == ArgVals.size()) &&
          "Incorrect number of arguments passed into function call!");
 
   // Make a new stack frame... and fill it in.
   ECStack.emplace_back();
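
For reference, the recurring change in this diff replaces TypeID comparisons against the removed Type::VectorTyID with isa<VectorType> checks (which match both Type::FixedVectorTyID and Type::ScalableVectorTyID), and replaces the retired CallSite wrapper with CallBase. The following is a minimal standalone sketch of those two idioms, not code from this patch; the helper names isFloatVector and calledTarget are illustrative only.

// Sketch of the post-change idioms; assumes an LLVM development tree.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// isa<VectorType> covers both fixed and scalable vectors, so no
// per-TypeID switch is needed to recognize a vector type.
static bool isFloatVector(Type *Ty) {
  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return VTy->getElementType()->isFloatTy();
  return false;
}

// CallBase replaces the old CallSite wrapper: the callee comes from
// getCalledOperand() (formerly CallSite::getCalledValue()) and the
// arguments can be walked with the args() range.
static Value *calledTarget(CallBase &CB) {
  for (Value *Arg : CB.args())
    (void)Arg; // e.g. evaluate each argument here
  return CB.getCalledOperand();
}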