Diffstat (limited to 'llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp')
-rw-r--r-- | llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp | 106 |
1 file changed, 59 insertions, 47 deletions
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
index ee72de67d8752..c93b296174384 100644
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -43,6 +43,7 @@ namespace {
 
 class ScalarizeMaskedMemIntrin : public FunctionPass {
   const TargetTransformInfo *TTI = nullptr;
+  const DataLayout *DL = nullptr;
 
 public:
   static char ID; // Pass identification, replacement for typeid
@@ -82,7 +83,7 @@ static bool isConstantIntVector(Value *Mask) {
   if (!C)
     return false;
 
-  unsigned NumElts = Mask->getType()->getVectorNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Mask->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *CElt = C->getAggregateElement(i);
     if (!CElt || !isa<ConstantInt>(CElt))
@@ -130,8 +131,8 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);
 
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  VectorType *VecType = cast<VectorType>(CI->getType());
+  const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
+  VectorType *VecType = cast<FixedVectorType>(CI->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -151,12 +152,13 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
   }
 
   // Adjust alignment for the scalar instruction.
-  AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
+  const Align AdjustedAlignVal =
+      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
   // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
   Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
-  unsigned VectorWidth = VecType->getNumElements();
+  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();
 
   // The result vector
   Value *VResult = Src0;
@@ -166,7 +168,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
       Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
+      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
       VResult = Builder.CreateInsertElement(VResult, Load, Idx);
     }
     CI->replaceAllUsesWith(VResult);
@@ -210,7 +212,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
     Builder.SetInsertPoint(InsertPt);
 
     Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Create "else" block, fill it in the next iteration
@@ -268,8 +270,8 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
   Value *Alignment = CI->getArgOperand(2);
   Value *Mask = CI->getArgOperand(3);
 
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  VectorType *VecType = cast<VectorType>(Src->getType());
+  const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
+  auto *VecType = cast<VectorType>(Src->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -287,12 +289,13 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
   }
 
   // Adjust alignment for the scalar instruction.
-  AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
+  const Align AdjustedAlignVal =
+      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
   // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
   Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
-  unsigned VectorWidth = VecType->getNumElements();
+  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();
 
   if (isConstantIntVector(Mask)) {
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
@@ -300,7 +303,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
         continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Idx);
       Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-      Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
+      Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
     }
     CI->eraseFromParent();
     return;
@@ -342,7 +345,7 @@ static void scalarizeMaskedStore(CallInst *CI, bool &ModifiedDT) {
 
     Value *OneElt = Builder.CreateExtractElement(Src, Idx);
     Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-    Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
+    Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
 
     // Create "else" block, fill it in the next iteration
     BasicBlock *NewIfBlock =
@@ -393,14 +396,14 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);
 
-  VectorType *VecType = cast<VectorType>(CI->getType());
+  auto *VecType = cast<FixedVectorType>(CI->getType());
   Type *EltTy = VecType->getElementType();
 
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
   BasicBlock *IfBlock = CI->getParent();
   Builder.SetInsertPoint(InsertPt);
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
 
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
@@ -517,11 +520,12 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) {
   Value *Alignment = CI->getArgOperand(2);
   Value *Mask = CI->getArgOperand(3);
 
-  assert(isa<VectorType>(Src->getType()) &&
-         "Unexpected data type in masked scatter intrinsic");
-  assert(isa<VectorType>(Ptrs->getType()) &&
-         isa<PointerType>(Ptrs->getType()->getVectorElementType()) &&
-         "Vector of pointers is expected in masked scatter intrinsic");
+  auto *SrcFVTy = cast<FixedVectorType>(Src->getType());
+
+  assert(
+      isa<VectorType>(Ptrs->getType()) &&
+      isa<PointerType>(cast<VectorType>(Ptrs->getType())->getElementType()) &&
+      "Vector of pointers is expected in masked scatter intrinsic");
 
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
@@ -529,8 +533,8 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) {
   Builder.SetInsertPoint(InsertPt);
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  unsigned VectorWidth = Src->getType()->getVectorNumElements();
+  MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
+  unsigned VectorWidth = SrcFVTy->getNumElements();
 
   // Shorten the way if the mask is a vector of constants.
   if (isConstantIntVector(Mask)) {
@@ -601,7 +605,7 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(1);
   Value *PassThru = CI->getArgOperand(2);
 
-  VectorType *VecType = cast<VectorType>(CI->getType());
+  auto *VecType = cast<FixedVectorType>(CI->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -624,8 +628,8 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
       Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-      LoadInst *Load =
-          Builder.CreateAlignedLoad(EltTy, NewPtr, 1, "Load" + Twine(Idx));
+      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
+                                                 "Load" + Twine(Idx));
       VResult =
           Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
       ++MemIndex;
@@ -670,7 +674,7 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
                                                      "cond.load");
     Builder.SetInsertPoint(InsertPt);
 
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, 1);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Move the pointer if there are more blocks to come.
@@ -714,7 +718,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
   Value *Ptr = CI->getArgOperand(1);
   Value *Mask = CI->getArgOperand(2);
 
-  VectorType *VecType = cast<VectorType>(Src->getType());
+  auto *VecType = cast<FixedVectorType>(Src->getType());
 
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
@@ -723,7 +727,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
   Builder.SetInsertPoint(InsertPt);
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
-  Type *EltTy = VecType->getVectorElementType();
+  Type *EltTy = VecType->getElementType();
 
   unsigned VectorWidth = VecType->getNumElements();
 
@@ -736,7 +740,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
       Value *OneElt =
          Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
      Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-      Builder.CreateAlignedStore(OneElt, NewPtr, 1);
+      Builder.CreateAlignedStore(OneElt, NewPtr, Align(1));
       ++MemIndex;
     }
     CI->eraseFromParent();
@@ -777,7 +781,7 @@ static void scalarizeMaskedCompressStore(CallInst *CI, bool &ModifiedDT) {
     Builder.SetInsertPoint(InsertPt);
 
     Value *OneElt = Builder.CreateExtractElement(Src, Idx);
-    Builder.CreateAlignedStore(OneElt, Ptr, 1);
+    Builder.CreateAlignedStore(OneElt, Ptr, Align(1));
 
     // Move the pointer if there are more blocks to come.
     Value *NewPtr;
@@ -811,6 +815,7 @@ bool ScalarizeMaskedMemIntrin::runOnFunction(Function &F) {
   bool EverMadeChange = false;
 
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+  DL = &F.getParent()->getDataLayout();
 
   bool MadeChange = true;
   while (MadeChange) {
@@ -849,39 +854,46 @@ bool ScalarizeMaskedMemIntrin::optimizeCallInst(CallInst *CI,
                                                 bool &ModifiedDT) {
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II) {
-    unsigned Alignment;
     switch (II->getIntrinsicID()) {
     default:
       break;
-    case Intrinsic::masked_load: {
+    case Intrinsic::masked_load:
       // Scalarize unsupported vector masked load
-      Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      if (TTI->isLegalMaskedLoad(CI->getType(), MaybeAlign(Alignment)))
+      if (TTI->isLegalMaskedLoad(
+              CI->getType(),
+              cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue()))
         return false;
       scalarizeMaskedLoad(CI, ModifiedDT);
       return true;
-    }
-    case Intrinsic::masked_store: {
-      Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
-      if (TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(),
-                                  MaybeAlign(Alignment)))
+    case Intrinsic::masked_store:
+      if (TTI->isLegalMaskedStore(
+              CI->getArgOperand(0)->getType(),
+              cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue()))
         return false;
       scalarizeMaskedStore(CI, ModifiedDT);
       return true;
-    }
-    case Intrinsic::masked_gather:
-      Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      if (TTI->isLegalMaskedGather(CI->getType(), MaybeAlign(Alignment)))
+    case Intrinsic::masked_gather: {
+      unsigned AlignmentInt =
+          cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+      Type *LoadTy = CI->getType();
+      Align Alignment =
+          DL->getValueOrABITypeAlignment(MaybeAlign(AlignmentInt), LoadTy);
+      if (TTI->isLegalMaskedGather(LoadTy, Alignment))
        return false;
       scalarizeMaskedGather(CI, ModifiedDT);
       return true;
-    case Intrinsic::masked_scatter:
-      Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
-      if (TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType(),
-                                    MaybeAlign(Alignment)))
+    }
+    case Intrinsic::masked_scatter: {
+      unsigned AlignmentInt =
+          cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+      Type *StoreTy = CI->getArgOperand(0)->getType();
+      Align Alignment =
+          DL->getValueOrABITypeAlignment(MaybeAlign(AlignmentInt), StoreTy);
+      if (TTI->isLegalMaskedScatter(StoreTy, Alignment))
        return false;
       scalarizeMaskedScatter(CI, ModifiedDT);
       return true;
+    }
    case Intrinsic::masked_expandload:
       if (TTI->isLegalMaskedExpandLoad(CI->getType()))
         return false;
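
Note on the alignment handling above (this note is not part of the patch): the scalarized load/store paths now derive the per-element alignment with commonAlignment() instead of MinAlign(), and the gather/scatter legality checks resolve the intrinsic's alignment operand through DL->getValueOrABITypeAlignment(), falling back to the ABI alignment of the value type when the operand is zero. The sketch below only illustrates the commonAlignment() step; it assumes an LLVM checkout providing llvm/Support/Alignment.h, and the 16-byte vector alignment and 4-byte element size are invented example values.

// Standalone sketch mirroring the AdjustedAlignVal computation used by the
// scalarized masked load/store paths (assumed sizes, not taken from the patch).
#include "llvm/Support/Alignment.h"
#include <cstdint>
#include <cstdio>

int main() {
  // Alignment encoded on the masked load/store intrinsic (hypothetical value).
  const llvm::Align VecAlign(16);
  // Byte size of one vector element, e.g. a 32-bit element is 4 bytes.
  const uint64_t EltSizeInBytes = 4;

  // commonAlignment(A, Offset) is Align(MinAlign(A.value(), Offset)): the
  // largest alignment still guaranteed for an element located at any multiple
  // of EltSizeInBytes from a VecAlign-aligned base pointer.
  const llvm::Align EltAlign = llvm::commonAlignment(VecAlign, EltSizeInBytes);

  std::printf("vector align = %llu, element align = %llu\n",
              (unsigned long long)VecAlign.value(),
              (unsigned long long)EltAlign.value());
  return 0;
}

Using Align rather than a raw unsigned makes the "alignment is a non-zero power of two" invariant explicit in the type system, which is the broader migration this patch participates in.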