Diffstat (limited to 'lib/Analysis')
29 files changed, 761 insertions, 541 deletions
| diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp index c456990d8ae2..0234965a0065 100644 --- a/lib/Analysis/AliasAnalysis.cpp +++ b/lib/Analysis/AliasAnalysis.cpp @@ -239,7 +239,7 @@ bool llvm::isNoAliasCall(const Value *V) {  ///    NoAlias returns  ///  bool llvm::isIdentifiedObject(const Value *V) { -  if (isa<AllocationInst>(V) || isNoAliasCall(V)) +  if (isa<AllocaInst>(V) || isNoAliasCall(V))      return true;    if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))      return true; diff --git a/lib/Analysis/AliasAnalysisCounter.cpp b/lib/Analysis/AliasAnalysisCounter.cpp index 272c871ce239..030bcd26f072 100644 --- a/lib/Analysis/AliasAnalysisCounter.cpp +++ b/lib/Analysis/AliasAnalysisCounter.cpp @@ -17,7 +17,6 @@  #include "llvm/Analysis/AliasAnalysis.h"  #include "llvm/Assembly/Writer.h"  #include "llvm/Support/CommandLine.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/raw_ostream.h"  using namespace llvm; @@ -28,8 +27,7 @@ static cl::opt<bool>  PrintAllFailures("count-aa-print-all-failed-queries", cl::ReallyHidden);  namespace { -  class VISIBILITY_HIDDEN AliasAnalysisCounter  -      : public ModulePass, public AliasAnalysis { +  class AliasAnalysisCounter : public ModulePass, public AliasAnalysis {      unsigned No, May, Must;      unsigned NoMR, JustRef, JustMod, MR;      const char *Name; diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp index bb95c01e2ea9..6a2564cbe385 100644 --- a/lib/Analysis/AliasAnalysisEvaluator.cpp +++ b/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -28,7 +28,6 @@  #include "llvm/Target/TargetData.h"  #include "llvm/Support/InstIterator.h"  #include "llvm/Support/CommandLine.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/raw_ostream.h"  #include "llvm/ADT/SetVector.h"  using namespace llvm; @@ -45,7 +44,7 @@ static cl::opt<bool> PrintRef("print-ref", cl::ReallyHidden);  static cl::opt<bool> PrintModRef("print-modref", cl::ReallyHidden);  namespace { -  class VISIBILITY_HIDDEN AAEval : public FunctionPass { +  class AAEval : public FunctionPass {      unsigned NoAlias, MayAlias, MustAlias;      unsigned NoModRef, Mod, Ref, ModRef; diff --git a/lib/Analysis/AliasDebugger.cpp b/lib/Analysis/AliasDebugger.cpp index 1e82621e0202..cf4727f1ebee 100644 --- a/lib/Analysis/AliasDebugger.cpp +++ b/lib/Analysis/AliasDebugger.cpp @@ -23,14 +23,12 @@  #include "llvm/Constants.h"  #include "llvm/DerivedTypes.h"  #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Support/Compiler.h"  #include <set>  using namespace llvm;  namespace { -  class VISIBILITY_HIDDEN AliasDebugger  -      : public ModulePass, public AliasAnalysis { +  class AliasDebugger : public ModulePass, public AliasAnalysis {      //What we do is simple.  Keep track of every value the AA could      //know about, and verify that queries are one of those. 
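The VISIBILITY_HIDDEN removals above, and the many like them later in this patch, are safe because every affected pass class already sits inside an anonymous namespace. A minimal standalone sketch of that reasoning (ExamplePass is hypothetical, not a class from the patch):

```cpp
// Standalone sketch, not code from the patch: a type defined inside an
// anonymous namespace already has internal linkage, so the compiler emits no
// externally visible symbol for it and the explicit hidden-visibility
// attribute (VISIBILITY_HIDDEN) adds nothing.
namespace {
  // Before: struct VISIBILITY_HIDDEN ExamplePass : public FunctionPass {...};
  // After:  struct ExamplePass : public FunctionPass {...};
  struct ExamplePass {
    int run() const { return 0; }   // stands in for runOnFunction()
  };
}

int main() {
  ExamplePass P;                    // usable only in this translation unit
  return P.run();
}
```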
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp index b056d0091a09..c037c8d63afb 100644 --- a/lib/Analysis/AliasSetTracker.cpp +++ b/lib/Analysis/AliasSetTracker.cpp @@ -19,7 +19,6 @@  #include "llvm/Type.h"  #include "llvm/Target/TargetData.h"  #include "llvm/Assembly/Writer.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/InstIterator.h"  #include "llvm/Support/Format.h" @@ -297,12 +296,6 @@ bool AliasSetTracker::add(StoreInst *SI) {    return NewPtr;  } -bool AliasSetTracker::add(FreeInst *FI) { -  bool NewPtr; -  addPointer(FI->getOperand(0), ~0, AliasSet::Mods, NewPtr); -  return NewPtr; -} -  bool AliasSetTracker::add(VAArgInst *VAAI) {    bool NewPtr;    addPointer(VAAI->getOperand(0), ~0, AliasSet::ModRef, NewPtr); @@ -338,8 +331,6 @@ bool AliasSetTracker::add(Instruction *I) {      return add(CI);    else if (InvokeInst *II = dyn_cast<InvokeInst>(I))      return add(II); -  else if (FreeInst *FI = dyn_cast<FreeInst>(I)) -    return add(FI);    else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))      return add(VAAI);    return true; @@ -428,13 +419,6 @@ bool AliasSetTracker::remove(StoreInst *SI) {    return true;  } -bool AliasSetTracker::remove(FreeInst *FI) { -  AliasSet *AS = findAliasSetForPointer(FI->getOperand(0), ~0); -  if (!AS) return false; -  remove(*AS); -  return true; -} -  bool AliasSetTracker::remove(VAArgInst *VAAI) {    AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0), ~0);    if (!AS) return false; @@ -460,8 +444,6 @@ bool AliasSetTracker::remove(Instruction *I) {      return remove(SI);    else if (CallInst *CI = dyn_cast<CallInst>(I))      return remove(CI); -  else if (FreeInst *FI = dyn_cast<FreeInst>(I)) -    return remove(FI);    else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))      return remove(VAAI);    return true; @@ -599,7 +581,7 @@ AliasSetTracker::ASTCallbackVH::operator=(Value *V) {  //===----------------------------------------------------------------------===//  namespace { -  class VISIBILITY_HIDDEN AliasSetPrinter : public FunctionPass { +  class AliasSetPrinter : public FunctionPass {      AliasSetTracker *Tracker;    public:      static char ID; // Pass identification, replacement for typeid diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 756ffea66b09..c81190b41846 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -15,7 +15,7 @@  #include "llvm/Analysis/AliasAnalysis.h"  #include "llvm/Analysis/CaptureTracking.h" -#include "llvm/Analysis/MallocHelper.h" +#include "llvm/Analysis/MemoryBuiltins.h"  #include "llvm/Analysis/Passes.h"  #include "llvm/Constants.h"  #include "llvm/DerivedTypes.h" @@ -30,7 +30,6 @@  #include "llvm/ADT/SmallSet.h"  #include "llvm/ADT/SmallVector.h"  #include "llvm/ADT/STLExtras.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/GetElementPtrTypeIterator.h"  #include <algorithm> @@ -80,7 +79,7 @@ static bool isKnownNonNull(const Value *V) {  /// object that never escapes from the function.  static bool isNonEscapingLocalObject(const Value *V) {    // If this is a local allocation, check to see if it escapes. 
-  if (isa<AllocationInst>(V) || isNoAliasCall(V)) +  if (isa<AllocaInst>(V) || isNoAliasCall(V))      return !PointerMayBeCaptured(V, false);    // If this is an argument that corresponds to a byval or noalias argument, @@ -104,7 +103,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,    const Type *AccessTy;    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {      AccessTy = GV->getType()->getElementType(); -  } else if (const AllocationInst *AI = dyn_cast<AllocationInst>(V)) { +  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {      if (!AI->isArrayAllocation())        AccessTy = AI->getType()->getElementType();      else @@ -139,7 +138,7 @@ namespace {    /// implementations, in that it does not chain to a previous analysis.  As    /// such it doesn't follow many of the rules that other alias analyses must.    /// -  struct VISIBILITY_HIDDEN NoAA : public ImmutablePass, public AliasAnalysis { +  struct NoAA : public ImmutablePass, public AliasAnalysis {      static char ID; // Class identification, replacement for typeinfo      NoAA() : ImmutablePass(&ID) {}      explicit NoAA(void *PID) : ImmutablePass(PID) { } @@ -194,7 +193,7 @@ namespace {    /// BasicAliasAnalysis - This is the default alias analysis implementation.    /// Because it doesn't chain to a previous alias analysis (like -no-aa), it    /// derives from the NoAA class. -  struct VISIBILITY_HIDDEN BasicAliasAnalysis : public NoAA { +  struct BasicAliasAnalysis : public NoAA {      static char ID; // Class identification, replacement for typeinfo      BasicAliasAnalysis() : NoAA(&ID) {}      AliasResult alias(const Value *V1, unsigned V1Size, @@ -218,7 +217,7 @@ namespace {    private:      // VisitedPHIs - Track PHI nodes visited by a aliasCheck() call. -    SmallPtrSet<const PHINode*, 16> VisitedPHIs; +    SmallPtrSet<const Value*, 16> VisitedPHIs;      // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction      // against another. @@ -230,6 +229,10 @@ namespace {      AliasResult aliasPHI(const PHINode *PN, unsigned PNSize,                           const Value *V2, unsigned V2Size); +    /// aliasSelect - Disambiguate a Select instruction against another value. +    AliasResult aliasSelect(const SelectInst *SI, unsigned SISize, +                            const Value *V2, unsigned V2Size); +      AliasResult aliasCheck(const Value *V1, unsigned V1Size,                             const Value *V2, unsigned V2Size); @@ -520,6 +523,41 @@ BasicAliasAnalysis::aliasGEP(const Value *V1, unsigned V1Size,    return MayAlias;  } +// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select instruction +// against another. +AliasAnalysis::AliasResult +BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize, +                                const Value *V2, unsigned V2Size) { +  // If the values are Selects with the same condition, we can do a more precise +  // check: just check for aliases between the values on corresponding arms. 
+  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) +    if (SI->getCondition() == SI2->getCondition()) { +      AliasResult Alias = +        aliasCheck(SI->getTrueValue(), SISize, +                   SI2->getTrueValue(), V2Size); +      if (Alias == MayAlias) +        return MayAlias; +      AliasResult ThisAlias = +        aliasCheck(SI->getFalseValue(), SISize, +                   SI2->getFalseValue(), V2Size); +      if (ThisAlias != Alias) +        return MayAlias; +      return Alias; +    } + +  // If both arms of the Select node NoAlias or MustAlias V2, then returns +  // NoAlias / MustAlias. Otherwise, returns MayAlias. +  AliasResult Alias = +    aliasCheck(SI->getTrueValue(), SISize, V2, V2Size); +  if (Alias == MayAlias) +    return MayAlias; +  AliasResult ThisAlias = +    aliasCheck(SI->getFalseValue(), SISize, V2, V2Size); +  if (ThisAlias != Alias) +    return MayAlias; +  return Alias; +} +  // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction  // against another.  AliasAnalysis::AliasResult @@ -529,6 +567,28 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,    if (!VisitedPHIs.insert(PN))      return MayAlias; +  // If the values are PHIs in the same block, we can do a more precise +  // as well as efficient check: just check for aliases between the values +  // on corresponding edges. +  if (const PHINode *PN2 = dyn_cast<PHINode>(V2)) +    if (PN2->getParent() == PN->getParent()) { +      AliasResult Alias = +        aliasCheck(PN->getIncomingValue(0), PNSize, +                   PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)), +                   V2Size); +      if (Alias == MayAlias) +        return MayAlias; +      for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) { +        AliasResult ThisAlias = +          aliasCheck(PN->getIncomingValue(i), PNSize, +                     PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), +                     V2Size); +        if (ThisAlias != Alias) +          return MayAlias; +      } +      return Alias; +    } +    SmallPtrSet<Value*, 4> UniqueSrc;    SmallVector<Value*, 4> V1Srcs;    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { @@ -543,7 +603,7 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,        V1Srcs.push_back(PV1);    } -  AliasResult Alias = aliasCheck(V1Srcs[0], PNSize, V2, V2Size); +  AliasResult Alias = aliasCheck(V2, V2Size, V1Srcs[0], PNSize);    // Early exit if the check of the first PHI source against V2 is MayAlias.    // Other results are not possible.    if (Alias == MayAlias) @@ -553,6 +613,12 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,    // NoAlias / MustAlias. Otherwise, returns MayAlias.    for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {      Value *V = V1Srcs[i]; + +    // If V2 is a PHI, the recursive case will have been caught in the +    // above aliasCheck call, so these subsequent calls to aliasCheck +    // don't need to assume that V2 is being visited recursively. +    VisitedPHIs.erase(V2); +      AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);      if (ThisAlias != Alias || ThisAlias == MayAlias)        return MayAlias; @@ -587,8 +653,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,        return NoAlias;      // Arguments can't alias with local allocations or noalias calls. 
-    if ((isa<Argument>(O1) && (isa<AllocationInst>(O2) || isNoAliasCall(O2))) || -        (isa<Argument>(O2) && (isa<AllocationInst>(O1) || isNoAliasCall(O1)))) +    if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) || +        (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))        return NoAlias;      // Most objects can't alias null. @@ -629,6 +695,13 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,    if (const PHINode *PN = dyn_cast<PHINode>(V1))      return aliasPHI(PN, V1Size, V2, V2Size); +  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) { +    std::swap(V1, V2); +    std::swap(V1Size, V2Size); +  } +  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) +    return aliasSelect(S1, V1Size, V2, V2Size); +    return MayAlias;  } diff --git a/lib/Analysis/CFGPrinter.cpp b/lib/Analysis/CFGPrinter.cpp index 08f070c9be3e..e06704bd897c 100644 --- a/lib/Analysis/CFGPrinter.cpp +++ b/lib/Analysis/CFGPrinter.cpp @@ -20,11 +20,10 @@  #include "llvm/Analysis/CFGPrinter.h"  #include "llvm/Pass.h" -#include "llvm/Support/Compiler.h"  using namespace llvm;  namespace { -  struct VISIBILITY_HIDDEN CFGViewer : public FunctionPass { +  struct CFGViewer : public FunctionPass {      static char ID; // Pass identifcation, replacement for typeid      CFGViewer() : FunctionPass(&ID) {} @@ -46,7 +45,7 @@ static RegisterPass<CFGViewer>  V0("view-cfg", "View CFG of function", false, true);  namespace { -  struct VISIBILITY_HIDDEN CFGOnlyViewer : public FunctionPass { +  struct CFGOnlyViewer : public FunctionPass {      static char ID; // Pass identifcation, replacement for typeid      CFGOnlyViewer() : FunctionPass(&ID) {} @@ -69,7 +68,7 @@ V1("view-cfg-only",     "View CFG of function (with no function bodies)", false, true);  namespace { -  struct VISIBILITY_HIDDEN CFGPrinter : public FunctionPass { +  struct CFGPrinter : public FunctionPass {      static char ID; // Pass identification, replacement for typeid      CFGPrinter() : FunctionPass(&ID) {}      explicit CFGPrinter(void *pid) : FunctionPass(pid) {} @@ -102,7 +101,7 @@ static RegisterPass<CFGPrinter>  P1("dot-cfg", "Print CFG of function to 'dot' file", false, true);  namespace { -  struct VISIBILITY_HIDDEN CFGOnlyPrinter : public FunctionPass { +  struct CFGOnlyPrinter : public FunctionPass {      static char ID; // Pass identification, replacement for typeid      CFGOnlyPrinter() : FunctionPass(&ID) {}      explicit CFGOnlyPrinter(void *pid) : FunctionPass(pid) {} diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt index d4be9863b6a0..f21fd54596d8 100644 --- a/lib/Analysis/CMakeLists.txt +++ b/lib/Analysis/CMakeLists.txt @@ -23,7 +23,7 @@ add_llvm_library(LLVMAnalysis    LoopDependenceAnalysis.cpp    LoopInfo.cpp    LoopPass.cpp -  MallocHelper.cpp +  MemoryBuiltins.cpp    MemoryDependenceAnalysis.cpp    PointerTracking.cpp    PostDominators.cpp diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp index b30ac719ae0e..f615881829c6 100644 --- a/lib/Analysis/CaptureTracking.cpp +++ b/lib/Analysis/CaptureTracking.cpp @@ -73,9 +73,6 @@ bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures) {        // captured.        break;      } -    case Instruction::Free: -      // Freeing a pointer does not cause it to be captured. -      break;      case Instruction::Load:        // Loading from a pointer does not cause it to be captured.        
break; diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 214caeb92a0e..33a5792796f2 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -39,6 +39,138 @@ using namespace llvm;  // Constant Folding internal helper functions  //===----------------------------------------------------------------------===// +/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with  +/// TargetData.  This always returns a non-null constant, but it may be a +/// ConstantExpr if unfoldable. +static Constant *FoldBitCast(Constant *C, const Type *DestTy, +                             const TargetData &TD) { +   +  // This only handles casts to vectors currently. +  const VectorType *DestVTy = dyn_cast<VectorType>(DestTy); +  if (DestVTy == 0) +    return ConstantExpr::getBitCast(C, DestTy); +   +  // If this is a scalar -> vector cast, convert the input into a <1 x scalar> +  // vector so the code below can handle it uniformly. +  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) { +    Constant *Ops = C; // don't take the address of C! +    return FoldBitCast(ConstantVector::get(&Ops, 1), DestTy, TD); +  } +   +  // If this is a bitcast from constant vector -> vector, fold it. +  ConstantVector *CV = dyn_cast<ConstantVector>(C); +  if (CV == 0) +    return ConstantExpr::getBitCast(C, DestTy); +   +  // If the element types match, VMCore can fold it. +  unsigned NumDstElt = DestVTy->getNumElements(); +  unsigned NumSrcElt = CV->getNumOperands(); +  if (NumDstElt == NumSrcElt) +    return ConstantExpr::getBitCast(C, DestTy); +   +  const Type *SrcEltTy = CV->getType()->getElementType(); +  const Type *DstEltTy = DestVTy->getElementType(); +   +  // Otherwise, we're changing the number of elements in a vector, which  +  // requires endianness information to do the right thing.  For example, +  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) +  // folds to (little endian): +  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0> +  // and to (big endian): +  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1> +   +  // First thing is first.  We only want to think about integer here, so if +  // we have something in FP form, recast it as integer. +  if (DstEltTy->isFloatingPoint()) { +    // Fold to an vector of integers with same size as our FP type. +    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); +    const Type *DestIVTy = +      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt); +    // Recursively handle this integer conversion, if possible. +    C = FoldBitCast(C, DestIVTy, TD); +    if (!C) return ConstantExpr::getBitCast(C, DestTy); +     +    // Finally, VMCore can handle this now that #elts line up. +    return ConstantExpr::getBitCast(C, DestTy); +  } +   +  // Okay, we know the destination is integer, if the input is FP, convert +  // it to integer first. +  if (SrcEltTy->isFloatingPoint()) { +    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); +    const Type *SrcIVTy = +      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt); +    // Ask VMCore to do the conversion now that #elts line up. +    C = ConstantExpr::getBitCast(C, SrcIVTy); +    CV = dyn_cast<ConstantVector>(C); +    if (!CV)  // If VMCore wasn't able to fold it, bail out. +      return C; +  } +   +  // Now we know that the input and output vectors are both integer vectors +  // of the same size, and that their #elements is not the same.  
Do the +  // conversion here, which depends on whether the input or output has +  // more elements. +  bool isLittleEndian = TD.isLittleEndian(); +   +  SmallVector<Constant*, 32> Result; +  if (NumDstElt < NumSrcElt) { +    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>) +    Constant *Zero = Constant::getNullValue(DstEltTy); +    unsigned Ratio = NumSrcElt/NumDstElt; +    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); +    unsigned SrcElt = 0; +    for (unsigned i = 0; i != NumDstElt; ++i) { +      // Build each element of the result. +      Constant *Elt = Zero; +      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); +      for (unsigned j = 0; j != Ratio; ++j) { +        Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++)); +        if (!Src)  // Reject constantexpr elements. +          return ConstantExpr::getBitCast(C, DestTy); +         +        // Zero extend the element to the right size. +        Src = ConstantExpr::getZExt(Src, Elt->getType()); +         +        // Shift it to the right place, depending on endianness. +        Src = ConstantExpr::getShl(Src,  +                                   ConstantInt::get(Src->getType(), ShiftAmt)); +        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; +         +        // Mix it in. +        Elt = ConstantExpr::getOr(Elt, Src); +      } +      Result.push_back(Elt); +    } +  } else { +    // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) +    unsigned Ratio = NumDstElt/NumSrcElt; +    unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits(); +     +    // Loop over each source value, expanding into multiple results. +    for (unsigned i = 0; i != NumSrcElt; ++i) { +      Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i)); +      if (!Src)  // Reject constantexpr elements. +        return ConstantExpr::getBitCast(C, DestTy); +       +      unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); +      for (unsigned j = 0; j != Ratio; ++j) { +        // Shift the piece of the value into the right place, depending on +        // endianness. +        Constant *Elt = ConstantExpr::getLShr(Src,  +                                    ConstantInt::get(Src->getType(), ShiftAmt)); +        ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; +         +        // Truncate and remember this piece. +        Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); +      } +    } +  } +   +  return ConstantVector::get(Result.data(), Result.size()); +} + +  /// IsConstantOffsetFromGlobal - If this constant is actually a constant offset  /// from a global, return the global and the constant.  Because of  /// constantexprs, this function is recursive. @@ -103,6 +235,8 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,    assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&           "Out of range access"); +  // If this element is zero or undefined, we can just return since *CurPtr is +  // zero initialized.    
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))      return true; @@ -115,7 +249,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,      unsigned IntBytes = unsigned(CI->getBitWidth()/8);      for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { -      CurPtr[i] = (unsigned char)(Val >> ByteOffset * 8); +      CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));        ++ByteOffset;      }      return true; @@ -123,13 +257,14 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {      if (CFP->getType()->isDoubleTy()) { -      C = ConstantExpr::getBitCast(C, Type::getInt64Ty(C->getContext())); +      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);        return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);      }      if (CFP->getType()->isFloatTy()){ -      C = ConstantExpr::getBitCast(C, Type::getInt32Ty(C->getContext())); +      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);        return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);      } +    return false;    }    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) { @@ -161,6 +296,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,          return true;        // Move to the next element of the struct. +      CurPtr += NextEltOffset-CurEltOffset-ByteOffset;        BytesLeft -= NextEltOffset-CurEltOffset-ByteOffset;        ByteOffset = 0;        CurEltOffset = NextEltOffset; @@ -231,9 +367,9 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,      } else        return 0; -    C = ConstantExpr::getBitCast(C, MapTy); +    C = FoldBitCast(C, MapTy, TD);      if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD)) -      return ConstantExpr::getBitCast(Res, LoadTy); +      return FoldBitCast(Res, LoadTy, TD);      return 0;    } @@ -246,8 +382,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,      return 0;    GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal); -  if (!GV || !GV->isConstant() || !GV->hasInitializer() || -      !GV->hasDefinitiveInitializer() || +  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||        !GV->getInitializer()->getType()->isSized())      return 0; @@ -476,126 +611,11 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,    // If we ended up indexing a member with a type that doesn't match    // the type of what the original indices indexed, add a cast.    if (Ty != cast<PointerType>(ResultTy)->getElementType()) -    C = ConstantExpr::getBitCast(C, ResultTy); +    C = FoldBitCast(C, ResultTy, *TD);    return C;  } -/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with  -/// targetdata.  Return 0 if unfoldable. -static Constant *FoldBitCast(Constant *C, const Type *DestTy, -                             const TargetData &TD, LLVMContext &Context) { -  // If this is a bitcast from constant vector -> vector, fold it. -  if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) { -    if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) { -      // If the element types match, VMCore can fold it. 
-      unsigned NumDstElt = DestVTy->getNumElements(); -      unsigned NumSrcElt = CV->getNumOperands(); -      if (NumDstElt == NumSrcElt) -        return 0; -       -      const Type *SrcEltTy = CV->getType()->getElementType(); -      const Type *DstEltTy = DestVTy->getElementType(); -       -      // Otherwise, we're changing the number of elements in a vector, which  -      // requires endianness information to do the right thing.  For example, -      //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) -      // folds to (little endian): -      //    <4 x i32> <i32 0, i32 0, i32 1, i32 0> -      // and to (big endian): -      //    <4 x i32> <i32 0, i32 0, i32 0, i32 1> -       -      // First thing is first.  We only want to think about integer here, so if -      // we have something in FP form, recast it as integer. -      if (DstEltTy->isFloatingPoint()) { -        // Fold to an vector of integers with same size as our FP type. -        unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); -        const Type *DestIVTy = VectorType::get( -                                 IntegerType::get(Context, FPWidth), NumDstElt); -        // Recursively handle this integer conversion, if possible. -        C = FoldBitCast(C, DestIVTy, TD, Context); -        if (!C) return 0; -         -        // Finally, VMCore can handle this now that #elts line up. -        return ConstantExpr::getBitCast(C, DestTy); -      } -       -      // Okay, we know the destination is integer, if the input is FP, convert -      // it to integer first. -      if (SrcEltTy->isFloatingPoint()) { -        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); -        const Type *SrcIVTy = VectorType::get( -                                 IntegerType::get(Context, FPWidth), NumSrcElt); -        // Ask VMCore to do the conversion now that #elts line up. -        C = ConstantExpr::getBitCast(C, SrcIVTy); -        CV = dyn_cast<ConstantVector>(C); -        if (!CV) return 0;  // If VMCore wasn't able to fold it, bail out. -      } -       -      // Now we know that the input and output vectors are both integer vectors -      // of the same size, and that their #elements is not the same.  Do the -      // conversion here, which depends on whether the input or output has -      // more elements. -      bool isLittleEndian = TD.isLittleEndian(); -       -      SmallVector<Constant*, 32> Result; -      if (NumDstElt < NumSrcElt) { -        // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>) -        Constant *Zero = Constant::getNullValue(DstEltTy); -        unsigned Ratio = NumSrcElt/NumDstElt; -        unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); -        unsigned SrcElt = 0; -        for (unsigned i = 0; i != NumDstElt; ++i) { -          // Build each element of the result. -          Constant *Elt = Zero; -          unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); -          for (unsigned j = 0; j != Ratio; ++j) { -            Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++)); -            if (!Src) return 0;  // Reject constantexpr elements. -             -            // Zero extend the element to the right size. -            Src = ConstantExpr::getZExt(Src, Elt->getType()); -             -            // Shift it to the right place, depending on endianness. -            Src = ConstantExpr::getShl(Src,  -                             ConstantInt::get(Src->getType(), ShiftAmt)); -            ShiftAmt += isLittleEndian ? 
SrcBitSize : -SrcBitSize; -             -            // Mix it in. -            Elt = ConstantExpr::getOr(Elt, Src); -          } -          Result.push_back(Elt); -        } -      } else { -        // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) -        unsigned Ratio = NumDstElt/NumSrcElt; -        unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits(); -         -        // Loop over each source value, expanding into multiple results. -        for (unsigned i = 0; i != NumSrcElt; ++i) { -          Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i)); -          if (!Src) return 0;  // Reject constantexpr elements. - -          unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); -          for (unsigned j = 0; j != Ratio; ++j) { -            // Shift the piece of the value into the right place, depending on -            // endianness. -            Constant *Elt = ConstantExpr::getLShr(Src,  -                            ConstantInt::get(Src->getType(), ShiftAmt)); -            ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; - -            // Truncate and remember this piece. -            Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); -          } -        } -      } -       -      return ConstantVector::get(Result.data(), Result.size()); -    } -  } -   -  return 0; -}  //===----------------------------------------------------------------------===// @@ -721,11 +741,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,        if (TD &&            TD->getPointerSizeInBits() <=            CE->getType()->getScalarSizeInBits()) { -        if (CE->getOpcode() == Instruction::PtrToInt) { -          Constant *Input = CE->getOperand(0); -          Constant *C = FoldBitCast(Input, DestTy, *TD, Context); -          return C ? C : ConstantExpr::getBitCast(Input, DestTy); -        } +        if (CE->getOpcode() == Instruction::PtrToInt) +          return FoldBitCast(CE->getOperand(0), DestTy, *TD); +                  // If there's a constant offset added to the integer value before          // it is casted back to a pointer, see if the expression can be          // converted into a GEP. 
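Both copies of FoldBitCast shown above (the new one added at the top of ConstantFolding.cpp and the deleted local one) re-slice a constant vector across a different element count, and endianness only decides which piece of each source element comes first. A self-contained arithmetic sketch of the expanding case, using plain host integers instead of LLVM Constant nodes and the <2 x i64> -> <4 x i32> example from the comments:

```cpp
// Mirrors the expanding branch of FoldBitCast (NumDstElt > NumSrcElt): each
// 64-bit source element is split into Ratio = 64/32 = 2 pieces with a logical
// shift right plus truncation; only the starting shift amount and its
// direction depend on the target's endianness.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint32_t> split64to32(const std::vector<uint64_t> &Src,
                                         bool IsLittleEndian) {
  const unsigned Ratio = 2, DstBits = 32;
  std::vector<uint32_t> Dst;
  for (unsigned i = 0; i != Src.size(); ++i) {
    unsigned ShiftAmt = IsLittleEndian ? 0 : DstBits * (Ratio - 1);
    for (unsigned j = 0; j != Ratio; ++j) {
      Dst.push_back(uint32_t(Src[i] >> ShiftAmt));  // getLShr + getTrunc
      ShiftAmt += IsLittleEndian ? DstBits : -DstBits;
    }
  }
  return Dst;
}

int main() {
  std::vector<uint64_t> Src;
  Src.push_back(0);
  Src.push_back(1);
  // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) folds to
  //   <i32 0, i32 0, i32 1, i32 0>  (little endian)
  //   <i32 0, i32 0, i32 0, i32 1>  (big endian)
  std::vector<uint32_t> LE = split64to32(Src, true);
  std::vector<uint32_t> BE = split64to32(Src, false);
  for (unsigned i = 0; i != LE.size(); ++i)
    std::printf("elt %u: LE=%u BE=%u\n", i, (unsigned)LE[i], (unsigned)BE[i]);
  return 0;
}
```

Compiled and run, this prints 0, 0, 1, 0 for the little-endian case and 0, 0, 0, 1 for the big-endian case, matching the folded vectors quoted in the comment above.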
@@ -771,8 +789,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,        return ConstantExpr::getCast(Opcode, Ops[0], DestTy);    case Instruction::BitCast:      if (TD) -      if (Constant *C = FoldBitCast(Ops[0], DestTy, *TD, Context)) -        return C; +      return FoldBitCast(Ops[0], DestTy, *TD);      return ConstantExpr::getBitCast(Ops[0], DestTy);    case Instruction::Select:      return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); diff --git a/lib/Analysis/DbgInfoPrinter.cpp b/lib/Analysis/DbgInfoPrinter.cpp index 2bbe2e0ecb4f..ab92e3f9bd52 100644 --- a/lib/Analysis/DbgInfoPrinter.cpp +++ b/lib/Analysis/DbgInfoPrinter.cpp @@ -35,7 +35,7 @@ PrintDirectory("print-fullpath",                 cl::Hidden);  namespace { -  class VISIBILITY_HIDDEN PrintDbgInfo : public FunctionPass { +  class PrintDbgInfo : public FunctionPass {      raw_ostream &Out;      void printStopPoint(const DbgStopPointInst *DSI);      void printFuncStart(const DbgFuncStartInst *FS); diff --git a/lib/Analysis/DebugInfo.cpp b/lib/Analysis/DebugInfo.cpp index 7bb7e9b4af2d..7bff11ec5b45 100644 --- a/lib/Analysis/DebugInfo.cpp +++ b/lib/Analysis/DebugInfo.cpp @@ -84,8 +84,11 @@ DIDescriptor::getStringField(unsigned Elt) const {      return NULL;    if (Elt < DbgNode->getNumElements()) -    if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getElement(Elt))) +    if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getElement(Elt))) { +      if (MDS->getLength() == 0) +        return NULL;        return MDS->getString().data(); +    }    return NULL;  } @@ -398,10 +401,10 @@ bool DIVariable::Verify() const {  /// getOriginalTypeSize - If this type is derived from a base type then  /// return base type size.  uint64_t DIDerivedType::getOriginalTypeSize() const { -  if (getTag() != dwarf::DW_TAG_member) -    return getSizeInBits();    DIType BT = getTypeDerivedFrom(); -  if (BT.getTag() != dwarf::DW_TAG_base_type) +  if (!BT.isNull() && BT.isDerivedType()) +    return DIDerivedType(BT.getNode()).getOriginalTypeSize(); +  if (BT.isNull())      return getSizeInBits();    return BT.getSizeInBits();  } @@ -695,6 +698,32 @@ DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,    return DIBasicType(MDNode::get(VMContext, &Elts[0], 10));  } + +/// CreateBasicType - Create a basic type like int, float, etc. +DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context, +                                         StringRef Name, +                                         DICompileUnit CompileUnit, +                                         unsigned LineNumber, +                                         Constant *SizeInBits, +                                         Constant *AlignInBits, +                                         Constant *OffsetInBits, unsigned Flags, +                                         unsigned Encoding) { +  Value *Elts[] = { +    GetTagConstant(dwarf::DW_TAG_base_type), +    Context.getNode(), +    MDString::get(VMContext, Name), +    CompileUnit.getNode(), +    ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), +    SizeInBits, +    AlignInBits, +    OffsetInBits, +    ConstantInt::get(Type::getInt32Ty(VMContext), Flags), +    ConstantInt::get(Type::getInt32Ty(VMContext), Encoding) +  }; +  return DIBasicType(MDNode::get(VMContext, &Elts[0], 10)); +} + +  /// CreateDerivedType - Create a derived type like const qualified type,  /// pointer, typedef, etc.  
DIDerivedType DIFactory::CreateDerivedType(unsigned Tag, @@ -722,6 +751,35 @@ DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,    return DIDerivedType(MDNode::get(VMContext, &Elts[0], 10));  } + +/// CreateDerivedType - Create a derived type like const qualified type, +/// pointer, typedef, etc. +DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag, +                                             DIDescriptor Context, +                                             StringRef Name, +                                             DICompileUnit CompileUnit, +                                             unsigned LineNumber, +                                             Constant *SizeInBits, +                                             Constant *AlignInBits, +                                             Constant *OffsetInBits, +                                             unsigned Flags, +                                             DIType DerivedFrom) { +  Value *Elts[] = { +    GetTagConstant(Tag), +    Context.getNode(), +    MDString::get(VMContext, Name), +    CompileUnit.getNode(), +    ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), +    SizeInBits, +    AlignInBits, +    OffsetInBits, +    ConstantInt::get(Type::getInt32Ty(VMContext), Flags), +    DerivedFrom.getNode(), +  }; +  return DIDerivedType(MDNode::get(VMContext, &Elts[0], 10)); +} + +  /// CreateCompositeType - Create a composite type like array, struct, etc.  DICompositeType DIFactory::CreateCompositeType(unsigned Tag,                                                 DIDescriptor Context, @@ -754,6 +812,38 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,  } +/// CreateCompositeType - Create a composite type like array, struct, etc. +DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag, +                                                 DIDescriptor Context, +                                                 StringRef Name, +                                                 DICompileUnit CompileUnit, +                                                 unsigned LineNumber, +                                                 Constant *SizeInBits, +                                                 Constant *AlignInBits, +                                                 Constant *OffsetInBits, +                                                 unsigned Flags, +                                                 DIType DerivedFrom, +                                                 DIArray Elements, +                                                 unsigned RuntimeLang) { + +  Value *Elts[] = { +    GetTagConstant(Tag), +    Context.getNode(), +    MDString::get(VMContext, Name), +    CompileUnit.getNode(), +    ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber), +    SizeInBits, +    AlignInBits, +    OffsetInBits, +    ConstantInt::get(Type::getInt32Ty(VMContext), Flags), +    DerivedFrom.getNode(), +    Elements.getNode(), +    ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang) +  }; +  return DICompositeType(MDNode::get(VMContext, &Elts[0], 12)); +} + +  /// CreateSubprogram - Create a new descriptor for the specified subprogram.  /// See comments in DISubprogram for descriptions of these fields.  This  /// method does not unique the generated descriptors. @@ -1217,9 +1307,10 @@ namespace llvm {        // Look for the bitcast.        
for (Value::use_const_iterator I = V->use_begin(), E =V->use_end();              I != E; ++I) -        if (isa<BitCastInst>(I)) -          return findDbgDeclare(*I, false); - +        if (isa<BitCastInst>(I)) { +          const DbgDeclareInst *DDI = findDbgDeclare(*I, false); +          if (DDI) return DDI; +        }        return 0;      } diff --git a/lib/Analysis/IPA/Andersens.cpp b/lib/Analysis/IPA/Andersens.cpp index 1c9159dfbfcc..17f304c02119 100644 --- a/lib/Analysis/IPA/Andersens.cpp +++ b/lib/Analysis/IPA/Andersens.cpp @@ -59,12 +59,11 @@  #include "llvm/Instructions.h"  #include "llvm/Module.h"  #include "llvm/Pass.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/InstIterator.h"  #include "llvm/Support/InstVisitor.h"  #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/MallocHelper.h" +#include "llvm/Analysis/MemoryBuiltins.h"  #include "llvm/Analysis/Passes.h"  #include "llvm/Support/Debug.h"  #include "llvm/System/Atomic.h" @@ -126,8 +125,8 @@ namespace {      static bool isPod() { return true; }    }; -  class VISIBILITY_HIDDEN Andersens : public ModulePass, public AliasAnalysis, -                                      private InstVisitor<Andersens> { +  class Andersens : public ModulePass, public AliasAnalysis, +                    private InstVisitor<Andersens> {      struct Node;      /// Constraint - Objects of this structure are used to represent the various @@ -594,11 +593,12 @@ namespace {      void visitReturnInst(ReturnInst &RI);      void visitInvokeInst(InvokeInst &II) { visitCallSite(CallSite(&II)); }      void visitCallInst(CallInst &CI) {  -      if (isMalloc(&CI)) visitAllocationInst(CI); +      if (isMalloc(&CI)) visitAlloc(CI);        else visitCallSite(CallSite(&CI));       }      void visitCallSite(CallSite CS); -    void visitAllocationInst(Instruction &I); +    void visitAllocaInst(AllocaInst &I); +    void visitAlloc(Instruction &I);      void visitLoadInst(LoadInst &LI);      void visitStoreInst(StoreInst &SI);      void visitGetElementPtrInst(GetElementPtrInst &GEP); @@ -792,7 +792,7 @@ void Andersens::IdentifyObjects(Module &M) {        // object.        if (isa<PointerType>(II->getType())) {          ValueNodes[&*II] = NumObjects++; -        if (AllocationInst *AI = dyn_cast<AllocationInst>(&*II)) +        if (AllocaInst *AI = dyn_cast<AllocaInst>(&*II))            ObjectNodes[AI] = NumObjects++;          else if (isMalloc(&*II))            ObjectNodes[&*II] = NumObjects++; @@ -1016,6 +1016,8 @@ bool Andersens::AnalyzeUsesOfFunction(Value *V) {        }      } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {        if (AnalyzeUsesOfFunction(GEP)) return true; +    } else if (isFreeCall(*UI)) { +      return false;      } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {        // Make sure that this is just the function being called, not that it is        // passing into the function. @@ -1037,8 +1039,6 @@ bool Andersens::AnalyzeUsesOfFunction(Value *V) {      } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {        if (!isa<ConstantPointerNull>(ICI->getOperand(1)))          return true;  // Allow comparison against null. 
-    } else if (isa<FreeInst>(*UI)) { -      return false;      } else {        return true;      } @@ -1156,7 +1156,6 @@ void Andersens::visitInstruction(Instruction &I) {    case Instruction::Switch:    case Instruction::Unwind:    case Instruction::Unreachable: -  case Instruction::Free:    case Instruction::ICmp:    case Instruction::FCmp:      return; @@ -1167,7 +1166,11 @@ void Andersens::visitInstruction(Instruction &I) {    }  } -void Andersens::visitAllocationInst(Instruction &I) { +void Andersens::visitAllocaInst(AllocaInst &I) { +  visitAlloc(I); +} + +void Andersens::visitAlloc(Instruction &I) {    unsigned ObjectIndex = getObject(&I);    GraphNodes[ObjectIndex].setValue(&I);    Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(I), @@ -2819,7 +2822,7 @@ void Andersens::PrintNode(const Node *N) const {    else      errs() << "(unnamed)"; -  if (isa<GlobalValue>(V) || isa<AllocationInst>(V) || isMalloc(V)) +  if (isa<GlobalValue>(V) || isa<AllocaInst>(V) || isMalloc(V))      if (N == &GraphNodes[getObject(V)])        errs() << "<mem>";  } diff --git a/lib/Analysis/IPA/CallGraph.cpp b/lib/Analysis/IPA/CallGraph.cpp index e2b288d1ba96..9cd8bb8c2df1 100644 --- a/lib/Analysis/IPA/CallGraph.cpp +++ b/lib/Analysis/IPA/CallGraph.cpp @@ -17,7 +17,6 @@  #include "llvm/Instructions.h"  #include "llvm/IntrinsicInst.h"  #include "llvm/Support/CallSite.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/raw_ostream.h"  using namespace llvm; @@ -26,7 +25,7 @@ namespace {  //===----------------------------------------------------------------------===//  // BasicCallGraph class definition  // -class VISIBILITY_HIDDEN BasicCallGraph : public CallGraph, public ModulePass { +class BasicCallGraph : public CallGraph, public ModulePass {    // Root is root of the call graph, or the external node if a 'main' function    // couldn't be found.    // diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp index 7949288340a8..ddd6ff9bd825 100644 --- a/lib/Analysis/IPA/GlobalsModRef.cpp +++ b/lib/Analysis/IPA/GlobalsModRef.cpp @@ -23,8 +23,7 @@  #include "llvm/DerivedTypes.h"  #include "llvm/Analysis/AliasAnalysis.h"  #include "llvm/Analysis/CallGraph.h" -#include "llvm/Analysis/MallocHelper.h" -#include "llvm/Support/Compiler.h" +#include "llvm/Analysis/MemoryBuiltins.h"  #include "llvm/Support/CommandLine.h"  #include "llvm/Support/InstIterator.h"  #include "llvm/ADT/Statistic.h" @@ -44,7 +43,7 @@ namespace {    /// function in the program.  Later, the entries for these functions are    /// removed if the function is found to call an external function (in which    /// case we know nothing about it. -  struct VISIBILITY_HIDDEN FunctionRecord { +  struct FunctionRecord {      /// GlobalInfo - Maintain mod/ref info for all of the globals without      /// addresses taken that are read or written (transitively) by this      /// function. @@ -69,8 +68,7 @@ namespace {    };    /// GlobalsModRef - The actual analysis pass. -  class VISIBILITY_HIDDEN GlobalsModRef -      : public ModulePass, public AliasAnalysis { +  class GlobalsModRef : public ModulePass, public AliasAnalysis {      /// NonAddressTakenGlobals - The globals that do not have their addresses      /// taken.      
std::set<GlobalValue*> NonAddressTakenGlobals; @@ -240,6 +238,8 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,      } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {        if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))          return true; +    } else if (isFreeCall(*UI)) { +      Writers.push_back(cast<Instruction>(*UI)->getParent()->getParent());      } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {        // Make sure that this is just the function being called, not that it is        // passing into the function. @@ -261,8 +261,6 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,      } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {        if (!isa<ConstantPointerNull>(ICI->getOperand(1)))          return true;  // Allow comparison against null. -    } else if (FreeInst *F = dyn_cast<FreeInst>(*UI)) { -      Writers.push_back(F->getParent()->getParent());      } else {        return true;      } @@ -439,7 +437,8 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {            if (cast<StoreInst>(*II).isVolatile())              // Treat volatile stores as reading memory somewhere.              FunctionEffect |= Ref; -        } else if (isMalloc(&cast<Instruction>(*II)) || isa<FreeInst>(*II)) { +        } else if (isMalloc(&cast<Instruction>(*II)) || +                   isFreeCall(&cast<Instruction>(*II))) {            FunctionEffect |= ModRef;          } diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index b833baacedef..bd9377bf87fb 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -31,6 +31,9 @@ unsigned InlineCostAnalyzer::FunctionInfo::        // Eliminating a switch is a big win, proportional to the number of edges        // deleted.        Reduction += (SI->getNumSuccessors()-1) * 40; +    else if (isa<IndirectBrInst>(*UI)) +      // Eliminating an indirect branch is a big win. +      Reduction += 200;      else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {        // Turning an indirect call into a direct call is a BIG win        Reduction += CI->getCalledValue() == V ? 500 : 0; @@ -50,7 +53,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::        // Unfortunately, we don't know the pointer that may get propagated here,        // so we can't make this decision.        if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() || -          isa<AllocationInst>(Inst))  +          isa<AllocaInst>(Inst))           continue;        bool AllOperandsConstant = true; @@ -130,10 +133,6 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {          NumInsts += InlineConstants::CallPenalty;      } -    // These, too, are calls. -    if (isa<FreeInst>(II)) -      NumInsts += InlineConstants::CallPenalty; -      if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {        if (!AI->isStaticAlloca())          this->usesDynamicAlloca = true; @@ -147,19 +146,26 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {        if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||             isa<PtrToIntInst>(CI))          continue; -    } else if (const GetElementPtrInst *GEPI = -               dyn_cast<GetElementPtrInst>(II)) { +    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){        // If a GEP has all constant indices, it will probably be folded with        // a load/store.        
if (GEPI->hasAllConstantIndices())          continue;      } -    if (isa<ReturnInst>(II)) -      ++NumRets; -          ++NumInsts;    } +   +  if (isa<ReturnInst>(BB->getTerminator())) +    ++NumRets; +   +  // We never want to inline functions that contain an indirectbr.  This is +  // incorrect because all the blockaddress's (in static global initializers +  // for example) would be referring to the original function, and this indirect +  // jump would jump from the inlined copy of the function into the original +  // function which is extremely undefined behavior. +  if (isa<IndirectBrInst>(BB->getTerminator())) +    NeverInline = true;  }  /// analyzeFunction - Fill in the current structure with information gleaned diff --git a/lib/Analysis/InstCount.cpp b/lib/Analysis/InstCount.cpp index 4cde79357728..a4b041f02a3a 100644 --- a/lib/Analysis/InstCount.cpp +++ b/lib/Analysis/InstCount.cpp @@ -15,7 +15,6 @@  #include "llvm/Analysis/Passes.h"  #include "llvm/Pass.h"  #include "llvm/Function.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/InstVisitor.h"  #include "llvm/Support/raw_ostream.h" @@ -34,8 +33,7 @@ STATISTIC(TotalMemInst, "Number of memory instructions");  namespace { -  class VISIBILITY_HIDDEN InstCount  -      : public FunctionPass, public InstVisitor<InstCount> { +  class InstCount : public FunctionPass, public InstVisitor<InstCount> {      friend class InstVisitor<InstCount>;      void visitFunction  (Function &F) { ++TotalFuncs; } @@ -76,11 +74,11 @@ FunctionPass *llvm::createInstCountPass() { return new InstCount(); }  bool InstCount::runOnFunction(Function &F) {    unsigned StartMemInsts =      NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst + -    NumInvokeInst + NumAllocaInst + NumFreeInst; +    NumInvokeInst + NumAllocaInst;    visit(F);    unsigned EndMemInsts =      NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst + -    NumInvokeInst + NumAllocaInst + NumFreeInst; +    NumInvokeInst + NumAllocaInst;    TotalMemInst += EndMemInsts-StartMemInsts;    return false;  } diff --git a/lib/Analysis/MallocHelper.cpp b/lib/Analysis/MallocHelper.cpp deleted file mode 100644 index e7bb41eeec28..000000000000 --- a/lib/Analysis/MallocHelper.cpp +++ /dev/null @@ -1,265 +0,0 @@ -//===-- MallocHelper.cpp - Functions to identify malloc calls -------------===// -// -//                     The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This family of functions identifies calls to malloc, bitcasts of malloc -// calls, and the types and array sizes associated with them. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Analysis/MallocHelper.h" -#include "llvm/Constants.h" -#include "llvm/Instructions.h" -#include "llvm/Module.h" -#include "llvm/Analysis/ConstantFolding.h" -using namespace llvm; - -//===----------------------------------------------------------------------===// -//  malloc Call Utility Functions. -// - -/// isMalloc - Returns true if the the value is either a malloc call or a -/// bitcast of the result of a malloc call. 
-bool llvm::isMalloc(const Value* I) { -  return extractMallocCall(I) || extractMallocCallFromBitCast(I); -} - -static bool isMallocCall(const CallInst *CI) { -  if (!CI) -    return false; - -  const Module* M = CI->getParent()->getParent()->getParent(); -  Function *MallocFunc = M->getFunction("malloc"); - -  if (CI->getOperand(0) != MallocFunc) -    return false; - -  // Check malloc prototype. -  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin  -  // attribute will exist. -  const FunctionType *FTy = MallocFunc->getFunctionType(); -  if (FTy->getNumParams() != 1) -    return false; -  if (IntegerType *ITy = dyn_cast<IntegerType>(FTy->param_begin()->get())) { -    if (ITy->getBitWidth() != 32 && ITy->getBitWidth() != 64) -      return false; -    return true; -  } - -  return false; -} - -/// extractMallocCall - Returns the corresponding CallInst if the instruction -/// is a malloc call.  Since CallInst::CreateMalloc() only creates calls, we -/// ignore InvokeInst here. -const CallInst* llvm::extractMallocCall(const Value* I) { -  const CallInst *CI = dyn_cast<CallInst>(I); -  return (isMallocCall(CI)) ? CI : NULL; -} - -CallInst* llvm::extractMallocCall(Value* I) { -  CallInst *CI = dyn_cast<CallInst>(I); -  return (isMallocCall(CI)) ? CI : NULL; -} - -static bool isBitCastOfMallocCall(const BitCastInst* BCI) { -  if (!BCI) -    return false; -     -  return isMallocCall(dyn_cast<CallInst>(BCI->getOperand(0))); -} - -/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the -/// instruction is a bitcast of the result of a malloc call. -CallInst* llvm::extractMallocCallFromBitCast(Value* I) { -  BitCastInst *BCI = dyn_cast<BitCastInst>(I); -  return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0)) -                                      : NULL; -} - -const CallInst* llvm::extractMallocCallFromBitCast(const Value* I) { -  const BitCastInst *BCI = dyn_cast<BitCastInst>(I); -  return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0)) -                                      : NULL; -} - -static bool isArrayMallocHelper(const CallInst *CI, LLVMContext &Context, -                                const TargetData* TD) { -  if (!CI) -    return false; - -  const Type* T = getMallocAllocatedType(CI); - -  // We can only indentify an array malloc if we know the type of the malloc  -  // call. -  if (!T) return false; - -  Value* MallocArg = CI->getOperand(1); -  Constant *ElementSize = ConstantExpr::getSizeOf(T); -  ElementSize = ConstantExpr::getTruncOrBitCast(ElementSize,  -                                                MallocArg->getType()); -  Constant *FoldedElementSize = ConstantFoldConstantExpression( -                                       cast<ConstantExpr>(ElementSize),  -                                       Context, TD); - - -  if (isa<ConstantExpr>(MallocArg)) -    return (MallocArg != ElementSize); - -  BinaryOperator *BI = dyn_cast<BinaryOperator>(MallocArg); -  if (!BI) -    return false; - -  if (BI->getOpcode() == Instruction::Mul) -    // ArraySize * ElementSize -    if (BI->getOperand(1) == ElementSize || -        (FoldedElementSize && BI->getOperand(1) == FoldedElementSize)) -      return true; - -  // TODO: Detect case where MallocArg mul has been transformed to shl. - -  return false; -} - -/// isArrayMalloc - Returns the corresponding CallInst if the instruction  -/// matches the malloc call IR generated by CallInst::CreateMalloc().  
This  -/// means that it is a malloc call with one bitcast use AND the malloc call's  -/// size argument is: -///  1. a constant not equal to the size of the malloced type -/// or -///  2. the result of a multiplication by the size of the malloced type -/// Otherwise it returns NULL. -/// The unique bitcast is needed to determine the type/size of the array -/// allocation. -CallInst* llvm::isArrayMalloc(Value* I, LLVMContext &Context, -                              const TargetData* TD) { -  CallInst *CI = extractMallocCall(I); -  return (isArrayMallocHelper(CI, Context, TD)) ? CI : NULL; -} - -const CallInst* llvm::isArrayMalloc(const Value* I, LLVMContext &Context, -                                    const TargetData* TD) { -  const CallInst *CI = extractMallocCall(I); -  return (isArrayMallocHelper(CI, Context, TD)) ? CI : NULL; -} - -/// getMallocType - Returns the PointerType resulting from the malloc call. -/// This PointerType is the result type of the call's only bitcast use. -/// If there is no unique bitcast use, then return NULL. -const PointerType* llvm::getMallocType(const CallInst* CI) { -  assert(isMalloc(CI) && "GetMallocType and not malloc call"); -   -  const BitCastInst* BCI = NULL; -   -  // Determine if CallInst has a bitcast use. -  for (Value::use_const_iterator UI = CI->use_begin(), E = CI->use_end(); -       UI != E; ) -    if ((BCI = dyn_cast<BitCastInst>(cast<Instruction>(*UI++)))) -      break; - -  // Malloc call has 1 bitcast use and no other uses, so type is the bitcast's -  // destination type. -  if (BCI && CI->hasOneUse()) -    return cast<PointerType>(BCI->getDestTy()); - -  // Malloc call was not bitcast, so type is the malloc function's return type. -  if (!BCI) -    return cast<PointerType>(CI->getType()); - -  // Type could not be determined. -  return NULL; -} - -/// getMallocAllocatedType - Returns the Type allocated by malloc call. This -/// Type is the result type of the call's only bitcast use. If there is no -/// unique bitcast use, then return NULL. -const Type* llvm::getMallocAllocatedType(const CallInst* CI) { -  const PointerType* PT = getMallocType(CI); -  return PT ? PT->getElementType() : NULL; -} - -/// isSafeToGetMallocArraySize - Returns true if the array size of a malloc can -/// be determined.  It can be determined in these 3 cases of malloc codegen: -/// 1. non-array malloc: The malloc's size argument is a constant and equals the ///    size of the type being malloced. -/// 2. array malloc: This is a malloc call with one bitcast use AND the malloc -///    call's size argument is a constant multiple of the size of the malloced -///    type. -/// 3. array malloc: This is a malloc call with one bitcast use AND the malloc -///    call's size argument is the result of a multiplication by the size of the -///    malloced type. -/// Otherwise returns false. -static bool isSafeToGetMallocArraySize(const CallInst *CI, -                                       LLVMContext &Context, -                                       const TargetData* TD) { -  if (!CI) -    return false; - -  // Type must be known to determine array size. -  const Type* T = getMallocAllocatedType(CI); -  if (!T) return false; - -  Value* MallocArg = CI->getOperand(1); -  Constant *ElementSize = ConstantExpr::getSizeOf(T); -  ElementSize = ConstantExpr::getTruncOrBitCast(ElementSize,  -                                                MallocArg->getType()); - -  // First, check if it is a non-array malloc. 
-  if (isa<ConstantExpr>(MallocArg) && (MallocArg == ElementSize)) -    return true; - -  // Second, check if it can be determined that this is an array malloc. -  return isArrayMallocHelper(CI, Context, TD); -} - -/// isConstantOne - Return true only if val is constant int 1. -static bool isConstantOne(Value *val) { -  return isa<ConstantInt>(val) && cast<ConstantInt>(val)->isOne(); -} - -/// getMallocArraySize - Returns the array size of a malloc call.  For array -/// mallocs, the size is computated in 1 of 3 ways: -///  1. If the element type is of size 1, then array size is the argument to  -///     malloc. -///  2. Else if the malloc's argument is a constant, the array size is that -///     argument divided by the element type's size. -///  3. Else the malloc argument must be a multiplication and the array size is -///     the first operand of the multiplication. -/// For non-array mallocs, the computed size is constant 1.  -/// This function returns NULL for all mallocs whose array size cannot be -/// determined. -Value* llvm::getMallocArraySize(CallInst* CI, LLVMContext &Context, -                                const TargetData* TD) { -  if (!isSafeToGetMallocArraySize(CI, Context, TD)) -    return NULL; - -  // Match CreateMalloc's use of constant 1 array-size for non-array mallocs. -  if (!isArrayMalloc(CI, Context, TD)) -    return ConstantInt::get(CI->getOperand(1)->getType(), 1); - -  Value* MallocArg = CI->getOperand(1); -  assert(getMallocAllocatedType(CI) && "getMallocArraySize and no type"); -  Constant *ElementSize = ConstantExpr::getSizeOf(getMallocAllocatedType(CI)); -  ElementSize = ConstantExpr::getTruncOrBitCast(ElementSize,  -                                                MallocArg->getType()); - -  Constant* CO = dyn_cast<Constant>(MallocArg); -  BinaryOperator* BO = dyn_cast<BinaryOperator>(MallocArg); -  assert((isConstantOne(ElementSize) || CO || BO) && -         "getMallocArraySize and malformed malloc IR"); -       -  if (isConstantOne(ElementSize)) -    return MallocArg; -     -  if (CO) -    return CO->getOperand(0); -     -  // TODO: Detect case where MallocArg mul has been transformed to shl. - -  assert(BO && "getMallocArraySize not constant but not multiplication either"); -  return BO->getOperand(0); -} diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp new file mode 100644 index 000000000000..e710350fa063 --- /dev/null +++ b/lib/Analysis/MemoryBuiltins.cpp @@ -0,0 +1,277 @@ +//===------ MemoryBuiltins.cpp - Identify calls to memory builtins --------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This family of functions identifies calls to builtin functions that allocate +// or free memory.   +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/MemoryBuiltins.h" +#include "llvm/Constants.h" +#include "llvm/Instructions.h" +#include "llvm/Module.h" +#include "llvm/Analysis/ConstantFolding.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +//  malloc Call Utility Functions. +// + +/// isMalloc - Returns true if the value is either a malloc call or a +/// bitcast of the result of a malloc call. 
+bool llvm::isMalloc(const Value *I) { +  return extractMallocCall(I) || extractMallocCallFromBitCast(I); +} + +static bool isMallocCall(const CallInst *CI) { +  if (!CI) +    return false; + +  Function *Callee = CI->getCalledFunction(); +  if (Callee == 0 || !Callee->isDeclaration() || Callee->getName() != "malloc") +    return false; + +  // Check malloc prototype. +  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin  +  // attribute will exist. +  const FunctionType *FTy = Callee->getFunctionType(); +  if (FTy->getNumParams() != 1) +    return false; +  if (IntegerType *ITy = dyn_cast<IntegerType>(FTy->param_begin()->get())) { +    if (ITy->getBitWidth() != 32 && ITy->getBitWidth() != 64) +      return false; +    return true; +  } + +  return false; +} + +/// extractMallocCall - Returns the corresponding CallInst if the instruction +/// is a malloc call.  Since CallInst::CreateMalloc() only creates calls, we +/// ignore InvokeInst here. +const CallInst *llvm::extractMallocCall(const Value *I) { +  const CallInst *CI = dyn_cast<CallInst>(I); +  return (isMallocCall(CI)) ? CI : NULL; +} + +CallInst *llvm::extractMallocCall(Value *I) { +  CallInst *CI = dyn_cast<CallInst>(I); +  return (isMallocCall(CI)) ? CI : NULL; +} + +static bool isBitCastOfMallocCall(const BitCastInst *BCI) { +  if (!BCI) +    return false; +     +  return isMallocCall(dyn_cast<CallInst>(BCI->getOperand(0))); +} + +/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the +/// instruction is a bitcast of the result of a malloc call. +CallInst *llvm::extractMallocCallFromBitCast(Value *I) { +  BitCastInst *BCI = dyn_cast<BitCastInst>(I); +  return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0)) +                                      : NULL; +} + +const CallInst *llvm::extractMallocCallFromBitCast(const Value *I) { +  const BitCastInst *BCI = dyn_cast<BitCastInst>(I); +  return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0)) +                                      : NULL; +} + +/// isConstantOne - Return true only if val is constant int 1. +static bool isConstantOne(Value *val) { +  return isa<ConstantInt>(val) && cast<ConstantInt>(val)->isOne(); +} + +static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context, +                                  const TargetData *TD) { +  if (!CI) +    return NULL; + +  // Type must be known to determine array size. +  const Type *T = getMallocAllocatedType(CI); +  if (!T) +    return NULL; + +  Value *MallocArg = CI->getOperand(1); +  ConstantExpr *CO = dyn_cast<ConstantExpr>(MallocArg); +  BinaryOperator *BO = dyn_cast<BinaryOperator>(MallocArg); + +  Constant *ElementSize = ConstantExpr::getSizeOf(T); +  ElementSize = ConstantExpr::getTruncOrBitCast(ElementSize,  +                                                MallocArg->getType()); +  Constant *FoldedElementSize = +   ConstantFoldConstantExpression(cast<ConstantExpr>(ElementSize), Context, TD); + +  // First, check if CI is a non-array malloc. +  if (CO && ((CO == ElementSize) || +             (FoldedElementSize && (CO == FoldedElementSize)))) +    // Match CreateMalloc's use of constant 1 array-size for non-array mallocs. +    return ConstantInt::get(MallocArg->getType(), 1); + +  // Second, check if CI is an array malloc whose array size can be determined. 
+  if (isConstantOne(ElementSize) ||  +      (FoldedElementSize && isConstantOne(FoldedElementSize))) +    return MallocArg; + +  if (!CO && !BO) +    return NULL; + +  Value *Op0 = NULL; +  Value *Op1 = NULL; +  unsigned Opcode = 0; +  if (CO && ((CO->getOpcode() == Instruction::Mul) ||  +             (CO->getOpcode() == Instruction::Shl))) { +    Op0 = CO->getOperand(0); +    Op1 = CO->getOperand(1); +    Opcode = CO->getOpcode(); +  } +  if (BO && ((BO->getOpcode() == Instruction::Mul) ||  +             (BO->getOpcode() == Instruction::Shl))) { +    Op0 = BO->getOperand(0); +    Op1 = BO->getOperand(1); +    Opcode = BO->getOpcode(); +  } + +  // Determine array size if malloc's argument is the product of a mul or shl. +  if (Op0) { +    if (Opcode == Instruction::Mul) { +      if ((Op1 == ElementSize) || +          (FoldedElementSize && (Op1 == FoldedElementSize))) +        // ArraySize * ElementSize +        return Op0; +      if ((Op0 == ElementSize) || +          (FoldedElementSize && (Op0 == FoldedElementSize))) +        // ElementSize * ArraySize +        return Op1; +    } +    if (Opcode == Instruction::Shl) { +      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); +      if (!Op1CI) return NULL; +       +      APInt Op1Int = Op1CI->getValue(); +      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); +      Value *Op1Pow = ConstantInt::get(Context,  +                                  APInt(Op1Int.getBitWidth(), 0).set(BitToSet)); +      if (Op0 == ElementSize || (FoldedElementSize && Op0 == FoldedElementSize)) +        // ElementSize << log2(ArraySize) +        return Op1Pow; +      if (Op1Pow == ElementSize || +          (FoldedElementSize && Op1Pow == FoldedElementSize)) +        // ArraySize << log2(ElementSize) +        return Op0; +    } +  } + +  // We could not determine the malloc array size from MallocArg. +  return NULL; +} + +/// isArrayMalloc - Returns the corresponding CallInst if the instruction  +/// is a call to malloc whose array size can be determined and the array size +/// is not constant 1.  Otherwise, return NULL. +CallInst *llvm::isArrayMalloc(Value *I, LLVMContext &Context, +                              const TargetData *TD) { +  CallInst *CI = extractMallocCall(I); +  Value *ArraySize = isArrayMallocHelper(CI, Context, TD); + +  if (ArraySize && +      ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1)) +    return CI; + +  // CI is a non-array malloc or we can't figure out that it is an array malloc. +  return NULL; +} + +const CallInst *llvm::isArrayMalloc(const Value *I, LLVMContext &Context, +                                    const TargetData *TD) { +  const CallInst *CI = extractMallocCall(I); +  Value *ArraySize = isArrayMallocHelper(CI, Context, TD); + +  if (ArraySize && +      ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1)) +    return CI; + +  // CI is a non-array malloc or we can't figure out that it is an array malloc. +  return NULL; +} + +/// getMallocType - Returns the PointerType resulting from the malloc call. +/// This PointerType is the result type of the call's only bitcast use. +/// If there is no unique bitcast use, then return NULL. +const PointerType *llvm::getMallocType(const CallInst *CI) { +  assert(isMalloc(CI) && "GetMallocType and not malloc call"); +   +  const BitCastInst *BCI = NULL; +   +  // Determine if CallInst has a bitcast use. 
+  for (Value::use_const_iterator UI = CI->use_begin(), E = CI->use_end(); +       UI != E; ) +    if ((BCI = dyn_cast<BitCastInst>(cast<Instruction>(*UI++)))) +      break; + +  // Malloc call has 1 bitcast use and no other uses, so type is the bitcast's +  // destination type. +  if (BCI && CI->hasOneUse()) +    return cast<PointerType>(BCI->getDestTy()); + +  // Malloc call was not bitcast, so type is the malloc function's return type. +  if (!BCI) +    return cast<PointerType>(CI->getType()); + +  // Type could not be determined. +  return NULL; +} + +/// getMallocAllocatedType - Returns the Type allocated by malloc call. This +/// Type is the result type of the call's only bitcast use. If there is no +/// unique bitcast use, then return NULL. +const Type *llvm::getMallocAllocatedType(const CallInst *CI) { +  const PointerType *PT = getMallocType(CI); +  return PT ? PT->getElementType() : NULL; +} + +/// getMallocArraySize - Returns the array size of a malloc call.  If the  +/// argument passed to malloc is a multiple of the size of the malloced type, +/// then return that multiple.  For non-array mallocs, the multiple is +/// constant 1.  Otherwise, return NULL for mallocs whose array size cannot be +/// determined. +Value *llvm::getMallocArraySize(CallInst *CI, LLVMContext &Context, +                                const TargetData *TD) { +  return isArrayMallocHelper(CI, Context, TD); +} + +//===----------------------------------------------------------------------===// +//  free Call Utility Functions. +// + +/// isFreeCall - Returns true if the value is a call to the builtin free() +bool llvm::isFreeCall(const Value *I) { +  const CallInst *CI = dyn_cast<CallInst>(I); +  if (!CI) +    return false; +  Function *Callee = CI->getCalledFunction(); +  if (Callee == 0 || !Callee->isDeclaration() || Callee->getName() != "free") +    return false; + +  // Check free prototype. +  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin  +  // attribute will exist. 
+  const FunctionType *FTy = Callee->getFunctionType(); +  if (!FTy->getReturnType()->isVoidTy()) +    return false; +  if (FTy->getNumParams() != 1) +    return false; +  if (FTy->param_begin()->get() != Type::getInt8PtrTy(Callee->getContext())) +    return false; + +  return true; +} diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index d6400757a513..0ec0e74233b3 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -20,7 +20,7 @@  #include "llvm/IntrinsicInst.h"  #include "llvm/Function.h"  #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/MallocHelper.h" +#include "llvm/Analysis/MemoryBuiltins.h"  #include "llvm/ADT/Statistic.h"  #include "llvm/ADT/STLExtras.h"  #include "llvm/Support/PredIteratorCache.h" @@ -113,10 +113,9 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,      } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {        Pointer = V->getOperand(0);        PointerSize = AA->getTypeStoreSize(V->getType()); -    } else if (FreeInst *F = dyn_cast<FreeInst>(Inst)) { -      Pointer = F->getPointerOperand(); -       -      // FreeInsts erase the entire structure +    } else if (isFreeCall(Inst)) { +      Pointer = Inst->getOperand(1); +      // calls to free() erase the entire structure        PointerSize = ~0ULL;      } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {        // Debug intrinsics don't cause dependences. @@ -168,13 +171,54 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,  /// location depends.  If isLoad is true, this routine ignore may-aliases with  /// read-only operations.  MemDepResult MemoryDependenceAnalysis:: -getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad, +getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,                            BasicBlock::iterator ScanIt, BasicBlock *BB) { +  Value* invariantTag = 0; +    // Walk backwards through the basic block, looking for dependencies.    while (ScanIt != BB->begin()) {      Instruction *Inst = --ScanIt; +    // If we're in an invariant region, no dependencies can be found before +    // we pass an invariant-begin marker. +    if (invariantTag == Inst) { +      invariantTag = 0; +      continue; +    } else if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) { +      // If we pass an invariant-end marker, then we've just entered an +      // invariant region and can start ignoring dependencies. +      if (II->getIntrinsicID() == Intrinsic::invariant_end) { +        uint64_t invariantSize = ~0ULL; +        if (ConstantInt* CI = dyn_cast<ConstantInt>(II->getOperand(2))) +          invariantSize = CI->getZExtValue(); +         +        AliasAnalysis::AliasResult R = +          AA->alias(II->getOperand(3), invariantSize, MemPtr, MemSize); +        if (R == AliasAnalysis::MustAlias) { +          invariantTag = II->getOperand(1); +          continue; +        } +       +      // If we reach a lifetime begin or end marker, then the query ends here +      // because the value is undefined. 
+      } else if (II->getIntrinsicID() == Intrinsic::lifetime_start || +                   II->getIntrinsicID() == Intrinsic::lifetime_end) { +        uint64_t invariantSize = ~0ULL; +        if (ConstantInt* CI = dyn_cast<ConstantInt>(II->getOperand(1))) +          invariantSize = CI->getZExtValue(); + +        AliasAnalysis::AliasResult R = +          AA->alias(II->getOperand(2), invariantSize, MemPtr, MemSize); +        if (R == AliasAnalysis::MustAlias) +          return MemDepResult::getDef(II); +      } +    } + +    // If we're querying on a load and we're in an invariant region, we're done +    // at this point. Nothing a load depends on can live in an invariant region. +    if (isLoad && invariantTag) continue; +      // Debug intrinsics don't cause dependences.      if (isa<DbgInfoIntrinsic>(Inst)) continue; @@ -199,6 +243,10 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,      }      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { +      // There can't be stores to the value we care about inside an  +      // invariant region. +      if (invariantTag) continue; +              // If alias analysis can tell that this store is guaranteed to not modify        // the query pointer, ignore it.  Use getModRefInfo to handle cases where        // the query pointer points to constant memory etc. @@ -229,7 +277,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,      // a subsequent bitcast of the malloc call result.  There can be stores to      // the malloced memory between the malloc call and its bitcast uses, and we      // need to continue scanning until the malloc call. -    if (isa<AllocationInst>(Inst) || extractMallocCall(Inst)) { +    if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) {        Value *AccessPtr = MemPtr->getUnderlyingObject();        if (AccessPtr == Inst || @@ -243,12 +291,16 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,      case AliasAnalysis::NoModRef:        // If the call has no effect on the queried pointer, just ignore it.        continue; +    case AliasAnalysis::Mod: +      // If we're in an invariant region, we can ignore calls that ONLY +      // modify the pointer. +      if (invariantTag) continue; +      return MemDepResult::getClobber(Inst);      case AliasAnalysis::Ref:        // If the call is known to never store to the pointer, and if this is a        // load query, we can safely ignore it (scan past it).        if (isLoad)          continue; -      // FALL THROUGH.      default:        // Otherwise, there is a potential dependence.  Return a clobber.        return MemDepResult::getClobber(Inst); @@ -314,15 +366,15 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {        MemPtr = LI->getPointerOperand();        MemSize = AA->getTypeStoreSize(LI->getType());      } +  } else if (isFreeCall(QueryInst)) { +    MemPtr = QueryInst->getOperand(1); +    // calls to free() erase the entire structure, not just a field. +    MemSize = ~0UL;    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {      CallSite QueryCS = CallSite::get(QueryInst);      bool isReadOnly = AA->onlyReadsMemory(QueryCS);      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,                                             QueryParent); -  } else if (FreeInst *FI = dyn_cast<FreeInst>(QueryInst)) { -    MemPtr = FI->getPointerOperand(); -    // FreeInsts erase the entire structure, not just a field. 
-    MemSize = ~0UL;    } else {      // Non-memory instruction.      LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos)); diff --git a/lib/Analysis/PointerTracking.cpp b/lib/Analysis/PointerTracking.cpp index 2309fbc952cc..2251b62b1809 100644 --- a/lib/Analysis/PointerTracking.cpp +++ b/lib/Analysis/PointerTracking.cpp @@ -13,7 +13,7 @@  #include "llvm/Analysis/ConstantFolding.h"  #include "llvm/Analysis/Dominators.h"  #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/MallocHelper.h" +#include "llvm/Analysis/MemoryBuiltins.h"  #include "llvm/Analysis/PointerTracking.h"  #include "llvm/Analysis/ScalarEvolution.h"  #include "llvm/Analysis/ScalarEvolutionExpressions.h" @@ -93,7 +93,7 @@ bool PointerTracking::doInitialization(Module &M) {  const SCEV *PointerTracking::computeAllocationCount(Value *P,                                                      const Type *&Ty) const {    Value *V = P->stripPointerCasts(); -  if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) { +  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {      Value *arraySize = AI->getArraySize();      Ty = AI->getAllocatedType();      // arraySize elements of type Ty. diff --git a/lib/Analysis/ProfileEstimatorPass.cpp b/lib/Analysis/ProfileEstimatorPass.cpp index c585c1dced04..e767891eab3c 100644 --- a/lib/Analysis/ProfileEstimatorPass.cpp +++ b/lib/Analysis/ProfileEstimatorPass.cpp @@ -30,8 +30,7 @@ LoopWeight(  );  namespace { -  class VISIBILITY_HIDDEN ProfileEstimatorPass : -      public FunctionPass, public ProfileInfo { +  class ProfileEstimatorPass : public FunctionPass, public ProfileInfo {      double ExecCount;      LoopInfo *LI;      std::set<BasicBlock*>  BBToVisit; diff --git a/lib/Analysis/ProfileInfo.cpp b/lib/Analysis/ProfileInfo.cpp index 9efdd23081c4..7f24f5a238eb 100644 --- a/lib/Analysis/ProfileInfo.cpp +++ b/lib/Analysis/ProfileInfo.cpp @@ -16,7 +16,6 @@  #include "llvm/Analysis/ProfileInfo.h"  #include "llvm/Pass.h"  #include "llvm/Support/CFG.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/Debug.h"  #include "llvm/Support/raw_ostream.h"  #include "llvm/Support/Format.h" @@ -178,8 +177,7 @@ raw_ostream& llvm::operator<<(raw_ostream &O, ProfileInfo::Edge E) {  //  namespace { -  struct VISIBILITY_HIDDEN NoProfileInfo  -    : public ImmutablePass, public ProfileInfo { +  struct NoProfileInfo : public ImmutablePass, public ProfileInfo {      static char ID; // Class identification, replacement for typeinfo      NoProfileInfo() : ImmutablePass(&ID) {}    }; diff --git a/lib/Analysis/ProfileInfoLoaderPass.cpp b/lib/Analysis/ProfileInfoLoaderPass.cpp index 89d90bca2166..9e1dfb6ff711 100644 --- a/lib/Analysis/ProfileInfoLoaderPass.cpp +++ b/lib/Analysis/ProfileInfoLoaderPass.cpp @@ -20,7 +20,6 @@  #include "llvm/Analysis/ProfileInfo.h"  #include "llvm/Analysis/ProfileInfoLoader.h"  #include "llvm/Support/CommandLine.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/CFG.h"  #include "llvm/Support/Debug.h"  #include "llvm/Support/raw_ostream.h" @@ -38,7 +37,7 @@ ProfileInfoFilename("profile-info-file", cl::init("llvmprof.out"),                      cl::desc("Profile file loaded by -profile-loader"));  namespace { -  class VISIBILITY_HIDDEN LoaderPass : public ModulePass, public ProfileInfo { +  class LoaderPass : public ModulePass, public ProfileInfo {      std::string Filename;      std::set<Edge> SpanningTree;      std::set<const BasicBlock*> BBisUnvisited; @@ -61,7 +60,7 @@ namespace {      // recurseBasicBlock() - Calculates the edge weights for as 
much basic      // blocks as possbile.      virtual void recurseBasicBlock(const BasicBlock *BB); -    virtual void readEdgeOrRemember(Edge, Edge&, unsigned &, unsigned &); +    virtual void readEdgeOrRemember(Edge, Edge&, unsigned &, double &);      virtual void readEdge(ProfileInfo::Edge, std::vector<unsigned>&);      /// run - Load the profile information from the specified file. @@ -85,7 +84,7 @@ Pass *llvm::createProfileLoaderPass(const std::string &Filename) {  }  void LoaderPass::readEdgeOrRemember(Edge edge, Edge &tocalc,  -                                    unsigned &uncalc, unsigned &count) { +                                    unsigned &uncalc, double &count) {    double w;    if ((w = getEdgeWeight(edge)) == MissingValue) {      tocalc = edge; @@ -118,7 +117,7 @@ void LoaderPass::recurseBasicBlock(const BasicBlock *BB) {    // collect weights of all incoming and outgoing edges, rememer edges that    // have no value -  unsigned incount = 0; +  double incount = 0;    SmallSet<const BasicBlock*,8> pred_visited;    pred_const_iterator bbi = pred_begin(BB), bbe = pred_end(BB);    if (bbi==bbe) { @@ -130,7 +129,7 @@ void LoaderPass::recurseBasicBlock(const BasicBlock *BB) {      }    } -  unsigned outcount = 0; +  double outcount = 0;    SmallSet<const BasicBlock*,8> succ_visited;    succ_const_iterator sbbi = succ_begin(BB), sbbe = succ_end(BB);    if (sbbi==sbbe) { diff --git a/lib/Analysis/ProfileVerifierPass.cpp b/lib/Analysis/ProfileVerifierPass.cpp index 9766da5992df..5f362944dc3c 100644 --- a/lib/Analysis/ProfileVerifierPass.cpp +++ b/lib/Analysis/ProfileVerifierPass.cpp @@ -30,7 +30,7 @@ ProfileVerifierDisableAssertions("profile-verifier-noassert",       cl::desc("Disable assertions"));  namespace { -  class VISIBILITY_HIDDEN ProfileVerifierPass : public FunctionPass { +  class ProfileVerifierPass : public FunctionPass {      struct DetailedBlockInfo {        const BasicBlock *BB; @@ -229,7 +229,8 @@ void ProfileVerifierPass::recurseBasicBlock(const BasicBlock *BB) {    // to debug printers.    DetailedBlockInfo DI;    DI.BB = BB; -  DI.outCount = DI.inCount = DI.inWeight = DI.outWeight = 0; +  DI.outCount = DI.inCount = 0; +  DI.inWeight = DI.outWeight = 0.0;    // Read predecessors.    std::set<const BasicBlock*> ProcessedPreds; diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 62f3aa1dcae4..3e87ca22be32 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -74,7 +74,6 @@  #include "llvm/Assembly/Writer.h"  #include "llvm/Target/TargetData.h"  #include "llvm/Support/CommandLine.h" -#include "llvm/Support/Compiler.h"  #include "llvm/Support/ConstantRange.h"  #include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/GetElementPtrTypeIterator.h" @@ -401,7 +400,7 @@ namespace {    /// SCEVComplexityCompare - Return true if the complexity of the LHS is less    /// than the complexity of the RHS.  This comparator is used to canonicalize    /// expressions. -  class VISIBILITY_HIDDEN SCEVComplexityCompare { +  class SCEVComplexityCompare {      LoopInfo *LI;    public:      explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {} @@ -3266,9 +3265,8 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {      // Now that we know more about the trip count for this loop, forget any      // existing SCEV values for PHI nodes in this loop since they are only      // conservative estimates made without the benefit of trip count -    // information. 
This is similar to the code in -    // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI -    // nodes specially. +    // information. This is similar to the code in forgetLoop, except that +    // it handles SCEVUnknown PHI nodes specially.      if (ItCount.hasAnyInfo()) {        SmallVector<Instruction *, 16> Worklist;        PushLoopPHIs(L, Worklist); @@ -3302,13 +3300,14 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {    return Pair.first->second;  } -/// forgetLoopBackedgeTakenCount - This method should be called by the -/// client when it has changed a loop in a way that may effect -/// ScalarEvolution's ability to compute a trip count, or if the loop -/// is deleted. -void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) { +/// forgetLoop - This method should be called by the client when it has +/// changed a loop in a way that may effect ScalarEvolution's ability to +/// compute a trip count, or if the loop is deleted. +void ScalarEvolution::forgetLoop(const Loop *L) { +  // Drop any stored trip count value.    BackedgeTakenCounts.erase(L); +  // Drop information about expressions based on loop-header PHIs.    SmallVector<Instruction *, 16> Worklist;    PushLoopPHIs(L, Worklist); diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp index cc79e6c3b130..ef0e97b6e10e 100644 --- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp +++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp @@ -19,14 +19,13 @@  #include "llvm/Analysis/ScalarEvolutionExpressions.h"  #include "llvm/Analysis/Passes.h"  #include "llvm/Pass.h" -#include "llvm/Support/Compiler.h"  using namespace llvm;  namespace {    /// ScalarEvolutionAliasAnalysis - This is a simple alias analysis    /// implementation that uses ScalarEvolution to answer queries. -  class VISIBILITY_HIDDEN ScalarEvolutionAliasAnalysis : public FunctionPass, -                                                         public AliasAnalysis { +  class ScalarEvolutionAliasAnalysis : public FunctionPass, +                                       public AliasAnalysis {      ScalarEvolution *SE;    public: @@ -39,7 +38,7 @@ namespace {      virtual AliasResult alias(const Value *V1, unsigned V1Size,                                const Value *V2, unsigned V2Size); -    Value *GetUnderlyingIdentifiedObject(const SCEV *S); +    Value *GetBaseValue(const SCEV *S);    };  }  // End of anonymous namespace @@ -69,25 +68,22 @@ ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) {    return false;  } -/// GetUnderlyingIdentifiedObject - Given an expression, try to find an -/// "identified object" (see AliasAnalysis::isIdentifiedObject) base -/// value. Return null is none was found. +/// GetBaseValue - Given an expression, try to find a +/// base value. Return null is none was found.  Value * -ScalarEvolutionAliasAnalysis::GetUnderlyingIdentifiedObject(const SCEV *S) { +ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {      // In an addrec, assume that the base will be in the start, rather      // than the step. -    return GetUnderlyingIdentifiedObject(AR->getStart()); +    return GetBaseValue(AR->getStart());    } else if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {      // If there's a pointer operand, it'll be sorted at the end of the list.      
const SCEV *Last = A->getOperand(A->getNumOperands()-1);      if (isa<PointerType>(Last->getType())) -      return GetUnderlyingIdentifiedObject(Last); +      return GetBaseValue(Last);    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { -    // Determine if we've found an Identified object. -    Value *V = U->getValue(); -    if (isIdentifiedObject(V)) -      return V; +    // This is a leaf node. +    return U->getValue();    }    // No Identified object found.    return 0; @@ -121,8 +117,8 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,    // If ScalarEvolution can find an underlying object, form a new query.    // The correctness of this depends on ScalarEvolution not recognizing    // inttoptr and ptrtoint operators. -  Value *AO = GetUnderlyingIdentifiedObject(AS); -  Value *BO = GetUnderlyingIdentifiedObject(BS); +  Value *AO = GetBaseValue(AS); +  Value *BO = GetBaseValue(BS);    if ((AO && AO != A) || (BO && BO != B))      if (alias(AO ? AO : A, AO ? ~0u : ASize,                BO ? BO : B, BO ? ~0u : BSize) == NoAlias) diff --git a/lib/Analysis/SparsePropagation.cpp b/lib/Analysis/SparsePropagation.cpp index b7844f022765..d7bcac2b1e2d 100644 --- a/lib/Analysis/SparsePropagation.cpp +++ b/lib/Analysis/SparsePropagation.cpp @@ -166,6 +166,11 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,      return;    } +  if (isa<IndirectBrInst>(TI)) { +    Succs.assign(Succs.size(), true); +    return; +  } +      SwitchInst &SI = cast<SwitchInst>(TI);    LatticeVal SCValue;    if (AggressiveUndef) diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index dc0d48904735..5672510a7220 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -470,7 +470,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,    }    case Instruction::Alloca: { -    AllocationInst *AI = cast<AllocationInst>(V); +    AllocaInst *AI = cast<AllocaInst>(V);      unsigned Align = AI->getAlignment();      if (Align == 0 && TD)        Align = TD->getABITypeAlignment(AI->getType()->getElementType()); | 
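The new MemoryBuiltins interface above replaces the deleted MallocHelper functions and the removed FreeInst-specific paths. As a rough orientation only (not code from this patch), the sketch below shows how a client pass might query it; it assumes the 2009-era headers and the signatures introduced above, and classifyMemoryInst is a hypothetical helper.

#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Hypothetical client code: classify one instruction with the new helpers.
static void classifyMemoryInst(Instruction *I, LLVMContext &Context,
                               const TargetData *TD) {
  if (CallInst *CI = extractMallocCall(I)) {
    // A direct call to the builtin malloc(); a bitcast of the result would be
    // matched by isMalloc/extractMallocCallFromBitCast instead.
    if (Value *ArraySize = getMallocArraySize(CI, Context, TD))
      errs() << "malloc array size: " << *ArraySize << "\n"; // constant 1 if not an array malloc
    else
      errs() << "malloc whose array size cannot be determined\n";
  } else if (isFreeCall(I)) {
    // A direct call to the builtin free(); as in the MemoryDependenceAnalysis
    // hunks above, operand 1 is the pointer being freed.
    errs() << "free of: " << *I->getOperand(1) << "\n";
  }
}

Passing LLVMContext and TargetData explicitly mirrors the signatures of isArrayMalloc and getMallocArraySize in the new interface.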

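Similarly, the ScalarEvolution change above renames forgetLoopBackedgeTakenCount to forgetLoop and has it also drop cached SCEVs for the loop-header PHIs. A minimal sketch of the intended calling pattern for a loop transformation follows; it is an illustration under the same 2009-era API assumptions, and rewriteExitCondition is a hypothetical stand-in for whatever change the pass makes.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

// Hypothetical transformation step: anything that can change the trip count
// (or deletes the loop) should invalidate ScalarEvolution's cached results.
static void rewriteExitCondition(Loop *L, ScalarEvolution &SE) {
  // ... modify the latch or exit branch here ...
  SE.forgetLoop(L); // previously SE.forgetLoopBackedgeTakenCount(L)
}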