Diffstat (limited to 'lib/Transforms/IPO')
-rw-r--r--  lib/Transforms/IPO/FunctionImport.cpp  | 182
-rw-r--r--  lib/Transforms/IPO/LowerTypeTests.cpp  | 316
2 files changed, 309 insertions(+), 189 deletions(-)
diff --git a/lib/Transforms/IPO/FunctionImport.cpp b/lib/Transforms/IPO/FunctionImport.cpp
index 6dd95f8dcd55..6b32f6c31f72 100644
--- a/lib/Transforms/IPO/FunctionImport.cpp
+++ b/lib/Transforms/IPO/FunctionImport.cpp
@@ -36,7 +36,10 @@
using namespace llvm;
-STATISTIC(NumImported, "Number of functions imported");
+STATISTIC(NumImportedFunctions, "Number of functions imported");
+STATISTIC(NumImportedModules, "Number of modules imported from");
+STATISTIC(NumDeadSymbols, "Number of dead stripped symbols in index");
+STATISTIC(NumLiveSymbols, "Number of live symbols in index");
/// Limit on instruction count of imported functions.
static cl::opt<unsigned> ImportInstrLimit(
@@ -69,6 +72,9 @@ static cl::opt<float> ImportColdMultiplier(
static cl::opt<bool> PrintImports("print-imports", cl::init(false), cl::Hidden,
cl::desc("Print imported functions"));
+static cl::opt<bool> ComputeDead("compute-dead", cl::init(true), cl::Hidden,
+ cl::desc("Compute dead symbols"));
+
// Temporarily allows the function import pass to disable always linking
// referenced discardable symbols.
static cl::opt<bool>
@@ -105,78 +111,6 @@ static std::unique_ptr<Module> loadFile(const std::string &FileName,
namespace {
-// Return true if the Summary describes a GlobalValue that can be externally
-// referenced, i.e. it does not need renaming (linkage is not local) or renaming
-// is possible (does not have a section for instance).
-static bool canBeExternallyReferenced(const GlobalValueSummary &Summary) {
- if (!Summary.needsRenaming())
- return true;
-
- if (Summary.noRename())
- // Can't externally reference a global that needs renaming if has a section
- // or is referenced from inline assembly, for example.
- return false;
-
- return true;
-}
-
-// Return true if \p GUID describes a GlobalValue that can be externally
-// referenced, i.e. it does not need renaming (linkage is not local) or
-// renaming is possible (does not have a section for instance).
-static bool canBeExternallyReferenced(const ModuleSummaryIndex &Index,
- GlobalValue::GUID GUID) {
- auto Summaries = Index.findGlobalValueSummaryList(GUID);
- if (Summaries == Index.end())
- return true;
- if (Summaries->second.size() != 1)
- // If there are multiple globals with this GUID, then we know it is
- // not a local symbol, and it is necessarily externally referenced.
- return true;
-
- // We don't need to check for the module path, because if it can't be
- // externally referenced and we call it, it is necessarilly in the same
- // module
- return canBeExternallyReferenced(**Summaries->second.begin());
-}
-
-// Return true if the global described by \p Summary can be imported in another
-// module.
-static bool eligibleForImport(const ModuleSummaryIndex &Index,
- const GlobalValueSummary &Summary) {
- if (!canBeExternallyReferenced(Summary))
- // Can't import a global that needs renaming if has a section for instance.
- // FIXME: we may be able to import it by copying it without promotion.
- return false;
-
- // Don't import functions that are not viable to inline.
- if (Summary.isNotViableToInline())
- return false;
-
- // Check references (and potential calls) in the same module. If the current
- // value references a global that can't be externally referenced it is not
- // eligible for import. First check the flag set when we have possible
- // opaque references (e.g. inline asm calls), then check the call and
- // reference sets.
- if (Summary.hasInlineAsmMaybeReferencingInternal())
- return false;
- bool AllRefsCanBeExternallyReferenced =
- llvm::all_of(Summary.refs(), [&](const ValueInfo &VI) {
- return canBeExternallyReferenced(Index, VI.getGUID());
- });
- if (!AllRefsCanBeExternallyReferenced)
- return false;
-
- if (auto *FuncSummary = dyn_cast<FunctionSummary>(&Summary)) {
- bool AllCallsCanBeExternallyReferenced = llvm::all_of(
- FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
- return canBeExternallyReferenced(Index, Edge.first.getGUID());
- });
- if (!AllCallsCanBeExternallyReferenced)
- return false;
- }
- return true;
-}
-
/// Given a list of possible callee implementations for a call site, select one
/// that fits the \p Threshold.
///
@@ -214,7 +148,7 @@ selectCallee(const ModuleSummaryIndex &Index,
if (Summary->instCount() > Threshold)
return false;
- if (!eligibleForImport(Index, *Summary))
+ if (Summary->notEligibleToImport())
return false;
return true;
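With this hunk, the eligibility analysis that the deleted eligibleForImport() performed above runs once, when the summary is built, and is cached as a flag that selectCallee simply reads. A minimal sketch of what the summary-build side could look like; canBeRenamedOrReferenced() and setNotEligibleToImport() are hypothetical stand-ins here, not part of this patch:

    // Hypothetical sketch: fold the old per-import checks into one flag
    // computed at summary construction time.
    bool NotEligible =
        Summary.isNotViableToInline() ||
        Summary.hasInlineAsmMaybeReferencingInternal() ||
        !canBeRenamedOrReferenced(Summary); // section / inline-asm checks
    Summary.setNotEligibleToImport(NotEligible);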
@@ -346,7 +280,8 @@ static void computeImportForFunction(
static void ComputeImportForModule(
const GVSummaryMapTy &DefinedGVSummaries, const ModuleSummaryIndex &Index,
FunctionImporter::ImportMapTy &ImportList,
- StringMap<FunctionImporter::ExportSetTy> *ExportLists = nullptr) {
+ StringMap<FunctionImporter::ExportSetTy> *ExportLists = nullptr,
+ const DenseSet<GlobalValue::GUID> *DeadSymbols = nullptr) {
// Worklist contains the list of functions imported in this module, for which
// we will analyse the callees and may import further down the callgraph.
SmallVector<EdgeInfo, 128> Worklist;
@@ -354,6 +289,10 @@ static void ComputeImportForModule(
// Populate the worklist with the imports for the functions in the current
// module.
for (auto &GVSummary : DefinedGVSummaries) {
+ if (DeadSymbols && DeadSymbols->count(GVSummary.first)) {
+ DEBUG(dbgs() << "Ignores Dead GUID: " << GVSummary.first << "\n");
+ continue;
+ }
auto *Summary = GVSummary.second;
if (auto *AS = dyn_cast<AliasSummary>(Summary))
Summary = &AS->getAliasee();
@@ -393,14 +332,15 @@ void llvm::ComputeCrossModuleImport(
const ModuleSummaryIndex &Index,
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
- StringMap<FunctionImporter::ExportSetTy> &ExportLists) {
+ StringMap<FunctionImporter::ExportSetTy> &ExportLists,
+ const DenseSet<GlobalValue::GUID> *DeadSymbols) {
// For each module that has functions defined, compute the import/export lists.
for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
auto &ImportList = ImportLists[DefinedGVSummaries.first()];
DEBUG(dbgs() << "Computing import for Module '"
<< DefinedGVSummaries.first() << "'\n");
ComputeImportForModule(DefinedGVSummaries.second, Index, ImportList,
- &ExportLists);
+ &ExportLists, DeadSymbols);
}
// When computing imports we added all GUIDs referenced by anything
@@ -462,6 +402,86 @@ void llvm::ComputeCrossModuleImportForModule(
#endif
}
+DenseSet<GlobalValue::GUID> llvm::computeDeadSymbols(
+ const ModuleSummaryIndex &Index,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ if (!ComputeDead)
+ return DenseSet<GlobalValue::GUID>();
+ if (GUIDPreservedSymbols.empty())
+ // Don't do anything when nothing is live; this keeps tests unaffected.
+ return DenseSet<GlobalValue::GUID>();
+ DenseSet<GlobalValue::GUID> LiveSymbols = GUIDPreservedSymbols;
+ SmallVector<GlobalValue::GUID, 128> Worklist;
+ Worklist.reserve(LiveSymbols.size() * 2);
+ for (auto GUID : LiveSymbols) {
+ DEBUG(dbgs() << "Live root: " << GUID << "\n");
+ Worklist.push_back(GUID);
+ }
+ // Add values flagged in the index as live roots to the worklist.
+ for (const auto &Entry : Index) {
+ bool IsLiveRoot = llvm::any_of(
+ Entry.second,
+ [&](const std::unique_ptr<llvm::GlobalValueSummary> &Summary) {
+ return Summary->liveRoot();
+ });
+ if (!IsLiveRoot)
+ continue;
+ DEBUG(dbgs() << "Live root (summary): " << Entry.first << "\n");
+ Worklist.push_back(Entry.first);
+ }
+
+ while (!Worklist.empty()) {
+ auto GUID = Worklist.pop_back_val();
+ auto It = Index.findGlobalValueSummaryList(GUID);
+ if (It == Index.end()) {
+ DEBUG(dbgs() << "Not in index: " << GUID << "\n");
+ continue;
+ }
+
+ // FIXME: we should only make the prevailing copy live here
+ for (auto &Summary : It->second) {
+ for (auto Ref : Summary->refs()) {
+ auto RefGUID = Ref.getGUID();
+ if (LiveSymbols.insert(RefGUID).second) {
+ DEBUG(dbgs() << "Marking live (ref): " << RefGUID << "\n");
+ Worklist.push_back(RefGUID);
+ }
+ }
+ if (auto *FS = dyn_cast<FunctionSummary>(Summary.get())) {
+ for (auto Call : FS->calls()) {
+ auto CallGUID = Call.first.getGUID();
+ if (LiveSymbols.insert(CallGUID).second) {
+ DEBUG(dbgs() << "Marking live (call): " << CallGUID << "\n");
+ Worklist.push_back(CallGUID);
+ }
+ }
+ }
+ if (auto *AS = dyn_cast<AliasSummary>(Summary.get())) {
+ auto AliaseeGUID = AS->getAliasee().getOriginalName();
+ if (LiveSymbols.insert(AliaseeGUID).second) {
+ DEBUG(dbgs() << "Marking live (alias): " << AliaseeGUID << "\n");
+ Worklist.push_back(AliaseeGUID);
+ }
+ }
+ }
+ }
+ DenseSet<GlobalValue::GUID> DeadSymbols;
+ // Note: the min() guards against unsigned wrap when the preserved set
+ // contains GUIDs that are not in the index.
+ DeadSymbols.reserve(
+     std::min(Index.size(), Index.size() - LiveSymbols.size()));
+ for (auto &Entry : Index) {
+ auto GUID = Entry.first;
+ if (!LiveSymbols.count(GUID)) {
+ DEBUG(dbgs() << "Marking dead: " << GUID << "\n");
+ DeadSymbols.insert(GUID);
+ }
+ }
+ DEBUG(dbgs() << LiveSymbols.size() << " symbols live and "
+              << DeadSymbols.size() << " symbols dead\n");
+ NumDeadSymbols += DeadSymbols.size();
+ NumLiveSymbols += LiveSymbols.size();
+ return DeadSymbols;
+}
+
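A rough usage sketch for the new API, assuming a ThinLTO driver that already has the combined Index and the per-module summary maps (setup elided):

    // Compute liveness once from the linker-preserved roots, then let the
    // import computation skip anything proven dead.
    DenseSet<GlobalValue::GUID> GUIDPreservedSymbols; // filled by the linker
    DenseSet<GlobalValue::GUID> DeadSymbols =
        computeDeadSymbols(Index, GUIDPreservedSymbols);
    StringMap<FunctionImporter::ImportMapTy> ImportLists;
    StringMap<FunctionImporter::ExportSetTy> ExportLists;
    ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
                             ExportLists, &DeadSymbols);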
/// Compute the set of summaries needed for a ThinLTO backend compilation of
/// \p ModulePath.
void llvm::gatherImportedSummariesForModule(
@@ -625,7 +645,6 @@ Expected<bool> FunctionImporter::importFunctions(
// now, before linking it (otherwise this will be a noop).
if (Error Err = SrcModule->materializeMetadata())
return std::move(Err);
- UpgradeDebugInfo(*SrcModule);
auto &ImportGUIDs = FunctionsToImportPerModule->second;
// Find the globals to import
@@ -698,6 +717,10 @@ Expected<bool> FunctionImporter::importFunctions(
}
}
+ // Upgrade debug info after we're done materializing all the globals and we
+ // have loaded all the required metadata!
+ UpgradeDebugInfo(*SrcModule);
+
// Link in the specified functions.
if (renameModuleForThinLTO(*SrcModule, Index, &GlobalsToImport))
return true;
@@ -717,9 +740,10 @@ Expected<bool> FunctionImporter::importFunctions(
report_fatal_error("Function Import: link error");
ImportedCount += GlobalsToImport.size();
+ NumImportedModules++;
}
- NumImported += ImportedCount;
+ NumImportedFunctions += ImportedCount;
DEBUG(dbgs() << "Imported " << ImportedCount << " functions for Module "
<< DestModule.getModuleIdentifier() << "\n");
diff --git a/lib/Transforms/IPO/LowerTypeTests.cpp b/lib/Transforms/IPO/LowerTypeTests.cpp
index 2948878cffc4..f4742aaf748f 100644
--- a/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -27,9 +27,12 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndexYAML.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
@@ -52,6 +55,20 @@ static cl::opt<bool> AvoidReuse(
cl::desc("Try to avoid reuse of byte array addresses using aliases"),
cl::Hidden, cl::init(true));
+static cl::opt<std::string> ClSummaryAction(
+ "lowertypetests-summary-action",
+ cl::desc("What to do with the summary when running this pass"), cl::Hidden);
+
+static cl::opt<std::string> ClReadSummary(
+ "lowertypetests-read-summary",
+ cl::desc("Read summary from given YAML file before running pass"),
+ cl::Hidden);
+
+static cl::opt<std::string> ClWriteSummary(
+ "lowertypetests-write-summary",
+ cl::desc("Write summary to given YAML file after running pass"),
+ cl::Hidden);
+
bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
if (Offset < ByteOffset)
return false;
@@ -66,38 +83,6 @@ bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
return Bits.count(BitOffset);
}
-bool BitSetInfo::containsValue(
- const DataLayout &DL,
- const DenseMap<GlobalObject *, uint64_t> &GlobalLayout, Value *V,
- uint64_t COffset) const {
- if (auto GV = dyn_cast<GlobalObject>(V)) {
- auto I = GlobalLayout.find(GV);
- if (I == GlobalLayout.end())
- return false;
- return containsGlobalOffset(I->second + COffset);
- }
-
- if (auto GEP = dyn_cast<GEPOperator>(V)) {
- APInt APOffset(DL.getPointerSizeInBits(0), 0);
- bool Result = GEP->accumulateConstantOffset(DL, APOffset);
- if (!Result)
- return false;
- COffset += APOffset.getZExtValue();
- return containsValue(DL, GlobalLayout, GEP->getPointerOperand(), COffset);
- }
-
- if (auto Op = dyn_cast<Operator>(V)) {
- if (Op->getOpcode() == Instruction::BitCast)
- return containsValue(DL, GlobalLayout, Op->getOperand(0), COffset);
-
- if (Op->getOpcode() == Instruction::Select)
- return containsValue(DL, GlobalLayout, Op->getOperand(1), COffset) &&
- containsValue(DL, GlobalLayout, Op->getOperand(2), COffset);
- }
-
- return false;
-}
-
void BitSetInfo::print(raw_ostream &OS) const {
OS << "offset " << ByteOffset << " size " << BitSize << " align "
<< (1 << AlignLog2);
@@ -204,7 +189,7 @@ struct ByteArrayInfo {
std::set<uint64_t> Bits;
uint64_t BitSize;
GlobalVariable *ByteArray;
- Constant *Mask;
+ GlobalVariable *MaskGlobal;
};
/// A POD-like structure that we use to store a global reference together with
@@ -241,6 +226,9 @@ public:
class LowerTypeTestsModule {
Module &M;
+ // This is for testing purposes only.
+ std::unique_ptr<ModuleSummaryIndex> OwnedSummary;
+
bool LinkerSubsectionsViaSymbols;
Triple::ArchType Arch;
Triple::OSType OS;
@@ -248,6 +236,7 @@ class LowerTypeTestsModule {
IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
+ PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
PointerType *Int32PtrTy = PointerType::getUnqual(Int32Ty);
IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
@@ -259,6 +248,37 @@ class LowerTypeTestsModule {
// Mapping from type identifiers to the call sites that test them.
DenseMap<Metadata *, std::vector<CallInst *>> TypeTestCallSites;
+ /// This structure describes how to lower type tests for a particular type
+ /// identifier. It is either built directly from the global analysis (during
+ /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
+ /// identifier summaries and external symbol references (in ThinLTO backends).
+ struct TypeIdLowering {
+ TypeTestResolution::Kind TheKind;
+
+ /// All except Unsat: the start address within the combined global.
+ Constant *OffsetedGlobal;
+
+ /// ByteArray, Inline, AllOnes: log2 of the required global alignment
+ /// relative to the start address.
+ Constant *AlignLog2;
+
+ /// ByteArray, Inline, AllOnes: size of the memory region covering members
+ /// of this type identifier as a multiple of 2^AlignLog2.
+ Constant *Size;
+
+ /// ByteArray, Inline, AllOnes: range of the size expressed as a bit width.
+ unsigned SizeBitWidth;
+
+ /// ByteArray: the byte array to test the address against.
+ Constant *TheByteArray;
+
+ /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
+ Constant *BitMask;
+
+ /// Inline: the bit mask to test the address against.
+ Constant *InlineBits;
+ };
+
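For orientation, a summary of how each TheKind is ultimately checked by lowerTypeTestCall() further down (paraphrased from the code, not part of this patch):

    //   Unsat     -> constant false
    //   Single    -> Ptr == OffsetedGlobal
    //   AllOnes   -> rotr(Ptr - OffsetedGlobal, AlignLog2) < Size
    //   Inline    -> the AllOnes range check, then a bit test against
    //                InlineBits
    //   ByteArray -> the AllOnes range check, then a load from TheByteArray
    //                masked with BitMask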
std::vector<ByteArrayInfo> ByteArrayInfos;
Function *WeakInitializerFn = nullptr;
@@ -268,15 +288,13 @@ class LowerTypeTestsModule {
const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
ByteArrayInfo *createByteArray(BitSetInfo &BSI);
void allocateByteArrays();
- Value *createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI, ByteArrayInfo *&BAI,
+ Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
Value *BitOffset);
void lowerTypeTestCalls(
ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
- Value *
- lowerBitSetCall(CallInst *CI, BitSetInfo &BSI, ByteArrayInfo *&BAI,
- Constant *CombinedGlobal,
- const DenseMap<GlobalObject *, uint64_t> &GlobalLayout);
+ Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
+ const TypeIdLowering &TIL);
void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
ArrayRef<GlobalTypeMember *> Globals);
unsigned getJumpTableEntrySize();
@@ -302,6 +320,7 @@ class LowerTypeTestsModule {
public:
LowerTypeTestsModule(Module &M);
+ ~LowerTypeTestsModule();
bool lower();
};
@@ -380,7 +399,7 @@ ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
BAI->Bits = BSI.Bits;
BAI->BitSize = BSI.BitSize;
BAI->ByteArray = ByteArrayGlobal;
- BAI->Mask = ConstantExpr::getPtrToInt(MaskGlobal, Int8Ty);
+ BAI->MaskGlobal = MaskGlobal;
return BAI;
}
@@ -399,8 +418,9 @@ void LowerTypeTestsModule::allocateByteArrays() {
uint8_t Mask;
BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);
- BAI->Mask->replaceAllUsesWith(ConstantInt::get(Int8Ty, Mask));
- cast<GlobalVariable>(BAI->Mask->getOperand(0))->eraseFromParent();
+ BAI->MaskGlobal->replaceAllUsesWith(
+ ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
+ BAI->MaskGlobal->eraseFromParent();
}
Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
@@ -435,101 +455,121 @@ void LowerTypeTestsModule::allocateByteArrays() {
ByteArraySizeBytes = BAB.Bytes.size();
}
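The MaskGlobal change above is an instance of a placeholder pattern: while bit set tests are being built the mask byte is unknown, so uses receive a placeholder global whose uses are later folded to the real constant. In isolation (a sketch reusing Int8Ty/Int8PtrTy from the class; Mask is the byte chosen by the byte array builder):

    // Phase 1: hand out a placeholder i8 global while the mask is unknown.
    auto *MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
                                          GlobalValue::ExternalLinkage,
                                          nullptr, "bits.mask");
    // Phase 2: once allocation picks the mask byte, fold it into every use.
    // The inttoptr keeps the replacement's type (i8*) identical to the
    // placeholder's, which replaceAllUsesWith requires.
    MaskGlobal->replaceAllUsesWith(
        ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
    MaskGlobal->eraseFromParent();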
-/// Build a test that bit BitOffset is set in BSI, where
-/// BitSetGlobal is a global containing the bits in BSI.
-Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI,
- ByteArrayInfo *&BAI,
+/// Build a test that bit BitOffset is set in the type identifier that was
+/// lowered to TIL, which must be either an Inline or a ByteArray.
+Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
+ const TypeIdLowering &TIL,
Value *BitOffset) {
- if (BSI.BitSize <= 64) {
+ if (TIL.TheKind == TypeTestResolution::Inline) {
// If the bit set is sufficiently small, we can avoid a load by bit testing
// a constant.
- IntegerType *BitsTy;
- if (BSI.BitSize <= 32)
- BitsTy = Int32Ty;
- else
- BitsTy = Int64Ty;
-
- uint64_t Bits = 0;
- for (auto Bit : BSI.Bits)
- Bits |= uint64_t(1) << Bit;
- Constant *BitsConst = ConstantInt::get(BitsTy, Bits);
- return createMaskedBitTest(B, BitsConst, BitOffset);
+ return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
} else {
- if (!BAI) {
- ++NumByteArraysCreated;
- BAI = createByteArray(BSI);
- }
-
- Constant *ByteArray = BAI->ByteArray;
- Type *Ty = BAI->ByteArray->getValueType();
+ Constant *ByteArray = TIL.TheByteArray;
if (!LinkerSubsectionsViaSymbols && AvoidReuse) {
// Each use of the byte array uses a different alias. This makes the
// backend less likely to reuse previously computed byte array addresses,
// improving the security of the CFI mechanism based on this pass.
- ByteArray = GlobalAlias::create(BAI->ByteArray->getValueType(), 0,
- GlobalValue::PrivateLinkage, "bits_use",
- ByteArray, &M);
+ ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
+ "bits_use", ByteArray, &M);
}
- Value *ByteAddr = B.CreateGEP(Ty, ByteArray, BitOffset);
+ Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
Value *Byte = B.CreateLoad(ByteAddr);
- Value *ByteAndMask = B.CreateAnd(Byte, BAI->Mask);
+ Value *ByteAndMask =
+ B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
}
}
+static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
+ Value *V, uint64_t COffset) {
+ if (auto GV = dyn_cast<GlobalObject>(V)) {
+ SmallVector<MDNode *, 2> Types;
+ GV->getMetadata(LLVMContext::MD_type, Types);
+ for (MDNode *Type : Types) {
+ if (Type->getOperand(1) != TypeId)
+ continue;
+ uint64_t Offset =
+ cast<ConstantInt>(
+ cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
+ ->getZExtValue();
+ if (COffset == Offset)
+ return true;
+ }
+ return false;
+ }
+
+ if (auto GEP = dyn_cast<GEPOperator>(V)) {
+ APInt APOffset(DL.getPointerSizeInBits(0), 0);
+ bool Result = GEP->accumulateConstantOffset(DL, APOffset);
+ if (!Result)
+ return false;
+ COffset += APOffset.getZExtValue();
+ return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
+ }
+
+ if (auto Op = dyn_cast<Operator>(V)) {
+ if (Op->getOpcode() == Instruction::BitCast)
+ return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);
+
+ if (Op->getOpcode() == Instruction::Select)
+ return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
+ isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
+ }
+
+ return false;
+}
+
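isKnownTypeIdMember() answers membership directly from !type metadata, peeling through GEPs, bitcasts and selects, so lowerTypeTestCall() below can fold provably-true tests without the GlobalLayout map that the deleted containsValue() required. For reference, a sketch of how such metadata is attached to a member of "typeid1" at offset 0 (illustrative only):

    // Tag GV as a member of type id "typeid1" at byte offset 0; this is the
    // shape isKnownTypeIdMember() inspects via MD_type.
    GV->addMetadata(
        LLVMContext::MD_type,
        *MDNode::get(Ctx, {ConstantAsMetadata::get(
                               ConstantInt::get(Type::getInt64Ty(Ctx), 0)),
                           MDString::get(Ctx, "typeid1")}));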
/// Lower a llvm.type.test call to its implementation. Returns the value to
/// replace the call with.
-Value *LowerTypeTestsModule::lowerBitSetCall(
- CallInst *CI, BitSetInfo &BSI, ByteArrayInfo *&BAI,
- Constant *CombinedGlobalIntAddr,
- const DenseMap<GlobalObject *, uint64_t> &GlobalLayout) {
+Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
+ const TypeIdLowering &TIL) {
+ if (TIL.TheKind == TypeTestResolution::Unsat)
+ return ConstantInt::getFalse(M.getContext());
+
Value *Ptr = CI->getArgOperand(0);
const DataLayout &DL = M.getDataLayout();
-
- if (BSI.containsValue(DL, GlobalLayout, Ptr))
+ if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
return ConstantInt::getTrue(M.getContext());
- Constant *OffsetedGlobalAsInt = ConstantExpr::getAdd(
- CombinedGlobalIntAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset));
-
BasicBlock *InitialBB = CI->getParent();
IRBuilder<> B(CI);
Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);
- if (BSI.isSingleOffset())
+ Constant *OffsetedGlobalAsInt =
+ ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
+ if (TIL.TheKind == TypeTestResolution::Single)
return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);
Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);
- Value *BitOffset;
- if (BSI.AlignLog2 == 0) {
- BitOffset = PtrOffset;
- } else {
- // We need to check that the offset both falls within our range and is
- // suitably aligned. We can check both properties at the same time by
- // performing a right rotate by log2(alignment) followed by an integer
- // comparison against the bitset size. The rotate will move the lower
- // order bits that need to be zero into the higher order bits of the
- // result, causing the comparison to fail if they are nonzero. The rotate
- // also conveniently gives us a bit offset to use during the load from
- // the bitset.
- Value *OffsetSHR =
- B.CreateLShr(PtrOffset, ConstantInt::get(IntPtrTy, BSI.AlignLog2));
- Value *OffsetSHL = B.CreateShl(
- PtrOffset,
- ConstantInt::get(IntPtrTy, DL.getPointerSizeInBits(0) - BSI.AlignLog2));
- BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
- }
-
- Constant *BitSizeConst = ConstantInt::get(IntPtrTy, BSI.BitSize);
+ // We need to check that the offset both falls within our range and is
+ // suitably aligned. We can check both properties at the same time by
+ // performing a right rotate by log2(alignment) followed by an integer
+ // comparison against the bitset size. The rotate will move the lower
+ // order bits that need to be zero into the higher order bits of the
+ // result, causing the comparison to fail if they are nonzero. The rotate
+ // also conveniently gives us a bit offset to use during the load from
+ // the bitset.
+ Value *OffsetSHR =
+ B.CreateLShr(PtrOffset, ConstantExpr::getZExt(TIL.AlignLog2, IntPtrTy));
+ Value *OffsetSHL = B.CreateShl(
+ PtrOffset, ConstantExpr::getZExt(
+ ConstantExpr::getSub(
+ ConstantInt::get(Int8Ty, DL.getPointerSizeInBits(0)),
+ TIL.AlignLog2),
+ IntPtrTy));
+ Value *BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
+
+ Constant *BitSizeConst = ConstantExpr::getZExt(TIL.Size, IntPtrTy);
Value *OffsetInRange = B.CreateICmpULT(BitOffset, BitSizeConst);
// If the bit set is all ones, testing against it is unnecessary.
- if (BSI.isAllOnes())
+ if (TIL.TheKind == TypeTestResolution::AllOnes)
return OffsetInRange;
TerminatorInst *Term = SplitBlockAndInsertIfThen(OffsetInRange, CI, false);
@@ -537,7 +577,7 @@ Value *LowerTypeTestsModule::lowerBitSetCall(
// Now that we know that the offset is in range and aligned, load the
// appropriate bit from the bitset.
- Value *Bit = createBitSetTest(ThenB, BSI, BAI, BitOffset);
+ Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);
// The value we want is 0 if we came directly from the initial block
// (having failed the range or alignment checks), or the loaded bit if
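The rotate trick described in the comment above, restated as plain integer arithmetic (a self-contained sketch, not LLVM API): rotating the offset right by log2(alignment) moves any misaligned low bits into the high bits, so a single unsigned comparison checks range and alignment together, and the rotated value doubles as the bit index.

    #include <cstdint>

    // Combined range + alignment check for a 64-bit pointer offset.
    static bool inRangeAndAligned(uint64_t PtrOffset, unsigned AlignLog2,
                                  uint64_t BitSize, uint64_t &BitOffset) {
      uint64_t Rot = PtrOffset >> AlignLog2;
      if (AlignLog2) // guard: shifting by 64 would be undefined behavior
        Rot |= PtrOffset << (64 - AlignLog2);
      BitOffset = Rot; // also the index used for the byte-array load
      return Rot < BitSize;
    }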
@@ -622,11 +662,7 @@ void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
void LowerTypeTestsModule::lowerTypeTestCalls(
ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
- Constant *CombinedGlobalIntAddr =
- ConstantExpr::getPtrToInt(CombinedGlobalAddr, IntPtrTy);
- DenseMap<GlobalObject *, uint64_t> GlobalObjLayout;
- for (auto &P : GlobalLayout)
- GlobalObjLayout[P.first->getGlobal()] = P.second;
+ CombinedGlobalAddr = ConstantExpr::getBitCast(CombinedGlobalAddr, Int8PtrTy);
// For each type identifier in this disjoint set...
for (Metadata *TypeId : TypeIds) {
@@ -640,13 +676,43 @@ void LowerTypeTestsModule::lowerTypeTestCalls(
BSI.print(dbgs());
});
- ByteArrayInfo *BAI = nullptr;
+ TypeIdLowering TIL;
+ TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
+ Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset));
+ TIL.AlignLog2 = ConstantInt::get(Int8Ty, BSI.AlignLog2);
+ if (BSI.isAllOnes()) {
+ TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
+ : TypeTestResolution::AllOnes;
+ TIL.SizeBitWidth = (BSI.BitSize <= 256) ? 8 : 32;
+ TIL.Size = ConstantInt::get((BSI.BitSize <= 256) ? Int8Ty : Int32Ty,
+ BSI.BitSize);
+ } else if (BSI.BitSize <= 64) {
+ TIL.TheKind = TypeTestResolution::Inline;
+ TIL.SizeBitWidth = (BSI.BitSize <= 32) ? 5 : 6;
+ TIL.Size = ConstantInt::get(Int8Ty, BSI.BitSize);
+ uint64_t InlineBits = 0;
+ for (auto Bit : BSI.Bits)
+ InlineBits |= uint64_t(1) << Bit;
+ if (InlineBits == 0)
+ TIL.TheKind = TypeTestResolution::Unsat;
+ else
+ TIL.InlineBits = ConstantInt::get(
+ (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
+ } else {
+ TIL.TheKind = TypeTestResolution::ByteArray;
+ TIL.SizeBitWidth = (BSI.BitSize <= 256) ? 8 : 32;
+ TIL.Size = ConstantInt::get((BSI.BitSize <= 256) ? Int8Ty : Int32Ty,
+ BSI.BitSize);
+ ++NumByteArraysCreated;
+ ByteArrayInfo *BAI = createByteArray(BSI);
+ TIL.TheByteArray = BAI->ByteArray;
+ TIL.BitMask = BAI->MaskGlobal;
+ }
// Lower each call to llvm.type.test for this type identifier.
for (CallInst *CI : TypeTestCallSites[TypeId]) {
++NumTypeTestCallsLowered;
- Value *Lowered =
- lowerBitSetCall(CI, BSI, BAI, CombinedGlobalIntAddr, GlobalObjLayout);
+ Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
CI->replaceAllUsesWith(Lowered);
CI->eraseFromParent();
}
@@ -1080,6 +1146,22 @@ void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
/// Lower all type tests in this module.
LowerTypeTestsModule::LowerTypeTestsModule(Module &M) : M(M) {
+ // Handle the command-line summary arguments. This code is for testing
+ // purposes only, so we handle errors directly.
+ if (!ClSummaryAction.empty()) {
+ OwnedSummary = make_unique<ModuleSummaryIndex>();
+ if (!ClReadSummary.empty()) {
+ ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
+ ": ");
+ auto ReadSummaryFile =
+ ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
+
+ yaml::Input In(ReadSummaryFile->getBuffer());
+ In >> *OwnedSummary;
+ ExitOnErr(errorCodeToError(In.error()));
+ }
+ }
+
Triple TargetTriple(M.getTargetTriple());
LinkerSubsectionsViaSymbols = TargetTriple.isMacOSX();
Arch = TargetTriple.getArch();
@@ -1087,6 +1169,20 @@ LowerTypeTestsModule::LowerTypeTestsModule(Module &M) : M(M) {
ObjectFormat = TargetTriple.getObjectFormat();
}
+LowerTypeTestsModule::~LowerTypeTestsModule() {
+ if (ClSummaryAction.empty() || ClWriteSummary.empty())
+ return;
+
+ ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
+ ": ");
+ std::error_code EC;
+ raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::F_Text);
+ ExitOnErr(errorCodeToError(EC));
+
+ yaml::Output Out(OS);
+ Out << *OwnedSummary;
+}
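These hooks let the pass be exercised from opt without a full ThinLTO link; something like the following round-trips a summary through YAML (the value for -lowertypetests-summary-action depends on actions the pass defines, so <action> is a placeholder here):

    opt -lowertypetests \
        -lowertypetests-summary-action=<action> \
        -lowertypetests-read-summary=in.yaml \
        -lowertypetests-write-summary=out.yaml \
        -S in.ll -o out.ll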
+
bool LowerTypeTestsModule::lower() {
Function *TypeTestFunc =
M.getFunction(Intrinsic::getName(Intrinsic::type_test));