summaryrefslogtreecommitdiff
path: root/lib/DebugInfo
diff options
context:
space:
mode:
Diffstat (limited to 'lib/DebugInfo')
-rw-r--r-- lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp |   1
-rw-r--r-- lib/DebugInfo/DWARF/DWARFContext.cpp          | 153
-rw-r--r-- lib/DebugInfo/DWARF/DWARFUnit.cpp             | 505
3 files changed, 567 insertions(+), 92 deletions(-)
diff --git a/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp b/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
index 17f29737bf93b..6a6b7fc6fc203 100644
--- a/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
+++ b/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
@@ -83,6 +83,7 @@ bool DWARFAcceleratorTable::validateForms() {
!FormValue.isFormClass(DWARFFormValue::FC_Flag)) ||
FormValue.getForm() == dwarf::DW_FORM_sdata)
return false;
+ break;
default:
break;
}
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index a5defa90eb35f..eb23ca8229a31 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -88,70 +88,101 @@ static void dumpUUID(raw_ostream &OS, const ObjectFile &Obj) {
}
}
-static void
-dumpDWARFv5StringOffsetsSection(raw_ostream &OS, StringRef SectionName,
- const DWARFObject &Obj,
- const DWARFSection &StringOffsetsSection,
- StringRef StringSection, bool LittleEndian) {
+using ContributionCollection =
+ std::vector<Optional<StrOffsetsContributionDescriptor>>;
+
+// Collect all the contributions to the string offsets table from all units,
+// sort them by their starting offsets and remove duplicates.
+static ContributionCollection
+collectContributionData(DWARFContext::cu_iterator_range CUs,
+ DWARFContext::tu_section_iterator_range TUSs) {
+ ContributionCollection Contributions;
+ for (const auto &CU : CUs)
+ Contributions.push_back(CU->getStringOffsetsTableContribution());
+ for (const auto &TUS : TUSs)
+ for (const auto &TU : TUS)
+ Contributions.push_back(TU->getStringOffsetsTableContribution());
+
+ // Sort the contributions so that any invalid ones are placed at
+ // the start of the contributions vector. This way they are reported
+ // first.
+ std::sort(Contributions.begin(), Contributions.end(),
+ [](const Optional<StrOffsetsContributionDescriptor> &L,
+ const Optional<StrOffsetsContributionDescriptor> &R) {
+ if (L && R) return L->Base < R->Base;
+ return R.hasValue();
+ });
+
+ // Uniquify contributions, as it is possible that units (specifically
+ // type units in dwo or dwp files) share contributions. We don't want
+ // to report them more than once.
+ Contributions.erase(
+ std::unique(Contributions.begin(), Contributions.end(),
+ [](const Optional<StrOffsetsContributionDescriptor> &L,
+ const Optional<StrOffsetsContributionDescriptor> &R) {
+ if (L && R)
+ return L->Base == R->Base && L->Size == R->Size;
+ return false;
+ }),
+ Contributions.end());
+ return Contributions;
+}
+
+static void dumpDWARFv5StringOffsetsSection(
+ raw_ostream &OS, StringRef SectionName, const DWARFObject &Obj,
+ const DWARFSection &StringOffsetsSection, StringRef StringSection,
+ DWARFContext::cu_iterator_range CUs,
+ DWARFContext::tu_section_iterator_range TUSs, bool LittleEndian) {
+ auto Contributions = collectContributionData(CUs, TUSs);
DWARFDataExtractor StrOffsetExt(Obj, StringOffsetsSection, LittleEndian, 0);
- uint32_t Offset = 0;
+ DataExtractor StrData(StringSection, LittleEndian, 0);
uint64_t SectionSize = StringOffsetsSection.Data.size();
-
- while (Offset < SectionSize) {
- unsigned Version = 0;
- DwarfFormat Format = DWARF32;
- unsigned EntrySize = 4;
- // Perform validation and extract the segment size from the header.
- if (!StrOffsetExt.isValidOffsetForDataOfSize(Offset, 4)) {
+ uint32_t Offset = 0;
+ for (auto &Contribution : Contributions) {
+ // Report an ill-formed contribution.
+ if (!Contribution) {
OS << "error: invalid contribution to string offsets table in section ."
<< SectionName << ".\n";
return;
}
- uint32_t ContributionStart = Offset;
- uint64_t ContributionSize = StrOffsetExt.getU32(&Offset);
- // A contribution size of 0xffffffff indicates DWARF64, with the actual size
- // in the following 8 bytes. Otherwise, the DWARF standard mandates that
- // the contribution size must be at most 0xfffffff0.
- if (ContributionSize == 0xffffffff) {
- if (!StrOffsetExt.isValidOffsetForDataOfSize(Offset, 8)) {
- OS << "error: invalid contribution to string offsets table in section ."
- << SectionName << ".\n";
- return;
- }
- Format = DWARF64;
- EntrySize = 8;
- ContributionSize = StrOffsetExt.getU64(&Offset);
- } else if (ContributionSize > 0xfffffff0) {
- OS << "error: invalid contribution to string offsets table in section ."
+
+ dwarf::DwarfFormat Format = Contribution->getFormat();
+ uint16_t Version = Contribution->getVersion();
+ uint64_t ContributionHeader = Contribution->Base;
+ // In DWARF v5 there is a contribution header that immediately precedes
+ // the string offsets base (the location we have previously retrieved from
+ // the CU DIE's DW_AT_str_offsets attribute). The header is located either
+ // 8 or 16 bytes before the base, depending on the contribution's format.
+ if (Version >= 5)
+ ContributionHeader -= Format == DWARF32 ? 8 : 16;
+
+ // Detect overlapping contributions.
+ if (Offset > ContributionHeader) {
+ OS << "error: overlapping contributions to string offsets table in "
+ "section ."
<< SectionName << ".\n";
return;
}
-
- // We must ensure that we don't read a partial record at the end, so we
- // validate for a multiple of EntrySize. Also, we're expecting a version
- // number and padding, which adds an additional 4 bytes.
- uint64_t ValidationSize =
- 4 + ((ContributionSize + EntrySize - 1) & (-(uint64_t)EntrySize));
- if (!StrOffsetExt.isValidOffsetForDataOfSize(Offset, ValidationSize)) {
- OS << "error: contribution to string offsets table in section ."
- << SectionName << " has invalid length.\n";
- return;
+ // Report a gap in the table.
+ if (Offset < ContributionHeader) {
+ OS << format("0x%8.8x: Gap, length = ", Offset);
+ OS << (ContributionHeader - Offset) << "\n";
}
-
- Version = StrOffsetExt.getU16(&Offset);
- Offset += 2;
- OS << format("0x%8.8x: ", ContributionStart);
- OS << "Contribution size = " << ContributionSize
+ OS << format("0x%8.8x: ", (uint32_t)ContributionHeader);
+ OS << "Contribution size = " << Contribution->Size
+ << ", Format = " << (Format == DWARF32 ? "DWARF32" : "DWARF64")
<< ", Version = " << Version << "\n";
- uint32_t ContributionBase = Offset;
- DataExtractor StrData(StringSection, LittleEndian, 0);
- while (Offset - ContributionBase < ContributionSize) {
+ Offset = Contribution->Base;
+ unsigned EntrySize = Contribution->getDwarfOffsetByteSize();
+ while (Offset - Contribution->Base < Contribution->Size) {
OS << format("0x%8.8x: ", Offset);
- // FIXME: We can only extract strings in DWARF32 format at the moment.
+ // FIXME: We can only extract strings if the offset fits in 32 bits.
uint64_t StringOffset =
StrOffsetExt.getRelocatedValue(EntrySize, &Offset);
- if (Format == DWARF32) {
+ // Extract the string if we can and display it. Otherwise just report
+ // the offset.
+ if (StringOffset <= std::numeric_limits<uint32_t>::max()) {
uint32_t StringOffset32 = (uint32_t)StringOffset;
OS << format("%8.8x ", StringOffset32);
const char *S = StrData.getCStr(&StringOffset32);
@@ -162,6 +193,11 @@ dumpDWARFv5StringOffsetsSection(raw_ostream &OS, StringRef SectionName,
OS << "\n";
}
}
+ // Report a gap at the end of the table.
+ if (Offset < SectionSize) {
+ OS << format("0x%8.8x: Gap, length = ", Offset);
+ OS << (SectionSize - Offset) << "\n";
+ }
}
// Dump a DWARF string offsets section. This may be a DWARF v5 formatted
@@ -170,17 +206,18 @@ dumpDWARFv5StringOffsetsSection(raw_ostream &OS, StringRef SectionName,
// a header containing size and version number. Alternatively, it may be a
// monolithic series of string offsets, as generated by the pre-DWARF v5
// implementation of split DWARF.
-static void dumpStringOffsetsSection(raw_ostream &OS, StringRef SectionName,
- const DWARFObject &Obj,
- const DWARFSection &StringOffsetsSection,
- StringRef StringSection, bool LittleEndian,
- unsigned MaxVersion) {
+static void dumpStringOffsetsSection(
+ raw_ostream &OS, StringRef SectionName, const DWARFObject &Obj,
+ const DWARFSection &StringOffsetsSection, StringRef StringSection,
+ DWARFContext::cu_iterator_range CUs,
+ DWARFContext::tu_section_iterator_range TUSs, bool LittleEndian,
+ unsigned MaxVersion) {
// If we have at least one (compile or type) unit with DWARF v5 or greater,
// we assume that the section is formatted like a DWARF v5 string offsets
// section.
if (MaxVersion >= 5)
dumpDWARFv5StringOffsetsSection(OS, SectionName, Obj, StringOffsetsSection,
- StringSection, LittleEndian);
+ StringSection, CUs, TUSs, LittleEndian);
else {
DataExtractor strOffsetExt(StringOffsetsSection.Data, LittleEndian, 0);
uint32_t offset = 0;
@@ -468,12 +505,14 @@ void DWARFContext::dump(
DObj->getStringOffsetSection().Data))
dumpStringOffsetsSection(
OS, "debug_str_offsets", *DObj, DObj->getStringOffsetSection(),
- DObj->getStringSection(), isLittleEndian(), getMaxVersion());
+ DObj->getStringSection(), compile_units(), type_unit_sections(),
+ isLittleEndian(), getMaxVersion());
if (shouldDump(ExplicitDWO, ".debug_str_offsets.dwo", DIDT_ID_DebugStrOffsets,
DObj->getStringOffsetDWOSection().Data))
dumpStringOffsetsSection(
OS, "debug_str_offsets.dwo", *DObj, DObj->getStringOffsetDWOSection(),
- DObj->getStringDWOSection(), isLittleEndian(), getMaxVersion());
+ DObj->getStringDWOSection(), dwo_compile_units(),
+ dwo_type_unit_sections(), isLittleEndian(), getMaxVersion());
if (shouldDump(Explicit, ".gnu_index", DIDT_ID_GdbIndex,
DObj->getGdbIndexSection())) {
diff --git a/lib/DebugInfo/DWARF/DWARFUnit.cpp b/lib/DebugInfo/DWARF/DWARFUnit.cpp
index c3d8ff2cbc294..df55d7debf926 100644
--- a/lib/DebugInfo/DWARF/DWARFUnit.cpp
+++ b/lib/DebugInfo/DWARF/DWARFUnit.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
@@ -79,8 +80,10 @@ bool DWARFUnit::getAddrOffsetSectionItem(uint32_t Index,
bool DWARFUnit::getStringOffsetSectionItem(uint32_t Index,
uint64_t &Result) const {
- unsigned ItemSize = getDwarfOffsetByteSize();
- uint32_t Offset = StringOffsetSectionBase + Index * ItemSize;
+ if (!StringOffsetsTableContribution)
+ return false;
+ unsigned ItemSize = getDwarfStringOffsetsByteSize();
+ uint32_t Offset = getStringOffsetsBase() + Index * ItemSize;
if (StringOffsetSection.Data.size() < Offset + ItemSize)
return false;
DWARFDataExtractor DA(Context.getDWARFObj(), StringOffsetSection,
@@ -251,15 +254,28 @@ size_t DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
RangeSectionBase = toSectionOffset(UnitDie.find(DW_AT_rnglists_base), 0);
}
- // In general, we derive the offset of the unit's contibution to the
- // debug_str_offsets{.dwo} section from the unit DIE's
- // DW_AT_str_offsets_base attribute. In dwp files we add to it the offset
- // we get from the index table.
- StringOffsetSectionBase =
- toSectionOffset(UnitDie.find(DW_AT_str_offsets_base), 0);
+ // In general, in DWARF v5 and beyond we derive the start of the unit's
+ // contribution to the string offsets table from the unit DIE's
+ // DW_AT_str_offsets_base attribute. Split DWARF units do not use this
+ // attribute, so we assume that there is a contribution to the string
+ // offsets table starting at offset 0 of the debug_str_offsets.dwo section.
+ // In both cases we need to determine the format of the contribution,
+ // which may differ from the unit's format.
+ uint64_t StringOffsetsContributionBase =
+ isDWO ? 0 : toSectionOffset(UnitDie.find(DW_AT_str_offsets_base), 0);
if (IndexEntry)
if (const auto *C = IndexEntry->getOffset(DW_SECT_STR_OFFSETS))
- StringOffsetSectionBase += C->Offset;
+ StringOffsetsContributionBase += C->Offset;
+
+ DWARFDataExtractor DA(Context.getDWARFObj(), StringOffsetSection,
+ isLittleEndian, 0);
+ if (isDWO)
+ StringOffsetsTableContribution =
+ determineStringOffsetsTableContributionDWO(
+ DA, StringOffsetsContributionBase);
+ else if (getVersion() >= 5)
+ StringOffsetsTableContribution = determineStringOffsetsTableContribution(
+ DA, StringOffsetsContributionBase);
// Don't fall back to DW_AT_GNU_ranges_base: it should be ignored for
// skeleton CU DIE, so that DWARF users not aware of it are not broken.
@@ -344,45 +360,378 @@ void DWARFUnit::collectAddressRanges(DWARFAddressRangesVector &CURanges) {
clearDIEs(true);
}
-void DWARFUnit::updateAddressDieMap(DWARFDie Die) {
- if (Die.isSubroutineDIE()) {
+// Populates a map from PC addresses to subprogram DIEs.
+//
+// This routine tries to look at the smallest amount of the debug info it can
+// to locate the DIEs. This is because many subprograms will never end up being
+// read or needed at all. We want to be as lazy as possible.
+void DWARFUnit::buildSubprogramDIEAddrMap() {
+ assert(SubprogramDIEAddrMap.empty() && "Must only build this map once!");
+ SmallVector<DWARFDie, 16> Worklist;
+ Worklist.push_back(getUnitDIE());
+ do {
+ DWARFDie Die = Worklist.pop_back_val();
+
+ // Queue up child DIEs to recurse through.
+ // FIXME: This causes us to read a lot more debug info than we really need.
+ // We should look at pruning out DIEs which cannot transitively hold
+ // separate subprograms.
+ for (DWARFDie Child : Die.children())
+ Worklist.push_back(Child);
+
+ // If handling a non-subprogram DIE, nothing else to do.
+ if (!Die.isSubprogramDIE())
+ continue;
+
+ // For subprogram DIEs, store them, and insert relevant markers into the
+ // address map. We don't care about overlap at all here as DWARF doesn't
+ // meaningfully support that, so we simply will insert a range with no DIE
+ // starting from the high PC. In the event there are overlaps, sorting
+ // these may truncate things in surprising ways but still will allow
+ // lookups to proceed.
+ int DIEIndex = SubprogramDIEAddrInfos.size();
+ SubprogramDIEAddrInfos.push_back({Die, (uint64_t)-1, {}});
for (const auto &R : Die.getAddressRanges()) {
// Ignore 0-sized ranges.
if (R.LowPC == R.HighPC)
continue;
- auto B = AddrDieMap.upper_bound(R.LowPC);
- if (B != AddrDieMap.begin() && R.LowPC < (--B)->second.first) {
- // The range is a sub-range of existing ranges, we need to split the
- // existing range.
- if (R.HighPC < B->second.first)
- AddrDieMap[R.HighPC] = B->second;
- if (R.LowPC > B->first)
- AddrDieMap[B->first].first = R.LowPC;
+
+ SubprogramDIEAddrMap.push_back({R.LowPC, DIEIndex});
+ SubprogramDIEAddrMap.push_back({R.HighPC, -1});
+
+ if (R.LowPC < SubprogramDIEAddrInfos.back().SubprogramBasePC)
+ SubprogramDIEAddrInfos.back().SubprogramBasePC = R.LowPC;
+ }
+ } while (!Worklist.empty());
+
+ if (SubprogramDIEAddrMap.empty()) {
+ // If we found no ranges, create a no-op map so that lookups remain simple
+ // but never find anything.
+ SubprogramDIEAddrMap.push_back({0, -1});
+ return;
+ }
+
+ // Next, sort the ranges and remove both exact duplicates and runs with the
+ // same DIE index. We order the ranges so that non-empty ranges are
+ // preferred. Because there may be ties, we also need to use stable sort.
+ std::stable_sort(SubprogramDIEAddrMap.begin(), SubprogramDIEAddrMap.end(),
+ [](const std::pair<uint64_t, int64_t> &LHS,
+ const std::pair<uint64_t, int64_t> &RHS) {
+ if (LHS.first < RHS.first)
+ return true;
+ if (LHS.first > RHS.first)
+ return false;
+
+ // For ranges that start at the same address, keep the one
+ // with a DIE.
+ if (LHS.second != -1 && RHS.second == -1)
+ return true;
+
+ return false;
+ });
+ SubprogramDIEAddrMap.erase(
+ std::unique(SubprogramDIEAddrMap.begin(), SubprogramDIEAddrMap.end(),
+ [](const std::pair<uint64_t, int64_t> &LHS,
+ const std::pair<uint64_t, int64_t> &RHS) {
+ // If the start addresses are exactly the same, we can
+ // remove all but the first one as it is the only one that
+ // will be found and used.
+ //
+ // If the DIE indices are the same, we can "merge" the
+ // ranges by eliminating the second.
+ return LHS.first == RHS.first || LHS.second == RHS.second;
+ }),
+ SubprogramDIEAddrMap.end());
+
+ assert(SubprogramDIEAddrMap.back().second == -1 &&
+ "The last interval must not have a DIE as each DIE's address range is "
+ "bounded.");
+}
+
+// Build the second level of mapping from PC to DIE, specifically one that maps
+// a PC *within* a particular DWARF subprogram into a precise, maximally nested
+// inlined subroutine DIE (if any exists). We build a separate map for each
+// subprogram because many subprograms will never get queried for an address
+// and this allows us to be significantly lazier in reading the DWARF itself.
+void DWARFUnit::buildInlinedSubroutineDIEAddrMap(
+ SubprogramDIEAddrInfo &SPInfo) {
+ auto &AddrMap = SPInfo.InlinedSubroutineDIEAddrMap;
+ uint64_t BasePC = SPInfo.SubprogramBasePC;
+
+ auto SubroutineAddrMapSorter = [](const std::pair<int, int> &LHS,
+ const std::pair<int, int> &RHS) {
+ if (LHS.first < RHS.first)
+ return true;
+ if (LHS.first > RHS.first)
+ return false;
+
+ // For ranges that start at the same address, keep the
+ // non-empty one.
+ if (LHS.second != -1 && RHS.second == -1)
+ return true;
+
+ return false;
+ };
+ auto SubroutineAddrMapUniquer = [](const std::pair<int, int> &LHS,
+ const std::pair<int, int> &RHS) {
+ // If the start addresses are exactly the same, we can
+ // remove all but the first one as it is the only one that
+ // will be found and used.
+ //
+ // If the DIE indices are the same, we can "merge" the
+ // ranges by eliminating the second.
+ return LHS.first == RHS.first || LHS.second == RHS.second;
+ };
+
+ struct DieAndParentIntervalRange {
+ DWARFDie Die;
+ int ParentIntervalsBeginIdx, ParentIntervalsEndIdx;
+ };
+
+ SmallVector<DieAndParentIntervalRange, 16> Worklist;
+ auto EnqueueChildDIEs = [&](const DWARFDie &Die, int ParentIntervalsBeginIdx,
+ int ParentIntervalsEndIdx) {
+ for (DWARFDie Child : Die.children())
+ Worklist.push_back(
+ {Child, ParentIntervalsBeginIdx, ParentIntervalsEndIdx});
+ };
+ EnqueueChildDIEs(SPInfo.SubprogramDIE, 0, 0);
+ while (!Worklist.empty()) {
+ DWARFDie Die = Worklist.back().Die;
+ int ParentIntervalsBeginIdx = Worklist.back().ParentIntervalsBeginIdx;
+ int ParentIntervalsEndIdx = Worklist.back().ParentIntervalsEndIdx;
+ Worklist.pop_back();
+
+ // If we encounter a nested subprogram, simply ignore it. We map to
+ // (disjoint) subprograms before arriving here and we don't want to examine
+ // any inlined subroutines of an unrelated subprogram.
+ if (Die.getTag() == DW_TAG_subprogram)
+ continue;
+
+ // For non-subroutines, just recurse to keep searching for inlined
+ // subroutines.
+ if (Die.getTag() != DW_TAG_inlined_subroutine) {
+ EnqueueChildDIEs(Die, ParentIntervalsBeginIdx, ParentIntervalsEndIdx);
+ continue;
+ }
+
+ // Capture the inlined subroutine DIE that we will reference from the map.
+ int DIEIndex = InlinedSubroutineDIEs.size();
+ InlinedSubroutineDIEs.push_back(Die);
+
+ int DieIntervalsBeginIdx = AddrMap.size();
+ // First collect the PC ranges for this DIE into our subroutine interval
+ // map.
+ for (auto R : Die.getAddressRanges()) {
+ // Clamp the PCs to be above the base.
+ R.LowPC = std::max(R.LowPC, BasePC);
+ R.HighPC = std::max(R.HighPC, BasePC);
+ // Compute relative PCs from the subprogram base and drop down to an
+ // unsigned 32-bit int to represent them within the data structure. This
+ // lets us cover a 4gb single subprogram. Because subprograms may be
+ // partitioned into distant parts of a binary (think hot/cold
+ // partitioning) we want to preserve as much as we can here without
+ // burning extra memory. Past that, we will simply truncate and lose the
+ // ability to map those PCs to a DIE more precise than the subprogram.
+ const uint32_t MaxRelativePC = std::numeric_limits<uint32_t>::max();
+ uint32_t RelativeLowPC = (R.LowPC - BasePC) > (uint64_t)MaxRelativePC
+ ? MaxRelativePC
+ : (uint32_t)(R.LowPC - BasePC);
+ uint32_t RelativeHighPC = (R.HighPC - BasePC) > (uint64_t)MaxRelativePC
+ ? MaxRelativePC
+ : (uint32_t)(R.HighPC - BasePC);
+ // Ignore empty or bogus ranges.
+ if (RelativeLowPC >= RelativeHighPC)
+ continue;
+ AddrMap.push_back({RelativeLowPC, DIEIndex});
+ AddrMap.push_back({RelativeHighPC, -1});
+ }
+
+ // If there are no address ranges, there is nothing to do to map into them
+ // and there cannot be any child subroutine DIEs with address ranges of
+ // interest as those would all be required to nest within this DIE's
+ // non-existent ranges, so we can immediately continue to the next DIE in
+ // the worklist.
+ if (DieIntervalsBeginIdx == (int)AddrMap.size())
+ continue;
+
+ // The PCs from this DIE should never overlap, so we can easily sort them
+ // here.
+ std::sort(AddrMap.begin() + DieIntervalsBeginIdx, AddrMap.end(),
+ SubroutineAddrMapSorter);
+ // Remove any dead ranges. These should only come from "empty" ranges that
+ // were clobbered by some other range.
+ AddrMap.erase(std::unique(AddrMap.begin() + DieIntervalsBeginIdx,
+ AddrMap.end(), SubroutineAddrMapUniquer),
+ AddrMap.end());
+
+ // Compute the end index of this DIE's addr map intervals.
+ int DieIntervalsEndIdx = AddrMap.size();
+
+ assert(DieIntervalsBeginIdx != DieIntervalsEndIdx &&
+ "Must not have an empty map for this layer!");
+ assert(AddrMap.back().second == -1 && "Must end with an empty range!");
+ assert(std::is_sorted(AddrMap.begin() + DieIntervalsBeginIdx, AddrMap.end(),
+ less_first()) &&
+ "Failed to sort this DIE's interals!");
+
+ // If we have any parent intervals, walk the newly added ranges and find
+ // the parent ranges they were inserted into. Both of these are sorted and
+ // neither has any overlaps. We need to append new ranges to split up any
+ // parent ranges these new ranges would overlap when we merge them.
+ if (ParentIntervalsBeginIdx != ParentIntervalsEndIdx) {
+ int ParentIntervalIdx = ParentIntervalsBeginIdx;
+ for (int i = DieIntervalsBeginIdx, e = DieIntervalsEndIdx - 1; i < e;
+ ++i) {
+ const uint32_t IntervalStart = AddrMap[i].first;
+ const uint32_t IntervalEnd = AddrMap[i + 1].first;
+ const int IntervalDieIdx = AddrMap[i].second;
+ if (IntervalDieIdx == -1) {
+ // For empty intervals, nothing is required. This is a bit surprising
+ // however. If the prior interval overlaps a parent interval and this
+ // would be necessary to mark the end, we will synthesize a new end
+ // that switches back to the parent DIE below. And this interval will
+ // get dropped in favor of one with a DIE attached. However, we'll
+ // still include this and so worst-case, it will still end the prior
+ // interval.
+ continue;
+ }
+
+ // We are walking the new ranges in order, so search forward from the
+ // last point for a parent range that might overlap.
+ auto ParentIntervalsRange =
+ make_range(AddrMap.begin() + ParentIntervalIdx,
+ AddrMap.begin() + ParentIntervalsEndIdx);
+ assert(std::is_sorted(ParentIntervalsRange.begin(),
+ ParentIntervalsRange.end(), less_first()) &&
+ "Unsorted parent intervals can't be searched!");
+ auto PI = std::upper_bound(
+ ParentIntervalsRange.begin(), ParentIntervalsRange.end(),
+ IntervalStart,
+ [](uint32_t LHS, const std::pair<uint32_t, int32_t> &RHS) {
+ return LHS < RHS.first;
+ });
+ if (PI == ParentIntervalsRange.begin() ||
+ PI == ParentIntervalsRange.end())
+ continue;
+
+ ParentIntervalIdx = PI - AddrMap.begin();
+ int32_t &ParentIntervalDieIdx = std::prev(PI)->second;
+ uint32_t &ParentIntervalStart = std::prev(PI)->first;
+ const uint32_t ParentIntervalEnd = PI->first;
+
+ // If the new range starts exactly at the position of the parent range,
+ // we need to adjust the parent range. Note that these collisions can
+ // only happen with the original parent range because we will merge any
+ // adjacent ranges in the child.
+ if (IntervalStart == ParentIntervalStart) {
+ // If there will be a tail, just shift the start of the parent
+ // forward. Note that this cannot change the parent ordering.
+ if (IntervalEnd < ParentIntervalEnd) {
+ ParentIntervalStart = IntervalEnd;
+ continue;
+ }
+ // Otherwise, mark this as becoming empty so we'll remove it and
+ // prefer the child range.
+ ParentIntervalDieIdx = -1;
+ continue;
+ }
+
+ // Finally, if the parent interval will need to remain as a prefix to
+ // this one, insert a new interval to cover any tail.
+ if (IntervalEnd < ParentIntervalEnd)
+ AddrMap.push_back({IntervalEnd, ParentIntervalDieIdx});
}
- AddrDieMap[R.LowPC] = std::make_pair(R.HighPC, Die);
}
+
+ // Note that we don't need to re-sort even this DIE's address map intervals
+ // after this. All of the newly added intervals actually fill in *gaps* in
+ // this DIE's address map, and we know that children won't need to lookup
+ // into those gaps.
+
+ // Recurse through its children, giving them the interval map range of this
+ // DIE to use as their parent intervals.
+ EnqueueChildDIEs(Die, DieIntervalsBeginIdx, DieIntervalsEndIdx);
+ }
+
+ if (AddrMap.empty()) {
+ AddrMap.push_back({0, -1});
+ return;
}
- // Parent DIEs are added to the AddrDieMap prior to the Children DIEs to
- // simplify the logic to update AddrDieMap. The child's range will always
- // be equal or smaller than the parent's range. With this assumption, when
- // adding one range into the map, it will at most split a range into 3
- // sub-ranges.
- for (DWARFDie Child = Die.getFirstChild(); Child; Child = Child.getSibling())
- updateAddressDieMap(Child);
+
+ // Now that we've added all of the intervals needed, we need to resort and
+ // unique them. Most notably, this will remove all the empty ranges that had
+ // a parent range covering, etc. We only expect a single non-empty interval
+ // at any given start point, so we just use std::sort. This could potentially
+ // produce non-deterministic maps for invalid DWARF.
+ std::sort(AddrMap.begin(), AddrMap.end(), SubroutineAddrMapSorter);
+ AddrMap.erase(
+ std::unique(AddrMap.begin(), AddrMap.end(), SubroutineAddrMapUniquer),
+ AddrMap.end());
}
DWARFDie DWARFUnit::getSubroutineForAddress(uint64_t Address) {
extractDIEsIfNeeded(false);
- if (AddrDieMap.empty())
- updateAddressDieMap(getUnitDIE());
- auto R = AddrDieMap.upper_bound(Address);
- if (R == AddrDieMap.begin())
+
+ // We use a two-level mapping structure to locate subroutines for a given PC
+ // address.
+ //
+ // First, we map the address to a subprogram. This can be done more cheaply
+ // because subprograms cannot nest within each other. It also allows us to
+ // avoid detailed examination of many subprograms, instead only focusing on
+ // the ones which we end up actively querying.
+ if (SubprogramDIEAddrMap.empty())
+ buildSubprogramDIEAddrMap();
+
+ assert(!SubprogramDIEAddrMap.empty() &&
+ "We must always end up with a non-empty map!");
+
+ auto I = std::upper_bound(
+ SubprogramDIEAddrMap.begin(), SubprogramDIEAddrMap.end(), Address,
+ [](uint64_t LHS, const std::pair<uint64_t, int64_t> &RHS) {
+ return LHS < RHS.first;
+ });
+ // If we find the beginning, then the address is before the first subprogram.
+ if (I == SubprogramDIEAddrMap.begin())
return DWARFDie();
- // upper_bound's previous item contains Address.
- --R;
- if (Address >= R->second.first)
+ // Back up to the interval containing the address and see if it
+ // has a DIE associated with it.
+ --I;
+ if (I->second == -1)
return DWARFDie();
- return R->second.second;
+
+ auto &SPInfo = SubprogramDIEAddrInfos[I->second];
+
+ // Now that we have the subprogram for this address, we do the second level
+ // mapping by building a map within a subprogram's PC range to any specific
+ // inlined subroutine.
+ if (SPInfo.InlinedSubroutineDIEAddrMap.empty())
+ buildInlinedSubroutineDIEAddrMap(SPInfo);
+
+ // We lookup within the inlined subroutine using a subprogram-relative
+ // address.
+ assert(Address >= SPInfo.SubprogramBasePC &&
+ "Address isn't above the start of the subprogram!");
+ uint32_t RelativeAddr = ((Address - SPInfo.SubprogramBasePC) >
+ (uint64_t)std::numeric_limits<uint32_t>::max())
+ ? std::numeric_limits<uint32_t>::max()
+ : (uint32_t)(Address - SPInfo.SubprogramBasePC);
+
+ auto J =
+ std::upper_bound(SPInfo.InlinedSubroutineDIEAddrMap.begin(),
+ SPInfo.InlinedSubroutineDIEAddrMap.end(), RelativeAddr,
+ [](uint32_t LHS, const std::pair<uint32_t, int32_t> &RHS) {
+ return LHS < RHS.first;
+ });
+ // If we find the beginning, the address is before any inlined subroutine so
+ // return the subprogram DIE.
+ if (J == SPInfo.InlinedSubroutineDIEAddrMap.begin())
+ return SPInfo.SubprogramDIE;
+ // Back up `J` and return the inlined subroutine if we have one or the
+ // subprogram if we don't.
+ --J;
+ return J->second == -1 ? SPInfo.SubprogramDIE
+ : InlinedSubroutineDIEs[J->second];
}
void
@@ -466,3 +815,89 @@ const DWARFAbbreviationDeclarationSet *DWARFUnit::getAbbreviations() const {
Abbrevs = Abbrev->getAbbreviationDeclarationSet(AbbrOffset);
return Abbrevs;
}
+
+Optional<StrOffsetsContributionDescriptor>
+StrOffsetsContributionDescriptor::validateContributionSize(
+ DWARFDataExtractor &DA) {
+ uint8_t EntrySize = getDwarfOffsetByteSize();
+ // In order to ensure that we don't read a partial record at the end of
+ // the section we validate for a multiple of the entry size.
+ uint64_t ValidationSize = alignTo(Size, EntrySize);
+ // Guard against overflow.
+ if (ValidationSize >= Size)
+ if (DA.isValidOffsetForDataOfSize((uint32_t)Base, ValidationSize))
+ return *this;
+ return Optional<StrOffsetsContributionDescriptor>();
+}
+
+// Look for a DWARF64-formatted contribution to the string offsets table
+// starting at a given offset and record it in a descriptor.
+static Optional<StrOffsetsContributionDescriptor>
+parseDWARF64StringOffsetsTableHeader(DWARFDataExtractor &DA, uint32_t Offset) {
+ if (!DA.isValidOffsetForDataOfSize(Offset, 16))
+ return Optional<StrOffsetsContributionDescriptor>();
+
+ if (DA.getU32(&Offset) != 0xffffffff)
+ return Optional<StrOffsetsContributionDescriptor>();
+
+ uint64_t Size = DA.getU64(&Offset);
+ uint8_t Version = DA.getU16(&Offset);
+ (void)DA.getU16(&Offset); // padding
+ return StrOffsetsContributionDescriptor(Offset, Size, Version, DWARF64);
+ //return Optional<StrOffsetsContributionDescriptor>(Descriptor);
+}
+
+// Look for a DWARF32-formatted contribution to the string offsets table
+// starting at a given offset and record it in a descriptor.
+static Optional<StrOffsetsContributionDescriptor>
+parseDWARF32StringOffsetsTableHeader(DWARFDataExtractor &DA, uint32_t Offset) {
+ if (!DA.isValidOffsetForDataOfSize(Offset, 8))
+ return Optional<StrOffsetsContributionDescriptor>();
+ uint32_t ContributionSize = DA.getU32(&Offset);
+ if (ContributionSize >= 0xfffffff0)
+ return Optional<StrOffsetsContributionDescriptor>();
+ uint8_t Version = DA.getU16(&Offset);
+ (void)DA.getU16(&Offset); // padding
+ return StrOffsetsContributionDescriptor(Offset, ContributionSize, Version, DWARF32);
+ //return Optional<StrOffsetsContributionDescriptor>(Descriptor);
+}
+
+Optional<StrOffsetsContributionDescriptor>
+DWARFUnit::determineStringOffsetsTableContribution(DWARFDataExtractor &DA,
+ uint64_t Offset) {
+ Optional<StrOffsetsContributionDescriptor> Descriptor;
+ // Attempt to find a DWARF64 contribution 16 bytes before the base.
+ if (Offset >= 16)
+ Descriptor =
+ parseDWARF64StringOffsetsTableHeader(DA, (uint32_t)Offset - 16);
+ // Try to find a DWARF32 contribution 8 bytes before the base.
+ if (!Descriptor && Offset >= 8)
+ Descriptor = parseDWARF32StringOffsetsTableHeader(DA, (uint32_t)Offset - 8);
+ return Descriptor ? Descriptor->validateContributionSize(DA) : Descriptor;
+}
+
+Optional<StrOffsetsContributionDescriptor>
+DWARFUnit::determineStringOffsetsTableContributionDWO(DWARFDataExtractor &DA,
+ uint64_t Offset) {
+ if (getVersion() >= 5) {
+ // Look for a valid contribution at the given offset.
+ auto Descriptor =
+ parseDWARF64StringOffsetsTableHeader(DA, (uint32_t)Offset);
+ if (!Descriptor)
+ Descriptor = parseDWARF32StringOffsetsTableHeader(DA, (uint32_t)Offset);
+ return Descriptor ? Descriptor->validateContributionSize(DA) : Descriptor;
+ }
+ // Prior to DWARF v5, we derive the contribution size from the
+ // index table (in a package file). In a .dwo file it is simply
+ // the length of the string offsets section.
+ uint64_t Size = 0;
+ if (!IndexEntry)
+ Size = StringOffsetSection.Data.size();
+ else if (const auto *C = IndexEntry->getOffset(DW_SECT_STR_OFFSETS))
+ Size = C->Length;
+ // Return a descriptor with the given offset as base, version 4 and
+ // DWARF32 format.
+ //return Optional<StrOffsetsContributionDescriptor>(
+ //StrOffsetsContributionDescriptor(Offset, Size, 4, DWARF32));
+ return StrOffsetsContributionDescriptor(Offset, Size, 4, DWARF32);
+}