path: root/lib/DebugInfo
author     Dimitry Andric <dim@FreeBSD.org>   2017-05-29 16:25:25 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2017-05-29 16:25:25 +0000
commit     ab44ce3d598882e51a25eb82eb7ae6308de85ae6 (patch)
tree       568d786a59d49bef961dcb9bd09d422701b9da5b /lib/DebugInfo
parent     b5630dbadf9a2a06754194387d6b0fd9962a67f1 (diff)
Diffstat (limited to 'lib/DebugInfo')
-rw-r--r--   lib/DebugInfo/CodeView/CMakeLists.txt               |   1
-rw-r--r--   lib/DebugInfo/CodeView/CVTypeVisitor.cpp            |  20
-rw-r--r--   lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp       | 371
-rw-r--r--   lib/DebugInfo/CodeView/TypeSerializer.cpp           | 230
-rw-r--r--   lib/DebugInfo/CodeView/TypeStreamMerger.cpp         | 428
-rw-r--r--   lib/DebugInfo/CodeView/TypeTableCollection.cpp      |   3
-rw-r--r--   lib/DebugInfo/DWARF/DWARFContext.cpp                | 119
-rw-r--r--   lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp         |   8
-rw-r--r--   lib/DebugInfo/DWARF/DWARFDie.cpp                    |  13
-rw-r--r--   lib/DebugInfo/DWARF/DWARFFormValue.cpp              |   4
-rw-r--r--   lib/DebugInfo/DWARF/DWARFUnit.cpp                   |  39
-rw-r--r--   lib/DebugInfo/MSF/MappedBlockStream.cpp             |  32
-rw-r--r--   lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp       |  25
-rw-r--r--   lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp   |  19
-rw-r--r--   lib/DebugInfo/PDB/Native/TpiStream.cpp              |   4
15 files changed, 857 insertions, 459 deletions
diff --git a/lib/DebugInfo/CodeView/CMakeLists.txt b/lib/DebugInfo/CodeView/CMakeLists.txt
index 556ebf78622f..90193d07b95d 100644
--- a/lib/DebugInfo/CodeView/CMakeLists.txt
+++ b/lib/DebugInfo/CodeView/CMakeLists.txt
@@ -22,6 +22,7 @@ add_llvm_library(LLVMDebugInfoCodeView
TypeDatabaseVisitor.cpp
TypeDumpVisitor.cpp
TypeIndex.cpp
+ TypeIndexDiscovery.cpp
TypeRecordMapping.cpp
TypeSerializer.cpp
TypeStreamMerger.cpp
diff --git a/lib/DebugInfo/CodeView/CVTypeVisitor.cpp b/lib/DebugInfo/CodeView/CVTypeVisitor.cpp
index f95c3e79388e..705b548141b0 100644
--- a/lib/DebugInfo/CodeView/CVTypeVisitor.cpp
+++ b/lib/DebugInfo/CodeView/CVTypeVisitor.cpp
@@ -45,24 +45,9 @@ static Error visitKnownMember(CVMemberRecord &Record,
}
static Expected<TypeServer2Record> deserializeTypeServerRecord(CVType &Record) {
- class StealTypeServerVisitor : public TypeVisitorCallbacks {
- public:
- explicit StealTypeServerVisitor(TypeServer2Record &TR) : TR(TR) {}
-
- Error visitKnownRecord(CVType &CVR, TypeServer2Record &Record) override {
- TR = Record;
- return Error::success();
- }
-
- private:
- TypeServer2Record &TR;
- };
-
TypeServer2Record R(TypeRecordKind::TypeServer2);
- StealTypeServerVisitor Thief(R);
- if (auto EC = visitTypeRecord(Record, Thief))
+ if (auto EC = TypeDeserializer::deserializeAs(Record, R))
return std::move(EC);
-
return R;
}
@@ -308,8 +293,9 @@ Error llvm::codeview::visitTypeRecord(CVType &Record,
Error llvm::codeview::visitTypeStream(const CVTypeArray &Types,
TypeVisitorCallbacks &Callbacks,
+ VisitorDataSource Source,
TypeServerHandler *TS) {
- VisitHelper V(Callbacks, VDS_BytesPresent);
+ VisitHelper V(Callbacks, Source);
if (TS)
V.Visitor.addTypeServerHandler(*TS);
return V.Visitor.visitTypeStream(Types);
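The hunk above threads a VisitorDataSource argument through visitTypeStream so the caller, rather than the driver, decides whether record bytes are already deserialized. A minimal standalone sketch of that pattern, in plain C++ with hypothetical names rather than LLVM's actual visitor API:

// Standalone sketch (assumed names, not LLVM's API): threading a data-source
// flag through a stream visitor so the caller decides whether records are
// deserialized before the callback runs or handed over as raw bytes.
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

enum class DataSource { BytesPresent, BytesExternal };

struct Record {
  uint16_t Kind;
  std::vector<uint8_t> Bytes;
};

// The driver itself is identical in both modes; only the flag it forwards to
// the callback changes.
void visitStream(const std::vector<Record> &Records, DataSource Source,
                 const std::function<void(const Record &, bool)> &Callback) {
  for (const Record &R : Records)
    Callback(R, Source == DataSource::BytesPresent);
}

int main() {
  std::vector<Record> Stream = {{0x1605, {1, 2, 3, 4}}, {0x1201, {5, 6}}};
  visitStream(Stream, DataSource::BytesExternal,
              [](const Record &R, bool Deserialized) {
                std::cout << "kind=0x" << std::hex << R.Kind
                          << (Deserialized ? " (deserialized)" : " (raw)")
                          << "\n";
              });
}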
diff --git a/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp b/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
new file mode 100644
index 000000000000..11e2e215303c
--- /dev/null
+++ b/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
@@ -0,0 +1,371 @@
+//===- TypeIndexDiscovery.cpp -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::codeview;
+
+static inline MethodKind getMethodKind(uint16_t Attrs) {
+ Attrs &= uint16_t(MethodOptions::MethodKindMask);
+ Attrs >>= 2;
+ return MethodKind(Attrs);
+}
+
+static inline bool isIntroVirtual(uint16_t Attrs) {
+ MethodKind MK = getMethodKind(Attrs);
+ return MK == MethodKind::IntroducingVirtual ||
+ MK == MethodKind::PureIntroducingVirtual;
+}
+
+static inline PointerMode getPointerMode(uint32_t Attrs) {
+ return static_cast<PointerMode>((Attrs >> PointerRecord::PointerModeShift) &
+ PointerRecord::PointerModeMask);
+}
+
+static inline bool isMemberPointer(uint32_t Attrs) {
+ PointerMode Mode = getPointerMode(Attrs);
+ return Mode == PointerMode::PointerToDataMember ||
+ Mode == PointerMode::PointerToMemberFunction;
+}
+
+static inline uint32_t getEncodedIntegerLength(ArrayRef<uint8_t> Data) {
+ uint16_t N = support::endian::read16le(Data.data());
+ if (N < LF_NUMERIC)
+ return 2;
+
+ assert(N <= LF_UQUADWORD);
+
+ constexpr uint32_t Sizes[] = {
+ 1, // LF_CHAR
+ 2, // LF_SHORT
+ 2, // LF_USHORT
+ 4, // LF_LONG
+ 4, // LF_ULONG
+ 4, // LF_REAL32
+ 8, // LF_REAL64
+ 10, // LF_REAL80
+ 16, // LF_REAL128
+ 8, // LF_QUADWORD
+ 8, // LF_UQUADWORD
+ };
+
+ return Sizes[N - LF_NUMERIC];
+}
+
+static inline uint32_t getCStringLength(ArrayRef<uint8_t> Data) {
+ const char *S = reinterpret_cast<const char *>(Data.data());
+ return strlen(S) + 1;
+}
+
+static void handleMethodOverloadList(ArrayRef<uint8_t> Content,
+ SmallVectorImpl<TiReference> &Refs) {
+ uint32_t Offset = 0;
+
+ while (!Content.empty()) {
+ // Array of:
+ // 0: Attrs
+ // 2: Padding
+ // 4: TypeIndex
+ // if (isIntroVirtual())
+ // 8: VFTableOffset
+
+ // At least 8 bytes are guaranteed. 4 extra bytes come iff function is an
+ // intro virtual.
+ uint32_t Len = 8;
+
+ uint16_t Attrs = support::endian::read16le(Content.data());
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+
+ if (LLVM_UNLIKELY(isIntroVirtual(Attrs)))
+ Len += 4;
+ Offset += Len;
+ Content = Content.drop_front(Len);
+ }
+}
+
+static uint32_t handleBaseClass(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ // 8: Encoded Integer
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ return 8 + getEncodedIntegerLength(Data.drop_front(8));
+}
+
+static uint32_t handleEnumerator(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: Encoded Integer
+ // <next>: Name
+ uint32_t Size = 4 + getEncodedIntegerLength(Data.drop_front(4));
+ return Size + getCStringLength(Data.drop_front(Size));
+}
+
+static uint32_t handleDataMember(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ // 8: Encoded Integer
+ // <next>: Name
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ uint32_t Size = 8 + getEncodedIntegerLength(Data.drop_front(8));
+ return Size + getCStringLength(Data.drop_front(Size));
+}
+
+static uint32_t handleOverloadedMethod(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ // 8: Name
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ return 8 + getCStringLength(Data.drop_front(8));
+}
+
+static uint32_t handleOneMethod(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Attributes
+ // 4: Type
+ // if (isIntroVirtual)
+ // 8: VFTableOffset
+ // <next>: Name
+ uint32_t Size = 8;
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+
+ uint16_t Attrs = support::endian::read16le(Data.drop_front(2).data());
+ if (LLVM_UNLIKELY(isIntroVirtual(Attrs)))
+ Size += 4;
+
+ return Size + getCStringLength(Data.drop_front(Size));
+}
+
+static uint32_t handleNestedType(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ // 8: Name
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ return 8 + getCStringLength(Data.drop_front(8));
+}
+
+static uint32_t handleStaticDataMember(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ // 8: Name
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ return 8 + getCStringLength(Data.drop_front(8));
+}
+
+static uint32_t handleVirtualBaseClass(ArrayRef<uint8_t> Data, uint32_t Offset,
+ bool IsIndirect,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Attrs
+ // 4: TypeIndex
+ // 8: TypeIndex
+ // 12: Encoded Integer
+ // <next>: Encoded Integer
+ uint32_t Size = 12;
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 2});
+ Size += getEncodedIntegerLength(Data.drop_front(Size));
+ Size += getEncodedIntegerLength(Data.drop_front(Size));
+ return Size;
+}
+
+static uint32_t handleVFPtr(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ return 8;
+}
+
+static uint32_t handleListContinuation(ArrayRef<uint8_t> Data, uint32_t Offset,
+ SmallVectorImpl<TiReference> &Refs) {
+ // 0: Kind
+ // 2: Padding
+ // 4: TypeIndex
+ Refs.push_back({TiRefKind::TypeRef, Offset + 4, 1});
+ return 8;
+}
+
+static void handleFieldList(ArrayRef<uint8_t> Content,
+ SmallVectorImpl<TiReference> &Refs) {
+ uint32_t Offset = 0;
+ uint32_t ThisLen = 0;
+ while (!Content.empty()) {
+ TypeLeafKind Kind =
+ static_cast<TypeLeafKind>(support::endian::read16le(Content.data()));
+ switch (Kind) {
+ case LF_BCLASS:
+ ThisLen = handleBaseClass(Content, Offset, Refs);
+ break;
+ case LF_ENUMERATE:
+ ThisLen = handleEnumerator(Content, Offset, Refs);
+ break;
+ case LF_MEMBER:
+ ThisLen = handleDataMember(Content, Offset, Refs);
+ break;
+ case LF_METHOD:
+ ThisLen = handleOverloadedMethod(Content, Offset, Refs);
+ break;
+ case LF_ONEMETHOD:
+ ThisLen = handleOneMethod(Content, Offset, Refs);
+ break;
+ case LF_NESTTYPE:
+ ThisLen = handleNestedType(Content, Offset, Refs);
+ break;
+ case LF_STMEMBER:
+ ThisLen = handleStaticDataMember(Content, Offset, Refs);
+ break;
+ case LF_VBCLASS:
+ case LF_IVBCLASS:
+ ThisLen =
+ handleVirtualBaseClass(Content, Offset, Kind == LF_VBCLASS, Refs);
+ break;
+ case LF_VFUNCTAB:
+ ThisLen = handleVFPtr(Content, Offset, Refs);
+ break;
+ case LF_INDEX:
+ ThisLen = handleListContinuation(Content, Offset, Refs);
+ break;
+ default:
+ return;
+ }
+ Content = Content.drop_front(ThisLen);
+ Offset += ThisLen;
+ if (!Content.empty()) {
+ uint8_t Pad = Content.front();
+ if (Pad >= LF_PAD0) {
+ uint32_t Skip = Pad & 0x0F;
+ Content = Content.drop_front(Skip);
+ Offset += Skip;
+ }
+ }
+ }
+}
+
+static void handlePointer(ArrayRef<uint8_t> Content,
+ SmallVectorImpl<TiReference> &Refs) {
+ Refs.push_back({TiRefKind::TypeRef, 0, 1});
+
+ uint32_t Attrs = support::endian::read32le(Content.drop_front(4).data());
+ if (isMemberPointer(Attrs))
+ Refs.push_back({TiRefKind::TypeRef, 8, 1});
+}
+
+static void discoverTypeIndices(ArrayRef<uint8_t> Content, TypeLeafKind Kind,
+ SmallVectorImpl<TiReference> &Refs) {
+ uint32_t Count;
+ // FIXME: In the future it would be nice if we could avoid hardcoding these
+ // values. One idea is to define some structures representing these types
+ // that would allow the use of offsetof().
+ switch (Kind) {
+ case TypeLeafKind::LF_FUNC_ID:
+ Refs.push_back({TiRefKind::IndexRef, 0, 1});
+ Refs.push_back({TiRefKind::TypeRef, 4, 1});
+ break;
+ case TypeLeafKind::LF_MFUNC_ID:
+ Refs.push_back({TiRefKind::TypeRef, 0, 2});
+ break;
+ case TypeLeafKind::LF_STRING_ID:
+ Refs.push_back({TiRefKind::IndexRef, 0, 1});
+ break;
+ case TypeLeafKind::LF_SUBSTR_LIST:
+ Count = support::endian::read32le(Content.data());
+ if (Count > 0)
+ Refs.push_back({TiRefKind::IndexRef, 4, Count});
+ break;
+ case TypeLeafKind::LF_BUILDINFO:
+ Count = support::endian::read16le(Content.data());
+ if (Count > 0)
+ Refs.push_back({TiRefKind::IndexRef, 2, Count});
+ break;
+ case TypeLeafKind::LF_UDT_SRC_LINE:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1});
+ Refs.push_back({TiRefKind::IndexRef, 4, 1});
+ break;
+ case TypeLeafKind::LF_UDT_MOD_SRC_LINE:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1});
+ break;
+ case TypeLeafKind::LF_MODIFIER:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1});
+ break;
+ case TypeLeafKind::LF_PROCEDURE:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1});
+ Refs.push_back({TiRefKind::TypeRef, 8, 1});
+ break;
+ case TypeLeafKind::LF_MFUNCTION:
+ Refs.push_back({TiRefKind::TypeRef, 0, 3});
+ Refs.push_back({TiRefKind::TypeRef, 16, 1});
+ break;
+ case TypeLeafKind::LF_ARGLIST:
+ Count = support::endian::read32le(Content.data());
+ if (Count > 0)
+ Refs.push_back({TiRefKind::TypeRef, 4, Count});
+ break;
+ case TypeLeafKind::LF_ARRAY:
+ Refs.push_back({TiRefKind::TypeRef, 0, 2});
+ break;
+ case TypeLeafKind::LF_CLASS:
+ case TypeLeafKind::LF_STRUCTURE:
+ case TypeLeafKind::LF_INTERFACE:
+ Refs.push_back({TiRefKind::TypeRef, 4, 3});
+ break;
+ case TypeLeafKind::LF_UNION:
+ Refs.push_back({TiRefKind::TypeRef, 4, 1});
+ break;
+ case TypeLeafKind::LF_ENUM:
+ Refs.push_back({TiRefKind::TypeRef, 4, 2});
+ break;
+ case TypeLeafKind::LF_BITFIELD:
+ Refs.push_back({TiRefKind::TypeRef, 0, 1});
+ break;
+ case TypeLeafKind::LF_VFTABLE:
+ Refs.push_back({TiRefKind::TypeRef, 0, 2});
+ break;
+ case TypeLeafKind::LF_VTSHAPE:
+ break;
+ case TypeLeafKind::LF_METHODLIST:
+ handleMethodOverloadList(Content, Refs);
+ break;
+ case TypeLeafKind::LF_FIELDLIST:
+ handleFieldList(Content, Refs);
+ break;
+ case TypeLeafKind::LF_POINTER:
+ handlePointer(Content, Refs);
+ break;
+ default:
+ break;
+ }
+}
+
+void llvm::codeview::discoverTypeIndices(const CVType &Type,
+ SmallVectorImpl<TiReference> &Refs) {
+ ::discoverTypeIndices(Type.content(), Type.kind(), Refs);
+}
+
+void llvm::codeview::discoverTypeIndices(ArrayRef<uint8_t> RecordData,
+ SmallVectorImpl<TiReference> &Refs) {
+ const RecordPrefix *P =
+ reinterpret_cast<const RecordPrefix *>(RecordData.data());
+ TypeLeafKind K = static_cast<TypeLeafKind>(uint16_t(P->RecordKind));
+ ::discoverTypeIndices(RecordData.drop_front(sizeof(RecordPrefix)), K, Refs);
+}
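TypeIndexDiscovery reports each type-index reference as a {kind, offset, count} triple relative to the record content, which lets a consumer patch indices in place without ever deserializing the record. A small standalone sketch of that idea, with hypothetical names and a simple +Delta rewrite standing in for a real source-to-destination map:

// Standalone sketch (assumed names, not LLVM's API): given {offset, count}
// ranges of 32-bit indices inside a record's content, rewrite each index.
#include <cstdint>
#include <iostream>
#include <vector>

struct IndexRef {
  uint32_t Offset; // byte offset into the record content
  uint32_t Count;  // number of consecutive 32-bit indices at that offset
};

static uint32_t read32le(const uint8_t *P) {
  return uint32_t(P[0]) | uint32_t(P[1]) << 8 | uint32_t(P[2]) << 16 |
         uint32_t(P[3]) << 24;
}

static void write32le(uint8_t *P, uint32_t V) {
  P[0] = V & 0xff;
  P[1] = (V >> 8) & 0xff;
  P[2] = (V >> 16) & 0xff;
  P[3] = (V >> 24) & 0xff;
}

// Adds Delta to every referenced index; a real merger would instead look the
// old index up in a source-to-destination map.
void remapIndices(std::vector<uint8_t> &Content,
                  const std::vector<IndexRef> &Refs, uint32_t Delta) {
  for (const IndexRef &R : Refs)
    for (uint32_t I = 0; I < R.Count; ++I) {
      uint8_t *P = Content.data() + R.Offset + I * 4;
      write32le(P, read32le(P) + Delta);
    }
}

int main() {
  // A fake procedure-like payload with indices at content offsets 0 and 8.
  std::vector<uint8_t> Content(12, 0);
  write32le(Content.data() + 0, 0x1000);
  write32le(Content.data() + 8, 0x1001);
  remapIndices(Content, {{0, 1}, {8, 1}}, 0x200);
  std::cout << std::hex << read32le(Content.data() + 0) << " "
            << read32le(Content.data() + 8) << "\n"; // prints: 1200 1201
}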
diff --git a/lib/DebugInfo/CodeView/TypeSerializer.cpp b/lib/DebugInfo/CodeView/TypeSerializer.cpp
index 3b061e67e05e..93c1198e36ce 100644
--- a/lib/DebugInfo/CodeView/TypeSerializer.cpp
+++ b/lib/DebugInfo/CodeView/TypeSerializer.cpp
@@ -9,6 +9,7 @@
#include "llvm/DebugInfo/CodeView/TypeSerializer.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include <string.h>
@@ -16,21 +17,109 @@
using namespace llvm;
using namespace llvm::codeview;
-bool TypeSerializer::isInFieldList() const {
- return TypeKind.hasValue() && *TypeKind == TypeLeafKind::LF_FIELDLIST;
+namespace {
+struct HashedType {
+ uint64_t Hash;
+ const uint8_t *Data;
+ unsigned Size; // FIXME: Go to uint16_t?
+ TypeIndex Index;
+};
+
+/// Wrapper around a pointer to a HashedType. Hash and equality operations are
+/// based on data in the pointee.
+struct HashedTypePtr {
+ HashedTypePtr() = default;
+ HashedTypePtr(HashedType *Ptr) : Ptr(Ptr) {}
+ HashedType *Ptr = nullptr;
+};
+} // namespace
+
+namespace llvm {
+template <> struct DenseMapInfo<HashedTypePtr> {
+ static inline HashedTypePtr getEmptyKey() { return HashedTypePtr(nullptr); }
+ static inline HashedTypePtr getTombstoneKey() {
+ return HashedTypePtr(reinterpret_cast<HashedType *>(1));
+ }
+ static unsigned getHashValue(HashedTypePtr Val) {
+ assert(Val.Ptr != getEmptyKey().Ptr && Val.Ptr != getTombstoneKey().Ptr);
+ return Val.Ptr->Hash;
+ }
+ static bool isEqual(HashedTypePtr LHSP, HashedTypePtr RHSP) {
+ HashedType *LHS = LHSP.Ptr;
+ HashedType *RHS = RHSP.Ptr;
+ if (RHS == getEmptyKey().Ptr || RHS == getTombstoneKey().Ptr)
+ return LHS == RHS;
+ if (LHS->Hash != RHS->Hash || LHS->Size != RHS->Size)
+ return false;
+ return ::memcmp(LHS->Data, RHS->Data, LHS->Size) == 0;
+ }
+};
}
-TypeIndex TypeSerializer::calcNextTypeIndex() const {
- if (LastTypeIndex.isNoneType())
- return TypeIndex(TypeIndex::FirstNonSimpleIndex);
- else
- return TypeIndex(LastTypeIndex.getIndex() + 1);
+/// Private implementation so that we don't leak our DenseMap instantiations to
+/// users.
+class llvm::codeview::TypeHasher {
+private:
+ /// Storage for type record provided by the caller. Records will outlive the
+ /// hasher object, so they should be allocated here.
+ BumpPtrAllocator &RecordStorage;
+
+ /// Storage for hash keys. These only need to live as long as the hashing
+ /// operation.
+ BumpPtrAllocator KeyStorage;
+
+ /// Hash table. We really want a DenseMap<ArrayRef<uint8_t>, TypeIndex> here,
+ /// but DenseMap is inefficient when the keys are long (like type records)
+ /// because it recomputes the hash value of every key when it grows. This
+ /// value type stores the hash out of line in KeyStorage, so that table
+ /// entries are small and easy to rehash.
+ DenseSet<HashedTypePtr> HashedRecords;
+
+public:
+ TypeHasher(BumpPtrAllocator &RecordStorage) : RecordStorage(RecordStorage) {}
+
+ void reset() { HashedRecords.clear(); }
+
+ /// Takes the bytes of type record, inserts them into the hash table, saves
+ /// them, and returns a pointer to an identical stable type record along with
+ /// its type index in the destination stream.
+ TypeIndex getOrCreateRecord(ArrayRef<uint8_t> &Record, TypeIndex TI);
+};
+
+TypeIndex TypeHasher::getOrCreateRecord(ArrayRef<uint8_t> &Record,
+ TypeIndex TI) {
+ assert(Record.size() < UINT32_MAX && "Record too big");
+ assert(Record.size() % 4 == 0 && "Record is not aligned to 4 bytes!");
+
+ // Compute the hash up front so we can store it in the key.
+ HashedType TempHashedType = {hash_value(Record), Record.data(),
+ unsigned(Record.size()), TI};
+ auto Result = HashedRecords.insert(HashedTypePtr(&TempHashedType));
+ HashedType *&Hashed = Result.first->Ptr;
+
+ if (Result.second) {
+ // This was a new type record. We need stable storage for both the key and
+ // the record. The record should outlive the hashing operation.
+ Hashed = KeyStorage.Allocate<HashedType>();
+ *Hashed = TempHashedType;
+
+ uint8_t *Stable = RecordStorage.Allocate<uint8_t>(Record.size());
+ memcpy(Stable, Record.data(), Record.size());
+ Hashed->Data = Stable;
+ assert(Hashed->Size == Record.size());
+ }
+
+ // Update the caller's copy of Record to point to the stable copy.
+ Record = ArrayRef<uint8_t>(Hashed->Data, Hashed->Size);
+ return Hashed->Index;
}
-TypeIndex TypeSerializer::incrementTypeIndex() {
- TypeIndex Previous = LastTypeIndex;
- LastTypeIndex = calcNextTypeIndex();
- return Previous;
+TypeIndex TypeSerializer::nextTypeIndex() const {
+ return TypeIndex::fromArrayIndex(SeenRecords.size());
+}
+
+bool TypeSerializer::isInFieldList() const {
+ return TypeKind.hasValue() && *TypeKind == TypeLeafKind::LF_FIELDLIST;
}
MutableArrayRef<uint8_t> TypeSerializer::getCurrentSubRecordData() {
@@ -51,46 +140,6 @@ Error TypeSerializer::writeRecordPrefix(TypeLeafKind Kind) {
return Error::success();
}
-TypeIndex
-TypeSerializer::insertRecordBytesPrivate(MutableArrayRef<uint8_t> Record) {
- assert(Record.size() % 4 == 0 && "Record is not aligned to 4 bytes!");
-
- StringRef S(reinterpret_cast<const char *>(Record.data()), Record.size());
-
- TypeIndex NextTypeIndex = calcNextTypeIndex();
- auto Result = HashedRecords.try_emplace(S, NextTypeIndex);
- if (Result.second) {
- LastTypeIndex = NextTypeIndex;
- SeenRecords.push_back(Record);
- }
- return Result.first->getValue();
-}
-
-TypeIndex
-TypeSerializer::insertRecordBytesWithCopy(CVType &Record,
- MutableArrayRef<uint8_t> Data) {
- assert(Data.size() % 4 == 0 && "Record is not aligned to 4 bytes!");
-
- StringRef S(reinterpret_cast<const char *>(Data.data()), Data.size());
-
- // Do a two state lookup / insert so that we don't have to allocate unless
- // we're going
- // to do an insert. This is a big memory savings.
- auto Iter = HashedRecords.find(S);
- if (Iter != HashedRecords.end())
- return Iter->second;
-
- LastTypeIndex = calcNextTypeIndex();
- uint8_t *Copy = RecordStorage.Allocate<uint8_t>(Data.size());
- ::memcpy(Copy, Data.data(), Data.size());
- Data = MutableArrayRef<uint8_t>(Copy, Data.size());
- S = StringRef(reinterpret_cast<const char *>(Data.data()), Data.size());
- HashedRecords.insert(std::make_pair(S, LastTypeIndex));
- SeenRecords.push_back(Data);
- Record.RecordData = Data;
- return LastTypeIndex;
-}
-
Expected<MutableArrayRef<uint8_t>>
TypeSerializer::addPadding(MutableArrayRef<uint8_t> Record) {
uint32_t Align = Record.size() % 4;
@@ -108,27 +157,79 @@ TypeSerializer::addPadding(MutableArrayRef<uint8_t> Record) {
return MutableArrayRef<uint8_t>(Record.data(), Record.size() + N);
}
-TypeSerializer::TypeSerializer(BumpPtrAllocator &Storage)
- : RecordStorage(Storage), LastTypeIndex(),
- RecordBuffer(MaxRecordLength * 2),
+TypeSerializer::TypeSerializer(BumpPtrAllocator &Storage, bool Hash)
+ : RecordStorage(Storage), RecordBuffer(MaxRecordLength * 2),
Stream(RecordBuffer, llvm::support::little), Writer(Stream),
Mapping(Writer) {
// RecordBuffer needs to be able to hold enough data so that if we are 1
// byte short of MaxRecordLen, and then we try to write MaxRecordLen bytes,
// we won't overflow.
+ if (Hash)
+ Hasher = make_unique<TypeHasher>(Storage);
}
-ArrayRef<MutableArrayRef<uint8_t>> TypeSerializer::records() const {
+TypeSerializer::~TypeSerializer() = default;
+
+ArrayRef<ArrayRef<uint8_t>> TypeSerializer::records() const {
return SeenRecords;
}
-TypeIndex TypeSerializer::getLastTypeIndex() const { return LastTypeIndex; }
+void TypeSerializer::reset() {
+ if (Hasher)
+ Hasher->reset();
+ Writer.setOffset(0);
+ CurrentSegment = RecordSegment();
+ FieldListSegments.clear();
+ TypeKind.reset();
+ MemberKind.reset();
+ SeenRecords.clear();
+}
-TypeIndex TypeSerializer::insertRecordBytes(MutableArrayRef<uint8_t> Record) {
+TypeIndex TypeSerializer::insertRecordBytes(ArrayRef<uint8_t> &Record) {
assert(!TypeKind.hasValue() && "Already in a type mapping!");
assert(Writer.getOffset() == 0 && "Stream has data already!");
- return insertRecordBytesPrivate(Record);
+ if (Hasher) {
+ TypeIndex ActualTI = Hasher->getOrCreateRecord(Record, nextTypeIndex());
+ if (nextTypeIndex() == ActualTI)
+ SeenRecords.push_back(Record);
+ return ActualTI;
+ }
+
+ TypeIndex NewTI = nextTypeIndex();
+ uint8_t *Stable = RecordStorage.Allocate<uint8_t>(Record.size());
+ memcpy(Stable, Record.data(), Record.size());
+ Record = ArrayRef<uint8_t>(Stable, Record.size());
+ SeenRecords.push_back(Record);
+ return NewTI;
+}
+
+TypeIndex TypeSerializer::insertRecord(const RemappedType &Record) {
+ assert(!TypeKind.hasValue() && "Already in a type mapping!");
+ assert(Writer.getOffset() == 0 && "Stream has data already!");
+
+ TypeIndex TI;
+ ArrayRef<uint8_t> OriginalData = Record.OriginalRecord.RecordData;
+ if (Record.Mappings.empty()) {
+ // This record did not remap any type indices. Just write it.
+ return insertRecordBytes(OriginalData);
+ }
+
+ // At least one type index was remapped. Before we can hash it we have to
+ // copy the full record bytes, re-write each type index, then hash the copy.
+ // We do this in temporary storage since only the DenseMap can decide whether
+ // this record already exists, and if it does we don't want the memory to
+ // stick around.
+ RemapStorage.resize(OriginalData.size());
+ ::memcpy(&RemapStorage[0], OriginalData.data(), OriginalData.size());
+ uint8_t *ContentBegin = RemapStorage.data() + sizeof(RecordPrefix);
+ for (const auto &M : Record.Mappings) {
+ // First 4 bytes of every record are the record prefix, but the mapping
+ // offset is relative to the content which starts after.
+ *(TypeIndex *)(ContentBegin + M.first) = M.second;
+ }
+ auto RemapRef = makeArrayRef(RemapStorage);
+ return insertRecordBytes(RemapRef);
}
Error TypeSerializer::visitTypeBegin(CVType &Record) {
@@ -163,8 +264,13 @@ Expected<TypeIndex> TypeSerializer::visitTypeEndGetIndex(CVType &Record) {
Prefix->RecordLen = ThisRecordData.size() - sizeof(uint16_t);
Record.Type = *TypeKind;
- TypeIndex InsertedTypeIndex =
- insertRecordBytesWithCopy(Record, ThisRecordData);
+ Record.RecordData = ThisRecordData;
+
+ // insertRecordBytes assumes we're not in a mapping, so do this first.
+ TypeKind.reset();
+ Writer.setOffset(0);
+
+ TypeIndex InsertedTypeIndex = insertRecordBytes(Record.RecordData);
// Write out each additional segment in reverse order, and update each
// record's continuation index to point to the previous one.
@@ -174,11 +280,9 @@ Expected<TypeIndex> TypeSerializer::visitTypeEndGetIndex(CVType &Record) {
reinterpret_cast<support::ulittle32_t *>(CIBytes.data());
assert(*CI == 0xB0C0B0C0 && "Invalid TypeIndex placeholder");
*CI = InsertedTypeIndex.getIndex();
- InsertedTypeIndex = insertRecordBytesPrivate(X);
+ InsertedTypeIndex = insertRecordBytes(X);
}
- TypeKind.reset();
- Writer.setOffset(0);
FieldListSegments.clear();
CurrentSegment.SubRecords.clear();
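The new TypeHasher stores the precomputed hash next to each record key so the DenseSet never rehashes the full record bytes when it grows, and identical records deduplicate to the same type index. A rough standalone sketch of the same idea using std::unordered_set instead of LLVM's DenseSet (hypothetical names, not the serializer's real interface):

// Standalone sketch (assumed names, not LLVM's code): deduplicating byte
// records with a hash table whose entries carry a precomputed hash, so growing
// the table never rehashes the (potentially long) record bytes.
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct HashedRecord {
  size_t Hash;                // computed once, reused on every rehash
  std::vector<uint8_t> Bytes; // stable copy of the record
  uint32_t Index;             // index assigned on first insertion
};

struct RecordHash {
  size_t operator()(const HashedRecord &R) const { return R.Hash; }
};
struct RecordEq {
  bool operator()(const HashedRecord &A, const HashedRecord &B) const {
    return A.Hash == B.Hash && A.Bytes == B.Bytes;
  }
};

class Deduper {
  std::unordered_set<HashedRecord, RecordHash, RecordEq> Seen;
  uint32_t NextIndex = 0;

public:
  // Returns the index of an identical, previously inserted record, or assigns
  // a fresh index if this record has not been seen before.
  uint32_t getOrCreate(const std::vector<uint8_t> &Bytes) {
    HashedRecord R{std::hash<std::string>{}(
                       std::string(Bytes.begin(), Bytes.end())),
                   Bytes, NextIndex};
    auto [It, Inserted] = Seen.insert(std::move(R));
    if (Inserted)
      ++NextIndex;
    return It->Index;
  }
};

int main() {
  Deduper D;
  std::cout << D.getOrCreate({1, 2, 3, 4}) << "\n"; // 0
  std::cout << D.getOrCreate({5, 6, 7, 8}) << "\n"; // 1
  std::cout << D.getOrCreate({1, 2, 3, 4}) << "\n"; // 0 again (deduplicated)
}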
diff --git a/lib/DebugInfo/CodeView/TypeStreamMerger.cpp b/lib/DebugInfo/CodeView/TypeStreamMerger.cpp
index 46747f8eab99..71a0966df036 100644
--- a/lib/DebugInfo/CodeView/TypeStreamMerger.cpp
+++ b/lib/DebugInfo/CodeView/TypeStreamMerger.cpp
@@ -11,7 +11,9 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
+#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
@@ -57,37 +59,56 @@ namespace {
/// looking at the record kind.
class TypeStreamMerger : public TypeVisitorCallbacks {
public:
- TypeStreamMerger(TypeTableBuilder &DestIdStream,
- TypeTableBuilder &DestTypeStream,
- SmallVectorImpl<TypeIndex> &SourceToDest,
- TypeServerHandler *Handler)
- : DestIdStream(DestIdStream), DestTypeStream(DestTypeStream),
- FieldListBuilder(DestTypeStream), Handler(Handler),
- IndexMap(SourceToDest) {}
+ explicit TypeStreamMerger(SmallVectorImpl<TypeIndex> &SourceToDest,
+ TypeServerHandler *Handler)
+ : Handler(Handler), IndexMap(SourceToDest) {
+ SourceToDest.clear();
+ }
static const TypeIndex Untranslated;
-/// TypeVisitorCallbacks overrides.
-#define TYPE_RECORD(EnumName, EnumVal, Name) \
- Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
-#define MEMBER_RECORD(EnumName, EnumVal, Name) \
- Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
-#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
-#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
-#include "llvm/DebugInfo/CodeView/TypeRecords.def"
-
- Error visitUnknownType(CVType &Record) override;
-
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;
- Error visitMemberEnd(CVMemberRecord &Record) override;
- Error mergeStream(const CVTypeArray &Types);
+ Error mergeTypesAndIds(TypeTableBuilder &DestIds, TypeTableBuilder &DestTypes,
+ const CVTypeArray &IdsAndTypes);
+ Error mergeIdRecords(TypeTableBuilder &Dest,
+ ArrayRef<TypeIndex> TypeSourceToDest,
+ const CVTypeArray &Ids);
+ Error mergeTypeRecords(TypeTableBuilder &Dest, const CVTypeArray &Types);
private:
+ Error doit(const CVTypeArray &Types);
+
void addMapping(TypeIndex Idx);
- bool remapIndex(TypeIndex &Idx);
+ bool remapTypeIndex(TypeIndex &Idx);
+ bool remapItemIndex(TypeIndex &Idx);
+
+ bool remapIndices(RemappedType &Record, ArrayRef<TiReference> Refs) {
+ auto OriginalData = Record.OriginalRecord.content();
+ bool Success = true;
+ for (auto &Ref : Refs) {
+ uint32_t Offset = Ref.Offset;
+ ArrayRef<uint8_t> Bytes =
+ OriginalData.slice(Ref.Offset, sizeof(TypeIndex));
+ ArrayRef<TypeIndex> TIs(reinterpret_cast<const TypeIndex *>(Bytes.data()),
+ Ref.Count);
+ for (auto TI : TIs) {
+ TypeIndex NewTI = TI;
+ bool ThisSuccess = (Ref.Kind == TiRefKind::IndexRef)
+ ? remapItemIndex(NewTI)
+ : remapTypeIndex(NewTI);
+ if (ThisSuccess && NewTI != TI)
+ Record.Mappings.emplace_back(Offset, NewTI);
+ Offset += sizeof(TypeIndex);
+ Success &= ThisSuccess;
+ }
+ }
+ return Success;
+ }
+
+ bool remapIndex(TypeIndex &Idx, ArrayRef<TypeIndex> Map);
size_t slotForIndex(TypeIndex Idx) const {
assert(!Idx.isSimple() && "simple type indices have no slots");
@@ -98,49 +119,45 @@ private:
return llvm::make_error<CodeViewError>(cv_error_code::corrupt_record);
}
- template <typename RecordType>
- Error writeRecord(RecordType &R, bool RemapSuccess) {
+ Error writeRecord(TypeTableBuilder &Dest, const RemappedType &Record,
+ bool RemapSuccess) {
TypeIndex DestIdx = Untranslated;
if (RemapSuccess)
- DestIdx = DestTypeStream.writeKnownType(R);
+ DestIdx = Dest.writeSerializedRecord(Record);
addMapping(DestIdx);
return Error::success();
}
- template <typename RecordType>
- Error writeIdRecord(RecordType &R, bool RemapSuccess) {
- TypeIndex DestIdx = Untranslated;
- if (RemapSuccess)
- DestIdx = DestIdStream.writeKnownType(R);
+ Error writeTypeRecord(const CVType &Record) {
+ TypeIndex DestIdx =
+ DestTypeStream->writeSerializedRecord(Record.RecordData);
addMapping(DestIdx);
return Error::success();
}
- template <typename RecordType>
- Error writeMember(RecordType &R, bool RemapSuccess) {
- if (RemapSuccess)
- FieldListBuilder.writeMemberType(R);
- else
- HadUntranslatedMember = true;
- return Error::success();
+ Error writeTypeRecord(const RemappedType &Record, bool RemapSuccess) {
+ return writeRecord(*DestTypeStream, Record, RemapSuccess);
+ }
+
+ Error writeIdRecord(const RemappedType &Record, bool RemapSuccess) {
+ return writeRecord(*DestIdStream, Record, RemapSuccess);
}
Optional<Error> LastError;
bool IsSecondPass = false;
- bool HadUntranslatedMember = false;
-
unsigned NumBadIndices = 0;
- BumpPtrAllocator Allocator;
+ TypeIndex CurIndex{TypeIndex::FirstNonSimpleIndex};
- TypeTableBuilder &DestIdStream;
- TypeTableBuilder &DestTypeStream;
- FieldListRecordBuilder FieldListBuilder;
- TypeServerHandler *Handler;
+ TypeTableBuilder *DestIdStream = nullptr;
+ TypeTableBuilder *DestTypeStream = nullptr;
+ TypeServerHandler *Handler = nullptr;
- TypeIndex CurIndex{TypeIndex::FirstNonSimpleIndex};
+ // If we're only mapping id records, this array contains the mapping for
+ // type records.
+ ArrayRef<TypeIndex> TypeLookup;
/// Map from source type index to destination type index. Indexed by source
/// type index minus 0x1000.
@@ -151,22 +168,34 @@ private:
const TypeIndex TypeStreamMerger::Untranslated(SimpleTypeKind::NotTranslated);
-Error TypeStreamMerger::visitTypeBegin(CVRecord<TypeLeafKind> &Rec) {
+Error TypeStreamMerger::visitTypeBegin(CVType &Rec) {
+ RemappedType R(Rec);
+ SmallVector<TiReference, 32> Refs;
+ discoverTypeIndices(Rec.RecordData, Refs);
+ bool Success = remapIndices(R, Refs);
+ switch (Rec.kind()) {
+ case TypeLeafKind::LF_FUNC_ID:
+ case TypeLeafKind::LF_MFUNC_ID:
+ case TypeLeafKind::LF_STRING_ID:
+ case TypeLeafKind::LF_SUBSTR_LIST:
+ case TypeLeafKind::LF_BUILDINFO:
+ case TypeLeafKind::LF_UDT_SRC_LINE:
+ case TypeLeafKind::LF_UDT_MOD_SRC_LINE:
+ return writeIdRecord(R, Success);
+ default:
+ return writeTypeRecord(R, Success);
+ }
return Error::success();
}
-Error TypeStreamMerger::visitTypeEnd(CVRecord<TypeLeafKind> &Rec) {
- CurIndex = TypeIndex(CurIndex.getIndex() + 1);
+Error TypeStreamMerger::visitTypeEnd(CVType &Rec) {
+ ++CurIndex;
if (!IsSecondPass)
assert(IndexMap.size() == slotForIndex(CurIndex) &&
"visitKnownRecord should add one index map entry");
return Error::success();
}
-Error TypeStreamMerger::visitMemberEnd(CVMemberRecord &Rec) {
- return Error::success();
-}
-
void TypeStreamMerger::addMapping(TypeIndex Idx) {
if (!IsSecondPass) {
assert(IndexMap.size() == slotForIndex(CurIndex) &&
@@ -178,7 +207,7 @@ void TypeStreamMerger::addMapping(TypeIndex Idx) {
}
}
-bool TypeStreamMerger::remapIndex(TypeIndex &Idx) {
+bool TypeStreamMerger::remapIndex(TypeIndex &Idx, ArrayRef<TypeIndex> Map) {
// Simple types are unchanged.
if (Idx.isSimple())
return true;
@@ -187,14 +216,14 @@ bool TypeStreamMerger::remapIndex(TypeIndex &Idx) {
// successfully. If it refers to a type later in the stream or a record we
// had to defer, defer it until later pass.
unsigned MapPos = slotForIndex(Idx);
- if (MapPos < IndexMap.size() && IndexMap[MapPos] != Untranslated) {
- Idx = IndexMap[MapPos];
+ if (MapPos < Map.size() && Map[MapPos] != Untranslated) {
+ Idx = Map[MapPos];
return true;
}
// If this is the second pass and this index isn't in the map, then it points
// outside the current type stream, and this is a corrupt record.
- if (IsSecondPass && MapPos >= IndexMap.size()) {
+ if (IsSecondPass && MapPos >= Map.size()) {
// FIXME: Print a more useful error. We can give the current record and the
// index that we think its pointing to.
LastError = joinErrors(std::move(*LastError), errorCorruptRecord());
@@ -208,241 +237,61 @@ bool TypeStreamMerger::remapIndex(TypeIndex &Idx) {
return false;
}
-//----------------------------------------------------------------------------//
-// Item records
-//----------------------------------------------------------------------------//
+bool TypeStreamMerger::remapTypeIndex(TypeIndex &Idx) {
+ // If we're mapping a pure index stream, then IndexMap only contains mappings
+ // from OldIdStream -> NewIdStream, in which case we will need to use the
+ // special mapping from OldTypeStream -> NewTypeStream which was computed
+ // externally. Regardless, we use this special map if and only if we are
+ // doing an id-only mapping.
+ if (DestTypeStream == nullptr)
+ return remapIndex(Idx, TypeLookup);
-Error TypeStreamMerger::visitKnownRecord(CVType &, FuncIdRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.ParentScope);
- Success &= remapIndex(R.FunctionType);
- return writeIdRecord(R, Success);
+ assert(TypeLookup.empty());
+ return remapIndex(Idx, IndexMap);
}
-Error TypeStreamMerger::visitKnownRecord(CVType &, MemberFuncIdRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.ClassType);
- Success &= remapIndex(R.FunctionType);
- return writeIdRecord(R, Success);
+bool TypeStreamMerger::remapItemIndex(TypeIndex &Idx) {
+ assert(DestIdStream);
+ return remapIndex(Idx, IndexMap);
}
-Error TypeStreamMerger::visitKnownRecord(CVType &, StringIdRecord &R) {
- return writeIdRecord(R, remapIndex(R.Id));
-}
+Error TypeStreamMerger::mergeTypeRecords(TypeTableBuilder &Dest,
+ const CVTypeArray &Types) {
+ DestTypeStream = &Dest;
-Error TypeStreamMerger::visitKnownRecord(CVType &, StringListRecord &R) {
- bool Success = true;
- for (TypeIndex &Str : R.StringIndices)
- Success &= remapIndex(Str);
- return writeIdRecord(R, Success);
+ return doit(Types);
}
-Error TypeStreamMerger::visitKnownRecord(CVType &, BuildInfoRecord &R) {
- bool Success = true;
- for (TypeIndex &Arg : R.ArgIndices)
- Success &= remapIndex(Arg);
- return writeIdRecord(R, Success);
-}
+Error TypeStreamMerger::mergeIdRecords(TypeTableBuilder &Dest,
+ ArrayRef<TypeIndex> TypeSourceToDest,
+ const CVTypeArray &Ids) {
+ DestIdStream = &Dest;
+ TypeLookup = TypeSourceToDest;
-Error TypeStreamMerger::visitKnownRecord(CVType &, UdtSourceLineRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.UDT);
- Success &= remapIndex(R.SourceFile);
- // FIXME: Translate UdtSourceLineRecord into UdtModSourceLineRecords in the
- // IPI stream.
- return writeIdRecord(R, Success);
+ return doit(Ids);
}
-Error TypeStreamMerger::visitKnownRecord(CVType &, UdtModSourceLineRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.UDT);
- Success &= remapIndex(R.SourceFile);
- return writeIdRecord(R, Success);
-}
+Error TypeStreamMerger::mergeTypesAndIds(TypeTableBuilder &DestIds,
+ TypeTableBuilder &DestTypes,
+ const CVTypeArray &IdsAndTypes) {
+ DestIdStream = &DestIds;
+ DestTypeStream = &DestTypes;
-//----------------------------------------------------------------------------//
-// Type records
-//----------------------------------------------------------------------------//
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, ModifierRecord &R) {
- return writeRecord(R, remapIndex(R.ModifiedType));
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, ProcedureRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.ReturnType);
- Success &= remapIndex(R.ArgumentList);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, MemberFunctionRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.ReturnType);
- Success &= remapIndex(R.ClassType);
- Success &= remapIndex(R.ThisType);
- Success &= remapIndex(R.ArgumentList);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &Type, ArgListRecord &R) {
- bool Success = true;
- for (TypeIndex &Arg : R.ArgIndices)
- Success &= remapIndex(Arg);
- if (auto EC = writeRecord(R, Success))
- return EC;
- return Error::success();
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, PointerRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.ReferentType);
- if (R.isPointerToMember())
- Success &= remapIndex(R.MemberInfo->ContainingType);
- return writeRecord(R, Success);
+ return doit(IdsAndTypes);
}
-Error TypeStreamMerger::visitKnownRecord(CVType &, ArrayRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.ElementType);
- Success &= remapIndex(R.IndexType);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, ClassRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.FieldList);
- Success &= remapIndex(R.DerivationList);
- Success &= remapIndex(R.VTableShape);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, UnionRecord &R) {
- return writeRecord(R, remapIndex(R.FieldList));
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, EnumRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.FieldList);
- Success &= remapIndex(R.UnderlyingType);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, BitFieldRecord &R) {
- return writeRecord(R, remapIndex(R.Type));
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, VFTableShapeRecord &R) {
- return writeRecord(R, true);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, TypeServer2Record &R) {
- return writeRecord(R, true);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, LabelRecord &R) {
- return writeRecord(R, true);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, VFTableRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.CompleteClass);
- Success &= remapIndex(R.OverriddenVFTable);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &,
- MethodOverloadListRecord &R) {
- bool Success = true;
- for (OneMethodRecord &Meth : R.Methods)
- Success &= remapIndex(Meth.Type);
- return writeRecord(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownRecord(CVType &, FieldListRecord &R) {
- // Visit the members inside the field list.
- HadUntranslatedMember = false;
- FieldListBuilder.begin();
- if (auto EC = codeview::visitMemberRecordStream(R.Data, *this))
- return EC;
-
- // Write the record if we translated all field list members.
- TypeIndex DestIdx = Untranslated;
- if (!HadUntranslatedMember)
- DestIdx = FieldListBuilder.end();
- else
- FieldListBuilder.reset();
- addMapping(DestIdx);
-
- return Error::success();
-}
-
-//----------------------------------------------------------------------------//
-// Member records
-//----------------------------------------------------------------------------//
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- NestedTypeRecord &R) {
- return writeMember(R, remapIndex(R.Type));
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &, OneMethodRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.Type);
- return writeMember(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- OverloadedMethodRecord &R) {
- return writeMember(R, remapIndex(R.MethodList));
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- DataMemberRecord &R) {
- return writeMember(R, remapIndex(R.Type));
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- StaticDataMemberRecord &R) {
- return writeMember(R, remapIndex(R.Type));
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- EnumeratorRecord &R) {
- return writeMember(R, true);
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &, VFPtrRecord &R) {
- return writeMember(R, remapIndex(R.Type));
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &, BaseClassRecord &R) {
- return writeMember(R, remapIndex(R.Type));
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- VirtualBaseClassRecord &R) {
- bool Success = true;
- Success &= remapIndex(R.BaseType);
- Success &= remapIndex(R.VBPtrType);
- return writeMember(R, Success);
-}
-
-Error TypeStreamMerger::visitKnownMember(CVMemberRecord &,
- ListContinuationRecord &R) {
- return writeMember(R, remapIndex(R.ContinuationIndex));
-}
-
-Error TypeStreamMerger::visitUnknownType(CVType &Rec) {
- // We failed to translate a type. Translate this index as "not translated".
- addMapping(TypeIndex(SimpleTypeKind::NotTranslated));
- return errorCorruptRecord();
-}
-
-Error TypeStreamMerger::mergeStream(const CVTypeArray &Types) {
- assert(IndexMap.empty());
+Error TypeStreamMerger::doit(const CVTypeArray &Types) {
LastError = Error::success();
- if (auto EC = codeview::visitTypeStream(Types, *this, Handler))
+ // We don't want to deserialize records. I guess this flag is poorly named,
+ // but it really means "Don't deserialize records before switching on the
+ // concrete type."
+ // FIXME: We can probably get even more speed here if we don't use the visitor
+ // pipeline here, but instead write the switch ourselves. I don't think it
+ // would buy us much since it's already pretty fast, but it's probably worth
+ // a few cycles.
+ if (auto EC =
+ codeview::visitTypeStream(Types, *this, VDS_BytesExternal, Handler))
return EC;
// If we found bad indices but no other errors, try doing another pass and see
@@ -458,7 +307,8 @@ Error TypeStreamMerger::mergeStream(const CVTypeArray &Types) {
NumBadIndices = 0;
CurIndex = TypeIndex(TypeIndex::FirstNonSimpleIndex);
- if (auto EC = codeview::visitTypeStream(Types, *this, Handler))
+ if (auto EC =
+ codeview::visitTypeStream(Types, *this, VDS_BytesExternal, Handler))
return EC;
assert(NumBadIndices <= BadIndicesRemaining &&
@@ -469,18 +319,32 @@ Error TypeStreamMerger::mergeStream(const CVTypeArray &Types) {
}
}
- IndexMap.clear();
-
Error Ret = std::move(*LastError);
LastError.reset();
return Ret;
}
-Error llvm::codeview::mergeTypeStreams(TypeTableBuilder &DestIdStream,
- TypeTableBuilder &DestTypeStream,
+Error llvm::codeview::mergeTypeRecords(TypeTableBuilder &Dest,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler,
- const CVTypeArray &Types) {
- return TypeStreamMerger(DestIdStream, DestTypeStream, SourceToDest, Handler)
- .mergeStream(Types);
+ const CVTypeArray &Types) {
+ TypeStreamMerger M(SourceToDest, Handler);
+ return M.mergeTypeRecords(Dest, Types);
+}
+
+Error llvm::codeview::mergeIdRecords(TypeTableBuilder &Dest,
+ ArrayRef<TypeIndex> TypeSourceToDest,
+ SmallVectorImpl<TypeIndex> &SourceToDest,
+ const CVTypeArray &Ids) {
+ TypeStreamMerger M(SourceToDest, nullptr);
+ return M.mergeIdRecords(Dest, TypeSourceToDest, Ids);
+}
+
+Error llvm::codeview::mergeTypeAndIdRecords(
+ TypeTableBuilder &DestIds, TypeTableBuilder &DestTypes,
+ SmallVectorImpl<TypeIndex> &SourceToDest, TypeServerHandler *Handler,
+ const CVTypeArray &IdsAndTypes) {
+
+ TypeStreamMerger M(SourceToDest, Handler);
+ return M.mergeTypesAndIds(DestIds, DestTypes, IdsAndTypes);
}
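The merger rewrites every discovered index through a source-to-destination map, records Untranslated for anything it cannot resolve yet, and then runs a second pass for those deferred forward references. A small standalone sketch of that remap step, assuming a plain vector as the map and a 0xFFFFFFFF sentinel:

// Standalone sketch (assumed names, not LLVM's code): remapping indices
// through a source-to-destination map, with a sentinel for indices that have
// no translation yet and therefore need a second pass.
#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint32_t Untranslated = 0xFFFFFFFFu;

// Returns true if Idx was rewritten; false if it refers to a slot that has no
// translation yet (a forward reference or a previously failed record).
bool remapIndex(uint32_t &Idx, const std::vector<uint32_t> &Map) {
  if (Idx < Map.size() && Map[Idx] != Untranslated) {
    Idx = Map[Idx];
    return true;
  }
  return false;
}

int main() {
  std::vector<uint32_t> Map = {100, Untranslated, 102};
  uint32_t A = 0, B = 1, C = 5;
  std::cout << remapIndex(A, Map) << " " << A << "\n"; // 1 100
  std::cout << remapIndex(B, Map) << " " << B << "\n"; // 0 1  (defer to pass 2)
  std::cout << remapIndex(C, Map) << " " << C << "\n"; // 0 5  (out of range)
}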
diff --git a/lib/DebugInfo/CodeView/TypeTableCollection.cpp b/lib/DebugInfo/CodeView/TypeTableCollection.cpp
index a18710d6ab52..699694fde928 100644
--- a/lib/DebugInfo/CodeView/TypeTableCollection.cpp
+++ b/lib/DebugInfo/CodeView/TypeTableCollection.cpp
@@ -24,8 +24,7 @@ static void error(Error &&EC) {
consumeError(std::move(EC));
}
-TypeTableCollection::TypeTableCollection(
- ArrayRef<MutableArrayRef<uint8_t>> Records)
+TypeTableCollection::TypeTableCollection(ArrayRef<ArrayRef<uint8_t>> Records)
: Records(Records), Database(Records.size()) {}
Optional<TypeIndex> TypeTableCollection::getFirst() {
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index 8e7c6c43d1a2..5ed55ce4c0dc 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -60,12 +60,15 @@ typedef DILineInfoSpecifier::FileLineInfoKind FileLineInfoKind;
typedef DILineInfoSpecifier::FunctionNameKind FunctionNameKind;
uint64_t llvm::getRelocatedValue(const DataExtractor &Data, uint32_t Size,
- uint32_t *Off, const RelocAddrMap *Relocs) {
+ uint32_t *Off, const RelocAddrMap *Relocs,
+ uint64_t *SectionIndex) {
if (!Relocs)
return Data.getUnsigned(Off, Size);
RelocAddrMap::const_iterator AI = Relocs->find(*Off);
if (AI == Relocs->end())
return Data.getUnsigned(Off, Size);
+ if (SectionIndex)
+ *SectionIndex = AI->second.SectionIndex;
return Data.getUnsigned(Off, Size) + AI->second.Value;
}
@@ -287,6 +290,15 @@ void DWARFContext::dump(raw_ostream &OS, DIDumpType DumpType, bool DumpEH,
getStringSection(), isLittleEndian());
}
+DWARFCompileUnit *DWARFContext::getDWOCompileUnitForHash(uint64_t Hash) {
+ // FIXME: Improve this for the case where this DWO file is really a DWP file
+ // with an index - use the index for lookup instead of a linear search.
+ for (const auto &DWOCU : dwo_compile_units())
+ if (DWOCU->getDWOId() == Hash)
+ return DWOCU.get();
+ return nullptr;
+}
+
DWARFDie DWARFContext::getDIEForOffset(uint32_t Offset) {
parseCompileUnits();
if (auto *CU = CUs.getUnitForOffset(Offset))
@@ -897,28 +909,81 @@ DWARFContext::getInliningInfoForAddress(uint64_t Address,
return InliningInfo;
}
+std::shared_ptr<DWARFContext>
+DWARFContext::getDWOContext(StringRef AbsolutePath) {
+ if (auto S = DWP.lock()) {
+ DWARFContext *Ctxt = S->Context.get();
+ return std::shared_ptr<DWARFContext>(std::move(S), Ctxt);
+ }
+
+ std::weak_ptr<DWOFile> *Entry = &DWOFiles[AbsolutePath];
+
+ if (auto S = Entry->lock()) {
+ DWARFContext *Ctxt = S->Context.get();
+ return std::shared_ptr<DWARFContext>(std::move(S), Ctxt);
+ }
+
+ SmallString<128> DWPName;
+ Expected<OwningBinary<ObjectFile>> Obj = [&] {
+ if (!CheckedForDWP) {
+ (getFileName() + ".dwp").toVector(DWPName);
+ auto Obj = object::ObjectFile::createObjectFile(DWPName);
+ if (Obj) {
+ Entry = &DWP;
+ return Obj;
+ } else {
+ CheckedForDWP = true;
+ // TODO: Should this error be handled (maybe in a high verbosity mode)
+ // before falling back to .dwo files?
+ consumeError(Obj.takeError());
+ }
+ }
+
+ return object::ObjectFile::createObjectFile(AbsolutePath);
+ }();
+
+ if (!Obj) {
+ // TODO: Actually report errors helpfully.
+ consumeError(Obj.takeError());
+ return nullptr;
+ }
+
+ auto S = std::make_shared<DWOFile>();
+ S->File = std::move(Obj.get());
+ S->Context = llvm::make_unique<DWARFContextInMemory>(*S->File.getBinary());
+ *Entry = S;
+ auto *Ctxt = S->Context.get();
+ return std::shared_ptr<DWARFContext>(std::move(S), Ctxt);
+}
+
static Error createError(const Twine &Reason, llvm::Error E) {
return make_error<StringError>(Reason + toString(std::move(E)),
inconvertibleErrorCode());
}
-/// Returns the address of symbol relocation used against. Used for futher
-/// relocations computation. Symbol's section load address is taken in account if
-/// LoadedObjectInfo interface is provided.
-static Expected<uint64_t>
-getSymbolAddress(const object::ObjectFile &Obj, const RelocationRef &Reloc,
- const LoadedObjectInfo *L,
- std::map<SymbolRef, uint64_t> &Cache) {
- uint64_t Ret = 0;
+/// SymInfo contains information about a symbol: its address
+/// and section index, which is -1LL for absolute symbols.
+struct SymInfo {
+ uint64_t Address;
+ uint64_t SectionIndex;
+};
+
+/// Returns the address of the symbol that the relocation is applied against,
+/// together with its section index. Used for further relocation computation.
+/// The symbol's section load address is taken into account if a
+/// LoadedObjectInfo interface is provided.
+static Expected<SymInfo> getSymbolInfo(const object::ObjectFile &Obj,
+ const RelocationRef &Reloc,
+ const LoadedObjectInfo *L,
+ std::map<SymbolRef, SymInfo> &Cache) {
+ SymInfo Ret = {0, (uint64_t)-1LL};
object::section_iterator RSec = Obj.section_end();
object::symbol_iterator Sym = Reloc.getSymbol();
- std::map<SymbolRef, uint64_t>::iterator CacheIt = Cache.end();
+ std::map<SymbolRef, SymInfo>::iterator CacheIt = Cache.end();
// First calculate the address of the symbol or section as it appears
// in the object file
if (Sym != Obj.symbol_end()) {
bool New;
- std::tie(CacheIt, New) = Cache.insert({*Sym, 0});
+ std::tie(CacheIt, New) = Cache.insert({*Sym, {0, 0}});
if (!New)
return CacheIt->second;
@@ -934,12 +999,15 @@ getSymbolAddress(const object::ObjectFile &Obj, const RelocationRef &Reloc,
SectOrErr.takeError());
RSec = *SectOrErr;
- Ret = *SymAddrOrErr;
+ Ret.Address = *SymAddrOrErr;
} else if (auto *MObj = dyn_cast<MachOObjectFile>(&Obj)) {
RSec = MObj->getRelocationSection(Reloc.getRawDataRefImpl());
- Ret = RSec->getAddress();
+ Ret.Address = RSec->getAddress();
}
+ if (RSec != Obj.section_end())
+ Ret.SectionIndex = RSec->getIndex();
+
// If we are given load addresses for the sections, we need to adjust:
// SymAddr = (Address of Symbol Or Section in File) -
// (Address of Section in File) +
@@ -949,7 +1017,7 @@ getSymbolAddress(const object::ObjectFile &Obj, const RelocationRef &Reloc,
// we need to perform the same computation.
if (L && RSec != Obj.section_end())
if (uint64_t SectionLoadAddress = L->getSectionLoadAddress(*RSec))
- Ret += SectionLoadAddress - RSec->getAddress();
+ Ret.Address += SectionLoadAddress - RSec->getAddress();
if (CacheIt != Cache.end())
CacheIt->second = Ret;
@@ -989,8 +1057,8 @@ Error DWARFContextInMemory::maybeDecompress(const SectionRef &Sec,
}
DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
- const LoadedObjectInfo *L)
- : IsLittleEndian(Obj.isLittleEndian()),
+ const LoadedObjectInfo *L)
+ : FileName(Obj.getFileName()), IsLittleEndian(Obj.isLittleEndian()),
AddressSize(Obj.getBytesInAddress()) {
for (const SectionRef &Section : Obj.sections()) {
StringRef name;
@@ -1008,7 +1076,7 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
// Try to obtain an already relocated version of this section.
// Else use the unrelocated section from the object file. We'll have to
// apply relocations ourselves later.
- if (!L || !L->getLoadedSectionContents(*RelocatedSection,data))
+ if (!L || !L->getLoadedSectionContents(*RelocatedSection, data))
Section.getContents(data);
if (auto Err = maybeDecompress(Section, name, data)) {
@@ -1047,7 +1115,7 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
// If the section we're relocating was relocated already by the JIT,
// then we used the relocated version above, so we do not need to process
// relocations for it now.
- if (L && L->getLoadedSectionContents(*RelocatedSection,RelSecData))
+ if (L && L->getLoadedSectionContents(*RelocatedSection, RelSecData))
continue;
// In Mach-o files, the relocations do not need to be applied if
@@ -1091,29 +1159,30 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
if (Section.relocation_begin() == Section.relocation_end())
continue;
- std::map<SymbolRef, uint64_t> AddrCache;
+ // Symbol to [address, section index] cache mapping.
+ std::map<SymbolRef, SymInfo> AddrCache;
for (const RelocationRef &Reloc : Section.relocations()) {
// FIXME: it's not clear how to correctly handle scattered
// relocations.
if (isRelocScattered(Obj, Reloc))
continue;
- Expected<uint64_t> SymAddrOrErr =
- getSymbolAddress(Obj, Reloc, L, AddrCache);
- if (!SymAddrOrErr) {
- errs() << toString(SymAddrOrErr.takeError()) << '\n';
+ Expected<SymInfo> SymInfoOrErr = getSymbolInfo(Obj, Reloc, L, AddrCache);
+ if (!SymInfoOrErr) {
+ errs() << toString(SymInfoOrErr.takeError()) << '\n';
continue;
}
object::RelocVisitor V(Obj);
- uint64_t Val = V.visit(Reloc.getType(), Reloc, *SymAddrOrErr);
+ uint64_t Val = V.visit(Reloc.getType(), Reloc, SymInfoOrErr->Address);
if (V.error()) {
SmallString<32> Name;
Reloc.getTypeName(Name);
errs() << "error: failed to compute relocation: " << Name << "\n";
continue;
}
- Map->insert({Reloc.getOffset(), {Val}});
+ llvm::RelocAddrEntry Rel = {SymInfoOrErr->SectionIndex, Val};
+ Map->insert({Reloc.getOffset(), Rel});
}
}
}
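getDWOContext caches opened .dwo/.dwp files in a map of weak_ptrs and hands back a shared_ptr that points at the inner DWARFContext while keeping the owning file alive. A standalone sketch of that weak_ptr cache plus shared_ptr aliasing-constructor technique, with hypothetical File/Context types rather than the real DWARF classes:

// Standalone sketch (assumed names, not LLVM's code): caching owning objects
// in a map of weak_ptrs and handing out aliasing shared_ptrs to an inner
// member, so the file stays alive exactly as long as someone holds the
// context it owns.
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Context { std::string Name; };

struct File {
  std::string Path;
  std::unique_ptr<Context> Ctx;
};

std::map<std::string, std::weak_ptr<File>> Cache;

std::shared_ptr<Context> getContext(const std::string &Path) {
  std::weak_ptr<File> &Entry = Cache[Path];
  std::shared_ptr<File> F = Entry.lock();
  if (!F) {
    F = std::make_shared<File>();
    F->Path = Path;
    F->Ctx = std::make_unique<Context>(Context{Path + ":ctx"});
    Entry = F;
  }
  // Aliasing constructor: the returned shared_ptr addresses the Context, but
  // its reference count keeps the owning File alive.
  Context *Raw = F->Ctx.get();
  return std::shared_ptr<Context>(std::move(F), Raw);
}

int main() {
  auto C1 = getContext("a.dwo");
  auto C2 = getContext("a.dwo"); // cache hit: same File, same Context
  std::cout << (C1.get() == C2.get()) << " " << C1->Name << "\n"; // 1 a.dwo:ctx
}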
diff --git a/lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp b/lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp
index 8da797750abd..6b5e1d3c931b 100644
--- a/lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDebugRangeList.cpp
@@ -35,8 +35,8 @@ bool DWARFDebugRangeList::extract(DataExtractor data, uint32_t *offset_ptr,
while (true) {
RangeListEntry entry;
uint32_t prev_offset = *offset_ptr;
- entry.StartAddress =
- getRelocatedValue(data, AddressSize, offset_ptr, &Relocs);
+ entry.StartAddress = getRelocatedValue(data, AddressSize, offset_ptr,
+ &Relocs, &entry.SectionIndex);
entry.EndAddress =
getRelocatedValue(data, AddressSize, offset_ptr, &Relocs);
@@ -69,8 +69,8 @@ DWARFDebugRangeList::getAbsoluteRanges(uint64_t BaseAddress) const {
if (RLE.isBaseAddressSelectionEntry(AddressSize)) {
BaseAddress = RLE.EndAddress;
} else {
- Res.push_back(
- {BaseAddress + RLE.StartAddress, BaseAddress + RLE.EndAddress});
+ Res.push_back({BaseAddress + RLE.StartAddress,
+ BaseAddress + RLE.EndAddress, RLE.SectionIndex});
}
}
return Res;
diff --git a/lib/DebugInfo/DWARF/DWARFDie.cpp b/lib/DebugInfo/DWARF/DWARFDie.cpp
index e3bd759ba94b..fd45c77d3745 100644
--- a/lib/DebugInfo/DWARF/DWARFDie.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDie.cpp
@@ -211,13 +211,16 @@ Optional<uint64_t> DWARFDie::getHighPC(uint64_t LowPC) const {
return None;
}
-bool DWARFDie::getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC) const {
- auto LowPcAddr = toAddress(find(DW_AT_low_pc));
+bool DWARFDie::getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
+ uint64_t &SectionIndex) const {
+ auto F = find(DW_AT_low_pc);
+ auto LowPcAddr = toAddress(F);
if (!LowPcAddr)
return false;
if (auto HighPcAddr = getHighPC(*LowPcAddr)) {
LowPC = *LowPcAddr;
HighPC = *HighPcAddr;
+ SectionIndex = F->getSectionIndex();
return true;
}
return false;
@@ -228,9 +231,9 @@ DWARFDie::getAddressRanges() const {
if (isNULL())
return DWARFAddressRangesVector();
// Single range specified by low/high PC.
- uint64_t LowPC, HighPC;
- if (getLowAndHighPC(LowPC, HighPC))
- return {{LowPC, HighPC}};
+ uint64_t LowPC, HighPC, Index;
+ if (getLowAndHighPC(LowPC, HighPC, Index))
+ return {{LowPC, HighPC, Index}};
// Multiple ranges from .debug_ranges section.
auto RangesOffset = toSectionOffset(find(DW_AT_ranges));
diff --git a/lib/DebugInfo/DWARF/DWARFFormValue.cpp b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
index 1cbd3ea2c869..0963d7bfd713 100644
--- a/lib/DebugInfo/DWARF/DWARFFormValue.cpp
+++ b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
@@ -333,8 +333,8 @@ bool DWARFFormValue::extractValue(const DataExtractor &Data,
return false;
uint16_t AddrSize = (Form == DW_FORM_addr) ? U->getAddressByteSize()
: U->getRefAddrByteSize();
- Value.uval =
- getRelocatedValue(Data, AddrSize, OffsetPtr, U->getRelocMap());
+ Value.uval = getRelocatedValue(Data, AddrSize, OffsetPtr,
+ U->getRelocMap(), &Value.SectionIndex);
break;
}
case DW_FORM_exprloc:
diff --git a/lib/DebugInfo/DWARF/DWARFUnit.cpp b/lib/DebugInfo/DWARF/DWARFUnit.cpp
index c268afc222c3..c5add6a478b3 100644
--- a/lib/DebugInfo/DWARF/DWARFUnit.cpp
+++ b/lib/DebugInfo/DWARF/DWARFUnit.cpp
@@ -249,23 +249,6 @@ size_t DWARFUnit::extractDIEsIfNeeded(bool CUDieOnly) {
return DieArray.size();
}
-DWARFUnit::DWOHolder::DWOHolder(StringRef DWOPath, uint64_t DWOId) {
- auto Obj = object::ObjectFile::createObjectFile(DWOPath);
- if (!Obj) {
- // TODO: Actually report errors helpfully.
- consumeError(Obj.takeError());
- return;
- }
- DWOFile = std::move(Obj.get());
- DWOContext.reset(
- cast<DWARFContext>(new DWARFContextInMemory(*DWOFile.getBinary())));
- for (const auto &DWOCU : DWOContext->dwo_compile_units())
- if (DWOCU->getDWOId() == DWOId) {
- DWOU = DWOCU.get();
- return;
- }
-}
-
bool DWARFUnit::parseDWO() {
if (isDWO)
return false;
@@ -287,16 +270,18 @@ bool DWARFUnit::parseDWO() {
auto DWOId = getDWOId();
if (!DWOId)
return false;
- DWO = llvm::make_unique<DWOHolder>(AbsolutePath, *DWOId);
- DWARFUnit *DWOCU = DWO->getUnit();
- if (!DWOCU) {
- DWO.reset();
+ auto DWOContext = Context.getDWOContext(AbsolutePath);
+ if (!DWOContext)
return false;
- }
+
+ DWARFCompileUnit *DWOCU = DWOContext->getDWOCompileUnitForHash(*DWOId);
+ if (!DWOCU)
+ return false;
+ DWO = std::shared_ptr<DWARFCompileUnit>(std::move(DWOContext), DWOCU);
// Share .debug_addr and .debug_ranges section with compile unit in .dwo
- DWOCU->setAddrOffsetSection(AddrOffsetSection, AddrOffsetSectionBase);
+ DWO->setAddrOffsetSection(AddrOffsetSection, AddrOffsetSectionBase);
auto DWORangesBase = UnitDie.getRangesBaseAttribute();
- DWOCU->setRangesSection(RangeSection, DWORangesBase ? *DWORangesBase : 0);
+ DWO->setRangesSection(RangeSection, DWORangesBase ? *DWORangesBase : 0);
return true;
}
@@ -339,8 +324,8 @@ void DWARFUnit::collectAddressRanges(DWARFAddressRangesVector &CURanges) {
// Collect address ranges from DIEs in .dwo if necessary.
bool DWOCreated = parseDWO();
- if (DWO.get())
- DWO->getUnit()->collectAddressRanges(CURanges);
+ if (DWO)
+ DWO->collectAddressRanges(CURanges);
if (DWOCreated)
DWO.reset();
@@ -400,7 +385,7 @@ DWARFUnit::getInlinedChainForAddress(uint64_t Address,
// First, find the subroutine that contains the given address (the leaf
// of inlined chain).
DWARFDie SubroutineDIE =
- (DWO ? DWO->getUnit() : this)->getSubroutineForAddress(Address);
+ (DWO ? DWO.get() : this)->getSubroutineForAddress(Address);
while (SubroutineDIE) {
if (SubroutineDIE.isSubroutineDIE())
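The DWARFUnit rewrite drops the DWOHolder and instead keeps the split unit as a std::shared_ptr built with the aliasing constructor: the stored pointer is the DWO compile unit, while the ownership being shared is the whole DWO context obtained from Context.getDWOContext(). A self-contained illustration of that constructor (mock types, not the DWARF classes):

    #include <cassert>
    #include <memory>
    #include <vector>

    struct MockCU { int Id; };
    struct MockContext { std::vector<MockCU> CUs{{1}, {2}}; };

    int main() {
      auto Ctx = std::make_shared<MockContext>();
      MockCU *Wanted = &Ctx->CUs[1];

      // Aliasing constructor: DWO shares ownership of the whole context, but
      // get() and operator-> point at the selected compile unit inside it.
      std::shared_ptr<MockCU> DWO(Ctx, Wanted);
      Ctx.reset(); // the context stays alive for as long as DWO does

      assert(DWO->Id == 2);
      assert(DWO.use_count() == 1);
    }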
diff --git a/lib/DebugInfo/MSF/MappedBlockStream.cpp b/lib/DebugInfo/MSF/MappedBlockStream.cpp
index 57953cfa338e..dfdeb8414212 100644
--- a/lib/DebugInfo/MSF/MappedBlockStream.cpp
+++ b/lib/DebugInfo/MSF/MappedBlockStream.cpp
@@ -45,18 +45,17 @@ static Interval intersect(const Interval &I1, const Interval &I2) {
std::min(I1.second, I2.second));
}
-MappedBlockStream::MappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
+MappedBlockStream::MappedBlockStream(uint32_t BlockSize,
const MSFStreamLayout &Layout,
BinaryStreamRef MsfData)
- : BlockSize(BlockSize), NumBlocks(NumBlocks), StreamLayout(Layout),
- MsfData(MsfData) {}
+ : BlockSize(BlockSize), StreamLayout(Layout), MsfData(MsfData) {}
std::unique_ptr<MappedBlockStream>
-MappedBlockStream::createStream(uint32_t BlockSize, uint32_t NumBlocks,
+MappedBlockStream::createStream(uint32_t BlockSize,
const MSFStreamLayout &Layout,
BinaryStreamRef MsfData) {
return llvm::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
- BlockSize, NumBlocks, Layout, MsfData);
+ BlockSize, Layout, MsfData);
}
std::unique_ptr<MappedBlockStream> MappedBlockStream::createIndexedStream(
@@ -66,7 +65,7 @@ std::unique_ptr<MappedBlockStream> MappedBlockStream::createIndexedStream(
SL.Blocks = Layout.StreamMap[StreamIndex];
SL.Length = Layout.StreamSizes[StreamIndex];
return llvm::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
- Layout.SB->BlockSize, Layout.SB->NumBlocks, SL, MsfData);
+ Layout.SB->BlockSize, SL, MsfData);
}
std::unique_ptr<MappedBlockStream>
@@ -75,7 +74,7 @@ MappedBlockStream::createDirectoryStream(const MSFLayout &Layout,
MSFStreamLayout SL;
SL.Blocks = Layout.DirectoryBlocks;
SL.Length = Layout.SB->NumDirectoryBytes;
- return createStream(Layout.SB->BlockSize, Layout.SB->NumBlocks, SL, MsfData);
+ return createStream(Layout.SB->BlockSize, SL, MsfData);
}
std::unique_ptr<MappedBlockStream>
@@ -83,7 +82,7 @@ MappedBlockStream::createFpmStream(const MSFLayout &Layout,
BinaryStreamRef MsfData) {
MSFStreamLayout SL;
initializeFpmStreamLayout(Layout, SL);
- return createStream(Layout.SB->BlockSize, Layout.SB->NumBlocks, SL, MsfData);
+ return createStream(Layout.SB->BlockSize, SL, MsfData);
}
Error MappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,
@@ -173,7 +172,7 @@ Error MappedBlockStream::readLongestContiguousChunk(uint32_t Offset,
uint32_t First = Offset / BlockSize;
uint32_t Last = First;
- while (Last < NumBlocks - 1) {
+ while (Last < getNumBlocks() - 1) {
if (StreamLayout.Blocks[Last] != StreamLayout.Blocks[Last + 1] - 1)
break;
++Last;
@@ -313,17 +312,16 @@ void MappedBlockStream::fixCacheAfterWrite(uint32_t Offset,
}
WritableMappedBlockStream::WritableMappedBlockStream(
- uint32_t BlockSize, uint32_t NumBlocks, const MSFStreamLayout &Layout,
+ uint32_t BlockSize, const MSFStreamLayout &Layout,
WritableBinaryStreamRef MsfData)
- : ReadInterface(BlockSize, NumBlocks, Layout, MsfData),
- WriteInterface(MsfData) {}
+ : ReadInterface(BlockSize, Layout, MsfData), WriteInterface(MsfData) {}
std::unique_ptr<WritableMappedBlockStream>
-WritableMappedBlockStream::createStream(uint32_t BlockSize, uint32_t NumBlocks,
+WritableMappedBlockStream::createStream(uint32_t BlockSize,
const MSFStreamLayout &Layout,
WritableBinaryStreamRef MsfData) {
return llvm::make_unique<MappedBlockStreamImpl<WritableMappedBlockStream>>(
- BlockSize, NumBlocks, Layout, MsfData);
+ BlockSize, Layout, MsfData);
}
std::unique_ptr<WritableMappedBlockStream>
@@ -334,7 +332,7 @@ WritableMappedBlockStream::createIndexedStream(const MSFLayout &Layout,
MSFStreamLayout SL;
SL.Blocks = Layout.StreamMap[StreamIndex];
SL.Length = Layout.StreamSizes[StreamIndex];
- return createStream(Layout.SB->BlockSize, Layout.SB->NumBlocks, SL, MsfData);
+ return createStream(Layout.SB->BlockSize, SL, MsfData);
}
std::unique_ptr<WritableMappedBlockStream>
@@ -343,7 +341,7 @@ WritableMappedBlockStream::createDirectoryStream(
MSFStreamLayout SL;
SL.Blocks = Layout.DirectoryBlocks;
SL.Length = Layout.SB->NumDirectoryBytes;
- return createStream(Layout.SB->BlockSize, Layout.SB->NumBlocks, SL, MsfData);
+ return createStream(Layout.SB->BlockSize, SL, MsfData);
}
std::unique_ptr<WritableMappedBlockStream>
@@ -351,7 +349,7 @@ WritableMappedBlockStream::createFpmStream(const MSFLayout &Layout,
WritableBinaryStreamRef MsfData) {
MSFStreamLayout SL;
initializeFpmStreamLayout(Layout, SL);
- return createStream(Layout.SB->BlockSize, Layout.SB->NumBlocks, SL, MsfData);
+ return createStream(Layout.SB->BlockSize, SL, MsfData);
}
Error WritableMappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,
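Throughout MappedBlockStream the separate NumBlocks constructor parameter is removed and call sites switch to getNumBlocks(). The diff only shows the parameter disappearing; that the count is derived from the stream layout's block list is an assumption in the sketch below, which is a standalone mock rather than the LLVM class:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct MockStreamLayout {
      std::vector<uint32_t> Blocks; // which MSF blocks make up this stream
      uint32_t Length = 0;
    };

    class MockMappedStream {
    public:
      MockMappedStream(uint32_t BlockSize, MockStreamLayout Layout)
          : BlockSize(BlockSize), StreamLayout(std::move(Layout)) {}

      uint32_t getBlockSize() const { return BlockSize; }
      // No stored NumBlocks member: derive the count from the layout on demand.
      uint32_t getNumBlocks() const {
        return static_cast<uint32_t>(StreamLayout.Blocks.size());
      }

    private:
      uint32_t BlockSize;
      MockStreamLayout StreamLayout;
    };

    int main() {
      MockMappedStream S(4096, {{3, 4, 5}, 3 * 4096});
      assert(S.getBlockSize() == 4096);
      assert(S.getNumBlocks() == 3);
    }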
diff --git a/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp b/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
index c19a2f0d3110..23c7456d7772 100644
--- a/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
@@ -129,16 +129,21 @@ uint32_t DbiStreamBuilder::calculateSectionMapStreamSize() const {
return sizeof(SecMapHeader) + sizeof(SecMapEntry) * SectionMap.size();
}
-uint32_t DbiStreamBuilder::calculateFileInfoSubstreamSize() const {
- uint32_t Size = 0;
- Size += sizeof(ulittle16_t); // NumModules
- Size += sizeof(ulittle16_t); // NumSourceFiles
- Size += ModiList.size() * sizeof(ulittle16_t); // ModIndices
- Size += ModiList.size() * sizeof(ulittle16_t); // ModFileCounts
+uint32_t DbiStreamBuilder::calculateNamesOffset() const {
+ uint32_t Offset = 0;
+ Offset += sizeof(ulittle16_t); // NumModules
+ Offset += sizeof(ulittle16_t); // NumSourceFiles
+ Offset += ModiList.size() * sizeof(ulittle16_t); // ModIndices
+ Offset += ModiList.size() * sizeof(ulittle16_t); // ModFileCounts
uint32_t NumFileInfos = 0;
for (const auto &M : ModiList)
NumFileInfos += M->source_files().size();
- Size += NumFileInfos * sizeof(ulittle32_t); // FileNameOffsets
+ Offset += NumFileInfos * sizeof(ulittle32_t); // FileNameOffsets
+ return Offset;
+}
+
+uint32_t DbiStreamBuilder::calculateFileInfoSubstreamSize() const {
+ uint32_t Size = calculateNamesOffset();
Size += calculateNamesBufferSize();
return alignTo(Size, sizeof(uint32_t));
}
@@ -157,9 +162,8 @@ uint32_t DbiStreamBuilder::calculateDbgStreamsSize() const {
Error DbiStreamBuilder::generateFileInfoSubstream() {
uint32_t Size = calculateFileInfoSubstreamSize();
- uint32_t NameSize = calculateNamesBufferSize();
auto Data = Allocator.Allocate<uint8_t>(Size);
- uint32_t NamesOffset = Size - NameSize;
+ uint32_t NamesOffset = calculateNamesOffset();
FileInfoBuffer = MutableBinaryByteStream(MutableArrayRef<uint8_t>(Data, Size),
llvm::support::little);
@@ -207,6 +211,9 @@ Error DbiStreamBuilder::generateFileInfoSubstream() {
}
}
+ if (auto EC = NameBufferWriter.padToAlignment(sizeof(uint32_t)))
+ return EC;
+
if (NameBufferWriter.bytesRemaining() > 0)
return make_error<RawError>(raw_error_code::invalid_format,
"The names buffer contained unexpected data.");
diff --git a/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp b/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp
index f00567db743e..9fd90102f72c 100644
--- a/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp
@@ -47,7 +47,7 @@ void PDBTypeServerHandler::addSearchPath(StringRef Path) {
if (Path.empty() || !sys::fs::is_directory(Path))
return;
- SearchPaths.push_back(Path);
+ SearchPaths.insert(Path);
}
Expected<bool>
@@ -57,7 +57,13 @@ PDBTypeServerHandler::handleInternal(PDBFile &File,

if (!ExpectedTpi)
return ExpectedTpi.takeError();
- if (auto EC = codeview::visitTypeStream(ExpectedTpi->typeArray(), Callbacks))
+  // When handling a type server, we should use whatever callback array is
+  // being used for the original file. We shouldn't allow the visitor to
+  // arbitrarily stick a deserializer in there.
+ if (auto EC = codeview::visitTypeStream(ExpectedTpi->typeArray(), Callbacks,
+ VDS_BytesExternal))
return std::move(EC);
return true;
@@ -80,13 +86,14 @@ Expected<bool> PDBTypeServerHandler::handle(TypeServer2Record &TS,
cv_error_code::corrupt_record,
"TypeServer2Record does not contain filename!");
- for (auto Path : SearchPaths) {
- sys::path::append(Path, File);
- if (!sys::fs::exists(Path))
+ for (auto &Path : SearchPaths) {
+ SmallString<64> PathStr = Path.getKey();
+ sys::path::append(PathStr, File);
+ if (!sys::fs::exists(PathStr))
continue;
std::unique_ptr<IPDBSession> ThisSession;
- if (auto EC = loadDataForPDB(PDB_ReaderType::Native, Path, ThisSession)) {
+ if (auto EC = loadDataForPDB(PDB_ReaderType::Native, PathStr, ThisSession)) {
// It is not an error if this PDB fails to load, it just means that it
// doesn't match and we should continue searching.
ignoreErrors(std::move(EC));
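In PDBTypeServerHandler, SearchPaths becomes a set-like container (insert instead of push_back, iteration via getKey), so repeated addSearchPath calls no longer accumulate duplicates, and the PDB file name is appended to a copy of each stored path rather than mutating it. A loose analogue using only the standard library, where std::set and std::filesystem stand in for LLVM's StringSet and sys::path:

    #include <filesystem>
    #include <iostream>
    #include <set>
    #include <string>

    int main() {
      std::set<std::string> SearchPaths;
      SearchPaths.insert("/tmp/pdbs");
      SearchPaths.insert("/tmp/pdbs"); // duplicate, silently ignored

      const std::string File = "foo.pdb";
      for (const auto &Path : SearchPaths) {
        std::filesystem::path Candidate = Path; // copy, like the SmallString PathStr
        Candidate /= File;                      // append the type server's file name
        std::cout << "would probe " << Candidate << '\n';
      }
      return SearchPaths.size() == 1 ? 0 : 1;
    }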
diff --git a/lib/DebugInfo/PDB/Native/TpiStream.cpp b/lib/DebugInfo/PDB/Native/TpiStream.cpp
index 8e0065873892..623afb371b50 100644
--- a/lib/DebugInfo/PDB/Native/TpiStream.cpp
+++ b/lib/DebugInfo/PDB/Native/TpiStream.cpp
@@ -8,7 +8,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
+
#include "llvm/ADT/iterator_range.h"
+#include "llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
@@ -104,6 +106,8 @@ Error TpiStream::reload() {
HashStream = std::move(HS);
}
+ Types = llvm::make_unique<LazyRandomTypeCollection>(
+ TypeRecords, getNumTypeRecords(), getTypeIndexOffsets());
return Error::success();
}
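Finally, TpiStream::reload() now builds a LazyRandomTypeCollection over the type records, seeded with the record count and the type index offsets. The toy below only gestures at the decode-on-first-access-with-caching idea behind such a collection; it is entirely illustrative and not the LLVM class:

    #include <cassert>
    #include <cstdint>
    #include <map>

    struct ToyLazyTypes {
      uint32_t NumRecords;
      std::map<uint32_t, uint32_t> Cache; // index -> decoded value

      uint32_t get(uint32_t Index) {
        // Decode the record only the first time it is requested.
        auto [It, Inserted] = Cache.try_emplace(Index, Index * 2 /* "decode" */);
        (void)Inserted;
        return It->second;
      }
    };

    int main() {
      ToyLazyTypes Types{100, {}};
      assert(Types.get(7) == 14);
      assert(Types.Cache.size() == 1); // only the requested record was decoded
    }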