Diffstat (limited to 'contrib/llvm/lib/ProfileData')
| -rw-r--r-- | contrib/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp | 778 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp | 750 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp | 208 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/GCOV.cpp | 822 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/InstrProf.cpp | 1014 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/InstrProfReader.cpp | 747 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/InstrProfWriter.cpp | 390 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp | 115 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/SampleProf.cpp | 183 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/SampleProfReader.cpp | 898 |
| -rw-r--r-- | contrib/llvm/lib/ProfileData/SampleProfWriter.cpp | 345 |
11 files changed, 6250 insertions, 0 deletions
diff --git a/contrib/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/contrib/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp new file mode 100644 index 000000000000..b3c2b182e76c --- /dev/null +++ b/contrib/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp @@ -0,0 +1,778 @@ +//===- CoverageMapping.cpp - Code coverage mapping support ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for clang's and llvm's instrumentation based +// code coverage. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/Coverage/CoverageMapping.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/None.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/SmallBitVector.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ProfileData/Coverage/CoverageMappingReader.h" +#include "llvm/ProfileData/InstrProfReader.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Errc.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstdint> +#include <iterator> +#include <map> +#include <memory> +#include <string> +#include <system_error> +#include <utility> +#include <vector> + +using namespace llvm; +using namespace coverage; + +#define DEBUG_TYPE "coverage-mapping" + +Counter CounterExpressionBuilder::get(const CounterExpression &E) { + auto It = ExpressionIndices.find(E); + if (It != ExpressionIndices.end()) + return Counter::getExpression(It->second); + unsigned I = Expressions.size(); + Expressions.push_back(E); + ExpressionIndices[E] = I; + return Counter::getExpression(I); +} + +void CounterExpressionBuilder::extractTerms(Counter C, int Factor, + SmallVectorImpl<Term> &Terms) { + switch (C.getKind()) { + case Counter::Zero: + break; + case Counter::CounterValueReference: + Terms.emplace_back(C.getCounterID(), Factor); + break; + case Counter::Expression: + const auto &E = Expressions[C.getExpressionID()]; + extractTerms(E.LHS, Factor, Terms); + extractTerms( + E.RHS, E.Kind == CounterExpression::Subtract ? -Factor : Factor, Terms); + break; + } +} + +Counter CounterExpressionBuilder::simplify(Counter ExpressionTree) { + // Gather constant terms. + SmallVector<Term, 32> Terms; + extractTerms(ExpressionTree, +1, Terms); + + // If there are no terms, this is just a zero. The algorithm below assumes at + // least one term. + if (Terms.size() == 0) + return Counter::getZero(); + + // Group the terms by counter ID. + llvm::sort(Terms.begin(), Terms.end(), [](const Term &LHS, const Term &RHS) { + return LHS.CounterID < RHS.CounterID; + }); + + // Combine terms by counter ID to eliminate counters that sum to zero. + auto Prev = Terms.begin(); + for (auto I = Prev + 1, E = Terms.end(); I != E; ++I) { + if (I->CounterID == Prev->CounterID) { + Prev->Factor += I->Factor; + continue; + } + ++Prev; + *Prev = *I; + } + Terms.erase(++Prev, Terms.end()); + + Counter C; + // Create additions. We do this before subtractions to avoid constructs like + // ((0 - X) + Y), as opposed to (Y - X). 
+ for (auto T : Terms) { + if (T.Factor <= 0) + continue; + for (int I = 0; I < T.Factor; ++I) + if (C.isZero()) + C = Counter::getCounter(T.CounterID); + else + C = get(CounterExpression(CounterExpression::Add, C, + Counter::getCounter(T.CounterID))); + } + + // Create subtractions. + for (auto T : Terms) { + if (T.Factor >= 0) + continue; + for (int I = 0; I < -T.Factor; ++I) + C = get(CounterExpression(CounterExpression::Subtract, C, + Counter::getCounter(T.CounterID))); + } + return C; +} + +Counter CounterExpressionBuilder::add(Counter LHS, Counter RHS) { + return simplify(get(CounterExpression(CounterExpression::Add, LHS, RHS))); +} + +Counter CounterExpressionBuilder::subtract(Counter LHS, Counter RHS) { + return simplify( + get(CounterExpression(CounterExpression::Subtract, LHS, RHS))); +} + +void CounterMappingContext::dump(const Counter &C, raw_ostream &OS) const { + switch (C.getKind()) { + case Counter::Zero: + OS << '0'; + return; + case Counter::CounterValueReference: + OS << '#' << C.getCounterID(); + break; + case Counter::Expression: { + if (C.getExpressionID() >= Expressions.size()) + return; + const auto &E = Expressions[C.getExpressionID()]; + OS << '('; + dump(E.LHS, OS); + OS << (E.Kind == CounterExpression::Subtract ? " - " : " + "); + dump(E.RHS, OS); + OS << ')'; + break; + } + } + if (CounterValues.empty()) + return; + Expected<int64_t> Value = evaluate(C); + if (auto E = Value.takeError()) { + consumeError(std::move(E)); + return; + } + OS << '[' << *Value << ']'; +} + +Expected<int64_t> CounterMappingContext::evaluate(const Counter &C) const { + switch (C.getKind()) { + case Counter::Zero: + return 0; + case Counter::CounterValueReference: + if (C.getCounterID() >= CounterValues.size()) + return errorCodeToError(errc::argument_out_of_domain); + return CounterValues[C.getCounterID()]; + case Counter::Expression: { + if (C.getExpressionID() >= Expressions.size()) + return errorCodeToError(errc::argument_out_of_domain); + const auto &E = Expressions[C.getExpressionID()]; + Expected<int64_t> LHS = evaluate(E.LHS); + if (!LHS) + return LHS; + Expected<int64_t> RHS = evaluate(E.RHS); + if (!RHS) + return RHS; + return E.Kind == CounterExpression::Subtract ? *LHS - *RHS : *LHS + *RHS; + } + } + llvm_unreachable("Unhandled CounterKind"); +} + +void FunctionRecordIterator::skipOtherFiles() { + while (Current != Records.end() && !Filename.empty() && + Filename != Current->Filenames[0]) + ++Current; + if (Current == Records.end()) + *this = FunctionRecordIterator(); +} + +Error CoverageMapping::loadFunctionRecord( + const CoverageMappingRecord &Record, + IndexedInstrProfReader &ProfileReader) { + StringRef OrigFuncName = Record.FunctionName; + if (OrigFuncName.empty()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + + if (Record.Filenames.empty()) + OrigFuncName = getFuncNameWithoutPrefix(OrigFuncName); + else + OrigFuncName = getFuncNameWithoutPrefix(OrigFuncName, Record.Filenames[0]); + + // Don't load records for (filenames, function) pairs we've already seen. 
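The counter expression machinery above (extractTerms()/simplify() and CounterMappingContext::evaluate()) is easiest to follow with a small worked example. The sketch below is illustrative only and is not part of this diff; the counter values are made up, and it assumes the public CounterExpressionBuilder and CounterMappingContext interfaces from CoverageMapping.h behave as the implementation above shows.

// Illustrative sketch; the counter values (7, 3) are hypothetical.
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

using namespace llvm;
using namespace llvm::coverage;

int main() {
  CounterExpressionBuilder Builder;
  Counter C0 = Counter::getCounter(0);
  Counter C1 = Counter::getCounter(1);

  // ((#0 + #1) - #1): the +1 and -1 terms for counter 1 cancel inside
  // simplify(), so the result collapses back to a plain reference to #0.
  Counter Sum = Builder.add(C0, C1);
  Counter Simplified = Builder.subtract(Sum, C1);
  (void)Simplified; // Simplified.isExpression() is false here.

  // (#0 - #1) stays an expression; evaluate() folds it against real counts.
  Counter Diff = Builder.subtract(C0, C1);
  CounterMappingContext Ctx(Builder.getExpressions());
  std::vector<uint64_t> Counts = {7, 3}; // counts for #0 and #1
  Ctx.setCounts(Counts);
  if (Expected<int64_t> V = Ctx.evaluate(Diff))
    outs() << "executions: " << *V << "\n"; // prints 4
  else
    consumeError(V.takeError());
  return 0;
}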
+ auto FilenamesHash = hash_combine_range(Record.Filenames.begin(), + Record.Filenames.end()); + if (!RecordProvenance[FilenamesHash].insert(hash_value(OrigFuncName)).second) + return Error::success(); + + CounterMappingContext Ctx(Record.Expressions); + + std::vector<uint64_t> Counts; + if (Error E = ProfileReader.getFunctionCounts(Record.FunctionName, + Record.FunctionHash, Counts)) { + instrprof_error IPE = InstrProfError::take(std::move(E)); + if (IPE == instrprof_error::hash_mismatch) { + FuncHashMismatches.emplace_back(Record.FunctionName, Record.FunctionHash); + return Error::success(); + } else if (IPE != instrprof_error::unknown_function) + return make_error<InstrProfError>(IPE); + Counts.assign(Record.MappingRegions.size(), 0); + } + Ctx.setCounts(Counts); + + assert(!Record.MappingRegions.empty() && "Function has no regions"); + + FunctionRecord Function(OrigFuncName, Record.Filenames); + for (const auto &Region : Record.MappingRegions) { + Expected<int64_t> ExecutionCount = Ctx.evaluate(Region.Count); + if (auto E = ExecutionCount.takeError()) { + consumeError(std::move(E)); + return Error::success(); + } + Function.pushRegion(Region, *ExecutionCount); + } + if (Function.CountedRegions.size() != Record.MappingRegions.size()) { + FuncCounterMismatches.emplace_back(Record.FunctionName, + Function.CountedRegions.size()); + return Error::success(); + } + + Functions.push_back(std::move(Function)); + return Error::success(); +} + +Expected<std::unique_ptr<CoverageMapping>> CoverageMapping::load( + ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders, + IndexedInstrProfReader &ProfileReader) { + auto Coverage = std::unique_ptr<CoverageMapping>(new CoverageMapping()); + + for (const auto &CoverageReader : CoverageReaders) { + for (auto RecordOrErr : *CoverageReader) { + if (Error E = RecordOrErr.takeError()) + return std::move(E); + const auto &Record = *RecordOrErr; + if (Error E = Coverage->loadFunctionRecord(Record, ProfileReader)) + return std::move(E); + } + } + + return std::move(Coverage); +} + +Expected<std::unique_ptr<CoverageMapping>> +CoverageMapping::load(ArrayRef<StringRef> ObjectFilenames, + StringRef ProfileFilename, ArrayRef<StringRef> Arches) { + auto ProfileReaderOrErr = IndexedInstrProfReader::create(ProfileFilename); + if (Error E = ProfileReaderOrErr.takeError()) + return std::move(E); + auto ProfileReader = std::move(ProfileReaderOrErr.get()); + + SmallVector<std::unique_ptr<CoverageMappingReader>, 4> Readers; + SmallVector<std::unique_ptr<MemoryBuffer>, 4> Buffers; + for (const auto &File : llvm::enumerate(ObjectFilenames)) { + auto CovMappingBufOrErr = MemoryBuffer::getFileOrSTDIN(File.value()); + if (std::error_code EC = CovMappingBufOrErr.getError()) + return errorCodeToError(EC); + StringRef Arch = Arches.empty() ? StringRef() : Arches[File.index()]; + auto CoverageReaderOrErr = + BinaryCoverageReader::create(CovMappingBufOrErr.get(), Arch); + if (Error E = CoverageReaderOrErr.takeError()) + return std::move(E); + Readers.push_back(std::move(CoverageReaderOrErr.get())); + Buffers.push_back(std::move(CovMappingBufOrErr.get())); + } + return load(Readers, *ProfileReader); +} + +namespace { + +/// Distributes functions into instantiation sets. +/// +/// An instantiation set is a collection of functions that have the same source +/// code, ie, template functions specializations. 
+class FunctionInstantiationSetCollector { + using MapT = std::map<LineColPair, std::vector<const FunctionRecord *>>; + MapT InstantiatedFunctions; + +public: + void insert(const FunctionRecord &Function, unsigned FileID) { + auto I = Function.CountedRegions.begin(), E = Function.CountedRegions.end(); + while (I != E && I->FileID != FileID) + ++I; + assert(I != E && "function does not cover the given file"); + auto &Functions = InstantiatedFunctions[I->startLoc()]; + Functions.push_back(&Function); + } + + MapT::iterator begin() { return InstantiatedFunctions.begin(); } + MapT::iterator end() { return InstantiatedFunctions.end(); } +}; + +class SegmentBuilder { + std::vector<CoverageSegment> &Segments; + SmallVector<const CountedRegion *, 8> ActiveRegions; + + SegmentBuilder(std::vector<CoverageSegment> &Segments) : Segments(Segments) {} + + /// Emit a segment with the count from \p Region starting at \p StartLoc. + // + /// \p IsRegionEntry: The segment is at the start of a new non-gap region. + /// \p EmitSkippedRegion: The segment must be emitted as a skipped region. + void startSegment(const CountedRegion &Region, LineColPair StartLoc, + bool IsRegionEntry, bool EmitSkippedRegion = false) { + bool HasCount = !EmitSkippedRegion && + (Region.Kind != CounterMappingRegion::SkippedRegion); + + // If the new segment wouldn't affect coverage rendering, skip it. + if (!Segments.empty() && !IsRegionEntry && !EmitSkippedRegion) { + const auto &Last = Segments.back(); + if (Last.HasCount == HasCount && Last.Count == Region.ExecutionCount && + !Last.IsRegionEntry) + return; + } + + if (HasCount) + Segments.emplace_back(StartLoc.first, StartLoc.second, + Region.ExecutionCount, IsRegionEntry, + Region.Kind == CounterMappingRegion::GapRegion); + else + Segments.emplace_back(StartLoc.first, StartLoc.second, IsRegionEntry); + + LLVM_DEBUG({ + const auto &Last = Segments.back(); + dbgs() << "Segment at " << Last.Line << ":" << Last.Col + << " (count = " << Last.Count << ")" + << (Last.IsRegionEntry ? ", RegionEntry" : "") + << (!Last.HasCount ? ", Skipped" : "") + << (Last.IsGapRegion ? ", Gap" : "") << "\n"; + }); + } + + /// Emit segments for active regions which end before \p Loc. + /// + /// \p Loc: The start location of the next region. If None, all active + /// regions are completed. + /// \p FirstCompletedRegion: Index of the first completed region. + void completeRegionsUntil(Optional<LineColPair> Loc, + unsigned FirstCompletedRegion) { + // Sort the completed regions by end location. This makes it simple to + // emit closing segments in sorted order. + auto CompletedRegionsIt = ActiveRegions.begin() + FirstCompletedRegion; + std::stable_sort(CompletedRegionsIt, ActiveRegions.end(), + [](const CountedRegion *L, const CountedRegion *R) { + return L->endLoc() < R->endLoc(); + }); + + // Emit segments for all completed regions. + for (unsigned I = FirstCompletedRegion + 1, E = ActiveRegions.size(); I < E; + ++I) { + const auto *CompletedRegion = ActiveRegions[I]; + assert((!Loc || CompletedRegion->endLoc() <= *Loc) && + "Completed region ends after start of new region"); + + const auto *PrevCompletedRegion = ActiveRegions[I - 1]; + auto CompletedSegmentLoc = PrevCompletedRegion->endLoc(); + + // Don't emit any more segments if they start where the new region begins. + if (Loc && CompletedSegmentLoc == *Loc) + break; + + // Don't emit a segment if the next completed region ends at the same + // location as this one. 
+ if (CompletedSegmentLoc == CompletedRegion->endLoc()) + continue; + + // Use the count from the last completed region which ends at this loc. + for (unsigned J = I + 1; J < E; ++J) + if (CompletedRegion->endLoc() == ActiveRegions[J]->endLoc()) + CompletedRegion = ActiveRegions[J]; + + startSegment(*CompletedRegion, CompletedSegmentLoc, false); + } + + auto Last = ActiveRegions.back(); + if (FirstCompletedRegion && Last->endLoc() != *Loc) { + // If there's a gap after the end of the last completed region and the + // start of the new region, use the last active region to fill the gap. + startSegment(*ActiveRegions[FirstCompletedRegion - 1], Last->endLoc(), + false); + } else if (!FirstCompletedRegion && (!Loc || *Loc != Last->endLoc())) { + // Emit a skipped segment if there are no more active regions. This + // ensures that gaps between functions are marked correctly. + startSegment(*Last, Last->endLoc(), false, true); + } + + // Pop the completed regions. + ActiveRegions.erase(CompletedRegionsIt, ActiveRegions.end()); + } + + void buildSegmentsImpl(ArrayRef<CountedRegion> Regions) { + for (const auto &CR : enumerate(Regions)) { + auto CurStartLoc = CR.value().startLoc(); + + // Active regions which end before the current region need to be popped. + auto CompletedRegions = + std::stable_partition(ActiveRegions.begin(), ActiveRegions.end(), + [&](const CountedRegion *Region) { + return !(Region->endLoc() <= CurStartLoc); + }); + if (CompletedRegions != ActiveRegions.end()) { + unsigned FirstCompletedRegion = + std::distance(ActiveRegions.begin(), CompletedRegions); + completeRegionsUntil(CurStartLoc, FirstCompletedRegion); + } + + bool GapRegion = CR.value().Kind == CounterMappingRegion::GapRegion; + + // Try to emit a segment for the current region. + if (CurStartLoc == CR.value().endLoc()) { + // Avoid making zero-length regions active. If it's the last region, + // emit a skipped segment. Otherwise use its predecessor's count. + const bool Skipped = (CR.index() + 1) == Regions.size(); + startSegment(ActiveRegions.empty() ? CR.value() : *ActiveRegions.back(), + CurStartLoc, !GapRegion, Skipped); + continue; + } + if (CR.index() + 1 == Regions.size() || + CurStartLoc != Regions[CR.index() + 1].startLoc()) { + // Emit a segment if the next region doesn't start at the same location + // as this one. + startSegment(CR.value(), CurStartLoc, !GapRegion); + } + + // This region is active (i.e not completed). + ActiveRegions.push_back(&CR.value()); + } + + // Complete any remaining active regions. + if (!ActiveRegions.empty()) + completeRegionsUntil(None, 0); + } + + /// Sort a nested sequence of regions from a single file. + static void sortNestedRegions(MutableArrayRef<CountedRegion> Regions) { + llvm::sort(Regions.begin(), Regions.end(), [](const CountedRegion &LHS, + const CountedRegion &RHS) { + if (LHS.startLoc() != RHS.startLoc()) + return LHS.startLoc() < RHS.startLoc(); + if (LHS.endLoc() != RHS.endLoc()) + // When LHS completely contains RHS, we sort LHS first. + return RHS.endLoc() < LHS.endLoc(); + // If LHS and RHS cover the same area, we need to sort them according + // to their kinds so that the most suitable region will become "active" + // in combineRegions(). Because we accumulate counter values only from + // regions of the same kind as the first region of the area, prefer + // CodeRegion to ExpansionRegion and ExpansionRegion to SkippedRegion. 
+ static_assert(CounterMappingRegion::CodeRegion < + CounterMappingRegion::ExpansionRegion && + CounterMappingRegion::ExpansionRegion < + CounterMappingRegion::SkippedRegion, + "Unexpected order of region kind values"); + return LHS.Kind < RHS.Kind; + }); + } + + /// Combine counts of regions which cover the same area. + static ArrayRef<CountedRegion> + combineRegions(MutableArrayRef<CountedRegion> Regions) { + if (Regions.empty()) + return Regions; + auto Active = Regions.begin(); + auto End = Regions.end(); + for (auto I = Regions.begin() + 1; I != End; ++I) { + if (Active->startLoc() != I->startLoc() || + Active->endLoc() != I->endLoc()) { + // Shift to the next region. + ++Active; + if (Active != I) + *Active = *I; + continue; + } + // Merge duplicate region. + // If CodeRegions and ExpansionRegions cover the same area, it's probably + // a macro which is fully expanded to another macro. In that case, we need + // to accumulate counts only from CodeRegions, or else the area will be + // counted twice. + // On the other hand, a macro may have a nested macro in its body. If the + // outer macro is used several times, the ExpansionRegion for the nested + // macro will also be added several times. These ExpansionRegions cover + // the same source locations and have to be combined to reach the correct + // value for that area. + // We add counts of the regions of the same kind as the active region + // to handle the both situations. + if (I->Kind == Active->Kind) + Active->ExecutionCount += I->ExecutionCount; + } + return Regions.drop_back(std::distance(++Active, End)); + } + +public: + /// Build a sorted list of CoverageSegments from a list of Regions. + static std::vector<CoverageSegment> + buildSegments(MutableArrayRef<CountedRegion> Regions) { + std::vector<CoverageSegment> Segments; + SegmentBuilder Builder(Segments); + + sortNestedRegions(Regions); + ArrayRef<CountedRegion> CombinedRegions = combineRegions(Regions); + + LLVM_DEBUG({ + dbgs() << "Combined regions:\n"; + for (const auto &CR : CombinedRegions) + dbgs() << " " << CR.LineStart << ":" << CR.ColumnStart << " -> " + << CR.LineEnd << ":" << CR.ColumnEnd + << " (count=" << CR.ExecutionCount << ")\n"; + }); + + Builder.buildSegmentsImpl(CombinedRegions); + +#ifndef NDEBUG + for (unsigned I = 1, E = Segments.size(); I < E; ++I) { + const auto &L = Segments[I - 1]; + const auto &R = Segments[I]; + if (!(L.Line < R.Line) && !(L.Line == R.Line && L.Col < R.Col)) { + LLVM_DEBUG(dbgs() << " ! 
Segment " << L.Line << ":" << L.Col + << " followed by " << R.Line << ":" << R.Col << "\n"); + assert(false && "Coverage segments not unique or sorted"); + } + } +#endif + + return Segments; + } +}; + +} // end anonymous namespace + +std::vector<StringRef> CoverageMapping::getUniqueSourceFiles() const { + std::vector<StringRef> Filenames; + for (const auto &Function : getCoveredFunctions()) + Filenames.insert(Filenames.end(), Function.Filenames.begin(), + Function.Filenames.end()); + llvm::sort(Filenames.begin(), Filenames.end()); + auto Last = std::unique(Filenames.begin(), Filenames.end()); + Filenames.erase(Last, Filenames.end()); + return Filenames; +} + +static SmallBitVector gatherFileIDs(StringRef SourceFile, + const FunctionRecord &Function) { + SmallBitVector FilenameEquivalence(Function.Filenames.size(), false); + for (unsigned I = 0, E = Function.Filenames.size(); I < E; ++I) + if (SourceFile == Function.Filenames[I]) + FilenameEquivalence[I] = true; + return FilenameEquivalence; +} + +/// Return the ID of the file where the definition of the function is located. +static Optional<unsigned> findMainViewFileID(const FunctionRecord &Function) { + SmallBitVector IsNotExpandedFile(Function.Filenames.size(), true); + for (const auto &CR : Function.CountedRegions) + if (CR.Kind == CounterMappingRegion::ExpansionRegion) + IsNotExpandedFile[CR.ExpandedFileID] = false; + int I = IsNotExpandedFile.find_first(); + if (I == -1) + return None; + return I; +} + +/// Check if SourceFile is the file that contains the definition of +/// the Function. Return the ID of the file in that case or None otherwise. +static Optional<unsigned> findMainViewFileID(StringRef SourceFile, + const FunctionRecord &Function) { + Optional<unsigned> I = findMainViewFileID(Function); + if (I && SourceFile == Function.Filenames[*I]) + return I; + return None; +} + +static bool isExpansion(const CountedRegion &R, unsigned FileID) { + return R.Kind == CounterMappingRegion::ExpansionRegion && R.FileID == FileID; +} + +CoverageData CoverageMapping::getCoverageForFile(StringRef Filename) const { + CoverageData FileCoverage(Filename); + std::vector<CountedRegion> Regions; + + for (const auto &Function : Functions) { + auto MainFileID = findMainViewFileID(Filename, Function); + auto FileIDs = gatherFileIDs(Filename, Function); + for (const auto &CR : Function.CountedRegions) + if (FileIDs.test(CR.FileID)) { + Regions.push_back(CR); + if (MainFileID && isExpansion(CR, *MainFileID)) + FileCoverage.Expansions.emplace_back(CR, Function); + } + } + + LLVM_DEBUG(dbgs() << "Emitting segments for file: " << Filename << "\n"); + FileCoverage.Segments = SegmentBuilder::buildSegments(Regions); + + return FileCoverage; +} + +std::vector<InstantiationGroup> +CoverageMapping::getInstantiationGroups(StringRef Filename) const { + FunctionInstantiationSetCollector InstantiationSetCollector; + for (const auto &Function : Functions) { + auto MainFileID = findMainViewFileID(Filename, Function); + if (!MainFileID) + continue; + InstantiationSetCollector.insert(Function, *MainFileID); + } + + std::vector<InstantiationGroup> Result; + for (auto &InstantiationSet : InstantiationSetCollector) { + InstantiationGroup IG{InstantiationSet.first.first, + InstantiationSet.first.second, + std::move(InstantiationSet.second)}; + Result.emplace_back(std::move(IG)); + } + return Result; +} + +CoverageData +CoverageMapping::getCoverageForFunction(const FunctionRecord &Function) const { + auto MainFileID = findMainViewFileID(Function); + if (!MainFileID) + 
return CoverageData(); + + CoverageData FunctionCoverage(Function.Filenames[*MainFileID]); + std::vector<CountedRegion> Regions; + for (const auto &CR : Function.CountedRegions) + if (CR.FileID == *MainFileID) { + Regions.push_back(CR); + if (isExpansion(CR, *MainFileID)) + FunctionCoverage.Expansions.emplace_back(CR, Function); + } + + LLVM_DEBUG(dbgs() << "Emitting segments for function: " << Function.Name + << "\n"); + FunctionCoverage.Segments = SegmentBuilder::buildSegments(Regions); + + return FunctionCoverage; +} + +CoverageData CoverageMapping::getCoverageForExpansion( + const ExpansionRecord &Expansion) const { + CoverageData ExpansionCoverage( + Expansion.Function.Filenames[Expansion.FileID]); + std::vector<CountedRegion> Regions; + for (const auto &CR : Expansion.Function.CountedRegions) + if (CR.FileID == Expansion.FileID) { + Regions.push_back(CR); + if (isExpansion(CR, Expansion.FileID)) + ExpansionCoverage.Expansions.emplace_back(CR, Expansion.Function); + } + + LLVM_DEBUG(dbgs() << "Emitting segments for expansion of file " + << Expansion.FileID << "\n"); + ExpansionCoverage.Segments = SegmentBuilder::buildSegments(Regions); + + return ExpansionCoverage; +} + +LineCoverageStats::LineCoverageStats( + ArrayRef<const CoverageSegment *> LineSegments, + const CoverageSegment *WrappedSegment, unsigned Line) + : ExecutionCount(0), HasMultipleRegions(false), Mapped(false), Line(Line), + LineSegments(LineSegments), WrappedSegment(WrappedSegment) { + // Find the minimum number of regions which start in this line. + unsigned MinRegionCount = 0; + auto isStartOfRegion = [](const CoverageSegment *S) { + return !S->IsGapRegion && S->HasCount && S->IsRegionEntry; + }; + for (unsigned I = 0; I < LineSegments.size() && MinRegionCount < 2; ++I) + if (isStartOfRegion(LineSegments[I])) + ++MinRegionCount; + + bool StartOfSkippedRegion = !LineSegments.empty() && + !LineSegments.front()->HasCount && + LineSegments.front()->IsRegionEntry; + + HasMultipleRegions = MinRegionCount > 1; + Mapped = + !StartOfSkippedRegion && + ((WrappedSegment && WrappedSegment->HasCount) || (MinRegionCount > 0)); + + if (!Mapped) + return; + + // Pick the max count from the non-gap, region entry segments and the + // wrapped count. + if (WrappedSegment) + ExecutionCount = WrappedSegment->Count; + if (!MinRegionCount) + return; + for (const auto *LS : LineSegments) + if (isStartOfRegion(LS)) + ExecutionCount = std::max(ExecutionCount, LS->Count); +} + +LineCoverageIterator &LineCoverageIterator::operator++() { + if (Next == CD.end()) { + Stats = LineCoverageStats(); + Ended = true; + return *this; + } + if (Segments.size()) + WrappedSegment = Segments.back(); + Segments.clear(); + while (Next != CD.end() && Next->Line == Line) + Segments.push_back(&*Next++); + Stats = LineCoverageStats(Segments, WrappedSegment, Line); + ++Line; + return *this; +} + +static std::string getCoverageMapErrString(coveragemap_error Err) { + switch (Err) { + case coveragemap_error::success: + return "Success"; + case coveragemap_error::eof: + return "End of File"; + case coveragemap_error::no_data_found: + return "No coverage data found"; + case coveragemap_error::unsupported_version: + return "Unsupported coverage format version"; + case coveragemap_error::truncated: + return "Truncated coverage data"; + case coveragemap_error::malformed: + return "Malformed coverage data"; + } + llvm_unreachable("A value of coveragemap_error has no message."); +} + +namespace { + +// FIXME: This class is only here to support the transition to llvm::Error. 
It +// will be removed once this transition is complete. Clients should prefer to +// deal with the Error value directly, rather than converting to error_code. +class CoverageMappingErrorCategoryType : public std::error_category { + const char *name() const noexcept override { return "llvm.coveragemap"; } + std::string message(int IE) const override { + return getCoverageMapErrString(static_cast<coveragemap_error>(IE)); + } +}; + +} // end anonymous namespace + +std::string CoverageMapError::message() const { + return getCoverageMapErrString(Err); +} + +static ManagedStatic<CoverageMappingErrorCategoryType> ErrorCategory; + +const std::error_category &llvm::coverage::coveragemap_category() { + return *ErrorCategory; +} + +char CoverageMapError::ID = 0; diff --git a/contrib/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/contrib/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp new file mode 100644 index 000000000000..ee48256bc2e5 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp @@ -0,0 +1,750 @@ +//===- CoverageMappingReader.cpp - Code coverage mapping reader -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for reading coverage mapping data for +// instrumentation based coverage. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/Coverage/CoverageMappingReader.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Triple.h" +#include "llvm/Object/Binary.h" +#include "llvm/Object/Error.h" +#include "llvm/Object/MachOUniversal.h" +#include "llvm/Object/ObjectFile.h" +#include "llvm/ProfileData/InstrProf.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LEB128.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <vector> + +using namespace llvm; +using namespace coverage; +using namespace object; + +#define DEBUG_TYPE "coverage-mapping" + +void CoverageMappingIterator::increment() { + if (ReadErr != coveragemap_error::success) + return; + + // Check if all the records were read or if an error occurred while reading + // the next record. 
+ if (auto E = Reader->readNextRecord(Record)) + handleAllErrors(std::move(E), [&](const CoverageMapError &CME) { + if (CME.get() == coveragemap_error::eof) + *this = CoverageMappingIterator(); + else + ReadErr = CME.get(); + }); +} + +Error RawCoverageReader::readULEB128(uint64_t &Result) { + if (Data.empty()) + return make_error<CoverageMapError>(coveragemap_error::truncated); + unsigned N = 0; + Result = decodeULEB128(reinterpret_cast<const uint8_t *>(Data.data()), &N); + if (N > Data.size()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + Data = Data.substr(N); + return Error::success(); +} + +Error RawCoverageReader::readIntMax(uint64_t &Result, uint64_t MaxPlus1) { + if (auto Err = readULEB128(Result)) + return Err; + if (Result >= MaxPlus1) + return make_error<CoverageMapError>(coveragemap_error::malformed); + return Error::success(); +} + +Error RawCoverageReader::readSize(uint64_t &Result) { + if (auto Err = readULEB128(Result)) + return Err; + // Sanity check the number. + if (Result > Data.size()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + return Error::success(); +} + +Error RawCoverageReader::readString(StringRef &Result) { + uint64_t Length; + if (auto Err = readSize(Length)) + return Err; + Result = Data.substr(0, Length); + Data = Data.substr(Length); + return Error::success(); +} + +Error RawCoverageFilenamesReader::read() { + uint64_t NumFilenames; + if (auto Err = readSize(NumFilenames)) + return Err; + for (size_t I = 0; I < NumFilenames; ++I) { + StringRef Filename; + if (auto Err = readString(Filename)) + return Err; + Filenames.push_back(Filename); + } + return Error::success(); +} + +Error RawCoverageMappingReader::decodeCounter(unsigned Value, Counter &C) { + auto Tag = Value & Counter::EncodingTagMask; + switch (Tag) { + case Counter::Zero: + C = Counter::getZero(); + return Error::success(); + case Counter::CounterValueReference: + C = Counter::getCounter(Value >> Counter::EncodingTagBits); + return Error::success(); + default: + break; + } + Tag -= Counter::Expression; + switch (Tag) { + case CounterExpression::Subtract: + case CounterExpression::Add: { + auto ID = Value >> Counter::EncodingTagBits; + if (ID >= Expressions.size()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + Expressions[ID].Kind = CounterExpression::ExprKind(Tag); + C = Counter::getExpression(ID); + break; + } + default: + return make_error<CoverageMapError>(coveragemap_error::malformed); + } + return Error::success(); +} + +Error RawCoverageMappingReader::readCounter(Counter &C) { + uint64_t EncodedCounter; + if (auto Err = + readIntMax(EncodedCounter, std::numeric_limits<unsigned>::max())) + return Err; + if (auto Err = decodeCounter(EncodedCounter, C)) + return Err; + return Error::success(); +} + +static const unsigned EncodingExpansionRegionBit = 1 + << Counter::EncodingTagBits; + +/// Read the sub-array of regions for the given inferred file id. +/// \param NumFileIDs the number of file ids that are defined for this +/// function. +Error RawCoverageMappingReader::readMappingRegionsSubArray( + std::vector<CounterMappingRegion> &MappingRegions, unsigned InferredFileID, + size_t NumFileIDs) { + uint64_t NumRegions; + if (auto Err = readSize(NumRegions)) + return Err; + unsigned LineStart = 0; + for (size_t I = 0; I < NumRegions; ++I) { + Counter C; + CounterMappingRegion::RegionKind Kind = CounterMappingRegion::CodeRegion; + + // Read the combined counter + region kind. 
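The "combined counter + region kind" word read below packs everything into one ULEB128 value: the low two bits are the counter tag, and for a zero tag the next bit distinguishes expansion regions from the pseudo-counter that carries the region kind. The helper below is a simplified, illustrative mirror of decodeCounter() and the zero-tag handling that follows, not the reader's actual code path; the example values are invented.

// Illustrative mirror of the tag scheme; real decoding lives in
// decodeCounter() and readMappingRegionsSubArray().
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

using namespace llvm;
using namespace llvm::coverage;

static void describe(uint64_t Encoded) {
  unsigned Tag = Encoded & Counter::EncodingTagMask; // low 2 bits
  if (Tag == Counter::CounterValueReference) {
    outs() << "counter #" << (Encoded >> Counter::EncodingTagBits) << "\n";
  } else if (Tag == Counter::Zero) {
    if (Encoded & (1 << Counter::EncodingTagBits)) // expansion-region bit
      outs() << "expansion of file "
             << (Encoded >> Counter::EncodingCounterTagAndExpansionRegionTagBits)
             << "\n";
    else
      outs() << "zero counter, region kind "
             << (Encoded >> Counter::EncodingCounterTagAndExpansionRegionTagBits)
             << "\n";
  } else {
    outs() << "expression #" << (Encoded >> Counter::EncodingTagBits)
           << (Tag == Counter::Expression ? " (subtract)" : " (add)") << "\n";
  }
}

int main() {
  describe(0x25); // 0b100101 -> tag 1, counter #9
  describe(0x14); // 0b010100 -> tag 0, expansion bit set, expanded file 2
  describe(0x10); // 0b010000 -> tag 0, region kind 2 (SkippedRegion)
  return 0;
}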
+ uint64_t EncodedCounterAndRegion; + if (auto Err = readIntMax(EncodedCounterAndRegion, + std::numeric_limits<unsigned>::max())) + return Err; + unsigned Tag = EncodedCounterAndRegion & Counter::EncodingTagMask; + uint64_t ExpandedFileID = 0; + if (Tag != Counter::Zero) { + if (auto Err = decodeCounter(EncodedCounterAndRegion, C)) + return Err; + } else { + // Is it an expansion region? + if (EncodedCounterAndRegion & EncodingExpansionRegionBit) { + Kind = CounterMappingRegion::ExpansionRegion; + ExpandedFileID = EncodedCounterAndRegion >> + Counter::EncodingCounterTagAndExpansionRegionTagBits; + if (ExpandedFileID >= NumFileIDs) + return make_error<CoverageMapError>(coveragemap_error::malformed); + } else { + switch (EncodedCounterAndRegion >> + Counter::EncodingCounterTagAndExpansionRegionTagBits) { + case CounterMappingRegion::CodeRegion: + // Don't do anything when we have a code region with a zero counter. + break; + case CounterMappingRegion::SkippedRegion: + Kind = CounterMappingRegion::SkippedRegion; + break; + default: + return make_error<CoverageMapError>(coveragemap_error::malformed); + } + } + } + + // Read the source range. + uint64_t LineStartDelta, ColumnStart, NumLines, ColumnEnd; + if (auto Err = + readIntMax(LineStartDelta, std::numeric_limits<unsigned>::max())) + return Err; + if (auto Err = readULEB128(ColumnStart)) + return Err; + if (ColumnStart > std::numeric_limits<unsigned>::max()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + if (auto Err = readIntMax(NumLines, std::numeric_limits<unsigned>::max())) + return Err; + if (auto Err = readIntMax(ColumnEnd, std::numeric_limits<unsigned>::max())) + return Err; + LineStart += LineStartDelta; + + // If the high bit of ColumnEnd is set, this is a gap region. + if (ColumnEnd & (1U << 31)) { + Kind = CounterMappingRegion::GapRegion; + ColumnEnd &= ~(1U << 31); + } + + // Adjust the column locations for the empty regions that are supposed to + // cover whole lines. Those regions should be encoded with the + // column range (1 -> std::numeric_limits<unsigned>::max()), but because + // the encoded std::numeric_limits<unsigned>::max() is several bytes long, + // we set the column range to (0 -> 0) to ensure that the column start and + // column end take up one byte each. + // The std::numeric_limits<unsigned>::max() is used to represent a column + // position at the end of the line without knowing the length of that line. + if (ColumnStart == 0 && ColumnEnd == 0) { + ColumnStart = 1; + ColumnEnd = std::numeric_limits<unsigned>::max(); + } + + LLVM_DEBUG({ + dbgs() << "Counter in file " << InferredFileID << " " << LineStart << ":" + << ColumnStart << " -> " << (LineStart + NumLines) << ":" + << ColumnEnd << ", "; + if (Kind == CounterMappingRegion::ExpansionRegion) + dbgs() << "Expands to file " << ExpandedFileID; + else + CounterMappingContext(Expressions).dump(C, dbgs()); + dbgs() << "\n"; + }); + + auto CMR = CounterMappingRegion(C, InferredFileID, ExpandedFileID, + LineStart, ColumnStart, + LineStart + NumLines, ColumnEnd, Kind); + if (CMR.startLoc() > CMR.endLoc()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + MappingRegions.push_back(CMR); + } + return Error::success(); +} + +Error RawCoverageMappingReader::read() { + // Read the virtual file mapping. 
+ SmallVector<unsigned, 8> VirtualFileMapping; + uint64_t NumFileMappings; + if (auto Err = readSize(NumFileMappings)) + return Err; + for (size_t I = 0; I < NumFileMappings; ++I) { + uint64_t FilenameIndex; + if (auto Err = readIntMax(FilenameIndex, TranslationUnitFilenames.size())) + return Err; + VirtualFileMapping.push_back(FilenameIndex); + } + + // Construct the files using unique filenames and virtual file mapping. + for (auto I : VirtualFileMapping) { + Filenames.push_back(TranslationUnitFilenames[I]); + } + + // Read the expressions. + uint64_t NumExpressions; + if (auto Err = readSize(NumExpressions)) + return Err; + // Create an array of dummy expressions that get the proper counters + // when the expressions are read, and the proper kinds when the counters + // are decoded. + Expressions.resize( + NumExpressions, + CounterExpression(CounterExpression::Subtract, Counter(), Counter())); + for (size_t I = 0; I < NumExpressions; ++I) { + if (auto Err = readCounter(Expressions[I].LHS)) + return Err; + if (auto Err = readCounter(Expressions[I].RHS)) + return Err; + } + + // Read the mapping regions sub-arrays. + for (unsigned InferredFileID = 0, S = VirtualFileMapping.size(); + InferredFileID < S; ++InferredFileID) { + if (auto Err = readMappingRegionsSubArray(MappingRegions, InferredFileID, + VirtualFileMapping.size())) + return Err; + } + + // Set the counters for the expansion regions. + // i.e. Counter of expansion region = counter of the first region + // from the expanded file. + // Perform multiple passes to correctly propagate the counters through + // all the nested expansion regions. + SmallVector<CounterMappingRegion *, 8> FileIDExpansionRegionMapping; + FileIDExpansionRegionMapping.resize(VirtualFileMapping.size(), nullptr); + for (unsigned Pass = 1, S = VirtualFileMapping.size(); Pass < S; ++Pass) { + for (auto &R : MappingRegions) { + if (R.Kind != CounterMappingRegion::ExpansionRegion) + continue; + assert(!FileIDExpansionRegionMapping[R.ExpandedFileID]); + FileIDExpansionRegionMapping[R.ExpandedFileID] = &R; + } + for (auto &R : MappingRegions) { + if (FileIDExpansionRegionMapping[R.FileID]) { + FileIDExpansionRegionMapping[R.FileID]->Count = R.Count; + FileIDExpansionRegionMapping[R.FileID] = nullptr; + } + } + } + + return Error::success(); +} + +Expected<bool> RawCoverageMappingDummyChecker::isDummy() { + // A dummy coverage mapping data consists of just one region with zero count. + uint64_t NumFileMappings; + if (Error Err = readSize(NumFileMappings)) + return std::move(Err); + if (NumFileMappings != 1) + return false; + // We don't expect any specific value for the filename index, just skip it. 
+ uint64_t FilenameIndex; + if (Error Err = + readIntMax(FilenameIndex, std::numeric_limits<unsigned>::max())) + return std::move(Err); + uint64_t NumExpressions; + if (Error Err = readSize(NumExpressions)) + return std::move(Err); + if (NumExpressions != 0) + return false; + uint64_t NumRegions; + if (Error Err = readSize(NumRegions)) + return std::move(Err); + if (NumRegions != 1) + return false; + uint64_t EncodedCounterAndRegion; + if (Error Err = readIntMax(EncodedCounterAndRegion, + std::numeric_limits<unsigned>::max())) + return std::move(Err); + unsigned Tag = EncodedCounterAndRegion & Counter::EncodingTagMask; + return Tag == Counter::Zero; +} + +Error InstrProfSymtab::create(SectionRef &Section) { + if (auto EC = Section.getContents(Data)) + return errorCodeToError(EC); + Address = Section.getAddress(); + return Error::success(); +} + +StringRef InstrProfSymtab::getFuncName(uint64_t Pointer, size_t Size) { + if (Pointer < Address) + return StringRef(); + auto Offset = Pointer - Address; + if (Offset + Size > Data.size()) + return StringRef(); + return Data.substr(Pointer - Address, Size); +} + +// Check if the mapping data is a dummy, i.e. is emitted for an unused function. +static Expected<bool> isCoverageMappingDummy(uint64_t Hash, StringRef Mapping) { + // The hash value of dummy mapping records is always zero. + if (Hash) + return false; + return RawCoverageMappingDummyChecker(Mapping).isDummy(); +} + +namespace { + +struct CovMapFuncRecordReader { + virtual ~CovMapFuncRecordReader() = default; + + // The interface to read coverage mapping function records for a module. + // + // \p Buf points to the buffer containing the \c CovHeader of the coverage + // mapping data associated with the module. + // + // Returns a pointer to the next \c CovHeader if it exists, or a pointer + // greater than \p End if not. + virtual Expected<const char *> readFunctionRecords(const char *Buf, + const char *End) = 0; + + template <class IntPtrT, support::endianness Endian> + static Expected<std::unique_ptr<CovMapFuncRecordReader>> + get(CovMapVersion Version, InstrProfSymtab &P, + std::vector<BinaryCoverageReader::ProfileMappingRecord> &R, + std::vector<StringRef> &F); +}; + +// A class for reading coverage mapping function records for a module. +template <CovMapVersion Version, class IntPtrT, support::endianness Endian> +class VersionedCovMapFuncRecordReader : public CovMapFuncRecordReader { + using FuncRecordType = + typename CovMapTraits<Version, IntPtrT>::CovMapFuncRecordType; + using NameRefType = typename CovMapTraits<Version, IntPtrT>::NameRefType; + + // Maps function's name references to the indexes of their records + // in \c Records. + DenseMap<NameRefType, size_t> FunctionRecords; + InstrProfSymtab &ProfileNames; + std::vector<StringRef> &Filenames; + std::vector<BinaryCoverageReader::ProfileMappingRecord> &Records; + + // Add the record to the collection if we don't already have a record that + // points to the same function name. This is useful to ignore the redundant + // records for the functions with ODR linkage. + // In addition, prefer records with real coverage mapping data to dummy + // records, which were emitted for inline functions which were seen but + // not used in the corresponding translation unit. 
+ Error insertFunctionRecordIfNeeded(const FuncRecordType *CFR, + StringRef Mapping, size_t FilenamesBegin) { + uint64_t FuncHash = CFR->template getFuncHash<Endian>(); + NameRefType NameRef = CFR->template getFuncNameRef<Endian>(); + auto InsertResult = + FunctionRecords.insert(std::make_pair(NameRef, Records.size())); + if (InsertResult.second) { + StringRef FuncName; + if (Error Err = CFR->template getFuncName<Endian>(ProfileNames, FuncName)) + return Err; + if (FuncName.empty()) + return make_error<InstrProfError>(instrprof_error::malformed); + Records.emplace_back(Version, FuncName, FuncHash, Mapping, FilenamesBegin, + Filenames.size() - FilenamesBegin); + return Error::success(); + } + // Update the existing record if it's a dummy and the new record is real. + size_t OldRecordIndex = InsertResult.first->second; + BinaryCoverageReader::ProfileMappingRecord &OldRecord = + Records[OldRecordIndex]; + Expected<bool> OldIsDummyExpected = isCoverageMappingDummy( + OldRecord.FunctionHash, OldRecord.CoverageMapping); + if (Error Err = OldIsDummyExpected.takeError()) + return Err; + if (!*OldIsDummyExpected) + return Error::success(); + Expected<bool> NewIsDummyExpected = + isCoverageMappingDummy(FuncHash, Mapping); + if (Error Err = NewIsDummyExpected.takeError()) + return Err; + if (*NewIsDummyExpected) + return Error::success(); + OldRecord.FunctionHash = FuncHash; + OldRecord.CoverageMapping = Mapping; + OldRecord.FilenamesBegin = FilenamesBegin; + OldRecord.FilenamesSize = Filenames.size() - FilenamesBegin; + return Error::success(); + } + +public: + VersionedCovMapFuncRecordReader( + InstrProfSymtab &P, + std::vector<BinaryCoverageReader::ProfileMappingRecord> &R, + std::vector<StringRef> &F) + : ProfileNames(P), Filenames(F), Records(R) {} + + ~VersionedCovMapFuncRecordReader() override = default; + + Expected<const char *> readFunctionRecords(const char *Buf, + const char *End) override { + using namespace support; + + if (Buf + sizeof(CovMapHeader) > End) + return make_error<CoverageMapError>(coveragemap_error::malformed); + auto CovHeader = reinterpret_cast<const CovMapHeader *>(Buf); + uint32_t NRecords = CovHeader->getNRecords<Endian>(); + uint32_t FilenamesSize = CovHeader->getFilenamesSize<Endian>(); + uint32_t CoverageSize = CovHeader->getCoverageSize<Endian>(); + assert((CovMapVersion)CovHeader->getVersion<Endian>() == Version); + Buf = reinterpret_cast<const char *>(CovHeader + 1); + + // Skip past the function records, saving the start and end for later. + const char *FunBuf = Buf; + Buf += NRecords * sizeof(FuncRecordType); + const char *FunEnd = Buf; + + // Get the filenames. + if (Buf + FilenamesSize > End) + return make_error<CoverageMapError>(coveragemap_error::malformed); + size_t FilenamesBegin = Filenames.size(); + RawCoverageFilenamesReader Reader(StringRef(Buf, FilenamesSize), Filenames); + if (auto Err = Reader.read()) + return std::move(Err); + Buf += FilenamesSize; + + // We'll read the coverage mapping records in the loop below. + const char *CovBuf = Buf; + Buf += CoverageSize; + const char *CovEnd = Buf; + + if (Buf > End) + return make_error<CoverageMapError>(coveragemap_error::malformed); + // Each coverage map has an alignment of 8, so we need to adjust alignment + // before reading the next map. 
+ Buf += alignmentAdjustment(Buf, 8); + + auto CFR = reinterpret_cast<const FuncRecordType *>(FunBuf); + while ((const char *)CFR < FunEnd) { + // Read the function information + uint32_t DataSize = CFR->template getDataSize<Endian>(); + + // Now use that to read the coverage data. + if (CovBuf + DataSize > CovEnd) + return make_error<CoverageMapError>(coveragemap_error::malformed); + auto Mapping = StringRef(CovBuf, DataSize); + CovBuf += DataSize; + + if (Error Err = + insertFunctionRecordIfNeeded(CFR, Mapping, FilenamesBegin)) + return std::move(Err); + CFR++; + } + return Buf; + } +}; + +} // end anonymous namespace + +template <class IntPtrT, support::endianness Endian> +Expected<std::unique_ptr<CovMapFuncRecordReader>> CovMapFuncRecordReader::get( + CovMapVersion Version, InstrProfSymtab &P, + std::vector<BinaryCoverageReader::ProfileMappingRecord> &R, + std::vector<StringRef> &F) { + using namespace coverage; + + switch (Version) { + case CovMapVersion::Version1: + return llvm::make_unique<VersionedCovMapFuncRecordReader< + CovMapVersion::Version1, IntPtrT, Endian>>(P, R, F); + case CovMapVersion::Version2: + case CovMapVersion::Version3: + // Decompress the name data. + if (Error E = P.create(P.getNameData())) + return std::move(E); + if (Version == CovMapVersion::Version2) + return llvm::make_unique<VersionedCovMapFuncRecordReader< + CovMapVersion::Version2, IntPtrT, Endian>>(P, R, F); + else + return llvm::make_unique<VersionedCovMapFuncRecordReader< + CovMapVersion::Version3, IntPtrT, Endian>>(P, R, F); + } + llvm_unreachable("Unsupported version"); +} + +template <typename T, support::endianness Endian> +static Error readCoverageMappingData( + InstrProfSymtab &ProfileNames, StringRef Data, + std::vector<BinaryCoverageReader::ProfileMappingRecord> &Records, + std::vector<StringRef> &Filenames) { + using namespace coverage; + + // Read the records in the coverage data section. 
+ auto CovHeader = + reinterpret_cast<const CovMapHeader *>(Data.data()); + CovMapVersion Version = (CovMapVersion)CovHeader->getVersion<Endian>(); + if (Version > CovMapVersion::CurrentVersion) + return make_error<CoverageMapError>(coveragemap_error::unsupported_version); + Expected<std::unique_ptr<CovMapFuncRecordReader>> ReaderExpected = + CovMapFuncRecordReader::get<T, Endian>(Version, ProfileNames, Records, + Filenames); + if (Error E = ReaderExpected.takeError()) + return E; + auto Reader = std::move(ReaderExpected.get()); + for (const char *Buf = Data.data(), *End = Buf + Data.size(); Buf < End;) { + auto NextHeaderOrErr = Reader->readFunctionRecords(Buf, End); + if (auto E = NextHeaderOrErr.takeError()) + return E; + Buf = NextHeaderOrErr.get(); + } + return Error::success(); +} + +static const char *TestingFormatMagic = "llvmcovmtestdata"; + +static Error loadTestingFormat(StringRef Data, InstrProfSymtab &ProfileNames, + StringRef &CoverageMapping, + uint8_t &BytesInAddress, + support::endianness &Endian) { + BytesInAddress = 8; + Endian = support::endianness::little; + + Data = Data.substr(StringRef(TestingFormatMagic).size()); + if (Data.empty()) + return make_error<CoverageMapError>(coveragemap_error::truncated); + unsigned N = 0; + auto ProfileNamesSize = + decodeULEB128(reinterpret_cast<const uint8_t *>(Data.data()), &N); + if (N > Data.size()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + Data = Data.substr(N); + if (Data.empty()) + return make_error<CoverageMapError>(coveragemap_error::truncated); + N = 0; + uint64_t Address = + decodeULEB128(reinterpret_cast<const uint8_t *>(Data.data()), &N); + if (N > Data.size()) + return make_error<CoverageMapError>(coveragemap_error::malformed); + Data = Data.substr(N); + if (Data.size() < ProfileNamesSize) + return make_error<CoverageMapError>(coveragemap_error::malformed); + if (Error E = ProfileNames.create(Data.substr(0, ProfileNamesSize), Address)) + return E; + CoverageMapping = Data.substr(ProfileNamesSize); + // Skip the padding bytes because coverage map data has an alignment of 8. + if (CoverageMapping.empty()) + return make_error<CoverageMapError>(coveragemap_error::truncated); + size_t Pad = alignmentAdjustment(CoverageMapping.data(), 8); + if (CoverageMapping.size() < Pad) + return make_error<CoverageMapError>(coveragemap_error::malformed); + CoverageMapping = CoverageMapping.substr(Pad); + return Error::success(); +} + +static Expected<SectionRef> lookupSection(ObjectFile &OF, StringRef Name) { + StringRef FoundName; + for (const auto &Section : OF.sections()) { + if (auto EC = Section.getName(FoundName)) + return errorCodeToError(EC); + if (FoundName == Name) + return Section; + } + return make_error<CoverageMapError>(coveragemap_error::no_data_found); +} + +static Error loadBinaryFormat(MemoryBufferRef ObjectBuffer, + InstrProfSymtab &ProfileNames, + StringRef &CoverageMapping, + uint8_t &BytesInAddress, + support::endianness &Endian, StringRef Arch) { + auto BinOrErr = createBinary(ObjectBuffer); + if (!BinOrErr) + return BinOrErr.takeError(); + auto Bin = std::move(BinOrErr.get()); + std::unique_ptr<ObjectFile> OF; + if (auto *Universal = dyn_cast<MachOUniversalBinary>(Bin.get())) { + // If we have a universal binary, try to look up the object for the + // appropriate architecture. 
+ auto ObjectFileOrErr = Universal->getObjectForArch(Arch); + if (!ObjectFileOrErr) + return ObjectFileOrErr.takeError(); + OF = std::move(ObjectFileOrErr.get()); + } else if (isa<ObjectFile>(Bin.get())) { + // For any other object file, upcast and take ownership. + OF.reset(cast<ObjectFile>(Bin.release())); + // If we've asked for a particular arch, make sure they match. + if (!Arch.empty() && OF->getArch() != Triple(Arch).getArch()) + return errorCodeToError(object_error::arch_not_found); + } else + // We can only handle object files. + return make_error<CoverageMapError>(coveragemap_error::malformed); + + // The coverage uses native pointer sizes for the object it's written in. + BytesInAddress = OF->getBytesInAddress(); + Endian = OF->isLittleEndian() ? support::endianness::little + : support::endianness::big; + + // Look for the sections that we are interested in. + auto ObjFormat = OF->getTripleObjectFormat(); + auto NamesSection = + lookupSection(*OF, getInstrProfSectionName(IPSK_name, ObjFormat, + /*AddSegmentInfo=*/false)); + if (auto E = NamesSection.takeError()) + return E; + auto CoverageSection = + lookupSection(*OF, getInstrProfSectionName(IPSK_covmap, ObjFormat, + /*AddSegmentInfo=*/false)); + if (auto E = CoverageSection.takeError()) + return E; + + // Get the contents of the given sections. + if (auto EC = CoverageSection->getContents(CoverageMapping)) + return errorCodeToError(EC); + if (Error E = ProfileNames.create(*NamesSection)) + return E; + + return Error::success(); +} + +Expected<std::unique_ptr<BinaryCoverageReader>> +BinaryCoverageReader::create(std::unique_ptr<MemoryBuffer> &ObjectBuffer, + StringRef Arch) { + std::unique_ptr<BinaryCoverageReader> Reader(new BinaryCoverageReader()); + + StringRef Coverage; + uint8_t BytesInAddress; + support::endianness Endian; + Error E = Error::success(); + consumeError(std::move(E)); + if (ObjectBuffer->getBuffer().startswith(TestingFormatMagic)) + // This is a special format used for testing. 
+ E = loadTestingFormat(ObjectBuffer->getBuffer(), Reader->ProfileNames, + Coverage, BytesInAddress, Endian); + else + E = loadBinaryFormat(ObjectBuffer->getMemBufferRef(), Reader->ProfileNames, + Coverage, BytesInAddress, Endian, Arch); + if (E) + return std::move(E); + + if (BytesInAddress == 4 && Endian == support::endianness::little) + E = readCoverageMappingData<uint32_t, support::endianness::little>( + Reader->ProfileNames, Coverage, Reader->MappingRecords, + Reader->Filenames); + else if (BytesInAddress == 4 && Endian == support::endianness::big) + E = readCoverageMappingData<uint32_t, support::endianness::big>( + Reader->ProfileNames, Coverage, Reader->MappingRecords, + Reader->Filenames); + else if (BytesInAddress == 8 && Endian == support::endianness::little) + E = readCoverageMappingData<uint64_t, support::endianness::little>( + Reader->ProfileNames, Coverage, Reader->MappingRecords, + Reader->Filenames); + else if (BytesInAddress == 8 && Endian == support::endianness::big) + E = readCoverageMappingData<uint64_t, support::endianness::big>( + Reader->ProfileNames, Coverage, Reader->MappingRecords, + Reader->Filenames); + else + return make_error<CoverageMapError>(coveragemap_error::malformed); + if (E) + return std::move(E); + return std::move(Reader); +} + +Error BinaryCoverageReader::readNextRecord(CoverageMappingRecord &Record) { + if (CurrentRecord >= MappingRecords.size()) + return make_error<CoverageMapError>(coveragemap_error::eof); + + FunctionsFilenames.clear(); + Expressions.clear(); + MappingRegions.clear(); + auto &R = MappingRecords[CurrentRecord]; + RawCoverageMappingReader Reader( + R.CoverageMapping, + makeArrayRef(Filenames).slice(R.FilenamesBegin, R.FilenamesSize), + FunctionsFilenames, Expressions, MappingRegions); + if (auto Err = Reader.read()) + return Err; + + Record.FunctionName = R.FunctionName; + Record.FunctionHash = R.FunctionHash; + Record.Filenames = FunctionsFilenames; + Record.Expressions = Expressions; + Record.MappingRegions = MappingRegions; + + ++CurrentRecord; + return Error::success(); +} diff --git a/contrib/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/contrib/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp new file mode 100644 index 000000000000..bb3f4f854e04 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp @@ -0,0 +1,208 @@ +//===- CoverageMappingWriter.cpp - Code coverage mapping writer -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for writing coverage mapping data for +// instrumentation based coverage. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/Coverage/CoverageMappingWriter.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/LEB128.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <limits> +#include <vector> + +using namespace llvm; +using namespace coverage; + +void CoverageFilenamesSectionWriter::write(raw_ostream &OS) { + encodeULEB128(Filenames.size(), OS); + for (const auto &Filename : Filenames) { + encodeULEB128(Filename.size(), OS); + OS << Filename; + } +} + +namespace { + +/// Gather only the expressions that are used by the mapping +/// regions in this function. 
+class CounterExpressionsMinimizer { + ArrayRef<CounterExpression> Expressions; + SmallVector<CounterExpression, 16> UsedExpressions; + std::vector<unsigned> AdjustedExpressionIDs; + +public: + CounterExpressionsMinimizer(ArrayRef<CounterExpression> Expressions, + ArrayRef<CounterMappingRegion> MappingRegions) + : Expressions(Expressions) { + AdjustedExpressionIDs.resize(Expressions.size(), 0); + for (const auto &I : MappingRegions) + mark(I.Count); + for (const auto &I : MappingRegions) + gatherUsed(I.Count); + } + + void mark(Counter C) { + if (!C.isExpression()) + return; + unsigned ID = C.getExpressionID(); + AdjustedExpressionIDs[ID] = 1; + mark(Expressions[ID].LHS); + mark(Expressions[ID].RHS); + } + + void gatherUsed(Counter C) { + if (!C.isExpression() || !AdjustedExpressionIDs[C.getExpressionID()]) + return; + AdjustedExpressionIDs[C.getExpressionID()] = UsedExpressions.size(); + const auto &E = Expressions[C.getExpressionID()]; + UsedExpressions.push_back(E); + gatherUsed(E.LHS); + gatherUsed(E.RHS); + } + + ArrayRef<CounterExpression> getExpressions() const { return UsedExpressions; } + + /// Adjust the given counter to correctly transition from the old + /// expression ids to the new expression ids. + Counter adjust(Counter C) const { + if (C.isExpression()) + C = Counter::getExpression(AdjustedExpressionIDs[C.getExpressionID()]); + return C; + } +}; + +} // end anonymous namespace + +/// Encode the counter. +/// +/// The encoding uses the following format: +/// Low 2 bits - Tag: +/// Counter::Zero(0) - A Counter with kind Counter::Zero +/// Counter::CounterValueReference(1) - A counter with kind +/// Counter::CounterValueReference +/// Counter::Expression(2) + CounterExpression::Subtract(0) - +/// A counter with kind Counter::Expression and an expression +/// with kind CounterExpression::Subtract +/// Counter::Expression(2) + CounterExpression::Add(1) - +/// A counter with kind Counter::Expression and an expression +/// with kind CounterExpression::Add +/// Remaining bits - Counter/Expression ID. +static unsigned encodeCounter(ArrayRef<CounterExpression> Expressions, + Counter C) { + unsigned Tag = unsigned(C.getKind()); + if (C.isExpression()) + Tag += Expressions[C.getExpressionID()].Kind; + unsigned ID = C.getCounterID(); + assert(ID <= + (std::numeric_limits<unsigned>::max() >> Counter::EncodingTagBits)); + return Tag | (ID << Counter::EncodingTagBits); +} + +static void writeCounter(ArrayRef<CounterExpression> Expressions, Counter C, + raw_ostream &OS) { + encodeULEB128(encodeCounter(Expressions, C), OS); +} + +void CoverageMappingWriter::write(raw_ostream &OS) { + // Check that we don't have any bogus regions. + assert(all_of(MappingRegions, + [](const CounterMappingRegion &CMR) { + return CMR.startLoc() <= CMR.endLoc(); + }) && + "Source region does not begin before it ends"); + + // Sort the regions in an ascending order by the file id and the starting + // location. Sort by region kinds to ensure stable order for tests. + std::stable_sort( + MappingRegions.begin(), MappingRegions.end(), + [](const CounterMappingRegion &LHS, const CounterMappingRegion &RHS) { + if (LHS.FileID != RHS.FileID) + return LHS.FileID < RHS.FileID; + if (LHS.startLoc() != RHS.startLoc()) + return LHS.startLoc() < RHS.startLoc(); + return LHS.Kind < RHS.Kind; + }); + + // Write out the fileid -> filename mapping. + encodeULEB128(VirtualFileMapping.size(), OS); + for (const auto &FileID : VirtualFileMapping) + encodeULEB128(FileID, OS); + + // Write out the expressions. 
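+  // For example, given the 2-bit tag scheme of encodeCounter() above, a
+  // reference to counter #2 is encoded as 1 | (2 << 2) == 9, and a reference
+  // to the first used expression is encoded as 2 | (0 << 2) == 2 when its
+  // kind is Subtract (or tag 3 for Add); each value is emitted as a ULEB128.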
+ CounterExpressionsMinimizer Minimizer(Expressions, MappingRegions); + auto MinExpressions = Minimizer.getExpressions(); + encodeULEB128(MinExpressions.size(), OS); + for (const auto &E : MinExpressions) { + writeCounter(MinExpressions, Minimizer.adjust(E.LHS), OS); + writeCounter(MinExpressions, Minimizer.adjust(E.RHS), OS); + } + + // Write out the mapping regions. + // Split the regions into subarrays where each region in a + // subarray has a fileID which is the index of that subarray. + unsigned PrevLineStart = 0; + unsigned CurrentFileID = ~0U; + for (auto I = MappingRegions.begin(), E = MappingRegions.end(); I != E; ++I) { + if (I->FileID != CurrentFileID) { + // Ensure that all file ids have at least one mapping region. + assert(I->FileID == (CurrentFileID + 1)); + // Find the number of regions with this file id. + unsigned RegionCount = 1; + for (auto J = I + 1; J != E && I->FileID == J->FileID; ++J) + ++RegionCount; + // Start a new region sub-array. + encodeULEB128(RegionCount, OS); + + CurrentFileID = I->FileID; + PrevLineStart = 0; + } + Counter Count = Minimizer.adjust(I->Count); + switch (I->Kind) { + case CounterMappingRegion::CodeRegion: + case CounterMappingRegion::GapRegion: + writeCounter(MinExpressions, Count, OS); + break; + case CounterMappingRegion::ExpansionRegion: { + assert(Count.isZero()); + assert(I->ExpandedFileID <= + (std::numeric_limits<unsigned>::max() >> + Counter::EncodingCounterTagAndExpansionRegionTagBits)); + // Mark an expansion region with a set bit that follows the counter tag, + // and pack the expanded file id into the remaining bits. + unsigned EncodedTagExpandedFileID = + (1 << Counter::EncodingTagBits) | + (I->ExpandedFileID + << Counter::EncodingCounterTagAndExpansionRegionTagBits); + encodeULEB128(EncodedTagExpandedFileID, OS); + break; + } + case CounterMappingRegion::SkippedRegion: + assert(Count.isZero()); + encodeULEB128(unsigned(I->Kind) + << Counter::EncodingCounterTagAndExpansionRegionTagBits, + OS); + break; + } + assert(I->LineStart >= PrevLineStart); + encodeULEB128(I->LineStart - PrevLineStart, OS); + encodeULEB128(I->ColumnStart, OS); + assert(I->LineEnd >= I->LineStart); + encodeULEB128(I->LineEnd - I->LineStart, OS); + encodeULEB128(I->ColumnEnd, OS); + PrevLineStart = I->LineStart; + } + // Ensure that all file ids have at least one mapping region. + assert(CurrentFileID == (VirtualFileMapping.size() - 1)); +} diff --git a/contrib/llvm/lib/ProfileData/GCOV.cpp b/contrib/llvm/lib/ProfileData/GCOV.cpp new file mode 100644 index 000000000000..c9155439ec46 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/GCOV.cpp @@ -0,0 +1,822 @@ +//===- GCOV.cpp - LLVM coverage tool --------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// GCOV implements the interface to read and write coverage files that use +// 'gcov' format. 
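+// Reading is driven by GCOVFile::readGCNO() and readGCDA() below; FileInfo
+// then renders the collected line counts as gcov-style per-file output.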
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/GCOV.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/Format.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <system_error> + +using namespace llvm; + +//===----------------------------------------------------------------------===// +// GCOVFile implementation. + +/// readGCNO - Read GCNO buffer. +bool GCOVFile::readGCNO(GCOVBuffer &Buffer) { + if (!Buffer.readGCNOFormat()) + return false; + if (!Buffer.readGCOVVersion(Version)) + return false; + + if (!Buffer.readInt(Checksum)) + return false; + while (true) { + if (!Buffer.readFunctionTag()) + break; + auto GFun = make_unique<GCOVFunction>(*this); + if (!GFun->readGCNO(Buffer, Version)) + return false; + Functions.push_back(std::move(GFun)); + } + + GCNOInitialized = true; + return true; +} + +/// readGCDA - Read GCDA buffer. It is required that readGCDA() can only be +/// called after readGCNO(). +bool GCOVFile::readGCDA(GCOVBuffer &Buffer) { + assert(GCNOInitialized && "readGCDA() can only be called after readGCNO()"); + if (!Buffer.readGCDAFormat()) + return false; + GCOV::GCOVVersion GCDAVersion; + if (!Buffer.readGCOVVersion(GCDAVersion)) + return false; + if (Version != GCDAVersion) { + errs() << "GCOV versions do not match.\n"; + return false; + } + + uint32_t GCDAChecksum; + if (!Buffer.readInt(GCDAChecksum)) + return false; + if (Checksum != GCDAChecksum) { + errs() << "File checksums do not match: " << Checksum + << " != " << GCDAChecksum << ".\n"; + return false; + } + for (size_t i = 0, e = Functions.size(); i < e; ++i) { + if (!Buffer.readFunctionTag()) { + errs() << "Unexpected number of functions.\n"; + return false; + } + if (!Functions[i]->readGCDA(Buffer, Version)) + return false; + } + if (Buffer.readObjectTag()) { + uint32_t Length; + uint32_t Dummy; + if (!Buffer.readInt(Length)) + return false; + if (!Buffer.readInt(Dummy)) + return false; // checksum + if (!Buffer.readInt(Dummy)) + return false; // num + if (!Buffer.readInt(RunCount)) + return false; + Buffer.advanceCursor(Length - 3); + } + while (Buffer.readProgramTag()) { + uint32_t Length; + if (!Buffer.readInt(Length)) + return false; + Buffer.advanceCursor(Length); + ++ProgramCount; + } + + return true; +} + +void GCOVFile::print(raw_ostream &OS) const { + for (const auto &FPtr : Functions) + FPtr->print(OS); +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +/// dump - Dump GCOVFile content to dbgs() for debugging purposes. +LLVM_DUMP_METHOD void GCOVFile::dump() const { + print(dbgs()); +} +#endif + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVFile::collectLineCounts(FileInfo &FI) { + for (const auto &FPtr : Functions) + FPtr->collectLineCounts(FI); + FI.setRunCount(RunCount); + FI.setProgramCount(ProgramCount); +} + +//===----------------------------------------------------------------------===// +// GCOVFunction implementation. + +/// readGCNO - Read a function from the GCNO buffer. Return false if an error +/// occurs. 
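+///
+/// Roughly, the record consumed here is: a header length word, the function
+/// ident and checksum (plus a cfg checksum for versions other than 4.02),
+/// the name, filename and starting line number, and then the block, edge,
+/// and line-table records.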
+bool GCOVFunction::readGCNO(GCOVBuffer &Buff, GCOV::GCOVVersion Version) { + uint32_t Dummy; + if (!Buff.readInt(Dummy)) + return false; // Function header length + if (!Buff.readInt(Ident)) + return false; + if (!Buff.readInt(Checksum)) + return false; + if (Version != GCOV::V402) { + uint32_t CfgChecksum; + if (!Buff.readInt(CfgChecksum)) + return false; + if (Parent.getChecksum() != CfgChecksum) { + errs() << "File checksums do not match: " << Parent.getChecksum() + << " != " << CfgChecksum << " in (" << Name << ").\n"; + return false; + } + } + if (!Buff.readString(Name)) + return false; + if (!Buff.readString(Filename)) + return false; + if (!Buff.readInt(LineNumber)) + return false; + + // read blocks. + if (!Buff.readBlockTag()) { + errs() << "Block tag not found.\n"; + return false; + } + uint32_t BlockCount; + if (!Buff.readInt(BlockCount)) + return false; + for (uint32_t i = 0, e = BlockCount; i != e; ++i) { + if (!Buff.readInt(Dummy)) + return false; // Block flags; + Blocks.push_back(make_unique<GCOVBlock>(*this, i)); + } + + // read edges. + while (Buff.readEdgeTag()) { + uint32_t EdgeCount; + if (!Buff.readInt(EdgeCount)) + return false; + EdgeCount = (EdgeCount - 1) / 2; + uint32_t BlockNo; + if (!Buff.readInt(BlockNo)) + return false; + if (BlockNo >= BlockCount) { + errs() << "Unexpected block number: " << BlockNo << " (in " << Name + << ").\n"; + return false; + } + for (uint32_t i = 0, e = EdgeCount; i != e; ++i) { + uint32_t Dst; + if (!Buff.readInt(Dst)) + return false; + Edges.push_back(make_unique<GCOVEdge>(*Blocks[BlockNo], *Blocks[Dst])); + GCOVEdge *Edge = Edges.back().get(); + Blocks[BlockNo]->addDstEdge(Edge); + Blocks[Dst]->addSrcEdge(Edge); + if (!Buff.readInt(Dummy)) + return false; // Edge flag + } + } + + // read line table. + while (Buff.readLineTag()) { + uint32_t LineTableLength; + // Read the length of this line table. + if (!Buff.readInt(LineTableLength)) + return false; + uint32_t EndPos = Buff.getCursor() + LineTableLength * 4; + uint32_t BlockNo; + // Read the block number this table is associated with. + if (!Buff.readInt(BlockNo)) + return false; + if (BlockNo >= BlockCount) { + errs() << "Unexpected block number: " << BlockNo << " (in " << Name + << ").\n"; + return false; + } + GCOVBlock &Block = *Blocks[BlockNo]; + // Read the word that pads the beginning of the line table. This may be a + // flag of some sort, but seems to always be zero. + if (!Buff.readInt(Dummy)) + return false; + + // Line information starts here and continues up until the last word. + if (Buff.getCursor() != (EndPos - sizeof(uint32_t))) { + StringRef F; + // Read the source file name. + if (!Buff.readString(F)) + return false; + if (Filename != F) { + errs() << "Multiple sources for a single basic block: " << Filename + << " != " << F << " (in " << Name << ").\n"; + return false; + } + // Read lines up to, but not including, the null terminator. + while (Buff.getCursor() < (EndPos - 2 * sizeof(uint32_t))) { + uint32_t Line; + if (!Buff.readInt(Line)) + return false; + // Line 0 means this instruction was injected by the compiler. Skip it. + if (!Line) + continue; + Block.addLine(Line); + } + // Read the null terminator. + if (!Buff.readInt(Dummy)) + return false; + } + // The last word is either a flag or padding, it isn't clear which. Skip + // over it. + if (!Buff.readInt(Dummy)) + return false; + } + return true; +} + +/// readGCDA - Read a function from the GCDA buffer. Return false if an error +/// occurs. 
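+///
+/// Roughly, the record consumed here is: a header length word, the function
+/// ident and checksum (both checked against the values read from the .gcno),
+/// a cfg checksum for versions other than 4.02, an optional function name,
+/// and the arc tag followed by 64-bit arc counts.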
+bool GCOVFunction::readGCDA(GCOVBuffer &Buff, GCOV::GCOVVersion Version) { + uint32_t HeaderLength; + if (!Buff.readInt(HeaderLength)) + return false; // Function header length + + uint64_t EndPos = Buff.getCursor() + HeaderLength * sizeof(uint32_t); + + uint32_t GCDAIdent; + if (!Buff.readInt(GCDAIdent)) + return false; + if (Ident != GCDAIdent) { + errs() << "Function identifiers do not match: " << Ident + << " != " << GCDAIdent << " (in " << Name << ").\n"; + return false; + } + + uint32_t GCDAChecksum; + if (!Buff.readInt(GCDAChecksum)) + return false; + if (Checksum != GCDAChecksum) { + errs() << "Function checksums do not match: " << Checksum + << " != " << GCDAChecksum << " (in " << Name << ").\n"; + return false; + } + + uint32_t CfgChecksum; + if (Version != GCOV::V402) { + if (!Buff.readInt(CfgChecksum)) + return false; + if (Parent.getChecksum() != CfgChecksum) { + errs() << "File checksums do not match: " << Parent.getChecksum() + << " != " << CfgChecksum << " (in " << Name << ").\n"; + return false; + } + } + + if (Buff.getCursor() < EndPos) { + StringRef GCDAName; + if (!Buff.readString(GCDAName)) + return false; + if (Name != GCDAName) { + errs() << "Function names do not match: " << Name << " != " << GCDAName + << ".\n"; + return false; + } + } + + if (!Buff.readArcTag()) { + errs() << "Arc tag not found (in " << Name << ").\n"; + return false; + } + + uint32_t Count; + if (!Buff.readInt(Count)) + return false; + Count /= 2; + + // This for loop adds the counts for each block. A second nested loop is + // required to combine the edge counts that are contained in the GCDA file. + for (uint32_t BlockNo = 0; Count > 0; ++BlockNo) { + // The last block is always reserved for exit block + if (BlockNo >= Blocks.size()) { + errs() << "Unexpected number of edges (in " << Name << ").\n"; + return false; + } + if (BlockNo == Blocks.size() - 1) + errs() << "(" << Name << ") has arcs from exit block.\n"; + GCOVBlock &Block = *Blocks[BlockNo]; + for (size_t EdgeNo = 0, End = Block.getNumDstEdges(); EdgeNo < End; + ++EdgeNo) { + if (Count == 0) { + errs() << "Unexpected number of edges (in " << Name << ").\n"; + return false; + } + uint64_t ArcCount; + if (!Buff.readInt64(ArcCount)) + return false; + Block.addCount(EdgeNo, ArcCount); + --Count; + } + Block.sortDstEdges(); + } + return true; +} + +/// getEntryCount - Get the number of times the function was called by +/// retrieving the entry block's count. +uint64_t GCOVFunction::getEntryCount() const { + return Blocks.front()->getCount(); +} + +/// getExitCount - Get the number of times the function returned by retrieving +/// the exit block's count. +uint64_t GCOVFunction::getExitCount() const { + return Blocks.back()->getCount(); +} + +void GCOVFunction::print(raw_ostream &OS) const { + OS << "===== " << Name << " (" << Ident << ") @ " << Filename << ":" + << LineNumber << "\n"; + for (const auto &Block : Blocks) + Block->print(OS); +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +/// dump - Dump GCOVFunction content to dbgs() for debugging purposes. +LLVM_DUMP_METHOD void GCOVFunction::dump() const { + print(dbgs()); +} +#endif + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVFunction::collectLineCounts(FileInfo &FI) { + // If the line number is zero, this is a function that doesn't actually appear + // in the source file, so there isn't anything we can do with it. 
+ if (LineNumber == 0) + return; + + for (const auto &Block : Blocks) + Block->collectLineCounts(FI); + FI.addFunctionLine(Filename, LineNumber, this); +} + +//===----------------------------------------------------------------------===// +// GCOVBlock implementation. + +/// ~GCOVBlock - Delete GCOVBlock and its content. +GCOVBlock::~GCOVBlock() { + SrcEdges.clear(); + DstEdges.clear(); + Lines.clear(); +} + +/// addCount - Add to block counter while storing the edge count. If the +/// destination has no outgoing edges, also update that block's count too. +void GCOVBlock::addCount(size_t DstEdgeNo, uint64_t N) { + assert(DstEdgeNo < DstEdges.size()); // up to caller to ensure EdgeNo is valid + DstEdges[DstEdgeNo]->Count = N; + Counter += N; + if (!DstEdges[DstEdgeNo]->Dst.getNumDstEdges()) + DstEdges[DstEdgeNo]->Dst.Counter += N; +} + +/// sortDstEdges - Sort destination edges by block number, nop if already +/// sorted. This is required for printing branch info in the correct order. +void GCOVBlock::sortDstEdges() { + if (!DstEdgesAreSorted) { + SortDstEdgesFunctor SortEdges; + std::stable_sort(DstEdges.begin(), DstEdges.end(), SortEdges); + } +} + +/// collectLineCounts - Collect line counts. This must be used after +/// reading .gcno and .gcda files. +void GCOVBlock::collectLineCounts(FileInfo &FI) { + for (uint32_t N : Lines) + FI.addBlockLine(Parent.getFilename(), N, this); +} + +void GCOVBlock::print(raw_ostream &OS) const { + OS << "Block : " << Number << " Counter : " << Counter << "\n"; + if (!SrcEdges.empty()) { + OS << "\tSource Edges : "; + for (const GCOVEdge *Edge : SrcEdges) + OS << Edge->Src.Number << " (" << Edge->Count << "), "; + OS << "\n"; + } + if (!DstEdges.empty()) { + OS << "\tDestination Edges : "; + for (const GCOVEdge *Edge : DstEdges) + OS << Edge->Dst.Number << " (" << Edge->Count << "), "; + OS << "\n"; + } + if (!Lines.empty()) { + OS << "\tLines : "; + for (uint32_t N : Lines) + OS << (N) << ","; + OS << "\n"; + } +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +/// dump - Dump GCOVBlock content to dbgs() for debugging purposes. +LLVM_DUMP_METHOD void GCOVBlock::dump() const { + print(dbgs()); +} +#endif + +//===----------------------------------------------------------------------===// +// FileInfo implementation. + +// Safe integer division, returns 0 if numerator is 0. 
+static uint32_t safeDiv(uint64_t Numerator, uint64_t Divisor) { + if (!Numerator) + return 0; + return Numerator / Divisor; +} + +// This custom division function mimics gcov's branch ouputs: +// - Round to closest whole number +// - Only output 0% or 100% if it's exactly that value +static uint32_t branchDiv(uint64_t Numerator, uint64_t Divisor) { + if (!Numerator) + return 0; + if (Numerator == Divisor) + return 100; + + uint8_t Res = (Numerator * 100 + Divisor / 2) / Divisor; + if (Res == 0) + return 1; + if (Res == 100) + return 99; + return Res; +} + +namespace { +struct formatBranchInfo { + formatBranchInfo(const GCOV::Options &Options, uint64_t Count, uint64_t Total) + : Options(Options), Count(Count), Total(Total) {} + + void print(raw_ostream &OS) const { + if (!Total) + OS << "never executed"; + else if (Options.BranchCount) + OS << "taken " << Count; + else + OS << "taken " << branchDiv(Count, Total) << "%"; + } + + const GCOV::Options &Options; + uint64_t Count; + uint64_t Total; +}; + +static raw_ostream &operator<<(raw_ostream &OS, const formatBranchInfo &FBI) { + FBI.print(OS); + return OS; +} + +class LineConsumer { + std::unique_ptr<MemoryBuffer> Buffer; + StringRef Remaining; + +public: + LineConsumer(StringRef Filename) { + ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = + MemoryBuffer::getFileOrSTDIN(Filename); + if (std::error_code EC = BufferOrErr.getError()) { + errs() << Filename << ": " << EC.message() << "\n"; + Remaining = ""; + } else { + Buffer = std::move(BufferOrErr.get()); + Remaining = Buffer->getBuffer(); + } + } + bool empty() { return Remaining.empty(); } + void printNext(raw_ostream &OS, uint32_t LineNum) { + StringRef Line; + if (empty()) + Line = "/*EOF*/"; + else + std::tie(Line, Remaining) = Remaining.split("\n"); + OS << format("%5u:", LineNum) << Line << "\n"; + } +}; +} // end anonymous namespace + +/// Convert a path to a gcov filename. If PreservePaths is true, this +/// translates "/" to "#", ".." to "^", and drops ".", to match gcov. +static std::string mangleCoveragePath(StringRef Filename, bool PreservePaths) { + if (!PreservePaths) + return sys::path::filename(Filename).str(); + + // This behaviour is defined by gcov in terms of text replacements, so it's + // not likely to do anything useful on filesystems with different textual + // conventions. + llvm::SmallString<256> Result(""); + StringRef::iterator I, S, E; + for (I = S = Filename.begin(), E = Filename.end(); I != E; ++I) { + if (*I != '/') + continue; + + if (I - S == 1 && *S == '.') { + // ".", the current directory, is skipped. + } else if (I - S == 2 && *S == '.' && *(S + 1) == '.') { + // "..", the parent directory, is replaced with "^". + Result.append("^#"); + } else { + if (S < I) + // Leave other components intact, + Result.append(S, I); + // And separate with "#". + Result.push_back('#'); + } + S = I + 1; + } + + if (S < I) + Result.append(S, I); + return Result.str(); +} + +std::string FileInfo::getCoveragePath(StringRef Filename, + StringRef MainFilename) { + if (Options.NoOutput) + // This is probably a bug in gcov, but when -n is specified, paths aren't + // mangled at all, and the -l and -p options are ignored. Here, we do the + // same. 
+ return Filename; + + std::string CoveragePath; + if (Options.LongFileNames && !Filename.equals(MainFilename)) + CoveragePath = + mangleCoveragePath(MainFilename, Options.PreservePaths) + "##"; + CoveragePath += mangleCoveragePath(Filename, Options.PreservePaths) + ".gcov"; + return CoveragePath; +} + +std::unique_ptr<raw_ostream> +FileInfo::openCoveragePath(StringRef CoveragePath) { + if (Options.NoOutput) + return llvm::make_unique<raw_null_ostream>(); + + std::error_code EC; + auto OS = llvm::make_unique<raw_fd_ostream>(CoveragePath, EC, + sys::fs::F_Text); + if (EC) { + errs() << EC.message() << "\n"; + return llvm::make_unique<raw_null_ostream>(); + } + return std::move(OS); +} + +/// print - Print source files with collected line count information. +void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename, + StringRef GCNOFile, StringRef GCDAFile) { + SmallVector<StringRef, 4> Filenames; + for (const auto &LI : LineInfo) + Filenames.push_back(LI.first()); + llvm::sort(Filenames.begin(), Filenames.end()); + + for (StringRef Filename : Filenames) { + auto AllLines = LineConsumer(Filename); + + std::string CoveragePath = getCoveragePath(Filename, MainFilename); + std::unique_ptr<raw_ostream> CovStream = openCoveragePath(CoveragePath); + raw_ostream &CovOS = *CovStream; + + CovOS << " -: 0:Source:" << Filename << "\n"; + CovOS << " -: 0:Graph:" << GCNOFile << "\n"; + CovOS << " -: 0:Data:" << GCDAFile << "\n"; + CovOS << " -: 0:Runs:" << RunCount << "\n"; + CovOS << " -: 0:Programs:" << ProgramCount << "\n"; + + const LineData &Line = LineInfo[Filename]; + GCOVCoverage FileCoverage(Filename); + for (uint32_t LineIndex = 0; LineIndex < Line.LastLine || !AllLines.empty(); + ++LineIndex) { + if (Options.BranchInfo) { + FunctionLines::const_iterator FuncsIt = Line.Functions.find(LineIndex); + if (FuncsIt != Line.Functions.end()) + printFunctionSummary(CovOS, FuncsIt->second); + } + + BlockLines::const_iterator BlocksIt = Line.Blocks.find(LineIndex); + if (BlocksIt == Line.Blocks.end()) { + // No basic blocks are on this line. Not an executable line of code. + CovOS << " -:"; + AllLines.printNext(CovOS, LineIndex + 1); + } else { + const BlockVector &Blocks = BlocksIt->second; + + // Add up the block counts to form line counts. + DenseMap<const GCOVFunction *, bool> LineExecs; + uint64_t LineCount = 0; + for (const GCOVBlock *Block : Blocks) { + if (Options.AllBlocks) { + // Only take the highest block count for that line. + uint64_t BlockCount = Block->getCount(); + LineCount = LineCount > BlockCount ? LineCount : BlockCount; + } else { + // Sum up all of the block counts. + LineCount += Block->getCount(); + } + + if (Options.FuncCoverage) { + // This is a slightly convoluted way to most accurately gather line + // statistics for functions. Basically what is happening is that we + // don't want to count a single line with multiple blocks more than + // once. However, we also don't simply want to give the total line + // count to every function that starts on the line. Thus, what is + // happening here are two things: + // 1) Ensure that the number of logical lines is only incremented + // once per function. + // 2) If there are multiple blocks on the same line, ensure that the + // number of lines executed is incremented as long as at least + // one of the blocks are executed. 
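+            // For example, if two functions both have blocks on this line
+            // and only the first function's blocks were executed, each
+            // function gains one logical line, but only the first is
+            // credited with an executed line.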
+ const GCOVFunction *Function = &Block->getParent(); + if (FuncCoverages.find(Function) == FuncCoverages.end()) { + std::pair<const GCOVFunction *, GCOVCoverage> KeyValue( + Function, GCOVCoverage(Function->getName())); + FuncCoverages.insert(KeyValue); + } + GCOVCoverage &FuncCoverage = FuncCoverages.find(Function)->second; + + if (LineExecs.find(Function) == LineExecs.end()) { + if (Block->getCount()) { + ++FuncCoverage.LinesExec; + LineExecs[Function] = true; + } else { + LineExecs[Function] = false; + } + ++FuncCoverage.LogicalLines; + } else if (!LineExecs[Function] && Block->getCount()) { + ++FuncCoverage.LinesExec; + LineExecs[Function] = true; + } + } + } + + if (LineCount == 0) + CovOS << " #####:"; + else { + CovOS << format("%9" PRIu64 ":", LineCount); + ++FileCoverage.LinesExec; + } + ++FileCoverage.LogicalLines; + + AllLines.printNext(CovOS, LineIndex + 1); + + uint32_t BlockNo = 0; + uint32_t EdgeNo = 0; + for (const GCOVBlock *Block : Blocks) { + // Only print block and branch information at the end of the block. + if (Block->getLastLine() != LineIndex + 1) + continue; + if (Options.AllBlocks) + printBlockInfo(CovOS, *Block, LineIndex, BlockNo); + if (Options.BranchInfo) { + size_t NumEdges = Block->getNumDstEdges(); + if (NumEdges > 1) + printBranchInfo(CovOS, *Block, FileCoverage, EdgeNo); + else if (Options.UncondBranch && NumEdges == 1) + printUncondBranchInfo(CovOS, EdgeNo, + (*Block->dst_begin())->Count); + } + } + } + } + FileCoverages.push_back(std::make_pair(CoveragePath, FileCoverage)); + } + + // FIXME: There is no way to detect calls given current instrumentation. + if (Options.FuncCoverage) + printFuncCoverage(InfoOS); + printFileCoverage(InfoOS); +} + +/// printFunctionSummary - Print function and block summary. +void FileInfo::printFunctionSummary(raw_ostream &OS, + const FunctionVector &Funcs) const { + for (const GCOVFunction *Func : Funcs) { + uint64_t EntryCount = Func->getEntryCount(); + uint32_t BlocksExec = 0; + for (const GCOVBlock &Block : Func->blocks()) + if (Block.getNumDstEdges() && Block.getCount()) + ++BlocksExec; + + OS << "function " << Func->getName() << " called " << EntryCount + << " returned " << safeDiv(Func->getExitCount() * 100, EntryCount) + << "% blocks executed " + << safeDiv(BlocksExec * 100, Func->getNumBlocks() - 1) << "%\n"; + } +} + +/// printBlockInfo - Output counts for each block. +void FileInfo::printBlockInfo(raw_ostream &OS, const GCOVBlock &Block, + uint32_t LineIndex, uint32_t &BlockNo) const { + if (Block.getCount() == 0) + OS << " $$$$$:"; + else + OS << format("%9" PRIu64 ":", Block.getCount()); + OS << format("%5u-block %2u\n", LineIndex + 1, BlockNo++); +} + +/// printBranchInfo - Print conditional branch probabilities. 
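+///
+/// Each destination edge of the block produces one line of the form
+/// "branch  N taken P%"; with the branch-count option the raw taken count is
+/// printed instead, and "never executed" is printed when none of the block's
+/// outgoing edges were taken.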
+void FileInfo::printBranchInfo(raw_ostream &OS, const GCOVBlock &Block, + GCOVCoverage &Coverage, uint32_t &EdgeNo) { + SmallVector<uint64_t, 16> BranchCounts; + uint64_t TotalCounts = 0; + for (const GCOVEdge *Edge : Block.dsts()) { + BranchCounts.push_back(Edge->Count); + TotalCounts += Edge->Count; + if (Block.getCount()) + ++Coverage.BranchesExec; + if (Edge->Count) + ++Coverage.BranchesTaken; + ++Coverage.Branches; + + if (Options.FuncCoverage) { + const GCOVFunction *Function = &Block.getParent(); + GCOVCoverage &FuncCoverage = FuncCoverages.find(Function)->second; + if (Block.getCount()) + ++FuncCoverage.BranchesExec; + if (Edge->Count) + ++FuncCoverage.BranchesTaken; + ++FuncCoverage.Branches; + } + } + + for (uint64_t N : BranchCounts) + OS << format("branch %2u ", EdgeNo++) + << formatBranchInfo(Options, N, TotalCounts) << "\n"; +} + +/// printUncondBranchInfo - Print unconditional branch probabilities. +void FileInfo::printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo, + uint64_t Count) const { + OS << format("unconditional %2u ", EdgeNo++) + << formatBranchInfo(Options, Count, Count) << "\n"; +} + +// printCoverage - Print generic coverage info used by both printFuncCoverage +// and printFileCoverage. +void FileInfo::printCoverage(raw_ostream &OS, + const GCOVCoverage &Coverage) const { + OS << format("Lines executed:%.2f%% of %u\n", + double(Coverage.LinesExec) * 100 / Coverage.LogicalLines, + Coverage.LogicalLines); + if (Options.BranchInfo) { + if (Coverage.Branches) { + OS << format("Branches executed:%.2f%% of %u\n", + double(Coverage.BranchesExec) * 100 / Coverage.Branches, + Coverage.Branches); + OS << format("Taken at least once:%.2f%% of %u\n", + double(Coverage.BranchesTaken) * 100 / Coverage.Branches, + Coverage.Branches); + } else { + OS << "No branches\n"; + } + OS << "No calls\n"; // to be consistent with gcov + } +} + +// printFuncCoverage - Print per-function coverage info. +void FileInfo::printFuncCoverage(raw_ostream &OS) const { + for (const auto &FC : FuncCoverages) { + const GCOVCoverage &Coverage = FC.second; + OS << "Function '" << Coverage.Name << "'\n"; + printCoverage(OS, Coverage); + OS << "\n"; + } +} + +// printFileCoverage - Print per-file coverage info. +void FileInfo::printFileCoverage(raw_ostream &OS) const { + for (const auto &FC : FileCoverages) { + const std::string &Filename = FC.first; + const GCOVCoverage &Coverage = FC.second; + OS << "File '" << Coverage.Name << "'\n"; + printCoverage(OS, Coverage); + if (!Options.NoOutput) + OS << Coverage.Name << ":creating '" << Filename << "'\n"; + OS << "\n"; + } +} diff --git a/contrib/llvm/lib/ProfileData/InstrProf.cpp b/contrib/llvm/lib/ProfileData/InstrProf.cpp new file mode 100644 index 000000000000..544a77ec20a5 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/InstrProf.cpp @@ -0,0 +1,1014 @@ +//===- InstrProf.cpp - Instrumented profiling format support --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for clang's instrumentation based PGO and +// coverage. 
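+// It provides the PGO function name helpers, the InstrProfRecord merge and
+// scale logic, and the ValueProfData serialization shared by the profile
+// readers and writers.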
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Compression.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+static cl::opt<bool> StaticFuncFullModulePrefix(
+    "static-func-full-module-prefix", cl::init(true), cl::Hidden,
+    cl::desc("Use full module build paths in the profile counter names for "
+             "static functions."));
+
+// This option is tailored to users that have different top-level directories
+// in profile-gen and profile-use compilation. Users need to specify the
+// number of levels to strip. A value larger than the number of directories
+// in the source file's path will strip all the directory names and leave
+// only the basename.
+//
+// Note that the current ThinLTO module importing for indirect calls assumes
+// the source directory name is not stripped. A non-zero option value here
+// can potentially prevent some inter-module indirect-call promotions.
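+//
+// For example, with -static-func-strip-dirname-prefix=2 a module path of
+// "a/b/c/foo.c" is reduced to "c/foo.c" before it is used as the prefix of
+// a static function's counter name.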
+static cl::opt<unsigned> StaticFuncStripDirNamePrefix( + "static-func-strip-dirname-prefix", cl::init(0), cl::Hidden, + cl::desc("Strip specified level of directory name from source path in " + "the profile counter name for static functions.")); + +static std::string getInstrProfErrString(instrprof_error Err) { + switch (Err) { + case instrprof_error::success: + return "Success"; + case instrprof_error::eof: + return "End of File"; + case instrprof_error::unrecognized_format: + return "Unrecognized instrumentation profile encoding format"; + case instrprof_error::bad_magic: + return "Invalid instrumentation profile data (bad magic)"; + case instrprof_error::bad_header: + return "Invalid instrumentation profile data (file header is corrupt)"; + case instrprof_error::unsupported_version: + return "Unsupported instrumentation profile format version"; + case instrprof_error::unsupported_hash_type: + return "Unsupported instrumentation profile hash type"; + case instrprof_error::too_large: + return "Too much profile data"; + case instrprof_error::truncated: + return "Truncated profile data"; + case instrprof_error::malformed: + return "Malformed instrumentation profile data"; + case instrprof_error::unknown_function: + return "No profile data available for function"; + case instrprof_error::hash_mismatch: + return "Function control flow change detected (hash mismatch)"; + case instrprof_error::count_mismatch: + return "Function basic block count change detected (counter mismatch)"; + case instrprof_error::counter_overflow: + return "Counter overflow"; + case instrprof_error::value_site_count_mismatch: + return "Function value site count change detected (counter mismatch)"; + case instrprof_error::compress_failed: + return "Failed to compress data (zlib)"; + case instrprof_error::uncompress_failed: + return "Failed to uncompress data (zlib)"; + case instrprof_error::empty_raw_profile: + return "Empty raw profile file"; + case instrprof_error::zlib_unavailable: + return "Profile uses zlib compression but the profile reader was built without zlib support"; + } + llvm_unreachable("A value of instrprof_error has no message."); +} + +namespace { + +// FIXME: This class is only here to support the transition to llvm::Error. It +// will be removed once this transition is complete. Clients should prefer to +// deal with the Error value directly, rather than converting to error_code. 
+class InstrProfErrorCategoryType : public std::error_category { + const char *name() const noexcept override { return "llvm.instrprof"; } + + std::string message(int IE) const override { + return getInstrProfErrString(static_cast<instrprof_error>(IE)); + } +}; + +} // end anonymous namespace + +static ManagedStatic<InstrProfErrorCategoryType> ErrorCategory; + +const std::error_category &llvm::instrprof_category() { + return *ErrorCategory; +} + +namespace { + +const char *InstrProfSectNameCommon[] = { +#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \ + SectNameCommon, +#include "llvm/ProfileData/InstrProfData.inc" +}; + +const char *InstrProfSectNameCoff[] = { +#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \ + SectNameCoff, +#include "llvm/ProfileData/InstrProfData.inc" +}; + +const char *InstrProfSectNamePrefix[] = { +#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) \ + Prefix, +#include "llvm/ProfileData/InstrProfData.inc" +}; + +} // namespace + +namespace llvm { + +std::string getInstrProfSectionName(InstrProfSectKind IPSK, + Triple::ObjectFormatType OF, + bool AddSegmentInfo) { + std::string SectName; + + if (OF == Triple::MachO && AddSegmentInfo) + SectName = InstrProfSectNamePrefix[IPSK]; + + if (OF == Triple::COFF) + SectName += InstrProfSectNameCoff[IPSK]; + else + SectName += InstrProfSectNameCommon[IPSK]; + + if (OF == Triple::MachO && IPSK == IPSK_data && AddSegmentInfo) + SectName += ",regular,live_support"; + + return SectName; +} + +void SoftInstrProfErrors::addError(instrprof_error IE) { + if (IE == instrprof_error::success) + return; + + if (FirstError == instrprof_error::success) + FirstError = IE; + + switch (IE) { + case instrprof_error::hash_mismatch: + ++NumHashMismatches; + break; + case instrprof_error::count_mismatch: + ++NumCountMismatches; + break; + case instrprof_error::counter_overflow: + ++NumCounterOverflows; + break; + case instrprof_error::value_site_count_mismatch: + ++NumValueSiteCountMismatches; + break; + default: + llvm_unreachable("Not a soft error"); + } +} + +std::string InstrProfError::message() const { + return getInstrProfErrString(Err); +} + +char InstrProfError::ID = 0; + +std::string getPGOFuncName(StringRef RawFuncName, + GlobalValue::LinkageTypes Linkage, + StringRef FileName, + uint64_t Version LLVM_ATTRIBUTE_UNUSED) { + return GlobalValue::getGlobalIdentifier(RawFuncName, Linkage, FileName); +} + +// Strip NumPrefix level of directory name from PathNameStr. If the number of +// directory separators is less than NumPrefix, strip all the directories and +// leave base file name only. +static StringRef stripDirPrefix(StringRef PathNameStr, uint32_t NumPrefix) { + uint32_t Count = NumPrefix; + uint32_t Pos = 0, LastPos = 0; + for (auto & CI : PathNameStr) { + ++Pos; + if (llvm::sys::path::is_separator(CI)) { + LastPos = Pos; + --Count; + } + if (Count == 0) + break; + } + return PathNameStr.substr(LastPos); +} + +// Return the PGOFuncName. This function has some special handling when called +// in LTO optimization. The following only applies when calling in LTO passes +// (when \c InLTO is true): LTO's internalization privatizes many global linkage +// symbols. This happens after value profile annotation, but those internal +// linkage functions should not have a source prefix. +// Additionally, for ThinLTO mode, exported internal functions are promoted +// and renamed. 
We need to ensure that the original internal PGO name is +// used when computing the GUID that is compared against the profiled GUIDs. +// To differentiate compiler generated internal symbols from original ones, +// PGOFuncName meta data are created and attached to the original internal +// symbols in the value profile annotation step +// (PGOUseFunc::annotateIndirectCallSites). If a symbol does not have the meta +// data, its original linkage must be non-internal. +std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) { + if (!InLTO) { + StringRef FileName = (StaticFuncFullModulePrefix + ? F.getParent()->getName() + : sys::path::filename(F.getParent()->getName())); + if (StaticFuncFullModulePrefix && StaticFuncStripDirNamePrefix != 0) + FileName = stripDirPrefix(FileName, StaticFuncStripDirNamePrefix); + return getPGOFuncName(F.getName(), F.getLinkage(), FileName, Version); + } + + // In LTO mode (when InLTO is true), first check if there is a meta data. + if (MDNode *MD = getPGOFuncNameMetadata(F)) { + StringRef S = cast<MDString>(MD->getOperand(0))->getString(); + return S.str(); + } + + // If there is no meta data, the function must be a global before the value + // profile annotation pass. Its current linkage may be internal if it is + // internalized in LTO mode. + return getPGOFuncName(F.getName(), GlobalValue::ExternalLinkage, ""); +} + +StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) { + if (FileName.empty()) + return PGOFuncName; + // Drop the file name including ':'. See also getPGOFuncName. + if (PGOFuncName.startswith(FileName)) + PGOFuncName = PGOFuncName.drop_front(FileName.size() + 1); + return PGOFuncName; +} + +// \p FuncName is the string used as profile lookup key for the function. A +// symbol is created to hold the name. Return the legalized symbol name. +std::string getPGOFuncNameVarName(StringRef FuncName, + GlobalValue::LinkageTypes Linkage) { + std::string VarName = getInstrProfNameVarPrefix(); + VarName += FuncName; + + if (!GlobalValue::isLocalLinkage(Linkage)) + return VarName; + + // Now fix up illegal chars in local VarName that may upset the assembler. + const char *InvalidChars = "-:<>/\"'"; + size_t found = VarName.find_first_of(InvalidChars); + while (found != std::string::npos) { + VarName[found] = '_'; + found = VarName.find_first_of(InvalidChars, found + 1); + } + return VarName; +} + +GlobalVariable *createPGOFuncNameVar(Module &M, + GlobalValue::LinkageTypes Linkage, + StringRef PGOFuncName) { + // We generally want to match the function's linkage, but available_externally + // and extern_weak both have the wrong semantics, and anything that doesn't + // need to link across compilation units doesn't need to be visible at all. + if (Linkage == GlobalValue::ExternalWeakLinkage) + Linkage = GlobalValue::LinkOnceAnyLinkage; + else if (Linkage == GlobalValue::AvailableExternallyLinkage) + Linkage = GlobalValue::LinkOnceODRLinkage; + else if (Linkage == GlobalValue::InternalLinkage || + Linkage == GlobalValue::ExternalLinkage) + Linkage = GlobalValue::PrivateLinkage; + + auto *Value = + ConstantDataArray::getString(M.getContext(), PGOFuncName, false); + auto FuncNameVar = + new GlobalVariable(M, Value->getType(), true, Linkage, Value, + getPGOFuncNameVarName(PGOFuncName, Linkage)); + + // Hide the symbol so that we correctly get a copy for each executable. 
+ if (!GlobalValue::isLocalLinkage(FuncNameVar->getLinkage())) + FuncNameVar->setVisibility(GlobalValue::HiddenVisibility); + + return FuncNameVar; +} + +GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName) { + return createPGOFuncNameVar(*F.getParent(), F.getLinkage(), PGOFuncName); +} + +Error InstrProfSymtab::create(Module &M, bool InLTO) { + for (Function &F : M) { + // Function may not have a name: like using asm("") to overwrite the name. + // Ignore in this case. + if (!F.hasName()) + continue; + const std::string &PGOFuncName = getPGOFuncName(F, InLTO); + if (Error E = addFuncName(PGOFuncName)) + return E; + MD5FuncMap.emplace_back(Function::getGUID(PGOFuncName), &F); + // In ThinLTO, local function may have been promoted to global and have + // suffix added to the function name. We need to add the stripped function + // name to the symbol table so that we can find a match from profile. + if (InLTO) { + auto pos = PGOFuncName.find('.'); + if (pos != std::string::npos) { + const std::string &OtherFuncName = PGOFuncName.substr(0, pos); + if (Error E = addFuncName(OtherFuncName)) + return E; + MD5FuncMap.emplace_back(Function::getGUID(OtherFuncName), &F); + } + } + } + Sorted = false; + finalizeSymtab(); + return Error::success(); +} + +uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) { + finalizeSymtab(); + auto Result = + std::lower_bound(AddrToMD5Map.begin(), AddrToMD5Map.end(), Address, + [](const std::pair<uint64_t, uint64_t> &LHS, + uint64_t RHS) { return LHS.first < RHS; }); + // Raw function pointer collected by value profiler may be from + // external functions that are not instrumented. They won't have + // mapping data to be used by the deserializer. Force the value to + // be 0 in this case. + if (Result != AddrToMD5Map.end() && Result->first == Address) + return (uint64_t)Result->second; + return 0; +} + +Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs, + bool doCompression, std::string &Result) { + assert(!NameStrs.empty() && "No name data to emit"); + + uint8_t Header[16], *P = Header; + std::string UncompressedNameStrings = + join(NameStrs.begin(), NameStrs.end(), getInstrProfNameSeparator()); + + assert(StringRef(UncompressedNameStrings) + .count(getInstrProfNameSeparator()) == (NameStrs.size() - 1) && + "PGO name is invalid (contains separator token)"); + + unsigned EncLen = encodeULEB128(UncompressedNameStrings.length(), P); + P += EncLen; + + auto WriteStringToResult = [&](size_t CompressedLen, StringRef InputStr) { + EncLen = encodeULEB128(CompressedLen, P); + P += EncLen; + char *HeaderStr = reinterpret_cast<char *>(&Header[0]); + unsigned HeaderLen = P - &Header[0]; + Result.append(HeaderStr, HeaderLen); + Result += InputStr; + return Error::success(); + }; + + if (!doCompression) { + return WriteStringToResult(0, UncompressedNameStrings); + } + + SmallString<128> CompressedNameStrings; + Error E = zlib::compress(StringRef(UncompressedNameStrings), + CompressedNameStrings, zlib::BestSizeCompression); + if (E) { + consumeError(std::move(E)); + return make_error<InstrProfError>(instrprof_error::compress_failed); + } + + return WriteStringToResult(CompressedNameStrings.size(), + CompressedNameStrings); +} + +StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar) { + auto *Arr = cast<ConstantDataArray>(NameVar->getInitializer()); + StringRef NameStr = + Arr->isCString() ? 
Arr->getAsCString() : Arr->getAsString(); + return NameStr; +} + +Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars, + std::string &Result, bool doCompression) { + std::vector<std::string> NameStrs; + for (auto *NameVar : NameVars) { + NameStrs.push_back(getPGOFuncNameVarInitializer(NameVar)); + } + return collectPGOFuncNameStrings( + NameStrs, zlib::isAvailable() && doCompression, Result); +} + +Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab) { + const uint8_t *P = reinterpret_cast<const uint8_t *>(NameStrings.data()); + const uint8_t *EndP = reinterpret_cast<const uint8_t *>(NameStrings.data() + + NameStrings.size()); + while (P < EndP) { + uint32_t N; + uint64_t UncompressedSize = decodeULEB128(P, &N); + P += N; + uint64_t CompressedSize = decodeULEB128(P, &N); + P += N; + bool isCompressed = (CompressedSize != 0); + SmallString<128> UncompressedNameStrings; + StringRef NameStrings; + if (isCompressed) { + if (!llvm::zlib::isAvailable()) + return make_error<InstrProfError>(instrprof_error::zlib_unavailable); + + StringRef CompressedNameStrings(reinterpret_cast<const char *>(P), + CompressedSize); + if (Error E = + zlib::uncompress(CompressedNameStrings, UncompressedNameStrings, + UncompressedSize)) { + consumeError(std::move(E)); + return make_error<InstrProfError>(instrprof_error::uncompress_failed); + } + P += CompressedSize; + NameStrings = StringRef(UncompressedNameStrings.data(), + UncompressedNameStrings.size()); + } else { + NameStrings = + StringRef(reinterpret_cast<const char *>(P), UncompressedSize); + P += UncompressedSize; + } + // Now parse the name strings. + SmallVector<StringRef, 0> Names; + NameStrings.split(Names, getInstrProfNameSeparator()); + for (StringRef &Name : Names) + if (Error E = Symtab.addFuncName(Name)) + return E; + + while (P < EndP && *P == 0) + P++; + } + return Error::success(); +} + +void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input, + uint64_t Weight, + function_ref<void(instrprof_error)> Warn) { + this->sortByTargetValues(); + Input.sortByTargetValues(); + auto I = ValueData.begin(); + auto IE = ValueData.end(); + for (auto J = Input.ValueData.begin(), JE = Input.ValueData.end(); J != JE; + ++J) { + while (I != IE && I->Value < J->Value) + ++I; + if (I != IE && I->Value == J->Value) { + bool Overflowed; + I->Count = SaturatingMultiplyAdd(J->Count, Weight, I->Count, &Overflowed); + if (Overflowed) + Warn(instrprof_error::counter_overflow); + ++I; + continue; + } + ValueData.insert(I, *J); + } +} + +void InstrProfValueSiteRecord::scale(uint64_t Weight, + function_ref<void(instrprof_error)> Warn) { + for (auto I = ValueData.begin(), IE = ValueData.end(); I != IE; ++I) { + bool Overflowed; + I->Count = SaturatingMultiply(I->Count, Weight, &Overflowed); + if (Overflowed) + Warn(instrprof_error::counter_overflow); + } +} + +// Merge Value Profile data from Src record to this record for ValueKind. +// Scale merged value counts by \p Weight. 
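+//
+// For example, if a site in this record holds {target A: 10} and the same
+// site in \p Src holds {target A: 5}, merging with \p Weight == 3 leaves the
+// site at {target A: 25}, saturating (and warning) on counter overflow.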
+void InstrProfRecord::mergeValueProfData( + uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight, + function_ref<void(instrprof_error)> Warn) { + uint32_t ThisNumValueSites = getNumValueSites(ValueKind); + uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind); + if (ThisNumValueSites != OtherNumValueSites) { + Warn(instrprof_error::value_site_count_mismatch); + return; + } + if (!ThisNumValueSites) + return; + std::vector<InstrProfValueSiteRecord> &ThisSiteRecords = + getOrCreateValueSitesForKind(ValueKind); + MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords = + Src.getValueSitesForKind(ValueKind); + for (uint32_t I = 0; I < ThisNumValueSites; I++) + ThisSiteRecords[I].merge(OtherSiteRecords[I], Weight, Warn); +} + +void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight, + function_ref<void(instrprof_error)> Warn) { + // If the number of counters doesn't match we either have bad data + // or a hash collision. + if (Counts.size() != Other.Counts.size()) { + Warn(instrprof_error::count_mismatch); + return; + } + + for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) { + bool Overflowed; + Counts[I] = + SaturatingMultiplyAdd(Other.Counts[I], Weight, Counts[I], &Overflowed); + if (Overflowed) + Warn(instrprof_error::counter_overflow); + } + + for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) + mergeValueProfData(Kind, Other, Weight, Warn); +} + +void InstrProfRecord::scaleValueProfData( + uint32_t ValueKind, uint64_t Weight, + function_ref<void(instrprof_error)> Warn) { + for (auto &R : getValueSitesForKind(ValueKind)) + R.scale(Weight, Warn); +} + +void InstrProfRecord::scale(uint64_t Weight, + function_ref<void(instrprof_error)> Warn) { + for (auto &Count : this->Counts) { + bool Overflowed; + Count = SaturatingMultiply(Count, Weight, &Overflowed); + if (Overflowed) + Warn(instrprof_error::counter_overflow); + } + for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) + scaleValueProfData(Kind, Weight, Warn); +} + +// Map indirect call target name hash to name string. +uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind, + InstrProfSymtab *SymTab) { + if (!SymTab) + return Value; + + if (ValueKind == IPVK_IndirectCallTarget) + return SymTab->getFunctionHashFromAddress(Value); + + return Value; +} + +void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site, + InstrProfValueData *VData, uint32_t N, + InstrProfSymtab *ValueMap) { + for (uint32_t I = 0; I < N; I++) { + VData[I].Value = remapValue(VData[I].Value, ValueKind, ValueMap); + } + std::vector<InstrProfValueSiteRecord> &ValueSites = + getOrCreateValueSitesForKind(ValueKind); + if (N == 0) + ValueSites.emplace_back(); + else + ValueSites.emplace_back(VData, VData + N); +} + +#define INSTR_PROF_COMMON_API_IMPL +#include "llvm/ProfileData/InstrProfData.inc" + +/*! + * ValueProfRecordClosure Interface implementation for InstrProfRecord + * class. These C wrappers are used as adaptors so that C++ code can be + * invoked as callbacks. 
+ */ +uint32_t getNumValueKindsInstrProf(const void *Record) { + return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds(); +} + +uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) { + return reinterpret_cast<const InstrProfRecord *>(Record) + ->getNumValueSites(VKind); +} + +uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) { + return reinterpret_cast<const InstrProfRecord *>(Record) + ->getNumValueData(VKind); +} + +uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK, + uint32_t S) { + return reinterpret_cast<const InstrProfRecord *>(R) + ->getNumValueDataForSite(VK, S); +} + +void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst, + uint32_t K, uint32_t S) { + reinterpret_cast<const InstrProfRecord *>(R)->getValueForSite(Dst, K, S); +} + +ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) { + ValueProfData *VD = + (ValueProfData *)(new (::operator new(TotalSizeInBytes)) ValueProfData()); + memset(VD, 0, TotalSizeInBytes); + return VD; +} + +static ValueProfRecordClosure InstrProfRecordClosure = { + nullptr, + getNumValueKindsInstrProf, + getNumValueSitesInstrProf, + getNumValueDataInstrProf, + getNumValueDataForSiteInstrProf, + nullptr, + getValueForSiteInstrProf, + allocValueProfDataInstrProf}; + +// Wrapper implementation using the closure mechanism. +uint32_t ValueProfData::getSize(const InstrProfRecord &Record) { + auto Closure = InstrProfRecordClosure; + Closure.Record = &Record; + return getValueProfDataSize(&Closure); +} + +// Wrapper implementation using the closure mechanism. +std::unique_ptr<ValueProfData> +ValueProfData::serializeFrom(const InstrProfRecord &Record) { + InstrProfRecordClosure.Record = &Record; + + std::unique_ptr<ValueProfData> VPD( + serializeValueProfDataFrom(&InstrProfRecordClosure, nullptr)); + return VPD; +} + +void ValueProfRecord::deserializeTo(InstrProfRecord &Record, + InstrProfSymtab *SymTab) { + Record.reserveSites(Kind, NumValueSites); + + InstrProfValueData *ValueData = getValueProfRecordValueData(this); + for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) { + uint8_t ValueDataCount = this->SiteCountArray[VSite]; + Record.addValueData(Kind, VSite, ValueData, ValueDataCount, SymTab); + ValueData += ValueDataCount; + } +} + +// For writing/serializing, Old is the host endianness, and New is +// byte order intended on disk. For Reading/deserialization, Old +// is the on-disk source endianness, and New is the host endianness. +void ValueProfRecord::swapBytes(support::endianness Old, + support::endianness New) { + using namespace support; + + if (Old == New) + return; + + if (getHostEndianness() != Old) { + sys::swapByteOrder<uint32_t>(NumValueSites); + sys::swapByteOrder<uint32_t>(Kind); + } + uint32_t ND = getValueProfRecordNumValueData(this); + InstrProfValueData *VD = getValueProfRecordValueData(this); + + // No need to swap byte array: SiteCountArrray. 
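+  // (Each of its elements is a single byte, so it has no byte order.)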
+ for (uint32_t I = 0; I < ND; I++) { + sys::swapByteOrder<uint64_t>(VD[I].Value); + sys::swapByteOrder<uint64_t>(VD[I].Count); + } + if (getHostEndianness() == Old) { + sys::swapByteOrder<uint32_t>(NumValueSites); + sys::swapByteOrder<uint32_t>(Kind); + } +} + +void ValueProfData::deserializeTo(InstrProfRecord &Record, + InstrProfSymtab *SymTab) { + if (NumValueKinds == 0) + return; + + ValueProfRecord *VR = getFirstValueProfRecord(this); + for (uint32_t K = 0; K < NumValueKinds; K++) { + VR->deserializeTo(Record, SymTab); + VR = getValueProfRecordNext(VR); + } +} + +template <class T> +static T swapToHostOrder(const unsigned char *&D, support::endianness Orig) { + using namespace support; + + if (Orig == little) + return endian::readNext<T, little, unaligned>(D); + else + return endian::readNext<T, big, unaligned>(D); +} + +static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) { + return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize)) + ValueProfData()); +} + +Error ValueProfData::checkIntegrity() { + if (NumValueKinds > IPVK_Last + 1) + return make_error<InstrProfError>(instrprof_error::malformed); + // Total size needs to be mulltiple of quadword size. + if (TotalSize % sizeof(uint64_t)) + return make_error<InstrProfError>(instrprof_error::malformed); + + ValueProfRecord *VR = getFirstValueProfRecord(this); + for (uint32_t K = 0; K < this->NumValueKinds; K++) { + if (VR->Kind > IPVK_Last) + return make_error<InstrProfError>(instrprof_error::malformed); + VR = getValueProfRecordNext(VR); + if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize) + return make_error<InstrProfError>(instrprof_error::malformed); + } + return Error::success(); +} + +Expected<std::unique_ptr<ValueProfData>> +ValueProfData::getValueProfData(const unsigned char *D, + const unsigned char *const BufferEnd, + support::endianness Endianness) { + using namespace support; + + if (D + sizeof(ValueProfData) > BufferEnd) + return make_error<InstrProfError>(instrprof_error::truncated); + + const unsigned char *Header = D; + uint32_t TotalSize = swapToHostOrder<uint32_t>(Header, Endianness); + if (D + TotalSize > BufferEnd) + return make_error<InstrProfError>(instrprof_error::too_large); + + std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize); + memcpy(VPD.get(), D, TotalSize); + // Byte swap. 
+ VPD->swapBytesToHost(Endianness); + + Error E = VPD->checkIntegrity(); + if (E) + return std::move(E); + + return std::move(VPD); +} + +void ValueProfData::swapBytesToHost(support::endianness Endianness) { + using namespace support; + + if (Endianness == getHostEndianness()) + return; + + sys::swapByteOrder<uint32_t>(TotalSize); + sys::swapByteOrder<uint32_t>(NumValueKinds); + + ValueProfRecord *VR = getFirstValueProfRecord(this); + for (uint32_t K = 0; K < NumValueKinds; K++) { + VR->swapBytes(Endianness, getHostEndianness()); + VR = getValueProfRecordNext(VR); + } +} + +void ValueProfData::swapBytesFromHost(support::endianness Endianness) { + using namespace support; + + if (Endianness == getHostEndianness()) + return; + + ValueProfRecord *VR = getFirstValueProfRecord(this); + for (uint32_t K = 0; K < NumValueKinds; K++) { + ValueProfRecord *NVR = getValueProfRecordNext(VR); + VR->swapBytes(getHostEndianness(), Endianness); + VR = NVR; + } + sys::swapByteOrder<uint32_t>(TotalSize); + sys::swapByteOrder<uint32_t>(NumValueKinds); +} + +void annotateValueSite(Module &M, Instruction &Inst, + const InstrProfRecord &InstrProfR, + InstrProfValueKind ValueKind, uint32_t SiteIdx, + uint32_t MaxMDCount) { + uint32_t NV = InstrProfR.getNumValueDataForSite(ValueKind, SiteIdx); + if (!NV) + return; + + uint64_t Sum = 0; + std::unique_ptr<InstrProfValueData[]> VD = + InstrProfR.getValueForSite(ValueKind, SiteIdx, &Sum); + + ArrayRef<InstrProfValueData> VDs(VD.get(), NV); + annotateValueSite(M, Inst, VDs, Sum, ValueKind, MaxMDCount); +} + +void annotateValueSite(Module &M, Instruction &Inst, + ArrayRef<InstrProfValueData> VDs, + uint64_t Sum, InstrProfValueKind ValueKind, + uint32_t MaxMDCount) { + LLVMContext &Ctx = M.getContext(); + MDBuilder MDHelper(Ctx); + SmallVector<Metadata *, 3> Vals; + // Tag + Vals.push_back(MDHelper.createString("VP")); + // Value Kind + Vals.push_back(MDHelper.createConstant( + ConstantInt::get(Type::getInt32Ty(Ctx), ValueKind))); + // Total Count + Vals.push_back( + MDHelper.createConstant(ConstantInt::get(Type::getInt64Ty(Ctx), Sum))); + + // Value Profile Data + uint32_t MDCount = MaxMDCount; + for (auto &VD : VDs) { + Vals.push_back(MDHelper.createConstant( + ConstantInt::get(Type::getInt64Ty(Ctx), VD.Value))); + Vals.push_back(MDHelper.createConstant( + ConstantInt::get(Type::getInt64Ty(Ctx), VD.Count))); + if (--MDCount == 0) + break; + } + Inst.setMetadata(LLVMContext::MD_prof, MDNode::get(Ctx, Vals)); +} + +bool getValueProfDataFromInst(const Instruction &Inst, + InstrProfValueKind ValueKind, + uint32_t MaxNumValueData, + InstrProfValueData ValueData[], + uint32_t &ActualNumValueData, uint64_t &TotalC) { + MDNode *MD = Inst.getMetadata(LLVMContext::MD_prof); + if (!MD) + return false; + + unsigned NOps = MD->getNumOperands(); + + if (NOps < 5) + return false; + + // Operand 0 is a string tag "VP": + MDString *Tag = cast<MDString>(MD->getOperand(0)); + if (!Tag) + return false; + + if (!Tag->getString().equals("VP")) + return false; + + // Now check kind: + ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1)); + if (!KindInt) + return false; + if (KindInt->getZExtValue() != ValueKind) + return false; + + // Get total count + ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2)); + if (!TotalCInt) + return false; + TotalC = TotalCInt->getZExtValue(); + + ActualNumValueData = 0; + + for (unsigned I = 3; I < NOps; I += 2) { + if (ActualNumValueData >= MaxNumValueData) + break; + ConstantInt *Value = 
mdconst::dyn_extract<ConstantInt>(MD->getOperand(I)); + ConstantInt *Count = + mdconst::dyn_extract<ConstantInt>(MD->getOperand(I + 1)); + if (!Value || !Count) + return false; + ValueData[ActualNumValueData].Value = Value->getZExtValue(); + ValueData[ActualNumValueData].Count = Count->getZExtValue(); + ActualNumValueData++; + } + return true; +} + +MDNode *getPGOFuncNameMetadata(const Function &F) { + return F.getMetadata(getPGOFuncNameMetadataName()); +} + +void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName) { + // Only for internal linkage functions. + if (PGOFuncName == F.getName()) + return; + // Don't create duplicated meta-data. + if (getPGOFuncNameMetadata(F)) + return; + LLVMContext &C = F.getContext(); + MDNode *N = MDNode::get(C, MDString::get(C, PGOFuncName)); + F.setMetadata(getPGOFuncNameMetadataName(), N); +} + +bool needsComdatForCounter(const Function &F, const Module &M) { + if (F.hasComdat()) + return true; + + if (!Triple(M.getTargetTriple()).supportsCOMDAT()) + return false; + + // See createPGOFuncNameVar for more details. To avoid link errors, profile + // counters for function with available_externally linkage needs to be changed + // to linkonce linkage. On ELF based systems, this leads to weak symbols to be + // created. Without using comdat, duplicate entries won't be removed by the + // linker leading to increased data segement size and raw profile size. Even + // worse, since the referenced counter from profile per-function data object + // will be resolved to the common strong definition, the profile counts for + // available_externally functions will end up being duplicated in raw profile + // data. This can result in distorted profile as the counts of those dups + // will be accumulated by the profile merger. + GlobalValue::LinkageTypes Linkage = F.getLinkage(); + if (Linkage != GlobalValue::ExternalWeakLinkage && + Linkage != GlobalValue::AvailableExternallyLinkage) + return false; + + return true; +} + +// Check if INSTR_PROF_RAW_VERSION_VAR is defined. +bool isIRPGOFlagSet(const Module *M) { + auto IRInstrVar = + M->getNamedGlobal(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR)); + if (!IRInstrVar || IRInstrVar->isDeclaration() || + IRInstrVar->hasLocalLinkage()) + return false; + + // Check if the flag is set. + if (!IRInstrVar->hasInitializer()) + return false; + + const Constant *InitVal = IRInstrVar->getInitializer(); + if (!InitVal) + return false; + + return (dyn_cast<ConstantInt>(InitVal)->getZExtValue() & + VARIANT_MASK_IR_PROF) != 0; +} + +// Check if we can safely rename this Comdat function. +bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken) { + if (F.getName().empty()) + return false; + if (!needsComdatForCounter(F, *(F.getParent()))) + return false; + // Unsafe to rename the address-taken function (which can be used in + // function comparison). + if (CheckAddressTaken && F.hasAddressTaken()) + return false; + // Only safe to do if this function may be discarded if it is not used + // in the compilation unit. + if (!GlobalValue::isDiscardableIfUnused(F.getLinkage())) + return false; + + // For AvailableExternallyLinkage functions. + if (!F.hasComdat()) { + assert(F.getLinkage() == GlobalValue::AvailableExternallyLinkage); + return true; + } + return true; +} + +// Parse the value profile options. 
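// The option string is expected as "<start>:<last>"; either side may be
// omitted, and a bare integer only sets the upper bound. A minimal sketch of
// the resulting values, given the defaults of 0 and 8 declared below:
//
//   int64_t Start, Last;
//   getMemOPSizeRangeFromOption("2:16", Start, Last); // Start = 2, Last = 16
//   getMemOPSizeRangeFromOption(":16",  Start, Last); // Start = 0, Last = 16
//   getMemOPSizeRangeFromOption("16",   Start, Last); // Start = 0, Last = 16
//   getMemOPSizeRangeFromOption("",     Start, Last); // Start = 0, Last = 8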
+void getMemOPSizeRangeFromOption(StringRef MemOPSizeRange, int64_t &RangeStart, + int64_t &RangeLast) { + static const int64_t DefaultMemOPSizeRangeStart = 0; + static const int64_t DefaultMemOPSizeRangeLast = 8; + RangeStart = DefaultMemOPSizeRangeStart; + RangeLast = DefaultMemOPSizeRangeLast; + + if (!MemOPSizeRange.empty()) { + auto Pos = MemOPSizeRange.find(':'); + if (Pos != std::string::npos) { + if (Pos > 0) + MemOPSizeRange.substr(0, Pos).getAsInteger(10, RangeStart); + if (Pos < MemOPSizeRange.size() - 1) + MemOPSizeRange.substr(Pos + 1).getAsInteger(10, RangeLast); + } else + MemOPSizeRange.getAsInteger(10, RangeLast); + } + assert(RangeLast >= RangeStart); +} + +} // end namespace llvm diff --git a/contrib/llvm/lib/ProfileData/InstrProfReader.cpp b/contrib/llvm/lib/ProfileData/InstrProfReader.cpp new file mode 100644 index 000000000000..3b704158a5c5 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/InstrProfReader.cpp @@ -0,0 +1,747 @@ +//===- InstrProfReader.cpp - Instrumented profiling reader ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for reading profiling data for clang's +// instrumentation based PGO and coverage. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/InstrProfReader.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/IR/ProfileSummary.h" +#include "llvm/ProfileData/InstrProf.h" +#include "llvm/ProfileData/ProfileCommon.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/SwapByteOrder.h" +#include <algorithm> +#include <cctype> +#include <cstddef> +#include <cstdint> +#include <limits> +#include <memory> +#include <system_error> +#include <utility> +#include <vector> + +using namespace llvm; + +static Expected<std::unique_ptr<MemoryBuffer>> +setupMemoryBuffer(const Twine &Path) { + ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = + MemoryBuffer::getFileOrSTDIN(Path); + if (std::error_code EC = BufferOrErr.getError()) + return errorCodeToError(EC); + return std::move(BufferOrErr.get()); +} + +static Error initializeReader(InstrProfReader &Reader) { + return Reader.readHeader(); +} + +Expected<std::unique_ptr<InstrProfReader>> +InstrProfReader::create(const Twine &Path) { + // Set up the buffer to read. + auto BufferOrError = setupMemoryBuffer(Path); + if (Error E = BufferOrError.takeError()) + return std::move(E); + return InstrProfReader::create(std::move(BufferOrError.get())); +} + +Expected<std::unique_ptr<InstrProfReader>> +InstrProfReader::create(std::unique_ptr<MemoryBuffer> Buffer) { + // Sanity check the buffer. + if (uint64_t(Buffer->getBufferSize()) > std::numeric_limits<unsigned>::max()) + return make_error<InstrProfError>(instrprof_error::too_large); + + if (Buffer->getBufferSize() == 0) + return make_error<InstrProfError>(instrprof_error::empty_raw_profile); + + std::unique_ptr<InstrProfReader> Result; + // Create the reader. 
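// The probes below go from the most specific check to the least specific
// one: the indexed and raw readers match an exact 8-byte magic value, while
// the text check only requires printable characters, so it is tried last.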
+ if (IndexedInstrProfReader::hasFormat(*Buffer)) + Result.reset(new IndexedInstrProfReader(std::move(Buffer))); + else if (RawInstrProfReader64::hasFormat(*Buffer)) + Result.reset(new RawInstrProfReader64(std::move(Buffer))); + else if (RawInstrProfReader32::hasFormat(*Buffer)) + Result.reset(new RawInstrProfReader32(std::move(Buffer))); + else if (TextInstrProfReader::hasFormat(*Buffer)) + Result.reset(new TextInstrProfReader(std::move(Buffer))); + else + return make_error<InstrProfError>(instrprof_error::unrecognized_format); + + // Initialize the reader and return the result. + if (Error E = initializeReader(*Result)) + return std::move(E); + + return std::move(Result); +} + +Expected<std::unique_ptr<IndexedInstrProfReader>> +IndexedInstrProfReader::create(const Twine &Path) { + // Set up the buffer to read. + auto BufferOrError = setupMemoryBuffer(Path); + if (Error E = BufferOrError.takeError()) + return std::move(E); + return IndexedInstrProfReader::create(std::move(BufferOrError.get())); +} + +Expected<std::unique_ptr<IndexedInstrProfReader>> +IndexedInstrProfReader::create(std::unique_ptr<MemoryBuffer> Buffer) { + // Sanity check the buffer. + if (uint64_t(Buffer->getBufferSize()) > std::numeric_limits<unsigned>::max()) + return make_error<InstrProfError>(instrprof_error::too_large); + + // Create the reader. + if (!IndexedInstrProfReader::hasFormat(*Buffer)) + return make_error<InstrProfError>(instrprof_error::bad_magic); + auto Result = llvm::make_unique<IndexedInstrProfReader>(std::move(Buffer)); + + // Initialize the reader and return the result. + if (Error E = initializeReader(*Result)) + return std::move(E); + + return std::move(Result); +} + +void InstrProfIterator::Increment() { + if (auto E = Reader->readNextRecord(Record)) { + // Handle errors in the reader. + InstrProfError::take(std::move(E)); + *this = InstrProfIterator(); + } +} + +bool TextInstrProfReader::hasFormat(const MemoryBuffer &Buffer) { + // Verify that this really looks like plain ASCII text by checking a + // 'reasonable' number of characters (up to profile magic size). + size_t count = std::min(Buffer.getBufferSize(), sizeof(uint64_t)); + StringRef buffer = Buffer.getBufferStart(); + return count == 0 || + std::all_of(buffer.begin(), buffer.begin() + count, + [](char c) { return isPrint(c) || ::isspace(c); }); +} + +// Read the profile variant flag from the header: ":FE" means this is a FE +// generated profile. ":IR" means this is an IR level profile. Other strings +// with a leading ':' will be reported an error format. 
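// For illustration, an IR-level text profile might begin with:
//
//   :ir
//   main
//   12345
//   2
//   100
//   8
//
// The optional ":ir"/":fe" line (matched case-insensitively) selects the
// profile variant; everything after it is consumed by readNextRecord below.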
+Error TextInstrProfReader::readHeader() { + Symtab.reset(new InstrProfSymtab()); + bool IsIRInstr = false; + if (!Line->startswith(":")) { + IsIRLevelProfile = false; + return success(); + } + StringRef Str = (Line)->substr(1); + if (Str.equals_lower("ir")) + IsIRInstr = true; + else if (Str.equals_lower("fe")) + IsIRInstr = false; + else + return error(instrprof_error::bad_header); + + ++Line; + IsIRLevelProfile = IsIRInstr; + return success(); +} + +Error +TextInstrProfReader::readValueProfileData(InstrProfRecord &Record) { + +#define CHECK_LINE_END(Line) \ + if (Line.is_at_end()) \ + return error(instrprof_error::truncated); +#define READ_NUM(Str, Dst) \ + if ((Str).getAsInteger(10, (Dst))) \ + return error(instrprof_error::malformed); +#define VP_READ_ADVANCE(Val) \ + CHECK_LINE_END(Line); \ + uint32_t Val; \ + READ_NUM((*Line), (Val)); \ + Line++; + + if (Line.is_at_end()) + return success(); + + uint32_t NumValueKinds; + if (Line->getAsInteger(10, NumValueKinds)) { + // No value profile data + return success(); + } + if (NumValueKinds == 0 || NumValueKinds > IPVK_Last + 1) + return error(instrprof_error::malformed); + Line++; + + for (uint32_t VK = 0; VK < NumValueKinds; VK++) { + VP_READ_ADVANCE(ValueKind); + if (ValueKind > IPVK_Last) + return error(instrprof_error::malformed); + VP_READ_ADVANCE(NumValueSites); + if (!NumValueSites) + continue; + + Record.reserveSites(VK, NumValueSites); + for (uint32_t S = 0; S < NumValueSites; S++) { + VP_READ_ADVANCE(NumValueData); + + std::vector<InstrProfValueData> CurrentValues; + for (uint32_t V = 0; V < NumValueData; V++) { + CHECK_LINE_END(Line); + std::pair<StringRef, StringRef> VD = Line->rsplit(':'); + uint64_t TakenCount, Value; + if (ValueKind == IPVK_IndirectCallTarget) { + if (InstrProfSymtab::isExternalSymbol(VD.first)) { + Value = 0; + } else { + if (Error E = Symtab->addFuncName(VD.first)) + return E; + Value = IndexedInstrProf::ComputeHash(VD.first); + } + } else { + READ_NUM(VD.first, Value); + } + READ_NUM(VD.second, TakenCount); + CurrentValues.push_back({Value, TakenCount}); + Line++; + } + Record.addValueData(ValueKind, S, CurrentValues.data(), NumValueData, + nullptr); + } + } + return success(); + +#undef CHECK_LINE_END +#undef READ_NUM +#undef VP_READ_ADVANCE +} + +Error TextInstrProfReader::readNextRecord(NamedInstrProfRecord &Record) { + // Skip empty lines and comments. + while (!Line.is_at_end() && (Line->empty() || Line->startswith("#"))) + ++Line; + // If we hit EOF while looking for a name, we're done. + if (Line.is_at_end()) { + return error(instrprof_error::eof); + } + + // Read the function name. + Record.Name = *Line++; + if (Error E = Symtab->addFuncName(Record.Name)) + return error(std::move(E)); + + // Read the function hash. + if (Line.is_at_end()) + return error(instrprof_error::truncated); + if ((Line++)->getAsInteger(0, Record.Hash)) + return error(instrprof_error::malformed); + + // Read the number of counters. + uint64_t NumCounters; + if (Line.is_at_end()) + return error(instrprof_error::truncated); + if ((Line++)->getAsInteger(10, NumCounters)) + return error(instrprof_error::malformed); + if (NumCounters == 0) + return error(instrprof_error::malformed); + + // Read each counter and fill our internal storage with the values. 
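// A record may be followed by an optional value-profile section; if the next
// line does not parse as an integer, readValueProfileData above simply
// treats it as the start of the next function. A sketch of that optional
// tail for a single indirect-call site (names illustrative):
//
//   1          <- number of value kinds
//   0          <- value kind (indirect call targets)
//   1          <- number of value sites
//   2          <- number of values at this site
//   foo:100
//   bar:20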
+ Record.Clear(); + Record.Counts.reserve(NumCounters); + for (uint64_t I = 0; I < NumCounters; ++I) { + if (Line.is_at_end()) + return error(instrprof_error::truncated); + uint64_t Count; + if ((Line++)->getAsInteger(10, Count)) + return error(instrprof_error::malformed); + Record.Counts.push_back(Count); + } + + // Check if value profile data exists and read it if so. + if (Error E = readValueProfileData(Record)) + return error(std::move(E)); + + return success(); +} + +template <class IntPtrT> +bool RawInstrProfReader<IntPtrT>::hasFormat(const MemoryBuffer &DataBuffer) { + if (DataBuffer.getBufferSize() < sizeof(uint64_t)) + return false; + uint64_t Magic = + *reinterpret_cast<const uint64_t *>(DataBuffer.getBufferStart()); + return RawInstrProf::getMagic<IntPtrT>() == Magic || + sys::getSwappedBytes(RawInstrProf::getMagic<IntPtrT>()) == Magic; +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readHeader() { + if (!hasFormat(*DataBuffer)) + return error(instrprof_error::bad_magic); + if (DataBuffer->getBufferSize() < sizeof(RawInstrProf::Header)) + return error(instrprof_error::bad_header); + auto *Header = reinterpret_cast<const RawInstrProf::Header *>( + DataBuffer->getBufferStart()); + ShouldSwapBytes = Header->Magic != RawInstrProf::getMagic<IntPtrT>(); + return readHeader(*Header); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readNextHeader(const char *CurrentPos) { + const char *End = DataBuffer->getBufferEnd(); + // Skip zero padding between profiles. + while (CurrentPos != End && *CurrentPos == 0) + ++CurrentPos; + // If there's nothing left, we're done. + if (CurrentPos == End) + return make_error<InstrProfError>(instrprof_error::eof); + // If there isn't enough space for another header, this is probably just + // garbage at the end of the file. + if (CurrentPos + sizeof(RawInstrProf::Header) > End) + return make_error<InstrProfError>(instrprof_error::malformed); + // The writer ensures each profile is padded to start at an aligned address. + if (reinterpret_cast<size_t>(CurrentPos) % alignof(uint64_t)) + return make_error<InstrProfError>(instrprof_error::malformed); + // The magic should have the same byte order as in the previous header. + uint64_t Magic = *reinterpret_cast<const uint64_t *>(CurrentPos); + if (Magic != swap(RawInstrProf::getMagic<IntPtrT>())) + return make_error<InstrProfError>(instrprof_error::bad_magic); + + // There's another profile to read, so we need to process the header. 
+ auto *Header = reinterpret_cast<const RawInstrProf::Header *>(CurrentPos); + return readHeader(*Header); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::createSymtab(InstrProfSymtab &Symtab) { + if (Error E = Symtab.create(StringRef(NamesStart, NamesSize))) + return error(std::move(E)); + for (const RawInstrProf::ProfileData<IntPtrT> *I = Data; I != DataEnd; ++I) { + const IntPtrT FPtr = swap(I->FunctionPointer); + if (!FPtr) + continue; + Symtab.mapAddress(FPtr, I->NameRef); + } + return success(); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readHeader( + const RawInstrProf::Header &Header) { + Version = swap(Header.Version); + if (GET_VERSION(Version) != RawInstrProf::Version) + return error(instrprof_error::unsupported_version); + + CountersDelta = swap(Header.CountersDelta); + NamesDelta = swap(Header.NamesDelta); + auto DataSize = swap(Header.DataSize); + auto CountersSize = swap(Header.CountersSize); + NamesSize = swap(Header.NamesSize); + ValueKindLast = swap(Header.ValueKindLast); + + auto DataSizeInBytes = DataSize * sizeof(RawInstrProf::ProfileData<IntPtrT>); + auto PaddingSize = getNumPaddingBytes(NamesSize); + + ptrdiff_t DataOffset = sizeof(RawInstrProf::Header); + ptrdiff_t CountersOffset = DataOffset + DataSizeInBytes; + ptrdiff_t NamesOffset = CountersOffset + sizeof(uint64_t) * CountersSize; + ptrdiff_t ValueDataOffset = NamesOffset + NamesSize + PaddingSize; + + auto *Start = reinterpret_cast<const char *>(&Header); + if (Start + ValueDataOffset > DataBuffer->getBufferEnd()) + return error(instrprof_error::bad_header); + + Data = reinterpret_cast<const RawInstrProf::ProfileData<IntPtrT> *>( + Start + DataOffset); + DataEnd = Data + DataSize; + CountersStart = reinterpret_cast<const uint64_t *>(Start + CountersOffset); + NamesStart = Start + NamesOffset; + ValueDataStart = reinterpret_cast<const uint8_t *>(Start + ValueDataOffset); + + std::unique_ptr<InstrProfSymtab> NewSymtab = make_unique<InstrProfSymtab>(); + if (Error E = createSymtab(*NewSymtab.get())) + return E; + + Symtab = std::move(NewSymtab); + return success(); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readName(NamedInstrProfRecord &Record) { + Record.Name = getName(Data->NameRef); + return success(); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readFuncHash(NamedInstrProfRecord &Record) { + Record.Hash = swap(Data->FuncHash); + return success(); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readRawCounts( + InstrProfRecord &Record) { + uint32_t NumCounters = swap(Data->NumCounters); + IntPtrT CounterPtr = Data->CounterPtr; + if (NumCounters == 0) + return error(instrprof_error::malformed); + + auto RawCounts = makeArrayRef(getCounter(CounterPtr), NumCounters); + auto *NamesStartAsCounter = reinterpret_cast<const uint64_t *>(NamesStart); + + // Check bounds. 
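// The counter pointer comes from an untrusted raw profile, so the derived
// range is validated against the counter section: it must not begin before
// CountersStart and must not run past the start of the names section.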
+ if (RawCounts.data() < CountersStart || + RawCounts.data() + RawCounts.size() > NamesStartAsCounter) + return error(instrprof_error::malformed); + + if (ShouldSwapBytes) { + Record.Counts.clear(); + Record.Counts.reserve(RawCounts.size()); + for (uint64_t Count : RawCounts) + Record.Counts.push_back(swap(Count)); + } else + Record.Counts = RawCounts; + + return success(); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readValueProfilingData( + InstrProfRecord &Record) { + Record.clearValueData(); + CurValueDataSize = 0; + // Need to match the logic in value profile dumper code in compiler-rt: + uint32_t NumValueKinds = 0; + for (uint32_t I = 0; I < IPVK_Last + 1; I++) + NumValueKinds += (Data->NumValueSites[I] != 0); + + if (!NumValueKinds) + return success(); + + Expected<std::unique_ptr<ValueProfData>> VDataPtrOrErr = + ValueProfData::getValueProfData( + ValueDataStart, (const unsigned char *)DataBuffer->getBufferEnd(), + getDataEndianness()); + + if (Error E = VDataPtrOrErr.takeError()) + return E; + + // Note that besides deserialization, this also performs the conversion for + // indirect call targets. The function pointers from the raw profile are + // remapped into function name hashes. + VDataPtrOrErr.get()->deserializeTo(Record, Symtab.get()); + CurValueDataSize = VDataPtrOrErr.get()->getSize(); + return success(); +} + +template <class IntPtrT> +Error RawInstrProfReader<IntPtrT>::readNextRecord(NamedInstrProfRecord &Record) { + if (atEnd()) + // At this point, ValueDataStart field points to the next header. + if (Error E = readNextHeader(getNextHeaderPos())) + return error(std::move(E)); + + // Read name ad set it in Record. + if (Error E = readName(Record)) + return error(std::move(E)); + + // Read FuncHash and set it in Record. + if (Error E = readFuncHash(Record)) + return error(std::move(E)); + + // Read raw counts and set Record. + if (Error E = readRawCounts(Record)) + return error(std::move(E)); + + // Read value data and set Record. + if (Error E = readValueProfilingData(Record)) + return error(std::move(E)); + + // Iterate. + advanceData(); + return success(); +} + +namespace llvm { + +template class RawInstrProfReader<uint32_t>; +template class RawInstrProfReader<uint64_t>; + +} // end namespace llvm + +InstrProfLookupTrait::hash_value_type +InstrProfLookupTrait::ComputeHash(StringRef K) { + return IndexedInstrProf::ComputeHash(HashType, K); +} + +using data_type = InstrProfLookupTrait::data_type; +using offset_type = InstrProfLookupTrait::offset_type; + +bool InstrProfLookupTrait::readValueProfilingData( + const unsigned char *&D, const unsigned char *const End) { + Expected<std::unique_ptr<ValueProfData>> VDataPtrOrErr = + ValueProfData::getValueProfData(D, End, ValueProfDataEndianness); + + if (VDataPtrOrErr.takeError()) + return false; + + VDataPtrOrErr.get()->deserializeTo(DataBuffer.back(), nullptr); + D += VDataPtrOrErr.get()->TotalSize; + + return true; +} + +data_type InstrProfLookupTrait::ReadData(StringRef K, const unsigned char *D, + offset_type N) { + using namespace support; + + // Check if the data is corrupt. If so, don't try to read it. + if (N % sizeof(uint64_t)) + return data_type(); + + DataBuffer.clear(); + std::vector<uint64_t> CounterBuffer; + + const unsigned char *End = D + N; + while (D < End) { + // Read hash. + if (D + sizeof(uint64_t) >= End) + return data_type(); + uint64_t Hash = endian::readNext<uint64_t, little, unaligned>(D); + + // Initialize number of counters for GET_VERSION(FormatVersion) == 1. 
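// In format version 1 each key's payload is exactly one 64-bit hash followed
// by the counters, so the counter count is implied by the payload size;
// newer versions store the count explicitly before the counter values.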
+ uint64_t CountsSize = N / sizeof(uint64_t) - 1; + // If format version is different then read the number of counters. + if (GET_VERSION(FormatVersion) != IndexedInstrProf::ProfVersion::Version1) { + if (D + sizeof(uint64_t) > End) + return data_type(); + CountsSize = endian::readNext<uint64_t, little, unaligned>(D); + } + // Read counter values. + if (D + CountsSize * sizeof(uint64_t) > End) + return data_type(); + + CounterBuffer.clear(); + CounterBuffer.reserve(CountsSize); + for (uint64_t J = 0; J < CountsSize; ++J) + CounterBuffer.push_back(endian::readNext<uint64_t, little, unaligned>(D)); + + DataBuffer.emplace_back(K, Hash, std::move(CounterBuffer)); + + // Read value profiling data. + if (GET_VERSION(FormatVersion) > IndexedInstrProf::ProfVersion::Version2 && + !readValueProfilingData(D, End)) { + DataBuffer.clear(); + return data_type(); + } + } + return DataBuffer; +} + +template <typename HashTableImpl> +Error InstrProfReaderIndex<HashTableImpl>::getRecords( + StringRef FuncName, ArrayRef<NamedInstrProfRecord> &Data) { + auto Iter = HashTable->find(FuncName); + if (Iter == HashTable->end()) + return make_error<InstrProfError>(instrprof_error::unknown_function); + + Data = (*Iter); + if (Data.empty()) + return make_error<InstrProfError>(instrprof_error::malformed); + + return Error::success(); +} + +template <typename HashTableImpl> +Error InstrProfReaderIndex<HashTableImpl>::getRecords( + ArrayRef<NamedInstrProfRecord> &Data) { + if (atEnd()) + return make_error<InstrProfError>(instrprof_error::eof); + + Data = *RecordIterator; + + if (Data.empty()) + return make_error<InstrProfError>(instrprof_error::malformed); + + return Error::success(); +} + +template <typename HashTableImpl> +InstrProfReaderIndex<HashTableImpl>::InstrProfReaderIndex( + const unsigned char *Buckets, const unsigned char *const Payload, + const unsigned char *const Base, IndexedInstrProf::HashT HashType, + uint64_t Version) { + FormatVersion = Version; + HashTable.reset(HashTableImpl::Create( + Buckets, Payload, Base, + typename HashTableImpl::InfoType(HashType, Version))); + RecordIterator = HashTable->data_begin(); +} + +bool IndexedInstrProfReader::hasFormat(const MemoryBuffer &DataBuffer) { + using namespace support; + + if (DataBuffer.getBufferSize() < 8) + return false; + uint64_t Magic = + endian::read<uint64_t, little, aligned>(DataBuffer.getBufferStart()); + // Verify that it's magical. 
+ return Magic == IndexedInstrProf::Magic; +} + +const unsigned char * +IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version, + const unsigned char *Cur) { + using namespace IndexedInstrProf; + using namespace support; + + if (Version >= IndexedInstrProf::Version4) { + const IndexedInstrProf::Summary *SummaryInLE = + reinterpret_cast<const IndexedInstrProf::Summary *>(Cur); + uint64_t NFields = + endian::byte_swap<uint64_t, little>(SummaryInLE->NumSummaryFields); + uint64_t NEntries = + endian::byte_swap<uint64_t, little>(SummaryInLE->NumCutoffEntries); + uint32_t SummarySize = + IndexedInstrProf::Summary::getSize(NFields, NEntries); + std::unique_ptr<IndexedInstrProf::Summary> SummaryData = + IndexedInstrProf::allocSummary(SummarySize); + + const uint64_t *Src = reinterpret_cast<const uint64_t *>(SummaryInLE); + uint64_t *Dst = reinterpret_cast<uint64_t *>(SummaryData.get()); + for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++) + Dst[I] = endian::byte_swap<uint64_t, little>(Src[I]); + + SummaryEntryVector DetailedSummary; + for (unsigned I = 0; I < SummaryData->NumCutoffEntries; I++) { + const IndexedInstrProf::Summary::Entry &Ent = SummaryData->getEntry(I); + DetailedSummary.emplace_back((uint32_t)Ent.Cutoff, Ent.MinBlockCount, + Ent.NumBlocks); + } + // initialize InstrProfSummary using the SummaryData from disk. + this->Summary = llvm::make_unique<ProfileSummary>( + ProfileSummary::PSK_Instr, DetailedSummary, + SummaryData->get(Summary::TotalBlockCount), + SummaryData->get(Summary::MaxBlockCount), + SummaryData->get(Summary::MaxInternalBlockCount), + SummaryData->get(Summary::MaxFunctionCount), + SummaryData->get(Summary::TotalNumBlocks), + SummaryData->get(Summary::TotalNumFunctions)); + return Cur + SummarySize; + } else { + // For older version of profile data, we need to compute on the fly: + using namespace IndexedInstrProf; + + InstrProfSummaryBuilder Builder(ProfileSummaryBuilder::DefaultCutoffs); + // FIXME: This only computes an empty summary. Need to call addRecord for + // all NamedInstrProfRecords to get the correct summary. + this->Summary = Builder.getSummary(); + return Cur; + } +} + +Error IndexedInstrProfReader::readHeader() { + using namespace support; + + const unsigned char *Start = + (const unsigned char *)DataBuffer->getBufferStart(); + const unsigned char *Cur = Start; + if ((const unsigned char *)DataBuffer->getBufferEnd() - Cur < 24) + return error(instrprof_error::truncated); + + auto *Header = reinterpret_cast<const IndexedInstrProf::Header *>(Cur); + Cur += sizeof(IndexedInstrProf::Header); + + // Check the magic number. + uint64_t Magic = endian::byte_swap<uint64_t, little>(Header->Magic); + if (Magic != IndexedInstrProf::Magic) + return error(instrprof_error::bad_magic); + + // Read the version. + uint64_t FormatVersion = endian::byte_swap<uint64_t, little>(Header->Version); + if (GET_VERSION(FormatVersion) > + IndexedInstrProf::ProfVersion::CurrentVersion) + return error(instrprof_error::unsupported_version); + + Cur = readSummary((IndexedInstrProf::ProfVersion)FormatVersion, Cur); + + // Read the hash type and start offset. + IndexedInstrProf::HashT HashType = static_cast<IndexedInstrProf::HashT>( + endian::byte_swap<uint64_t, little>(Header->HashType)); + if (HashType > IndexedInstrProf::HashT::Last) + return error(instrprof_error::unsupported_hash_type); + + uint64_t HashOffset = endian::byte_swap<uint64_t, little>(Header->HashOffset); + + // The rest of the file is an on disk hash table. 
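// Start is the base of the buffer, Cur now points just past the header and
// summary, and HashOffset locates the bucket array, so the arguments handed
// to the on-disk hash table below are, in order: the buckets, the start of
// the payload, and the base address used to resolve stored offsets.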
+ InstrProfReaderIndexBase *IndexPtr = nullptr; + IndexPtr = new InstrProfReaderIndex<OnDiskHashTableImplV3>( + Start + HashOffset, Cur, Start, HashType, FormatVersion); + Index.reset(IndexPtr); + return success(); +} + +InstrProfSymtab &IndexedInstrProfReader::getSymtab() { + if (Symtab.get()) + return *Symtab.get(); + + std::unique_ptr<InstrProfSymtab> NewSymtab = make_unique<InstrProfSymtab>(); + if (Error E = Index->populateSymtab(*NewSymtab.get())) { + consumeError(error(InstrProfError::take(std::move(E)))); + } + + Symtab = std::move(NewSymtab); + return *Symtab.get(); +} + +Expected<InstrProfRecord> +IndexedInstrProfReader::getInstrProfRecord(StringRef FuncName, + uint64_t FuncHash) { + ArrayRef<NamedInstrProfRecord> Data; + Error Err = Index->getRecords(FuncName, Data); + if (Err) + return std::move(Err); + // Found it. Look for counters with the right hash. + for (unsigned I = 0, E = Data.size(); I < E; ++I) { + // Check for a match and fill the vector if there is one. + if (Data[I].Hash == FuncHash) { + return std::move(Data[I]); + } + } + return error(instrprof_error::hash_mismatch); +} + +Error IndexedInstrProfReader::getFunctionCounts(StringRef FuncName, + uint64_t FuncHash, + std::vector<uint64_t> &Counts) { + Expected<InstrProfRecord> Record = getInstrProfRecord(FuncName, FuncHash); + if (Error E = Record.takeError()) + return error(std::move(E)); + + Counts = Record.get().Counts; + return success(); +} + +Error IndexedInstrProfReader::readNextRecord(NamedInstrProfRecord &Record) { + ArrayRef<NamedInstrProfRecord> Data; + + Error E = Index->getRecords(Data); + if (E) + return error(std::move(E)); + + Record = Data[RecordIndex++]; + if (RecordIndex >= Data.size()) { + Index->advanceToNextKey(); + RecordIndex = 0; + } + return success(); +} diff --git a/contrib/llvm/lib/ProfileData/InstrProfWriter.cpp b/contrib/llvm/lib/ProfileData/InstrProfWriter.cpp new file mode 100644 index 000000000000..18b9deec158f --- /dev/null +++ b/contrib/llvm/lib/ProfileData/InstrProfWriter.cpp @@ -0,0 +1,390 @@ +//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for writing profiling data for clang's +// instrumentation based PGO and coverage. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/InstrProfWriter.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/IR/ProfileSummary.h" +#include "llvm/ProfileData/InstrProf.h" +#include "llvm/ProfileData/ProfileCommon.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/EndianStream.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/OnDiskHashTable.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstdint> +#include <memory> +#include <string> +#include <tuple> +#include <utility> +#include <vector> + +using namespace llvm; + +// A struct to define how the data stream should be patched. For Indexed +// profiling, only uint64_t data type is needed. +struct PatchItem { + uint64_t Pos; // Where to patch. + uint64_t *D; // Pointer to an array of source data. + int N; // Number of elements in \c D array. 
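  // For example, writeImpl below records {HashTableStartFieldOffset,
  // &HashTableStart, 1} to back-fill the Header.HashOffset word once the
  // hash table has actually been emitted and its start offset is known.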
+}; + +namespace llvm { + +// A wrapper class to abstract writer stream with support of bytes +// back patching. +class ProfOStream { +public: + ProfOStream(raw_fd_ostream &FD) + : IsFDOStream(true), OS(FD), LE(FD, support::little) {} + ProfOStream(raw_string_ostream &STR) + : IsFDOStream(false), OS(STR), LE(STR, support::little) {} + + uint64_t tell() { return OS.tell(); } + void write(uint64_t V) { LE.write<uint64_t>(V); } + + // \c patch can only be called when all data is written and flushed. + // For raw_string_ostream, the patch is done on the target string + // directly and it won't be reflected in the stream's internal buffer. + void patch(PatchItem *P, int NItems) { + using namespace support; + + if (IsFDOStream) { + raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS); + for (int K = 0; K < NItems; K++) { + FDOStream.seek(P[K].Pos); + for (int I = 0; I < P[K].N; I++) + write(P[K].D[I]); + } + } else { + raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS); + std::string &Data = SOStream.str(); // with flush + for (int K = 0; K < NItems; K++) { + for (int I = 0; I < P[K].N; I++) { + uint64_t Bytes = endian::byte_swap<uint64_t, little>(P[K].D[I]); + Data.replace(P[K].Pos + I * sizeof(uint64_t), sizeof(uint64_t), + (const char *)&Bytes, sizeof(uint64_t)); + } + } + } + } + + // If \c OS is an instance of \c raw_fd_ostream, this field will be + // true. Otherwise, \c OS will be an raw_string_ostream. + bool IsFDOStream; + raw_ostream &OS; + support::endian::Writer LE; +}; + +class InstrProfRecordWriterTrait { +public: + using key_type = StringRef; + using key_type_ref = StringRef; + + using data_type = const InstrProfWriter::ProfilingData *const; + using data_type_ref = const InstrProfWriter::ProfilingData *const; + + using hash_value_type = uint64_t; + using offset_type = uint64_t; + + support::endianness ValueProfDataEndianness = support::little; + InstrProfSummaryBuilder *SummaryBuilder; + + InstrProfRecordWriterTrait() = default; + + static hash_value_type ComputeHash(key_type_ref K) { + return IndexedInstrProf::ComputeHash(K); + } + + static std::pair<offset_type, offset_type> + EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) { + using namespace support; + + endian::Writer LE(Out, little); + + offset_type N = K.size(); + LE.write<offset_type>(N); + + offset_type M = 0; + for (const auto &ProfileData : *V) { + const InstrProfRecord &ProfRecord = ProfileData.second; + M += sizeof(uint64_t); // The function hash + M += sizeof(uint64_t); // The size of the Counts vector + M += ProfRecord.Counts.size() * sizeof(uint64_t); + + // Value data + M += ValueProfData::getSize(ProfileData.second); + } + LE.write<offset_type>(M); + + return std::make_pair(N, M); + } + + void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) { + Out.write(K.data(), N); + } + + void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) { + using namespace support; + + endian::Writer LE(Out, little); + for (const auto &ProfileData : *V) { + const InstrProfRecord &ProfRecord = ProfileData.second; + SummaryBuilder->addRecord(ProfRecord); + + LE.write<uint64_t>(ProfileData.first); // Function hash + LE.write<uint64_t>(ProfRecord.Counts.size()); + for (uint64_t I : ProfRecord.Counts) + LE.write<uint64_t>(I); + + // Write value data + std::unique_ptr<ValueProfData> VDataPtr = + ValueProfData::serializeFrom(ProfileData.second); + uint32_t S = VDataPtr->getSize(); + VDataPtr->swapBytesFromHost(ValueProfDataEndianness); + Out.write((const char 
*)VDataPtr.get(), S); + } + } +}; + +} // end namespace llvm + +InstrProfWriter::InstrProfWriter(bool Sparse) + : Sparse(Sparse), InfoObj(new InstrProfRecordWriterTrait()) {} + +InstrProfWriter::~InstrProfWriter() { delete InfoObj; } + +// Internal interface for testing purpose only. +void InstrProfWriter::setValueProfDataEndianness( + support::endianness Endianness) { + InfoObj->ValueProfDataEndianness = Endianness; +} + +void InstrProfWriter::setOutputSparse(bool Sparse) { + this->Sparse = Sparse; +} + +void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight, + function_ref<void(Error)> Warn) { + auto Name = I.Name; + auto Hash = I.Hash; + addRecord(Name, Hash, std::move(I), Weight, Warn); +} + +void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash, + InstrProfRecord &&I, uint64_t Weight, + function_ref<void(Error)> Warn) { + auto &ProfileDataMap = FunctionData[Name]; + + bool NewFunc; + ProfilingData::iterator Where; + std::tie(Where, NewFunc) = + ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord())); + InstrProfRecord &Dest = Where->second; + + auto MapWarn = [&](instrprof_error E) { + Warn(make_error<InstrProfError>(E)); + }; + + if (NewFunc) { + // We've never seen a function with this name and hash, add it. + Dest = std::move(I); + if (Weight > 1) + Dest.scale(Weight, MapWarn); + } else { + // We're updating a function we've seen before. + Dest.merge(I, Weight, MapWarn); + } + + Dest.sortValueData(); +} + +void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW, + function_ref<void(Error)> Warn) { + for (auto &I : IPW.FunctionData) + for (auto &Func : I.getValue()) + addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn); +} + +bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) { + if (!Sparse) + return true; + for (const auto &Func : PD) { + const InstrProfRecord &IPR = Func.second; + if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; })) + return true; + } + return false; +} + +static void setSummary(IndexedInstrProf::Summary *TheSummary, + ProfileSummary &PS) { + using namespace IndexedInstrProf; + + std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary(); + TheSummary->NumSummaryFields = Summary::NumKinds; + TheSummary->NumCutoffEntries = Res.size(); + TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount()); + TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount()); + TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount()); + TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount()); + TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts()); + TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions()); + for (unsigned I = 0; I < Res.size(); I++) + TheSummary->setEntry(I, Res[I]); +} + +void InstrProfWriter::writeImpl(ProfOStream &OS) { + using namespace IndexedInstrProf; + + OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator; + + InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs); + InfoObj->SummaryBuilder = &ISB; + + // Populate the hash table generator. + for (const auto &I : FunctionData) + if (shouldEncodeData(I.getValue())) + Generator.insert(I.getKey(), &I.getValue()); + // Write the header. 
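// The header is first emitted with a zero placeholder for HashOffset; its
// file position is remembered so that, after the hash table and the summary
// have been written, ProfOStream::patch can back-fill the real values.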
+ IndexedInstrProf::Header Header; + Header.Magic = IndexedInstrProf::Magic; + Header.Version = IndexedInstrProf::ProfVersion::CurrentVersion; + if (ProfileKind == PF_IRLevel) + Header.Version |= VARIANT_MASK_IR_PROF; + Header.Unused = 0; + Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType); + Header.HashOffset = 0; + int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t); + + // Only write out all the fields except 'HashOffset'. We need + // to remember the offset of that field to allow back patching + // later. + for (int I = 0; I < N - 1; I++) + OS.write(reinterpret_cast<uint64_t *>(&Header)[I]); + + // Save the location of Header.HashOffset field in \c OS. + uint64_t HashTableStartFieldOffset = OS.tell(); + // Reserve the space for HashOffset field. + OS.write(0); + + // Reserve space to write profile summary data. + uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size(); + uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries); + // Remember the summary offset. + uint64_t SummaryOffset = OS.tell(); + for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++) + OS.write(0); + + // Write the hash table. + uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj); + + // Allocate space for data to be serialized out. + std::unique_ptr<IndexedInstrProf::Summary> TheSummary = + IndexedInstrProf::allocSummary(SummarySize); + // Compute the Summary and copy the data to the data + // structure to be serialized out (to disk or buffer). + std::unique_ptr<ProfileSummary> PS = ISB.getSummary(); + setSummary(TheSummary.get(), *PS); + InfoObj->SummaryBuilder = nullptr; + + // Now do the final patch: + PatchItem PatchItems[] = { + // Patch the Header.HashOffset field. + {HashTableStartFieldOffset, &HashTableStart, 1}, + // Patch the summary data. + {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()), + (int)(SummarySize / sizeof(uint64_t))}}; + OS.patch(PatchItems, sizeof(PatchItems) / sizeof(*PatchItems)); +} + +void InstrProfWriter::write(raw_fd_ostream &OS) { + // Write the hash table. + ProfOStream POS(OS); + writeImpl(POS); +} + +std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() { + std::string Data; + raw_string_ostream OS(Data); + ProfOStream POS(OS); + // Write the hash table. + writeImpl(POS); + // Return this in an aligned memory buffer. 
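// A minimal usage sketch for this path (Rec and Warn are assumed to be an
// already-populated NamedInstrProfRecord and an error handler, respectively):
//
//   InstrProfWriter Writer;
//   Writer.addRecord(std::move(Rec), /*Weight=*/1, Warn);
//   std::unique_ptr<MemoryBuffer> Buf = Writer.writeBuffer();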
+ return MemoryBuffer::getMemBufferCopy(Data); +} + +static const char *ValueProfKindStr[] = { +#define VALUE_PROF_KIND(Enumerator, Value) #Enumerator, +#include "llvm/ProfileData/InstrProfData.inc" +}; + +void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash, + const InstrProfRecord &Func, + InstrProfSymtab &Symtab, + raw_fd_ostream &OS) { + OS << Name << "\n"; + OS << "# Func Hash:\n" << Hash << "\n"; + OS << "# Num Counters:\n" << Func.Counts.size() << "\n"; + OS << "# Counter Values:\n"; + for (uint64_t Count : Func.Counts) + OS << Count << "\n"; + + uint32_t NumValueKinds = Func.getNumValueKinds(); + if (!NumValueKinds) { + OS << "\n"; + return; + } + + OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n"; + for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) { + uint32_t NS = Func.getNumValueSites(VK); + if (!NS) + continue; + OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n"; + OS << "# NumValueSites:\n" << NS << "\n"; + for (uint32_t S = 0; S < NS; S++) { + uint32_t ND = Func.getNumValueDataForSite(VK, S); + OS << ND << "\n"; + std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S); + for (uint32_t I = 0; I < ND; I++) { + if (VK == IPVK_IndirectCallTarget) + OS << Symtab.getFuncNameOrExternalSymbol(VD[I].Value) << ":" + << VD[I].Count << "\n"; + else + OS << VD[I].Value << ":" << VD[I].Count << "\n"; + } + } + } + + OS << "\n"; +} + +Error InstrProfWriter::writeText(raw_fd_ostream &OS) { + if (ProfileKind == PF_IRLevel) + OS << "# IR level Instrumentation Flag\n:ir\n"; + InstrProfSymtab Symtab; + for (const auto &I : FunctionData) + if (shouldEncodeData(I.getValue())) + if (Error E = Symtab.addFuncName(I.getKey())) + return E; + + for (const auto &I : FunctionData) + if (shouldEncodeData(I.getValue())) + for (const auto &Func : I.getValue()) + writeRecordInText(I.getKey(), Func.first, Func.second, Symtab, OS); + return Error::success(); +} diff --git a/contrib/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp b/contrib/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp new file mode 100644 index 000000000000..62f00d693c68 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp @@ -0,0 +1,115 @@ +//=-- ProfilesummaryBuilder.cpp - Profile summary computation ---------------=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains support for computing profile summary data. +// +//===----------------------------------------------------------------------===// + +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Type.h" +#include "llvm/ProfileData/InstrProf.h" +#include "llvm/ProfileData/ProfileCommon.h" +#include "llvm/ProfileData/SampleProf.h" +#include "llvm/Support/Casting.h" + +using namespace llvm; + +// A set of cutoff values. Each value, when divided by ProfileSummary::Scale +// (which is 1000000) is a desired percentile of total counts. 
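// For example, the 990000 entry corresponds to the 99th percentile:
// computeDetailedSummary below computes DesiredCount = TotalCount * 990000 /
// 1000000 and records the count and the number of blocks at which the
// running sum of counted values first reaches that target.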
+static const uint32_t DefaultCutoffsData[] = { + 10000, /* 1% */ + 100000, /* 10% */ + 200000, 300000, 400000, 500000, 600000, 700000, 800000, + 900000, 950000, 990000, 999000, 999900, 999990, 999999}; +const ArrayRef<uint32_t> ProfileSummaryBuilder::DefaultCutoffs = + DefaultCutoffsData; + +void InstrProfSummaryBuilder::addRecord(const InstrProfRecord &R) { + // The first counter is not necessarily an entry count for IR + // instrumentation profiles. + // Eventually MaxFunctionCount will become obsolete and this can be + // removed. + addEntryCount(R.Counts[0]); + for (size_t I = 1, E = R.Counts.size(); I < E; ++I) + addInternalCount(R.Counts[I]); +} + +// To compute the detailed summary, we consider each line containing samples as +// equivalent to a block with a count in the instrumented profile. +void SampleProfileSummaryBuilder::addRecord( + const sampleprof::FunctionSamples &FS) { + NumFunctions++; + if (FS.getHeadSamples() > MaxFunctionCount) + MaxFunctionCount = FS.getHeadSamples(); + for (const auto &I : FS.getBodySamples()) + addCount(I.second.getSamples()); +} + +// The argument to this method is a vector of cutoff percentages and the return +// value is a vector of (Cutoff, MinCount, NumCounts) triplets. +void ProfileSummaryBuilder::computeDetailedSummary() { + if (DetailedSummaryCutoffs.empty()) + return; + llvm::sort(DetailedSummaryCutoffs.begin(), DetailedSummaryCutoffs.end()); + auto Iter = CountFrequencies.begin(); + const auto End = CountFrequencies.end(); + + uint32_t CountsSeen = 0; + uint64_t CurrSum = 0, Count = 0; + + for (const uint32_t Cutoff : DetailedSummaryCutoffs) { + assert(Cutoff <= 999999); + APInt Temp(128, TotalCount); + APInt N(128, Cutoff); + APInt D(128, ProfileSummary::Scale); + Temp *= N; + Temp = Temp.sdiv(D); + uint64_t DesiredCount = Temp.getZExtValue(); + assert(DesiredCount <= TotalCount); + while (CurrSum < DesiredCount && Iter != End) { + Count = Iter->first; + uint32_t Freq = Iter->second; + CurrSum += (Count * Freq); + CountsSeen += Freq; + Iter++; + } + assert(CurrSum >= DesiredCount); + ProfileSummaryEntry PSE = {Cutoff, Count, CountsSeen}; + DetailedSummary.push_back(PSE); + } +} + +std::unique_ptr<ProfileSummary> SampleProfileSummaryBuilder::getSummary() { + computeDetailedSummary(); + return llvm::make_unique<ProfileSummary>( + ProfileSummary::PSK_Sample, DetailedSummary, TotalCount, MaxCount, 0, + MaxFunctionCount, NumCounts, NumFunctions); +} + +std::unique_ptr<ProfileSummary> InstrProfSummaryBuilder::getSummary() { + computeDetailedSummary(); + return llvm::make_unique<ProfileSummary>( + ProfileSummary::PSK_Instr, DetailedSummary, TotalCount, MaxCount, + MaxInternalBlockCount, MaxFunctionCount, NumCounts, NumFunctions); +} + +void InstrProfSummaryBuilder::addEntryCount(uint64_t Count) { + addCount(Count); + NumFunctions++; + if (Count > MaxFunctionCount) + MaxFunctionCount = Count; +} + +void InstrProfSummaryBuilder::addInternalCount(uint64_t Count) { + addCount(Count); + if (Count > MaxInternalBlockCount) + MaxInternalBlockCount = Count; +} diff --git a/contrib/llvm/lib/ProfileData/SampleProf.cpp b/contrib/llvm/lib/ProfileData/SampleProf.cpp new file mode 100644 index 000000000000..30438ba7962a --- /dev/null +++ b/contrib/llvm/lib/ProfileData/SampleProf.cpp @@ -0,0 +1,183 @@ +//=-- SampleProf.cpp - Sample profiling format support --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file contains common definitions used in the reading and writing of +// sample profile data. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/SampleProf.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/raw_ostream.h" +#include <string> +#include <system_error> + +using namespace llvm; +using namespace sampleprof; + +namespace { + +// FIXME: This class is only here to support the transition to llvm::Error. It +// will be removed once this transition is complete. Clients should prefer to +// deal with the Error value directly, rather than converting to error_code. +class SampleProfErrorCategoryType : public std::error_category { + const char *name() const noexcept override { return "llvm.sampleprof"; } + + std::string message(int IE) const override { + sampleprof_error E = static_cast<sampleprof_error>(IE); + switch (E) { + case sampleprof_error::success: + return "Success"; + case sampleprof_error::bad_magic: + return "Invalid sample profile data (bad magic)"; + case sampleprof_error::unsupported_version: + return "Unsupported sample profile format version"; + case sampleprof_error::too_large: + return "Too much profile data"; + case sampleprof_error::truncated: + return "Truncated profile data"; + case sampleprof_error::malformed: + return "Malformed sample profile data"; + case sampleprof_error::unrecognized_format: + return "Unrecognized sample profile encoding format"; + case sampleprof_error::unsupported_writing_format: + return "Profile encoding format unsupported for writing operations"; + case sampleprof_error::truncated_name_table: + return "Truncated function name table"; + case sampleprof_error::not_implemented: + return "Unimplemented feature"; + case sampleprof_error::counter_overflow: + return "Counter overflow"; + } + llvm_unreachable("A value of sampleprof_error has no message."); + } +}; + +} // end anonymous namespace + +static ManagedStatic<SampleProfErrorCategoryType> ErrorCategory; + +const std::error_category &llvm::sampleprof_category() { + return *ErrorCategory; +} + +void LineLocation::print(raw_ostream &OS) const { + OS << LineOffset; + if (Discriminator > 0) + OS << "." << Discriminator; +} + +raw_ostream &llvm::sampleprof::operator<<(raw_ostream &OS, + const LineLocation &Loc) { + Loc.print(OS); + return OS; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void LineLocation::dump() const { print(dbgs()); } +#endif + +/// Print the sample record to the stream \p OS indented by \p Indent. +void SampleRecord::print(raw_ostream &OS, unsigned Indent) const { + OS << NumSamples; + if (hasCalls()) { + OS << ", calls:"; + for (const auto &I : getCallTargets()) + OS << " " << I.first() << ":" << I.second; + } + OS << "\n"; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void SampleRecord::dump() const { print(dbgs(), 0); } +#endif + +raw_ostream &llvm::sampleprof::operator<<(raw_ostream &OS, + const SampleRecord &Sample) { + Sample.print(OS, 0); + return OS; +} + +/// Print the samples collected for a function on stream \p OS. 
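/// The output has roughly the following shape (values illustrative):
///
///   1200, 10, 2 sampled lines
///   Samples collected in the function's body {
///     1: 1000
///     2.1: 200
///   }
///   No inlined callsites in this function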
+void FunctionSamples::print(raw_ostream &OS, unsigned Indent) const { + OS << TotalSamples << ", " << TotalHeadSamples << ", " << BodySamples.size() + << " sampled lines\n"; + + OS.indent(Indent); + if (!BodySamples.empty()) { + OS << "Samples collected in the function's body {\n"; + SampleSorter<LineLocation, SampleRecord> SortedBodySamples(BodySamples); + for (const auto &SI : SortedBodySamples.get()) { + OS.indent(Indent + 2); + OS << SI->first << ": " << SI->second; + } + OS.indent(Indent); + OS << "}\n"; + } else { + OS << "No samples collected in the function's body\n"; + } + + OS.indent(Indent); + if (!CallsiteSamples.empty()) { + OS << "Samples collected in inlined callsites {\n"; + SampleSorter<LineLocation, FunctionSamplesMap> SortedCallsiteSamples( + CallsiteSamples); + for (const auto &CS : SortedCallsiteSamples.get()) { + for (const auto &FS : CS->second) { + OS.indent(Indent + 2); + OS << CS->first << ": inlined callee: " << FS.second.getName() << ": "; + FS.second.print(OS, Indent + 4); + } + } + OS << "}\n"; + } else { + OS << "No inlined callsites in this function\n"; + } +} + +raw_ostream &llvm::sampleprof::operator<<(raw_ostream &OS, + const FunctionSamples &FS) { + FS.print(OS); + return OS; +} + +unsigned FunctionSamples::getOffset(const DILocation *DIL) { + return (DIL->getLine() - DIL->getScope()->getSubprogram()->getLine()) & + 0xffff; +} + +const FunctionSamples * +FunctionSamples::findFunctionSamples(const DILocation *DIL) const { + assert(DIL); + SmallVector<std::pair<LineLocation, StringRef>, 10> S; + + const DILocation *PrevDIL = DIL; + for (DIL = DIL->getInlinedAt(); DIL; DIL = DIL->getInlinedAt()) { + S.push_back(std::make_pair( + LineLocation(getOffset(DIL), DIL->getBaseDiscriminator()), + PrevDIL->getScope()->getSubprogram()->getLinkageName())); + PrevDIL = DIL; + } + if (S.size() == 0) + return this; + const FunctionSamples *FS = this; + for (int i = S.size() - 1; i >= 0 && FS != nullptr; i--) { + FS = FS->findFunctionSamplesAt(S[i].first, S[i].second); + } + return FS; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void FunctionSamples::dump() const { print(dbgs(), 0); } +#endif diff --git a/contrib/llvm/lib/ProfileData/SampleProfReader.cpp b/contrib/llvm/lib/ProfileData/SampleProfReader.cpp new file mode 100644 index 000000000000..79335e67cd98 --- /dev/null +++ b/contrib/llvm/lib/ProfileData/SampleProfReader.cpp @@ -0,0 +1,898 @@ +//===- SampleProfReader.cpp - Read LLVM sample profile data ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the class that reads LLVM sample profiles. It +// supports three file formats: text, binary and gcov. +// +// The textual representation is useful for debugging and testing purposes. The +// binary representation is more compact, resulting in smaller file sizes. +// +// The gcov encoding is the one generated by GCC's AutoFDO profile creation +// tool (https://github.com/google/autofdo) +// +// All three encodings can be used interchangeably as an input sample profile. 
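// A small text-format profile, as accepted by the text reader below
// (numbers illustrative):
//
//   main:184019:0
//    4: 534
//    6: inline1:1000
//     1: 1000
//
// The first line is the function header (name:total-samples:head-samples);
// indented lines are body or inlined-callsite samples, with one extra level
// of indentation for samples attributed to the inlined callee.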
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/SampleProfReader.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/IR/ProfileSummary.h" +#include "llvm/ProfileData/ProfileCommon.h" +#include "llvm/ProfileData/SampleProf.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/LEB128.h" +#include "llvm/Support/LineIterator.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstddef> +#include <cstdint> +#include <limits> +#include <memory> +#include <system_error> +#include <vector> + +using namespace llvm; +using namespace sampleprof; + +/// Dump the function profile for \p FName. +/// +/// \param FName Name of the function to print. +/// \param OS Stream to emit the output to. +void SampleProfileReader::dumpFunctionProfile(StringRef FName, + raw_ostream &OS) { + OS << "Function: " << FName << ": " << Profiles[FName]; +} + +/// Dump all the function profiles found on stream \p OS. +void SampleProfileReader::dump(raw_ostream &OS) { + for (const auto &I : Profiles) + dumpFunctionProfile(I.getKey(), OS); +} + +/// Parse \p Input as function head. +/// +/// Parse one line of \p Input, and update function name in \p FName, +/// function's total sample count in \p NumSamples, function's entry +/// count in \p NumHeadSamples. +/// +/// \returns true if parsing is successful. +static bool ParseHead(const StringRef &Input, StringRef &FName, + uint64_t &NumSamples, uint64_t &NumHeadSamples) { + if (Input[0] == ' ') + return false; + size_t n2 = Input.rfind(':'); + size_t n1 = Input.rfind(':', n2 - 1); + FName = Input.substr(0, n1); + if (Input.substr(n1 + 1, n2 - n1 - 1).getAsInteger(10, NumSamples)) + return false; + if (Input.substr(n2 + 1).getAsInteger(10, NumHeadSamples)) + return false; + return true; +} + +/// Returns true if line offset \p L is legal (only has 16 bits). +static bool isOffsetLegal(unsigned L) { return (L & 0xffff) == L; } + +/// Parse \p Input as line sample. +/// +/// \param Input input line. +/// \param IsCallsite true if the line represents an inlined callsite. +/// \param Depth the depth of the inline stack. +/// \param NumSamples total samples of the line/inlined callsite. +/// \param LineOffset line offset to the start of the function. +/// \param Discriminator discriminator of the line. +/// \param TargetCountMap map from indirect call target to count. +/// +/// returns true if parsing is successful. 
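+///
+/// For example, the body sample line
+///   2.1: 10 _Z3fooi:5 _Z3bari:5
+/// yields LineOffset 2, Discriminator 1, NumSamples 10 and two entries in
+/// \p TargetCountMap, while the callsite line
+///   3: _Z3bazi:120
+/// sets \p IsCallsite and returns the callee name and its total sample count.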
+static bool ParseLine(const StringRef &Input, bool &IsCallsite, uint32_t &Depth, + uint64_t &NumSamples, uint32_t &LineOffset, + uint32_t &Discriminator, StringRef &CalleeName, + DenseMap<StringRef, uint64_t> &TargetCountMap) { + for (Depth = 0; Input[Depth] == ' '; Depth++) + ; + if (Depth == 0) + return false; + + size_t n1 = Input.find(':'); + StringRef Loc = Input.substr(Depth, n1 - Depth); + size_t n2 = Loc.find('.'); + if (n2 == StringRef::npos) { + if (Loc.getAsInteger(10, LineOffset) || !isOffsetLegal(LineOffset)) + return false; + Discriminator = 0; + } else { + if (Loc.substr(0, n2).getAsInteger(10, LineOffset)) + return false; + if (Loc.substr(n2 + 1).getAsInteger(10, Discriminator)) + return false; + } + + StringRef Rest = Input.substr(n1 + 2); + if (Rest[0] >= '0' && Rest[0] <= '9') { + IsCallsite = false; + size_t n3 = Rest.find(' '); + if (n3 == StringRef::npos) { + if (Rest.getAsInteger(10, NumSamples)) + return false; + } else { + if (Rest.substr(0, n3).getAsInteger(10, NumSamples)) + return false; + } + // Find call targets and their sample counts. + // Note: In some cases, there are symbols in the profile which are not + // mangled. To accommodate such cases, use colon + integer pairs as the + // anchor points. + // An example: + // _M_construct<char *>:1000 string_view<std::allocator<char> >:437 + // ":1000" and ":437" are used as anchor points so the string above will + // be interpreted as + // target: _M_construct<char *> + // count: 1000 + // target: string_view<std::allocator<char> > + // count: 437 + while (n3 != StringRef::npos) { + n3 += Rest.substr(n3).find_first_not_of(' '); + Rest = Rest.substr(n3); + n3 = Rest.find_first_of(':'); + if (n3 == StringRef::npos || n3 == 0) + return false; + + StringRef Target; + uint64_t count, n4; + while (true) { + // Get the segment after the current colon. + StringRef AfterColon = Rest.substr(n3 + 1); + // Get the target symbol before the current colon. + Target = Rest.substr(0, n3); + // Check if the word after the current colon is an integer. + n4 = AfterColon.find_first_of(' '); + n4 = (n4 != StringRef::npos) ? n3 + n4 + 1 : Rest.size(); + StringRef WordAfterColon = Rest.substr(n3 + 1, n4 - n3 - 1); + if (!WordAfterColon.getAsInteger(10, count)) + break; + + // Try to find the next colon. + uint64_t n5 = AfterColon.find_first_of(':'); + if (n5 == StringRef::npos) + return false; + n3 += n5 + 1; + } + + // An anchor point is found. Save the {target, count} pair + TargetCountMap[Target] = count; + if (n4 == Rest.size()) + break; + // Change n3 to the next blank space after colon + integer pair. + n3 = n4; + } + } else { + IsCallsite = true; + size_t n3 = Rest.find_last_of(':'); + CalleeName = Rest.substr(0, n3); + if (Rest.substr(n3 + 1).getAsInteger(10, NumSamples)) + return false; + } + return true; +} + +/// Load samples from a text file. +/// +/// See the documentation at the top of the file for an explanation of +/// the expected format. +/// +/// \returns true if the file was loaded successfully, false otherwise. +std::error_code SampleProfileReaderText::read() { + line_iterator LineIt(*Buffer, /*SkipBlanks=*/true, '#'); + sampleprof_error Result = sampleprof_error::success; + + InlineCallStack InlineStack; + + for (; !LineIt.is_at_eof(); ++LineIt) { + if ((*LineIt)[(*LineIt).find_first_not_of(' ')] == '#') + continue; + // Read the header of each function. + // + // Note that for function identifiers we are actually expecting + // mangled names, but we may not always get them. 
This happens when + // the compiler decides not to emit the function (e.g., it was inlined + // and removed). In this case, the binary will not have the linkage + // name for the function, so the profiler will emit the function's + // unmangled name, which may contain characters like ':' and '>' in its + // name (member functions, templates, etc). + // + // The only requirement we place on the identifier, then, is that it + // should not begin with a number. + if ((*LineIt)[0] != ' ') { + uint64_t NumSamples, NumHeadSamples; + StringRef FName; + if (!ParseHead(*LineIt, FName, NumSamples, NumHeadSamples)) { + reportError(LineIt.line_number(), + "Expected 'mangled_name:NUM:NUM', found " + *LineIt); + return sampleprof_error::malformed; + } + Profiles[FName] = FunctionSamples(); + FunctionSamples &FProfile = Profiles[FName]; + FProfile.setName(FName); + MergeResult(Result, FProfile.addTotalSamples(NumSamples)); + MergeResult(Result, FProfile.addHeadSamples(NumHeadSamples)); + InlineStack.clear(); + InlineStack.push_back(&FProfile); + } else { + uint64_t NumSamples; + StringRef FName; + DenseMap<StringRef, uint64_t> TargetCountMap; + bool IsCallsite; + uint32_t Depth, LineOffset, Discriminator; + if (!ParseLine(*LineIt, IsCallsite, Depth, NumSamples, LineOffset, + Discriminator, FName, TargetCountMap)) { + reportError(LineIt.line_number(), + "Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found " + + *LineIt); + return sampleprof_error::malformed; + } + if (IsCallsite) { + while (InlineStack.size() > Depth) { + InlineStack.pop_back(); + } + FunctionSamples &FSamples = InlineStack.back()->functionSamplesAt( + LineLocation(LineOffset, Discriminator))[FName]; + FSamples.setName(FName); + MergeResult(Result, FSamples.addTotalSamples(NumSamples)); + InlineStack.push_back(&FSamples); + } else { + while (InlineStack.size() > Depth) { + InlineStack.pop_back(); + } + FunctionSamples &FProfile = *InlineStack.back(); + for (const auto &name_count : TargetCountMap) { + MergeResult(Result, FProfile.addCalledTargetSamples( + LineOffset, Discriminator, name_count.first, + name_count.second)); + } + MergeResult(Result, FProfile.addBodySamples(LineOffset, Discriminator, + NumSamples)); + } + } + } + if (Result == sampleprof_error::success) + computeSummary(); + + return Result; +} + +bool SampleProfileReaderText::hasFormat(const MemoryBuffer &Buffer) { + bool result = false; + + // Check that the first non-comment line is a valid function header. 
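+  // A well-formed header has the shape 'mangled_name:total_samples:head_samples',
+  // e.g. 'main:184019:0'; ParseHead() rejects anything else, including lines
+  // that begin with a space.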
+ line_iterator LineIt(Buffer, /*SkipBlanks=*/true, '#'); + if (!LineIt.is_at_eof()) { + if ((*LineIt)[0] != ' ') { + uint64_t NumSamples, NumHeadSamples; + StringRef FName; + result = ParseHead(*LineIt, FName, NumSamples, NumHeadSamples); + } + } + + return result; +} + +template <typename T> ErrorOr<T> SampleProfileReaderBinary::readNumber() { + unsigned NumBytesRead = 0; + std::error_code EC; + uint64_t Val = decodeULEB128(Data, &NumBytesRead); + + if (Val > std::numeric_limits<T>::max()) + EC = sampleprof_error::malformed; + else if (Data + NumBytesRead > End) + EC = sampleprof_error::truncated; + else + EC = sampleprof_error::success; + + if (EC) { + reportError(0, EC.message()); + return EC; + } + + Data += NumBytesRead; + return static_cast<T>(Val); +} + +ErrorOr<StringRef> SampleProfileReaderBinary::readString() { + std::error_code EC; + StringRef Str(reinterpret_cast<const char *>(Data)); + if (Data + Str.size() + 1 > End) { + EC = sampleprof_error::truncated; + reportError(0, EC.message()); + return EC; + } + + Data += Str.size() + 1; + return Str; +} + +template <typename T> +inline ErrorOr<uint32_t> SampleProfileReaderBinary::readStringIndex(T &Table) { + std::error_code EC; + auto Idx = readNumber<uint32_t>(); + if (std::error_code EC = Idx.getError()) + return EC; + if (*Idx >= Table.size()) + return sampleprof_error::truncated_name_table; + return *Idx; +} + +ErrorOr<StringRef> SampleProfileReaderRawBinary::readStringFromTable() { + auto Idx = readStringIndex(NameTable); + if (std::error_code EC = Idx.getError()) + return EC; + + return NameTable[*Idx]; +} + +ErrorOr<StringRef> SampleProfileReaderCompactBinary::readStringFromTable() { + auto Idx = readStringIndex(NameTable); + if (std::error_code EC = Idx.getError()) + return EC; + + return StringRef(NameTable[*Idx]); +} + +std::error_code +SampleProfileReaderBinary::readProfile(FunctionSamples &FProfile) { + auto NumSamples = readNumber<uint64_t>(); + if (std::error_code EC = NumSamples.getError()) + return EC; + FProfile.addTotalSamples(*NumSamples); + + // Read the samples in the body. + auto NumRecords = readNumber<uint32_t>(); + if (std::error_code EC = NumRecords.getError()) + return EC; + + for (uint32_t I = 0; I < *NumRecords; ++I) { + auto LineOffset = readNumber<uint64_t>(); + if (std::error_code EC = LineOffset.getError()) + return EC; + + if (!isOffsetLegal(*LineOffset)) { + return std::error_code(); + } + + auto Discriminator = readNumber<uint64_t>(); + if (std::error_code EC = Discriminator.getError()) + return EC; + + auto NumSamples = readNumber<uint64_t>(); + if (std::error_code EC = NumSamples.getError()) + return EC; + + auto NumCalls = readNumber<uint32_t>(); + if (std::error_code EC = NumCalls.getError()) + return EC; + + for (uint32_t J = 0; J < *NumCalls; ++J) { + auto CalledFunction(readStringFromTable()); + if (std::error_code EC = CalledFunction.getError()) + return EC; + + auto CalledFunctionSamples = readNumber<uint64_t>(); + if (std::error_code EC = CalledFunctionSamples.getError()) + return EC; + + FProfile.addCalledTargetSamples(*LineOffset, *Discriminator, + *CalledFunction, *CalledFunctionSamples); + } + + FProfile.addBodySamples(*LineOffset, *Discriminator, *NumSamples); + } + + // Read all the samples for inlined function calls. 
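+  // Each callsite record is a line offset, a discriminator and a callee name
+  // index, followed by the callee's profile in this same recursive layout.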
+ auto NumCallsites = readNumber<uint32_t>(); + if (std::error_code EC = NumCallsites.getError()) + return EC; + + for (uint32_t J = 0; J < *NumCallsites; ++J) { + auto LineOffset = readNumber<uint64_t>(); + if (std::error_code EC = LineOffset.getError()) + return EC; + + auto Discriminator = readNumber<uint64_t>(); + if (std::error_code EC = Discriminator.getError()) + return EC; + + auto FName(readStringFromTable()); + if (std::error_code EC = FName.getError()) + return EC; + + FunctionSamples &CalleeProfile = FProfile.functionSamplesAt( + LineLocation(*LineOffset, *Discriminator))[*FName]; + CalleeProfile.setName(*FName); + if (std::error_code EC = readProfile(CalleeProfile)) + return EC; + } + + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderBinary::read() { + while (!at_eof()) { + auto NumHeadSamples = readNumber<uint64_t>(); + if (std::error_code EC = NumHeadSamples.getError()) + return EC; + + auto FName(readStringFromTable()); + if (std::error_code EC = FName.getError()) + return EC; + + Profiles[*FName] = FunctionSamples(); + FunctionSamples &FProfile = Profiles[*FName]; + FProfile.setName(*FName); + + FProfile.addHeadSamples(*NumHeadSamples); + + if (std::error_code EC = readProfile(FProfile)) + return EC; + } + + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderRawBinary::verifySPMagic(uint64_t Magic) { + if (Magic == SPMagic()) + return sampleprof_error::success; + return sampleprof_error::bad_magic; +} + +std::error_code +SampleProfileReaderCompactBinary::verifySPMagic(uint64_t Magic) { + if (Magic == SPMagic(SPF_Compact_Binary)) + return sampleprof_error::success; + return sampleprof_error::bad_magic; +} + +std::error_code SampleProfileReaderRawBinary::readNameTable() { + auto Size = readNumber<uint32_t>(); + if (std::error_code EC = Size.getError()) + return EC; + NameTable.reserve(*Size); + for (uint32_t I = 0; I < *Size; ++I) { + auto Name(readString()); + if (std::error_code EC = Name.getError()) + return EC; + NameTable.push_back(*Name); + } + + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderCompactBinary::readNameTable() { + auto Size = readNumber<uint64_t>(); + if (std::error_code EC = Size.getError()) + return EC; + NameTable.reserve(*Size); + for (uint32_t I = 0; I < *Size; ++I) { + auto FID = readNumber<uint64_t>(); + if (std::error_code EC = FID.getError()) + return EC; + NameTable.push_back(std::to_string(*FID)); + } + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderBinary::readHeader() { + Data = reinterpret_cast<const uint8_t *>(Buffer->getBufferStart()); + End = Data + Buffer->getBufferSize(); + + // Read and check the magic identifier. + auto Magic = readNumber<uint64_t>(); + if (std::error_code EC = Magic.getError()) + return EC; + else if (std::error_code EC = verifySPMagic(*Magic)) + return EC; + + // Read the version number. 
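+  // (All header fields, like the rest of this binary format, are ULEB128
+  // encoded: magic, version, profile summary, then the name table, with the
+  // per-function records following the header.)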
+ auto Version = readNumber<uint64_t>(); + if (std::error_code EC = Version.getError()) + return EC; + else if (*Version != SPVersion()) + return sampleprof_error::unsupported_version; + + if (std::error_code EC = readSummary()) + return EC; + + if (std::error_code EC = readNameTable()) + return EC; + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderBinary::readSummaryEntry( + std::vector<ProfileSummaryEntry> &Entries) { + auto Cutoff = readNumber<uint64_t>(); + if (std::error_code EC = Cutoff.getError()) + return EC; + + auto MinBlockCount = readNumber<uint64_t>(); + if (std::error_code EC = MinBlockCount.getError()) + return EC; + + auto NumBlocks = readNumber<uint64_t>(); + if (std::error_code EC = NumBlocks.getError()) + return EC; + + Entries.emplace_back(*Cutoff, *MinBlockCount, *NumBlocks); + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderBinary::readSummary() { + auto TotalCount = readNumber<uint64_t>(); + if (std::error_code EC = TotalCount.getError()) + return EC; + + auto MaxBlockCount = readNumber<uint64_t>(); + if (std::error_code EC = MaxBlockCount.getError()) + return EC; + + auto MaxFunctionCount = readNumber<uint64_t>(); + if (std::error_code EC = MaxFunctionCount.getError()) + return EC; + + auto NumBlocks = readNumber<uint64_t>(); + if (std::error_code EC = NumBlocks.getError()) + return EC; + + auto NumFunctions = readNumber<uint64_t>(); + if (std::error_code EC = NumFunctions.getError()) + return EC; + + auto NumSummaryEntries = readNumber<uint64_t>(); + if (std::error_code EC = NumSummaryEntries.getError()) + return EC; + + std::vector<ProfileSummaryEntry> Entries; + for (unsigned i = 0; i < *NumSummaryEntries; i++) { + std::error_code EC = readSummaryEntry(Entries); + if (EC != sampleprof_error::success) + return EC; + } + Summary = llvm::make_unique<ProfileSummary>( + ProfileSummary::PSK_Sample, Entries, *TotalCount, *MaxBlockCount, 0, + *MaxFunctionCount, *NumBlocks, *NumFunctions); + + return sampleprof_error::success; +} + +bool SampleProfileReaderRawBinary::hasFormat(const MemoryBuffer &Buffer) { + const uint8_t *Data = + reinterpret_cast<const uint8_t *>(Buffer.getBufferStart()); + uint64_t Magic = decodeULEB128(Data); + return Magic == SPMagic(); +} + +bool SampleProfileReaderCompactBinary::hasFormat(const MemoryBuffer &Buffer) { + const uint8_t *Data = + reinterpret_cast<const uint8_t *>(Buffer.getBufferStart()); + uint64_t Magic = decodeULEB128(Data); + return Magic == SPMagic(SPF_Compact_Binary); +} + +std::error_code SampleProfileReaderGCC::skipNextWord() { + uint32_t dummy; + if (!GcovBuffer.readInt(dummy)) + return sampleprof_error::truncated; + return sampleprof_error::success; +} + +template <typename T> ErrorOr<T> SampleProfileReaderGCC::readNumber() { + if (sizeof(T) <= sizeof(uint32_t)) { + uint32_t Val; + if (GcovBuffer.readInt(Val) && Val <= std::numeric_limits<T>::max()) + return static_cast<T>(Val); + } else if (sizeof(T) <= sizeof(uint64_t)) { + uint64_t Val; + if (GcovBuffer.readInt64(Val) && Val <= std::numeric_limits<T>::max()) + return static_cast<T>(Val); + } + + std::error_code EC = sampleprof_error::malformed; + reportError(0, EC.message()); + return EC; +} + +ErrorOr<StringRef> SampleProfileReaderGCC::readString() { + StringRef Str; + if (!GcovBuffer.readString(Str)) + return sampleprof_error::truncated; + return Str; +} + +std::error_code SampleProfileReaderGCC::readHeader() { + // Read the magic identifier. 
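+  // (hasFormat() below matches the literal magic "adcg*704", i.e. a GCDA
+  // container produced by the AutoFDO tooling.)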
+ if (!GcovBuffer.readGCDAFormat()) + return sampleprof_error::unrecognized_format; + + // Read the version number. Note - the GCC reader does not validate this + // version, but the profile creator generates v704. + GCOV::GCOVVersion version; + if (!GcovBuffer.readGCOVVersion(version)) + return sampleprof_error::unrecognized_format; + + if (version != GCOV::V704) + return sampleprof_error::unsupported_version; + + // Skip the empty integer. + if (std::error_code EC = skipNextWord()) + return EC; + + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderGCC::readSectionTag(uint32_t Expected) { + uint32_t Tag; + if (!GcovBuffer.readInt(Tag)) + return sampleprof_error::truncated; + + if (Tag != Expected) + return sampleprof_error::malformed; + + if (std::error_code EC = skipNextWord()) + return EC; + + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderGCC::readNameTable() { + if (std::error_code EC = readSectionTag(GCOVTagAFDOFileNames)) + return EC; + + uint32_t Size; + if (!GcovBuffer.readInt(Size)) + return sampleprof_error::truncated; + + for (uint32_t I = 0; I < Size; ++I) { + StringRef Str; + if (!GcovBuffer.readString(Str)) + return sampleprof_error::truncated; + Names.push_back(Str); + } + + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderGCC::readFunctionProfiles() { + if (std::error_code EC = readSectionTag(GCOVTagAFDOFunction)) + return EC; + + uint32_t NumFunctions; + if (!GcovBuffer.readInt(NumFunctions)) + return sampleprof_error::truncated; + + InlineCallStack Stack; + for (uint32_t I = 0; I < NumFunctions; ++I) + if (std::error_code EC = readOneFunctionProfile(Stack, true, 0)) + return EC; + + computeSummary(); + return sampleprof_error::success; +} + +std::error_code SampleProfileReaderGCC::readOneFunctionProfile( + const InlineCallStack &InlineStack, bool Update, uint32_t Offset) { + uint64_t HeadCount = 0; + if (InlineStack.size() == 0) + if (!GcovBuffer.readInt64(HeadCount)) + return sampleprof_error::truncated; + + uint32_t NameIdx; + if (!GcovBuffer.readInt(NameIdx)) + return sampleprof_error::truncated; + + StringRef Name(Names[NameIdx]); + + uint32_t NumPosCounts; + if (!GcovBuffer.readInt(NumPosCounts)) + return sampleprof_error::truncated; + + uint32_t NumCallsites; + if (!GcovBuffer.readInt(NumCallsites)) + return sampleprof_error::truncated; + + FunctionSamples *FProfile = nullptr; + if (InlineStack.size() == 0) { + // If this is a top function that we have already processed, do not + // update its profile again. This happens in the presence of + // function aliases. Since these aliases share the same function + // body, there will be identical replicated profiles for the + // original function. In this case, we simply not bother updating + // the profile of the original function. + FProfile = &Profiles[Name]; + FProfile->addHeadSamples(HeadCount); + if (FProfile->getTotalSamples() > 0) + Update = false; + } else { + // Otherwise, we are reading an inlined instance. The top of the + // inline stack contains the profile of the caller. Insert this + // callee in the caller's CallsiteMap. 
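+    // The callsite location arrives packed in Offset: the high 16 bits hold
+    // the line offset and the low 16 bits the discriminator (for example,
+    // 0x00050002 is line offset 5, discriminator 2).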
+ FunctionSamples *CallerProfile = InlineStack.front(); + uint32_t LineOffset = Offset >> 16; + uint32_t Discriminator = Offset & 0xffff; + FProfile = &CallerProfile->functionSamplesAt( + LineLocation(LineOffset, Discriminator))[Name]; + } + FProfile->setName(Name); + + for (uint32_t I = 0; I < NumPosCounts; ++I) { + uint32_t Offset; + if (!GcovBuffer.readInt(Offset)) + return sampleprof_error::truncated; + + uint32_t NumTargets; + if (!GcovBuffer.readInt(NumTargets)) + return sampleprof_error::truncated; + + uint64_t Count; + if (!GcovBuffer.readInt64(Count)) + return sampleprof_error::truncated; + + // The line location is encoded in the offset as: + // high 16 bits: line offset to the start of the function. + // low 16 bits: discriminator. + uint32_t LineOffset = Offset >> 16; + uint32_t Discriminator = Offset & 0xffff; + + InlineCallStack NewStack; + NewStack.push_back(FProfile); + NewStack.insert(NewStack.end(), InlineStack.begin(), InlineStack.end()); + if (Update) { + // Walk up the inline stack, adding the samples on this line to + // the total sample count of the callers in the chain. + for (auto CallerProfile : NewStack) + CallerProfile->addTotalSamples(Count); + + // Update the body samples for the current profile. + FProfile->addBodySamples(LineOffset, Discriminator, Count); + } + + // Process the list of functions called at an indirect call site. + // These are all the targets that a function pointer (or virtual + // function) resolved at runtime. + for (uint32_t J = 0; J < NumTargets; J++) { + uint32_t HistVal; + if (!GcovBuffer.readInt(HistVal)) + return sampleprof_error::truncated; + + if (HistVal != HIST_TYPE_INDIR_CALL_TOPN) + return sampleprof_error::malformed; + + uint64_t TargetIdx; + if (!GcovBuffer.readInt64(TargetIdx)) + return sampleprof_error::truncated; + StringRef TargetName(Names[TargetIdx]); + + uint64_t TargetCount; + if (!GcovBuffer.readInt64(TargetCount)) + return sampleprof_error::truncated; + + if (Update) + FProfile->addCalledTargetSamples(LineOffset, Discriminator, + TargetName, TargetCount); + } + } + + // Process all the inlined callers into the current function. These + // are all the callsites that were inlined into this function. + for (uint32_t I = 0; I < NumCallsites; I++) { + // The offset is encoded as: + // high 16 bits: line offset to the start of the function. + // low 16 bits: discriminator. + uint32_t Offset; + if (!GcovBuffer.readInt(Offset)) + return sampleprof_error::truncated; + InlineCallStack NewStack; + NewStack.push_back(FProfile); + NewStack.insert(NewStack.end(), InlineStack.begin(), InlineStack.end()); + if (std::error_code EC = readOneFunctionProfile(NewStack, Update, Offset)) + return EC; + } + + return sampleprof_error::success; +} + +/// Read a GCC AutoFDO profile. +/// +/// This format is generated by the Linux Perf conversion tool at +/// https://github.com/google/autofdo. +std::error_code SampleProfileReaderGCC::read() { + // Read the string table. + if (std::error_code EC = readNameTable()) + return EC; + + // Read the source profile. + if (std::error_code EC = readFunctionProfiles()) + return EC; + + return sampleprof_error::success; +} + +bool SampleProfileReaderGCC::hasFormat(const MemoryBuffer &Buffer) { + StringRef Magic(reinterpret_cast<const char *>(Buffer.getBufferStart())); + return Magic == "adcg*704"; +} + +/// Prepare a memory buffer for the contents of \p Filename. +/// +/// \returns an error code indicating the status of the buffer. 
+static ErrorOr<std::unique_ptr<MemoryBuffer>> +setupMemoryBuffer(const Twine &Filename) { + auto BufferOrErr = MemoryBuffer::getFileOrSTDIN(Filename); + if (std::error_code EC = BufferOrErr.getError()) + return EC; + auto Buffer = std::move(BufferOrErr.get()); + + // Sanity check the file. + if (uint64_t(Buffer->getBufferSize()) > std::numeric_limits<uint32_t>::max()) + return sampleprof_error::too_large; + + return std::move(Buffer); +} + +/// Create a sample profile reader based on the format of the input file. +/// +/// \param Filename The file to open. +/// +/// \param C The LLVM context to use to emit diagnostics. +/// +/// \returns an error code indicating the status of the created reader. +ErrorOr<std::unique_ptr<SampleProfileReader>> +SampleProfileReader::create(const Twine &Filename, LLVMContext &C) { + auto BufferOrError = setupMemoryBuffer(Filename); + if (std::error_code EC = BufferOrError.getError()) + return EC; + return create(BufferOrError.get(), C); +} + +/// Create a sample profile reader based on the format of the input data. +/// +/// \param B The memory buffer to create the reader from (assumes ownership). +/// +/// \param C The LLVM context to use to emit diagnostics. +/// +/// \returns an error code indicating the status of the created reader. +ErrorOr<std::unique_ptr<SampleProfileReader>> +SampleProfileReader::create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C) { + std::unique_ptr<SampleProfileReader> Reader; + if (SampleProfileReaderRawBinary::hasFormat(*B)) + Reader.reset(new SampleProfileReaderRawBinary(std::move(B), C)); + else if (SampleProfileReaderCompactBinary::hasFormat(*B)) + Reader.reset(new SampleProfileReaderCompactBinary(std::move(B), C)); + else if (SampleProfileReaderGCC::hasFormat(*B)) + Reader.reset(new SampleProfileReaderGCC(std::move(B), C)); + else if (SampleProfileReaderText::hasFormat(*B)) + Reader.reset(new SampleProfileReaderText(std::move(B), C)); + else + return sampleprof_error::unrecognized_format; + + if (std::error_code EC = Reader->readHeader()) + return EC; + + return std::move(Reader); +} + +// For text and GCC file formats, we compute the summary after reading the +// profile. Binary format has the profile summary in its header. +void SampleProfileReader::computeSummary() { + SampleProfileSummaryBuilder Builder(ProfileSummaryBuilder::DefaultCutoffs); + for (const auto &I : Profiles) { + const FunctionSamples &Profile = I.second; + Builder.addRecord(Profile); + } + Summary = Builder.getSummary(); +} diff --git a/contrib/llvm/lib/ProfileData/SampleProfWriter.cpp b/contrib/llvm/lib/ProfileData/SampleProfWriter.cpp new file mode 100644 index 000000000000..b4de30118b8b --- /dev/null +++ b/contrib/llvm/lib/ProfileData/SampleProfWriter.cpp @@ -0,0 +1,345 @@ +//===- SampleProfWriter.cpp - Write LLVM sample profile data --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the class that writes LLVM sample profiles. It +// supports two file formats: text and binary. The textual representation +// is useful for debugging and testing purposes. The binary representation +// is more compact, resulting in smaller file sizes. However, they can +// both be used interchangeably. +// +// See lib/ProfileData/SampleProfReader.cpp for documentation on each of the +// supported formats. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ProfileData/SampleProfWriter.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ProfileData/ProfileCommon.h" +#include "llvm/ProfileData/SampleProf.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/LEB128.h" +#include "llvm/Support/MD5.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstdint> +#include <memory> +#include <set> +#include <system_error> +#include <utility> +#include <vector> + +using namespace llvm; +using namespace sampleprof; + +std::error_code +SampleProfileWriter::write(const StringMap<FunctionSamples> &ProfileMap) { + if (std::error_code EC = writeHeader(ProfileMap)) + return EC; + + // Sort the ProfileMap by total samples. + typedef std::pair<StringRef, const FunctionSamples *> NameFunctionSamples; + std::vector<NameFunctionSamples> V; + for (const auto &I : ProfileMap) + V.push_back(std::make_pair(I.getKey(), &I.second)); + + std::stable_sort( + V.begin(), V.end(), + [](const NameFunctionSamples &A, const NameFunctionSamples &B) { + if (A.second->getTotalSamples() == B.second->getTotalSamples()) + return A.first > B.first; + return A.second->getTotalSamples() > B.second->getTotalSamples(); + }); + + for (const auto &I : V) { + if (std::error_code EC = write(*I.second)) + return EC; + } + return sampleprof_error::success; +} + +/// Write samples to a text file. +/// +/// Note: it may be tempting to implement this in terms of +/// FunctionSamples::print(). Please don't. The dump functionality is intended +/// for debugging and has no specified form. +/// +/// The format used here is more structured and deliberate because +/// it needs to be parsed by the SampleProfileReaderText class. +std::error_code SampleProfileWriterText::write(const FunctionSamples &S) { + auto &OS = *OutputStream; + OS << S.getName() << ":" << S.getTotalSamples(); + if (Indent == 0) + OS << ":" << S.getHeadSamples(); + OS << "\n"; + + SampleSorter<LineLocation, SampleRecord> SortedSamples(S.getBodySamples()); + for (const auto &I : SortedSamples.get()) { + LineLocation Loc = I->first; + const SampleRecord &Sample = I->second; + OS.indent(Indent + 1); + if (Loc.Discriminator == 0) + OS << Loc.LineOffset << ": "; + else + OS << Loc.LineOffset << "." << Loc.Discriminator << ": "; + + OS << Sample.getSamples(); + + for (const auto &J : Sample.getCallTargets()) + OS << " " << J.first() << ":" << J.second; + OS << "\n"; + } + + SampleSorter<LineLocation, FunctionSamplesMap> SortedCallsiteSamples( + S.getCallsiteSamples()); + Indent += 1; + for (const auto &I : SortedCallsiteSamples.get()) + for (const auto &FS : I->second) { + LineLocation Loc = I->first; + const FunctionSamples &CalleeSamples = FS.second; + OS.indent(Indent); + if (Loc.Discriminator == 0) + OS << Loc.LineOffset << ": "; + else + OS << Loc.LineOffset << "." 
<< Loc.Discriminator << ": "; + if (std::error_code EC = write(CalleeSamples)) + return EC; + } + Indent -= 1; + + return sampleprof_error::success; +} + +std::error_code SampleProfileWriterBinary::writeNameIdx(StringRef FName) { + const auto &ret = NameTable.find(FName); + if (ret == NameTable.end()) + return sampleprof_error::truncated_name_table; + encodeULEB128(ret->second, *OutputStream); + return sampleprof_error::success; +} + +void SampleProfileWriterBinary::addName(StringRef FName) { + NameTable.insert(std::make_pair(FName, 0)); +} + +void SampleProfileWriterBinary::addNames(const FunctionSamples &S) { + // Add all the names in indirect call targets. + for (const auto &I : S.getBodySamples()) { + const SampleRecord &Sample = I.second; + for (const auto &J : Sample.getCallTargets()) + addName(J.first()); + } + + // Recursively add all the names for inlined callsites. + for (const auto &J : S.getCallsiteSamples()) + for (const auto &FS : J.second) { + const FunctionSamples &CalleeSamples = FS.second; + addName(CalleeSamples.getName()); + addNames(CalleeSamples); + } +} + +void SampleProfileWriterBinary::stablizeNameTable(std::set<StringRef> &V) { + // Sort the names to make NameTable deterministic. + for (const auto &I : NameTable) + V.insert(I.first); + int i = 0; + for (const StringRef &N : V) + NameTable[N] = i++; +} + +std::error_code SampleProfileWriterRawBinary::writeNameTable() { + auto &OS = *OutputStream; + std::set<StringRef> V; + stablizeNameTable(V); + + // Write out the name table. + encodeULEB128(NameTable.size(), OS); + for (auto N : V) { + OS << N; + encodeULEB128(0, OS); + } + return sampleprof_error::success; +} + +std::error_code SampleProfileWriterCompactBinary::writeNameTable() { + auto &OS = *OutputStream; + std::set<StringRef> V; + stablizeNameTable(V); + + // Write out the name table. + encodeULEB128(NameTable.size(), OS); + for (auto N : V) { + encodeULEB128(MD5Hash(N), OS); + } + return sampleprof_error::success; +} + +std::error_code SampleProfileWriterRawBinary::writeMagicIdent() { + auto &OS = *OutputStream; + // Write file magic identifier. + encodeULEB128(SPMagic(), OS); + encodeULEB128(SPVersion(), OS); + return sampleprof_error::success; +} + +std::error_code SampleProfileWriterCompactBinary::writeMagicIdent() { + auto &OS = *OutputStream; + // Write file magic identifier. + encodeULEB128(SPMagic(SPF_Compact_Binary), OS); + encodeULEB128(SPVersion(), OS); + return sampleprof_error::success; +} + +std::error_code SampleProfileWriterBinary::writeHeader( + const StringMap<FunctionSamples> &ProfileMap) { + writeMagicIdent(); + + computeSummary(ProfileMap); + if (auto EC = writeSummary()) + return EC; + + // Generate the name table for all the functions referenced in the profile. 
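+  // Every name that writeBody() will later reference must be registered here;
+  // writeNameIdx() fails with truncated_name_table for any missing entry.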
+ for (const auto &I : ProfileMap) { + addName(I.first()); + addNames(I.second); + } + + writeNameTable(); + return sampleprof_error::success; +} + +std::error_code SampleProfileWriterBinary::writeSummary() { + auto &OS = *OutputStream; + encodeULEB128(Summary->getTotalCount(), OS); + encodeULEB128(Summary->getMaxCount(), OS); + encodeULEB128(Summary->getMaxFunctionCount(), OS); + encodeULEB128(Summary->getNumCounts(), OS); + encodeULEB128(Summary->getNumFunctions(), OS); + std::vector<ProfileSummaryEntry> &Entries = Summary->getDetailedSummary(); + encodeULEB128(Entries.size(), OS); + for (auto Entry : Entries) { + encodeULEB128(Entry.Cutoff, OS); + encodeULEB128(Entry.MinCount, OS); + encodeULEB128(Entry.NumCounts, OS); + } + return sampleprof_error::success; +} +std::error_code SampleProfileWriterBinary::writeBody(const FunctionSamples &S) { + auto &OS = *OutputStream; + + if (std::error_code EC = writeNameIdx(S.getName())) + return EC; + + encodeULEB128(S.getTotalSamples(), OS); + + // Emit all the body samples. + encodeULEB128(S.getBodySamples().size(), OS); + for (const auto &I : S.getBodySamples()) { + LineLocation Loc = I.first; + const SampleRecord &Sample = I.second; + encodeULEB128(Loc.LineOffset, OS); + encodeULEB128(Loc.Discriminator, OS); + encodeULEB128(Sample.getSamples(), OS); + encodeULEB128(Sample.getCallTargets().size(), OS); + for (const auto &J : Sample.getCallTargets()) { + StringRef Callee = J.first(); + uint64_t CalleeSamples = J.second; + if (std::error_code EC = writeNameIdx(Callee)) + return EC; + encodeULEB128(CalleeSamples, OS); + } + } + + // Recursively emit all the callsite samples. + uint64_t NumCallsites = 0; + for (const auto &J : S.getCallsiteSamples()) + NumCallsites += J.second.size(); + encodeULEB128(NumCallsites, OS); + for (const auto &J : S.getCallsiteSamples()) + for (const auto &FS : J.second) { + LineLocation Loc = J.first; + const FunctionSamples &CalleeSamples = FS.second; + encodeULEB128(Loc.LineOffset, OS); + encodeULEB128(Loc.Discriminator, OS); + if (std::error_code EC = writeBody(CalleeSamples)) + return EC; + } + + return sampleprof_error::success; +} + +/// Write samples of a top-level function to a binary file. +/// +/// \returns true if the samples were written successfully, false otherwise. +std::error_code SampleProfileWriterBinary::write(const FunctionSamples &S) { + encodeULEB128(S.getHeadSamples(), *OutputStream); + return writeBody(S); +} + +/// Create a sample profile file writer based on the specified format. +/// +/// \param Filename The file to create. +/// +/// \param Format Encoding format for the profile file. +/// +/// \returns an error code indicating the status of the created writer. +ErrorOr<std::unique_ptr<SampleProfileWriter>> +SampleProfileWriter::create(StringRef Filename, SampleProfileFormat Format) { + std::error_code EC; + std::unique_ptr<raw_ostream> OS; + if (Format == SPF_Binary || Format == SPF_Compact_Binary) + OS.reset(new raw_fd_ostream(Filename, EC, sys::fs::F_None)); + else + OS.reset(new raw_fd_ostream(Filename, EC, sys::fs::F_Text)); + if (EC) + return EC; + + return create(OS, Format); +} + +/// Create a sample profile stream writer based on the specified format. +/// +/// \param OS The output stream to store the profile data to. +/// +/// \param Format Encoding format for the profile file. +/// +/// \returns an error code indicating the status of the created writer. 
+ErrorOr<std::unique_ptr<SampleProfileWriter>> +SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS, + SampleProfileFormat Format) { + std::error_code EC; + std::unique_ptr<SampleProfileWriter> Writer; + + if (Format == SPF_Binary) + Writer.reset(new SampleProfileWriterRawBinary(OS)); + else if (Format == SPF_Compact_Binary) + Writer.reset(new SampleProfileWriterCompactBinary(OS)); + else if (Format == SPF_Text) + Writer.reset(new SampleProfileWriterText(OS)); + else if (Format == SPF_GCC) + EC = sampleprof_error::unsupported_writing_format; + else + EC = sampleprof_error::unrecognized_format; + + if (EC) + return EC; + + return std::move(Writer); +} + +void SampleProfileWriter::computeSummary( + const StringMap<FunctionSamples> &ProfileMap) { + SampleProfileSummaryBuilder Builder(ProfileSummaryBuilder::DefaultCutoffs); + for (const auto &I : ProfileMap) { + const FunctionSamples &Profile = I.second; + Builder.addRecord(Profile); + } + Summary = Builder.getSummary(); +} |
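A minimal usage sketch of the reader/writer pair added above, e.g. to convert a
profile from one encoding to another. The file names are placeholders, and it
assumes an LLVM tree where SampleProfileReader::getProfiles() is available in
addition to the interfaces shown in this diff:

#include "llvm/IR/LLVMContext.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/ProfileData/SampleProfWriter.h"

using namespace llvm;
using namespace sampleprof;

int main() {
  LLVMContext Ctx;

  // Open the input profile; create() auto-detects text, raw/compact binary
  // or GCC encodings via the hasFormat() checks shown above.
  auto ReaderOrErr = SampleProfileReader::create("input.prof", Ctx);
  if (std::error_code EC = ReaderOrErr.getError())
    return 1;
  SampleProfileReader &Reader = *ReaderOrErr.get();
  if (std::error_code EC = Reader.read())
    return 1;

  // Re-emit everything as a text profile (SPF_GCC would be rejected with
  // unsupported_writing_format).
  auto WriterOrErr = SampleProfileWriter::create("output.prof", SPF_Text);
  if (std::error_code EC = WriterOrErr.getError())
    return 1;
  // getProfiles() is declared in SampleProfReader.h (not part of this diff).
  if (std::error_code EC = WriterOrErr.get()->write(Reader.getProfiles()))
    return 1;
  return 0;
}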
