diff options
Diffstat (limited to 'contrib/llvm/lib/Bitcode')
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/BitReader.cpp | 134 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 5958 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp | 390 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp | 2015 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h | 88 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/ValueList.cpp | 216 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Reader/ValueList.h | 86 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp | 50 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 4384 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp | 86 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp | 1041 | ||||
| -rw-r--r-- | contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h | 304 | 
12 files changed, 14752 insertions, 0 deletions
| diff --git a/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp new file mode 100644 index 000000000000..3ec45956b3e5 --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/BitReader.cpp @@ -0,0 +1,134 @@ +//===-- BitReader.cpp -----------------------------------------------------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm-c/BitReader.h" +#include "llvm-c/Core.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include <cstring> +#include <string> + +using namespace llvm; + +/* Builds a module from the bitcode in the specified memory buffer, returning a +   reference to the module via the OutModule parameter. Returns 0 on success. +   Optionally returns a human-readable error message via OutMessage. 
*/ +LLVMBool LLVMParseBitcode(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutModule, +                          char **OutMessage) { +  return LLVMParseBitcodeInContext(LLVMGetGlobalContext(), MemBuf, OutModule, +                                   OutMessage); +} + +LLVMBool LLVMParseBitcode2(LLVMMemoryBufferRef MemBuf, +                           LLVMModuleRef *OutModule) { +  return LLVMParseBitcodeInContext2(LLVMGetGlobalContext(), MemBuf, OutModule); +} + +LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef, +                                   LLVMMemoryBufferRef MemBuf, +                                   LLVMModuleRef *OutModule, +                                   char **OutMessage) { +  MemoryBufferRef Buf = unwrap(MemBuf)->getMemBufferRef(); +  LLVMContext &Ctx = *unwrap(ContextRef); + +  Expected<std::unique_ptr<Module>> ModuleOrErr = parseBitcodeFile(Buf, Ctx); +  if (Error Err = ModuleOrErr.takeError()) { +    std::string Message; +    handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) { +      Message = EIB.message(); +    }); +    if (OutMessage) +      *OutMessage = strdup(Message.c_str()); +    *OutModule = wrap((Module *)nullptr); +    return 1; +  } + +  *OutModule = wrap(ModuleOrErr.get().release()); +  return 0; +} + +LLVMBool LLVMParseBitcodeInContext2(LLVMContextRef ContextRef, +                                    LLVMMemoryBufferRef MemBuf, +                                    LLVMModuleRef *OutModule) { +  MemoryBufferRef Buf = unwrap(MemBuf)->getMemBufferRef(); +  LLVMContext &Ctx = *unwrap(ContextRef); + +  ErrorOr<std::unique_ptr<Module>> ModuleOrErr = +      expectedToErrorOrAndEmitErrors(Ctx, parseBitcodeFile(Buf, Ctx)); +  if (ModuleOrErr.getError()) { +    *OutModule = wrap((Module *)nullptr); +    return 1; +  } + +  *OutModule = wrap(ModuleOrErr.get().release()); +  return 0; +} + +/* Reads a module from the specified path, returning via the OutModule parameter +   a module provider which performs lazy deserialization. 
Returns 0 on success. +   Optionally returns a human-readable error message via OutMessage. */ +LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef, +                                       LLVMMemoryBufferRef MemBuf, +                                       LLVMModuleRef *OutM, char **OutMessage) { +  LLVMContext &Ctx = *unwrap(ContextRef); +  std::unique_ptr<MemoryBuffer> Owner(unwrap(MemBuf)); +  Expected<std::unique_ptr<Module>> ModuleOrErr = +      getOwningLazyBitcodeModule(std::move(Owner), Ctx); +  // Release the buffer if we didn't take ownership of it since we never owned +  // it anyway. +  (void)Owner.release(); + +  if (Error Err = ModuleOrErr.takeError()) { +    std::string Message; +    handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) { +      Message = EIB.message(); +    }); +    if (OutMessage) +      *OutMessage = strdup(Message.c_str()); +    *OutM = wrap((Module *)nullptr); +    return 1; +  } + +  *OutM = wrap(ModuleOrErr.get().release()); + +  return 0; +} + +LLVMBool LLVMGetBitcodeModuleInContext2(LLVMContextRef ContextRef, +                                        LLVMMemoryBufferRef MemBuf, +                                        LLVMModuleRef *OutM) { +  LLVMContext &Ctx = *unwrap(ContextRef); +  std::unique_ptr<MemoryBuffer> Owner(unwrap(MemBuf)); + +  ErrorOr<std::unique_ptr<Module>> ModuleOrErr = expectedToErrorOrAndEmitErrors( +      Ctx, getOwningLazyBitcodeModule(std::move(Owner), Ctx)); +  Owner.release(); + +  if (ModuleOrErr.getError()) { +    *OutM = wrap((Module *)nullptr); +    return 1; +  } + +  *OutM = wrap(ModuleOrErr.get().release()); +  return 0; +} + +LLVMBool LLVMGetBitcodeModule(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM, +                              char **OutMessage) { +  return LLVMGetBitcodeModuleInContext(LLVMGetGlobalContext(), MemBuf, OutM, +                                       OutMessage); +} + +LLVMBool LLVMGetBitcodeModule2(LLVMMemoryBufferRef MemBuf, +                              
 LLVMModuleRef *OutM) { +  return LLVMGetBitcodeModuleInContext2(LLVMGetGlobalContext(), MemBuf, OutM); +} diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp new file mode 100644 index 000000000000..c45b441238bc --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -0,0 +1,5958 @@ +//===- BitcodeReader.cpp - Internal BitcodeReader implementation ----------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Bitcode/BitcodeReader.h" +#include "MetadataLoader.h" +#include "ValueList.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Triple.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Bitcode/BitstreamReader.h" +#include "llvm/Bitcode/LLVMBitCodes.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/AutoUpgrade.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CallSite.h" +#include "llvm/IR/CallingConv.h" +#include "llvm/IR/Comdat.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GVMaterializer.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalIFunc.h" +#include "llvm/IR/GlobalIndirectSymbol.h" +#include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalValue.h" +#include 
"llvm/IR/GlobalVariable.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/InstIterator.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/ModuleSummaryIndex.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Value.h" +#include "llvm/IR/Verifier.h" +#include "llvm/Support/AtomicOrdering.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <deque> +#include <map> +#include <memory> +#include <set> +#include <string> +#include <system_error> +#include <tuple> +#include <utility> +#include <vector> + +using namespace llvm; + +static cl::opt<bool> PrintSummaryGUIDs( +    "print-summary-global-ids", cl::init(false), cl::Hidden, +    cl::desc( +        "Print the global id for each value when reading the module summary")); + +namespace { + +enum { +  SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 => Hex +}; + +} // end anonymous namespace + +static Error error(const Twine &Message) { +  return make_error<StringError>( +      Message, make_error_code(BitcodeError::CorruptedBitcode)); +} + +/// Helper to read the header common to all bitcode files. +static bool hasValidBitcodeHeader(BitstreamCursor &Stream) { +  // Sniff for the signature. 
+  if (!Stream.canSkipToPos(4) || +      Stream.Read(8) != 'B' || +      Stream.Read(8) != 'C' || +      Stream.Read(4) != 0x0 || +      Stream.Read(4) != 0xC || +      Stream.Read(4) != 0xE || +      Stream.Read(4) != 0xD) +    return false; +  return true; +} + +static Expected<BitstreamCursor> initStream(MemoryBufferRef Buffer) { +  const unsigned char *BufPtr = (const unsigned char *)Buffer.getBufferStart(); +  const unsigned char *BufEnd = BufPtr + Buffer.getBufferSize(); + +  if (Buffer.getBufferSize() & 3) +    return error("Invalid bitcode signature"); + +  // If we have a wrapper header, parse it and ignore the non-bc file contents. +  // The magic number is 0x0B17C0DE stored in little endian. +  if (isBitcodeWrapper(BufPtr, BufEnd)) +    if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true)) +      return error("Invalid bitcode wrapper header"); + +  BitstreamCursor Stream(ArrayRef<uint8_t>(BufPtr, BufEnd)); +  if (!hasValidBitcodeHeader(Stream)) +    return error("Invalid bitcode signature"); + +  return std::move(Stream); +} + +/// Convert a string from a record into an std::string, return true on failure. +template <typename StrTy> +static bool convertToString(ArrayRef<uint64_t> Record, unsigned Idx, +                            StrTy &Result) { +  if (Idx > Record.size()) +    return true; + +  for (unsigned i = Idx, e = Record.size(); i != e; ++i) +    Result += (char)Record[i]; +  return false; +} + +// Strip all the TBAA attachment for the module. +static void stripTBAA(Module *M) { +  for (auto &F : *M) { +    if (F.isMaterializable()) +      continue; +    for (auto &I : instructions(F)) +      I.setMetadata(LLVMContext::MD_tbaa, nullptr); +  } +} + +/// Read the "IDENTIFICATION_BLOCK_ID" block, do some basic enforcement on the +/// "epoch" encoded in the bitcode, and return the producer name if any. 
+static Expected<std::string> readIdentificationBlock(BitstreamCursor &Stream) { +  if (Stream.EnterSubBlock(bitc::IDENTIFICATION_BLOCK_ID)) +    return error("Invalid record"); + +  // Read all the records. +  SmallVector<uint64_t, 64> Record; + +  std::string ProducerIdentification; + +  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    default: +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return ProducerIdentification; +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. +    Record.clear(); +    unsigned BitCode = Stream.readRecord(Entry.ID, Record); +    switch (BitCode) { +    default: // Default behavior: reject +      return error("Invalid value"); +    case bitc::IDENTIFICATION_CODE_STRING: // IDENTIFICATION: [strchr x N] +      convertToString(Record, 0, ProducerIdentification); +      break; +    case bitc::IDENTIFICATION_CODE_EPOCH: { // EPOCH: [epoch#] +      unsigned epoch = (unsigned)Record[0]; +      if (epoch != bitc::BITCODE_CURRENT_EPOCH) { +        return error( +          Twine("Incompatible epoch: Bitcode '") + Twine(epoch) + +          "' vs current: '" + Twine(bitc::BITCODE_CURRENT_EPOCH) + "'"); +      } +    } +    } +  } +} + +static Expected<std::string> readIdentificationCode(BitstreamCursor &Stream) { +  // We expect a number of well-defined blocks, though we don't necessarily +  // need to understand them all. +  while (true) { +    if (Stream.AtEndOfStream()) +      return ""; + +    BitstreamEntry Entry = Stream.advance(); +    switch (Entry.Kind) { +    case BitstreamEntry::EndBlock: +    case BitstreamEntry::Error: +      return error("Malformed block"); + +    case BitstreamEntry::SubBlock: +      if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID) +        return readIdentificationBlock(Stream); + +      // Ignore other sub-blocks. 
+      if (Stream.SkipBlock()) +        return error("Malformed block"); +      continue; +    case BitstreamEntry::Record: +      Stream.skipRecord(Entry.ID); +      continue; +    } +  } +} + +static Expected<bool> hasObjCCategoryInModule(BitstreamCursor &Stream) { +  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; +  // Read all the records for this module. + +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return false; +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. +    switch (Stream.readRecord(Entry.ID, Record)) { +    default: +      break; // Default behavior, ignore unknown content. +    case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      // Check for the i386 and other (x86_64, ARM) conventions +      if (S.find("__DATA,__objc_catlist") != std::string::npos || +          S.find("__OBJC,__category") != std::string::npos) +        return true; +      break; +    } +    } +    Record.clear(); +  } +  llvm_unreachable("Exit infinite loop"); +} + +static Expected<bool> hasObjCCategory(BitstreamCursor &Stream) { +  // We expect a number of well-defined blocks, though we don't necessarily +  // need to understand them all. 
+  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return false; + +    case BitstreamEntry::SubBlock: +      if (Entry.ID == bitc::MODULE_BLOCK_ID) +        return hasObjCCategoryInModule(Stream); + +      // Ignore other sub-blocks. +      if (Stream.SkipBlock()) +        return error("Malformed block"); +      continue; + +    case BitstreamEntry::Record: +      Stream.skipRecord(Entry.ID); +      continue; +    } +  } +} + +static Expected<std::string> readModuleTriple(BitstreamCursor &Stream) { +  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; + +  std::string Triple; + +  // Read all the records for this module. +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return Triple; +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. +    switch (Stream.readRecord(Entry.ID, Record)) { +    default: break;  // Default behavior, ignore unknown content. +    case bitc::MODULE_CODE_TRIPLE: {  // TRIPLE: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      Triple = S; +      break; +    } +    } +    Record.clear(); +  } +  llvm_unreachable("Exit infinite loop"); +} + +static Expected<std::string> readTriple(BitstreamCursor &Stream) { +  // We expect a number of well-defined blocks, though we don't necessarily +  // need to understand them all. 
+  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return ""; + +    case BitstreamEntry::SubBlock: +      if (Entry.ID == bitc::MODULE_BLOCK_ID) +        return readModuleTriple(Stream); + +      // Ignore other sub-blocks. +      if (Stream.SkipBlock()) +        return error("Malformed block"); +      continue; + +    case BitstreamEntry::Record: +      Stream.skipRecord(Entry.ID); +      continue; +    } +  } +} + +namespace { + +class BitcodeReaderBase { +protected: +  BitcodeReaderBase(BitstreamCursor Stream, StringRef Strtab) +      : Stream(std::move(Stream)), Strtab(Strtab) { +    this->Stream.setBlockInfo(&BlockInfo); +  } + +  BitstreamBlockInfo BlockInfo; +  BitstreamCursor Stream; +  StringRef Strtab; + +  /// In version 2 of the bitcode we store names of global values and comdats in +  /// a string table rather than in the VST. +  bool UseStrtab = false; + +  Expected<unsigned> parseVersionRecord(ArrayRef<uint64_t> Record); + +  /// If this module uses a string table, pop the reference to the string table +  /// and return the referenced string and the rest of the record. Otherwise +  /// just return the record itself. 
+  std::pair<StringRef, ArrayRef<uint64_t>> +  readNameFromStrtab(ArrayRef<uint64_t> Record); + +  bool readBlockInfo(); + +  // Contains an arbitrary and optional string identifying the bitcode producer +  std::string ProducerIdentification; + +  Error error(const Twine &Message); +}; + +} // end anonymous namespace + +Error BitcodeReaderBase::error(const Twine &Message) { +  std::string FullMsg = Message.str(); +  if (!ProducerIdentification.empty()) +    FullMsg += " (Producer: '" + ProducerIdentification + "' Reader: 'LLVM " + +               LLVM_VERSION_STRING "')"; +  return ::error(FullMsg); +} + +Expected<unsigned> +BitcodeReaderBase::parseVersionRecord(ArrayRef<uint64_t> Record) { +  if (Record.empty()) +    return error("Invalid record"); +  unsigned ModuleVersion = Record[0]; +  if (ModuleVersion > 2) +    return error("Invalid value"); +  UseStrtab = ModuleVersion >= 2; +  return ModuleVersion; +} + +std::pair<StringRef, ArrayRef<uint64_t>> +BitcodeReaderBase::readNameFromStrtab(ArrayRef<uint64_t> Record) { +  if (!UseStrtab) +    return {"", Record}; +  // Invalid reference. Let the caller complain about the record being empty. +  if (Record[0] + Record[1] > Strtab.size()) +    return {"", {}}; +  return {StringRef(Strtab.data() + Record[0], Record[1]), Record.slice(2)}; +} + +namespace { + +class BitcodeReader : public BitcodeReaderBase, public GVMaterializer { +  LLVMContext &Context; +  Module *TheModule = nullptr; +  // Next offset to start scanning for lazy parsing of function bodies. +  uint64_t NextUnreadBit = 0; +  // Last function offset found in the VST. 
+  uint64_t LastFunctionBlockBit = 0; +  bool SeenValueSymbolTable = false; +  uint64_t VSTOffset = 0; + +  std::vector<std::string> SectionTable; +  std::vector<std::string> GCTable; + +  std::vector<Type*> TypeList; +  BitcodeReaderValueList ValueList; +  Optional<MetadataLoader> MDLoader; +  std::vector<Comdat *> ComdatList; +  SmallVector<Instruction *, 64> InstructionList; + +  std::vector<std::pair<GlobalVariable *, unsigned>> GlobalInits; +  std::vector<std::pair<GlobalIndirectSymbol *, unsigned>> IndirectSymbolInits; +  std::vector<std::pair<Function *, unsigned>> FunctionPrefixes; +  std::vector<std::pair<Function *, unsigned>> FunctionPrologues; +  std::vector<std::pair<Function *, unsigned>> FunctionPersonalityFns; + +  /// The set of attributes by index.  Index zero in the file is for null, and +  /// is thus not represented here.  As such all indices are off by one. +  std::vector<AttributeList> MAttributes; + +  /// The set of attribute groups. +  std::map<unsigned, AttributeList> MAttributeGroups; + +  /// While parsing a function body, this is a list of the basic blocks for the +  /// function. +  std::vector<BasicBlock*> FunctionBBs; + +  // When reading the module header, this list is populated with functions that +  // have bodies later in the file. +  std::vector<Function*> FunctionsWithBodies; + +  // When intrinsic functions are encountered which require upgrading they are +  // stored here with their replacement function. +  using UpdatedIntrinsicMap = DenseMap<Function *, Function *>; +  UpdatedIntrinsicMap UpgradedIntrinsics; +  // Intrinsics which were remangled because of types rename +  UpdatedIntrinsicMap RemangledIntrinsics; + +  // Several operations happen after the module header has been read, but +  // before function bodies are processed. This keeps track of whether +  // we've done this yet. 
+  bool SeenFirstFunctionBody = false; + +  /// When function bodies are initially scanned, this map contains info about +  /// where to find deferred function body in the stream. +  DenseMap<Function*, uint64_t> DeferredFunctionInfo; + +  /// When Metadata block is initially scanned when parsing the module, we may +  /// choose to defer parsing of the metadata. This vector contains info about +  /// which Metadata blocks are deferred. +  std::vector<uint64_t> DeferredMetadataInfo; + +  /// These are basic blocks forward-referenced by block addresses.  They are +  /// inserted lazily into functions when they're loaded.  The basic block ID is +  /// its index into the vector. +  DenseMap<Function *, std::vector<BasicBlock *>> BasicBlockFwdRefs; +  std::deque<Function *> BasicBlockFwdRefQueue; + +  /// Indicates that we are using a new encoding for instruction operands where +  /// most operands in the current FUNCTION_BLOCK are encoded relative to the +  /// instruction number, for a more compact encoding.  Some instruction +  /// operands are not relative to the instruction ID: basic block numbers, and +  /// types. Once the old style function blocks have been phased out, we would +  /// not need this flag. +  bool UseRelativeIDs = false; + +  /// True if all functions will be materialized, negating the need to process +  /// (e.g.) blockaddress forward references. +  bool WillMaterializeAllForwardRefs = false; + +  bool StripDebugInfo = false; +  TBAAVerifier TBAAVerifyHelper; + +  std::vector<std::string> BundleTags; +  SmallVector<SyncScope::ID, 8> SSIDs; + +public: +  BitcodeReader(BitstreamCursor Stream, StringRef Strtab, +                StringRef ProducerIdentification, LLVMContext &Context); + +  Error materializeForwardReferencedFunctions(); + +  Error materialize(GlobalValue *GV) override; +  Error materializeModule() override; +  std::vector<StructType *> getIdentifiedStructTypes() const override; + +  /// Main interface to parsing a bitcode buffer. 
+  /// \returns true if an error occurred. +  Error parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata = false, +                         bool IsImporting = false); + +  static uint64_t decodeSignRotatedValue(uint64_t V); + +  /// Materialize any deferred Metadata block. +  Error materializeMetadata() override; + +  void setStripDebugInfo() override; + +private: +  std::vector<StructType *> IdentifiedStructTypes; +  StructType *createIdentifiedStructType(LLVMContext &Context, StringRef Name); +  StructType *createIdentifiedStructType(LLVMContext &Context); + +  Type *getTypeByID(unsigned ID); + +  Value *getFnValueByID(unsigned ID, Type *Ty) { +    if (Ty && Ty->isMetadataTy()) +      return MetadataAsValue::get(Ty->getContext(), getFnMetadataByID(ID)); +    return ValueList.getValueFwdRef(ID, Ty); +  } + +  Metadata *getFnMetadataByID(unsigned ID) { +    return MDLoader->getMetadataFwdRefOrLoad(ID); +  } + +  BasicBlock *getBasicBlock(unsigned ID) const { +    if (ID >= FunctionBBs.size()) return nullptr; // Invalid ID +    return FunctionBBs[ID]; +  } + +  AttributeList getAttributes(unsigned i) const { +    if (i-1 < MAttributes.size()) +      return MAttributes[i-1]; +    return AttributeList(); +  } + +  /// Read a value/type pair out of the specified record from slot 'Slot'. +  /// Increment Slot past the number of slots used in the record. Return true on +  /// failure. +  bool getValueTypePair(SmallVectorImpl<uint64_t> &Record, unsigned &Slot, +                        unsigned InstNum, Value *&ResVal) { +    if (Slot == Record.size()) return true; +    unsigned ValNo = (unsigned)Record[Slot++]; +    // Adjust the ValNo, if it was encoded relative to the InstNum. +    if (UseRelativeIDs) +      ValNo = InstNum - ValNo; +    if (ValNo < InstNum) { +      // If this is not a forward reference, just return the value we already +      // have. 
+      ResVal = getFnValueByID(ValNo, nullptr); +      return ResVal == nullptr; +    } +    if (Slot == Record.size()) +      return true; + +    unsigned TypeNo = (unsigned)Record[Slot++]; +    ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo)); +    return ResVal == nullptr; +  } + +  /// Read a value out of the specified record from slot 'Slot'. Increment Slot +  /// past the number of slots used by the value in the record. Return true if +  /// there is an error. +  bool popValue(SmallVectorImpl<uint64_t> &Record, unsigned &Slot, +                unsigned InstNum, Type *Ty, Value *&ResVal) { +    if (getValue(Record, Slot, InstNum, Ty, ResVal)) +      return true; +    // All values currently take a single record slot. +    ++Slot; +    return false; +  } + +  /// Like popValue, but does not increment the Slot number. +  bool getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot, +                unsigned InstNum, Type *Ty, Value *&ResVal) { +    ResVal = getValue(Record, Slot, InstNum, Ty); +    return ResVal == nullptr; +  } + +  /// Version of getValue that returns ResVal directly, or 0 if there is an +  /// error. +  Value *getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot, +                  unsigned InstNum, Type *Ty) { +    if (Slot == Record.size()) return nullptr; +    unsigned ValNo = (unsigned)Record[Slot]; +    // Adjust the ValNo, if it was encoded relative to the InstNum. +    if (UseRelativeIDs) +      ValNo = InstNum - ValNo; +    return getFnValueByID(ValNo, Ty); +  } + +  /// Like getValue, but decodes signed VBRs. +  Value *getValueSigned(SmallVectorImpl<uint64_t> &Record, unsigned Slot, +                        unsigned InstNum, Type *Ty) { +    if (Slot == Record.size()) return nullptr; +    unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]); +    // Adjust the ValNo, if it was encoded relative to the InstNum. 
+    if (UseRelativeIDs) +      ValNo = InstNum - ValNo; +    return getFnValueByID(ValNo, Ty); +  } + +  /// Converts alignment exponent (i.e. power of two (or zero)) to the +  /// corresponding alignment to use. If alignment is too large, returns +  /// a corresponding error code. +  Error parseAlignmentValue(uint64_t Exponent, unsigned &Alignment); +  Error parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind); +  Error parseModule(uint64_t ResumeBit, bool ShouldLazyLoadMetadata = false); + +  Error parseComdatRecord(ArrayRef<uint64_t> Record); +  Error parseGlobalVarRecord(ArrayRef<uint64_t> Record); +  Error parseFunctionRecord(ArrayRef<uint64_t> Record); +  Error parseGlobalIndirectSymbolRecord(unsigned BitCode, +                                        ArrayRef<uint64_t> Record); + +  Error parseAttributeBlock(); +  Error parseAttributeGroupBlock(); +  Error parseTypeTable(); +  Error parseTypeTableBody(); +  Error parseOperandBundleTags(); +  Error parseSyncScopeNames(); + +  Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record, +                                unsigned NameIndex, Triple &TT); +  void setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, Function *F, +                               ArrayRef<uint64_t> Record); +  Error parseValueSymbolTable(uint64_t Offset = 0); +  Error parseGlobalValueSymbolTable(); +  Error parseConstants(); +  Error rememberAndSkipFunctionBodies(); +  Error rememberAndSkipFunctionBody(); +  /// Save the positions of the Metadata blocks and skip parsing the blocks. 
+  Error rememberAndSkipMetadata();
+  Error typeCheckLoadStoreInst(Type *ValType, Type *PtrType);
+  Error parseFunctionBody(Function *F);
+  Error globalCleanup();
+  Error resolveGlobalAndIndirectSymbolInits();
+  Error parseUseLists();
+  Error findFunctionInStream(
+      Function *F,
+      DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
+
+  SyncScope::ID getDecodedSyncScopeID(unsigned Val);
+};
+
+/// Class to manage reading and parsing function summary index bitcode
+/// files/sections.
+class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
+  /// The module index built during parsing.
+  ModuleSummaryIndex &TheIndex;
+
+  /// Indicates whether we have encountered a global value summary section
+  /// yet during parsing.
+  bool SeenGlobalValSummary = false;
+
+  /// Indicates whether we have already parsed the VST, used for error checking.
+  bool SeenValueSymbolTable = false;
+
+  /// Set to the offset of the VST recorded in the MODULE_CODE_VSTOFFSET record.
+  /// Used to enable on-demand parsing of the VST.
+  uint64_t VSTOffset = 0;
+
+  // Map to save ValueId to ValueInfo association that was recorded in the
+  // ValueSymbolTable. It is used after the VST is parsed to convert
+  // call graph edges read from the function summary from referencing
+  // callees by their ValueId to using the ValueInfo instead, which is how
+  // they are recorded in the summary index being built.
+  // We save a GUID which refers to the same global as the ValueInfo, but
+  // ignoring the linkage, i.e. for values other than local linkage they are
+  // identical.
+  DenseMap<unsigned, std::pair<ValueInfo, GlobalValue::GUID>>
+      ValueIdToValueInfoMap;
+
+  /// Map populated during module path string table parsing, from the
+  /// module ID to a string reference owned by the index's module
+  /// path string table, used to correlate with combined index
+  /// summary records.
+  DenseMap<uint64_t, StringRef> ModuleIdMap;
+
+  /// Original source file name recorded in a bitcode record.
+  std::string SourceFileName;
+
+  /// The string identifier given to this module by the client, normally the
+  /// path to the bitcode file.
+  StringRef ModulePath;
+
+  /// For per-module summary indexes, the unique numerical identifier given to
+  /// this module by the client.
+  unsigned ModuleId;
+
+public:
+  ModuleSummaryIndexBitcodeReader(BitstreamCursor Stream, StringRef Strtab,
+                                  ModuleSummaryIndex &TheIndex,
+                                  StringRef ModulePath, unsigned ModuleId);
+
+  Error parseModule();
+
+private:
+  void setValueGUID(uint64_t ValueID, StringRef ValueName,
+                    GlobalValue::LinkageTypes Linkage,
+                    StringRef SourceFileName);
+  Error parseValueSymbolTable(
+      uint64_t Offset,
+      DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap);
+  std::vector<ValueInfo> makeRefList(ArrayRef<uint64_t> Record);
+  std::vector<FunctionSummary::EdgeTy> makeCallList(ArrayRef<uint64_t> Record,
+                                                    bool IsOldProfileFormat,
+                                                    bool HasProfile,
+                                                    bool HasRelBF);
+  Error parseEntireSummary(unsigned ID);
+  Error parseModuleStringTable();
+
+  std::pair<ValueInfo, GlobalValue::GUID>
+  getValueInfoFromValueId(unsigned ValueId);
+
+  void addThisModule();
+  ModuleSummaryIndex::ModuleInfo *getThisModule();
+};
+
+} // end anonymous namespace
+
+/// Drain every message contained in \p Err into \p Ctx's diagnostic handler
+/// and return the corresponding std::error_code (a default-constructed,
+/// i.e. success, code when \p Err held no error).
+std::error_code llvm::errorToErrorCodeAndEmitErrors(LLVMContext &Ctx,
+                                                    Error Err) {
+  if (Err) {
+    std::error_code EC;
+    handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+      EC = EIB.convertToErrorCode();
+      Ctx.emitError(EIB.message());
+    });
+    return EC;
+  }
+  return std::error_code();
+}
+
+// Construct a module reader over \p Stream. The producer identification
+// string (from the IDENTIFICATION_BLOCK, may be empty) is retained so that
+// version-mismatch diagnostics can name the compiler that emitted the file.
+BitcodeReader::BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
+                             StringRef ProducerIdentification,
+                             LLVMContext &Context)
+    : BitcodeReaderBase(std::move(Stream), Strtab), Context(Context),
+      ValueList(Context) {
+  this->ProducerIdentification = ProducerIdentification;
+}
+
+// Materialize every function body that is pending solely because a
+// blockaddress constant forward-referenced its basic blocks. Drains
+// BasicBlockFwdRefQueue; returns an Error if any queued function can never
+// be materialized (i.e. the blockaddress can never be resolved).
+Error BitcodeReader::materializeForwardReferencedFunctions() {
+  if (WillMaterializeAllForwardRefs)
+    return Error::success();
+
+  // Prevent recursion.
+  WillMaterializeAllForwardRefs = true;
+
+  while (!BasicBlockFwdRefQueue.empty()) {
+    Function *F = BasicBlockFwdRefQueue.front();
+    BasicBlockFwdRefQueue.pop_front();
+    assert(F && "Expected valid function");
+    if (!BasicBlockFwdRefs.count(F))
+      // Already materialized.
+      continue;
+
+    // Check for a function that isn't materializable to prevent an infinite
+    // loop.  When parsing a blockaddress stored in a global variable, there
+    // isn't a trivial way to check if a function will have a body without a
+    // linear search through FunctionsWithBodies, so just check it here.
+    if (!F->isMaterializable())
+      return error("Never resolved function from blockaddress");
+
+    // Try to materialize F.
+    if (Error Err = materialize(F))
+      return Err;
+  }
+  assert(BasicBlockFwdRefs.empty() && "Function missing from queue");
+
+  // Reset state.
+  WillMaterializeAllForwardRefs = false;
+  return Error::success();
+}
+
+//===----------------------------------------------------------------------===//
+//  Helper functions to implement forward reference resolution, etc.
+//===----------------------------------------------------------------------===//
+
+// Return true for the legacy (pre-3.6) linkage codes whose semantics implied
+// an implicit comdat; upgraded globals get that comdat created explicitly.
+static bool hasImplicitComdat(size_t Val) {
+  switch (Val) {
+  default:
+    return false;
+  case 1:  // Old WeakAnyLinkage
+  case 4:  // Old LinkOnceAnyLinkage
+  case 10: // Old WeakODRLinkage
+  case 11: // Old LinkOnceODRLinkage
+    return true;
+  }
+}
+
+// Map a bitcode linkage code onto the current GlobalValue::LinkageTypes enum,
+// folding obsolete encodings onto their modern equivalents.
+static GlobalValue::LinkageTypes getDecodedLinkage(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown/new linkages to external
+  case 0:
+    return GlobalValue::ExternalLinkage;
+  case 2:
+    return GlobalValue::AppendingLinkage;
+  case 3:
+    return GlobalValue::InternalLinkage;
+  case 5:
+    return GlobalValue::ExternalLinkage; // Obsolete DLLImportLinkage
+  case 6:
+    return GlobalValue::ExternalLinkage; // Obsolete DLLExportLinkage
+  case 7:
+    return GlobalValue::ExternalWeakLinkage;
+  case 8:
+    return GlobalValue::CommonLinkage;
+  case 9:
+    return GlobalValue::PrivateLinkage;
+  case 12:
+    return GlobalValue::AvailableExternallyLinkage;
+  case 13:
+    return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateLinkage
+  case 14:
+    return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateWeakLinkage
+  case 15:
+    return GlobalValue::ExternalLinkage; // Obsolete LinkOnceODRAutoHideLinkage
+  case 1: // Old value with implicit comdat.
+  case 16:
+    return GlobalValue::WeakAnyLinkage;
+  case 10: // Old value with implicit comdat.
+  case 17:
+    return GlobalValue::WeakODRLinkage;
+  case 4: // Old value with implicit comdat.
+  case 18:
+    return GlobalValue::LinkOnceAnyLinkage;
+  case 11: // Old value with implicit comdat.
+  case 19:
+    return GlobalValue::LinkOnceODRLinkage;
+  }
+}
+
+/// Decode the per-function boolean flags packed into the low bits of a
+/// function summary record.
+static FunctionSummary::FFlags getDecodedFFlags(uint64_t RawFlags) {
+  FunctionSummary::FFlags Flags;
+  Flags.ReadNone = RawFlags & 0x1;
+  Flags.ReadOnly = (RawFlags >> 1) & 0x1;
+  Flags.NoRecurse = (RawFlags >> 2) & 0x1;
+  Flags.ReturnDoesNotAlias = (RawFlags >> 3) & 0x1;
+  return Flags;
+}
+
+/// Decode the flags for GlobalValue in the summary.
+static GlobalValueSummary::GVFlags getDecodedGVSummaryFlags(uint64_t RawFlags,
+                                                            uint64_t Version) {
+  // Summary were not emitted before LLVM 3.9, we don't need to upgrade Linkage
+  // like getDecodedLinkage() above. Any future change to the linkage enum and
+  // to getDecodedLinkage() will need to be taken into account here as above.
+  auto Linkage = GlobalValue::LinkageTypes(RawFlags & 0xF); // 4 bits
+  RawFlags = RawFlags >> 4;
+  bool NotEligibleToImport = (RawFlags & 0x1) || Version < 3;
+  // The Live flag wasn't introduced until version 3. For dead stripping
+  // to work correctly on earlier versions, we must conservatively treat all
+  // values as live.
+  bool Live = (RawFlags & 0x2) || Version < 3;
+  bool Local = (RawFlags & 0x4);
+
+  return GlobalValueSummary::GVFlags(Linkage, NotEligibleToImport, Live, Local);
+}
+
+// Decode a GlobalValue visibility code from a bitcode record.
+static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown visibilities to default.
+  case 0: return GlobalValue::DefaultVisibility;
+  case 1: return GlobalValue::HiddenVisibility;
+  case 2: return GlobalValue::ProtectedVisibility;
+  }
+}
+
+// Decode a DLL storage class code from a bitcode record.
+static GlobalValue::DLLStorageClassTypes
+getDecodedDLLStorageClass(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown values to default.
+  case 0: return GlobalValue::DefaultStorageClass;
+  case 1: return GlobalValue::DLLImportStorageClass;
+  case 2: return GlobalValue::DLLExportStorageClass;
+  }
+}
+
+// Decode the dso_local flag from a bitcode record.
+static bool getDecodedDSOLocal(unsigned Val) {
+  switch(Val) {
+  default: // Map unknown values to preemptable.
+  case 0:  return false;
+  case 1:  return true;
+  }
+}
+
+// Decode a thread-local mode code from a bitcode record.
+static GlobalVariable::ThreadLocalMode getDecodedThreadLocalMode(unsigned Val) {
+  switch (Val) {
+    case 0: return GlobalVariable::NotThreadLocal;
+    default: // Map unknown non-zero value to general dynamic.
+    case 1: return GlobalVariable::GeneralDynamicTLSModel;
+    case 2: return GlobalVariable::LocalDynamicTLSModel;
+    case 3: return GlobalVariable::InitialExecTLSModel;
+    case 4: return GlobalVariable::LocalExecTLSModel;
+  }
+}
+
+// Decode an unnamed_addr code from a bitcode record.
+static GlobalVariable::UnnamedAddr getDecodedUnnamedAddrType(unsigned Val) {
+  switch (Val) {
+    default: // Map unknown to UnnamedAddr::None.
+    case 0: return GlobalVariable::UnnamedAddr::None;
+    case 1: return GlobalVariable::UnnamedAddr::Global;
+    case 2: return GlobalVariable::UnnamedAddr::Local;
+  }
+}
+
+// Map a bitcode cast code onto an Instruction cast opcode, or -1 if the code
+// is not a recognized cast.
+static int getDecodedCastOpcode(unsigned Val) {
+  switch (Val) {
+  default: return -1;
+  case bitc::CAST_TRUNC   : return Instruction::Trunc;
+  case bitc::CAST_ZEXT    : return Instruction::ZExt;
+  case bitc::CAST_SEXT    : return Instruction::SExt;
+  case bitc::CAST_FPTOUI  : return Instruction::FPToUI;
+  case bitc::CAST_FPTOSI  : return Instruction::FPToSI;
+  case bitc::CAST_UITOFP  : return Instruction::UIToFP;
+  case bitc::CAST_SITOFP  : return Instruction::SIToFP;
+  case bitc::CAST_FPTRUNC : return Instruction::FPTrunc;
+  case bitc::CAST_FPEXT   : return Instruction::FPExt;
+  case bitc::CAST_PTRTOINT: return Instruction::PtrToInt;
+  case bitc::CAST_INTTOPTR: return Instruction::IntToPtr;
+  case bitc::CAST_BITCAST : return Instruction::BitCast;
+  case bitc::CAST_ADDRSPACECAST: return Instruction::AddrSpaceCast;
+  }
+}
+
+// Map a bitcode binary-op code onto an Instruction opcode, selecting the
+// integer or floating-point form based on the operand type \p Ty. Returns -1
+// for invalid code/type combinations (e.g. udiv on a float).
+static int getDecodedBinaryOpcode(unsigned Val, Type *Ty) {
+  bool IsFP = Ty->isFPOrFPVectorTy();
+  // BinOps are only valid for int/fp or vector of int/fp types
+  if (!IsFP && !Ty->isIntOrIntVectorTy())
+    return -1;
+
+  switch (Val) {
+  default:
+    return -1;
+  case bitc::BINOP_ADD:
+    return IsFP ? Instruction::FAdd : Instruction::Add;
+  case bitc::BINOP_SUB:
+    return IsFP ? Instruction::FSub : Instruction::Sub;
+  case bitc::BINOP_MUL:
+    return IsFP ? Instruction::FMul : Instruction::Mul;
+  case bitc::BINOP_UDIV:
+    return IsFP ? -1 : Instruction::UDiv;
+  case bitc::BINOP_SDIV:
+    return IsFP ? Instruction::FDiv : Instruction::SDiv;
+  case bitc::BINOP_UREM:
+    return IsFP ? -1 : Instruction::URem;
+  case bitc::BINOP_SREM:
+    return IsFP ? Instruction::FRem : Instruction::SRem;
+  case bitc::BINOP_SHL:
+    return IsFP ? -1 : Instruction::Shl;
+  case bitc::BINOP_LSHR:
+    return IsFP ? -1 : Instruction::LShr;
+  case bitc::BINOP_ASHR:
+    return IsFP ? -1 : Instruction::AShr;
+  case bitc::BINOP_AND:
+    return IsFP ? -1 : Instruction::And;
+  case bitc::BINOP_OR:
+    return IsFP ? -1 : Instruction::Or;
+  case bitc::BINOP_XOR:
+    return IsFP ? -1 : Instruction::Xor;
+  }
+}
+
+// Decode an atomicrmw operation code; unknown values map to BAD_BINOP so the
+// caller can reject the record.
+static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
+  switch (Val) {
+  default: return AtomicRMWInst::BAD_BINOP;
+  case bitc::RMW_XCHG: return AtomicRMWInst::Xchg;
+  case bitc::RMW_ADD: return AtomicRMWInst::Add;
+  case bitc::RMW_SUB: return AtomicRMWInst::Sub;
+  case bitc::RMW_AND: return AtomicRMWInst::And;
+  case bitc::RMW_NAND: return AtomicRMWInst::Nand;
+  case bitc::RMW_OR: return AtomicRMWInst::Or;
+  case bitc::RMW_XOR: return AtomicRMWInst::Xor;
+  case bitc::RMW_MAX: return AtomicRMWInst::Max;
+  case bitc::RMW_MIN: return AtomicRMWInst::Min;
+  case bitc::RMW_UMAX: return AtomicRMWInst::UMax;
+  case bitc::RMW_UMIN: return AtomicRMWInst::UMin;
+  }
+}
+
+// Decode an atomic memory ordering code from a bitcode record.
+static AtomicOrdering getDecodedOrdering(unsigned Val) {
+  switch (Val) {
+  case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
+  case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
+  case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
+  case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
+  case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
+  case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
+  default: // Map unknown orderings to sequentially-consistent.
+  case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
+  }
+}
+
+// Decode a comdat selection kind code from a bitcode record.
+static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown selection kinds to any.
+  case bitc::COMDAT_SELECTION_KIND_ANY:
+    return Comdat::Any;
+  case bitc::COMDAT_SELECTION_KIND_EXACT_MATCH:
+    return Comdat::ExactMatch;
+  case bitc::COMDAT_SELECTION_KIND_LARGEST:
+    return Comdat::Largest;
+  case bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES:
+    return Comdat::NoDuplicates;
+  case bitc::COMDAT_SELECTION_KIND_SAME_SIZE:
+    return Comdat::SameSize;
+  }
+}
+
+// Decode fast-math flag bits from a bitcode record. The legacy UnsafeAlgebra
+// bit implies all individual fast-math flags via setFast().
+static FastMathFlags getDecodedFastMathFlags(unsigned Val) {
+  FastMathFlags FMF;
+  if (0 != (Val & bitc::UnsafeAlgebra))
+    FMF.setFast();
+  if (0 != (Val & bitc::AllowReassoc))
+    FMF.setAllowReassoc();
+  if (0 != (Val & bitc::NoNaNs))
+    FMF.setNoNaNs();
+  if (0 != (Val & bitc::NoInfs))
+    FMF.setNoInfs();
+  if (0 != (Val & bitc::NoSignedZeros))
+    FMF.setNoSignedZeros();
+  if (0 != (Val & bitc::AllowReciprocal))
+    FMF.setAllowReciprocal();
+  if (0 != (Val & bitc::AllowContract))
+    FMF.setAllowContract(true);
+  if (0 != (Val & bitc::ApproxFunc))
+    FMF.setApproxFunc();
+  return FMF;
+}
+
+// Legacy linkage codes 5 and 6 doubled as DLL import/export storage classes;
+// splice that information onto the already-created global value.
+static void upgradeDLLImportExportLinkage(GlobalValue *GV, unsigned Val) {
+  switch (Val) {
+  case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break;
+  case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break;
+  }
+}
+
+// Look up type \p ID in the type table, creating a placeholder named-struct
+// for a forward reference. Returns null for an out-of-range ID.
+Type *BitcodeReader::getTypeByID(unsigned ID) {
+  // The type table size is always specified correctly.
+  if (ID >= TypeList.size())
+    return nullptr;
+
+  if (Type *Ty = TypeList[ID])
+    return Ty;
+
+  // If we have a forward reference, the only possible case is when it is to a
+  // named struct.  Just create a placeholder for now.
+  return TypeList[ID] = createIdentifiedStructType(Context);
+}
+
+// Create a named identified struct and remember it for later body resolution.
+StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context,
+                                                      StringRef Name) {
+  auto *Ret = StructType::create(Context, Name);
+  IdentifiedStructTypes.push_back(Ret);
+  return Ret;
+}
+
+// Create an anonymous identified struct (used as a forward-reference stub).
+StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context) {
+  auto *Ret = StructType::create(Context);
+  IdentifiedStructTypes.push_back(Ret);
+  return Ret;
+}
+
+//===----------------------------------------------------------------------===//
+//  Functions for parsing blocks from the bitcode file
+//===----------------------------------------------------------------------===//
+
+// Map an attribute kind onto its bit position in the legacy (pre-3.3) raw
+// attribute bitmask. Attributes that never had a raw encoding are rejected
+// with llvm_unreachable.
+static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
+  switch (Val) {
+  case Attribute::EndAttrKinds:
+    llvm_unreachable("Synthetic enumerators which should never get here");
+
+  case Attribute::None:            return 0;
+  case Attribute::ZExt:            return 1 << 0;
+  case Attribute::SExt:            return 1 << 1;
+  case Attribute::NoReturn:        return 1 << 2;
+  case Attribute::InReg:           return 1 << 3;
+  case Attribute::StructRet:       return 1 << 4;
+  case Attribute::NoUnwind:        return 1 << 5;
+  case Attribute::NoAlias:         return 1 << 6;
+  case Attribute::ByVal:           return 1 << 7;
+  case Attribute::Nest:            return 1 << 8;
+  case Attribute::ReadNone:        return 1 << 9;
+  case Attribute::ReadOnly:        return 1 << 10;
+  case Attribute::NoInline:        return 1 << 11;
+  case Attribute::AlwaysInline:    return 1 << 12;
+  case Attribute::OptimizeForSize: return 1 << 13;
+  case Attribute::StackProtect:    return 1 << 14;
+  case Attribute::StackProtectReq: return 1 << 15;
+  case Attribute::Alignment:       return 31 << 16;
+  case Attribute::NoCapture:       return 1 << 21;
+  case Attribute::NoRedZone:       return 1 << 22;
+  case Attribute::NoImplicitFloat: return 1 << 23;
+  case Attribute::Naked:           return 1 << 24;
+  case Attribute::InlineHint:      return 1 << 25;
+  case Attribute::StackAlignment:  return 7 << 26;
+  case Attribute::ReturnsTwice:    return 1 << 29;
+  case Attribute::UWTable:         return 1 << 30;
+  case Attribute::NonLazyBind:     return 1U << 31;
+  case Attribute::SanitizeAddress: return 1ULL << 32;
+  case Attribute::MinSize:         return 1ULL << 33;
+  case Attribute::NoDuplicate:     return 1ULL << 34;
+  case Attribute::StackProtectStrong: return 1ULL << 35;
+  case Attribute::SanitizeThread:  return 1ULL << 36;
+  case Attribute::SanitizeMemory:  return 1ULL << 37;
+  case Attribute::NoBuiltin:       return 1ULL << 38;
+  case Attribute::Returned:        return 1ULL << 39;
+  case Attribute::Cold:            return 1ULL << 40;
+  case Attribute::Builtin:         return 1ULL << 41;
+  case Attribute::OptimizeNone:    return 1ULL << 42;
+  case Attribute::InAlloca:        return 1ULL << 43;
+  case Attribute::NonNull:         return 1ULL << 44;
+  case Attribute::JumpTable:       return 1ULL << 45;
+  case Attribute::Convergent:      return 1ULL << 46;
+  case Attribute::SafeStack:       return 1ULL << 47;
+  case Attribute::NoRecurse:       return 1ULL << 48;
+  case Attribute::InaccessibleMemOnly:         return 1ULL << 49;
+  case Attribute::InaccessibleMemOrArgMemOnly: return 1ULL << 50;
+  case Attribute::SwiftSelf:       return 1ULL << 51;
+  case Attribute::SwiftError:      return 1ULL << 52;
+  case Attribute::WriteOnly:       return 1ULL << 53;
+  case Attribute::Speculatable:    return 1ULL << 54;
+  case Attribute::StrictFP:        return 1ULL << 55;
+  case Attribute::SanitizeHWAddress: return 1ULL << 56;
+  case Attribute::NoCfCheck:       return 1ULL << 57;
+  case Attribute::OptForFuzzing:   return 1ULL << 58;
+  case Attribute::ShadowCallStack: return 1ULL << 59;
+  case Attribute::Dereferenceable:
+    llvm_unreachable("dereferenceable attribute not supported in raw format");
+    break;
+  case Attribute::DereferenceableOrNull:
+    llvm_unreachable("dereferenceable_or_null attribute not supported in raw "
+                     "format");
+    break;
+  case Attribute::ArgMemOnly:
+    llvm_unreachable("argmemonly attribute not supported in raw format");
+    break;
+  case Attribute::AllocSize:
+    llvm_unreachable("allocsize not supported in raw format");
+    break;
+  }
+  llvm_unreachable("Unsupported attribute type");
+}
+
+// Expand a legacy raw attribute bitmask \p Val into \p B, decoding the packed
+// alignment and stack-alignment exponent fields specially.
+static void addRawAttributeValue(AttrBuilder &B, uint64_t Val) {
+  if (!Val) return;
+
+  for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds;
+       I = Attribute::AttrKind(I + 1)) {
+    if (I == Attribute::Dereferenceable ||
+        I == Attribute::DereferenceableOrNull ||
+        I == Attribute::ArgMemOnly ||
+        I == Attribute::AllocSize)
+      continue;
+    if (uint64_t A = (Val & getRawAttributeMask(I))) {
+      if (I == Attribute::Alignment)
+        B.addAlignmentAttr(1ULL << ((A >> 16) - 1));
+      else if (I == Attribute::StackAlignment)
+        B.addStackAlignmentAttr(1ULL << ((A >> 26)-1));
+      else
+        B.addAttribute(I);
+    }
+  }
+}
+
+/// This fills an AttrBuilder object with the LLVM attributes that have
+/// been decoded from the given integer. This function must stay in sync with
+/// 'encodeLLVMAttributesForBitcode'.
+static void decodeLLVMAttributesForBitcode(AttrBuilder &B,
+                                           uint64_t EncodedAttrs) {
+  // FIXME: Remove in 4.0.
+
+  // The alignment is stored as a 16-bit raw value from bits 31--16.  We shift
+  // the bits above 31 down by 11 bits.
+  unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16;
+  assert((!Alignment || isPowerOf2_32(Alignment)) &&
+         "Alignment must be a power of two.");
+
+  if (Alignment)
+    B.addAlignmentAttr(Alignment);
+  addRawAttributeValue(B, ((EncodedAttrs & (0xfffffULL << 32)) >> 11) |
+                          (EncodedAttrs & 0xffff));
+}
+
+// Parse the PARAMATTR block, populating MAttributes. Function records later
+// reference these attribute lists by 1-based index. Only one such block may
+// appear in a module.
+Error BitcodeReader::parseAttributeBlock() {
+  if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
+    return error("Invalid record");
+
+  if (!MAttributes.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+
+  SmallVector<AttributeList, 8> Attrs;
+
+  // Read all the records.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:  // Default behavior: ignore.
+      break;
+    case bitc::PARAMATTR_CODE_ENTRY_OLD: // ENTRY: [paramidx0, attr0, ...]
+      // FIXME: Remove in 4.0.
+      if (Record.size() & 1)
+        return error("Invalid record");
+
+      for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
+        AttrBuilder B;
+        decodeLLVMAttributesForBitcode(B, Record[i+1]);
+        Attrs.push_back(AttributeList::get(Context, Record[i], B));
+      }
+
+      MAttributes.push_back(AttributeList::get(Context, Attrs));
+      Attrs.clear();
+      break;
+    case bitc::PARAMATTR_CODE_ENTRY: // ENTRY: [attrgrp0, attrgrp1, ...]
+      for (unsigned i = 0, e = Record.size(); i != e; ++i)
+        Attrs.push_back(MAttributeGroups[Record[i]]);
+
+      MAttributes.push_back(AttributeList::get(Context, Attrs));
+      Attrs.clear();
+      break;
+    }
+  }
+}
+
+// Returns Attribute::None on unrecognized codes.
+static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
+  switch (Code) {
+  default:
+    return Attribute::None;
+  case bitc::ATTR_KIND_ALIGNMENT:
+    return Attribute::Alignment;
+  case bitc::ATTR_KIND_ALWAYS_INLINE:
+    return Attribute::AlwaysInline;
+  case bitc::ATTR_KIND_ARGMEMONLY:
+    return Attribute::ArgMemOnly;
+  case bitc::ATTR_KIND_BUILTIN:
+    return Attribute::Builtin;
+  case bitc::ATTR_KIND_BY_VAL:
+    return Attribute::ByVal;
+  case bitc::ATTR_KIND_IN_ALLOCA:
+    return Attribute::InAlloca;
+  case bitc::ATTR_KIND_COLD:
+    return Attribute::Cold;
+  case bitc::ATTR_KIND_CONVERGENT:
+    return Attribute::Convergent;
+  case bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY:
+    return Attribute::InaccessibleMemOnly;
+  case bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY:
+    return Attribute::InaccessibleMemOrArgMemOnly;
+  case bitc::ATTR_KIND_INLINE_HINT:
+    return Attribute::InlineHint;
+  case bitc::ATTR_KIND_IN_REG:
+    return Attribute::InReg;
+  case bitc::ATTR_KIND_JUMP_TABLE:
+    return Attribute::JumpTable;
+  case bitc::ATTR_KIND_MIN_SIZE:
+    return Attribute::MinSize;
+  case bitc::ATTR_KIND_NAKED:
+    return Attribute::Naked;
+  case bitc::ATTR_KIND_NEST:
+    return Attribute::Nest;
+  case bitc::ATTR_KIND_NO_ALIAS:
+    return Attribute::NoAlias;
+  case bitc::ATTR_KIND_NO_BUILTIN:
+    return Attribute::NoBuiltin;
+  case bitc::ATTR_KIND_NO_CAPTURE:
+    return Attribute::NoCapture;
+  case bitc::ATTR_KIND_NO_DUPLICATE:
+    return Attribute::NoDuplicate;
+  case bitc::ATTR_KIND_NO_IMPLICIT_FLOAT:
+    return Attribute::NoImplicitFloat;
+  case bitc::ATTR_KIND_NO_INLINE:
+    return Attribute::NoInline;
+  case bitc::ATTR_KIND_NO_RECURSE:
+    return Attribute::NoRecurse;
+  case bitc::ATTR_KIND_NON_LAZY_BIND:
+    return Attribute::NonLazyBind;
+  case bitc::ATTR_KIND_NON_NULL:
+    return Attribute::NonNull;
+  case bitc::ATTR_KIND_DEREFERENCEABLE:
+    return Attribute::Dereferenceable;
+  case bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL:
+    return Attribute::DereferenceableOrNull;
+  case bitc::ATTR_KIND_ALLOC_SIZE:
+    return Attribute::AllocSize;
+  case bitc::ATTR_KIND_NO_RED_ZONE:
+    return Attribute::NoRedZone;
+  case bitc::ATTR_KIND_NO_RETURN:
+    return Attribute::NoReturn;
+  case bitc::ATTR_KIND_NOCF_CHECK:
+    return Attribute::NoCfCheck;
+  case bitc::ATTR_KIND_NO_UNWIND:
+    return Attribute::NoUnwind;
+  case bitc::ATTR_KIND_OPT_FOR_FUZZING:
+    return Attribute::OptForFuzzing;
+  case bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE:
+    return Attribute::OptimizeForSize;
+  case bitc::ATTR_KIND_OPTIMIZE_NONE:
+    return Attribute::OptimizeNone;
+  case bitc::ATTR_KIND_READ_NONE:
+    return Attribute::ReadNone;
+  case bitc::ATTR_KIND_READ_ONLY:
+    return Attribute::ReadOnly;
+  case bitc::ATTR_KIND_RETURNED:
+    return Attribute::Returned;
+  case bitc::ATTR_KIND_RETURNS_TWICE:
+    return Attribute::ReturnsTwice;
+  case bitc::ATTR_KIND_S_EXT:
+    return Attribute::SExt;
+  case bitc::ATTR_KIND_SPECULATABLE:
+    return Attribute::Speculatable;
+  case bitc::ATTR_KIND_STACK_ALIGNMENT:
+    return Attribute::StackAlignment;
+  case bitc::ATTR_KIND_STACK_PROTECT:
+    return Attribute::StackProtect;
+  case bitc::ATTR_KIND_STACK_PROTECT_REQ:
+    return Attribute::StackProtectReq;
+  case bitc::ATTR_KIND_STACK_PROTECT_STRONG:
+    return Attribute::StackProtectStrong;
+  case bitc::ATTR_KIND_SAFESTACK:
+    return Attribute::SafeStack;
+  case bitc::ATTR_KIND_SHADOWCALLSTACK:
+    return Attribute::ShadowCallStack;
+  case bitc::ATTR_KIND_STRICT_FP:
+    return Attribute::StrictFP;
+  case bitc::ATTR_KIND_STRUCT_RET:
+    return Attribute::StructRet;
+  
case bitc::ATTR_KIND_SANITIZE_ADDRESS:
+    return Attribute::SanitizeAddress;
+  case bitc::ATTR_KIND_SANITIZE_HWADDRESS:
+    return Attribute::SanitizeHWAddress;
+  case bitc::ATTR_KIND_SANITIZE_THREAD:
+    return Attribute::SanitizeThread;
+  case bitc::ATTR_KIND_SANITIZE_MEMORY:
+    return Attribute::SanitizeMemory;
+  case bitc::ATTR_KIND_SWIFT_ERROR:
+    return Attribute::SwiftError;
+  case bitc::ATTR_KIND_SWIFT_SELF:
+    return Attribute::SwiftSelf;
+  case bitc::ATTR_KIND_UW_TABLE:
+    return Attribute::UWTable;
+  case bitc::ATTR_KIND_WRITEONLY:
+    return Attribute::WriteOnly;
+  case bitc::ATTR_KIND_Z_EXT:
+    return Attribute::ZExt;
+  }
+}
+
+// Decode an alignment exponent from a bitcode record into a byte alignment.
+// An exponent of zero encodes "default alignment" (result 0).
+Error BitcodeReader::parseAlignmentValue(uint64_t Exponent,
+                                         unsigned &Alignment) {
+  // Note: Alignment in bitcode files is incremented by 1, so that zero
+  // can be used for default alignment.
+  if (Exponent > Value::MaxAlignmentExponent + 1)
+    return error("Invalid alignment value");
+  Alignment = (1 << static_cast<unsigned>(Exponent)) >> 1;
+  return Error::success();
+}
+
+// Decode an attribute kind code, turning an unrecognized code into an Error.
+Error BitcodeReader::parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind) {
+  *Kind = getAttrFromCode(Code);
+  if (*Kind == Attribute::None)
+    return error("Unknown attribute kind (" + Twine(Code) + ")");
+  return Error::success();
+}
+
+// Parse the PARAMATTR_GROUP block, populating MAttributeGroups keyed by group
+// ID. Groups carry enum, integer, and string attributes for a given index.
+Error BitcodeReader::parseAttributeGroupBlock() {
+  if (Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID))
+    return error("Invalid record");
+
+  if (!MAttributeGroups.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Read all the records.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:  // Default behavior: ignore.
+      break;
+    case bitc::PARAMATTR_GRP_CODE_ENTRY: { // ENTRY: [grpid, idx, a0, a1, ...]
+      if (Record.size() < 3)
+        return error("Invalid record");
+
+      uint64_t GrpID = Record[0];
+      uint64_t Idx = Record[1]; // Index of the object this attribute refers to.
+
+      AttrBuilder B;
+      for (unsigned i = 2, e = Record.size(); i != e; ++i) {
+        if (Record[i] == 0) {        // Enum attribute
+          Attribute::AttrKind Kind;
+          if (Error Err = parseAttrKind(Record[++i], &Kind))
+            return Err;
+
+          B.addAttribute(Kind);
+        } else if (Record[i] == 1) { // Integer attribute
+          Attribute::AttrKind Kind;
+          if (Error Err = parseAttrKind(Record[++i], &Kind))
+            return Err;
+          if (Kind == Attribute::Alignment)
+            B.addAlignmentAttr(Record[++i]);
+          else if (Kind == Attribute::StackAlignment)
+            B.addStackAlignmentAttr(Record[++i]);
+          else if (Kind == Attribute::Dereferenceable)
+            B.addDereferenceableAttr(Record[++i]);
+          else if (Kind == Attribute::DereferenceableOrNull)
+            B.addDereferenceableOrNullAttr(Record[++i]);
+          else if (Kind == Attribute::AllocSize)
+            B.addAllocSizeAttrFromRawRepr(Record[++i]);
+        } else {                     // String attribute
+          assert((Record[i] == 3 || Record[i] == 4) &&
+                 "Invalid attribute group entry");
+          bool HasValue = (Record[i++] == 4);
+          SmallString<64> KindStr;
+          SmallString<64> ValStr;
+
+          // NOTE(review): the string-scanning loops below read Record[i]
+          // before checking i != e, so a record whose string is not
+          // null-terminated reads one element past the end; the conditions
+          // should test i != e first. Confirm against upstream fix.
+          while (Record[i] != 0 && i != e)
+            KindStr += Record[i++];
+          assert(Record[i] == 0 && "Kind string not null terminated");
+
+          if (HasValue) {
+            // Has a value associated with it.
+            ++i; // Skip the '0' that terminates the "kind" string.
+            while (Record[i] != 0 && i != e)
+              ValStr += Record[i++];
+            assert(Record[i] == 0 && "Value string not null terminated");
+          }
+
+          B.addAttribute(KindStr.str(), ValStr.str());
+        }
+      }
+
+      MAttributeGroups[GrpID] = AttributeList::get(Context, Idx, B);
+      break;
+    }
+    }
+  }
+}
+
+// Parse the TYPE block; the body is handled by parseTypeTableBody().
+Error BitcodeReader::parseTypeTable() {
+  if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
+    return error("Invalid record");
+
+  return parseTypeTableBody();
+}
+
+// Parse every record of the TYPE block, filling TypeList in record order and
+// resolving forward-referenced named structs created by getTypeByID().
+Error BitcodeReader::parseTypeTableBody() {
+  if (!TypeList.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+  unsigned NumRecords = 0;
+
+  SmallString<64> TypeName;
+
+  // Read all the records for this type table.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      if (NumRecords != TypeList.size())
+        return error("Malformed block");
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Type *ResultTy = nullptr;
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:
+      return error("Invalid value");
+    case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
+      // TYPE_CODE_NUMENTRY contains a count of the number of types in the
+      // type list.  This allows us to reserve space.
+      if (Record.size() < 1) +        return error("Invalid record"); +      TypeList.resize(Record[0]); +      continue; +    case bitc::TYPE_CODE_VOID:      // VOID +      ResultTy = Type::getVoidTy(Context); +      break; +    case bitc::TYPE_CODE_HALF:     // HALF +      ResultTy = Type::getHalfTy(Context); +      break; +    case bitc::TYPE_CODE_FLOAT:     // FLOAT +      ResultTy = Type::getFloatTy(Context); +      break; +    case bitc::TYPE_CODE_DOUBLE:    // DOUBLE +      ResultTy = Type::getDoubleTy(Context); +      break; +    case bitc::TYPE_CODE_X86_FP80:  // X86_FP80 +      ResultTy = Type::getX86_FP80Ty(Context); +      break; +    case bitc::TYPE_CODE_FP128:     // FP128 +      ResultTy = Type::getFP128Ty(Context); +      break; +    case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128 +      ResultTy = Type::getPPC_FP128Ty(Context); +      break; +    case bitc::TYPE_CODE_LABEL:     // LABEL +      ResultTy = Type::getLabelTy(Context); +      break; +    case bitc::TYPE_CODE_METADATA:  // METADATA +      ResultTy = Type::getMetadataTy(Context); +      break; +    case bitc::TYPE_CODE_X86_MMX:   // X86_MMX +      ResultTy = Type::getX86_MMXTy(Context); +      break; +    case bitc::TYPE_CODE_TOKEN:     // TOKEN +      ResultTy = Type::getTokenTy(Context); +      break; +    case bitc::TYPE_CODE_INTEGER: { // INTEGER: [width] +      if (Record.size() < 1) +        return error("Invalid record"); + +      uint64_t NumBits = Record[0]; +      if (NumBits < IntegerType::MIN_INT_BITS || +          NumBits > IntegerType::MAX_INT_BITS) +        return error("Bitwidth for integer type out of range"); +      ResultTy = IntegerType::get(Context, NumBits); +      break; +    } +    case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or +                                    //          [pointee type, address space] +      if (Record.size() < 1) +        return error("Invalid record"); +      unsigned AddressSpace = 0; +      if (Record.size() == 2) +        
AddressSpace = Record[1]; +      ResultTy = getTypeByID(Record[0]); +      if (!ResultTy || +          !PointerType::isValidElementType(ResultTy)) +        return error("Invalid type"); +      ResultTy = PointerType::get(ResultTy, AddressSpace); +      break; +    } +    case bitc::TYPE_CODE_FUNCTION_OLD: { +      // FIXME: attrid is dead, remove it in LLVM 4.0 +      // FUNCTION: [vararg, attrid, retty, paramty x N] +      if (Record.size() < 3) +        return error("Invalid record"); +      SmallVector<Type*, 8> ArgTys; +      for (unsigned i = 3, e = Record.size(); i != e; ++i) { +        if (Type *T = getTypeByID(Record[i])) +          ArgTys.push_back(T); +        else +          break; +      } + +      ResultTy = getTypeByID(Record[2]); +      if (!ResultTy || ArgTys.size() < Record.size()-3) +        return error("Invalid type"); + +      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]); +      break; +    } +    case bitc::TYPE_CODE_FUNCTION: { +      // FUNCTION: [vararg, retty, paramty x N] +      if (Record.size() < 2) +        return error("Invalid record"); +      SmallVector<Type*, 8> ArgTys; +      for (unsigned i = 2, e = Record.size(); i != e; ++i) { +        if (Type *T = getTypeByID(Record[i])) { +          if (!FunctionType::isValidArgumentType(T)) +            return error("Invalid function argument type"); +          ArgTys.push_back(T); +        } +        else +          break; +      } + +      ResultTy = getTypeByID(Record[1]); +      if (!ResultTy || ArgTys.size() < Record.size()-2) +        return error("Invalid type"); + +      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]); +      break; +    } +    case bitc::TYPE_CODE_STRUCT_ANON: {  // STRUCT: [ispacked, eltty x N] +      if (Record.size() < 1) +        return error("Invalid record"); +      SmallVector<Type*, 8> EltTys; +      for (unsigned i = 1, e = Record.size(); i != e; ++i) { +        if (Type *T = getTypeByID(Record[i])) +          EltTys.push_back(T); + 
       else +          break; +      } +      if (EltTys.size() != Record.size()-1) +        return error("Invalid type"); +      ResultTy = StructType::get(Context, EltTys, Record[0]); +      break; +    } +    case bitc::TYPE_CODE_STRUCT_NAME:   // STRUCT_NAME: [strchr x N] +      if (convertToString(Record, 0, TypeName)) +        return error("Invalid record"); +      continue; + +    case bitc::TYPE_CODE_STRUCT_NAMED: { // STRUCT: [ispacked, eltty x N] +      if (Record.size() < 1) +        return error("Invalid record"); + +      if (NumRecords >= TypeList.size()) +        return error("Invalid TYPE table"); + +      // Check to see if this was forward referenced, if so fill in the temp. +      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]); +      if (Res) { +        Res->setName(TypeName); +        TypeList[NumRecords] = nullptr; +      } else  // Otherwise, create a new struct. +        Res = createIdentifiedStructType(Context, TypeName); +      TypeName.clear(); + +      SmallVector<Type*, 8> EltTys; +      for (unsigned i = 1, e = Record.size(); i != e; ++i) { +        if (Type *T = getTypeByID(Record[i])) +          EltTys.push_back(T); +        else +          break; +      } +      if (EltTys.size() != Record.size()-1) +        return error("Invalid record"); +      Res->setBody(EltTys, Record[0]); +      ResultTy = Res; +      break; +    } +    case bitc::TYPE_CODE_OPAQUE: {       // OPAQUE: [] +      if (Record.size() != 1) +        return error("Invalid record"); + +      if (NumRecords >= TypeList.size()) +        return error("Invalid TYPE table"); + +      // Check to see if this was forward referenced, if so fill in the temp. +      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]); +      if (Res) { +        Res->setName(TypeName); +        TypeList[NumRecords] = nullptr; +      } else  // Otherwise, create a new struct with no body. 
        Res = createIdentifiedStructType(Context, TypeName);
      TypeName.clear();
      ResultTy = Res;
      break;
    }
    case bitc::TYPE_CODE_ARRAY:     // ARRAY: [numelts, eltty]
      if (Record.size() < 2)
        return error("Invalid record");
      ResultTy = getTypeByID(Record[1]);
      if (!ResultTy || !ArrayType::isValidElementType(ResultTy))
        return error("Invalid type");
      ResultTy = ArrayType::get(ResultTy, Record[0]);
      break;
    case bitc::TYPE_CODE_VECTOR:    // VECTOR: [numelts, eltty]
      if (Record.size() < 2)
        return error("Invalid record");
      if (Record[0] == 0)
        return error("Invalid vector length");
      ResultTy = getTypeByID(Record[1]);
      // NOTE(review): the element check here uses StructType::isValidElementType
      // rather than a VectorType-specific predicate — this matches the code as
      // written; confirm against VectorType's element requirements before
      // tightening it.
      if (!ResultTy || !StructType::isValidElementType(ResultTy))
        return error("Invalid type");
      ResultTy = VectorType::get(ResultTy, Record[0]);
      break;
    }

    // Common epilogue for every record that produced a type: slot it into
    // TypeList at the next free index. Only named structs may already occupy
    // the slot (forward references created by getTypeByID).
    if (NumRecords >= TypeList.size())
      return error("Invalid TYPE table");
    if (TypeList[NumRecords])
      return error(
          "Invalid TYPE table: Only named structs can be forward referenced");
    assert(ResultTy && "Didn't read a type?");
    TypeList[NumRecords++] = ResultTy;
  }
}

/// Parse the OPERAND_BUNDLE_TAGS block. Each OPERAND_BUNDLE_TAG record's
/// string is appended to BundleTags; a tag's integer ID is implicitly its
/// position in that list.
Error BitcodeReader::parseOperandBundleTags() {
  if (Stream.EnterSubBlock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID))
    return error("Invalid record");

  // This block may appear at most once per module.
  if (!BundleTags.empty())
    return error("Invalid multiple blocks");

  SmallVector<uint64_t, 64> Record;

  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Tags are implicitly mapped to integers by their order.

    if (Stream.readRecord(Entry.ID, Record) != bitc::OPERAND_BUNDLE_TAG)
      return error("Invalid record");

    // OPERAND_BUNDLE_TAG: [strchr x N]
    BundleTags.emplace_back();
    if (convertToString(Record, 0, BundleTags.back()))
      return error("Invalid record");
    Record.clear();
  }
}

/// Parse the SYNC_SCOPE_NAMES block. Each SYNC_SCOPE_NAME record's string is
/// registered with the LLVMContext; the resulting SyncScope::ID is recorded in
/// SSIDs, so a scope's bitcode index is implicitly its position in that list.
Error BitcodeReader::parseSyncScopeNames() {
  if (Stream.EnterSubBlock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID))
    return error("Invalid record");

  // This block may appear at most once per module.
  if (!SSIDs.empty())
    return error("Invalid multiple synchronization scope names blocks");

  SmallVector<uint64_t, 64> Record;
  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      // The block must contain at least one name.
      if (SSIDs.empty())
        return error("Invalid empty synchronization scope names block");
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Synchronization scope names are implicitly mapped to synchronization
    // scope IDs by their order.

    if (Stream.readRecord(Entry.ID, Record) != bitc::SYNC_SCOPE_NAME)
      return error("Invalid record");

    SmallString<16> SSN;
    if (convertToString(Record, 0, SSN))
      return error("Invalid record");

    SSIDs.push_back(Context.getOrInsertSyncScopeID(SSN));
    Record.clear();
  }
}

/// Associate a value with its name from the given index in the provided record.
Expected<Value *> BitcodeReader::recordValue(SmallVectorImpl<uint64_t> &Record,
                                             unsigned NameIndex, Triple &TT) {
  // Record layout: [valueid, ..., namechar x N] where the name starts at
  // NameIndex (1 for VST_CODE_ENTRY, 2 for VST_CODE_FNENTRY).
  SmallString<128> ValueName;
  if (convertToString(Record, NameIndex, ValueName))
    return error("Invalid record");
  unsigned ValueID = Record[0];
  if (ValueID >= ValueList.size() || !ValueList[ValueID])
    return error("Invalid record");
  Value *V = ValueList[ValueID];

  // Reject embedded NUL bytes in value names.
  StringRef NameStr(ValueName.data(), ValueName.size());
  if (NameStr.find_first_of(0) != StringRef::npos)
    return error("Invalid value name");
  V->setName(NameStr);
  auto *GO = dyn_cast<GlobalObject>(V);
  if (GO) {
    // (Comdat *)1 is a sentinel installed earlier during global parsing —
    // presumably meaning "comdat named after the value"; now that the name is
    // known, materialize or drop the comdat depending on target support.
    if (GO->getComdat() == reinterpret_cast<Comdat *>(1)) {
      if (TT.supportsCOMDAT())
        GO->setComdat(TheModule->getOrInsertComdat(V->getName()));
      else
        GO->setComdat(nullptr);
    }
  }
  return V;
}

/// Helper to note and return the current location, and jump to the given
/// offset.
static uint64_t jumpToValueSymbolTable(uint64_t Offset,
                                       BitstreamCursor &Stream) {
  // Save the current parsing location so we can jump back at the end
  // of the VST read.
  uint64_t CurrentBit = Stream.GetCurrentBitNo();
  // Offset is in 32-bit words; convert to a bit position.
  Stream.JumpToBit(Offset * 32);
#ifndef NDEBUG
  // Do some checking if we are in debug mode.
  BitstreamEntry Entry = Stream.advance();
  assert(Entry.Kind == BitstreamEntry::SubBlock);
  assert(Entry.ID == bitc::VALUE_SYMTAB_BLOCK_ID);
#else
  // In NDEBUG mode ignore the output so we don't get an unused variable
  // warning.
  Stream.advance();
#endif
  return CurrentBit;
}

/// Record the bit offset of F's function body (from a VST_CODE_FNENTRY
/// record) so the body can be lazily materialized later.
void BitcodeReader::setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta,
                                            Function *F,
                                            ArrayRef<uint64_t> Record) {
  // Note that we subtract 1 here because the offset is relative to one word
  // before the start of the identification or module block, which was
  // historically always the start of the regular bitcode header.
  uint64_t FuncWordOffset = Record[1] - 1;
  uint64_t FuncBitOffset = FuncWordOffset * 32;
  DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta;
  // Set the LastFunctionBlockBit to point to the last function block.
  // Later when parsing is resumed after function materialization,
  // we can simply skip that last function block.
  if (FuncBitOffset > LastFunctionBlockBit)
    LastFunctionBlockBit = FuncBitOffset;
}

/// Read a new-style GlobalValue symbol table.
Error BitcodeReader::parseGlobalValueSymbolTable() {
  // Must be sampled before EnterSubBlock, which changes the abbrev ID width.
  unsigned FuncBitcodeOffsetDelta =
      Stream.getAbbrevIDWidth() + bitc::BlockIDWidth;

  if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
    return error("Invalid record");

  SmallVector<uint64_t, 64> Record;
  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock:
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      return Error::success();
    case BitstreamEntry::Record:
      break;
    }

    Record.clear();
    // Only function-entry records matter here; names live in the string table.
    switch (Stream.readRecord(Entry.ID, Record)) {
    case bitc::VST_CODE_FNENTRY: // [valueid, offset]
      setDeferredFunctionInfo(FuncBitcodeOffsetDelta,
                              cast<Function>(ValueList[Record[0]]), Record);
      break;
    }
  }
}

/// Parse the value symbol table at either the current parsing location or
/// at the given bit offset if provided.
Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
  uint64_t CurrentBit;
  // Pass in the Offset to distinguish between calling for the module-level
  // VST (where we want to jump to the VST offset) and the function-level
  // VST (where we don't).
  if (Offset > 0) {
    CurrentBit = jumpToValueSymbolTable(Offset, Stream);
    // If this module uses a string table, read this as a module-level VST.
    if (UseStrtab) {
      if (Error Err = parseGlobalValueSymbolTable())
        return Err;
      Stream.JumpToBit(CurrentBit);
      return Error::success();
    }
    // Otherwise, the VST will be in a similar format to a function-level VST,
    // and will contain symbol names.
  }

  // Compute the delta between the bitcode indices in the VST (the word offset
  // to the word-aligned ENTER_SUBBLOCK for the function block, and that
  // expected by the lazy reader. The reader's EnterSubBlock expects to have
  // already read the ENTER_SUBBLOCK code (size getAbbrevIDWidth) and BlockID
  // (size BlockIDWidth). Note that we access the stream's AbbrevID width here
  // just before entering the VST subblock because: 1) the EnterSubBlock
  // changes the AbbrevID width; 2) the VST block is nested within the same
  // outer MODULE_BLOCK as the FUNCTION_BLOCKs and therefore have the same
  // AbbrevID width before calling EnterSubBlock; and 3) when we want to
  // jump to the FUNCTION_BLOCK using this offset later, we don't want
  // to rely on the stream's AbbrevID width being that of the MODULE_BLOCK.
  unsigned FuncBitcodeOffsetDelta =
      Stream.getAbbrevIDWidth() + bitc::BlockIDWidth;

  if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
    return error("Invalid record");

  SmallVector<uint64_t, 64> Record;

  Triple TT(TheModule->getTargetTriple());

  // Read all the records for this value table.
  SmallString<128> ValueName;

  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      // If we jumped here from a module-level VST offset, restore the saved
      // stream position before returning.
      if (Offset > 0)
        Stream.JumpToBit(CurrentBit);
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
    Record.clear();
    switch (Stream.readRecord(Entry.ID, Record)) {
    default:  // Default behavior: unknown type.
      break;
    case bitc::VST_CODE_ENTRY: {  // VST_CODE_ENTRY: [valueid, namechar x N]
      Expected<Value *> ValOrErr = recordValue(Record, 1, TT);
      if (Error Err = ValOrErr.takeError())
        return Err;
      ValOrErr.get();
      break;
    }
    case bitc::VST_CODE_FNENTRY: {
      // VST_CODE_FNENTRY: [valueid, offset, namechar x N]
      Expected<Value *> ValOrErr = recordValue(Record, 2, TT);
      if (Error Err = ValOrErr.takeError())
        return Err;
      Value *V = ValOrErr.get();

      // Ignore function offsets emitted for aliases of functions in older
      // versions of LLVM.
      if (auto *F = dyn_cast<Function>(V))
        setDeferredFunctionInfo(FuncBitcodeOffsetDelta, F, Record);
      break;
    }
    case bitc::VST_CODE_BBENTRY: {
      // VST_CODE_BBENTRY: [bbid, namechar x N] — names a basic block.
      if (convertToString(Record, 1, ValueName))
        return error("Invalid record");
      BasicBlock *BB = getBasicBlock(Record[0]);
      if (!BB)
        return error("Invalid record");

      BB->setName(StringRef(ValueName.data(), ValueName.size()));
      ValueName.clear();
      break;
    }
    }
  }
}

/// Decode a signed value stored with the sign bit in the LSB for dense VBR
/// encoding.
uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
  // Even values encode non-negative numbers: n -> 2n.
  if ((V & 1) == 0)
    return V >> 1;
  // Odd values (other than 1) encode negative numbers: -n -> 2n+1.
  if (V != 1)
    return -(V >> 1);
  // There is no such thing as -0 with integers.  "-0" really means MININT.
  return 1ULL << 63;
}

/// Resolve all of the initializers for global values and aliases that we can.
Error BitcodeReader::resolveGlobalAndIndirectSymbolInits() {
  std::vector<std::pair<GlobalVariable *, unsigned>> GlobalInitWorklist;
  std::vector<std::pair<GlobalIndirectSymbol *, unsigned>>
      IndirectSymbolInitWorklist;
  std::vector<std::pair<Function *, unsigned>> FunctionPrefixWorklist;
  std::vector<std::pair<Function *, unsigned>> FunctionPrologueWorklist;
  std::vector<std::pair<Function *, unsigned>> FunctionPersonalityFnWorklist;

  // Swap the pending lists into local worklists; this empties the members so
  // that entries which still cannot be resolved can be pushed back onto them
  // below for a later pass.
  GlobalInitWorklist.swap(GlobalInits);
  IndirectSymbolInitWorklist.swap(IndirectSymbolInits);
  FunctionPrefixWorklist.swap(FunctionPrefixes);
  FunctionPrologueWorklist.swap(FunctionPrologues);
  FunctionPersonalityFnWorklist.swap(FunctionPersonalityFns);

  while (!GlobalInitWorklist.empty()) {
    unsigned ValID = GlobalInitWorklist.back().second;
    if (ValID >= ValueList.size()) {
      // Not ready to resolve this yet, it requires something later in the file.
      GlobalInits.push_back(GlobalInitWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        GlobalInitWorklist.back().first->setInitializer(C);
      else
        return error("Expected a constant");
    }
    GlobalInitWorklist.pop_back();
  }

  while (!IndirectSymbolInitWorklist.empty()) {
    unsigned ValID = IndirectSymbolInitWorklist.back().second;
    if (ValID >= ValueList.size()) {
      // Not ready yet; retry after more of the file has been read.
      IndirectSymbolInits.push_back(IndirectSymbolInitWorklist.back());
    } else {
      Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
      if (!C)
        return error("Expected a constant");
      GlobalIndirectSymbol *GIS = IndirectSymbolInitWorklist.back().first;
      // An alias must have exactly the type of its aliasee.
      if (isa<GlobalAlias>(GIS) && C->getType() != GIS->getType())
        return error("Alias and aliasee types don't match");
      GIS->setIndirectSymbol(C);
    }
    IndirectSymbolInitWorklist.pop_back();
  }

  while (!FunctionPrefixWorklist.empty()) {
    unsigned ValID = FunctionPrefixWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPrefixes.push_back(FunctionPrefixWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPrefixWorklist.back().first->setPrefixData(C);
      else
        return error("Expected a constant");
    }
    FunctionPrefixWorklist.pop_back();
  }

  while (!FunctionPrologueWorklist.empty()) {
    unsigned ValID = FunctionPrologueWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPrologues.push_back(FunctionPrologueWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPrologueWorklist.back().first->setPrologueData(C);
      else
        return error("Expected a constant");
    }
    FunctionPrologueWorklist.pop_back();
  }

  while (!FunctionPersonalityFnWorklist.empty()) {
    unsigned ValID = FunctionPersonalityFnWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPersonalityFns.push_back(FunctionPersonalityFnWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPersonalityFnWorklist.back().first->setPersonalityFn(C);
      else
        return error("Expected a constant");
    }
    FunctionPersonalityFnWorklist.pop_back();
  }

  return Error::success();
}

/// Assemble a multi-word integer: each 64-bit word is sign-rotate decoded,
/// then the words are combined into an APInt of width TypeBits.
static APInt readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
  SmallVector<uint64_t, 8> Words(Vals.size());
  transform(Vals, Words.begin(),
                 BitcodeReader::decodeSignRotatedValue);

  return APInt(TypeBits, Words);
}

Error BitcodeReader::parseConstants() {
  if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
    return error("Invalid record");

  SmallVector<uint64_t, 64> Record;

  // Read all the records for this value table.
  // CurTy is the type applied to constant records; it defaults to i32.
  Type *CurTy = Type::getInt32Ty(Context);
  unsigned NextCstNo = ValueList.size();

  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      // Every forward-referenced constant slot must have been filled.
      if (NextCstNo != ValueList.size())
        return error("Invalid constant reference");

      // Once all the constants have been read, go through and resolve forward
      // references.
      ValueList.resolveConstantForwardRefs();
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
+    Record.clear(); +    Type *VoidType = Type::getVoidTy(Context); +    Value *V = nullptr; +    unsigned BitCode = Stream.readRecord(Entry.ID, Record); +    switch (BitCode) { +    default:  // Default behavior: unknown constant +    case bitc::CST_CODE_UNDEF:     // UNDEF +      V = UndefValue::get(CurTy); +      break; +    case bitc::CST_CODE_SETTYPE:   // SETTYPE: [typeid] +      if (Record.empty()) +        return error("Invalid record"); +      if (Record[0] >= TypeList.size() || !TypeList[Record[0]]) +        return error("Invalid record"); +      if (TypeList[Record[0]] == VoidType) +        return error("Invalid constant type"); +      CurTy = TypeList[Record[0]]; +      continue;  // Skip the ValueList manipulation. +    case bitc::CST_CODE_NULL:      // NULL +      V = Constant::getNullValue(CurTy); +      break; +    case bitc::CST_CODE_INTEGER:   // INTEGER: [intval] +      if (!CurTy->isIntegerTy() || Record.empty()) +        return error("Invalid record"); +      V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0])); +      break; +    case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval] +      if (!CurTy->isIntegerTy() || Record.empty()) +        return error("Invalid record"); + +      APInt VInt = +          readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth()); +      V = ConstantInt::get(Context, VInt); + +      break; +    } +    case bitc::CST_CODE_FLOAT: {    // FLOAT: [fpval] +      if (Record.empty()) +        return error("Invalid record"); +      if (CurTy->isHalfTy()) +        V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf(), +                                             APInt(16, (uint16_t)Record[0]))); +      else if (CurTy->isFloatTy()) +        V = ConstantFP::get(Context, APFloat(APFloat::IEEEsingle(), +                                             APInt(32, (uint32_t)Record[0]))); +      else if (CurTy->isDoubleTy()) +        V = ConstantFP::get(Context, APFloat(APFloat::IEEEdouble(), +       
                                      APInt(64, Record[0]))); +      else if (CurTy->isX86_FP80Ty()) { +        // Bits are not stored the same way as a normal i80 APInt, compensate. +        uint64_t Rearrange[2]; +        Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16); +        Rearrange[1] = Record[0] >> 48; +        V = ConstantFP::get(Context, APFloat(APFloat::x87DoubleExtended(), +                                             APInt(80, Rearrange))); +      } else if (CurTy->isFP128Ty()) +        V = ConstantFP::get(Context, APFloat(APFloat::IEEEquad(), +                                             APInt(128, Record))); +      else if (CurTy->isPPC_FP128Ty()) +        V = ConstantFP::get(Context, APFloat(APFloat::PPCDoubleDouble(), +                                             APInt(128, Record))); +      else +        V = UndefValue::get(CurTy); +      break; +    } + +    case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number] +      if (Record.empty()) +        return error("Invalid record"); + +      unsigned Size = Record.size(); +      SmallVector<Constant*, 16> Elts; + +      if (StructType *STy = dyn_cast<StructType>(CurTy)) { +        for (unsigned i = 0; i != Size; ++i) +          Elts.push_back(ValueList.getConstantFwdRef(Record[i], +                                                     STy->getElementType(i))); +        V = ConstantStruct::get(STy, Elts); +      } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) { +        Type *EltTy = ATy->getElementType(); +        for (unsigned i = 0; i != Size; ++i) +          Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy)); +        V = ConstantArray::get(ATy, Elts); +      } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) { +        Type *EltTy = VTy->getElementType(); +        for (unsigned i = 0; i != Size; ++i) +          Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy)); +        V = ConstantVector::get(Elts); +      } else { +        V = 
UndefValue::get(CurTy); +      } +      break; +    } +    case bitc::CST_CODE_STRING:    // STRING: [values] +    case bitc::CST_CODE_CSTRING: { // CSTRING: [values] +      if (Record.empty()) +        return error("Invalid record"); + +      SmallString<16> Elts(Record.begin(), Record.end()); +      V = ConstantDataArray::getString(Context, Elts, +                                       BitCode == bitc::CST_CODE_CSTRING); +      break; +    } +    case bitc::CST_CODE_DATA: {// DATA: [n x value] +      if (Record.empty()) +        return error("Invalid record"); + +      Type *EltTy = cast<SequentialType>(CurTy)->getElementType(); +      if (EltTy->isIntegerTy(8)) { +        SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::get(Context, Elts); +        else +          V = ConstantDataArray::get(Context, Elts); +      } else if (EltTy->isIntegerTy(16)) { +        SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::get(Context, Elts); +        else +          V = ConstantDataArray::get(Context, Elts); +      } else if (EltTy->isIntegerTy(32)) { +        SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::get(Context, Elts); +        else +          V = ConstantDataArray::get(Context, Elts); +      } else if (EltTy->isIntegerTy(64)) { +        SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::get(Context, Elts); +        else +          V = ConstantDataArray::get(Context, Elts); +      } else if (EltTy->isHalfTy()) { +        SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::getFP(Context, Elts); +        else +          V = ConstantDataArray::getFP(Context, 
Elts); +      } else if (EltTy->isFloatTy()) { +        SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::getFP(Context, Elts); +        else +          V = ConstantDataArray::getFP(Context, Elts); +      } else if (EltTy->isDoubleTy()) { +        SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end()); +        if (isa<VectorType>(CurTy)) +          V = ConstantDataVector::getFP(Context, Elts); +        else +          V = ConstantDataArray::getFP(Context, Elts); +      } else { +        return error("Invalid type for value"); +      } +      break; +    } +    case bitc::CST_CODE_CE_BINOP: {  // CE_BINOP: [opcode, opval, opval] +      if (Record.size() < 3) +        return error("Invalid record"); +      int Opc = getDecodedBinaryOpcode(Record[0], CurTy); +      if (Opc < 0) { +        V = UndefValue::get(CurTy);  // Unknown binop. +      } else { +        Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy); +        Constant *RHS = ValueList.getConstantFwdRef(Record[2], CurTy); +        unsigned Flags = 0; +        if (Record.size() >= 4) { +          if (Opc == Instruction::Add || +              Opc == Instruction::Sub || +              Opc == Instruction::Mul || +              Opc == Instruction::Shl) { +            if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP)) +              Flags |= OverflowingBinaryOperator::NoSignedWrap; +            if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP)) +              Flags |= OverflowingBinaryOperator::NoUnsignedWrap; +          } else if (Opc == Instruction::SDiv || +                     Opc == Instruction::UDiv || +                     Opc == Instruction::LShr || +                     Opc == Instruction::AShr) { +            if (Record[3] & (1 << bitc::PEO_EXACT)) +              Flags |= SDivOperator::IsExact; +          } +        } +        V = ConstantExpr::get(Opc, LHS, RHS, Flags); +      } +      break; +    } +    
case bitc::CST_CODE_CE_CAST: {  // CE_CAST: [opcode, opty, opval] +      if (Record.size() < 3) +        return error("Invalid record"); +      int Opc = getDecodedCastOpcode(Record[0]); +      if (Opc < 0) { +        V = UndefValue::get(CurTy);  // Unknown cast. +      } else { +        Type *OpTy = getTypeByID(Record[1]); +        if (!OpTy) +          return error("Invalid record"); +        Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy); +        V = UpgradeBitCastExpr(Opc, Op, CurTy); +        if (!V) V = ConstantExpr::getCast(Opc, Op, CurTy); +      } +      break; +    } +    case bitc::CST_CODE_CE_INBOUNDS_GEP: // [ty, n x operands] +    case bitc::CST_CODE_CE_GEP: // [ty, n x operands] +    case bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX: { // [ty, flags, n x +                                                     // operands] +      unsigned OpNum = 0; +      Type *PointeeType = nullptr; +      if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX || +          Record.size() % 2) +        PointeeType = getTypeByID(Record[OpNum++]); + +      bool InBounds = false; +      Optional<unsigned> InRangeIndex; +      if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX) { +        uint64_t Op = Record[OpNum++]; +        InBounds = Op & 1; +        InRangeIndex = Op >> 1; +      } else if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP) +        InBounds = true; + +      SmallVector<Constant*, 16> Elts; +      while (OpNum != Record.size()) { +        Type *ElTy = getTypeByID(Record[OpNum++]); +        if (!ElTy) +          return error("Invalid record"); +        Elts.push_back(ValueList.getConstantFwdRef(Record[OpNum++], ElTy)); +      } + +      if (PointeeType && +          PointeeType != +              cast<PointerType>(Elts[0]->getType()->getScalarType()) +                  ->getElementType()) +        return error("Explicit gep operator type does not match pointee type " +                     "of pointer operand"); + +      if (Elts.size() < 1) +   
     return error("Invalid gep with no operands"); + +      ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end()); +      V = ConstantExpr::getGetElementPtr(PointeeType, Elts[0], Indices, +                                         InBounds, InRangeIndex); +      break; +    } +    case bitc::CST_CODE_CE_SELECT: {  // CE_SELECT: [opval#, opval#, opval#] +      if (Record.size() < 3) +        return error("Invalid record"); + +      Type *SelectorTy = Type::getInt1Ty(Context); + +      // The selector might be an i1 or an <n x i1> +      // Get the type from the ValueList before getting a forward ref. +      if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) +        if (Value *V = ValueList[Record[0]]) +          if (SelectorTy != V->getType()) +            SelectorTy = VectorType::get(SelectorTy, VTy->getNumElements()); + +      V = ConstantExpr::getSelect(ValueList.getConstantFwdRef(Record[0], +                                                              SelectorTy), +                                  ValueList.getConstantFwdRef(Record[1],CurTy), +                                  ValueList.getConstantFwdRef(Record[2],CurTy)); +      break; +    } +    case bitc::CST_CODE_CE_EXTRACTELT +        : { // CE_EXTRACTELT: [opty, opval, opty, opval] +      if (Record.size() < 3) +        return error("Invalid record"); +      VectorType *OpTy = +        dyn_cast_or_null<VectorType>(getTypeByID(Record[0])); +      if (!OpTy) +        return error("Invalid record"); +      Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); +      Constant *Op1 = nullptr; +      if (Record.size() == 4) { +        Type *IdxTy = getTypeByID(Record[2]); +        if (!IdxTy) +          return error("Invalid record"); +        Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy); +      } else // TODO: Remove with llvm 4.0 +        Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context)); +      if (!Op1) +        return error("Invalid record"); +      V = 
ConstantExpr::getExtractElement(Op0, Op1); +      break; +    } +    case bitc::CST_CODE_CE_INSERTELT +        : { // CE_INSERTELT: [opval, opval, opty, opval] +      VectorType *OpTy = dyn_cast<VectorType>(CurTy); +      if (Record.size() < 3 || !OpTy) +        return error("Invalid record"); +      Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy); +      Constant *Op1 = ValueList.getConstantFwdRef(Record[1], +                                                  OpTy->getElementType()); +      Constant *Op2 = nullptr; +      if (Record.size() == 4) { +        Type *IdxTy = getTypeByID(Record[2]); +        if (!IdxTy) +          return error("Invalid record"); +        Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy); +      } else // TODO: Remove with llvm 4.0 +        Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context)); +      if (!Op2) +        return error("Invalid record"); +      V = ConstantExpr::getInsertElement(Op0, Op1, Op2); +      break; +    } +    case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval] +      VectorType *OpTy = dyn_cast<VectorType>(CurTy); +      if (Record.size() < 3 || !OpTy) +        return error("Invalid record"); +      Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy); +      Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy); +      Type *ShufTy = VectorType::get(Type::getInt32Ty(Context), +                                                 OpTy->getNumElements()); +      Constant *Op2 = ValueList.getConstantFwdRef(Record[2], ShufTy); +      V = ConstantExpr::getShuffleVector(Op0, Op1, Op2); +      break; +    } +    case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval] +      VectorType *RTy = dyn_cast<VectorType>(CurTy); +      VectorType *OpTy = +        dyn_cast_or_null<VectorType>(getTypeByID(Record[0])); +      if (Record.size() < 4 || !RTy || !OpTy) +        return error("Invalid record"); +      Constant *Op0 = 
ValueList.getConstantFwdRef(Record[1], OpTy); +      Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy); +      Type *ShufTy = VectorType::get(Type::getInt32Ty(Context), +                                                 RTy->getNumElements()); +      Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy); +      V = ConstantExpr::getShuffleVector(Op0, Op1, Op2); +      break; +    } +    case bitc::CST_CODE_CE_CMP: {     // CE_CMP: [opty, opval, opval, pred] +      if (Record.size() < 4) +        return error("Invalid record"); +      Type *OpTy = getTypeByID(Record[0]); +      if (!OpTy) +        return error("Invalid record"); +      Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); +      Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy); + +      if (OpTy->isFPOrFPVectorTy()) +        V = ConstantExpr::getFCmp(Record[3], Op0, Op1); +      else +        V = ConstantExpr::getICmp(Record[3], Op0, Op1); +      break; +    } +    // This maintains backward compatibility, pre-asm dialect keywords. +    // FIXME: Remove with the 4.0 release. 
+    case bitc::CST_CODE_INLINEASM_OLD: { +      if (Record.size() < 2) +        return error("Invalid record"); +      std::string AsmStr, ConstrStr; +      bool HasSideEffects = Record[0] & 1; +      bool IsAlignStack = Record[0] >> 1; +      unsigned AsmStrSize = Record[1]; +      if (2+AsmStrSize >= Record.size()) +        return error("Invalid record"); +      unsigned ConstStrSize = Record[2+AsmStrSize]; +      if (3+AsmStrSize+ConstStrSize > Record.size()) +        return error("Invalid record"); + +      for (unsigned i = 0; i != AsmStrSize; ++i) +        AsmStr += (char)Record[2+i]; +      for (unsigned i = 0; i != ConstStrSize; ++i) +        ConstrStr += (char)Record[3+AsmStrSize+i]; +      PointerType *PTy = cast<PointerType>(CurTy); +      UpgradeInlineAsmString(&AsmStr); +      V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()), +                         AsmStr, ConstrStr, HasSideEffects, IsAlignStack); +      break; +    } +    // This version adds support for the asm dialect keywords (e.g., +    // inteldialect). 
+    case bitc::CST_CODE_INLINEASM: { +      if (Record.size() < 2) +        return error("Invalid record"); +      std::string AsmStr, ConstrStr; +      bool HasSideEffects = Record[0] & 1; +      bool IsAlignStack = (Record[0] >> 1) & 1; +      unsigned AsmDialect = Record[0] >> 2; +      unsigned AsmStrSize = Record[1]; +      if (2+AsmStrSize >= Record.size()) +        return error("Invalid record"); +      unsigned ConstStrSize = Record[2+AsmStrSize]; +      if (3+AsmStrSize+ConstStrSize > Record.size()) +        return error("Invalid record"); + +      for (unsigned i = 0; i != AsmStrSize; ++i) +        AsmStr += (char)Record[2+i]; +      for (unsigned i = 0; i != ConstStrSize; ++i) +        ConstrStr += (char)Record[3+AsmStrSize+i]; +      PointerType *PTy = cast<PointerType>(CurTy); +      UpgradeInlineAsmString(&AsmStr); +      V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()), +                         AsmStr, ConstrStr, HasSideEffects, IsAlignStack, +                         InlineAsm::AsmDialect(AsmDialect)); +      break; +    } +    case bitc::CST_CODE_BLOCKADDRESS:{ +      if (Record.size() < 3) +        return error("Invalid record"); +      Type *FnTy = getTypeByID(Record[0]); +      if (!FnTy) +        return error("Invalid record"); +      Function *Fn = +        dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy)); +      if (!Fn) +        return error("Invalid record"); + +      // If the function is already parsed we can insert the block address right +      // away. +      BasicBlock *BB; +      unsigned BBID = Record[2]; +      if (!BBID) +        // Invalid reference to entry block. 
+        return error("Invalid ID"); +      if (!Fn->empty()) { +        Function::iterator BBI = Fn->begin(), BBE = Fn->end(); +        for (size_t I = 0, E = BBID; I != E; ++I) { +          if (BBI == BBE) +            return error("Invalid ID"); +          ++BBI; +        } +        BB = &*BBI; +      } else { +        // Otherwise insert a placeholder and remember it so it can be inserted +        // when the function is parsed. +        auto &FwdBBs = BasicBlockFwdRefs[Fn]; +        if (FwdBBs.empty()) +          BasicBlockFwdRefQueue.push_back(Fn); +        if (FwdBBs.size() < BBID + 1) +          FwdBBs.resize(BBID + 1); +        if (!FwdBBs[BBID]) +          FwdBBs[BBID] = BasicBlock::Create(Context); +        BB = FwdBBs[BBID]; +      } +      V = BlockAddress::get(Fn, BB); +      break; +    } +    } + +    ValueList.assignValue(V, NextCstNo); +    ++NextCstNo; +  } +} + +Error BitcodeReader::parseUseLists() { +  if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID)) +    return error("Invalid record"); + +  // Read all the records. +  SmallVector<uint64_t, 64> Record; + +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a use list record. +    Record.clear(); +    bool IsBB = false; +    switch (Stream.readRecord(Entry.ID, Record)) { +    default:  // Default behavior: unknown type. +      break; +    case bitc::USELIST_CODE_BB: +      IsBB = true; +      LLVM_FALLTHROUGH; +    case bitc::USELIST_CODE_DEFAULT: { +      unsigned RecordLength = Record.size(); +      if (RecordLength < 3) +        // Records should have at least an ID and two indexes. 
+        return error("Invalid record"); +      unsigned ID = Record.back(); +      Record.pop_back(); + +      Value *V; +      if (IsBB) { +        assert(ID < FunctionBBs.size() && "Basic block not found"); +        V = FunctionBBs[ID]; +      } else +        V = ValueList[ID]; +      unsigned NumUses = 0; +      SmallDenseMap<const Use *, unsigned, 16> Order; +      for (const Use &U : V->materialized_uses()) { +        if (++NumUses > Record.size()) +          break; +        Order[&U] = Record[NumUses - 1]; +      } +      if (Order.size() != Record.size() || NumUses > Record.size()) +        // Mismatches can happen if the functions are being materialized lazily +        // (out-of-order), or a value has been upgraded. +        break; + +      V->sortUseList([&](const Use &L, const Use &R) { +        return Order.lookup(&L) < Order.lookup(&R); +      }); +      break; +    } +    } +  } +} + +/// When we see the block for metadata, remember where it is and then skip it. +/// This lets us lazily deserialize the metadata. +Error BitcodeReader::rememberAndSkipMetadata() { +  // Save the current stream state. +  uint64_t CurBit = Stream.GetCurrentBitNo(); +  DeferredMetadataInfo.push_back(CurBit); + +  // Skip over the block for now. +  if (Stream.SkipBlock()) +    return error("Invalid record"); +  return Error::success(); +} + +Error BitcodeReader::materializeMetadata() { +  for (uint64_t BitPos : DeferredMetadataInfo) { +    // Move the bit stream to the saved position. +    Stream.JumpToBit(BitPos); +    if (Error Err = MDLoader->parseModuleMetadata()) +      return Err; +  } + +  // Upgrade "Linker Options" module flag to "llvm.linker.options" module-level +  // metadata. 
+  if (Metadata *Val = TheModule->getModuleFlag("Linker Options")) { +    NamedMDNode *LinkerOpts = +        TheModule->getOrInsertNamedMetadata("llvm.linker.options"); +    for (const MDOperand &MDOptions : cast<MDNode>(Val)->operands()) +      LinkerOpts->addOperand(cast<MDNode>(MDOptions)); +  } + +  DeferredMetadataInfo.clear(); +  return Error::success(); +} + +void BitcodeReader::setStripDebugInfo() { StripDebugInfo = true; } + +/// When we see the block for a function body, remember where it is and then +/// skip it.  This lets us lazily deserialize the functions. +Error BitcodeReader::rememberAndSkipFunctionBody() { +  // Get the function we are talking about. +  if (FunctionsWithBodies.empty()) +    return error("Insufficient function protos"); + +  Function *Fn = FunctionsWithBodies.back(); +  FunctionsWithBodies.pop_back(); + +  // Save the current stream state. +  uint64_t CurBit = Stream.GetCurrentBitNo(); +  assert( +      (DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) && +      "Mismatch between VST and scanned function offsets"); +  DeferredFunctionInfo[Fn] = CurBit; + +  // Skip over the function block for now. +  if (Stream.SkipBlock()) +    return error("Invalid record"); +  return Error::success(); +} + +Error BitcodeReader::globalCleanup() { +  // Patch the initializers for globals and aliases up. 
+  if (Error Err = resolveGlobalAndIndirectSymbolInits()) +    return Err; +  if (!GlobalInits.empty() || !IndirectSymbolInits.empty()) +    return error("Malformed global initializer set"); + +  // Look for intrinsic functions which need to be upgraded at some point +  for (Function &F : *TheModule) { +    MDLoader->upgradeDebugIntrinsics(F); +    Function *NewFn; +    if (UpgradeIntrinsicFunction(&F, NewFn)) +      UpgradedIntrinsics[&F] = NewFn; +    else if (auto Remangled = Intrinsic::remangleIntrinsicFunction(&F)) +      // Some types could be renamed during loading if several modules are +      // loaded in the same LLVMContext (LTO scenario). In this case we should +      // remangle intrinsics names as well. +      RemangledIntrinsics[&F] = Remangled.getValue(); +  } + +  // Look for global variables which need to be renamed. +  for (GlobalVariable &GV : TheModule->globals()) +    UpgradeGlobalVariable(&GV); + +  // Force deallocation of memory for these vectors to favor the client that +  // want lazy deserialization. +  std::vector<std::pair<GlobalVariable *, unsigned>>().swap(GlobalInits); +  std::vector<std::pair<GlobalIndirectSymbol *, unsigned>>().swap( +      IndirectSymbolInits); +  return Error::success(); +} + +/// Support for lazy parsing of function bodies. This is required if we +/// either have an old bitcode file without a VST forward declaration record, +/// or if we have an anonymous function being materialized, since anonymous +/// functions do not have a name and are therefore not in the VST. +Error BitcodeReader::rememberAndSkipFunctionBodies() { +  Stream.JumpToBit(NextUnreadBit); + +  if (Stream.AtEndOfStream()) +    return error("Could not find function in stream"); + +  if (!SeenFirstFunctionBody) +    return error("Trying to materialize functions before seeing function blocks"); + +  // An old bitcode file with the symbol table at the end would have +  // finished the parse greedily. 
+  assert(SeenValueSymbolTable); + +  SmallVector<uint64_t, 64> Record; + +  while (true) { +    BitstreamEntry Entry = Stream.advance(); +    switch (Entry.Kind) { +    default: +      return error("Expect SubBlock"); +    case BitstreamEntry::SubBlock: +      switch (Entry.ID) { +      default: +        return error("Expect function block"); +      case bitc::FUNCTION_BLOCK_ID: +        if (Error Err = rememberAndSkipFunctionBody()) +          return Err; +        NextUnreadBit = Stream.GetCurrentBitNo(); +        return Error::success(); +      } +    } +  } +} + +bool BitcodeReaderBase::readBlockInfo() { +  Optional<BitstreamBlockInfo> NewBlockInfo = Stream.ReadBlockInfoBlock(); +  if (!NewBlockInfo) +    return true; +  BlockInfo = std::move(*NewBlockInfo); +  return false; +} + +Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) { +  // v1: [selection_kind, name] +  // v2: [strtab_offset, strtab_size, selection_kind] +  StringRef Name; +  std::tie(Name, Record) = readNameFromStrtab(Record); + +  if (Record.empty()) +    return error("Invalid record"); +  Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]); +  std::string OldFormatName; +  if (!UseStrtab) { +    if (Record.size() < 2) +      return error("Invalid record"); +    unsigned ComdatNameSize = Record[1]; +    OldFormatName.reserve(ComdatNameSize); +    for (unsigned i = 0; i != ComdatNameSize; ++i) +      OldFormatName += (char)Record[2 + i]; +    Name = OldFormatName; +  } +  Comdat *C = TheModule->getOrInsertComdat(Name); +  C->setSelectionKind(SK); +  ComdatList.push_back(C); +  return Error::success(); +} + +static void inferDSOLocal(GlobalValue *GV) { +  // infer dso_local from linkage and visibility if it is not encoded. 
+  if (GV->hasLocalLinkage() || +      (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())) +    GV->setDSOLocal(true); +} + +Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) { +  // v1: [pointer type, isconst, initid, linkage, alignment, section, +  // visibility, threadlocal, unnamed_addr, externally_initialized, +  // dllstorageclass, comdat, attributes, preemption specifier] (name in VST) +  // v2: [strtab_offset, strtab_size, v1] +  StringRef Name; +  std::tie(Name, Record) = readNameFromStrtab(Record); + +  if (Record.size() < 6) +    return error("Invalid record"); +  Type *Ty = getTypeByID(Record[0]); +  if (!Ty) +    return error("Invalid record"); +  bool isConstant = Record[1] & 1; +  bool explicitType = Record[1] & 2; +  unsigned AddressSpace; +  if (explicitType) { +    AddressSpace = Record[1] >> 2; +  } else { +    if (!Ty->isPointerTy()) +      return error("Invalid type for value"); +    AddressSpace = cast<PointerType>(Ty)->getAddressSpace(); +    Ty = cast<PointerType>(Ty)->getElementType(); +  } + +  uint64_t RawLinkage = Record[3]; +  GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage); +  unsigned Alignment; +  if (Error Err = parseAlignmentValue(Record[4], Alignment)) +    return Err; +  std::string Section; +  if (Record[5]) { +    if (Record[5] - 1 >= SectionTable.size()) +      return error("Invalid ID"); +    Section = SectionTable[Record[5] - 1]; +  } +  GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility; +  // Local linkage must have default visibility. +  if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage)) +    // FIXME: Change to an error if non-default in 4.0. 
+    Visibility = getDecodedVisibility(Record[6]); + +  GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal; +  if (Record.size() > 7) +    TLM = getDecodedThreadLocalMode(Record[7]); + +  GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None; +  if (Record.size() > 8) +    UnnamedAddr = getDecodedUnnamedAddrType(Record[8]); + +  bool ExternallyInitialized = false; +  if (Record.size() > 9) +    ExternallyInitialized = Record[9]; + +  GlobalVariable *NewGV = +      new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, Name, +                         nullptr, TLM, AddressSpace, ExternallyInitialized); +  NewGV->setAlignment(Alignment); +  if (!Section.empty()) +    NewGV->setSection(Section); +  NewGV->setVisibility(Visibility); +  NewGV->setUnnamedAddr(UnnamedAddr); + +  if (Record.size() > 10) +    NewGV->setDLLStorageClass(getDecodedDLLStorageClass(Record[10])); +  else +    upgradeDLLImportExportLinkage(NewGV, RawLinkage); + +  ValueList.push_back(NewGV); + +  // Remember which value to use for the global initializer. 
+  if (unsigned InitID = Record[2]) +    GlobalInits.push_back(std::make_pair(NewGV, InitID - 1)); + +  if (Record.size() > 11) { +    if (unsigned ComdatID = Record[11]) { +      if (ComdatID > ComdatList.size()) +        return error("Invalid global variable comdat ID"); +      NewGV->setComdat(ComdatList[ComdatID - 1]); +    } +  } else if (hasImplicitComdat(RawLinkage)) { +    NewGV->setComdat(reinterpret_cast<Comdat *>(1)); +  } + +  if (Record.size() > 12) { +    auto AS = getAttributes(Record[12]).getFnAttributes(); +    NewGV->setAttributes(AS); +  } + +  if (Record.size() > 13) { +    NewGV->setDSOLocal(getDecodedDSOLocal(Record[13])); +  } +  inferDSOLocal(NewGV); + +  return Error::success(); +} + +Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) { +  // v1: [type, callingconv, isproto, linkage, paramattr, alignment, section, +  // visibility, gc, unnamed_addr, prologuedata, dllstorageclass, comdat, +  // prefixdata,  personalityfn, preemption specifier] (name in VST) +  // v2: [strtab_offset, strtab_size, v1] +  StringRef Name; +  std::tie(Name, Record) = readNameFromStrtab(Record); + +  if (Record.size() < 8) +    return error("Invalid record"); +  Type *Ty = getTypeByID(Record[0]); +  if (!Ty) +    return error("Invalid record"); +  if (auto *PTy = dyn_cast<PointerType>(Ty)) +    Ty = PTy->getElementType(); +  auto *FTy = dyn_cast<FunctionType>(Ty); +  if (!FTy) +    return error("Invalid type for value"); +  auto CC = static_cast<CallingConv::ID>(Record[1]); +  if (CC & ~CallingConv::MaxID) +    return error("Invalid calling convention ID"); + +  Function *Func = +      Function::Create(FTy, GlobalValue::ExternalLinkage, Name, TheModule); + +  Func->setCallingConv(CC); +  bool isProto = Record[2]; +  uint64_t RawLinkage = Record[3]; +  Func->setLinkage(getDecodedLinkage(RawLinkage)); +  Func->setAttributes(getAttributes(Record[4])); + +  unsigned Alignment; +  if (Error Err = parseAlignmentValue(Record[5], Alignment)) +    return 
Err; +  Func->setAlignment(Alignment); +  if (Record[6]) { +    if (Record[6] - 1 >= SectionTable.size()) +      return error("Invalid ID"); +    Func->setSection(SectionTable[Record[6] - 1]); +  } +  // Local linkage must have default visibility. +  if (!Func->hasLocalLinkage()) +    // FIXME: Change to an error if non-default in 4.0. +    Func->setVisibility(getDecodedVisibility(Record[7])); +  if (Record.size() > 8 && Record[8]) { +    if (Record[8] - 1 >= GCTable.size()) +      return error("Invalid ID"); +    Func->setGC(GCTable[Record[8] - 1]); +  } +  GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None; +  if (Record.size() > 9) +    UnnamedAddr = getDecodedUnnamedAddrType(Record[9]); +  Func->setUnnamedAddr(UnnamedAddr); +  if (Record.size() > 10 && Record[10] != 0) +    FunctionPrologues.push_back(std::make_pair(Func, Record[10] - 1)); + +  if (Record.size() > 11) +    Func->setDLLStorageClass(getDecodedDLLStorageClass(Record[11])); +  else +    upgradeDLLImportExportLinkage(Func, RawLinkage); + +  if (Record.size() > 12) { +    if (unsigned ComdatID = Record[12]) { +      if (ComdatID > ComdatList.size()) +        return error("Invalid function comdat ID"); +      Func->setComdat(ComdatList[ComdatID - 1]); +    } +  } else if (hasImplicitComdat(RawLinkage)) { +    Func->setComdat(reinterpret_cast<Comdat *>(1)); +  } + +  if (Record.size() > 13 && Record[13] != 0) +    FunctionPrefixes.push_back(std::make_pair(Func, Record[13] - 1)); + +  if (Record.size() > 14 && Record[14] != 0) +    FunctionPersonalityFns.push_back(std::make_pair(Func, Record[14] - 1)); + +  if (Record.size() > 15) { +    Func->setDSOLocal(getDecodedDSOLocal(Record[15])); +  } +  inferDSOLocal(Func); + +  ValueList.push_back(Func); + +  // If this is a function with a body, remember the prototype we are +  // creating now, so that we can match up the body with them later. 
+  if (!isProto) { +    Func->setIsMaterializable(true); +    FunctionsWithBodies.push_back(Func); +    DeferredFunctionInfo[Func] = 0; +  } +  return Error::success(); +} + +Error BitcodeReader::parseGlobalIndirectSymbolRecord( +    unsigned BitCode, ArrayRef<uint64_t> Record) { +  // v1 ALIAS_OLD: [alias type, aliasee val#, linkage] (name in VST) +  // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility, +  // dllstorageclass, threadlocal, unnamed_addr, +  // preemption specifier] (name in VST) +  // v1 IFUNC: [alias type, addrspace, aliasee val#, linkage, +  // visibility, dllstorageclass, threadlocal, unnamed_addr, +  // preemption specifier] (name in VST) +  // v2: [strtab_offset, strtab_size, v1] +  StringRef Name; +  std::tie(Name, Record) = readNameFromStrtab(Record); + +  bool NewRecord = BitCode != bitc::MODULE_CODE_ALIAS_OLD; +  if (Record.size() < (3 + (unsigned)NewRecord)) +    return error("Invalid record"); +  unsigned OpNum = 0; +  Type *Ty = getTypeByID(Record[OpNum++]); +  if (!Ty) +    return error("Invalid record"); + +  unsigned AddrSpace; +  if (!NewRecord) { +    auto *PTy = dyn_cast<PointerType>(Ty); +    if (!PTy) +      return error("Invalid type for value"); +    Ty = PTy->getElementType(); +    AddrSpace = PTy->getAddressSpace(); +  } else { +    AddrSpace = Record[OpNum++]; +  } + +  auto Val = Record[OpNum++]; +  auto Linkage = Record[OpNum++]; +  GlobalIndirectSymbol *NewGA; +  if (BitCode == bitc::MODULE_CODE_ALIAS || +      BitCode == bitc::MODULE_CODE_ALIAS_OLD) +    NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name, +                                TheModule); +  else +    NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name, +                                nullptr, TheModule); +  // Old bitcode files didn't have visibility field. +  // Local linkage must have default visibility. 
+  if (OpNum != Record.size()) { +    auto VisInd = OpNum++; +    if (!NewGA->hasLocalLinkage()) +      // FIXME: Change to an error if non-default in 4.0. +      NewGA->setVisibility(getDecodedVisibility(Record[VisInd])); +  } +  if (BitCode == bitc::MODULE_CODE_ALIAS || +      BitCode == bitc::MODULE_CODE_ALIAS_OLD) { +    if (OpNum != Record.size()) +      NewGA->setDLLStorageClass(getDecodedDLLStorageClass(Record[OpNum++])); +    else +      upgradeDLLImportExportLinkage(NewGA, Linkage); +    if (OpNum != Record.size()) +      NewGA->setThreadLocalMode(getDecodedThreadLocalMode(Record[OpNum++])); +    if (OpNum != Record.size()) +      NewGA->setUnnamedAddr(getDecodedUnnamedAddrType(Record[OpNum++])); +  } +  if (OpNum != Record.size()) +    NewGA->setDSOLocal(getDecodedDSOLocal(Record[OpNum++])); +  inferDSOLocal(NewGA); + +  ValueList.push_back(NewGA); +  IndirectSymbolInits.push_back(std::make_pair(NewGA, Val)); +  return Error::success(); +} + +Error BitcodeReader::parseModule(uint64_t ResumeBit, +                                 bool ShouldLazyLoadMetadata) { +  if (ResumeBit) +    Stream.JumpToBit(ResumeBit); +  else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; + +  // Read all the records for this module. +  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return globalCleanup(); + +    case BitstreamEntry::SubBlock: +      switch (Entry.ID) { +      default:  // Skip unknown content. 
+        if (Stream.SkipBlock()) +          return error("Invalid record"); +        break; +      case bitc::BLOCKINFO_BLOCK_ID: +        if (readBlockInfo()) +          return error("Malformed block"); +        break; +      case bitc::PARAMATTR_BLOCK_ID: +        if (Error Err = parseAttributeBlock()) +          return Err; +        break; +      case bitc::PARAMATTR_GROUP_BLOCK_ID: +        if (Error Err = parseAttributeGroupBlock()) +          return Err; +        break; +      case bitc::TYPE_BLOCK_ID_NEW: +        if (Error Err = parseTypeTable()) +          return Err; +        break; +      case bitc::VALUE_SYMTAB_BLOCK_ID: +        if (!SeenValueSymbolTable) { +          // Either this is an old form VST without function index and an +          // associated VST forward declaration record (which would have caused +          // the VST to be jumped to and parsed before it was encountered +          // normally in the stream), or there were no function blocks to +          // trigger an earlier parsing of the VST. +          assert(VSTOffset == 0 || FunctionsWithBodies.empty()); +          if (Error Err = parseValueSymbolTable()) +            return Err; +          SeenValueSymbolTable = true; +        } else { +          // We must have had a VST forward declaration record, which caused +          // the parser to jump to and parse the VST earlier. 
+          assert(VSTOffset > 0); +          if (Stream.SkipBlock()) +            return error("Invalid record"); +        } +        break; +      case bitc::CONSTANTS_BLOCK_ID: +        if (Error Err = parseConstants()) +          return Err; +        if (Error Err = resolveGlobalAndIndirectSymbolInits()) +          return Err; +        break; +      case bitc::METADATA_BLOCK_ID: +        if (ShouldLazyLoadMetadata) { +          if (Error Err = rememberAndSkipMetadata()) +            return Err; +          break; +        } +        assert(DeferredMetadataInfo.empty() && "Unexpected deferred metadata"); +        if (Error Err = MDLoader->parseModuleMetadata()) +          return Err; +        break; +      case bitc::METADATA_KIND_BLOCK_ID: +        if (Error Err = MDLoader->parseMetadataKinds()) +          return Err; +        break; +      case bitc::FUNCTION_BLOCK_ID: +        // If this is the first function body we've seen, reverse the +        // FunctionsWithBodies list. +        if (!SeenFirstFunctionBody) { +          std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end()); +          if (Error Err = globalCleanup()) +            return Err; +          SeenFirstFunctionBody = true; +        } + +        if (VSTOffset > 0) { +          // If we have a VST forward declaration record, make sure we +          // parse the VST now if we haven't already. It is needed to +          // set up the DeferredFunctionInfo vector for lazy reading. +          if (!SeenValueSymbolTable) { +            if (Error Err = BitcodeReader::parseValueSymbolTable(VSTOffset)) +              return Err; +            SeenValueSymbolTable = true; +            // Fall through so that we record the NextUnreadBit below. +            // This is necessary in case we have an anonymous function that +            // is later materialized. Since it will not have a VST entry we +            // need to fall back to the lazy parse to find its offset. 
+          } else { +            // If we have a VST forward declaration record, but have already +            // parsed the VST (just above, when the first function body was +            // encountered here), then we are resuming the parse after +            // materializing functions. The ResumeBit points to the +            // start of the last function block recorded in the +            // DeferredFunctionInfo map. Skip it. +            if (Stream.SkipBlock()) +              return error("Invalid record"); +            continue; +          } +        } + +        // Support older bitcode files that did not have the function +        // index in the VST, nor a VST forward declaration record, as +        // well as anonymous functions that do not have VST entries. +        // Build the DeferredFunctionInfo vector on the fly. +        if (Error Err = rememberAndSkipFunctionBody()) +          return Err; + +        // Suspend parsing when we reach the function bodies. Subsequent +        // materialization calls will resume it when necessary. If the bitcode +        // file is old, the symbol table will be at the end instead and will not +        // have been seen yet. In this case, just finish the parse now. +        if (SeenValueSymbolTable) { +          NextUnreadBit = Stream.GetCurrentBitNo(); +          // After the VST has been parsed, we need to make sure intrinsic name +          // are auto-upgraded. +          return globalCleanup(); +        } +        break; +      case bitc::USELIST_BLOCK_ID: +        if (Error Err = parseUseLists()) +          return Err; +        break; +      case bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID: +        if (Error Err = parseOperandBundleTags()) +          return Err; +        break; +      case bitc::SYNC_SCOPE_NAMES_BLOCK_ID: +        if (Error Err = parseSyncScopeNames()) +          return Err; +        break; +      } +      continue; + +    case BitstreamEntry::Record: +      // The interesting case. 
+      break; +    } + +    // Read a record. +    auto BitCode = Stream.readRecord(Entry.ID, Record); +    switch (BitCode) { +    default: break;  // Default behavior, ignore unknown content. +    case bitc::MODULE_CODE_VERSION: { +      Expected<unsigned> VersionOrErr = parseVersionRecord(Record); +      if (!VersionOrErr) +        return VersionOrErr.takeError(); +      UseRelativeIDs = *VersionOrErr >= 1; +      break; +    } +    case bitc::MODULE_CODE_TRIPLE: {  // TRIPLE: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      TheModule->setTargetTriple(S); +      break; +    } +    case bitc::MODULE_CODE_DATALAYOUT: {  // DATALAYOUT: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      TheModule->setDataLayout(S); +      break; +    } +    case bitc::MODULE_CODE_ASM: {  // ASM: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      TheModule->setModuleInlineAsm(S); +      break; +    } +    case bitc::MODULE_CODE_DEPLIB: {  // DEPLIB: [strchr x N] +      // FIXME: Remove in 4.0. +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      // Ignore value. 
+      break; +    } +    case bitc::MODULE_CODE_SECTIONNAME: {  // SECTIONNAME: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      SectionTable.push_back(S); +      break; +    } +    case bitc::MODULE_CODE_GCNAME: {  // SECTIONNAME: [strchr x N] +      std::string S; +      if (convertToString(Record, 0, S)) +        return error("Invalid record"); +      GCTable.push_back(S); +      break; +    } +    case bitc::MODULE_CODE_COMDAT: +      if (Error Err = parseComdatRecord(Record)) +        return Err; +      break; +    case bitc::MODULE_CODE_GLOBALVAR: +      if (Error Err = parseGlobalVarRecord(Record)) +        return Err; +      break; +    case bitc::MODULE_CODE_FUNCTION: +      if (Error Err = parseFunctionRecord(Record)) +        return Err; +      break; +    case bitc::MODULE_CODE_IFUNC: +    case bitc::MODULE_CODE_ALIAS: +    case bitc::MODULE_CODE_ALIAS_OLD: +      if (Error Err = parseGlobalIndirectSymbolRecord(BitCode, Record)) +        return Err; +      break; +    /// MODULE_CODE_VSTOFFSET: [offset] +    case bitc::MODULE_CODE_VSTOFFSET: +      if (Record.size() < 1) +        return error("Invalid record"); +      // Note that we subtract 1 here because the offset is relative to one word +      // before the start of the identification or module block, which was +      // historically always the start of the regular bitcode header. 
+      VSTOffset = Record[0] - 1; +      break; +    /// MODULE_CODE_SOURCE_FILENAME: [namechar x N] +    case bitc::MODULE_CODE_SOURCE_FILENAME: +      SmallString<128> ValueName; +      if (convertToString(Record, 0, ValueName)) +        return error("Invalid record"); +      TheModule->setSourceFileName(ValueName); +      break; +    } +    Record.clear(); +  } +} + +Error BitcodeReader::parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata, +                                      bool IsImporting) { +  TheModule = M; +  MDLoader = MetadataLoader(Stream, *M, ValueList, IsImporting, +                            [&](unsigned ID) { return getTypeByID(ID); }); +  return parseModule(0, ShouldLazyLoadMetadata); +} + +Error BitcodeReader::typeCheckLoadStoreInst(Type *ValType, Type *PtrType) { +  if (!isa<PointerType>(PtrType)) +    return error("Load/Store operand is not a pointer type"); +  Type *ElemType = cast<PointerType>(PtrType)->getElementType(); + +  if (ValType && ValType != ElemType) +    return error("Explicit load/store type does not match pointee " +                 "type of pointer operand"); +  if (!PointerType::isLoadableOrStorableType(ElemType)) +    return error("Cannot load/store from pointer"); +  return Error::success(); +} + +/// Lazily parse the specified function body block. +Error BitcodeReader::parseFunctionBody(Function *F) { +  if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID)) +    return error("Invalid record"); + +  // Unexpected unresolved metadata when parsing function. +  if (MDLoader->hasFwdRefs()) +    return error("Invalid function metadata: incoming forward references"); + +  InstructionList.clear(); +  unsigned ModuleValueListSize = ValueList.size(); +  unsigned ModuleMDLoaderSize = MDLoader->size(); + +  // Add all the function arguments to the value table. 
+  for (Argument &I : F->args()) +    ValueList.push_back(&I); + +  unsigned NextValueNo = ValueList.size(); +  BasicBlock *CurBB = nullptr; +  unsigned CurBBNo = 0; + +  DebugLoc LastLoc; +  auto getLastInstruction = [&]() -> Instruction * { +    if (CurBB && !CurBB->empty()) +      return &CurBB->back(); +    else if (CurBBNo && FunctionBBs[CurBBNo - 1] && +             !FunctionBBs[CurBBNo - 1]->empty()) +      return &FunctionBBs[CurBBNo - 1]->back(); +    return nullptr; +  }; + +  std::vector<OperandBundleDef> OperandBundles; + +  // Read all the records. +  SmallVector<uint64_t, 64> Record; + +  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      goto OutOfRecordLoop; + +    case BitstreamEntry::SubBlock: +      switch (Entry.ID) { +      default:  // Skip unknown content. +        if (Stream.SkipBlock()) +          return error("Invalid record"); +        break; +      case bitc::CONSTANTS_BLOCK_ID: +        if (Error Err = parseConstants()) +          return Err; +        NextValueNo = ValueList.size(); +        break; +      case bitc::VALUE_SYMTAB_BLOCK_ID: +        if (Error Err = parseValueSymbolTable()) +          return Err; +        break; +      case bitc::METADATA_ATTACHMENT_ID: +        if (Error Err = MDLoader->parseMetadataAttachment(*F, InstructionList)) +          return Err; +        break; +      case bitc::METADATA_BLOCK_ID: +        assert(DeferredMetadataInfo.empty() && +               "Must read all module-level metadata before function-level"); +        if (Error Err = MDLoader->parseFunctionMetadata()) +          return Err; +        break; +      case bitc::USELIST_BLOCK_ID: +        if (Error Err = parseUseLists()) +          return Err; +        break; +      } +      continue; + +    case BitstreamEntry::Record: +      // The interesting case. 
+      break; +    } + +    // Read a record. +    Record.clear(); +    Instruction *I = nullptr; +    unsigned BitCode = Stream.readRecord(Entry.ID, Record); +    switch (BitCode) { +    default: // Default behavior: reject +      return error("Invalid value"); +    case bitc::FUNC_CODE_DECLAREBLOCKS: {   // DECLAREBLOCKS: [nblocks] +      if (Record.size() < 1 || Record[0] == 0) +        return error("Invalid record"); +      // Create all the basic blocks for the function. +      FunctionBBs.resize(Record[0]); + +      // See if anything took the address of blocks in this function. +      auto BBFRI = BasicBlockFwdRefs.find(F); +      if (BBFRI == BasicBlockFwdRefs.end()) { +        for (unsigned i = 0, e = FunctionBBs.size(); i != e; ++i) +          FunctionBBs[i] = BasicBlock::Create(Context, "", F); +      } else { +        auto &BBRefs = BBFRI->second; +        // Check for invalid basic block references. +        if (BBRefs.size() > FunctionBBs.size()) +          return error("Invalid ID"); +        assert(!BBRefs.empty() && "Unexpected empty array"); +        assert(!BBRefs.front() && "Invalid reference to entry block"); +        for (unsigned I = 0, E = FunctionBBs.size(), RE = BBRefs.size(); I != E; +             ++I) +          if (I < RE && BBRefs[I]) { +            BBRefs[I]->insertInto(F); +            FunctionBBs[I] = BBRefs[I]; +          } else { +            FunctionBBs[I] = BasicBlock::Create(Context, "", F); +          } + +        // Erase from the table. +        BasicBlockFwdRefs.erase(BBFRI); +      } + +      CurBB = FunctionBBs[0]; +      continue; +    } + +    case bitc::FUNC_CODE_DEBUG_LOC_AGAIN:  // DEBUG_LOC_AGAIN +      // This record indicates that the last instruction is at the same +      // location as the previous instruction with a location. 
+      I = getLastInstruction(); + +      if (!I) +        return error("Invalid record"); +      I->setDebugLoc(LastLoc); +      I = nullptr; +      continue; + +    case bitc::FUNC_CODE_DEBUG_LOC: {      // DEBUG_LOC: [line, col, scope, ia] +      I = getLastInstruction(); +      if (!I || Record.size() < 4) +        return error("Invalid record"); + +      unsigned Line = Record[0], Col = Record[1]; +      unsigned ScopeID = Record[2], IAID = Record[3]; + +      MDNode *Scope = nullptr, *IA = nullptr; +      if (ScopeID) { +        Scope = MDLoader->getMDNodeFwdRefOrNull(ScopeID - 1); +        if (!Scope) +          return error("Invalid record"); +      } +      if (IAID) { +        IA = MDLoader->getMDNodeFwdRefOrNull(IAID - 1); +        if (!IA) +          return error("Invalid record"); +      } +      LastLoc = DebugLoc::get(Line, Col, Scope, IA); +      I->setDebugLoc(LastLoc); +      I = nullptr; +      continue; +    } + +    case bitc::FUNC_CODE_INST_BINOP: {    // BINOP: [opval, ty, opval, opcode] +      unsigned OpNum = 0; +      Value *LHS, *RHS; +      if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || +          popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) || +          OpNum+1 > Record.size()) +        return error("Invalid record"); + +      int Opc = getDecodedBinaryOpcode(Record[OpNum++], LHS->getType()); +      if (Opc == -1) +        return error("Invalid record"); +      I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS); +      InstructionList.push_back(I); +      if (OpNum < Record.size()) { +        if (Opc == Instruction::Add || +            Opc == Instruction::Sub || +            Opc == Instruction::Mul || +            Opc == Instruction::Shl) { +          if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP)) +            cast<BinaryOperator>(I)->setHasNoSignedWrap(true); +          if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP)) +            cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true); +  
      } else if (Opc == Instruction::SDiv || +                   Opc == Instruction::UDiv || +                   Opc == Instruction::LShr || +                   Opc == Instruction::AShr) { +          if (Record[OpNum] & (1 << bitc::PEO_EXACT)) +            cast<BinaryOperator>(I)->setIsExact(true); +        } else if (isa<FPMathOperator>(I)) { +          FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]); +          if (FMF.any()) +            I->setFastMathFlags(FMF); +        } + +      } +      break; +    } +    case bitc::FUNC_CODE_INST_CAST: {    // CAST: [opval, opty, destty, castopc] +      unsigned OpNum = 0; +      Value *Op; +      if (getValueTypePair(Record, OpNum, NextValueNo, Op) || +          OpNum+2 != Record.size()) +        return error("Invalid record"); + +      Type *ResTy = getTypeByID(Record[OpNum]); +      int Opc = getDecodedCastOpcode(Record[OpNum + 1]); +      if (Opc == -1 || !ResTy) +        return error("Invalid record"); +      Instruction *Temp = nullptr; +      if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) { +        if (Temp) { +          InstructionList.push_back(Temp); +          CurBB->getInstList().push_back(Temp); +        } +      } else { +        auto CastOp = (Instruction::CastOps)Opc; +        if (!CastInst::castIsValid(CastOp, Op, ResTy)) +          return error("Invalid cast"); +        I = CastInst::Create(CastOp, Op, ResTy); +      } +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD: +    case bitc::FUNC_CODE_INST_GEP_OLD: +    case bitc::FUNC_CODE_INST_GEP: { // GEP: type, [n x operands] +      unsigned OpNum = 0; + +      Type *Ty; +      bool InBounds; + +      if (BitCode == bitc::FUNC_CODE_INST_GEP) { +        InBounds = Record[OpNum++]; +        Ty = getTypeByID(Record[OpNum++]); +      } else { +        InBounds = BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD; +        Ty = nullptr; +      } + +      Value *BasePtr; +      if 
(getValueTypePair(Record, OpNum, NextValueNo, BasePtr)) +        return error("Invalid record"); + +      if (!Ty) +        Ty = cast<PointerType>(BasePtr->getType()->getScalarType()) +                 ->getElementType(); +      else if (Ty != +               cast<PointerType>(BasePtr->getType()->getScalarType()) +                   ->getElementType()) +        return error( +            "Explicit gep type does not match pointee type of pointer operand"); + +      SmallVector<Value*, 16> GEPIdx; +      while (OpNum != Record.size()) { +        Value *Op; +        if (getValueTypePair(Record, OpNum, NextValueNo, Op)) +          return error("Invalid record"); +        GEPIdx.push_back(Op); +      } + +      I = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx); + +      InstructionList.push_back(I); +      if (InBounds) +        cast<GetElementPtrInst>(I)->setIsInBounds(true); +      break; +    } + +    case bitc::FUNC_CODE_INST_EXTRACTVAL: { +                                       // EXTRACTVAL: [opty, opval, n x indices] +      unsigned OpNum = 0; +      Value *Agg; +      if (getValueTypePair(Record, OpNum, NextValueNo, Agg)) +        return error("Invalid record"); + +      unsigned RecSize = Record.size(); +      if (OpNum == RecSize) +        return error("EXTRACTVAL: Invalid instruction with 0 indices"); + +      SmallVector<unsigned, 4> EXTRACTVALIdx; +      Type *CurTy = Agg->getType(); +      for (; OpNum != RecSize; ++OpNum) { +        bool IsArray = CurTy->isArrayTy(); +        bool IsStruct = CurTy->isStructTy(); +        uint64_t Index = Record[OpNum]; + +        if (!IsStruct && !IsArray) +          return error("EXTRACTVAL: Invalid type"); +        if ((unsigned)Index != Index) +          return error("Invalid value"); +        if (IsStruct && Index >= CurTy->subtypes().size()) +          return error("EXTRACTVAL: Invalid struct index"); +        if (IsArray && Index >= CurTy->getArrayNumElements()) +          return error("EXTRACTVAL: Invalid array 
index"); +        EXTRACTVALIdx.push_back((unsigned)Index); + +        if (IsStruct) +          CurTy = CurTy->subtypes()[Index]; +        else +          CurTy = CurTy->subtypes()[0]; +      } + +      I = ExtractValueInst::Create(Agg, EXTRACTVALIdx); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_INSERTVAL: { +                           // INSERTVAL: [opty, opval, opty, opval, n x indices] +      unsigned OpNum = 0; +      Value *Agg; +      if (getValueTypePair(Record, OpNum, NextValueNo, Agg)) +        return error("Invalid record"); +      Value *Val; +      if (getValueTypePair(Record, OpNum, NextValueNo, Val)) +        return error("Invalid record"); + +      unsigned RecSize = Record.size(); +      if (OpNum == RecSize) +        return error("INSERTVAL: Invalid instruction with 0 indices"); + +      SmallVector<unsigned, 4> INSERTVALIdx; +      Type *CurTy = Agg->getType(); +      for (; OpNum != RecSize; ++OpNum) { +        bool IsArray = CurTy->isArrayTy(); +        bool IsStruct = CurTy->isStructTy(); +        uint64_t Index = Record[OpNum]; + +        if (!IsStruct && !IsArray) +          return error("INSERTVAL: Invalid type"); +        if ((unsigned)Index != Index) +          return error("Invalid value"); +        if (IsStruct && Index >= CurTy->subtypes().size()) +          return error("INSERTVAL: Invalid struct index"); +        if (IsArray && Index >= CurTy->getArrayNumElements()) +          return error("INSERTVAL: Invalid array index"); + +        INSERTVALIdx.push_back((unsigned)Index); +        if (IsStruct) +          CurTy = CurTy->subtypes()[Index]; +        else +          CurTy = CurTy->subtypes()[0]; +      } + +      if (CurTy != Val->getType()) +        return error("Inserted value type doesn't match aggregate type"); + +      I = InsertValueInst::Create(Agg, Val, INSERTVALIdx); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_SELECT: { // SELECT: 
[opval, ty, opval, opval] +      // obsolete form of select +      // handles select i1 ... in old bitcode +      unsigned OpNum = 0; +      Value *TrueVal, *FalseVal, *Cond; +      if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || +          popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || +          popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond)) +        return error("Invalid record"); + +      I = SelectInst::Create(Cond, TrueVal, FalseVal); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_VSELECT: {// VSELECT: [ty,opval,opval,predty,pred] +      // new form of select +      // handles select i1 or select [N x i1] +      unsigned OpNum = 0; +      Value *TrueVal, *FalseVal, *Cond; +      if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || +          popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || +          getValueTypePair(Record, OpNum, NextValueNo, Cond)) +        return error("Invalid record"); + +      // select condition can be either i1 or [N x i1] +      if (VectorType* vector_type = +          dyn_cast<VectorType>(Cond->getType())) { +        // expect <n x i1> +        if (vector_type->getElementType() != Type::getInt1Ty(Context)) +          return error("Invalid type for value"); +      } else { +        // expect i1 +        if (Cond->getType() != Type::getInt1Ty(Context)) +          return error("Invalid type for value"); +      } + +      I = SelectInst::Create(Cond, TrueVal, FalseVal); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_EXTRACTELT: { // EXTRACTELT: [opty, opval, opval] +      unsigned OpNum = 0; +      Value *Vec, *Idx; +      if (getValueTypePair(Record, OpNum, NextValueNo, Vec) || +          getValueTypePair(Record, OpNum, NextValueNo, Idx)) +        return error("Invalid record"); +      if (!Vec->getType()->isVectorTy()) +        return error("Invalid type for 
value"); +      I = ExtractElementInst::Create(Vec, Idx); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_INSERTELT: { // INSERTELT: [ty, opval,opval,opval] +      unsigned OpNum = 0; +      Value *Vec, *Elt, *Idx; +      if (getValueTypePair(Record, OpNum, NextValueNo, Vec)) +        return error("Invalid record"); +      if (!Vec->getType()->isVectorTy()) +        return error("Invalid type for value"); +      if (popValue(Record, OpNum, NextValueNo, +                   cast<VectorType>(Vec->getType())->getElementType(), Elt) || +          getValueTypePair(Record, OpNum, NextValueNo, Idx)) +        return error("Invalid record"); +      I = InsertElementInst::Create(Vec, Elt, Idx); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_SHUFFLEVEC: {// SHUFFLEVEC: [opval,ty,opval,opval] +      unsigned OpNum = 0; +      Value *Vec1, *Vec2, *Mask; +      if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) || +          popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2)) +        return error("Invalid record"); + +      if (getValueTypePair(Record, OpNum, NextValueNo, Mask)) +        return error("Invalid record"); +      if (!Vec1->getType()->isVectorTy() || !Vec2->getType()->isVectorTy()) +        return error("Invalid type for value"); +      I = new ShuffleVectorInst(Vec1, Vec2, Mask); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_CMP:   // CMP: [opty, opval, opval, pred] +      // Old form of ICmp/FCmp returning bool +      // Existed to differentiate between icmp/fcmp and vicmp/vfcmp which were +      // both legal on vectors but had different behaviour. 
+    case bitc::FUNC_CODE_INST_CMP2: { // CMP2: [opty, opval, opval, pred] +      // FCmp/ICmp returning bool or vector of bool + +      unsigned OpNum = 0; +      Value *LHS, *RHS; +      if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || +          popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS)) +        return error("Invalid record"); + +      unsigned PredVal = Record[OpNum]; +      bool IsFP = LHS->getType()->isFPOrFPVectorTy(); +      FastMathFlags FMF; +      if (IsFP && Record.size() > OpNum+1) +        FMF = getDecodedFastMathFlags(Record[++OpNum]); + +      if (OpNum+1 != Record.size()) +        return error("Invalid record"); + +      if (LHS->getType()->isFPOrFPVectorTy()) +        I = new FCmpInst((FCmpInst::Predicate)PredVal, LHS, RHS); +      else +        I = new ICmpInst((ICmpInst::Predicate)PredVal, LHS, RHS); + +      if (FMF.any()) +        I->setFastMathFlags(FMF); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_RET: // RET: [opty,opval<optional>] +      { +        unsigned Size = Record.size(); +        if (Size == 0) { +          I = ReturnInst::Create(Context); +          InstructionList.push_back(I); +          break; +        } + +        unsigned OpNum = 0; +        Value *Op = nullptr; +        if (getValueTypePair(Record, OpNum, NextValueNo, Op)) +          return error("Invalid record"); +        if (OpNum != Record.size()) +          return error("Invalid record"); + +        I = ReturnInst::Create(Context, Op); +        InstructionList.push_back(I); +        break; +      } +    case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#] +      if (Record.size() != 1 && Record.size() != 3) +        return error("Invalid record"); +      BasicBlock *TrueDest = getBasicBlock(Record[0]); +      if (!TrueDest) +        return error("Invalid record"); + +      if (Record.size() == 1) { +        I = BranchInst::Create(TrueDest); +        InstructionList.push_back(I); +      
} +      else { +        BasicBlock *FalseDest = getBasicBlock(Record[1]); +        Value *Cond = getValue(Record, 2, NextValueNo, +                               Type::getInt1Ty(Context)); +        if (!FalseDest || !Cond) +          return error("Invalid record"); +        I = BranchInst::Create(TrueDest, FalseDest, Cond); +        InstructionList.push_back(I); +      } +      break; +    } +    case bitc::FUNC_CODE_INST_CLEANUPRET: { // CLEANUPRET: [val] or [val,bb#] +      if (Record.size() != 1 && Record.size() != 2) +        return error("Invalid record"); +      unsigned Idx = 0; +      Value *CleanupPad = +          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); +      if (!CleanupPad) +        return error("Invalid record"); +      BasicBlock *UnwindDest = nullptr; +      if (Record.size() == 2) { +        UnwindDest = getBasicBlock(Record[Idx++]); +        if (!UnwindDest) +          return error("Invalid record"); +      } + +      I = CleanupReturnInst::Create(CleanupPad, UnwindDest); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_CATCHRET: { // CATCHRET: [val,bb#] +      if (Record.size() != 2) +        return error("Invalid record"); +      unsigned Idx = 0; +      Value *CatchPad = +          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); +      if (!CatchPad) +        return error("Invalid record"); +      BasicBlock *BB = getBasicBlock(Record[Idx++]); +      if (!BB) +        return error("Invalid record"); + +      I = CatchReturnInst::Create(CatchPad, BB); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_CATCHSWITCH: { // CATCHSWITCH: [tok,num,(bb)*,bb?] +      // We must have, at minimum, the outer scope and the number of arguments. 
+      if (Record.size() < 2) +        return error("Invalid record"); + +      unsigned Idx = 0; + +      Value *ParentPad = +          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); + +      unsigned NumHandlers = Record[Idx++]; + +      SmallVector<BasicBlock *, 2> Handlers; +      for (unsigned Op = 0; Op != NumHandlers; ++Op) { +        BasicBlock *BB = getBasicBlock(Record[Idx++]); +        if (!BB) +          return error("Invalid record"); +        Handlers.push_back(BB); +      } + +      BasicBlock *UnwindDest = nullptr; +      if (Idx + 1 == Record.size()) { +        UnwindDest = getBasicBlock(Record[Idx++]); +        if (!UnwindDest) +          return error("Invalid record"); +      } + +      if (Record.size() != Idx) +        return error("Invalid record"); + +      auto *CatchSwitch = +          CatchSwitchInst::Create(ParentPad, UnwindDest, NumHandlers); +      for (BasicBlock *Handler : Handlers) +        CatchSwitch->addHandler(Handler); +      I = CatchSwitch; +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_CATCHPAD: +    case bitc::FUNC_CODE_INST_CLEANUPPAD: { // [tok,num,(ty,val)*] +      // We must have, at minimum, the outer scope and the number of arguments. 
+      if (Record.size() < 2) +        return error("Invalid record"); + +      unsigned Idx = 0; + +      Value *ParentPad = +          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); + +      unsigned NumArgOperands = Record[Idx++]; + +      SmallVector<Value *, 2> Args; +      for (unsigned Op = 0; Op != NumArgOperands; ++Op) { +        Value *Val; +        if (getValueTypePair(Record, Idx, NextValueNo, Val)) +          return error("Invalid record"); +        Args.push_back(Val); +      } + +      if (Record.size() != Idx) +        return error("Invalid record"); + +      if (BitCode == bitc::FUNC_CODE_INST_CLEANUPPAD) +        I = CleanupPadInst::Create(ParentPad, Args); +      else +        I = CatchPadInst::Create(ParentPad, Args); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...] +      // Check magic +      if ((Record[0] >> 16) == SWITCH_INST_MAGIC) { +        // "New" SwitchInst format with case ranges. The changes to write this +        // format were reverted but we still recognize bitcode that uses it. +        // Hopefully someday we will have support for case ranges and can use +        // this format again. 
+ +        Type *OpTy = getTypeByID(Record[1]); +        unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth(); + +        Value *Cond = getValue(Record, 2, NextValueNo, OpTy); +        BasicBlock *Default = getBasicBlock(Record[3]); +        if (!OpTy || !Cond || !Default) +          return error("Invalid record"); + +        unsigned NumCases = Record[4]; + +        SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases); +        InstructionList.push_back(SI); + +        unsigned CurIdx = 5; +        for (unsigned i = 0; i != NumCases; ++i) { +          SmallVector<ConstantInt*, 1> CaseVals; +          unsigned NumItems = Record[CurIdx++]; +          for (unsigned ci = 0; ci != NumItems; ++ci) { +            bool isSingleNumber = Record[CurIdx++]; + +            APInt Low; +            unsigned ActiveWords = 1; +            if (ValueBitWidth > 64) +              ActiveWords = Record[CurIdx++]; +            Low = readWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords), +                                ValueBitWidth); +            CurIdx += ActiveWords; + +            if (!isSingleNumber) { +              ActiveWords = 1; +              if (ValueBitWidth > 64) +                ActiveWords = Record[CurIdx++]; +              APInt High = readWideAPInt( +                  makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth); +              CurIdx += ActiveWords; + +              // FIXME: It is not clear whether values in the range should be +              // compared as signed or unsigned values. The partially +              // implemented changes that used this format in the past used +              // unsigned comparisons. 
+              for ( ; Low.ule(High); ++Low) +                CaseVals.push_back(ConstantInt::get(Context, Low)); +            } else +              CaseVals.push_back(ConstantInt::get(Context, Low)); +          } +          BasicBlock *DestBB = getBasicBlock(Record[CurIdx++]); +          for (SmallVector<ConstantInt*, 1>::iterator cvi = CaseVals.begin(), +                 cve = CaseVals.end(); cvi != cve; ++cvi) +            SI->addCase(*cvi, DestBB); +        } +        I = SI; +        break; +      } + +      // Old SwitchInst format without case ranges. + +      if (Record.size() < 3 || (Record.size() & 1) == 0) +        return error("Invalid record"); +      Type *OpTy = getTypeByID(Record[0]); +      Value *Cond = getValue(Record, 1, NextValueNo, OpTy); +      BasicBlock *Default = getBasicBlock(Record[2]); +      if (!OpTy || !Cond || !Default) +        return error("Invalid record"); +      unsigned NumCases = (Record.size()-3)/2; +      SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases); +      InstructionList.push_back(SI); +      for (unsigned i = 0, e = NumCases; i != e; ++i) { +        ConstantInt *CaseVal = +          dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy)); +        BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]); +        if (!CaseVal || !DestBB) { +          delete SI; +          return error("Invalid record"); +        } +        SI->addCase(CaseVal, DestBB); +      } +      I = SI; +      break; +    } +    case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...] 
+      if (Record.size() < 2) +        return error("Invalid record"); +      Type *OpTy = getTypeByID(Record[0]); +      Value *Address = getValue(Record, 1, NextValueNo, OpTy); +      if (!OpTy || !Address) +        return error("Invalid record"); +      unsigned NumDests = Record.size()-2; +      IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests); +      InstructionList.push_back(IBI); +      for (unsigned i = 0, e = NumDests; i != e; ++i) { +        if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) { +          IBI->addDestination(DestBB); +        } else { +          delete IBI; +          return error("Invalid record"); +        } +      } +      I = IBI; +      break; +    } + +    case bitc::FUNC_CODE_INST_INVOKE: { +      // INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...] +      if (Record.size() < 4) +        return error("Invalid record"); +      unsigned OpNum = 0; +      AttributeList PAL = getAttributes(Record[OpNum++]); +      unsigned CCInfo = Record[OpNum++]; +      BasicBlock *NormalBB = getBasicBlock(Record[OpNum++]); +      BasicBlock *UnwindBB = getBasicBlock(Record[OpNum++]); + +      FunctionType *FTy = nullptr; +      if (CCInfo >> 13 & 1 && +          !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++])))) +        return error("Explicit invoke type is not a function type"); + +      Value *Callee; +      if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) +        return error("Invalid record"); + +      PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType()); +      if (!CalleeTy) +        return error("Callee is not a pointer"); +      if (!FTy) { +        FTy = dyn_cast<FunctionType>(CalleeTy->getElementType()); +        if (!FTy) +          return error("Callee is not of pointer to function type"); +      } else if (CalleeTy->getElementType() != FTy) +        return error("Explicit invoke type does not match pointee type of " +                     "callee operand"); +      if 
(Record.size() < FTy->getNumParams() + OpNum) +        return error("Insufficient operands to call"); + +      SmallVector<Value*, 16> Ops; +      for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { +        Ops.push_back(getValue(Record, OpNum, NextValueNo, +                               FTy->getParamType(i))); +        if (!Ops.back()) +          return error("Invalid record"); +      } + +      if (!FTy->isVarArg()) { +        if (Record.size() != OpNum) +          return error("Invalid record"); +      } else { +        // Read type/value pairs for varargs params. +        while (OpNum != Record.size()) { +          Value *Op; +          if (getValueTypePair(Record, OpNum, NextValueNo, Op)) +            return error("Invalid record"); +          Ops.push_back(Op); +        } +      } + +      I = InvokeInst::Create(Callee, NormalBB, UnwindBB, Ops, OperandBundles); +      OperandBundles.clear(); +      InstructionList.push_back(I); +      cast<InvokeInst>(I)->setCallingConv( +          static_cast<CallingConv::ID>(CallingConv::MaxID & CCInfo)); +      cast<InvokeInst>(I)->setAttributes(PAL); +      break; +    } +    case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval] +      unsigned Idx = 0; +      Value *Val = nullptr; +      if (getValueTypePair(Record, Idx, NextValueNo, Val)) +        return error("Invalid record"); +      I = ResumeInst::Create(Val); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE +      I = new UnreachableInst(Context); +      InstructionList.push_back(I); +      break; +    case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...] 
+      if (Record.size() < 1 || ((Record.size()-1)&1)) +        return error("Invalid record"); +      Type *Ty = getTypeByID(Record[0]); +      if (!Ty) +        return error("Invalid record"); + +      PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2); +      InstructionList.push_back(PN); + +      for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) { +        Value *V; +        // With the new function encoding, it is possible that operands have +        // negative IDs (for forward references).  Use a signed VBR +        // representation to keep the encoding small. +        if (UseRelativeIDs) +          V = getValueSigned(Record, 1+i, NextValueNo, Ty); +        else +          V = getValue(Record, 1+i, NextValueNo, Ty); +        BasicBlock *BB = getBasicBlock(Record[2+i]); +        if (!V || !BB) +          return error("Invalid record"); +        PN->addIncoming(V, BB); +      } +      I = PN; +      break; +    } + +    case bitc::FUNC_CODE_INST_LANDINGPAD: +    case bitc::FUNC_CODE_INST_LANDINGPAD_OLD: { +      // LANDINGPAD: [ty, val, val, num, (id0,val0 ...)?] 
+      unsigned Idx = 0; +      if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD) { +        if (Record.size() < 3) +          return error("Invalid record"); +      } else { +        assert(BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD); +        if (Record.size() < 4) +          return error("Invalid record"); +      } +      Type *Ty = getTypeByID(Record[Idx++]); +      if (!Ty) +        return error("Invalid record"); +      if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD) { +        Value *PersFn = nullptr; +        if (getValueTypePair(Record, Idx, NextValueNo, PersFn)) +          return error("Invalid record"); + +        if (!F->hasPersonalityFn()) +          F->setPersonalityFn(cast<Constant>(PersFn)); +        else if (F->getPersonalityFn() != cast<Constant>(PersFn)) +          return error("Personality function mismatch"); +      } + +      bool IsCleanup = !!Record[Idx++]; +      unsigned NumClauses = Record[Idx++]; +      LandingPadInst *LP = LandingPadInst::Create(Ty, NumClauses); +      LP->setCleanup(IsCleanup); +      for (unsigned J = 0; J != NumClauses; ++J) { +        LandingPadInst::ClauseType CT = +          LandingPadInst::ClauseType(Record[Idx++]); (void)CT; +        Value *Val; + +        if (getValueTypePair(Record, Idx, NextValueNo, Val)) { +          delete LP; +          return error("Invalid record"); +        } + +        assert((CT != LandingPadInst::Catch || +                !isa<ArrayType>(Val->getType())) && +               "Catch clause has a invalid type!"); +        assert((CT != LandingPadInst::Filter || +                isa<ArrayType>(Val->getType())) && +               "Filter clause has invalid type!"); +        LP->addClause(cast<Constant>(Val)); +      } + +      I = LP; +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align] +      if (Record.size() != 4) +        return error("Invalid record"); +      uint64_t AlignRecord = Record[3]; +  
    const uint64_t InAllocaMask = uint64_t(1) << 5; +      const uint64_t ExplicitTypeMask = uint64_t(1) << 6; +      const uint64_t SwiftErrorMask = uint64_t(1) << 7; +      const uint64_t FlagMask = InAllocaMask | ExplicitTypeMask | +                                SwiftErrorMask; +      bool InAlloca = AlignRecord & InAllocaMask; +      bool SwiftError = AlignRecord & SwiftErrorMask; +      Type *Ty = getTypeByID(Record[0]); +      if ((AlignRecord & ExplicitTypeMask) == 0) { +        auto *PTy = dyn_cast_or_null<PointerType>(Ty); +        if (!PTy) +          return error("Old-style alloca with a non-pointer type"); +        Ty = PTy->getElementType(); +      } +      Type *OpTy = getTypeByID(Record[1]); +      Value *Size = getFnValueByID(Record[2], OpTy); +      unsigned Align; +      if (Error Err = parseAlignmentValue(AlignRecord & ~FlagMask, Align)) { +        return Err; +      } +      if (!Ty || !Size) +        return error("Invalid record"); + +      // FIXME: Make this an optional field. 
+      const DataLayout &DL = TheModule->getDataLayout(); +      unsigned AS = DL.getAllocaAddrSpace(); + +      AllocaInst *AI = new AllocaInst(Ty, AS, Size, Align); +      AI->setUsedWithInAlloca(InAlloca); +      AI->setSwiftError(SwiftError); +      I = AI; +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol] +      unsigned OpNum = 0; +      Value *Op; +      if (getValueTypePair(Record, OpNum, NextValueNo, Op) || +          (OpNum + 2 != Record.size() && OpNum + 3 != Record.size())) +        return error("Invalid record"); + +      Type *Ty = nullptr; +      if (OpNum + 3 == Record.size()) +        Ty = getTypeByID(Record[OpNum++]); +      if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType())) +        return Err; +      if (!Ty) +        Ty = cast<PointerType>(Op->getType())->getElementType(); + +      unsigned Align; +      if (Error Err = parseAlignmentValue(Record[OpNum], Align)) +        return Err; +      I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align); + +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_LOADATOMIC: { +       // LOADATOMIC: [opty, op, align, vol, ordering, ssid] +      unsigned OpNum = 0; +      Value *Op; +      if (getValueTypePair(Record, OpNum, NextValueNo, Op) || +          (OpNum + 4 != Record.size() && OpNum + 5 != Record.size())) +        return error("Invalid record"); + +      Type *Ty = nullptr; +      if (OpNum + 5 == Record.size()) +        Ty = getTypeByID(Record[OpNum++]); +      if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType())) +        return Err; +      if (!Ty) +        Ty = cast<PointerType>(Op->getType())->getElementType(); + +      AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); +      if (Ordering == AtomicOrdering::NotAtomic || +          Ordering == AtomicOrdering::Release || +          Ordering == AtomicOrdering::AcquireRelease) +        return error("Invalid 
record"); +      if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) +        return error("Invalid record"); +      SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]); + +      unsigned Align; +      if (Error Err = parseAlignmentValue(Record[OpNum], Align)) +        return Err; +      I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SSID); + +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_STORE: +    case bitc::FUNC_CODE_INST_STORE_OLD: { // STORE2:[ptrty, ptr, val, align, vol] +      unsigned OpNum = 0; +      Value *Val, *Ptr; +      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || +          (BitCode == bitc::FUNC_CODE_INST_STORE +               ? getValueTypePair(Record, OpNum, NextValueNo, Val) +               : popValue(Record, OpNum, NextValueNo, +                          cast<PointerType>(Ptr->getType())->getElementType(), +                          Val)) || +          OpNum + 2 != Record.size()) +        return error("Invalid record"); + +      if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType())) +        return Err; +      unsigned Align; +      if (Error Err = parseAlignmentValue(Record[OpNum], Align)) +        return Err; +      I = new StoreInst(Val, Ptr, Record[OpNum+1], Align); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_STOREATOMIC: +    case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: { +      // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid] +      unsigned OpNum = 0; +      Value *Val, *Ptr; +      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || +          !isa<PointerType>(Ptr->getType()) || +          (BitCode == bitc::FUNC_CODE_INST_STOREATOMIC +               ? 
getValueTypePair(Record, OpNum, NextValueNo, Val) +               : popValue(Record, OpNum, NextValueNo, +                          cast<PointerType>(Ptr->getType())->getElementType(), +                          Val)) || +          OpNum + 4 != Record.size()) +        return error("Invalid record"); + +      if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType())) +        return Err; +      AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); +      if (Ordering == AtomicOrdering::NotAtomic || +          Ordering == AtomicOrdering::Acquire || +          Ordering == AtomicOrdering::AcquireRelease) +        return error("Invalid record"); +      SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]); +      if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) +        return error("Invalid record"); + +      unsigned Align; +      if (Error Err = parseAlignmentValue(Record[OpNum], Align)) +        return Err; +      I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SSID); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_CMPXCHG_OLD: +    case bitc::FUNC_CODE_INST_CMPXCHG: { +      // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, ssid, +      //          failureordering?, isweak?] +      unsigned OpNum = 0; +      Value *Ptr, *Cmp, *New; +      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || +          (BitCode == bitc::FUNC_CODE_INST_CMPXCHG +               ? 
getValueTypePair(Record, OpNum, NextValueNo, Cmp) +               : popValue(Record, OpNum, NextValueNo, +                          cast<PointerType>(Ptr->getType())->getElementType(), +                          Cmp)) || +          popValue(Record, OpNum, NextValueNo, Cmp->getType(), New) || +          Record.size() < OpNum + 3 || Record.size() > OpNum + 5) +        return error("Invalid record"); +      AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]); +      if (SuccessOrdering == AtomicOrdering::NotAtomic || +          SuccessOrdering == AtomicOrdering::Unordered) +        return error("Invalid record"); +      SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 2]); + +      if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType())) +        return Err; +      AtomicOrdering FailureOrdering; +      if (Record.size() < 7) +        FailureOrdering = +            AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering); +      else +        FailureOrdering = getDecodedOrdering(Record[OpNum + 3]); + +      I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering, +                                SSID); +      cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]); + +      if (Record.size() < 8) { +        // Before weak cmpxchgs existed, the instruction simply returned the +        // value loaded from memory, so bitcode files from that era will be +        // expecting the first component of a modern cmpxchg. 
+        CurBB->getInstList().push_back(I); +        I = ExtractValueInst::Create(I, 0); +      } else { +        cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]); +      } + +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_ATOMICRMW: { +      // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, ssid] +      unsigned OpNum = 0; +      Value *Ptr, *Val; +      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || +          !isa<PointerType>(Ptr->getType()) || +          popValue(Record, OpNum, NextValueNo, +                    cast<PointerType>(Ptr->getType())->getElementType(), Val) || +          OpNum+4 != Record.size()) +        return error("Invalid record"); +      AtomicRMWInst::BinOp Operation = getDecodedRMWOperation(Record[OpNum]); +      if (Operation < AtomicRMWInst::FIRST_BINOP || +          Operation > AtomicRMWInst::LAST_BINOP) +        return error("Invalid record"); +      AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); +      if (Ordering == AtomicOrdering::NotAtomic || +          Ordering == AtomicOrdering::Unordered) +        return error("Invalid record"); +      SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]); +      I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID); +      cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]); +      InstructionList.push_back(I); +      break; +    } +    case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, ssid] +      if (2 != Record.size()) +        return error("Invalid record"); +      AtomicOrdering Ordering = getDecodedOrdering(Record[0]); +      if (Ordering == AtomicOrdering::NotAtomic || +          Ordering == AtomicOrdering::Unordered || +          Ordering == AtomicOrdering::Monotonic) +        return error("Invalid record"); +      SyncScope::ID SSID = getDecodedSyncScopeID(Record[1]); +      I = new FenceInst(Context, Ordering, SSID); +      InstructionList.push_back(I); +      break; +    } +    case 
bitc::FUNC_CODE_INST_CALL: { +      // CALL: [paramattrs, cc, fmf, fnty, fnid, arg0, arg1...] +      if (Record.size() < 3) +        return error("Invalid record"); + +      unsigned OpNum = 0; +      AttributeList PAL = getAttributes(Record[OpNum++]); +      unsigned CCInfo = Record[OpNum++]; + +      FastMathFlags FMF; +      if ((CCInfo >> bitc::CALL_FMF) & 1) { +        FMF = getDecodedFastMathFlags(Record[OpNum++]); +        if (!FMF.any()) +          return error("Fast math flags indicator set for call with no FMF"); +      } + +      FunctionType *FTy = nullptr; +      if (CCInfo >> bitc::CALL_EXPLICIT_TYPE & 1 && +          !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++])))) +        return error("Explicit call type is not a function type"); + +      Value *Callee; +      if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) +        return error("Invalid record"); + +      PointerType *OpTy = dyn_cast<PointerType>(Callee->getType()); +      if (!OpTy) +        return error("Callee is not a pointer type"); +      if (!FTy) { +        FTy = dyn_cast<FunctionType>(OpTy->getElementType()); +        if (!FTy) +          return error("Callee is not of pointer to function type"); +      } else if (OpTy->getElementType() != FTy) +        return error("Explicit call type does not match pointee type of " +                     "callee operand"); +      if (Record.size() < FTy->getNumParams() + OpNum) +        return error("Insufficient operands to call"); + +      SmallVector<Value*, 16> Args; +      // Read the fixed params. 
+      for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { +        if (FTy->getParamType(i)->isLabelTy()) +          Args.push_back(getBasicBlock(Record[OpNum])); +        else +          Args.push_back(getValue(Record, OpNum, NextValueNo, +                                  FTy->getParamType(i))); +        if (!Args.back()) +          return error("Invalid record"); +      } + +      // Read type/value pairs for varargs params. +      if (!FTy->isVarArg()) { +        if (OpNum != Record.size()) +          return error("Invalid record"); +      } else { +        while (OpNum != Record.size()) { +          Value *Op; +          if (getValueTypePair(Record, OpNum, NextValueNo, Op)) +            return error("Invalid record"); +          Args.push_back(Op); +        } +      } + +      I = CallInst::Create(FTy, Callee, Args, OperandBundles); +      OperandBundles.clear(); +      InstructionList.push_back(I); +      cast<CallInst>(I)->setCallingConv( +          static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV)); +      CallInst::TailCallKind TCK = CallInst::TCK_None; +      if (CCInfo & 1 << bitc::CALL_TAIL) +        TCK = CallInst::TCK_Tail; +      if (CCInfo & (1 << bitc::CALL_MUSTTAIL)) +        TCK = CallInst::TCK_MustTail; +      if (CCInfo & (1 << bitc::CALL_NOTAIL)) +        TCK = CallInst::TCK_NoTail; +      cast<CallInst>(I)->setTailCallKind(TCK); +      cast<CallInst>(I)->setAttributes(PAL); +      if (FMF.any()) { +        if (!isa<FPMathOperator>(I)) +          return error("Fast-math-flags specified for call without " +                       "floating-point scalar or vector return type"); +        I->setFastMathFlags(FMF); +      } +      break; +    } +    case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty] +      if (Record.size() < 3) +        return error("Invalid record"); +      Type *OpTy = getTypeByID(Record[0]); +      Value *Op = getValue(Record, 1, NextValueNo, OpTy); +      Type *ResTy = 
getTypeByID(Record[2]); +      if (!OpTy || !Op || !ResTy) +        return error("Invalid record"); +      I = new VAArgInst(Op, ResTy); +      InstructionList.push_back(I); +      break; +    } + +    case bitc::FUNC_CODE_OPERAND_BUNDLE: { +      // A call or an invoke can be optionally prefixed with some variable +      // number of operand bundle blocks.  These blocks are read into +      // OperandBundles and consumed at the next call or invoke instruction. + +      if (Record.size() < 1 || Record[0] >= BundleTags.size()) +        return error("Invalid record"); + +      std::vector<Value *> Inputs; + +      unsigned OpNum = 1; +      while (OpNum != Record.size()) { +        Value *Op; +        if (getValueTypePair(Record, OpNum, NextValueNo, Op)) +          return error("Invalid record"); +        Inputs.push_back(Op); +      } + +      OperandBundles.emplace_back(BundleTags[Record[0]], std::move(Inputs)); +      continue; +    } +    } + +    // Add instruction to end of current BB.  If there is no current BB, reject +    // this file. +    if (!CurBB) { +      I->deleteValue(); +      return error("Invalid instruction with no BB"); +    } +    if (!OperandBundles.empty()) { +      I->deleteValue(); +      return error("Operand bundles found with no consumer"); +    } +    CurBB->getInstList().push_back(I); + +    // If this was a terminator instruction, move to the next block. +    if (isa<TerminatorInst>(I)) { +      ++CurBBNo; +      CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : nullptr; +    } + +    // Non-void values get registered in the value table for future use. +    if (I && !I->getType()->isVoidTy()) +      ValueList.assignValue(I, NextValueNo++); +  } + +OutOfRecordLoop: + +  if (!OperandBundles.empty()) +    return error("Operand bundles found with no consumer"); + +  // Check the function list for unresolved values. 
+  if (Argument *A = dyn_cast<Argument>(ValueList.back())) { +    if (!A->getParent()) { +      // We found at least one unresolved value.  Nuke them all to avoid leaks. +      for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){ +        if ((A = dyn_cast_or_null<Argument>(ValueList[i])) && !A->getParent()) { +          A->replaceAllUsesWith(UndefValue::get(A->getType())); +          delete A; +        } +      } +      return error("Never resolved value found in function"); +    } +  } + +  // Unexpected unresolved metadata about to be dropped. +  if (MDLoader->hasFwdRefs()) +    return error("Invalid function metadata: outgoing forward refs"); + +  // Trim the value list down to the size it was before we parsed this function. +  ValueList.shrinkTo(ModuleValueListSize); +  MDLoader->shrinkTo(ModuleMDLoaderSize); +  std::vector<BasicBlock*>().swap(FunctionBBs); +  return Error::success(); +} + +/// Find the function body in the bitcode stream +Error BitcodeReader::findFunctionInStream( +    Function *F, +    DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) { +  while (DeferredFunctionInfoIterator->second == 0) { +    // This is the fallback handling for the old format bitcode that +    // didn't contain the function index in the VST, or when we have +    // an anonymous function which would not have a VST entry. +    // Assert that we have one of those two cases. +    assert(VSTOffset == 0 || !F->hasName()); +    // Parse the next body in the stream and set its position in the +    // DeferredFunctionInfo map. +    if (Error Err = rememberAndSkipFunctionBodies()) +      return Err; +  } +  return Error::success(); +} + +SyncScope::ID BitcodeReader::getDecodedSyncScopeID(unsigned Val) { +  if (Val == SyncScope::SingleThread || Val == SyncScope::System) +    return SyncScope::ID(Val); +  if (Val >= SSIDs.size()) +    return SyncScope::System; // Map unknown synchronization scopes to system. 
+  return SSIDs[Val]; +} + +//===----------------------------------------------------------------------===// +// GVMaterializer implementation +//===----------------------------------------------------------------------===// + +Error BitcodeReader::materialize(GlobalValue *GV) { +  Function *F = dyn_cast<Function>(GV); +  // If it's not a function or is already material, ignore the request. +  if (!F || !F->isMaterializable()) +    return Error::success(); + +  DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F); +  assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!"); +  // If its position is recorded as 0, its body is somewhere in the stream +  // but we haven't seen it yet. +  if (DFII->second == 0) +    if (Error Err = findFunctionInStream(F, DFII)) +      return Err; + +  // Materialize metadata before parsing any function bodies. +  if (Error Err = materializeMetadata()) +    return Err; + +  // Move the bit stream to the saved position of the deferred function body. +  Stream.JumpToBit(DFII->second); + +  if (Error Err = parseFunctionBody(F)) +    return Err; +  F->setIsMaterializable(false); + +  if (StripDebugInfo) +    stripDebugInfo(*F); + +  // Upgrade any old intrinsic calls in the function. +  for (auto &I : UpgradedIntrinsics) { +    for (auto UI = I.first->materialized_user_begin(), UE = I.first->user_end(); +         UI != UE;) { +      User *U = *UI; +      ++UI; +      if (CallInst *CI = dyn_cast<CallInst>(U)) +        UpgradeIntrinsicCall(CI, I.second); +    } +  } + +  // Update calls to the remangled intrinsics +  for (auto &I : RemangledIntrinsics) +    for (auto UI = I.first->materialized_user_begin(), UE = I.first->user_end(); +         UI != UE;) +      // Don't expect any other users than call sites +      CallSite(*UI++).setCalledFunction(I.second); + +  // Finish fn->subprogram upgrade for materialized functions. 
+  if (DISubprogram *SP = MDLoader->lookupSubprogramForFunction(F)) +    F->setSubprogram(SP); + +  // Check if the TBAA Metadata are valid, otherwise we will need to strip them. +  if (!MDLoader->isStrippingTBAA()) { +    for (auto &I : instructions(F)) { +      MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa); +      if (!TBAA || TBAAVerifyHelper.visitTBAAMetadata(I, TBAA)) +        continue; +      MDLoader->setStripTBAA(true); +      stripTBAA(F->getParent()); +    } +  } + +  // Bring in any functions that this function forward-referenced via +  // blockaddresses. +  return materializeForwardReferencedFunctions(); +} + +Error BitcodeReader::materializeModule() { +  if (Error Err = materializeMetadata()) +    return Err; + +  // Promise to materialize all forward references. +  WillMaterializeAllForwardRefs = true; + +  // Iterate over the module, deserializing any functions that are still on +  // disk. +  for (Function &F : *TheModule) { +    if (Error Err = materialize(&F)) +      return Err; +  } +  // At this point, if there are any function bodies, parse the rest of +  // the bits in the module past the last function block we have recorded +  // through either lazy scanning or the VST. +  if (LastFunctionBlockBit || NextUnreadBit) +    if (Error Err = parseModule(LastFunctionBlockBit > NextUnreadBit +                                    ? LastFunctionBlockBit +                                    : NextUnreadBit)) +      return Err; + +  // Check that all block address forward references got resolved (as we +  // promised above). +  if (!BasicBlockFwdRefs.empty()) +    return error("Never resolved function from blockaddress"); + +  // Upgrade any intrinsic calls that slipped through (should not happen!) and +  // delete the old functions to clean up. We can't do this unless the entire +  // module is materialized because there could always be another function body +  // with calls to the old function. 
+  for (auto &I : UpgradedIntrinsics) { +    for (auto *U : I.first->users()) { +      if (CallInst *CI = dyn_cast<CallInst>(U)) +        UpgradeIntrinsicCall(CI, I.second); +    } +    if (!I.first->use_empty()) +      I.first->replaceAllUsesWith(I.second); +    I.first->eraseFromParent(); +  } +  UpgradedIntrinsics.clear(); +  // Do the same for remangled intrinsics +  for (auto &I : RemangledIntrinsics) { +    I.first->replaceAllUsesWith(I.second); +    I.first->eraseFromParent(); +  } +  RemangledIntrinsics.clear(); + +  UpgradeDebugInfo(*TheModule); + +  UpgradeModuleFlags(*TheModule); + +  UpgradeRetainReleaseMarker(*TheModule); + +  return Error::success(); +} + +std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const { +  return IdentifiedStructTypes; +} + +ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader( +    BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex, +    StringRef ModulePath, unsigned ModuleId) +    : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex), +      ModulePath(ModulePath), ModuleId(ModuleId) {} + +void ModuleSummaryIndexBitcodeReader::addThisModule() { +  TheIndex.addModule(ModulePath, ModuleId); +} + +ModuleSummaryIndex::ModuleInfo * +ModuleSummaryIndexBitcodeReader::getThisModule() { +  return TheIndex.getModule(ModulePath); +} + +std::pair<ValueInfo, GlobalValue::GUID> +ModuleSummaryIndexBitcodeReader::getValueInfoFromValueId(unsigned ValueId) { +  auto VGI = ValueIdToValueInfoMap[ValueId]; +  assert(VGI.first); +  return VGI; +} + +void ModuleSummaryIndexBitcodeReader::setValueGUID( +    uint64_t ValueID, StringRef ValueName, GlobalValue::LinkageTypes Linkage, +    StringRef SourceFileName) { +  std::string GlobalId = +      GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName); +  auto ValueGUID = GlobalValue::getGUID(GlobalId); +  auto OriginalNameID = ValueGUID; +  if (GlobalValue::isLocalLinkage(Linkage)) +    OriginalNameID = 
GlobalValue::getGUID(ValueName); +  if (PrintSummaryGUIDs) +    dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is " +           << ValueName << "\n"; + +  // UseStrtab is false for legacy summary formats and value names are +  // created on stack. In that case we save the name in a string saver in +  // the index so that the value name can be recorded. +  ValueIdToValueInfoMap[ValueID] = std::make_pair( +      TheIndex.getOrInsertValueInfo( +          ValueGUID, +          UseStrtab ? ValueName : TheIndex.saveString(ValueName.str())), +      OriginalNameID); +} + +// Specialized value symbol table parser used when reading module index +// blocks where we don't actually create global values. The parsed information +// is saved in the bitcode reader for use when later parsing summaries. +Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable( +    uint64_t Offset, +    DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap) { +  // With a strtab the VST is not required to parse the summary. +  if (UseStrtab) +    return Error::success(); + +  assert(Offset > 0 && "Expected non-zero VST offset"); +  uint64_t CurrentBit = jumpToValueSymbolTable(Offset, Stream); + +  if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; + +  // Read all the records for this value table. +  SmallString<128> ValueName; + +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      // Done parsing VST, jump back to wherever we came from. +      Stream.JumpToBit(CurrentBit); +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. 
+    Record.clear(); +    switch (Stream.readRecord(Entry.ID, Record)) { +    default: // Default behavior: ignore (e.g. VST_CODE_BBENTRY records). +      break; +    case bitc::VST_CODE_ENTRY: { // VST_CODE_ENTRY: [valueid, namechar x N] +      if (convertToString(Record, 1, ValueName)) +        return error("Invalid record"); +      unsigned ValueID = Record[0]; +      assert(!SourceFileName.empty()); +      auto VLI = ValueIdToLinkageMap.find(ValueID); +      assert(VLI != ValueIdToLinkageMap.end() && +             "No linkage found for VST entry?"); +      auto Linkage = VLI->second; +      setValueGUID(ValueID, ValueName, Linkage, SourceFileName); +      ValueName.clear(); +      break; +    } +    case bitc::VST_CODE_FNENTRY: { +      // VST_CODE_FNENTRY: [valueid, offset, namechar x N] +      if (convertToString(Record, 2, ValueName)) +        return error("Invalid record"); +      unsigned ValueID = Record[0]; +      assert(!SourceFileName.empty()); +      auto VLI = ValueIdToLinkageMap.find(ValueID); +      assert(VLI != ValueIdToLinkageMap.end() && +             "No linkage found for VST entry?"); +      auto Linkage = VLI->second; +      setValueGUID(ValueID, ValueName, Linkage, SourceFileName); +      ValueName.clear(); +      break; +    } +    case bitc::VST_CODE_COMBINED_ENTRY: { +      // VST_CODE_COMBINED_ENTRY: [valueid, refguid] +      unsigned ValueID = Record[0]; +      GlobalValue::GUID RefGUID = Record[1]; +      // The "original name", which is the second value of the pair will be +      // overriden later by a FS_COMBINED_ORIGINAL_NAME in the combined index. +      ValueIdToValueInfoMap[ValueID] = +          std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID); +      break; +    } +    } +  } +} + +// Parse just the blocks needed for building the index out of the module. +// At the end of this routine the module Index is populated with a map +// from global value id to GlobalValueSummary objects. 
+Error ModuleSummaryIndexBitcodeReader::parseModule() { +  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; +  DenseMap<unsigned, GlobalValue::LinkageTypes> ValueIdToLinkageMap; +  unsigned ValueId = 0; + +  // Read the index for this module. +  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return Error::success(); + +    case BitstreamEntry::SubBlock: +      switch (Entry.ID) { +      default: // Skip unknown content. +        if (Stream.SkipBlock()) +          return error("Invalid record"); +        break; +      case bitc::BLOCKINFO_BLOCK_ID: +        // Need to parse these to get abbrev ids (e.g. for VST) +        if (readBlockInfo()) +          return error("Malformed block"); +        break; +      case bitc::VALUE_SYMTAB_BLOCK_ID: +        // Should have been parsed earlier via VSTOffset, unless there +        // is no summary section. +        assert(((SeenValueSymbolTable && VSTOffset > 0) || +                !SeenGlobalValSummary) && +               "Expected early VST parse via VSTOffset record"); +        if (Stream.SkipBlock()) +          return error("Invalid record"); +        break; +      case bitc::GLOBALVAL_SUMMARY_BLOCK_ID: +      case bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID: +        // Add the module if it is a per-module index (has a source file name). +        if (!SourceFileName.empty()) +          addThisModule(); +        assert(!SeenValueSymbolTable && +               "Already read VST when parsing summary block?"); +        // We might not have a VST if there were no values in the +        // summary. An empty summary block generated when we are +        // performing ThinLTO compiles so we don't later invoke +        // the regular LTO process on them. 
+        if (VSTOffset > 0) { +          if (Error Err = parseValueSymbolTable(VSTOffset, ValueIdToLinkageMap)) +            return Err; +          SeenValueSymbolTable = true; +        } +        SeenGlobalValSummary = true; +        if (Error Err = parseEntireSummary(Entry.ID)) +          return Err; +        break; +      case bitc::MODULE_STRTAB_BLOCK_ID: +        if (Error Err = parseModuleStringTable()) +          return Err; +        break; +      } +      continue; + +    case BitstreamEntry::Record: { +        Record.clear(); +        auto BitCode = Stream.readRecord(Entry.ID, Record); +        switch (BitCode) { +        default: +          break; // Default behavior, ignore unknown content. +        case bitc::MODULE_CODE_VERSION: { +          if (Error Err = parseVersionRecord(Record).takeError()) +            return Err; +          break; +        } +        /// MODULE_CODE_SOURCE_FILENAME: [namechar x N] +        case bitc::MODULE_CODE_SOURCE_FILENAME: { +          SmallString<128> ValueName; +          if (convertToString(Record, 0, ValueName)) +            return error("Invalid record"); +          SourceFileName = ValueName.c_str(); +          break; +        } +        /// MODULE_CODE_HASH: [5*i32] +        case bitc::MODULE_CODE_HASH: { +          if (Record.size() != 5) +            return error("Invalid hash length " + Twine(Record.size()).str()); +          auto &Hash = getThisModule()->second.second; +          int Pos = 0; +          for (auto &Val : Record) { +            assert(!(Val >> 32) && "Unexpected high bits set"); +            Hash[Pos++] = Val; +          } +          break; +        } +        /// MODULE_CODE_VSTOFFSET: [offset] +        case bitc::MODULE_CODE_VSTOFFSET: +          if (Record.size() < 1) +            return error("Invalid record"); +          // Note that we subtract 1 here because the offset is relative to one +          // word before the start of the identification or module block, which +          // was 
historically always the start of the regular bitcode header. +          VSTOffset = Record[0] - 1; +          break; +        // v1 GLOBALVAR: [pointer type, isconst,     initid,       linkage, ...] +        // v1 FUNCTION:  [type,         callingconv, isproto,      linkage, ...] +        // v1 ALIAS:     [alias type,   addrspace,   aliasee val#, linkage, ...] +        // v2: [strtab offset, strtab size, v1] +        case bitc::MODULE_CODE_GLOBALVAR: +        case bitc::MODULE_CODE_FUNCTION: +        case bitc::MODULE_CODE_ALIAS: { +          StringRef Name; +          ArrayRef<uint64_t> GVRecord; +          std::tie(Name, GVRecord) = readNameFromStrtab(Record); +          if (GVRecord.size() <= 3) +            return error("Invalid record"); +          uint64_t RawLinkage = GVRecord[3]; +          GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage); +          if (!UseStrtab) { +            ValueIdToLinkageMap[ValueId++] = Linkage; +            break; +          } + +          setValueGUID(ValueId++, Name, Linkage, SourceFileName); +          break; +        } +        } +      } +      continue; +    } +  } +} + +std::vector<ValueInfo> +ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) { +  std::vector<ValueInfo> Ret; +  Ret.reserve(Record.size()); +  for (uint64_t RefValueId : Record) +    Ret.push_back(getValueInfoFromValueId(RefValueId).first); +  return Ret; +} + +std::vector<FunctionSummary::EdgeTy> +ModuleSummaryIndexBitcodeReader::makeCallList(ArrayRef<uint64_t> Record, +                                              bool IsOldProfileFormat, +                                              bool HasProfile, bool HasRelBF) { +  std::vector<FunctionSummary::EdgeTy> Ret; +  Ret.reserve(Record.size()); +  for (unsigned I = 0, E = Record.size(); I != E; ++I) { +    CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown; +    uint64_t RelBF = 0; +    ValueInfo Callee = getValueInfoFromValueId(Record[I]).first; +    if 
(IsOldProfileFormat) { +      I += 1; // Skip old callsitecount field +      if (HasProfile) +        I += 1; // Skip old profilecount field +    } else if (HasProfile) +      Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]); +    else if (HasRelBF) +      RelBF = Record[++I]; +    Ret.push_back(FunctionSummary::EdgeTy{Callee, CalleeInfo(Hotness, RelBF)}); +  } +  return Ret; +} + +static void +parseWholeProgramDevirtResolutionByArg(ArrayRef<uint64_t> Record, size_t &Slot, +                                       WholeProgramDevirtResolution &Wpd) { +  uint64_t ArgNum = Record[Slot++]; +  WholeProgramDevirtResolution::ByArg &B = +      Wpd.ResByArg[{Record.begin() + Slot, Record.begin() + Slot + ArgNum}]; +  Slot += ArgNum; + +  B.TheKind = +      static_cast<WholeProgramDevirtResolution::ByArg::Kind>(Record[Slot++]); +  B.Info = Record[Slot++]; +  B.Byte = Record[Slot++]; +  B.Bit = Record[Slot++]; +} + +static void parseWholeProgramDevirtResolution(ArrayRef<uint64_t> Record, +                                              StringRef Strtab, size_t &Slot, +                                              TypeIdSummary &TypeId) { +  uint64_t Id = Record[Slot++]; +  WholeProgramDevirtResolution &Wpd = TypeId.WPDRes[Id]; + +  Wpd.TheKind = static_cast<WholeProgramDevirtResolution::Kind>(Record[Slot++]); +  Wpd.SingleImplName = {Strtab.data() + Record[Slot], +                        static_cast<size_t>(Record[Slot + 1])}; +  Slot += 2; + +  uint64_t ResByArgNum = Record[Slot++]; +  for (uint64_t I = 0; I != ResByArgNum; ++I) +    parseWholeProgramDevirtResolutionByArg(Record, Slot, Wpd); +} + +static void parseTypeIdSummaryRecord(ArrayRef<uint64_t> Record, +                                     StringRef Strtab, +                                     ModuleSummaryIndex &TheIndex) { +  size_t Slot = 0; +  TypeIdSummary &TypeId = TheIndex.getOrInsertTypeIdSummary( +      {Strtab.data() + Record[Slot], static_cast<size_t>(Record[Slot + 1])}); +  Slot += 2; + +  
TypeId.TTRes.TheKind = static_cast<TypeTestResolution::Kind>(Record[Slot++]); +  TypeId.TTRes.SizeM1BitWidth = Record[Slot++]; +  TypeId.TTRes.AlignLog2 = Record[Slot++]; +  TypeId.TTRes.SizeM1 = Record[Slot++]; +  TypeId.TTRes.BitMask = Record[Slot++]; +  TypeId.TTRes.InlineBits = Record[Slot++]; + +  while (Slot < Record.size()) +    parseWholeProgramDevirtResolution(Record, Strtab, Slot, TypeId); +} + +// Eagerly parse the entire summary block. This populates the GlobalValueSummary +// objects in the index. +Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) { +  if (Stream.EnterSubBlock(ID)) +    return error("Invalid record"); +  SmallVector<uint64_t, 64> Record; + +  // Parse version +  { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); +    if (Entry.Kind != BitstreamEntry::Record) +      return error("Invalid Summary Block: record for version expected"); +    if (Stream.readRecord(Entry.ID, Record) != bitc::FS_VERSION) +      return error("Invalid Summary Block: version expected"); +  } +  const uint64_t Version = Record[0]; +  const bool IsOldProfileFormat = Version == 1; +  if (Version < 1 || Version > 4) +    return error("Invalid summary version " + Twine(Version) + +                 ", 1, 2, 3 or 4 expected"); +  Record.clear(); + +  // Keep around the last seen summary to be used when we see an optional +  // "OriginalName" attachement. +  GlobalValueSummary *LastSeenSummary = nullptr; +  GlobalValue::GUID LastSeenGUID = 0; + +  // We can expect to see any number of type ID information records before +  // each function summary records; these variables store the information +  // collected so far so that it can be used to create the summary object. 
+  std::vector<GlobalValue::GUID> PendingTypeTests; +  std::vector<FunctionSummary::VFuncId> PendingTypeTestAssumeVCalls, +      PendingTypeCheckedLoadVCalls; +  std::vector<FunctionSummary::ConstVCall> PendingTypeTestAssumeConstVCalls, +      PendingTypeCheckedLoadConstVCalls; + +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. The record format depends on whether this +    // is a per-module index or a combined index file. In the per-module +    // case the records contain the associated value's ID for correlation +    // with VST entries. In the combined index the correlation is done +    // via the bitcode offset of the summary records (which were saved +    // in the combined index VST entries). The records also contain +    // information used for ThinLTO renaming and importing. +    Record.clear(); +    auto BitCode = Stream.readRecord(Entry.ID, Record); +    switch (BitCode) { +    default: // Default behavior: ignore. +      break; +    case bitc::FS_FLAGS: {  // [flags] +      uint64_t Flags = Record[0]; +      // Scan flags (set only on the combined index). +      assert(Flags <= 0x3 && "Unexpected bits in flag"); + +      // 1 bit: WithGlobalValueDeadStripping flag. +      if (Flags & 0x1) +        TheIndex.setWithGlobalValueDeadStripping(); +      // 1 bit: SkipModuleByDistributedBackend flag. 
+      if (Flags & 0x2) +        TheIndex.setSkipModuleByDistributedBackend(); +      break; +    } +    case bitc::FS_VALUE_GUID: { // [valueid, refguid] +      uint64_t ValueID = Record[0]; +      GlobalValue::GUID RefGUID = Record[1]; +      ValueIdToValueInfoMap[ValueID] = +          std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID); +      break; +    } +    // FS_PERMODULE: [valueid, flags, instcount, fflags, numrefs, +    //                numrefs x valueid, n x (valueid)] +    // FS_PERMODULE_PROFILE: [valueid, flags, instcount, fflags, numrefs, +    //                        numrefs x valueid, +    //                        n x (valueid, hotness)] +    // FS_PERMODULE_RELBF: [valueid, flags, instcount, fflags, numrefs, +    //                      numrefs x valueid, +    //                      n x (valueid, relblockfreq)] +    case bitc::FS_PERMODULE: +    case bitc::FS_PERMODULE_RELBF: +    case bitc::FS_PERMODULE_PROFILE: { +      unsigned ValueID = Record[0]; +      uint64_t RawFlags = Record[1]; +      unsigned InstCount = Record[2]; +      uint64_t RawFunFlags = 0; +      unsigned NumRefs = Record[3]; +      int RefListStartIndex = 4; +      if (Version >= 4) { +        RawFunFlags = Record[3]; +        NumRefs = Record[4]; +        RefListStartIndex = 5; +      } + +      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); +      // The module path string ref set in the summary must be owned by the +      // index's module string table. Since we don't have a module path +      // string table section in the per-module index, we create a single +      // module path string table entry with an empty (0) ID to take +      // ownership. 
+      int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs; +      assert(Record.size() >= RefListStartIndex + NumRefs && +             "Record size inconsistent with number of references"); +      std::vector<ValueInfo> Refs = makeRefList( +          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs)); +      bool HasProfile = (BitCode == bitc::FS_PERMODULE_PROFILE); +      bool HasRelBF = (BitCode == bitc::FS_PERMODULE_RELBF); +      std::vector<FunctionSummary::EdgeTy> Calls = makeCallList( +          ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex), +          IsOldProfileFormat, HasProfile, HasRelBF); +      auto FS = llvm::make_unique<FunctionSummary>( +          Flags, InstCount, getDecodedFFlags(RawFunFlags), std::move(Refs), +          std::move(Calls), std::move(PendingTypeTests), +          std::move(PendingTypeTestAssumeVCalls), +          std::move(PendingTypeCheckedLoadVCalls), +          std::move(PendingTypeTestAssumeConstVCalls), +          std::move(PendingTypeCheckedLoadConstVCalls)); +      PendingTypeTests.clear(); +      PendingTypeTestAssumeVCalls.clear(); +      PendingTypeCheckedLoadVCalls.clear(); +      PendingTypeTestAssumeConstVCalls.clear(); +      PendingTypeCheckedLoadConstVCalls.clear(); +      auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID); +      FS->setModulePath(getThisModule()->first()); +      FS->setOriginalName(VIAndOriginalGUID.second); +      TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS)); +      break; +    } +    // FS_ALIAS: [valueid, flags, valueid] +    // Aliases must be emitted (and parsed) after all FS_PERMODULE entries, as +    // they expect all aliasee summaries to be available. 
+    case bitc::FS_ALIAS: { +      unsigned ValueID = Record[0]; +      uint64_t RawFlags = Record[1]; +      unsigned AliaseeID = Record[2]; +      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); +      auto AS = llvm::make_unique<AliasSummary>(Flags); +      // The module path string ref set in the summary must be owned by the +      // index's module string table. Since we don't have a module path +      // string table section in the per-module index, we create a single +      // module path string table entry with an empty (0) ID to take +      // ownership. +      AS->setModulePath(getThisModule()->first()); + +      GlobalValue::GUID AliaseeGUID = +          getValueInfoFromValueId(AliaseeID).first.getGUID(); +      auto AliaseeInModule = +          TheIndex.findSummaryInModule(AliaseeGUID, ModulePath); +      if (!AliaseeInModule) +        return error("Alias expects aliasee summary to be parsed"); +      AS->setAliasee(AliaseeInModule); +      AS->setAliaseeGUID(AliaseeGUID); + +      auto GUID = getValueInfoFromValueId(ValueID); +      AS->setOriginalName(GUID.second); +      TheIndex.addGlobalValueSummary(GUID.first, std::move(AS)); +      break; +    } +    // FS_PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid] +    case bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS: { +      unsigned ValueID = Record[0]; +      uint64_t RawFlags = Record[1]; +      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); +      std::vector<ValueInfo> Refs = +          makeRefList(ArrayRef<uint64_t>(Record).slice(2)); +      auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs)); +      FS->setModulePath(getThisModule()->first()); +      auto GUID = getValueInfoFromValueId(ValueID); +      FS->setOriginalName(GUID.second); +      TheIndex.addGlobalValueSummary(GUID.first, std::move(FS)); +      break; +    } +    // FS_COMBINED: [valueid, modid, flags, instcount, fflags, numrefs, +    //               numrefs x valueid, n x (valueid)] +    
// FS_COMBINED_PROFILE: [valueid, modid, flags, instcount, fflags, numrefs, +    //                       numrefs x valueid, n x (valueid, hotness)] +    case bitc::FS_COMBINED: +    case bitc::FS_COMBINED_PROFILE: { +      unsigned ValueID = Record[0]; +      uint64_t ModuleId = Record[1]; +      uint64_t RawFlags = Record[2]; +      unsigned InstCount = Record[3]; +      uint64_t RawFunFlags = 0; +      unsigned NumRefs = Record[4]; +      int RefListStartIndex = 5; + +      if (Version >= 4) { +        RawFunFlags = Record[4]; +        NumRefs = Record[5]; +        RefListStartIndex = 6; +      } + +      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); +      int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs; +      assert(Record.size() >= RefListStartIndex + NumRefs && +             "Record size inconsistent with number of references"); +      std::vector<ValueInfo> Refs = makeRefList( +          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs)); +      bool HasProfile = (BitCode == bitc::FS_COMBINED_PROFILE); +      std::vector<FunctionSummary::EdgeTy> Edges = makeCallList( +          ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex), +          IsOldProfileFormat, HasProfile, false); +      ValueInfo VI = getValueInfoFromValueId(ValueID).first; +      auto FS = llvm::make_unique<FunctionSummary>( +          Flags, InstCount, getDecodedFFlags(RawFunFlags), std::move(Refs), +          std::move(Edges), std::move(PendingTypeTests), +          std::move(PendingTypeTestAssumeVCalls), +          std::move(PendingTypeCheckedLoadVCalls), +          std::move(PendingTypeTestAssumeConstVCalls), +          std::move(PendingTypeCheckedLoadConstVCalls)); +      PendingTypeTests.clear(); +      PendingTypeTestAssumeVCalls.clear(); +      PendingTypeCheckedLoadVCalls.clear(); +      PendingTypeTestAssumeConstVCalls.clear(); +      PendingTypeCheckedLoadConstVCalls.clear(); +      LastSeenSummary = FS.get(); +      LastSeenGUID = 
VI.getGUID(); +      FS->setModulePath(ModuleIdMap[ModuleId]); +      TheIndex.addGlobalValueSummary(VI, std::move(FS)); +      break; +    } +    // FS_COMBINED_ALIAS: [valueid, modid, flags, valueid] +    // Aliases must be emitted (and parsed) after all FS_COMBINED entries, as +    // they expect all aliasee summaries to be available. +    case bitc::FS_COMBINED_ALIAS: { +      unsigned ValueID = Record[0]; +      uint64_t ModuleId = Record[1]; +      uint64_t RawFlags = Record[2]; +      unsigned AliaseeValueId = Record[3]; +      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); +      auto AS = llvm::make_unique<AliasSummary>(Flags); +      LastSeenSummary = AS.get(); +      AS->setModulePath(ModuleIdMap[ModuleId]); + +      auto AliaseeGUID = +          getValueInfoFromValueId(AliaseeValueId).first.getGUID(); +      auto AliaseeInModule = +          TheIndex.findSummaryInModule(AliaseeGUID, AS->modulePath()); +      AS->setAliasee(AliaseeInModule); +      AS->setAliaseeGUID(AliaseeGUID); + +      ValueInfo VI = getValueInfoFromValueId(ValueID).first; +      LastSeenGUID = VI.getGUID(); +      TheIndex.addGlobalValueSummary(VI, std::move(AS)); +      break; +    } +    // FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid] +    case bitc::FS_COMBINED_GLOBALVAR_INIT_REFS: { +      unsigned ValueID = Record[0]; +      uint64_t ModuleId = Record[1]; +      uint64_t RawFlags = Record[2]; +      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); +      std::vector<ValueInfo> Refs = +          makeRefList(ArrayRef<uint64_t>(Record).slice(3)); +      auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs)); +      LastSeenSummary = FS.get(); +      FS->setModulePath(ModuleIdMap[ModuleId]); +      ValueInfo VI = getValueInfoFromValueId(ValueID).first; +      LastSeenGUID = VI.getGUID(); +      TheIndex.addGlobalValueSummary(VI, std::move(FS)); +      break; +    } +    // FS_COMBINED_ORIGINAL_NAME: [original_name] +    
case bitc::FS_COMBINED_ORIGINAL_NAME: { +      uint64_t OriginalName = Record[0]; +      if (!LastSeenSummary) +        return error("Name attachment that does not follow a combined record"); +      LastSeenSummary->setOriginalName(OriginalName); +      TheIndex.addOriginalName(LastSeenGUID, OriginalName); +      // Reset the LastSeenSummary +      LastSeenSummary = nullptr; +      LastSeenGUID = 0; +      break; +    } +    case bitc::FS_TYPE_TESTS: +      assert(PendingTypeTests.empty()); +      PendingTypeTests.insert(PendingTypeTests.end(), Record.begin(), +                              Record.end()); +      break; + +    case bitc::FS_TYPE_TEST_ASSUME_VCALLS: +      assert(PendingTypeTestAssumeVCalls.empty()); +      for (unsigned I = 0; I != Record.size(); I += 2) +        PendingTypeTestAssumeVCalls.push_back({Record[I], Record[I+1]}); +      break; + +    case bitc::FS_TYPE_CHECKED_LOAD_VCALLS: +      assert(PendingTypeCheckedLoadVCalls.empty()); +      for (unsigned I = 0; I != Record.size(); I += 2) +        PendingTypeCheckedLoadVCalls.push_back({Record[I], Record[I+1]}); +      break; + +    case bitc::FS_TYPE_TEST_ASSUME_CONST_VCALL: +      PendingTypeTestAssumeConstVCalls.push_back( +          {{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}}); +      break; + +    case bitc::FS_TYPE_CHECKED_LOAD_CONST_VCALL: +      PendingTypeCheckedLoadConstVCalls.push_back( +          {{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}}); +      break; + +    case bitc::FS_CFI_FUNCTION_DEFS: { +      std::set<std::string> &CfiFunctionDefs = TheIndex.cfiFunctionDefs(); +      for (unsigned I = 0; I != Record.size(); I += 2) +        CfiFunctionDefs.insert( +            {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])}); +      break; +    } + +    case bitc::FS_CFI_FUNCTION_DECLS: { +      std::set<std::string> &CfiFunctionDecls = TheIndex.cfiFunctionDecls(); +      for (unsigned I = 0; I != Record.size(); I += 2) +        
CfiFunctionDecls.insert( +            {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])}); +      break; +    } + +    case bitc::FS_TYPE_ID: +      parseTypeIdSummaryRecord(Record, Strtab, TheIndex); +      break; +    } +  } +  llvm_unreachable("Exit infinite loop"); +} + +// Parse the  module string table block into the Index. +// This populates the ModulePathStringTable map in the index. +Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() { +  if (Stream.EnterSubBlock(bitc::MODULE_STRTAB_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; + +  SmallString<128> ModulePath; +  ModuleSummaryIndex::ModuleInfo *LastSeenModule = nullptr; + +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    Record.clear(); +    switch (Stream.readRecord(Entry.ID, Record)) { +    default: // Default behavior: ignore. 
+      break; +    case bitc::MST_CODE_ENTRY: { +      // MST_ENTRY: [modid, namechar x N] +      uint64_t ModuleId = Record[0]; + +      if (convertToString(Record, 1, ModulePath)) +        return error("Invalid record"); + +      LastSeenModule = TheIndex.addModule(ModulePath, ModuleId); +      ModuleIdMap[ModuleId] = LastSeenModule->first(); + +      ModulePath.clear(); +      break; +    } +    /// MST_CODE_HASH: [5*i32] +    case bitc::MST_CODE_HASH: { +      if (Record.size() != 5) +        return error("Invalid hash length " + Twine(Record.size()).str()); +      if (!LastSeenModule) +        return error("Invalid hash that does not follow a module path"); +      int Pos = 0; +      for (auto &Val : Record) { +        assert(!(Val >> 32) && "Unexpected high bits set"); +        LastSeenModule->second.second[Pos++] = Val; +      } +      // Reset LastSeenModule to avoid overriding the hash unexpectedly. +      LastSeenModule = nullptr; +      break; +    } +    } +  } +  llvm_unreachable("Exit infinite loop"); +} + +namespace { + +// FIXME: This class is only here to support the transition to llvm::Error. It +// will be removed once this transition is complete. Clients should prefer to +// deal with the Error value directly, rather than converting to error_code. 
+class BitcodeErrorCategoryType : public std::error_category { +  const char *name() const noexcept override { +    return "llvm.bitcode"; +  } + +  std::string message(int IE) const override { +    BitcodeError E = static_cast<BitcodeError>(IE); +    switch (E) { +    case BitcodeError::CorruptedBitcode: +      return "Corrupted bitcode"; +    } +    llvm_unreachable("Unknown error type!"); +  } +}; + +} // end anonymous namespace + +static ManagedStatic<BitcodeErrorCategoryType> ErrorCategory; + +const std::error_category &llvm::BitcodeErrorCategory() { +  return *ErrorCategory; +} + +static Expected<StringRef> readBlobInRecord(BitstreamCursor &Stream, +                                            unsigned Block, unsigned RecordID) { +  if (Stream.EnterSubBlock(Block)) +    return error("Invalid record"); + +  StringRef Strtab; +  while (true) { +    BitstreamEntry Entry = Stream.advance(); +    switch (Entry.Kind) { +    case BitstreamEntry::EndBlock: +      return Strtab; + +    case BitstreamEntry::Error: +      return error("Malformed block"); + +    case BitstreamEntry::SubBlock: +      if (Stream.SkipBlock()) +        return error("Malformed block"); +      break; + +    case BitstreamEntry::Record: +      StringRef Blob; +      SmallVector<uint64_t, 1> Record; +      if (Stream.readRecord(Entry.ID, Record, &Blob) == RecordID) +        Strtab = Blob; +      break; +    } +  } +} + +//===----------------------------------------------------------------------===// +// External interface +//===----------------------------------------------------------------------===// + +Expected<std::vector<BitcodeModule>> +llvm::getBitcodeModuleList(MemoryBufferRef Buffer) { +  auto FOrErr = getBitcodeFileContents(Buffer); +  if (!FOrErr) +    return FOrErr.takeError(); +  return std::move(FOrErr->Mods); +} + +Expected<BitcodeFileContents> +llvm::getBitcodeFileContents(MemoryBufferRef Buffer) { +  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); +  if (!StreamOrErr) 
+    return StreamOrErr.takeError(); +  BitstreamCursor &Stream = *StreamOrErr; + +  BitcodeFileContents F; +  while (true) { +    uint64_t BCBegin = Stream.getCurrentByteNo(); + +    // We may be consuming bitcode from a client that leaves garbage at the end +    // of the bitcode stream (e.g. Apple's ar tool). If we are close enough to +    // the end that there cannot possibly be another module, stop looking. +    if (BCBegin + 8 >= Stream.getBitcodeBytes().size()) +      return F; + +    BitstreamEntry Entry = Stream.advance(); +    switch (Entry.Kind) { +    case BitstreamEntry::EndBlock: +    case BitstreamEntry::Error: +      return error("Malformed block"); + +    case BitstreamEntry::SubBlock: { +      uint64_t IdentificationBit = -1ull; +      if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID) { +        IdentificationBit = Stream.GetCurrentBitNo() - BCBegin * 8; +        if (Stream.SkipBlock()) +          return error("Malformed block"); + +        Entry = Stream.advance(); +        if (Entry.Kind != BitstreamEntry::SubBlock || +            Entry.ID != bitc::MODULE_BLOCK_ID) +          return error("Malformed block"); +      } + +      if (Entry.ID == bitc::MODULE_BLOCK_ID) { +        uint64_t ModuleBit = Stream.GetCurrentBitNo() - BCBegin * 8; +        if (Stream.SkipBlock()) +          return error("Malformed block"); + +        F.Mods.push_back({Stream.getBitcodeBytes().slice( +                              BCBegin, Stream.getCurrentByteNo() - BCBegin), +                          Buffer.getBufferIdentifier(), IdentificationBit, +                          ModuleBit}); +        continue; +      } + +      if (Entry.ID == bitc::STRTAB_BLOCK_ID) { +        Expected<StringRef> Strtab = +            readBlobInRecord(Stream, bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB); +        if (!Strtab) +          return Strtab.takeError(); +        // This string table is used by every preceding bitcode module that does +        // not have its own string table. 
A bitcode file may have multiple +        // string tables if it was created by binary concatenation, for example +        // with "llvm-cat -b". +        for (auto I = F.Mods.rbegin(), E = F.Mods.rend(); I != E; ++I) { +          if (!I->Strtab.empty()) +            break; +          I->Strtab = *Strtab; +        } +        // Similarly, the string table is used by every preceding symbol table; +        // normally there will be just one unless the bitcode file was created +        // by binary concatenation. +        if (!F.Symtab.empty() && F.StrtabForSymtab.empty()) +          F.StrtabForSymtab = *Strtab; +        continue; +      } + +      if (Entry.ID == bitc::SYMTAB_BLOCK_ID) { +        Expected<StringRef> SymtabOrErr = +            readBlobInRecord(Stream, bitc::SYMTAB_BLOCK_ID, bitc::SYMTAB_BLOB); +        if (!SymtabOrErr) +          return SymtabOrErr.takeError(); + +        // We can expect the bitcode file to have multiple symbol tables if it +        // was created by binary concatenation. In that case we silently +        // ignore any subsequent symbol tables, which is fine because this is a +        // low level function. The client is expected to notice that the number +        // of modules in the symbol table does not match the number of modules +        // in the input file and regenerate the symbol table. +        if (F.Symtab.empty()) +          F.Symtab = *SymtabOrErr; +        continue; +      } + +      if (Stream.SkipBlock()) +        return error("Malformed block"); +      continue; +    } +    case BitstreamEntry::Record: +      Stream.skipRecord(Entry.ID); +      continue; +    } +  } +} + +/// Get a lazy one-at-time loading module from bitcode. +/// +/// This isn't always used in a lazy context.  In particular, it's also used by +/// \a parseModule().  If this is truly lazy, then we need to eagerly pull +/// in forward-referenced functions from block address references. 
+/// +/// \param[in] MaterializeAll Set to \c true if we should materialize +/// everything. +Expected<std::unique_ptr<Module>> +BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll, +                             bool ShouldLazyLoadMetadata, bool IsImporting) { +  BitstreamCursor Stream(Buffer); + +  std::string ProducerIdentification; +  if (IdentificationBit != -1ull) { +    Stream.JumpToBit(IdentificationBit); +    Expected<std::string> ProducerIdentificationOrErr = +        readIdentificationBlock(Stream); +    if (!ProducerIdentificationOrErr) +      return ProducerIdentificationOrErr.takeError(); + +    ProducerIdentification = *ProducerIdentificationOrErr; +  } + +  Stream.JumpToBit(ModuleBit); +  auto *R = new BitcodeReader(std::move(Stream), Strtab, ProducerIdentification, +                              Context); + +  std::unique_ptr<Module> M = +      llvm::make_unique<Module>(ModuleIdentifier, Context); +  M->setMaterializer(R); + +  // Delay parsing Metadata if ShouldLazyLoadMetadata is true. +  if (Error Err = +          R->parseBitcodeInto(M.get(), ShouldLazyLoadMetadata, IsImporting)) +    return std::move(Err); + +  if (MaterializeAll) { +    // Read in the entire module, and destroy the BitcodeReader. +    if (Error Err = M->materializeAll()) +      return std::move(Err); +  } else { +    // Resolve forward references from blockaddresses. +    if (Error Err = R->materializeForwardReferencedFunctions()) +      return std::move(Err); +  } +  return std::move(M); +} + +Expected<std::unique_ptr<Module>> +BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata, +                             bool IsImporting) { +  return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting); +} + +// Parse the specified bitcode buffer and merge the index into CombinedIndex. +// We don't use ModuleIdentifier here because the client may need to control the +// module path used in the combined summary (e.g. 
when reading summaries for +// regular LTO modules). +Error BitcodeModule::readSummary(ModuleSummaryIndex &CombinedIndex, +                                 StringRef ModulePath, uint64_t ModuleId) { +  BitstreamCursor Stream(Buffer); +  Stream.JumpToBit(ModuleBit); + +  ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, CombinedIndex, +                                    ModulePath, ModuleId); +  return R.parseModule(); +} + +// Parse the specified bitcode buffer, returning the function info index. +Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() { +  BitstreamCursor Stream(Buffer); +  Stream.JumpToBit(ModuleBit); + +  auto Index = llvm::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false); +  ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index, +                                    ModuleIdentifier, 0); + +  if (Error Err = R.parseModule()) +    return std::move(Err); + +  return std::move(Index); +} + +// Check if the given bitcode buffer contains a global value summary block. +Expected<BitcodeLTOInfo> BitcodeModule::getLTOInfo() { +  BitstreamCursor Stream(Buffer); +  Stream.JumpToBit(ModuleBit); + +  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) +    return error("Invalid record"); + +  while (true) { +    BitstreamEntry Entry = Stream.advance(); + +    switch (Entry.Kind) { +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/false}; + +    case BitstreamEntry::SubBlock: +      if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID) +        return BitcodeLTOInfo{/*IsThinLTO=*/true, /*HasSummary=*/true}; + +      if (Entry.ID == bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID) +        return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/true}; + +      // Ignore other sub-blocks. 
+      if (Stream.SkipBlock()) +        return error("Malformed block"); +      continue; + +    case BitstreamEntry::Record: +      Stream.skipRecord(Entry.ID); +      continue; +    } +  } +} + +static Expected<BitcodeModule> getSingleModule(MemoryBufferRef Buffer) { +  Expected<std::vector<BitcodeModule>> MsOrErr = getBitcodeModuleList(Buffer); +  if (!MsOrErr) +    return MsOrErr.takeError(); + +  if (MsOrErr->size() != 1) +    return error("Expected a single module"); + +  return (*MsOrErr)[0]; +} + +Expected<std::unique_ptr<Module>> +llvm::getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context, +                           bool ShouldLazyLoadMetadata, bool IsImporting) { +  Expected<BitcodeModule> BM = getSingleModule(Buffer); +  if (!BM) +    return BM.takeError(); + +  return BM->getLazyModule(Context, ShouldLazyLoadMetadata, IsImporting); +} + +Expected<std::unique_ptr<Module>> llvm::getOwningLazyBitcodeModule( +    std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, +    bool ShouldLazyLoadMetadata, bool IsImporting) { +  auto MOrErr = getLazyBitcodeModule(*Buffer, Context, ShouldLazyLoadMetadata, +                                     IsImporting); +  if (MOrErr) +    (*MOrErr)->setOwnedMemoryBuffer(std::move(Buffer)); +  return MOrErr; +} + +Expected<std::unique_ptr<Module>> +BitcodeModule::parseModule(LLVMContext &Context) { +  return getModuleImpl(Context, true, false, false); +  // TODO: Restore the use-lists to the in-memory state when the bitcode was +  // written.  We must defer until the Module has been fully materialized. 
+} + +Expected<std::unique_ptr<Module>> llvm::parseBitcodeFile(MemoryBufferRef Buffer, +                                                         LLVMContext &Context) { +  Expected<BitcodeModule> BM = getSingleModule(Buffer); +  if (!BM) +    return BM.takeError(); + +  return BM->parseModule(Context); +} + +Expected<std::string> llvm::getBitcodeTargetTriple(MemoryBufferRef Buffer) { +  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); +  if (!StreamOrErr) +    return StreamOrErr.takeError(); + +  return readTriple(*StreamOrErr); +} + +Expected<bool> llvm::isBitcodeContainingObjCCategory(MemoryBufferRef Buffer) { +  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); +  if (!StreamOrErr) +    return StreamOrErr.takeError(); + +  return hasObjCCategory(*StreamOrErr); +} + +Expected<std::string> llvm::getBitcodeProducerString(MemoryBufferRef Buffer) { +  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); +  if (!StreamOrErr) +    return StreamOrErr.takeError(); + +  return readIdentificationCode(*StreamOrErr); +} + +Error llvm::readModuleSummaryIndex(MemoryBufferRef Buffer, +                                   ModuleSummaryIndex &CombinedIndex, +                                   uint64_t ModuleId) { +  Expected<BitcodeModule> BM = getSingleModule(Buffer); +  if (!BM) +    return BM.takeError(); + +  return BM->readSummary(CombinedIndex, BM->getModuleIdentifier(), ModuleId); +} + +Expected<std::unique_ptr<ModuleSummaryIndex>> +llvm::getModuleSummaryIndex(MemoryBufferRef Buffer) { +  Expected<BitcodeModule> BM = getSingleModule(Buffer); +  if (!BM) +    return BM.takeError(); + +  return BM->getSummary(); +} + +Expected<BitcodeLTOInfo> llvm::getBitcodeLTOInfo(MemoryBufferRef Buffer) { +  Expected<BitcodeModule> BM = getSingleModule(Buffer); +  if (!BM) +    return BM.takeError(); + +  return BM->getLTOInfo(); +} + +Expected<std::unique_ptr<ModuleSummaryIndex>> +llvm::getModuleSummaryIndexForFile(StringRef Path, +                               
    bool IgnoreEmptyThinLTOIndexFile) { +  ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr = +      MemoryBuffer::getFileOrSTDIN(Path); +  if (!FileOrErr) +    return errorCodeToError(FileOrErr.getError()); +  if (IgnoreEmptyThinLTOIndexFile && !(*FileOrErr)->getBufferSize()) +    return nullptr; +  return getModuleSummaryIndex(**FileOrErr); +} diff --git a/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp new file mode 100644 index 000000000000..771cf3d927bc --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/BitstreamReader.cpp @@ -0,0 +1,390 @@ +//===- BitstreamReader.cpp - BitstreamReader implementation ---------------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Bitcode/BitstreamReader.h" +#include "llvm/ADT/StringRef.h" +#include <cassert> +#include <string> + +using namespace llvm; + +//===----------------------------------------------------------------------===// +//  BitstreamCursor implementation +//===----------------------------------------------------------------------===// + +/// EnterSubBlock - Having read the ENTER_SUBBLOCK abbrevid, enter +/// the block, and return true if the block has an error. +bool BitstreamCursor::EnterSubBlock(unsigned BlockID, unsigned *NumWordsP) { +  // Save the current block's state on BlockScope. +  BlockScope.push_back(Block(CurCodeSize)); +  BlockScope.back().PrevAbbrevs.swap(CurAbbrevs); + +  // Add the abbrevs specific to this block to the CurAbbrevs list. 
+  if (BlockInfo) { +    if (const BitstreamBlockInfo::BlockInfo *Info = +            BlockInfo->getBlockInfo(BlockID)) { +      CurAbbrevs.insert(CurAbbrevs.end(), Info->Abbrevs.begin(), +                        Info->Abbrevs.end()); +    } +  } + +  // Get the codesize of this block. +  CurCodeSize = ReadVBR(bitc::CodeLenWidth); +  // We can't read more than MaxChunkSize at a time +  if (CurCodeSize > MaxChunkSize) +    return true; + +  SkipToFourByteBoundary(); +  unsigned NumWords = Read(bitc::BlockSizeWidth); +  if (NumWordsP) *NumWordsP = NumWords; + +  // Validate that this block is sane. +  return CurCodeSize == 0 || AtEndOfStream(); +} + +static uint64_t readAbbreviatedField(BitstreamCursor &Cursor, +                                     const BitCodeAbbrevOp &Op) { +  assert(!Op.isLiteral() && "Not to be used with literals!"); + +  // Decode the value as we are commanded. +  switch (Op.getEncoding()) { +  case BitCodeAbbrevOp::Array: +  case BitCodeAbbrevOp::Blob: +    llvm_unreachable("Should not reach here"); +  case BitCodeAbbrevOp::Fixed: +    assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); +    return Cursor.Read((unsigned)Op.getEncodingData()); +  case BitCodeAbbrevOp::VBR: +    assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); +    return Cursor.ReadVBR64((unsigned)Op.getEncodingData()); +  case BitCodeAbbrevOp::Char6: +    return BitCodeAbbrevOp::DecodeChar6(Cursor.Read(6)); +  } +  llvm_unreachable("invalid abbreviation encoding"); +} + +static void skipAbbreviatedField(BitstreamCursor &Cursor, +                                 const BitCodeAbbrevOp &Op) { +  assert(!Op.isLiteral() && "Not to be used with literals!"); + +  // Decode the value as we are commanded. 
+  switch (Op.getEncoding()) { +  case BitCodeAbbrevOp::Array: +  case BitCodeAbbrevOp::Blob: +    llvm_unreachable("Should not reach here"); +  case BitCodeAbbrevOp::Fixed: +    assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); +    Cursor.Read((unsigned)Op.getEncodingData()); +    break; +  case BitCodeAbbrevOp::VBR: +    assert((unsigned)Op.getEncodingData() <= Cursor.MaxChunkSize); +    Cursor.ReadVBR64((unsigned)Op.getEncodingData()); +    break; +  case BitCodeAbbrevOp::Char6: +    Cursor.Read(6); +    break; +  } +} + +/// skipRecord - Read the current record and discard it. +unsigned BitstreamCursor::skipRecord(unsigned AbbrevID) { +  // Skip unabbreviated records by reading past their entries. +  if (AbbrevID == bitc::UNABBREV_RECORD) { +    unsigned Code = ReadVBR(6); +    unsigned NumElts = ReadVBR(6); +    for (unsigned i = 0; i != NumElts; ++i) +      (void)ReadVBR64(6); +    return Code; +  } + +  const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID); +  const BitCodeAbbrevOp &CodeOp = Abbv->getOperandInfo(0); +  unsigned Code; +  if (CodeOp.isLiteral()) +    Code = CodeOp.getLiteralValue(); +  else { +    if (CodeOp.getEncoding() == BitCodeAbbrevOp::Array || +        CodeOp.getEncoding() == BitCodeAbbrevOp::Blob) +      report_fatal_error("Abbreviation starts with an Array or a Blob"); +    Code = readAbbreviatedField(*this, CodeOp); +  } + +  for (unsigned i = 1, e = Abbv->getNumOperandInfos(); i < e; ++i) { +    const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); +    if (Op.isLiteral()) +      continue; + +    if (Op.getEncoding() != BitCodeAbbrevOp::Array && +        Op.getEncoding() != BitCodeAbbrevOp::Blob) { +      skipAbbreviatedField(*this, Op); +      continue; +    } + +    if (Op.getEncoding() == BitCodeAbbrevOp::Array) { +      // Array case.  Read the number of elements as a vbr6. +      unsigned NumElts = ReadVBR(6); + +      // Get the element encoding. 
+      assert(i+2 == e && "array op not second to last?"); +      const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i); + +      // Read all the elements. +      // Decode the value as we are commanded. +      switch (EltEnc.getEncoding()) { +      default: +        report_fatal_error("Array element type can't be an Array or a Blob"); +      case BitCodeAbbrevOp::Fixed: +        assert((unsigned)EltEnc.getEncodingData() <= MaxChunkSize); +        JumpToBit(GetCurrentBitNo() + NumElts * EltEnc.getEncodingData()); +        break; +      case BitCodeAbbrevOp::VBR: +        assert((unsigned)EltEnc.getEncodingData() <= MaxChunkSize); +        for (; NumElts; --NumElts) +          ReadVBR64((unsigned)EltEnc.getEncodingData()); +        break; +      case BitCodeAbbrevOp::Char6: +        JumpToBit(GetCurrentBitNo() + NumElts * 6); +        break; +      } +      continue; +    } + +    assert(Op.getEncoding() == BitCodeAbbrevOp::Blob); +    // Blob case.  Read the number of bytes as a vbr6. +    unsigned NumElts = ReadVBR(6); +    SkipToFourByteBoundary();  // 32-bit alignment + +    // Figure out where the end of this blob will be including tail padding. +    size_t NewEnd = GetCurrentBitNo()+((NumElts+3)&~3)*8; + +    // If this would read off the end of the bitcode file, just set the +    // record to empty and return. +    if (!canSkipToPos(NewEnd/8)) { +      skipToEnd(); +      break; +    } + +    // Skip over the blob. +    JumpToBit(NewEnd); +  } +  return Code; +} + +unsigned BitstreamCursor::readRecord(unsigned AbbrevID, +                                     SmallVectorImpl<uint64_t> &Vals, +                                     StringRef *Blob) { +  if (AbbrevID == bitc::UNABBREV_RECORD) { +    unsigned Code = ReadVBR(6); +    unsigned NumElts = ReadVBR(6); +    for (unsigned i = 0; i != NumElts; ++i) +      Vals.push_back(ReadVBR64(6)); +    return Code; +  } + +  const BitCodeAbbrev *Abbv = getAbbrev(AbbrevID); + +  // Read the record code first. 
+  assert(Abbv->getNumOperandInfos() != 0 && "no record code in abbreviation?"); +  const BitCodeAbbrevOp &CodeOp = Abbv->getOperandInfo(0); +  unsigned Code; +  if (CodeOp.isLiteral()) +    Code = CodeOp.getLiteralValue(); +  else { +    if (CodeOp.getEncoding() == BitCodeAbbrevOp::Array || +        CodeOp.getEncoding() == BitCodeAbbrevOp::Blob) +      report_fatal_error("Abbreviation starts with an Array or a Blob"); +    Code = readAbbreviatedField(*this, CodeOp); +  } + +  for (unsigned i = 1, e = Abbv->getNumOperandInfos(); i != e; ++i) { +    const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i); +    if (Op.isLiteral()) { +      Vals.push_back(Op.getLiteralValue()); +      continue; +    } + +    if (Op.getEncoding() != BitCodeAbbrevOp::Array && +        Op.getEncoding() != BitCodeAbbrevOp::Blob) { +      Vals.push_back(readAbbreviatedField(*this, Op)); +      continue; +    } + +    if (Op.getEncoding() == BitCodeAbbrevOp::Array) { +      // Array case.  Read the number of elements as a vbr6. +      unsigned NumElts = ReadVBR(6); + +      // Get the element encoding. +      if (i + 2 != e) +        report_fatal_error("Array op not second to last"); +      const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i); +      if (!EltEnc.isEncoding()) +        report_fatal_error( +            "Array element type has to be an encoding of a type"); + +      // Read all the elements. 
+      switch (EltEnc.getEncoding()) { +      default: +        report_fatal_error("Array element type can't be an Array or a Blob"); +      case BitCodeAbbrevOp::Fixed: +        for (; NumElts; --NumElts) +          Vals.push_back(Read((unsigned)EltEnc.getEncodingData())); +        break; +      case BitCodeAbbrevOp::VBR: +        for (; NumElts; --NumElts) +          Vals.push_back(ReadVBR64((unsigned)EltEnc.getEncodingData())); +        break; +      case BitCodeAbbrevOp::Char6: +        for (; NumElts; --NumElts) +          Vals.push_back(BitCodeAbbrevOp::DecodeChar6(Read(6))); +      } +      continue; +    } + +    assert(Op.getEncoding() == BitCodeAbbrevOp::Blob); +    // Blob case.  Read the number of bytes as a vbr6. +    unsigned NumElts = ReadVBR(6); +    SkipToFourByteBoundary();  // 32-bit alignment + +    // Figure out where the end of this blob will be including tail padding. +    size_t CurBitPos = GetCurrentBitNo(); +    size_t NewEnd = CurBitPos+((NumElts+3)&~3)*8; + +    // If this would read off the end of the bitcode file, just set the +    // record to empty and return. +    if (!canSkipToPos(NewEnd/8)) { +      Vals.append(NumElts, 0); +      skipToEnd(); +      break; +    } + +    // Otherwise, inform the streamer that we need these bytes in memory.  Skip +    // over tail padding first, in case jumping to NewEnd invalidates the Blob +    // pointer. +    JumpToBit(NewEnd); +    const char *Ptr = (const char *)getPointerToBit(CurBitPos, NumElts); + +    // If we can return a reference to the data, do so to avoid copying it. +    if (Blob) { +      *Blob = StringRef(Ptr, NumElts); +    } else { +      // Otherwise, unpack into Vals with zero extension. 
+      for (; NumElts; --NumElts) +        Vals.push_back((unsigned char)*Ptr++); +    } +  } + +  return Code; +} + +void BitstreamCursor::ReadAbbrevRecord() { +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  unsigned NumOpInfo = ReadVBR(5); +  for (unsigned i = 0; i != NumOpInfo; ++i) { +    bool IsLiteral = Read(1); +    if (IsLiteral) { +      Abbv->Add(BitCodeAbbrevOp(ReadVBR64(8))); +      continue; +    } + +    BitCodeAbbrevOp::Encoding E = (BitCodeAbbrevOp::Encoding)Read(3); +    if (BitCodeAbbrevOp::hasEncodingData(E)) { +      uint64_t Data = ReadVBR64(5); + +      // As a special case, handle fixed(0) (i.e., a fixed field with zero bits) +      // and vbr(0) as a literal zero.  This is decoded the same way, and avoids +      // a slow path in Read() to have to handle reading zero bits. +      if ((E == BitCodeAbbrevOp::Fixed || E == BitCodeAbbrevOp::VBR) && +          Data == 0) { +        Abbv->Add(BitCodeAbbrevOp(0)); +        continue; +      } + +      if ((E == BitCodeAbbrevOp::Fixed || E == BitCodeAbbrevOp::VBR) && +          Data > MaxChunkSize) +        report_fatal_error( +            "Fixed or VBR abbrev record with size > MaxChunkData"); + +      Abbv->Add(BitCodeAbbrevOp(E, Data)); +    } else +      Abbv->Add(BitCodeAbbrevOp(E)); +  } + +  if (Abbv->getNumOperandInfos() == 0) +    report_fatal_error("Abbrev record with no operands"); +  CurAbbrevs.push_back(std::move(Abbv)); +} + +Optional<BitstreamBlockInfo> +BitstreamCursor::ReadBlockInfoBlock(bool ReadBlockInfoNames) { +  if (EnterSubBlock(bitc::BLOCKINFO_BLOCK_ID)) return None; + +  BitstreamBlockInfo NewBlockInfo; + +  SmallVector<uint64_t, 64> Record; +  BitstreamBlockInfo::BlockInfo *CurBlockInfo = nullptr; + +  // Read all the records for this module. +  while (true) { +    BitstreamEntry Entry = advanceSkippingSubblocks(AF_DontAutoprocessAbbrevs); + +    switch (Entry.Kind) { +    case llvm::BitstreamEntry::SubBlock: // Handled for us already. 
+    case llvm::BitstreamEntry::Error: +      return None; +    case llvm::BitstreamEntry::EndBlock: +      return std::move(NewBlockInfo); +    case llvm::BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read abbrev records, associate them with CurBID. +    if (Entry.ID == bitc::DEFINE_ABBREV) { +      if (!CurBlockInfo) return None; +      ReadAbbrevRecord(); + +      // ReadAbbrevRecord installs the abbrev in CurAbbrevs.  Move it to the +      // appropriate BlockInfo. +      CurBlockInfo->Abbrevs.push_back(std::move(CurAbbrevs.back())); +      CurAbbrevs.pop_back(); +      continue; +    } + +    // Read a record. +    Record.clear(); +    switch (readRecord(Entry.ID, Record)) { +      default: break;  // Default behavior, ignore unknown content. +      case bitc::BLOCKINFO_CODE_SETBID: +        if (Record.size() < 1) return None; +        CurBlockInfo = &NewBlockInfo.getOrCreateBlockInfo((unsigned)Record[0]); +        break; +      case bitc::BLOCKINFO_CODE_BLOCKNAME: { +        if (!CurBlockInfo) return None; +        if (!ReadBlockInfoNames) +          break; // Ignore name. +        std::string Name; +        for (unsigned i = 0, e = Record.size(); i != e; ++i) +          Name += (char)Record[i]; +        CurBlockInfo->Name = Name; +        break; +      } +      case bitc::BLOCKINFO_CODE_SETRECORDNAME: { +        if (!CurBlockInfo) return None; +        if (!ReadBlockInfoNames) +          break; // Ignore name. 
+        std::string Name; +        for (unsigned i = 1, e = Record.size(); i != e; ++i) +          Name += (char)Record[i]; +        CurBlockInfo->RecordNames.push_back(std::make_pair((unsigned)Record[0], +                                                           Name)); +        break; +      } +    } +  } +} diff --git a/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp new file mode 100644 index 000000000000..011c41e2cecd --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.cpp @@ -0,0 +1,2015 @@ +//===- MetadataLoader.cpp - Internal BitcodeReader implementation ---------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "MetadataLoader.h" +#include "ValueList.h" + +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/None.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/Bitcode/BitstreamReader.h" +#include "llvm/Bitcode/LLVMBitCodes.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/AutoUpgrade.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CallingConv.h" +#include "llvm/IR/Comdat.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/DiagnosticPrinter.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GVMaterializer.h" +#include 
"llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalIFunc.h" +#include "llvm/IR/GlobalIndirectSymbol.h" +#include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/ModuleSummaryIndex.h" +#include "llvm/IR/OperandTraits.h" +#include "llvm/IR/TrackingMDRef.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/ValueHandle.h" +#include "llvm/Support/AtomicOrdering.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <deque> +#include <limits> +#include <map> +#include <memory> +#include <string> +#include <system_error> +#include <tuple> +#include <utility> +#include <vector> + +using namespace llvm; + +#define DEBUG_TYPE "bitcode-reader" + +STATISTIC(NumMDStringLoaded, "Number of MDStrings loaded"); +STATISTIC(NumMDNodeTemporary, "Number of MDNode::Temporary created"); +STATISTIC(NumMDRecordLoaded, "Number of Metadata records loaded"); + +/// Flag whether we need to import full type definitions for ThinLTO. +/// Currently needed for Darwin and LLDB. 
+static cl::opt<bool> ImportFullTypeDefinitions( +    "import-full-type-definitions", cl::init(false), cl::Hidden, +    cl::desc("Import full type definitions for ThinLTO.")); + +static cl::opt<bool> DisableLazyLoading( +    "disable-ondemand-mds-loading", cl::init(false), cl::Hidden, +    cl::desc("Force disable the lazy-loading on-demand of metadata when " +             "loading bitcode for importing.")); + +namespace { + +static int64_t unrotateSign(uint64_t U) { return U & 1 ? ~(U >> 1) : U >> 1; } + +class BitcodeReaderMetadataList { +  /// Array of metadata references. +  /// +  /// Don't use std::vector here.  Some versions of libc++ copy (instead of +  /// move) on resize, and TrackingMDRef is very expensive to copy. +  SmallVector<TrackingMDRef, 1> MetadataPtrs; + +  /// The set of indices in MetadataPtrs above of forward references that were +  /// generated. +  SmallDenseSet<unsigned, 1> ForwardReference; + +  /// The set of indices in MetadataPtrs above of Metadata that need to be +  /// resolved. +  SmallDenseSet<unsigned, 1> UnresolvedNodes; + +  /// Structures for resolving old type refs. 
+  struct { +    SmallDenseMap<MDString *, TempMDTuple, 1> Unknown; +    SmallDenseMap<MDString *, DICompositeType *, 1> Final; +    SmallDenseMap<MDString *, DICompositeType *, 1> FwdDecls; +    SmallVector<std::pair<TrackingMDRef, TempMDTuple>, 1> Arrays; +  } OldTypeRefs; + +  LLVMContext &Context; + +public: +  BitcodeReaderMetadataList(LLVMContext &C) : Context(C) {} + +  // vector compatibility methods +  unsigned size() const { return MetadataPtrs.size(); } +  void resize(unsigned N) { MetadataPtrs.resize(N); } +  void push_back(Metadata *MD) { MetadataPtrs.emplace_back(MD); } +  void clear() { MetadataPtrs.clear(); } +  Metadata *back() const { return MetadataPtrs.back(); } +  void pop_back() { MetadataPtrs.pop_back(); } +  bool empty() const { return MetadataPtrs.empty(); } + +  Metadata *operator[](unsigned i) const { +    assert(i < MetadataPtrs.size()); +    return MetadataPtrs[i]; +  } + +  Metadata *lookup(unsigned I) const { +    if (I < MetadataPtrs.size()) +      return MetadataPtrs[I]; +    return nullptr; +  } + +  void shrinkTo(unsigned N) { +    assert(N <= size() && "Invalid shrinkTo request!"); +    assert(ForwardReference.empty() && "Unexpected forward refs"); +    assert(UnresolvedNodes.empty() && "Unexpected unresolved node"); +    MetadataPtrs.resize(N); +  } + +  /// Return the given metadata, creating a replaceable forward reference if +  /// necessary. +  Metadata *getMetadataFwdRef(unsigned Idx); + +  /// Return the given metadata only if it is fully resolved. +  /// +  /// Gives the same result as \a lookup(), unless \a MDNode::isResolved() +  /// would give \c false. 
+  Metadata *getMetadataIfResolved(unsigned Idx); + +  MDNode *getMDNodeFwdRefOrNull(unsigned Idx); +  void assignValue(Metadata *MD, unsigned Idx); +  void tryToResolveCycles(); +  bool hasFwdRefs() const { return !ForwardReference.empty(); } +  int getNextFwdRef() { +    assert(hasFwdRefs()); +    return *ForwardReference.begin(); +  } + +  /// Upgrade a type that had an MDString reference. +  void addTypeRef(MDString &UUID, DICompositeType &CT); + +  /// Upgrade a type that had an MDString reference. +  Metadata *upgradeTypeRef(Metadata *MaybeUUID); + +  /// Upgrade a type ref array that may have MDString references. +  Metadata *upgradeTypeRefArray(Metadata *MaybeTuple); + +private: +  Metadata *resolveTypeRefArray(Metadata *MaybeTuple); +}; + +void BitcodeReaderMetadataList::assignValue(Metadata *MD, unsigned Idx) { +  if (auto *MDN = dyn_cast<MDNode>(MD)) +    if (!MDN->isResolved()) +      UnresolvedNodes.insert(Idx); + +  if (Idx == size()) { +    push_back(MD); +    return; +  } + +  if (Idx >= size()) +    resize(Idx + 1); + +  TrackingMDRef &OldMD = MetadataPtrs[Idx]; +  if (!OldMD) { +    OldMD.reset(MD); +    return; +  } + +  // If there was a forward reference to this value, replace it. +  TempMDTuple PrevMD(cast<MDTuple>(OldMD.get())); +  PrevMD->replaceAllUsesWith(MD); +  ForwardReference.erase(Idx); +} + +Metadata *BitcodeReaderMetadataList::getMetadataFwdRef(unsigned Idx) { +  if (Idx >= size()) +    resize(Idx + 1); + +  if (Metadata *MD = MetadataPtrs[Idx]) +    return MD; + +  // Track forward refs to be resolved later. +  ForwardReference.insert(Idx); + +  // Create and return a placeholder, which will later be RAUW'd. 
+  ++NumMDNodeTemporary; +  Metadata *MD = MDNode::getTemporary(Context, None).release(); +  MetadataPtrs[Idx].reset(MD); +  return MD; +} + +Metadata *BitcodeReaderMetadataList::getMetadataIfResolved(unsigned Idx) { +  Metadata *MD = lookup(Idx); +  if (auto *N = dyn_cast_or_null<MDNode>(MD)) +    if (!N->isResolved()) +      return nullptr; +  return MD; +} + +MDNode *BitcodeReaderMetadataList::getMDNodeFwdRefOrNull(unsigned Idx) { +  return dyn_cast_or_null<MDNode>(getMetadataFwdRef(Idx)); +} + +void BitcodeReaderMetadataList::tryToResolveCycles() { +  if (!ForwardReference.empty()) +    // Still forward references... can't resolve cycles. +    return; + +  // Give up on finding a full definition for any forward decls that remain. +  for (const auto &Ref : OldTypeRefs.FwdDecls) +    OldTypeRefs.Final.insert(Ref); +  OldTypeRefs.FwdDecls.clear(); + +  // Upgrade from old type ref arrays.  In strange cases, this could add to +  // OldTypeRefs.Unknown. +  for (const auto &Array : OldTypeRefs.Arrays) +    Array.second->replaceAllUsesWith(resolveTypeRefArray(Array.first.get())); +  OldTypeRefs.Arrays.clear(); + +  // Replace old string-based type refs with the resolved node, if possible. +  // If we haven't seen the node, leave it to the verifier to complain about +  // the invalid string reference. +  for (const auto &Ref : OldTypeRefs.Unknown) { +    if (DICompositeType *CT = OldTypeRefs.Final.lookup(Ref.first)) +      Ref.second->replaceAllUsesWith(CT); +    else +      Ref.second->replaceAllUsesWith(Ref.first); +  } +  OldTypeRefs.Unknown.clear(); + +  if (UnresolvedNodes.empty()) +    // Nothing to do. +    return; + +  // Resolve any cycles. +  for (unsigned I : UnresolvedNodes) { +    auto &MD = MetadataPtrs[I]; +    auto *N = dyn_cast_or_null<MDNode>(MD); +    if (!N) +      continue; + +    assert(!N->isTemporary() && "Unexpected forward reference"); +    N->resolveCycles(); +  } + +  // Make sure we return early again until there's another unresolved ref. 
+  UnresolvedNodes.clear(); +} + +void BitcodeReaderMetadataList::addTypeRef(MDString &UUID, +                                           DICompositeType &CT) { +  assert(CT.getRawIdentifier() == &UUID && "Mismatched UUID"); +  if (CT.isForwardDecl()) +    OldTypeRefs.FwdDecls.insert(std::make_pair(&UUID, &CT)); +  else +    OldTypeRefs.Final.insert(std::make_pair(&UUID, &CT)); +} + +Metadata *BitcodeReaderMetadataList::upgradeTypeRef(Metadata *MaybeUUID) { +  auto *UUID = dyn_cast_or_null<MDString>(MaybeUUID); +  if (LLVM_LIKELY(!UUID)) +    return MaybeUUID; + +  if (auto *CT = OldTypeRefs.Final.lookup(UUID)) +    return CT; + +  auto &Ref = OldTypeRefs.Unknown[UUID]; +  if (!Ref) +    Ref = MDNode::getTemporary(Context, None); +  return Ref.get(); +} + +Metadata *BitcodeReaderMetadataList::upgradeTypeRefArray(Metadata *MaybeTuple) { +  auto *Tuple = dyn_cast_or_null<MDTuple>(MaybeTuple); +  if (!Tuple || Tuple->isDistinct()) +    return MaybeTuple; + +  // Look through the array immediately if possible. +  if (!Tuple->isTemporary()) +    return resolveTypeRefArray(Tuple); + +  // Create and return a placeholder to use for now.  Eventually +  // resolveTypeRefArrays() will be resolve this forward reference. +  OldTypeRefs.Arrays.emplace_back( +      std::piecewise_construct, std::forward_as_tuple(Tuple), +      std::forward_as_tuple(MDTuple::getTemporary(Context, None))); +  return OldTypeRefs.Arrays.back().second.get(); +} + +Metadata *BitcodeReaderMetadataList::resolveTypeRefArray(Metadata *MaybeTuple) { +  auto *Tuple = dyn_cast_or_null<MDTuple>(MaybeTuple); +  if (!Tuple || Tuple->isDistinct()) +    return MaybeTuple; + +  // Look through the DITypeRefArray, upgrading each DITypeRef. 
+  SmallVector<Metadata *, 32> Ops; +  Ops.reserve(Tuple->getNumOperands()); +  for (Metadata *MD : Tuple->operands()) +    Ops.push_back(upgradeTypeRef(MD)); + +  return MDTuple::get(Context, Ops); +} + +namespace { + +class PlaceholderQueue { +  // Placeholders would thrash around when moved, so store in a std::deque +  // instead of some sort of vector. +  std::deque<DistinctMDOperandPlaceholder> PHs; + +public: +  ~PlaceholderQueue() { +    assert(empty() && "PlaceholderQueue hasn't been flushed before being destroyed"); +  } +  bool empty() { return PHs.empty(); } +  DistinctMDOperandPlaceholder &getPlaceholderOp(unsigned ID); +  void flush(BitcodeReaderMetadataList &MetadataList); + +  /// Return the list of temporaries nodes in the queue, these need to be +  /// loaded before we can flush the queue. +  void getTemporaries(BitcodeReaderMetadataList &MetadataList, +                      DenseSet<unsigned> &Temporaries) { +    for (auto &PH : PHs) { +      auto ID = PH.getID(); +      auto *MD = MetadataList.lookup(ID); +      if (!MD) { +        Temporaries.insert(ID); +        continue; +      } +      auto *N = dyn_cast_or_null<MDNode>(MD); +      if (N && N->isTemporary()) +        Temporaries.insert(ID); +    } +  } +}; + +} // end anonymous namespace + +DistinctMDOperandPlaceholder &PlaceholderQueue::getPlaceholderOp(unsigned ID) { +  PHs.emplace_back(ID); +  return PHs.back(); +} + +void PlaceholderQueue::flush(BitcodeReaderMetadataList &MetadataList) { +  while (!PHs.empty()) { +    auto *MD = MetadataList.lookup(PHs.front().getID()); +    assert(MD && "Flushing placeholder on unassigned MD"); +#ifndef NDEBUG +    if (auto *MDN = dyn_cast<MDNode>(MD)) +      assert(MDN->isResolved() && +             "Flushing Placeholder while cycles aren't resolved"); +#endif +    PHs.front().replaceUseWith(MD); +    PHs.pop_front(); +  } +} + +} // anonynous namespace + +static Error error(const Twine &Message) { +  return make_error<StringError>( +      Message, 
make_error_code(BitcodeError::CorruptedBitcode)); +} + +class MetadataLoader::MetadataLoaderImpl { +  BitcodeReaderMetadataList MetadataList; +  BitcodeReaderValueList &ValueList; +  BitstreamCursor &Stream; +  LLVMContext &Context; +  Module &TheModule; +  std::function<Type *(unsigned)> getTypeByID; + +  /// Cursor associated with the lazy-loading of Metadata. This is the easy way +  /// to keep around the right "context" (Abbrev list) to be able to jump in +  /// the middle of the metadata block and load any record. +  BitstreamCursor IndexCursor; + +  /// Index that keeps track of MDString values. +  std::vector<StringRef> MDStringRef; + +  /// On-demand loading of a single MDString. Requires the index above to be +  /// populated. +  MDString *lazyLoadOneMDString(unsigned Idx); + +  /// Index that keeps track of where to find a metadata record in the stream. +  std::vector<uint64_t> GlobalMetadataBitPosIndex; + +  /// Populate the index above to enable lazily loading of metadata, and load +  /// the named metadata as well as the transitively referenced global +  /// Metadata. +  Expected<bool> lazyLoadModuleMetadataBlock(); + +  /// On-demand loading of a single metadata. Requires the index above to be +  /// populated. +  void lazyLoadOneMetadata(unsigned Idx, PlaceholderQueue &Placeholders); + +  // Keep mapping of seens pair of old-style CU <-> SP, and update pointers to +  // point from SP to CU after a block is completly parsed. +  std::vector<std::pair<DICompileUnit *, Metadata *>> CUSubprograms; + +  /// Functions that need to be matched with subprograms when upgrading old +  /// metadata. +  SmallDenseMap<Function *, DISubprogram *, 16> FunctionsWithSPs; + +  // Map the bitcode's custom MDKind ID to the Module's MDKind ID. 
+  DenseMap<unsigned, unsigned> MDKindMap; + +  bool StripTBAA = false; +  bool HasSeenOldLoopTags = false; +  bool NeedUpgradeToDIGlobalVariableExpression = false; +  bool NeedDeclareExpressionUpgrade = false; + +  /// True if metadata is being parsed for a module being ThinLTO imported. +  bool IsImporting = false; + +  Error parseOneMetadata(SmallVectorImpl<uint64_t> &Record, unsigned Code, +                         PlaceholderQueue &Placeholders, StringRef Blob, +                         unsigned &NextMetadataNo); +  Error parseMetadataStrings(ArrayRef<uint64_t> Record, StringRef Blob, +                             function_ref<void(StringRef)> CallBack); +  Error parseGlobalObjectAttachment(GlobalObject &GO, +                                    ArrayRef<uint64_t> Record); +  Error parseMetadataKindRecord(SmallVectorImpl<uint64_t> &Record); + +  void resolveForwardRefsAndPlaceholders(PlaceholderQueue &Placeholders); + +  /// Upgrade old-style CU <-> SP pointers to point from SP to CU. +  void upgradeCUSubprograms() { +    for (auto CU_SP : CUSubprograms) +      if (auto *SPs = dyn_cast_or_null<MDTuple>(CU_SP.second)) +        for (auto &Op : SPs->operands()) +          if (auto *SP = dyn_cast_or_null<DISubprogram>(Op)) +            SP->replaceUnit(CU_SP.first); +    CUSubprograms.clear(); +  } + +  /// Upgrade old-style bare DIGlobalVariables to DIGlobalVariableExpressions. +  void upgradeCUVariables() { +    if (!NeedUpgradeToDIGlobalVariableExpression) +      return; + +    // Upgrade list of variables attached to the CUs. 
+    if (NamedMDNode *CUNodes = TheModule.getNamedMetadata("llvm.dbg.cu")) +      for (unsigned I = 0, E = CUNodes->getNumOperands(); I != E; ++I) { +        auto *CU = cast<DICompileUnit>(CUNodes->getOperand(I)); +        if (auto *GVs = dyn_cast_or_null<MDTuple>(CU->getRawGlobalVariables())) +          for (unsigned I = 0; I < GVs->getNumOperands(); I++) +            if (auto *GV = +                    dyn_cast_or_null<DIGlobalVariable>(GVs->getOperand(I))) { +              auto *DGVE = DIGlobalVariableExpression::getDistinct( +                  Context, GV, DIExpression::get(Context, {})); +              GVs->replaceOperandWith(I, DGVE); +            } +      } + +    // Upgrade variables attached to globals. +    for (auto &GV : TheModule.globals()) { +      SmallVector<MDNode *, 1> MDs; +      GV.getMetadata(LLVMContext::MD_dbg, MDs); +      GV.eraseMetadata(LLVMContext::MD_dbg); +      for (auto *MD : MDs) +        if (auto *DGV = dyn_cast_or_null<DIGlobalVariable>(MD)) { +          auto *DGVE = DIGlobalVariableExpression::getDistinct( +              Context, DGV, DIExpression::get(Context, {})); +          GV.addMetadata(LLVMContext::MD_dbg, *DGVE); +        } else +          GV.addMetadata(LLVMContext::MD_dbg, *MD); +    } +  } + +  /// Remove a leading DW_OP_deref from DIExpressions in a dbg.declare that +  /// describes a function argument. 
+  void upgradeDeclareExpressions(Function &F) { +    if (!NeedDeclareExpressionUpgrade) +      return; + +    for (auto &BB : F) +      for (auto &I : BB) +        if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) +          if (auto *DIExpr = DDI->getExpression()) +            if (DIExpr->startsWithDeref() && +                dyn_cast_or_null<Argument>(DDI->getAddress())) { +              SmallVector<uint64_t, 8> Ops; +              Ops.append(std::next(DIExpr->elements_begin()), +                         DIExpr->elements_end()); +              auto *E = DIExpression::get(Context, Ops); +              DDI->setOperand(2, MetadataAsValue::get(Context, E)); +            } +  } + +  /// Upgrade the expression from previous versions. +  Error upgradeDIExpression(uint64_t FromVersion, +                            MutableArrayRef<uint64_t> &Expr, +                            SmallVectorImpl<uint64_t> &Buffer) { +    auto N = Expr.size(); +    switch (FromVersion) { +    default: +      return error("Invalid record"); +    case 0: +      if (N >= 3 && Expr[N - 3] == dwarf::DW_OP_bit_piece) +        Expr[N - 3] = dwarf::DW_OP_LLVM_fragment; +      LLVM_FALLTHROUGH; +    case 1: +      // Move DW_OP_deref to the end. +      if (N && Expr[0] == dwarf::DW_OP_deref) { +        auto End = Expr.end(); +        if (Expr.size() >= 3 && +            *std::prev(End, 3) == dwarf::DW_OP_LLVM_fragment) +          End = std::prev(End, 3); +        std::move(std::next(Expr.begin()), End, Expr.begin()); +        *std::prev(End) = dwarf::DW_OP_deref; +      } +      NeedDeclareExpressionUpgrade = true; +      LLVM_FALLTHROUGH; +    case 2: { +      // Change DW_OP_plus to DW_OP_plus_uconst. 
+      // Change DW_OP_minus to DW_OP_uconst, DW_OP_minus +      auto SubExpr = ArrayRef<uint64_t>(Expr); +      while (!SubExpr.empty()) { +        // Skip past other operators with their operands +        // for this version of the IR, obtained from +        // from historic DIExpression::ExprOperand::getSize(). +        size_t HistoricSize; +        switch (SubExpr.front()) { +        default: +          HistoricSize = 1; +          break; +        case dwarf::DW_OP_constu: +        case dwarf::DW_OP_minus: +        case dwarf::DW_OP_plus: +          HistoricSize = 2; +          break; +        case dwarf::DW_OP_LLVM_fragment: +          HistoricSize = 3; +          break; +        } + +        // If the expression is malformed, make sure we don't +        // copy more elements than we should. +        HistoricSize = std::min(SubExpr.size(), HistoricSize); +        ArrayRef<uint64_t> Args = SubExpr.slice(1, HistoricSize-1); + +        switch (SubExpr.front()) { +        case dwarf::DW_OP_plus: +          Buffer.push_back(dwarf::DW_OP_plus_uconst); +          Buffer.append(Args.begin(), Args.end()); +          break; +        case dwarf::DW_OP_minus: +          Buffer.push_back(dwarf::DW_OP_constu); +          Buffer.append(Args.begin(), Args.end()); +          Buffer.push_back(dwarf::DW_OP_minus); +          break; +        default: +          Buffer.push_back(*SubExpr.begin()); +          Buffer.append(Args.begin(), Args.end()); +          break; +        } + +        // Continue with remaining elements. +        SubExpr = SubExpr.slice(HistoricSize); +      } +      Expr = MutableArrayRef<uint64_t>(Buffer); +      LLVM_FALLTHROUGH; +    } +    case 3: +      // Up-to-date! 
+      break; +    } + +    return Error::success(); +  } + +  void upgradeDebugInfo() { +    upgradeCUSubprograms(); +    upgradeCUVariables(); +  } + +public: +  MetadataLoaderImpl(BitstreamCursor &Stream, Module &TheModule, +                     BitcodeReaderValueList &ValueList, +                     std::function<Type *(unsigned)> getTypeByID, +                     bool IsImporting) +      : MetadataList(TheModule.getContext()), ValueList(ValueList), +        Stream(Stream), Context(TheModule.getContext()), TheModule(TheModule), +        getTypeByID(std::move(getTypeByID)), IsImporting(IsImporting) {} + +  Error parseMetadata(bool ModuleLevel); + +  bool hasFwdRefs() const { return MetadataList.hasFwdRefs(); } + +  Metadata *getMetadataFwdRefOrLoad(unsigned ID) { +    if (ID < MDStringRef.size()) +      return lazyLoadOneMDString(ID); +    if (auto *MD = MetadataList.lookup(ID)) +      return MD; +    // If lazy-loading is enabled, we try recursively to load the operand +    // instead of creating a temporary. 
+    if (ID < (MDStringRef.size() + GlobalMetadataBitPosIndex.size())) { +      PlaceholderQueue Placeholders; +      lazyLoadOneMetadata(ID, Placeholders); +      resolveForwardRefsAndPlaceholders(Placeholders); +      return MetadataList.lookup(ID); +    } +    return MetadataList.getMetadataFwdRef(ID); +  } + +  MDNode *getMDNodeFwdRefOrNull(unsigned Idx) { +    return MetadataList.getMDNodeFwdRefOrNull(Idx); +  } + +  DISubprogram *lookupSubprogramForFunction(Function *F) { +    return FunctionsWithSPs.lookup(F); +  } + +  bool hasSeenOldLoopTags() { return HasSeenOldLoopTags; } + +  Error parseMetadataAttachment( +      Function &F, const SmallVectorImpl<Instruction *> &InstructionList); + +  Error parseMetadataKinds(); + +  void setStripTBAA(bool Value) { StripTBAA = Value; } +  bool isStrippingTBAA() { return StripTBAA; } + +  unsigned size() const { return MetadataList.size(); } +  void shrinkTo(unsigned N) { MetadataList.shrinkTo(N); } +  void upgradeDebugIntrinsics(Function &F) { upgradeDeclareExpressions(F); } +}; + +Expected<bool> +MetadataLoader::MetadataLoaderImpl::lazyLoadModuleMetadataBlock() { +  IndexCursor = Stream; +  SmallVector<uint64_t, 64> Record; +  // Get the abbrevs, and preload record positions to make them lazy-loadable. +  while (true) { +    BitstreamEntry Entry = IndexCursor.advanceSkippingSubblocks( +        BitstreamCursor::AF_DontPopBlockAtEnd); +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: { +      return true; +    } +    case BitstreamEntry::Record: { +      // The interesting case. +      ++NumMDRecordLoaded; +      uint64_t CurrentPos = IndexCursor.GetCurrentBitNo(); +      auto Code = IndexCursor.skipRecord(Entry.ID); +      switch (Code) { +      case bitc::METADATA_STRINGS: { +        // Rewind and parse the strings. 
+        IndexCursor.JumpToBit(CurrentPos); +        StringRef Blob; +        Record.clear(); +        IndexCursor.readRecord(Entry.ID, Record, &Blob); +        unsigned NumStrings = Record[0]; +        MDStringRef.reserve(NumStrings); +        auto IndexNextMDString = [&](StringRef Str) { +          MDStringRef.push_back(Str); +        }; +        if (auto Err = parseMetadataStrings(Record, Blob, IndexNextMDString)) +          return std::move(Err); +        break; +      } +      case bitc::METADATA_INDEX_OFFSET: { +        // This is the offset to the index, when we see this we skip all the +        // records and load only an index to these. +        IndexCursor.JumpToBit(CurrentPos); +        Record.clear(); +        IndexCursor.readRecord(Entry.ID, Record); +        if (Record.size() != 2) +          return error("Invalid record"); +        auto Offset = Record[0] + (Record[1] << 32); +        auto BeginPos = IndexCursor.GetCurrentBitNo(); +        IndexCursor.JumpToBit(BeginPos + Offset); +        Entry = IndexCursor.advanceSkippingSubblocks( +            BitstreamCursor::AF_DontPopBlockAtEnd); +        assert(Entry.Kind == BitstreamEntry::Record && +               "Corrupted bitcode: Expected `Record` when trying to find the " +               "Metadata index"); +        Record.clear(); +        auto Code = IndexCursor.readRecord(Entry.ID, Record); +        (void)Code; +        assert(Code == bitc::METADATA_INDEX && "Corrupted bitcode: Expected " +                                               "`METADATA_INDEX` when trying " +                                               "to find the Metadata index"); + +        // Delta unpack +        auto CurrentValue = BeginPos; +        GlobalMetadataBitPosIndex.reserve(Record.size()); +        for (auto &Elt : Record) { +          CurrentValue += Elt; +          GlobalMetadataBitPosIndex.push_back(CurrentValue); +        } +        break; +      } +      case bitc::METADATA_INDEX: +        // We don't expect to get 
there, the Index is loaded when we encounter +        // the offset. +        return error("Corrupted Metadata block"); +      case bitc::METADATA_NAME: { +        // Named metadata need to be materialized now and aren't deferred. +        IndexCursor.JumpToBit(CurrentPos); +        Record.clear(); +        unsigned Code = IndexCursor.readRecord(Entry.ID, Record); +        assert(Code == bitc::METADATA_NAME); + +        // Read name of the named metadata. +        SmallString<8> Name(Record.begin(), Record.end()); +        Code = IndexCursor.ReadCode(); + +        // Named Metadata comes in two parts, we expect the name to be followed +        // by the node +        Record.clear(); +        unsigned NextBitCode = IndexCursor.readRecord(Code, Record); +        assert(NextBitCode == bitc::METADATA_NAMED_NODE); +        (void)NextBitCode; + +        // Read named metadata elements. +        unsigned Size = Record.size(); +        NamedMDNode *NMD = TheModule.getOrInsertNamedMetadata(Name); +        for (unsigned i = 0; i != Size; ++i) { +          // FIXME: We could use a placeholder here, however NamedMDNode are +          // taking MDNode as operand and not using the Metadata infrastructure. +          // It is acknowledged by 'TODO: Inherit from Metadata' in the +          // NamedMDNode class definition. +          MDNode *MD = MetadataList.getMDNodeFwdRefOrNull(Record[i]); +          assert(MD && "Invalid record"); +          NMD->addOperand(MD); +        } +        break; +      } +      case bitc::METADATA_GLOBAL_DECL_ATTACHMENT: { +        // FIXME: we need to do this early because we don't materialize global +        // value explicitly. 
+        IndexCursor.JumpToBit(CurrentPos); +        Record.clear(); +        IndexCursor.readRecord(Entry.ID, Record); +        if (Record.size() % 2 == 0) +          return error("Invalid record"); +        unsigned ValueID = Record[0]; +        if (ValueID >= ValueList.size()) +          return error("Invalid record"); +        if (auto *GO = dyn_cast<GlobalObject>(ValueList[ValueID])) +          if (Error Err = parseGlobalObjectAttachment( +                  *GO, ArrayRef<uint64_t>(Record).slice(1))) +            return std::move(Err); +        break; +      } +      case bitc::METADATA_KIND: +      case bitc::METADATA_STRING_OLD: +      case bitc::METADATA_OLD_FN_NODE: +      case bitc::METADATA_OLD_NODE: +      case bitc::METADATA_VALUE: +      case bitc::METADATA_DISTINCT_NODE: +      case bitc::METADATA_NODE: +      case bitc::METADATA_LOCATION: +      case bitc::METADATA_GENERIC_DEBUG: +      case bitc::METADATA_SUBRANGE: +      case bitc::METADATA_ENUMERATOR: +      case bitc::METADATA_BASIC_TYPE: +      case bitc::METADATA_DERIVED_TYPE: +      case bitc::METADATA_COMPOSITE_TYPE: +      case bitc::METADATA_SUBROUTINE_TYPE: +      case bitc::METADATA_MODULE: +      case bitc::METADATA_FILE: +      case bitc::METADATA_COMPILE_UNIT: +      case bitc::METADATA_SUBPROGRAM: +      case bitc::METADATA_LEXICAL_BLOCK: +      case bitc::METADATA_LEXICAL_BLOCK_FILE: +      case bitc::METADATA_NAMESPACE: +      case bitc::METADATA_MACRO: +      case bitc::METADATA_MACRO_FILE: +      case bitc::METADATA_TEMPLATE_TYPE: +      case bitc::METADATA_TEMPLATE_VALUE: +      case bitc::METADATA_GLOBAL_VAR: +      case bitc::METADATA_LOCAL_VAR: +      case bitc::METADATA_LABEL: +      case bitc::METADATA_EXPRESSION: +      case bitc::METADATA_OBJC_PROPERTY: +      case bitc::METADATA_IMPORTED_ENTITY: +      case bitc::METADATA_GLOBAL_VAR_EXPR: +        // We don't expect to see any of these, if we see one, give up on +        // lazy-loading and fallback. 
+        MDStringRef.clear(); +        GlobalMetadataBitPosIndex.clear(); +        return false; +      } +      break; +    } +    } +  } +} + +/// Parse a METADATA_BLOCK. If ModuleLevel is true then we are parsing +/// module level metadata. +Error MetadataLoader::MetadataLoaderImpl::parseMetadata(bool ModuleLevel) { +  if (!ModuleLevel && MetadataList.hasFwdRefs()) +    return error("Invalid metadata: fwd refs into function blocks"); + +  // Record the entry position so that we can jump back here and efficiently +  // skip the whole block in case we lazy-load. +  auto EntryPos = Stream.GetCurrentBitNo(); + +  if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; +  PlaceholderQueue Placeholders; + +  // We lazy-load module-level metadata: we build an index for each record, and +  // then load individual record as needed, starting with the named metadata. +  if (ModuleLevel && IsImporting && MetadataList.empty() && +      !DisableLazyLoading) { +    auto SuccessOrErr = lazyLoadModuleMetadataBlock(); +    if (!SuccessOrErr) +      return SuccessOrErr.takeError(); +    if (SuccessOrErr.get()) { +      // An index was successfully created and we will be able to load metadata +      // on-demand. +      MetadataList.resize(MDStringRef.size() + +                          GlobalMetadataBitPosIndex.size()); + +      // Reading the named metadata created forward references and/or +      // placeholders, that we flush here. +      resolveForwardRefsAndPlaceholders(Placeholders); +      upgradeDebugInfo(); +      // Return at the beginning of the block, since it is easy to skip it +      // entirely from there. +      Stream.ReadBlockEnd(); // Pop the abbrev block context. +      Stream.JumpToBit(EntryPos); +      if (Stream.SkipBlock()) +        return error("Invalid record"); +      return Error::success(); +    } +    // Couldn't load an index, fallback to loading all the block "old-style". 
+  } + +  unsigned NextMetadataNo = MetadataList.size(); + +  // Read all the records. +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      resolveForwardRefsAndPlaceholders(Placeholders); +      upgradeDebugInfo(); +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. +    Record.clear(); +    StringRef Blob; +    ++NumMDRecordLoaded; +    unsigned Code = Stream.readRecord(Entry.ID, Record, &Blob); +    if (Error Err = +            parseOneMetadata(Record, Code, Placeholders, Blob, NextMetadataNo)) +      return Err; +  } +} + +MDString *MetadataLoader::MetadataLoaderImpl::lazyLoadOneMDString(unsigned ID) { +  ++NumMDStringLoaded; +  if (Metadata *MD = MetadataList.lookup(ID)) +    return cast<MDString>(MD); +  auto MDS = MDString::get(Context, MDStringRef[ID]); +  MetadataList.assignValue(MDS, ID); +  return MDS; +} + +void MetadataLoader::MetadataLoaderImpl::lazyLoadOneMetadata( +    unsigned ID, PlaceholderQueue &Placeholders) { +  assert(ID < (MDStringRef.size()) + GlobalMetadataBitPosIndex.size()); +  assert(ID >= MDStringRef.size() && "Unexpected lazy-loading of MDString"); +  // Lookup first if the metadata hasn't already been loaded. 
+  if (auto *MD = MetadataList.lookup(ID)) { +    auto *N = dyn_cast_or_null<MDNode>(MD); +    if (!N->isTemporary()) +      return; +  } +  SmallVector<uint64_t, 64> Record; +  StringRef Blob; +  IndexCursor.JumpToBit(GlobalMetadataBitPosIndex[ID - MDStringRef.size()]); +  auto Entry = IndexCursor.advanceSkippingSubblocks(); +  ++NumMDRecordLoaded; +  unsigned Code = IndexCursor.readRecord(Entry.ID, Record, &Blob); +  if (Error Err = parseOneMetadata(Record, Code, Placeholders, Blob, ID)) +    report_fatal_error("Can't lazyload MD"); +} + +/// Ensure that all forward-references and placeholders are resolved. +/// Iteratively lazy-loading metadata on-demand if needed. +void MetadataLoader::MetadataLoaderImpl::resolveForwardRefsAndPlaceholders( +    PlaceholderQueue &Placeholders) { +  DenseSet<unsigned> Temporaries; +  while (1) { +    // Populate Temporaries with the placeholders that haven't been loaded yet. +    Placeholders.getTemporaries(MetadataList, Temporaries); + +    // If we don't have any temporary, or FwdReference, we're done! +    if (Temporaries.empty() && !MetadataList.hasFwdRefs()) +      break; + +    // First, load all the temporaries. This can add new placeholders or +    // forward references. +    for (auto ID : Temporaries) +      lazyLoadOneMetadata(ID, Placeholders); +    Temporaries.clear(); + +    // Second, load the forward-references. This can also add new placeholders +    // or forward references. +    while (MetadataList.hasFwdRefs()) +      lazyLoadOneMetadata(MetadataList.getNextFwdRef(), Placeholders); +  } +  // At this point we don't have any forward reference remaining, or temporary +  // that haven't been loaded. We can safely drop RAUW support and mark cycles +  // as resolved. +  MetadataList.tryToResolveCycles(); + +  // Finally, everything is in place, we can replace the placeholders operands +  // with the final node they refer to. 
+  Placeholders.flush(MetadataList); +} + +Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata( +    SmallVectorImpl<uint64_t> &Record, unsigned Code, +    PlaceholderQueue &Placeholders, StringRef Blob, unsigned &NextMetadataNo) { + +  bool IsDistinct = false; +  auto getMD = [&](unsigned ID) -> Metadata * { +    if (ID < MDStringRef.size()) +      return lazyLoadOneMDString(ID); +    if (!IsDistinct) { +      if (auto *MD = MetadataList.lookup(ID)) +        return MD; +      // If lazy-loading is enabled, we try recursively to load the operand +      // instead of creating a temporary. +      if (ID < (MDStringRef.size() + GlobalMetadataBitPosIndex.size())) { +        // Create a temporary for the node that is referencing the operand we +        // will lazy-load. It is needed before recursing in case there are +        // uniquing cycles. +        MetadataList.getMetadataFwdRef(NextMetadataNo); +        lazyLoadOneMetadata(ID, Placeholders); +        return MetadataList.lookup(ID); +      } +      // Return a temporary. +      return MetadataList.getMetadataFwdRef(ID); +    } +    if (auto *MD = MetadataList.getMetadataIfResolved(ID)) +      return MD; +    return &Placeholders.getPlaceholderOp(ID); +  }; +  auto getMDOrNull = [&](unsigned ID) -> Metadata * { +    if (ID) +      return getMD(ID - 1); +    return nullptr; +  }; +  auto getMDOrNullWithoutPlaceholders = [&](unsigned ID) -> Metadata * { +    if (ID) +      return MetadataList.getMetadataFwdRef(ID - 1); +    return nullptr; +  }; +  auto getMDString = [&](unsigned ID) -> MDString * { +    // This requires that the ID is not really a forward reference.  In +    // particular, the MDString must already have been resolved. +    auto MDS = getMDOrNull(ID); +    return cast_or_null<MDString>(MDS); +  }; + +  // Support for old type refs. 
+  auto getDITypeRefOrNull = [&](unsigned ID) { +    return MetadataList.upgradeTypeRef(getMDOrNull(ID)); +  }; + +#define GET_OR_DISTINCT(CLASS, ARGS)                                           \ +  (IsDistinct ? CLASS::getDistinct ARGS : CLASS::get ARGS) + +  switch (Code) { +  default: // Default behavior: ignore. +    break; +  case bitc::METADATA_NAME: { +    // Read name of the named metadata. +    SmallString<8> Name(Record.begin(), Record.end()); +    Record.clear(); +    Code = Stream.ReadCode(); + +    ++NumMDRecordLoaded; +    unsigned NextBitCode = Stream.readRecord(Code, Record); +    if (NextBitCode != bitc::METADATA_NAMED_NODE) +      return error("METADATA_NAME not followed by METADATA_NAMED_NODE"); + +    // Read named metadata elements. +    unsigned Size = Record.size(); +    NamedMDNode *NMD = TheModule.getOrInsertNamedMetadata(Name); +    for (unsigned i = 0; i != Size; ++i) { +      MDNode *MD = MetadataList.getMDNodeFwdRefOrNull(Record[i]); +      if (!MD) +        return error("Invalid record"); +      NMD->addOperand(MD); +    } +    break; +  } +  case bitc::METADATA_OLD_FN_NODE: { +    // FIXME: Remove in 4.0. +    // This is a LocalAsMetadata record, the only type of function-local +    // metadata. +    if (Record.size() % 2 == 1) +      return error("Invalid record"); + +    // If this isn't a LocalAsMetadata record, we're dropping it.  This used +    // to be legal, but there's no upgrade path. 
+    auto dropRecord = [&] { +      MetadataList.assignValue(MDNode::get(Context, None), NextMetadataNo); +      NextMetadataNo++; +    }; +    if (Record.size() != 2) { +      dropRecord(); +      break; +    } + +    Type *Ty = getTypeByID(Record[0]); +    if (Ty->isMetadataTy() || Ty->isVoidTy()) { +      dropRecord(); +      break; +    } + +    MetadataList.assignValue( +        LocalAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_OLD_NODE: { +    // FIXME: Remove in 4.0. +    if (Record.size() % 2 == 1) +      return error("Invalid record"); + +    unsigned Size = Record.size(); +    SmallVector<Metadata *, 8> Elts; +    for (unsigned i = 0; i != Size; i += 2) { +      Type *Ty = getTypeByID(Record[i]); +      if (!Ty) +        return error("Invalid record"); +      if (Ty->isMetadataTy()) +        Elts.push_back(getMD(Record[i + 1])); +      else if (!Ty->isVoidTy()) { +        auto *MD = +            ValueAsMetadata::get(ValueList.getValueFwdRef(Record[i + 1], Ty)); +        assert(isa<ConstantAsMetadata>(MD) && +               "Expected non-function-local metadata"); +        Elts.push_back(MD); +      } else +        Elts.push_back(nullptr); +    } +    MetadataList.assignValue(MDNode::get(Context, Elts), NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_VALUE: { +    if (Record.size() != 2) +      return error("Invalid record"); + +    Type *Ty = getTypeByID(Record[0]); +    if (Ty->isMetadataTy() || Ty->isVoidTy()) +      return error("Invalid record"); + +    MetadataList.assignValue( +        ValueAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_DISTINCT_NODE: +    IsDistinct = true; +    LLVM_FALLTHROUGH; +  case bitc::METADATA_NODE: { +    SmallVector<Metadata *, 8> Elts; +    Elts.reserve(Record.size()); +    for (unsigned ID 
: Record) +      Elts.push_back(getMDOrNull(ID)); +    MetadataList.assignValue(IsDistinct ? MDNode::getDistinct(Context, Elts) +                                        : MDNode::get(Context, Elts), +                             NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_LOCATION: { +    if (Record.size() != 5) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    unsigned Line = Record[1]; +    unsigned Column = Record[2]; +    Metadata *Scope = getMD(Record[3]); +    Metadata *InlinedAt = getMDOrNull(Record[4]); +    MetadataList.assignValue( +        GET_OR_DISTINCT(DILocation, (Context, Line, Column, Scope, InlinedAt)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_GENERIC_DEBUG: { +    if (Record.size() < 4) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    unsigned Tag = Record[1]; +    unsigned Version = Record[2]; + +    if (Tag >= 1u << 16 || Version != 0) +      return error("Invalid record"); + +    auto *Header = getMDString(Record[3]); +    SmallVector<Metadata *, 8> DwarfOps; +    for (unsigned I = 4, E = Record.size(); I != E; ++I) +      DwarfOps.push_back(getMDOrNull(Record[I])); +    MetadataList.assignValue( +        GET_OR_DISTINCT(GenericDINode, (Context, Tag, Header, DwarfOps)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_SUBRANGE: { +    Metadata *Val = nullptr; +    // Operand 'count' is interpreted as: +    // - Signed integer (version 0) +    // - Metadata node  (version 1) +    switch (Record[0] >> 1) { +    case 0: +      Val = GET_OR_DISTINCT(DISubrange, +                            (Context, Record[1], unrotateSign(Record.back()))); +      break; +    case 1: +      Val = GET_OR_DISTINCT(DISubrange, (Context, getMDOrNull(Record[1]), +                                         unrotateSign(Record.back()))); +      break; +    default: +      return error("Invalid record: 
Unsupported version of DISubrange"); +    } + +    MetadataList.assignValue(Val, NextMetadataNo); +    IsDistinct = Record[0] & 1; +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_ENUMERATOR: { +    if (Record.size() != 3) +      return error("Invalid record"); + +    IsDistinct = Record[0] & 1; +    bool IsUnsigned = Record[0] & 2; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIEnumerator, (Context, unrotateSign(Record[1]), +                                       IsUnsigned, getMDString(Record[2]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_BASIC_TYPE: { +    if (Record.size() != 6) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIBasicType, +                        (Context, Record[1], getMDString(Record[2]), Record[3], +                         Record[4], Record[5])), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_DERIVED_TYPE: { +    if (Record.size() < 12 || Record.size() > 13) +      return error("Invalid record"); + +    // DWARF address space is encoded as N->getDWARFAddressSpace() + 1. 0 means +    // that there is no DWARF address space associated with DIDerivedType. 
+    Optional<unsigned> DWARFAddressSpace; +    if (Record.size() > 12 && Record[12]) +      DWARFAddressSpace = Record[12] - 1; + +    IsDistinct = Record[0]; +    DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[10]); +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIDerivedType, +                        (Context, Record[1], getMDString(Record[2]), +                         getMDOrNull(Record[3]), Record[4], +                         getDITypeRefOrNull(Record[5]), +                         getDITypeRefOrNull(Record[6]), Record[7], Record[8], +                         Record[9], DWARFAddressSpace, Flags, +                         getDITypeRefOrNull(Record[11]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_COMPOSITE_TYPE: { +    if (Record.size() < 16 || Record.size() > 17) +      return error("Invalid record"); + +    // If we have a UUID and this is not a forward declaration, lookup the +    // mapping. +    IsDistinct = Record[0] & 0x1; +    bool IsNotUsedInTypeRef = Record[0] >= 2; +    unsigned Tag = Record[1]; +    MDString *Name = getMDString(Record[2]); +    Metadata *File = getMDOrNull(Record[3]); +    unsigned Line = Record[4]; +    Metadata *Scope = getDITypeRefOrNull(Record[5]); +    Metadata *BaseType = nullptr; +    uint64_t SizeInBits = Record[7]; +    if (Record[8] > (uint64_t)std::numeric_limits<uint32_t>::max()) +      return error("Alignment value is too large"); +    uint32_t AlignInBits = Record[8]; +    uint64_t OffsetInBits = 0; +    DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[10]); +    Metadata *Elements = nullptr; +    unsigned RuntimeLang = Record[12]; +    Metadata *VTableHolder = nullptr; +    Metadata *TemplateParams = nullptr; +    Metadata *Discriminator = nullptr; +    auto *Identifier = getMDString(Record[15]); +    // If this module is being parsed so that it can be ThinLTO imported +    // into another module, composite types only need to be imported 
+    // as type declarations (unless full type definitions requested). +    // Create type declarations up front to save memory. Also, buildODRType +    // handles the case where this is type ODRed with a definition needed +    // by the importing module, in which case the existing definition is +    // used. +    if (IsImporting && !ImportFullTypeDefinitions && Identifier && +        (Tag == dwarf::DW_TAG_enumeration_type || +         Tag == dwarf::DW_TAG_class_type || +         Tag == dwarf::DW_TAG_structure_type || +         Tag == dwarf::DW_TAG_union_type)) { +      Flags = Flags | DINode::FlagFwdDecl; +    } else { +      BaseType = getDITypeRefOrNull(Record[6]); +      OffsetInBits = Record[9]; +      Elements = getMDOrNull(Record[11]); +      VTableHolder = getDITypeRefOrNull(Record[13]); +      TemplateParams = getMDOrNull(Record[14]); +      if (Record.size() > 16) +        Discriminator = getMDOrNull(Record[16]); +    } +    DICompositeType *CT = nullptr; +    if (Identifier) +      CT = DICompositeType::buildODRType( +          Context, *Identifier, Tag, Name, File, Line, Scope, BaseType, +          SizeInBits, AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang, +          VTableHolder, TemplateParams, Discriminator); + +    // Create a node if we didn't get a lazy ODR type. 
+    if (!CT) +      CT = GET_OR_DISTINCT(DICompositeType, +                           (Context, Tag, Name, File, Line, Scope, BaseType, +                            SizeInBits, AlignInBits, OffsetInBits, Flags, +                            Elements, RuntimeLang, VTableHolder, TemplateParams, +                            Identifier)); +    if (!IsNotUsedInTypeRef && Identifier) +      MetadataList.addTypeRef(*Identifier, *cast<DICompositeType>(CT)); + +    MetadataList.assignValue(CT, NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_SUBROUTINE_TYPE: { +    if (Record.size() < 3 || Record.size() > 4) +      return error("Invalid record"); +    bool IsOldTypeRefArray = Record[0] < 2; +    unsigned CC = (Record.size() > 3) ? Record[3] : 0; + +    IsDistinct = Record[0] & 0x1; +    DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[1]); +    Metadata *Types = getMDOrNull(Record[2]); +    if (LLVM_UNLIKELY(IsOldTypeRefArray)) +      Types = MetadataList.upgradeTypeRefArray(Types); + +    MetadataList.assignValue( +        GET_OR_DISTINCT(DISubroutineType, (Context, Flags, CC, Types)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } + +  case bitc::METADATA_MODULE: { +    if (Record.size() != 6) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIModule, +                        (Context, getMDOrNull(Record[1]), +                         getMDString(Record[2]), getMDString(Record[3]), +                         getMDString(Record[4]), getMDString(Record[5]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } + +  case bitc::METADATA_FILE: { +    if (Record.size() != 3 && Record.size() != 5 && Record.size() != 6) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    Optional<DIFile::ChecksumInfo<MDString *>> Checksum; +    // The BitcodeWriter writes null bytes into Record[3:4] when the Checksum +    // 
is not present. This matches up with the old internal representation, +    // and the old encoding for CSK_None in the ChecksumKind. The new +    // representation reserves the value 0 in the ChecksumKind to continue to +    // encode None in a backwards-compatible way. +    if (Record.size() > 4 && Record[3] && Record[4]) +      Checksum.emplace(static_cast<DIFile::ChecksumKind>(Record[3]), +                       getMDString(Record[4])); +    MetadataList.assignValue( +        GET_OR_DISTINCT( +            DIFile, +            (Context, getMDString(Record[1]), getMDString(Record[2]), Checksum, +             Record.size() > 5 ? Optional<MDString *>(getMDString(Record[5])) +                               : None)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_COMPILE_UNIT: { +    if (Record.size() < 14 || Record.size() > 19) +      return error("Invalid record"); + +    // Ignore Record[0], which indicates whether this compile unit is +    // distinct.  It's always distinct. +    IsDistinct = true; +    auto *CU = DICompileUnit::getDistinct( +        Context, Record[1], getMDOrNull(Record[2]), getMDString(Record[3]), +        Record[4], getMDString(Record[5]), Record[6], getMDString(Record[7]), +        Record[8], getMDOrNull(Record[9]), getMDOrNull(Record[10]), +        getMDOrNull(Record[12]), getMDOrNull(Record[13]), +        Record.size() <= 15 ? nullptr : getMDOrNull(Record[15]), +        Record.size() <= 14 ? 0 : Record[14], +        Record.size() <= 16 ? true : Record[16], +        Record.size() <= 17 ? false : Record[17], +        Record.size() <= 18 ? false : Record[18]); + +    MetadataList.assignValue(CU, NextMetadataNo); +    NextMetadataNo++; + +    // Move the Upgrade the list of subprograms. 
+    if (Metadata *SPs = getMDOrNullWithoutPlaceholders(Record[11])) +      CUSubprograms.push_back({CU, SPs}); +    break; +  } +  case bitc::METADATA_SUBPROGRAM: { +    if (Record.size() < 18 || Record.size() > 21) +      return error("Invalid record"); + +    IsDistinct = +        (Record[0] & 1) || Record[8]; // All definitions should be distinct. +    // Version 1 has a Function as Record[15]. +    // Version 2 has removed Record[15]. +    // Version 3 has the Unit as Record[15]. +    // Version 4 added thisAdjustment. +    bool HasUnit = Record[0] >= 2; +    if (HasUnit && Record.size() < 19) +      return error("Invalid record"); +    Metadata *CUorFn = getMDOrNull(Record[15]); +    unsigned Offset = Record.size() >= 19 ? 1 : 0; +    bool HasFn = Offset && !HasUnit; +    bool HasThisAdj = Record.size() >= 20; +    bool HasThrownTypes = Record.size() >= 21; +    DISubprogram *SP = GET_OR_DISTINCT( +        DISubprogram, +        (Context, +         getDITypeRefOrNull(Record[1]),                     // scope +         getMDString(Record[2]),                            // name +         getMDString(Record[3]),                            // linkageName +         getMDOrNull(Record[4]),                            // file +         Record[5],                                         // line +         getMDOrNull(Record[6]),                            // type +         Record[7],                                         // isLocal +         Record[8],                                         // isDefinition +         Record[9],                                         // scopeLine +         getDITypeRefOrNull(Record[10]),                    // containingType +         Record[11],                                        // virtuality +         Record[12],                                        // virtualIndex +         HasThisAdj ? 
Record[19] : 0,                       // thisAdjustment +         static_cast<DINode::DIFlags>(Record[13]),          // flags +         Record[14],                                        // isOptimized +         HasUnit ? CUorFn : nullptr,                        // unit +         getMDOrNull(Record[15 + Offset]),                  // templateParams +         getMDOrNull(Record[16 + Offset]),                  // declaration +         getMDOrNull(Record[17 + Offset]),                  // retainedNodes +         HasThrownTypes ? getMDOrNull(Record[20]) : nullptr // thrownTypes +         )); +    MetadataList.assignValue(SP, NextMetadataNo); +    NextMetadataNo++; + +    // Upgrade sp->function mapping to function->sp mapping. +    if (HasFn) { +      if (auto *CMD = dyn_cast_or_null<ConstantAsMetadata>(CUorFn)) +        if (auto *F = dyn_cast<Function>(CMD->getValue())) { +          if (F->isMaterializable()) +            // Defer until materialized; unmaterialized functions may not have +            // metadata. 
+            FunctionsWithSPs[F] = SP; +          else if (!F->empty()) +            F->setSubprogram(SP); +        } +    } +    break; +  } +  case bitc::METADATA_LEXICAL_BLOCK: { +    if (Record.size() != 5) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DILexicalBlock, +                        (Context, getMDOrNull(Record[1]), +                         getMDOrNull(Record[2]), Record[3], Record[4])), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_LEXICAL_BLOCK_FILE: { +    if (Record.size() != 4) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DILexicalBlockFile, +                        (Context, getMDOrNull(Record[1]), +                         getMDOrNull(Record[2]), Record[3])), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_NAMESPACE: { +    // Newer versions of DINamespace dropped file and line. 
+    MDString *Name; +    if (Record.size() == 3) +      Name = getMDString(Record[2]); +    else if (Record.size() == 5) +      Name = getMDString(Record[3]); +    else +      return error("Invalid record"); + +    IsDistinct = Record[0] & 1; +    bool ExportSymbols = Record[0] & 2; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DINamespace, +                        (Context, getMDOrNull(Record[1]), Name, ExportSymbols)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_MACRO: { +    if (Record.size() != 5) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIMacro, +                        (Context, Record[1], Record[2], getMDString(Record[3]), +                         getMDString(Record[4]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_MACRO_FILE: { +    if (Record.size() != 5) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIMacroFile, +                        (Context, Record[1], Record[2], getMDOrNull(Record[3]), +                         getMDOrNull(Record[4]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_TEMPLATE_TYPE: { +    if (Record.size() != 3) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue(GET_OR_DISTINCT(DITemplateTypeParameter, +                                             (Context, getMDString(Record[1]), +                                              getDITypeRefOrNull(Record[2]))), +                             NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_TEMPLATE_VALUE: { +    if (Record.size() != 5) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DITemplateValueParameter, +                   
     (Context, Record[1], getMDString(Record[2]), +                         getDITypeRefOrNull(Record[3]), +                         getMDOrNull(Record[4]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_GLOBAL_VAR: { +    if (Record.size() < 11 || Record.size() > 12) +      return error("Invalid record"); + +    IsDistinct = Record[0] & 1; +    unsigned Version = Record[0] >> 1; + +    if (Version == 1) { +      MetadataList.assignValue( +          GET_OR_DISTINCT(DIGlobalVariable, +                          (Context, getMDOrNull(Record[1]), +                           getMDString(Record[2]), getMDString(Record[3]), +                           getMDOrNull(Record[4]), Record[5], +                           getDITypeRefOrNull(Record[6]), Record[7], Record[8], +                           getMDOrNull(Record[10]), Record[11])), +          NextMetadataNo); +      NextMetadataNo++; +    } else if (Version == 0) { +      // Upgrade old metadata, which stored a global variable reference or a +      // ConstantInt here. 
+      NeedUpgradeToDIGlobalVariableExpression = true;
+      Metadata *Expr = getMDOrNull(Record[9]);
+      uint32_t AlignInBits = 0;
+      if (Record.size() > 11) {
+        if (Record[11] > (uint64_t)std::numeric_limits<uint32_t>::max())
+          return error("Alignment value is too large");
+        AlignInBits = Record[11];
+      }
+      GlobalVariable *Attach = nullptr;
+      if (auto *CMD = dyn_cast_or_null<ConstantAsMetadata>(Expr)) {
+        if (auto *GV = dyn_cast<GlobalVariable>(CMD->getValue())) {
+          Attach = GV;
+          Expr = nullptr;
+        } else if (auto *CI = dyn_cast<ConstantInt>(CMD->getValue())) {
+          Expr = DIExpression::get(Context,
+                                   {dwarf::DW_OP_constu, CI->getZExtValue(),
+                                    dwarf::DW_OP_stack_value});
+        } else {
+          Expr = nullptr;
+        }
+      }
+      DIGlobalVariable *DGV = GET_OR_DISTINCT(
+          DIGlobalVariable,
+          (Context, getMDOrNull(Record[1]), getMDString(Record[2]),
+           getMDString(Record[3]), getMDOrNull(Record[4]), Record[5],
+           getDITypeRefOrNull(Record[6]), Record[7], Record[8],
+           getMDOrNull(Record[10]), AlignInBits));
+
+      DIGlobalVariableExpression *DGVE = nullptr;
+      if (Attach || Expr)
+        DGVE = DIGlobalVariableExpression::getDistinct(
+            Context, DGV, Expr ? Expr : DIExpression::get(Context, {}));
+      if (Attach)
+        Attach->addDebugInfo(DGVE);
+
+      auto *MDNode = Expr ? cast<Metadata>(DGVE) : cast<Metadata>(DGV);
+      MetadataList.assignValue(MDNode, NextMetadataNo);
+      NextMetadataNo++;
+    } else
+      return error("Invalid record");
+
+    break;
+  }
+  case bitc::METADATA_LOCAL_VAR: {
+    // 10th field is for the obsolete 'inlinedAt:' field. 
+    if (Record.size() < 8 || Record.size() > 10) +      return error("Invalid record"); + +    IsDistinct = Record[0] & 1; +    bool HasAlignment = Record[0] & 2; +    // 2nd field used to be an artificial tag, either DW_TAG_auto_variable or +    // DW_TAG_arg_variable, if we have alignment flag encoded it means, that +    // this is newer version of record which doesn't have artificial tag. +    bool HasTag = !HasAlignment && Record.size() > 8; +    DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[7 + HasTag]); +    uint32_t AlignInBits = 0; +    if (HasAlignment) { +      if (Record[8 + HasTag] > (uint64_t)std::numeric_limits<uint32_t>::max()) +        return error("Alignment value is too large"); +      AlignInBits = Record[8 + HasTag]; +    } +    MetadataList.assignValue( +        GET_OR_DISTINCT(DILocalVariable, +                        (Context, getMDOrNull(Record[1 + HasTag]), +                         getMDString(Record[2 + HasTag]), +                         getMDOrNull(Record[3 + HasTag]), Record[4 + HasTag], +                         getDITypeRefOrNull(Record[5 + HasTag]), +                         Record[6 + HasTag], Flags, AlignInBits)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_LABEL: { +    if (Record.size() != 5) +      return error("Invalid record"); + +    IsDistinct = Record[0] & 1; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DILabel, +                        (Context, getMDOrNull(Record[1]), +                         getMDString(Record[2]), +                         getMDOrNull(Record[3]), Record[4])), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_EXPRESSION: { +    if (Record.size() < 1) +      return error("Invalid record"); + +    IsDistinct = Record[0] & 1; +    uint64_t Version = Record[0] >> 1; +    auto Elts = MutableArrayRef<uint64_t>(Record).slice(1); + +    SmallVector<uint64_t, 6> Buffer; +    if (Error Err = 
upgradeDIExpression(Version, Elts, Buffer)) +      return Err; + +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIExpression, (Context, Elts)), NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_GLOBAL_VAR_EXPR: { +    if (Record.size() != 3) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    Metadata *Expr = getMDOrNull(Record[2]); +    if (!Expr) +      Expr = DIExpression::get(Context, {}); +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIGlobalVariableExpression, +                        (Context, getMDOrNull(Record[1]), Expr)), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_OBJC_PROPERTY: { +    if (Record.size() != 8) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIObjCProperty, +                        (Context, getMDString(Record[1]), +                         getMDOrNull(Record[2]), Record[3], +                         getMDString(Record[4]), getMDString(Record[5]), +                         Record[6], getDITypeRefOrNull(Record[7]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_IMPORTED_ENTITY: { +    if (Record.size() != 6 && Record.size() != 7) +      return error("Invalid record"); + +    IsDistinct = Record[0]; +    bool HasFile = (Record.size() == 7); +    MetadataList.assignValue( +        GET_OR_DISTINCT(DIImportedEntity, +                        (Context, Record[1], getMDOrNull(Record[2]), +                         getDITypeRefOrNull(Record[3]), +                         HasFile ? getMDOrNull(Record[6]) : nullptr, +                         HasFile ? Record[4] : 0, getMDString(Record[5]))), +        NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_STRING_OLD: { +    std::string String(Record.begin(), Record.end()); + +    // Test for upgrading !llvm.loop. 
+    HasSeenOldLoopTags |= mayBeOldLoopAttachmentTag(String); +    ++NumMDStringLoaded; +    Metadata *MD = MDString::get(Context, String); +    MetadataList.assignValue(MD, NextMetadataNo); +    NextMetadataNo++; +    break; +  } +  case bitc::METADATA_STRINGS: { +    auto CreateNextMDString = [&](StringRef Str) { +      ++NumMDStringLoaded; +      MetadataList.assignValue(MDString::get(Context, Str), NextMetadataNo); +      NextMetadataNo++; +    }; +    if (Error Err = parseMetadataStrings(Record, Blob, CreateNextMDString)) +      return Err; +    break; +  } +  case bitc::METADATA_GLOBAL_DECL_ATTACHMENT: { +    if (Record.size() % 2 == 0) +      return error("Invalid record"); +    unsigned ValueID = Record[0]; +    if (ValueID >= ValueList.size()) +      return error("Invalid record"); +    if (auto *GO = dyn_cast<GlobalObject>(ValueList[ValueID])) +      if (Error Err = parseGlobalObjectAttachment( +              *GO, ArrayRef<uint64_t>(Record).slice(1))) +        return Err; +    break; +  } +  case bitc::METADATA_KIND: { +    // Support older bitcode files that had METADATA_KIND records in a +    // block with METADATA_BLOCK_ID. +    if (Error Err = parseMetadataKindRecord(Record)) +      return Err; +    break; +  } +  } +  return Error::success(); +#undef GET_OR_DISTINCT +} + +Error MetadataLoader::MetadataLoaderImpl::parseMetadataStrings( +    ArrayRef<uint64_t> Record, StringRef Blob, +    function_ref<void(StringRef)> CallBack) { +  // All the MDStrings in the block are emitted together in a single +  // record.  The strings are concatenated and stored in a blob along with +  // their sizes. 
+  if (Record.size() != 2) +    return error("Invalid record: metadata strings layout"); + +  unsigned NumStrings = Record[0]; +  unsigned StringsOffset = Record[1]; +  if (!NumStrings) +    return error("Invalid record: metadata strings with no strings"); +  if (StringsOffset > Blob.size()) +    return error("Invalid record: metadata strings corrupt offset"); + +  StringRef Lengths = Blob.slice(0, StringsOffset); +  SimpleBitstreamCursor R(Lengths); + +  StringRef Strings = Blob.drop_front(StringsOffset); +  do { +    if (R.AtEndOfStream()) +      return error("Invalid record: metadata strings bad length"); + +    unsigned Size = R.ReadVBR(6); +    if (Strings.size() < Size) +      return error("Invalid record: metadata strings truncated chars"); + +    CallBack(Strings.slice(0, Size)); +    Strings = Strings.drop_front(Size); +  } while (--NumStrings); + +  return Error::success(); +} + +Error MetadataLoader::MetadataLoaderImpl::parseGlobalObjectAttachment( +    GlobalObject &GO, ArrayRef<uint64_t> Record) { +  assert(Record.size() % 2 == 0); +  for (unsigned I = 0, E = Record.size(); I != E; I += 2) { +    auto K = MDKindMap.find(Record[I]); +    if (K == MDKindMap.end()) +      return error("Invalid ID"); +    MDNode *MD = MetadataList.getMDNodeFwdRefOrNull(Record[I + 1]); +    if (!MD) +      return error("Invalid metadata attachment"); +    GO.addMetadata(K->second, *MD); +  } +  return Error::success(); +} + +/// Parse metadata attachments. +Error MetadataLoader::MetadataLoaderImpl::parseMetadataAttachment( +    Function &F, const SmallVectorImpl<Instruction *> &InstructionList) { +  if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; +  PlaceholderQueue Placeholders; + +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. 
+    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      resolveForwardRefsAndPlaceholders(Placeholders); +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a metadata attachment record. +    Record.clear(); +    ++NumMDRecordLoaded; +    switch (Stream.readRecord(Entry.ID, Record)) { +    default: // Default behavior: ignore. +      break; +    case bitc::METADATA_ATTACHMENT: { +      unsigned RecordLength = Record.size(); +      if (Record.empty()) +        return error("Invalid record"); +      if (RecordLength % 2 == 0) { +        // A function attachment. +        if (Error Err = parseGlobalObjectAttachment(F, Record)) +          return Err; +        continue; +      } + +      // An instruction attachment. +      Instruction *Inst = InstructionList[Record[0]]; +      for (unsigned i = 1; i != RecordLength; i = i + 2) { +        unsigned Kind = Record[i]; +        DenseMap<unsigned, unsigned>::iterator I = MDKindMap.find(Kind); +        if (I == MDKindMap.end()) +          return error("Invalid ID"); +        if (I->second == LLVMContext::MD_tbaa && StripTBAA) +          continue; + +        auto Idx = Record[i + 1]; +        if (Idx < (MDStringRef.size() + GlobalMetadataBitPosIndex.size()) && +            !MetadataList.lookup(Idx)) { +          // Load the attachment if it is in the lazy-loadable range and hasn't +          // been loaded yet. +          lazyLoadOneMetadata(Idx, Placeholders); +          resolveForwardRefsAndPlaceholders(Placeholders); +        } + +        Metadata *Node = MetadataList.getMetadataFwdRef(Idx); +        if (isa<LocalAsMetadata>(Node)) +          // Drop the attachment.  This used to be legal, but there's no +          // upgrade path. 
+          break; +        MDNode *MD = dyn_cast_or_null<MDNode>(Node); +        if (!MD) +          return error("Invalid metadata attachment"); + +        if (HasSeenOldLoopTags && I->second == LLVMContext::MD_loop) +          MD = upgradeInstructionLoopAttachment(*MD); + +        if (I->second == LLVMContext::MD_tbaa) { +          assert(!MD->isTemporary() && "should load MDs before attachments"); +          MD = UpgradeTBAANode(*MD); +        } +        Inst->setMetadata(I->second, MD); +      } +      break; +    } +    } +  } +} + +/// Parse a single METADATA_KIND record, inserting result in MDKindMap. +Error MetadataLoader::MetadataLoaderImpl::parseMetadataKindRecord( +    SmallVectorImpl<uint64_t> &Record) { +  if (Record.size() < 2) +    return error("Invalid record"); + +  unsigned Kind = Record[0]; +  SmallString<8> Name(Record.begin() + 1, Record.end()); + +  unsigned NewKind = TheModule.getMDKindID(Name.str()); +  if (!MDKindMap.insert(std::make_pair(Kind, NewKind)).second) +    return error("Conflicting METADATA_KIND records"); +  return Error::success(); +} + +/// Parse the metadata kinds out of the METADATA_KIND_BLOCK. +Error MetadataLoader::MetadataLoaderImpl::parseMetadataKinds() { +  if (Stream.EnterSubBlock(bitc::METADATA_KIND_BLOCK_ID)) +    return error("Invalid record"); + +  SmallVector<uint64_t, 64> Record; + +  // Read all the records. +  while (true) { +    BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); + +    switch (Entry.Kind) { +    case BitstreamEntry::SubBlock: // Handled for us already. +    case BitstreamEntry::Error: +      return error("Malformed block"); +    case BitstreamEntry::EndBlock: +      return Error::success(); +    case BitstreamEntry::Record: +      // The interesting case. +      break; +    } + +    // Read a record. +    Record.clear(); +    ++NumMDRecordLoaded; +    unsigned Code = Stream.readRecord(Entry.ID, Record); +    switch (Code) { +    default: // Default behavior: ignore. 
+      break; +    case bitc::METADATA_KIND: { +      if (Error Err = parseMetadataKindRecord(Record)) +        return Err; +      break; +    } +    } +  } +} + +MetadataLoader &MetadataLoader::operator=(MetadataLoader &&RHS) { +  Pimpl = std::move(RHS.Pimpl); +  return *this; +} +MetadataLoader::MetadataLoader(MetadataLoader &&RHS) +    : Pimpl(std::move(RHS.Pimpl)) {} + +MetadataLoader::~MetadataLoader() = default; +MetadataLoader::MetadataLoader(BitstreamCursor &Stream, Module &TheModule, +                               BitcodeReaderValueList &ValueList, +                               bool IsImporting, +                               std::function<Type *(unsigned)> getTypeByID) +    : Pimpl(llvm::make_unique<MetadataLoaderImpl>( +          Stream, TheModule, ValueList, std::move(getTypeByID), IsImporting)) {} + +Error MetadataLoader::parseMetadata(bool ModuleLevel) { +  return Pimpl->parseMetadata(ModuleLevel); +} + +bool MetadataLoader::hasFwdRefs() const { return Pimpl->hasFwdRefs(); } + +/// Return the given metadata, creating a replaceable forward reference if +/// necessary. 
+Metadata *MetadataLoader::getMetadataFwdRefOrLoad(unsigned Idx) { +  return Pimpl->getMetadataFwdRefOrLoad(Idx); +} + +MDNode *MetadataLoader::getMDNodeFwdRefOrNull(unsigned Idx) { +  return Pimpl->getMDNodeFwdRefOrNull(Idx); +} + +DISubprogram *MetadataLoader::lookupSubprogramForFunction(Function *F) { +  return Pimpl->lookupSubprogramForFunction(F); +} + +Error MetadataLoader::parseMetadataAttachment( +    Function &F, const SmallVectorImpl<Instruction *> &InstructionList) { +  return Pimpl->parseMetadataAttachment(F, InstructionList); +} + +Error MetadataLoader::parseMetadataKinds() { +  return Pimpl->parseMetadataKinds(); +} + +void MetadataLoader::setStripTBAA(bool StripTBAA) { +  return Pimpl->setStripTBAA(StripTBAA); +} + +bool MetadataLoader::isStrippingTBAA() { return Pimpl->isStrippingTBAA(); } + +unsigned MetadataLoader::size() const { return Pimpl->size(); } +void MetadataLoader::shrinkTo(unsigned N) { return Pimpl->shrinkTo(N); } + +void MetadataLoader::upgradeDebugIntrinsics(Function &F) { +  return Pimpl->upgradeDebugIntrinsics(F); +} diff --git a/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h new file mode 100644 index 000000000000..f23dcc06cc94 --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/MetadataLoader.h @@ -0,0 +1,88 @@ +//===-- Bitcode/Reader/MetadataLoader.h - Load Metadatas -------*- C++ -*-====// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class handles loading Metadatas. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_BITCODE_READER_METADATALOADER_H
+#define LLVM_LIB_BITCODE_READER_METADATALOADER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Error.h"
+
+#include <functional>
+#include <memory>
+
+namespace llvm {
+class BitcodeReaderValueList;
+class BitstreamCursor;
+class DISubprogram;
+class Error;
+class Function;
+class Instruction;
+class Metadata;
+class MDNode;
+class Module;
+class Type;
+
+/// Helper class that handles loading Metadatas and keeping them available.
+class MetadataLoader {
+  class MetadataLoaderImpl;
+  std::unique_ptr<MetadataLoaderImpl> Pimpl;
+  Error parseMetadata(bool ModuleLevel);
+
+public:
+  ~MetadataLoader();
+  MetadataLoader(BitstreamCursor &Stream, Module &TheModule,
+                 BitcodeReaderValueList &ValueList, bool IsImporting,
+                 std::function<Type *(unsigned)> getTypeByID);
+  MetadataLoader &operator=(MetadataLoader &&);
+  MetadataLoader(MetadataLoader &&);
+
+  // Parse a module metadata block
+  Error parseModuleMetadata() { return parseMetadata(true); }
+
+  // Parse a function metadata block
+  Error parseFunctionMetadata() { return parseMetadata(false); }
+
+  /// Set the mode to strip TBAA metadata on load.
+  void setStripTBAA(bool StripTBAA = true);
+
+  /// Return true if the Loader is stripping TBAA metadata.
+  bool isStrippingTBAA();
+
+  // Return true if there are remaining unresolved forward references.
+  bool hasFwdRefs() const;
+
+  /// Return the given metadata, creating a replaceable forward reference if
+  /// necessary.
+  Metadata *getMetadataFwdRefOrLoad(unsigned Idx);
+
+  MDNode *getMDNodeFwdRefOrNull(unsigned Idx);
+
+  /// Return the DISubprogram metadata for a Function if any, null otherwise.
+  DISubprogram *lookupSubprogramForFunction(Function *F);
+
+  /// Parse a `METADATA_ATTACHMENT` block for a function. 
+  Error parseMetadataAttachment( +      Function &F, const SmallVectorImpl<Instruction *> &InstructionList); + +  /// Parse a `METADATA_KIND` block for the current module. +  Error parseMetadataKinds(); + +  unsigned size() const; +  void shrinkTo(unsigned N); + +  /// Perform bitcode upgrades on llvm.dbg.* calls. +  void upgradeDebugIntrinsics(Function &F); +}; +} + +#endif // LLVM_LIB_BITCODE_READER_METADATALOADER_H diff --git a/contrib/llvm/lib/Bitcode/Reader/ValueList.cpp b/contrib/llvm/lib/Bitcode/Reader/ValueList.cpp new file mode 100644 index 000000000000..1ab22b5cc3d1 --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/ValueList.cpp @@ -0,0 +1,216 @@ +//===- ValueList.cpp - Internal BitcodeReader implementation --------------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "ValueList.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/User.h" +#include "llvm/IR/Value.h" +#include "llvm/IR/ValueHandle.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <limits> +#include <utility> + +using namespace llvm; + +namespace llvm { + +namespace { + +/// A class for maintaining the slot number definition +/// as a placeholder for the actual definition for forward constants defs. 
+class ConstantPlaceHolder : public ConstantExpr { +public: +  explicit ConstantPlaceHolder(Type *Ty, LLVMContext &Context) +      : ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) { +    Op<0>() = UndefValue::get(Type::getInt32Ty(Context)); +  } + +  ConstantPlaceHolder &operator=(const ConstantPlaceHolder &) = delete; + +  // allocate space for exactly one operand +  void *operator new(size_t s) { return User::operator new(s, 1); } + +  /// Methods to support type inquiry through isa, cast, and dyn_cast. +  static bool classof(const Value *V) { +    return isa<ConstantExpr>(V) && +           cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1; +  } + +  /// Provide fast operand accessors +  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); +}; + +} // end anonymous namespace + +// FIXME: can we inherit this from ConstantExpr? +template <> +struct OperandTraits<ConstantPlaceHolder> +    : public FixedNumOperandTraits<ConstantPlaceHolder, 1> {}; +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPlaceHolder, Value) + +} // end namespace llvm + +void BitcodeReaderValueList::assignValue(Value *V, unsigned Idx) { +  if (Idx == size()) { +    push_back(V); +    return; +  } + +  if (Idx >= size()) +    resize(Idx + 1); + +  WeakTrackingVH &OldV = ValuePtrs[Idx]; +  if (!OldV) { +    OldV = V; +    return; +  } + +  // Handle constants and non-constants (e.g. instrs) differently for +  // efficiency. +  if (Constant *PHC = dyn_cast<Constant>(&*OldV)) { +    ResolveConstants.push_back(std::make_pair(PHC, Idx)); +    OldV = V; +  } else { +    // If there was a forward reference to this value, replace it. 
+    Value *PrevVal = OldV; +    OldV->replaceAllUsesWith(V); +    PrevVal->deleteValue(); +  } +} + +Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx, Type *Ty) { +  if (Idx >= size()) +    resize(Idx + 1); + +  if (Value *V = ValuePtrs[Idx]) { +    if (Ty != V->getType()) +      report_fatal_error("Type mismatch in constant table!"); +    return cast<Constant>(V); +  } + +  // Create and return a placeholder, which will later be RAUW'd. +  Constant *C = new ConstantPlaceHolder(Ty, Context); +  ValuePtrs[Idx] = C; +  return C; +} + +Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) { +  // Bail out for a clearly invalid value. This would make us call resize(0) +  if (Idx == std::numeric_limits<unsigned>::max()) +    return nullptr; + +  if (Idx >= size()) +    resize(Idx + 1); + +  if (Value *V = ValuePtrs[Idx]) { +    // If the types don't match, it's invalid. +    if (Ty && Ty != V->getType()) +      return nullptr; +    return V; +  } + +  // No type specified, must be invalid reference. +  if (!Ty) +    return nullptr; + +  // Create and return a placeholder, which will later be RAUW'd. +  Value *V = new Argument(Ty); +  ValuePtrs[Idx] = V; +  return V; +} + +/// Once all constants are read, this method bulk resolves any forward +/// references.  The idea behind this is that we sometimes get constants (such +/// as large arrays) which reference *many* forward ref constants.  Replacing +/// each of these causes a lot of thrashing when building/reuniquing the +/// constant.  Instead of doing this, we look at all the uses and rewrite all +/// the place holders at once for any constant that uses a placeholder. +void BitcodeReaderValueList::resolveConstantForwardRefs() { +  // Sort the values by-pointer so that they are efficient to look up with a +  // binary search. 
+  llvm::sort(ResolveConstants.begin(), ResolveConstants.end()); + +  SmallVector<Constant *, 64> NewOps; + +  while (!ResolveConstants.empty()) { +    Value *RealVal = operator[](ResolveConstants.back().second); +    Constant *Placeholder = ResolveConstants.back().first; +    ResolveConstants.pop_back(); + +    // Loop over all users of the placeholder, updating them to reference the +    // new value.  If they reference more than one placeholder, update them all +    // at once. +    while (!Placeholder->use_empty()) { +      auto UI = Placeholder->user_begin(); +      User *U = *UI; + +      // If the using object isn't uniqued, just update the operands.  This +      // handles instructions and initializers for global variables. +      if (!isa<Constant>(U) || isa<GlobalValue>(U)) { +        UI.getUse().set(RealVal); +        continue; +      } + +      // Otherwise, we have a constant that uses the placeholder.  Replace that +      // constant with a new constant that has *all* placeholder uses updated. +      Constant *UserC = cast<Constant>(U); +      for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end(); I != E; +           ++I) { +        Value *NewOp; +        if (!isa<ConstantPlaceHolder>(*I)) { +          // Not a placeholder reference. +          NewOp = *I; +        } else if (*I == Placeholder) { +          // Common case is that it just references this one placeholder. +          NewOp = RealVal; +        } else { +          // Otherwise, look up the placeholder in ResolveConstants. +          ResolveConstantsTy::iterator It = std::lower_bound( +              ResolveConstants.begin(), ResolveConstants.end(), +              std::pair<Constant *, unsigned>(cast<Constant>(*I), 0)); +          assert(It != ResolveConstants.end() && It->first == *I); +          NewOp = operator[](It->second); +        } + +        NewOps.push_back(cast<Constant>(NewOp)); +      } + +      // Make the new constant. 
+      Constant *NewC; +      if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) { +        NewC = ConstantArray::get(UserCA->getType(), NewOps); +      } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) { +        NewC = ConstantStruct::get(UserCS->getType(), NewOps); +      } else if (isa<ConstantVector>(UserC)) { +        NewC = ConstantVector::get(NewOps); +      } else { +        assert(isa<ConstantExpr>(UserC) && "Must be a ConstantExpr."); +        NewC = cast<ConstantExpr>(UserC)->getWithOperands(NewOps); +      } + +      UserC->replaceAllUsesWith(NewC); +      UserC->destroyConstant(); +      NewOps.clear(); +    } + +    // Update all ValueHandles, they should be the only users at this point. +    Placeholder->replaceAllUsesWith(RealVal); +    Placeholder->deleteValue(); +  } +} diff --git a/contrib/llvm/lib/Bitcode/Reader/ValueList.h b/contrib/llvm/lib/Bitcode/Reader/ValueList.h new file mode 100644 index 000000000000..5ad7899347ad --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Reader/ValueList.h @@ -0,0 +1,86 @@ +//===-- Bitcode/Reader/ValueList.h - Number values --------------*- C++ -*-===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class gives values and types Unique ID's. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_BITCODE_READER_VALUELIST_H +#define LLVM_LIB_BITCODE_READER_VALUELIST_H + +#include "llvm/IR/ValueHandle.h" +#include <cassert> +#include <utility> +#include <vector> + +namespace llvm { + +class Constant; +class LLVMContext; +class Type; +class Value; + +class BitcodeReaderValueList { +  std::vector<WeakTrackingVH> ValuePtrs; + +  /// As we resolve forward-referenced constants, we add information about them +  /// to this vector.  This allows us to resolve them in bulk instead of +  /// resolving each reference at a time.  See the code in +  /// ResolveConstantForwardRefs for more information about this. +  /// +  /// The key of this vector is the placeholder constant, the value is the slot +  /// number that holds the resolved value. +  using ResolveConstantsTy = std::vector<std::pair<Constant *, unsigned>>; +  ResolveConstantsTy ResolveConstants; +  LLVMContext &Context; + +public: +  BitcodeReaderValueList(LLVMContext &C) : Context(C) {} + +  ~BitcodeReaderValueList() { +    assert(ResolveConstants.empty() && "Constants not resolved?"); +  } + +  // vector compatibility methods +  unsigned size() const { return ValuePtrs.size(); } +  void resize(unsigned N) { ValuePtrs.resize(N); } +  void push_back(Value *V) { ValuePtrs.emplace_back(V); } + +  void clear() { +    assert(ResolveConstants.empty() && "Constants not resolved?"); +    ValuePtrs.clear(); +  } + +  Value *operator[](unsigned i) const { +    assert(i < ValuePtrs.size()); +    return ValuePtrs[i]; +  } + +  Value *back() const { return ValuePtrs.back(); } +  void pop_back() { ValuePtrs.pop_back(); } +  bool empty() const { return ValuePtrs.empty(); } + +  void shrinkTo(unsigned N) { +    assert(N <= size() && "Invalid shrinkTo request!"); +    ValuePtrs.resize(N); +  } + +  Constant *getConstantFwdRef(unsigned Idx, Type *Ty); +  Value *getValueFwdRef(unsigned Idx, Type *Ty); + +  void 
assignValue(Value *V, unsigned Idx); + +  /// Once all constants are read, this method bulk resolves any forward +  /// references. +  void resolveConstantForwardRefs(); +}; + +} // end namespace llvm + +#endif // LLVM_LIB_BITCODE_READER_VALUELIST_H diff --git a/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp new file mode 100644 index 000000000000..763cd12aa2d7 --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Writer/BitWriter.cpp @@ -0,0 +1,50 @@ +//===-- BitWriter.cpp -----------------------------------------------------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm-c/BitWriter.h" +#include "llvm/Bitcode/BitcodeWriter.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + + +/*===-- Operations on modules ---------------------------------------------===*/ + +int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path) { +  std::error_code EC; +  raw_fd_ostream OS(Path, EC, sys::fs::F_None); + +  if (EC) +    return -1; + +  WriteBitcodeToFile(*unwrap(M), OS); +  return 0; +} + +int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose, +                         int Unbuffered) { +  raw_fd_ostream OS(FD, ShouldClose, Unbuffered); + +  WriteBitcodeToFile(*unwrap(M), OS); +  return 0; +} + +int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int FileHandle) { +  return LLVMWriteBitcodeToFD(M, FileHandle, true, false); +} + +LLVMMemoryBufferRef LLVMWriteBitcodeToMemoryBuffer(LLVMModuleRef M) { +  std::string Data; +  raw_string_ostream OS(Data); + +  WriteBitcodeToFile(*unwrap(M), OS); +  return wrap(MemoryBuffer::getMemBufferCopy(OS.str()).release()); +} diff 
--git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp new file mode 100644 index 000000000000..87b47dc354b5 --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -0,0 +1,4384 @@ +//===- Bitcode/Writer/BitcodeWriter.cpp - Bitcode Writer ------------------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Bitcode writer implementation. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Bitcode/BitcodeWriter.h" +#include "ValueEnumerator.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/APInt.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/None.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Triple.h" +#include "llvm/Bitcode/BitCodes.h" +#include "llvm/Bitcode/BitstreamWriter.h" +#include "llvm/Bitcode/LLVMBitCodes.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CallSite.h" +#include "llvm/IR/Comdat.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalIFunc.h" +#include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include 
"llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/ModuleSummaryIndex.h" +#include "llvm/IR/Operator.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/UseListOrder.h" +#include "llvm/IR/Value.h" +#include "llvm/IR/ValueSymbolTable.h" +#include "llvm/MC/StringTableBuilder.h" +#include "llvm/Object/IRSymtab.h" +#include "llvm/Support/AtomicOrdering.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/SHA1.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <iterator> +#include <map> +#include <memory> +#include <string> +#include <utility> +#include <vector> + +using namespace llvm; + +static cl::opt<unsigned> +    IndexThreshold("bitcode-mdindex-threshold", cl::Hidden, cl::init(25), +                   cl::desc("Number of metadatas above which we emit an index " +                            "to enable lazy-loading")); + +cl::opt<bool> WriteRelBFToSummary( +    "write-relbf-to-summary", cl::Hidden, cl::init(false), +    cl::desc("Write relative block frequency to function summary ")); + +extern FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold; + +namespace { + +/// These are manifest constants used by the bitcode writer. They do not need to +/// be kept in sync with the reader, but need to be consistent within this file. +enum { +  // VALUE_SYMTAB_BLOCK abbrev id's. +  VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV, +  VST_ENTRY_7_ABBREV, +  VST_ENTRY_6_ABBREV, +  VST_BBENTRY_6_ABBREV, + +  // CONSTANTS_BLOCK abbrev id's. 
+  CONSTANTS_SETTYPE_ABBREV = bitc::FIRST_APPLICATION_ABBREV, +  CONSTANTS_INTEGER_ABBREV, +  CONSTANTS_CE_CAST_Abbrev, +  CONSTANTS_NULL_Abbrev, + +  // FUNCTION_BLOCK abbrev id's. +  FUNCTION_INST_LOAD_ABBREV = bitc::FIRST_APPLICATION_ABBREV, +  FUNCTION_INST_BINOP_ABBREV, +  FUNCTION_INST_BINOP_FLAGS_ABBREV, +  FUNCTION_INST_CAST_ABBREV, +  FUNCTION_INST_RET_VOID_ABBREV, +  FUNCTION_INST_RET_VAL_ABBREV, +  FUNCTION_INST_UNREACHABLE_ABBREV, +  FUNCTION_INST_GEP_ABBREV, +}; + +/// Abstract class to manage the bitcode writing, subclassed for each bitcode +/// file type. +class BitcodeWriterBase { +protected: +  /// The stream created and owned by the client. +  BitstreamWriter &Stream; + +  StringTableBuilder &StrtabBuilder; + +public: +  /// Constructs a BitcodeWriterBase object that writes to the provided +  /// \p Stream. +  BitcodeWriterBase(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder) +      : Stream(Stream), StrtabBuilder(StrtabBuilder) {} + +protected: +  void writeBitcodeHeader(); +  void writeModuleVersion(); +}; + +void BitcodeWriterBase::writeModuleVersion() { +  // VERSION: [version#] +  Stream.EmitRecord(bitc::MODULE_CODE_VERSION, ArrayRef<uint64_t>{2}); +} + +/// Base class to manage the module bitcode writing, currently subclassed for +/// ModuleBitcodeWriter and ThinLinkBitcodeWriter. +class ModuleBitcodeWriterBase : public BitcodeWriterBase { +protected: +  /// The Module to write to bitcode. +  const Module &M; + +  /// Enumerates ids for all values in the module. +  ValueEnumerator VE; + +  /// Optional per-module index to write for ThinLTO. +  const ModuleSummaryIndex *Index; + +  /// Map that holds the correspondence between GUIDs in the summary index, +  /// that came from indirect call profiles, and a value id generated by this +  /// class to use in the VST and summary block records. +  std::map<GlobalValue::GUID, unsigned> GUIDToValueIdMap; + +  /// Tracks the last value id recorded in the GUIDToValueMap. 
+  unsigned GlobalValueId; + +  /// Saves the offset of the VSTOffset record that must eventually be +  /// backpatched with the offset of the actual VST. +  uint64_t VSTOffsetPlaceholder = 0; + +public: +  /// Constructs a ModuleBitcodeWriterBase object for the given Module, +  /// writing to the provided \p Buffer. +  ModuleBitcodeWriterBase(const Module &M, StringTableBuilder &StrtabBuilder, +                          BitstreamWriter &Stream, +                          bool ShouldPreserveUseListOrder, +                          const ModuleSummaryIndex *Index) +      : BitcodeWriterBase(Stream, StrtabBuilder), M(M), +        VE(M, ShouldPreserveUseListOrder), Index(Index) { +    // Assign ValueIds to any callee values in the index that came from +    // indirect call profiles and were recorded as a GUID not a Value* +    // (which would have been assigned an ID by the ValueEnumerator). +    // The starting ValueId is just after the number of values in the +    // ValueEnumerator, so that they can be emitted in the VST. +    GlobalValueId = VE.getValues().size(); +    if (!Index) +      return; +    for (const auto &GUIDSummaryLists : *Index) +      // Examine all summaries for this GUID. +      for (auto &Summary : GUIDSummaryLists.second.SummaryList) +        if (auto FS = dyn_cast<FunctionSummary>(Summary.get())) +          // For each call in the function summary, see if the call +          // is to a GUID (which means it is for an indirect call, +          // otherwise we would have a Value for it). If so, synthesize +          // a value id. 
+          for (auto &CallEdge : FS->calls()) +            if (!CallEdge.first.haveGVs() || !CallEdge.first.getValue()) +              assignValueId(CallEdge.first.getGUID()); +  } + +protected: +  void writePerModuleGlobalValueSummary(); + +private: +  void writePerModuleFunctionSummaryRecord(SmallVector<uint64_t, 64> &NameVals, +                                           GlobalValueSummary *Summary, +                                           unsigned ValueID, +                                           unsigned FSCallsAbbrev, +                                           unsigned FSCallsProfileAbbrev, +                                           const Function &F); +  void writeModuleLevelReferences(const GlobalVariable &V, +                                  SmallVector<uint64_t, 64> &NameVals, +                                  unsigned FSModRefsAbbrev); + +  void assignValueId(GlobalValue::GUID ValGUID) { +    GUIDToValueIdMap[ValGUID] = ++GlobalValueId; +  } + +  unsigned getValueId(GlobalValue::GUID ValGUID) { +    const auto &VMI = GUIDToValueIdMap.find(ValGUID); +    // Expect that any GUID value had a value Id assigned by an +    // earlier call to assignValueId. +    assert(VMI != GUIDToValueIdMap.end() && +           "GUID does not have assigned value Id"); +    return VMI->second; +  } + +  // Helper to get the valueId for the type of value recorded in VI. +  unsigned getValueId(ValueInfo VI) { +    if (!VI.haveGVs() || !VI.getValue()) +      return getValueId(VI.getGUID()); +    return VE.getValueID(VI.getValue()); +  } + +  std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; } +}; + +/// Class to manage the bitcode writing for a module. +class ModuleBitcodeWriter : public ModuleBitcodeWriterBase { +  /// Pointer to the buffer allocated by caller for bitcode writing. +  const SmallVectorImpl<char> &Buffer; + +  /// True if a module hash record should be written. 
+  bool GenerateHash; + +  /// If non-null, when GenerateHash is true, the resulting hash is written +  /// into ModHash. +  ModuleHash *ModHash; + +  SHA1 Hasher; + +  /// The start bit of the identification block. +  uint64_t BitcodeStartBit; + +public: +  /// Constructs a ModuleBitcodeWriter object for the given Module, +  /// writing to the provided \p Buffer. +  ModuleBitcodeWriter(const Module &M, SmallVectorImpl<char> &Buffer, +                      StringTableBuilder &StrtabBuilder, +                      BitstreamWriter &Stream, bool ShouldPreserveUseListOrder, +                      const ModuleSummaryIndex *Index, bool GenerateHash, +                      ModuleHash *ModHash = nullptr) +      : ModuleBitcodeWriterBase(M, StrtabBuilder, Stream, +                                ShouldPreserveUseListOrder, Index), +        Buffer(Buffer), GenerateHash(GenerateHash), ModHash(ModHash), +        BitcodeStartBit(Stream.GetCurrentBitNo()) {} + +  /// Emit the current module to the bitstream. 
+  void write(); + +private: +  uint64_t bitcodeStartBit() { return BitcodeStartBit; } + +  size_t addToStrtab(StringRef Str); + +  void writeAttributeGroupTable(); +  void writeAttributeTable(); +  void writeTypeTable(); +  void writeComdats(); +  void writeValueSymbolTableForwardDecl(); +  void writeModuleInfo(); +  void writeValueAsMetadata(const ValueAsMetadata *MD, +                            SmallVectorImpl<uint64_t> &Record); +  void writeMDTuple(const MDTuple *N, SmallVectorImpl<uint64_t> &Record, +                    unsigned Abbrev); +  unsigned createDILocationAbbrev(); +  void writeDILocation(const DILocation *N, SmallVectorImpl<uint64_t> &Record, +                       unsigned &Abbrev); +  unsigned createGenericDINodeAbbrev(); +  void writeGenericDINode(const GenericDINode *N, +                          SmallVectorImpl<uint64_t> &Record, unsigned &Abbrev); +  void writeDISubrange(const DISubrange *N, SmallVectorImpl<uint64_t> &Record, +                       unsigned Abbrev); +  void writeDIEnumerator(const DIEnumerator *N, +                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDIBasicType(const DIBasicType *N, SmallVectorImpl<uint64_t> &Record, +                        unsigned Abbrev); +  void writeDIDerivedType(const DIDerivedType *N, +                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDICompositeType(const DICompositeType *N, +                            SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDISubroutineType(const DISubroutineType *N, +                             SmallVectorImpl<uint64_t> &Record, +                             unsigned Abbrev); +  void writeDIFile(const DIFile *N, SmallVectorImpl<uint64_t> &Record, +                   unsigned Abbrev); +  void writeDICompileUnit(const DICompileUnit *N, +                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDISubprogram(const DISubprogram *N, +           
              SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDILexicalBlock(const DILexicalBlock *N, +                           SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDILexicalBlockFile(const DILexicalBlockFile *N, +                               SmallVectorImpl<uint64_t> &Record, +                               unsigned Abbrev); +  void writeDINamespace(const DINamespace *N, SmallVectorImpl<uint64_t> &Record, +                        unsigned Abbrev); +  void writeDIMacro(const DIMacro *N, SmallVectorImpl<uint64_t> &Record, +                    unsigned Abbrev); +  void writeDIMacroFile(const DIMacroFile *N, SmallVectorImpl<uint64_t> &Record, +                        unsigned Abbrev); +  void writeDIModule(const DIModule *N, SmallVectorImpl<uint64_t> &Record, +                     unsigned Abbrev); +  void writeDITemplateTypeParameter(const DITemplateTypeParameter *N, +                                    SmallVectorImpl<uint64_t> &Record, +                                    unsigned Abbrev); +  void writeDITemplateValueParameter(const DITemplateValueParameter *N, +                                     SmallVectorImpl<uint64_t> &Record, +                                     unsigned Abbrev); +  void writeDIGlobalVariable(const DIGlobalVariable *N, +                             SmallVectorImpl<uint64_t> &Record, +                             unsigned Abbrev); +  void writeDILocalVariable(const DILocalVariable *N, +                            SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDILabel(const DILabel *N, +                    SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDIExpression(const DIExpression *N, +                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDIGlobalVariableExpression(const DIGlobalVariableExpression *N, +                                       SmallVectorImpl<uint64_t> &Record, +                                       
unsigned Abbrev); +  void writeDIObjCProperty(const DIObjCProperty *N, +                           SmallVectorImpl<uint64_t> &Record, unsigned Abbrev); +  void writeDIImportedEntity(const DIImportedEntity *N, +                             SmallVectorImpl<uint64_t> &Record, +                             unsigned Abbrev); +  unsigned createNamedMetadataAbbrev(); +  void writeNamedMetadata(SmallVectorImpl<uint64_t> &Record); +  unsigned createMetadataStringsAbbrev(); +  void writeMetadataStrings(ArrayRef<const Metadata *> Strings, +                            SmallVectorImpl<uint64_t> &Record); +  void writeMetadataRecords(ArrayRef<const Metadata *> MDs, +                            SmallVectorImpl<uint64_t> &Record, +                            std::vector<unsigned> *MDAbbrevs = nullptr, +                            std::vector<uint64_t> *IndexPos = nullptr); +  void writeModuleMetadata(); +  void writeFunctionMetadata(const Function &F); +  void writeFunctionMetadataAttachment(const Function &F); +  void writeGlobalVariableMetadataAttachment(const GlobalVariable &GV); +  void pushGlobalMetadataAttachment(SmallVectorImpl<uint64_t> &Record, +                                    const GlobalObject &GO); +  void writeModuleMetadataKinds(); +  void writeOperandBundleTags(); +  void writeSyncScopeNames(); +  void writeConstants(unsigned FirstVal, unsigned LastVal, bool isGlobal); +  void writeModuleConstants(); +  bool pushValueAndType(const Value *V, unsigned InstID, +                        SmallVectorImpl<unsigned> &Vals); +  void writeOperandBundles(ImmutableCallSite CS, unsigned InstID); +  void pushValue(const Value *V, unsigned InstID, +                 SmallVectorImpl<unsigned> &Vals); +  void pushValueSigned(const Value *V, unsigned InstID, +                       SmallVectorImpl<uint64_t> &Vals); +  void writeInstruction(const Instruction &I, unsigned InstID, +                        SmallVectorImpl<unsigned> &Vals); +  void 
writeFunctionLevelValueSymbolTable(const ValueSymbolTable &VST); +  void writeGlobalValueSymbolTable( +      DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex); +  void writeUseList(UseListOrder &&Order); +  void writeUseListBlock(const Function *F); +  void +  writeFunction(const Function &F, +                DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex); +  void writeBlockInfo(); +  void writeModuleHash(size_t BlockStartPos); + +  unsigned getEncodedSyncScopeID(SyncScope::ID SSID) { +    return unsigned(SSID); +  } +}; + +/// Class to manage the bitcode writing for a combined index. +class IndexBitcodeWriter : public BitcodeWriterBase { +  /// The combined index to write to bitcode. +  const ModuleSummaryIndex &Index; + +  /// When writing a subset of the index for distributed backends, client +  /// provides a map of modules to the corresponding GUIDs/summaries to write. +  const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex; + +  /// Map that holds the correspondence between the GUID used in the combined +  /// index and a value id generated by this class to use in references. +  std::map<GlobalValue::GUID, unsigned> GUIDToValueIdMap; + +  /// Tracks the last value id recorded in the GUIDToValueMap. +  unsigned GlobalValueId = 0; + +public: +  /// Constructs a IndexBitcodeWriter object for the given combined index, +  /// writing to the provided \p Buffer. When writing a subset of the index +  /// for a distributed backend, provide a \p ModuleToSummariesForIndex map. 
+  IndexBitcodeWriter(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder, +                     const ModuleSummaryIndex &Index, +                     const std::map<std::string, GVSummaryMapTy> +                         *ModuleToSummariesForIndex = nullptr) +      : BitcodeWriterBase(Stream, StrtabBuilder), Index(Index), +        ModuleToSummariesForIndex(ModuleToSummariesForIndex) { +    // Assign unique value ids to all summaries to be written, for use +    // in writing out the call graph edges. Save the mapping from GUID +    // to the new global value id to use when writing those edges, which +    // are currently saved in the index in terms of GUID. +    forEachSummary([&](GVInfo I, bool) { +      GUIDToValueIdMap[I.first] = ++GlobalValueId; +    }); +  } + +  /// The below iterator returns the GUID and associated summary. +  using GVInfo = std::pair<GlobalValue::GUID, GlobalValueSummary *>; + +  /// Calls the callback for each value GUID and summary to be written to +  /// bitcode. This hides the details of whether they are being pulled from the +  /// entire index or just those in a provided ModuleToSummariesForIndex map. +  template<typename Functor> +  void forEachSummary(Functor Callback) { +    if (ModuleToSummariesForIndex) { +      for (auto &M : *ModuleToSummariesForIndex) +        for (auto &Summary : M.second) { +          Callback(Summary, false); +          // Ensure aliasee is handled, e.g. for assigning a valueId, +          // even if we are not importing the aliasee directly (the +          // imported alias will contain a copy of aliasee). 
+          if (auto *AS = dyn_cast<AliasSummary>(Summary.getSecond())) +            Callback({AS->getAliaseeGUID(), &AS->getAliasee()}, true); +        } +    } else { +      for (auto &Summaries : Index) +        for (auto &Summary : Summaries.second.SummaryList) +          Callback({Summaries.first, Summary.get()}, false); +    } +  } + +  /// Calls the callback for each entry in the modulePaths StringMap that +  /// should be written to the module path string table. This hides the details +  /// of whether they are being pulled from the entire index or just those in a +  /// provided ModuleToSummariesForIndex map. +  template <typename Functor> void forEachModule(Functor Callback) { +    if (ModuleToSummariesForIndex) { +      for (const auto &M : *ModuleToSummariesForIndex) { +        const auto &MPI = Index.modulePaths().find(M.first); +        if (MPI == Index.modulePaths().end()) { +          // This should only happen if the bitcode file was empty, in which +          // case we shouldn't be importing (the ModuleToSummariesForIndex +          // would only include the module we are writing and index for). +          assert(ModuleToSummariesForIndex->size() == 1); +          continue; +        } +        Callback(*MPI); +      } +    } else { +      for (const auto &MPSE : Index.modulePaths()) +        Callback(MPSE); +    } +  } + +  /// Main entry point for writing a combined index to bitcode. 
+  void write(); + +private: +  void writeModStrings(); +  void writeCombinedGlobalValueSummary(); + +  Optional<unsigned> getValueId(GlobalValue::GUID ValGUID) { +    auto VMI = GUIDToValueIdMap.find(ValGUID); +    if (VMI == GUIDToValueIdMap.end()) +      return None; +    return VMI->second; +  } + +  std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; } +}; + +} // end anonymous namespace + +static unsigned getEncodedCastOpcode(unsigned Opcode) { +  switch (Opcode) { +  default: llvm_unreachable("Unknown cast instruction!"); +  case Instruction::Trunc   : return bitc::CAST_TRUNC; +  case Instruction::ZExt    : return bitc::CAST_ZEXT; +  case Instruction::SExt    : return bitc::CAST_SEXT; +  case Instruction::FPToUI  : return bitc::CAST_FPTOUI; +  case Instruction::FPToSI  : return bitc::CAST_FPTOSI; +  case Instruction::UIToFP  : return bitc::CAST_UITOFP; +  case Instruction::SIToFP  : return bitc::CAST_SITOFP; +  case Instruction::FPTrunc : return bitc::CAST_FPTRUNC; +  case Instruction::FPExt   : return bitc::CAST_FPEXT; +  case Instruction::PtrToInt: return bitc::CAST_PTRTOINT; +  case Instruction::IntToPtr: return bitc::CAST_INTTOPTR; +  case Instruction::BitCast : return bitc::CAST_BITCAST; +  case Instruction::AddrSpaceCast: return bitc::CAST_ADDRSPACECAST; +  } +} + +static unsigned getEncodedBinaryOpcode(unsigned Opcode) { +  switch (Opcode) { +  default: llvm_unreachable("Unknown binary instruction!"); +  case Instruction::Add: +  case Instruction::FAdd: return bitc::BINOP_ADD; +  case Instruction::Sub: +  case Instruction::FSub: return bitc::BINOP_SUB; +  case Instruction::Mul: +  case Instruction::FMul: return bitc::BINOP_MUL; +  case Instruction::UDiv: return bitc::BINOP_UDIV; +  case Instruction::FDiv: +  case Instruction::SDiv: return bitc::BINOP_SDIV; +  case Instruction::URem: return bitc::BINOP_UREM; +  case Instruction::FRem: +  case Instruction::SRem: return bitc::BINOP_SREM; +  case Instruction::Shl:  return 
bitc::BINOP_SHL; +  case Instruction::LShr: return bitc::BINOP_LSHR; +  case Instruction::AShr: return bitc::BINOP_ASHR; +  case Instruction::And:  return bitc::BINOP_AND; +  case Instruction::Or:   return bitc::BINOP_OR; +  case Instruction::Xor:  return bitc::BINOP_XOR; +  } +} + +static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) { +  switch (Op) { +  default: llvm_unreachable("Unknown RMW operation!"); +  case AtomicRMWInst::Xchg: return bitc::RMW_XCHG; +  case AtomicRMWInst::Add: return bitc::RMW_ADD; +  case AtomicRMWInst::Sub: return bitc::RMW_SUB; +  case AtomicRMWInst::And: return bitc::RMW_AND; +  case AtomicRMWInst::Nand: return bitc::RMW_NAND; +  case AtomicRMWInst::Or: return bitc::RMW_OR; +  case AtomicRMWInst::Xor: return bitc::RMW_XOR; +  case AtomicRMWInst::Max: return bitc::RMW_MAX; +  case AtomicRMWInst::Min: return bitc::RMW_MIN; +  case AtomicRMWInst::UMax: return bitc::RMW_UMAX; +  case AtomicRMWInst::UMin: return bitc::RMW_UMIN; +  } +} + +static unsigned getEncodedOrdering(AtomicOrdering Ordering) { +  switch (Ordering) { +  case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC; +  case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED; +  case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC; +  case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE; +  case AtomicOrdering::Release: return bitc::ORDERING_RELEASE; +  case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL; +  case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST; +  } +  llvm_unreachable("Invalid ordering"); +} + +static void writeStringRecord(BitstreamWriter &Stream, unsigned Code, +                              StringRef Str, unsigned AbbrevToUse) { +  SmallVector<unsigned, 64> Vals; + +  // Code: [strchar x N] +  for (unsigned i = 0, e = Str.size(); i != e; ++i) { +    if (AbbrevToUse && !BitCodeAbbrevOp::isChar6(Str[i])) +      AbbrevToUse = 0; +    Vals.push_back(Str[i]); +  } + +  // Emit the 
finished record. +  Stream.EmitRecord(Code, Vals, AbbrevToUse); +} + +static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) { +  switch (Kind) { +  case Attribute::Alignment: +    return bitc::ATTR_KIND_ALIGNMENT; +  case Attribute::AllocSize: +    return bitc::ATTR_KIND_ALLOC_SIZE; +  case Attribute::AlwaysInline: +    return bitc::ATTR_KIND_ALWAYS_INLINE; +  case Attribute::ArgMemOnly: +    return bitc::ATTR_KIND_ARGMEMONLY; +  case Attribute::Builtin: +    return bitc::ATTR_KIND_BUILTIN; +  case Attribute::ByVal: +    return bitc::ATTR_KIND_BY_VAL; +  case Attribute::Convergent: +    return bitc::ATTR_KIND_CONVERGENT; +  case Attribute::InAlloca: +    return bitc::ATTR_KIND_IN_ALLOCA; +  case Attribute::Cold: +    return bitc::ATTR_KIND_COLD; +  case Attribute::InaccessibleMemOnly: +    return bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY; +  case Attribute::InaccessibleMemOrArgMemOnly: +    return bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY; +  case Attribute::InlineHint: +    return bitc::ATTR_KIND_INLINE_HINT; +  case Attribute::InReg: +    return bitc::ATTR_KIND_IN_REG; +  case Attribute::JumpTable: +    return bitc::ATTR_KIND_JUMP_TABLE; +  case Attribute::MinSize: +    return bitc::ATTR_KIND_MIN_SIZE; +  case Attribute::Naked: +    return bitc::ATTR_KIND_NAKED; +  case Attribute::Nest: +    return bitc::ATTR_KIND_NEST; +  case Attribute::NoAlias: +    return bitc::ATTR_KIND_NO_ALIAS; +  case Attribute::NoBuiltin: +    return bitc::ATTR_KIND_NO_BUILTIN; +  case Attribute::NoCapture: +    return bitc::ATTR_KIND_NO_CAPTURE; +  case Attribute::NoDuplicate: +    return bitc::ATTR_KIND_NO_DUPLICATE; +  case Attribute::NoImplicitFloat: +    return bitc::ATTR_KIND_NO_IMPLICIT_FLOAT; +  case Attribute::NoInline: +    return bitc::ATTR_KIND_NO_INLINE; +  case Attribute::NoRecurse: +    return bitc::ATTR_KIND_NO_RECURSE; +  case Attribute::NonLazyBind: +    return bitc::ATTR_KIND_NON_LAZY_BIND; +  case Attribute::NonNull: +    return bitc::ATTR_KIND_NON_NULL; +  
case Attribute::Dereferenceable: +    return bitc::ATTR_KIND_DEREFERENCEABLE; +  case Attribute::DereferenceableOrNull: +    return bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL; +  case Attribute::NoRedZone: +    return bitc::ATTR_KIND_NO_RED_ZONE; +  case Attribute::NoReturn: +    return bitc::ATTR_KIND_NO_RETURN; +  case Attribute::NoCfCheck: +    return bitc::ATTR_KIND_NOCF_CHECK; +  case Attribute::NoUnwind: +    return bitc::ATTR_KIND_NO_UNWIND; +  case Attribute::OptForFuzzing: +    return bitc::ATTR_KIND_OPT_FOR_FUZZING; +  case Attribute::OptimizeForSize: +    return bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE; +  case Attribute::OptimizeNone: +    return bitc::ATTR_KIND_OPTIMIZE_NONE; +  case Attribute::ReadNone: +    return bitc::ATTR_KIND_READ_NONE; +  case Attribute::ReadOnly: +    return bitc::ATTR_KIND_READ_ONLY; +  case Attribute::Returned: +    return bitc::ATTR_KIND_RETURNED; +  case Attribute::ReturnsTwice: +    return bitc::ATTR_KIND_RETURNS_TWICE; +  case Attribute::SExt: +    return bitc::ATTR_KIND_S_EXT; +  case Attribute::Speculatable: +    return bitc::ATTR_KIND_SPECULATABLE; +  case Attribute::StackAlignment: +    return bitc::ATTR_KIND_STACK_ALIGNMENT; +  case Attribute::StackProtect: +    return bitc::ATTR_KIND_STACK_PROTECT; +  case Attribute::StackProtectReq: +    return bitc::ATTR_KIND_STACK_PROTECT_REQ; +  case Attribute::StackProtectStrong: +    return bitc::ATTR_KIND_STACK_PROTECT_STRONG; +  case Attribute::SafeStack: +    return bitc::ATTR_KIND_SAFESTACK; +  case Attribute::ShadowCallStack: +    return bitc::ATTR_KIND_SHADOWCALLSTACK; +  case Attribute::StrictFP: +    return bitc::ATTR_KIND_STRICT_FP; +  case Attribute::StructRet: +    return bitc::ATTR_KIND_STRUCT_RET; +  case Attribute::SanitizeAddress: +    return bitc::ATTR_KIND_SANITIZE_ADDRESS; +  case Attribute::SanitizeHWAddress: +    return bitc::ATTR_KIND_SANITIZE_HWADDRESS; +  case Attribute::SanitizeThread: +    return bitc::ATTR_KIND_SANITIZE_THREAD; +  case 
Attribute::SanitizeMemory:
    return bitc::ATTR_KIND_SANITIZE_MEMORY;
  case Attribute::SwiftError:
    return bitc::ATTR_KIND_SWIFT_ERROR;
  case Attribute::SwiftSelf:
    return bitc::ATTR_KIND_SWIFT_SELF;
  case Attribute::UWTable:
    return bitc::ATTR_KIND_UW_TABLE;
  case Attribute::WriteOnly:
    return bitc::ATTR_KIND_WRITEONLY;
  case Attribute::ZExt:
    return bitc::ATTR_KIND_Z_EXT;
  case Attribute::EndAttrKinds:
    llvm_unreachable("Can not encode end-attribute kinds marker.");
  case Attribute::None:
    llvm_unreachable("Can not encode none-attribute.");
  }

  llvm_unreachable("Trying to encode unknown attribute");
}

/// Emit the PARAMATTR_GROUP block: one PARAMATTR_GRP_CODE_ENTRY record per
/// distinct (index, AttributeSet) pair enumerated by the ValueEnumerator.
void ModuleBitcodeWriter::writeAttributeGroupTable() {
  const std::vector<ValueEnumerator::IndexAndAttrSet> &AttrGrps =
      VE.getAttributeGroups();
  if (AttrGrps.empty()) return;

  Stream.EnterSubblock(bitc::PARAMATTR_GROUP_BLOCK_ID, 3);

  SmallVector<uint64_t, 64> Record;
  for (ValueEnumerator::IndexAndAttrSet Pair : AttrGrps) {
    unsigned AttrListIndex = Pair.first;
    AttributeSet AS = Pair.second;
    // Record layout: [grpid, paramidx, attr...], where each attribute is
    // prefixed by a discriminator: 0 = enum, 1 = int, 3/4 = string (below).
    Record.push_back(VE.getAttributeGroupID(Pair));
    Record.push_back(AttrListIndex);

    for (Attribute Attr : AS) {
      if (Attr.isEnumAttribute()) {
        Record.push_back(0);
        Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
      } else if (Attr.isIntAttribute()) {
        Record.push_back(1);
        Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
        Record.push_back(Attr.getValueAsInt());
      } else {
        StringRef Kind = Attr.getKindAsString();
        StringRef Val = Attr.getValueAsString();

        // 3 = string attribute without a value, 4 = with a value; kind and
        // value are emitted as null-terminated byte runs inside the record.
        Record.push_back(Val.empty() ? 3 : 4);
        Record.append(Kind.begin(), Kind.end());
        Record.push_back(0);
        if (!Val.empty()) {
          Record.append(Val.begin(), Val.end());
          Record.push_back(0);
        }
      }
    }

    Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record);
    Record.clear();
  }

  Stream.ExitBlock();
}

/// Emit the PARAMATTR block: for every AttributeList, a record listing the
/// group IDs of each index position that actually carries attributes.
void ModuleBitcodeWriter::writeAttributeTable() {
  const std::vector<AttributeList> &Attrs = VE.getAttributeLists();
  if (Attrs.empty()) return;

  Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3);

  SmallVector<uint64_t, 64> Record;
  for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
    AttributeList AL = Attrs[i];
    // NOTE: the inner loop deliberately shadows i/e with this list's own
    // index range (return value / function / parameters).
    for (unsigned i = AL.index_begin(), e = AL.index_end(); i != e; ++i) {
      AttributeSet AS = AL.getAttributes(i);
      if (AS.hasAttributes())
        Record.push_back(VE.getAttributeGroupID({i, AS}));
    }

    Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
    Record.clear();
  }

  Stream.ExitBlock();
}

/// WriteTypeTable - Write out the type table for a module.
void ModuleBitcodeWriter::writeTypeTable() {
  const ValueEnumerator::TypeList &TypeList = VE.getTypes();

  Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */);
  SmallVector<uint64_t, 64> TypeVals;

  // Width (in bits) of a fixed field large enough to hold any type index in
  // this module; used by all the abbreviations below.
  uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies();

  // Abbrev for TYPE_CODE_POINTER.
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
  Abbv->Add(BitCodeAbbrevOp(0));  // Addrspace = 0
  unsigned PtrAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for TYPE_CODE_FUNCTION.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));  // isvararg
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
  unsigned FunctionAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for TYPE_CODE_STRUCT_ANON.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_ANON));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));  // ispacked
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
  unsigned StructAnonAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for TYPE_CODE_STRUCT_NAME.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAME));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
  unsigned StructNameAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for TYPE_CODE_STRUCT_NAMED.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAMED));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));  // ispacked
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
  unsigned StructNamedAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for TYPE_CODE_ARRAY.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // size
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
  unsigned ArrayAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Emit an entry count so the reader can reserve space.
  TypeVals.push_back(TypeList.size());
  Stream.EmitRecord(bitc::TYPE_CODE_NUMENTRY, TypeVals);
  TypeVals.clear();

  // Loop over all of the types, emitting each in turn.
  for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
    Type *T = TypeList[i];
    int AbbrevToUse = 0;
    unsigned Code = 0;

    switch (T->getTypeID()) {
    case Type::VoidTyID:      Code = bitc::TYPE_CODE_VOID;      break;
    case Type::HalfTyID:      Code = bitc::TYPE_CODE_HALF;      break;
    case Type::FloatTyID:     Code = bitc::TYPE_CODE_FLOAT;     break;
    case Type::DoubleTyID:    Code = bitc::TYPE_CODE_DOUBLE;    break;
    case Type::X86_FP80TyID:  Code = bitc::TYPE_CODE_X86_FP80;  break;
    case Type::FP128TyID:     Code = bitc::TYPE_CODE_FP128;     break;
    case Type::PPC_FP128TyID: Code = bitc::TYPE_CODE_PPC_FP128; break;
    case Type::LabelTyID:     Code = bitc::TYPE_CODE_LABEL;     break;
    case Type::MetadataTyID:  Code = bitc::TYPE_CODE_METADATA;  break;
    case Type::X86_MMXTyID:   Code = bitc::TYPE_CODE_X86_MMX;   break;
    case Type::TokenTyID:     Code = bitc::TYPE_CODE_TOKEN;     break;
    case Type::IntegerTyID:
      // INTEGER: [width]
      Code = bitc::TYPE_CODE_INTEGER;
      TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
      break;
    case Type::PointerTyID: {
      PointerType *PTy = cast<PointerType>(T);
      // POINTER: [pointee type, address space]
      Code = bitc::TYPE_CODE_POINTER;
      TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
      unsigned AddressSpace = PTy->getAddressSpace();
      TypeVals.push_back(AddressSpace);
      // The abbreviation hard-codes addrspace 0, so it only applies there.
      if (AddressSpace == 0) AbbrevToUse = PtrAbbrev;
      break;
    }
    case Type::FunctionTyID: {
      FunctionType *FT = cast<FunctionType>(T);
      // FUNCTION: [isvararg, retty, paramty x N]
      Code = bitc::TYPE_CODE_FUNCTION;
      TypeVals.push_back(FT->isVarArg());
      TypeVals.push_back(VE.getTypeID(FT->getReturnType()));
      for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
        TypeVals.push_back(VE.getTypeID(FT->getParamType(i)));
      AbbrevToUse = FunctionAbbrev;
      break;
    }
    case Type::StructTyID: {
      StructType *ST = cast<StructType>(T);
      // STRUCT: [ispacked, eltty x N]
      TypeVals.push_back(ST->isPacked());
      // Output all of the element types.
      for (StructType::element_iterator I = ST->element_begin(),
           E = ST->element_end(); I != E; ++I)
        TypeVals.push_back(VE.getTypeID(*I));

      if (ST->isLiteral()) {
        Code = bitc::TYPE_CODE_STRUCT_ANON;
        AbbrevToUse = StructAnonAbbrev;
      } else {
        if (ST->isOpaque()) {
          Code = bitc::TYPE_CODE_OPAQUE;
        } else {
          Code = bitc::TYPE_CODE_STRUCT_NAMED;
          AbbrevToUse = StructNamedAbbrev;
        }

        // Emit the name if it is present.
        if (!ST->getName().empty())
          writeStringRecord(Stream, bitc::TYPE_CODE_STRUCT_NAME, ST->getName(),
                            StructNameAbbrev);
      }
      break;
    }
    case Type::ArrayTyID: {
      ArrayType *AT = cast<ArrayType>(T);
      // ARRAY: [numelts, eltty]
      Code = bitc::TYPE_CODE_ARRAY;
      TypeVals.push_back(AT->getNumElements());
      TypeVals.push_back(VE.getTypeID(AT->getElementType()));
      AbbrevToUse = ArrayAbbrev;
      break;
    }
    case Type::VectorTyID: {
      VectorType *VT = cast<VectorType>(T);
      // VECTOR [numelts, eltty]
      Code = bitc::TYPE_CODE_VECTOR;
      TypeVals.push_back(VT->getNumElements());
      TypeVals.push_back(VE.getTypeID(VT->getElementType()));
      break;
    }
    }

    // Emit the finished record.
    Stream.EmitRecord(Code, TypeVals, AbbrevToUse);
    TypeVals.clear();
  }

  Stream.ExitBlock();
}

/// Map a linkage kind to its fixed on-disk encoding. These values are part
/// of the bitcode format and must never be renumbered.
static unsigned getEncodedLinkage(const GlobalValue::LinkageTypes Linkage) {
  switch (Linkage) {
  case GlobalValue::ExternalLinkage:
    return 0;
  case GlobalValue::WeakAnyLinkage:
    return 16;
  case GlobalValue::AppendingLinkage:
    return 2;
  case GlobalValue::InternalLinkage:
    return 3;
  case GlobalValue::LinkOnceAnyLinkage:
    return 18;
  case GlobalValue::ExternalWeakLinkage:
    return 7;
  case GlobalValue::CommonLinkage:
    return 8;
  case GlobalValue::PrivateLinkage:
    return 9;
  case GlobalValue::WeakODRLinkage:
    return 17;
  case GlobalValue::LinkOnceODRLinkage:
    return 19;
  case GlobalValue::AvailableExternallyLinkage:
    return 12;
  }
  llvm_unreachable("Invalid linkage");
}

static unsigned getEncodedLinkage(const GlobalValue &GV) {
  return getEncodedLinkage(GV.getLinkage());
}

/// Pack FunctionSummary feature flags into a bitfield:
/// bit 0 = ReadNone, bit 1 = ReadOnly, bit 2 = NoRecurse,
/// bit 3 = ReturnDoesNotAlias.
static uint64_t getEncodedFFlags(FunctionSummary::FFlags Flags) {
  uint64_t RawFlags = 0;
  RawFlags |= Flags.ReadNone;
  RawFlags |= (Flags.ReadOnly << 1);
  RawFlags |= (Flags.NoRecurse << 2);
  RawFlags |= (Flags.ReturnDoesNotAlias << 3);
  return RawFlags;
}

// Encode the flags for GlobalValue in the summary.
static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
  uint64_t RawFlags = 0;

  RawFlags |= Flags.NotEligibleToImport; // bool
  RawFlags |= (Flags.Live << 1);
  RawFlags |= (Flags.DSOLocal << 2);

  // Linkage don't need to be remapped at that time for the summary. Any future
  // change to the getEncodedLinkage() function will need to be taken into
  // account here as well.
  RawFlags = (RawFlags << 4) | Flags.Linkage; // 4 bits

  return RawFlags;
}

/// Map a GlobalValue visibility to its fixed bitcode encoding.
static unsigned getEncodedVisibility(const GlobalValue &GV) {
  switch (GV.getVisibility()) {
  case GlobalValue::DefaultVisibility:   return 0;
  case GlobalValue::HiddenVisibility:    return 1;
  case GlobalValue::ProtectedVisibility: return 2;
  }
  llvm_unreachable("Invalid visibility");
}

/// Map a DLL storage class to its fixed bitcode encoding.
static unsigned getEncodedDLLStorageClass(const GlobalValue &GV) {
  switch (GV.getDLLStorageClass()) {
  case GlobalValue::DefaultStorageClass:   return 0;
  case GlobalValue::DLLImportStorageClass: return 1;
  case GlobalValue::DLLExportStorageClass: return 2;
  }
  llvm_unreachable("Invalid DLL storage class");
}

/// Map a thread-local mode to its fixed bitcode encoding.
static unsigned getEncodedThreadLocalMode(const GlobalValue &GV) {
  switch (GV.getThreadLocalMode()) {
    case GlobalVariable::NotThreadLocal:         return 0;
    case GlobalVariable::GeneralDynamicTLSModel: return 1;
    case GlobalVariable::LocalDynamicTLSModel:   return 2;
    case GlobalVariable::InitialExecTLSModel:    return 3;
    case GlobalVariable::LocalExecTLSModel:      return 4;
  }
  llvm_unreachable("Invalid TLS model");
}

/// Map a comdat selection kind to its bitcode COMDAT_SELECTION_KIND_* code.
static unsigned getEncodedComdatSelectionKind(const Comdat &C) {
  switch (C.getSelectionKind()) {
  case Comdat::Any:
    return bitc::COMDAT_SELECTION_KIND_ANY;
  case Comdat::ExactMatch:
    return bitc::COMDAT_SELECTION_KIND_EXACT_MATCH;
  case Comdat::Largest:
    return bitc::COMDAT_SELECTION_KIND_LARGEST;
  case Comdat::NoDuplicates:
    return bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES;
  case Comdat::SameSize:
    return bitc::COMDAT_SELECTION_KIND_SAME_SIZE;
  }
  llvm_unreachable("Invalid selection kind");
}

/// Map an unnamed_addr kind to its fixed bitcode encoding. Note that Local
/// and Global are deliberately NOT in declaration order: Global was encoded
/// first historically, so it keeps value 1.
static unsigned getEncodedUnnamedAddr(const GlobalValue &GV) {
  switch (GV.getUnnamedAddr()) {
  case GlobalValue::UnnamedAddr::None:   return 0;
  case GlobalValue::UnnamedAddr::Local:  return 2;
  case GlobalValue::UnnamedAddr::Global: return 1;
  }
  llvm_unreachable("Invalid unnamed_addr");
}

/// Add Str to the string table, folding it into the module hash when one is
/// being generated, and return its string-table offset.
size_t ModuleBitcodeWriter::addToStrtab(StringRef Str) {
  if (GenerateHash)
    Hasher.update(Str);
  return StrtabBuilder.add(Str);
}

/// Emit one MODULE_CODE_COMDAT record per comdat in the module.
void ModuleBitcodeWriter::writeComdats() {
  SmallVector<unsigned, 64> Vals;
  for (const Comdat *C : VE.getComdats()) {
    // COMDAT: [strtab offset, strtab size, selection_kind]
    Vals.push_back(addToStrtab(C->getName()));
    Vals.push_back(C->getName().size());
    Vals.push_back(getEncodedComdatSelectionKind(*C));
    Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0);
    Vals.clear();
  }
}

/// Write a record that will eventually hold the word offset of the
/// module-level VST. For now the offset is 0, which will be backpatched
/// after the real VST is written. Saves the bit offset to backpatch.
void ModuleBitcodeWriter::writeValueSymbolTableForwardDecl() {
  // Write a placeholder value in for the offset of the real VST,
  // which is written after the function blocks so that it can include
  // the offset of each function. The placeholder offset will be
  // updated when the real VST is written.
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_VSTOFFSET));
  // Blocks are 32-bit aligned, so we can use a 32-bit word offset to
  // hold the real VST offset. Must use fixed instead of VBR as we don't
  // know how many VBR chunks to reserve ahead of time.
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
  unsigned VSTOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Emit the placeholder
  uint64_t Vals[] = {bitc::MODULE_CODE_VSTOFFSET, 0};
  Stream.EmitRecordWithAbbrev(VSTOffsetAbbrev, Vals);

  // Compute and save the bit offset to the placeholder, which will be
  // patched when the real VST is written. We can simply subtract the 32-bit
  // fixed size from the current bit number to get the location to backpatch.
  VSTOffsetPlaceholder = Stream.GetCurrentBitNo() - 32;
}

enum StringEncoding { SE_Char6, SE_Fixed7, SE_Fixed8 };

/// Determine the encoding to use for the given string name and length.
/// Char6 is cheapest, Fixed7 covers ASCII, Fixed8 is needed for any byte
/// with the high bit set.
static StringEncoding getStringEncoding(StringRef Str) {
  bool isChar6 = true;
  for (char C : Str) {
    if (isChar6)
      isChar6 = BitCodeAbbrevOp::isChar6(C);
    if ((unsigned char)C & 128)
      // don't bother scanning the rest.
      return SE_Fixed8;
  }
  if (isChar6)
    return SE_Char6;
  return SE_Fixed7;
}

/// Emit top-level description of module, including target triple, inline asm,
/// descriptors for global variables, and function prototype info.
/// The module-level VST offset placeholder is emitted (and remembered for
/// backpatching) via writeValueSymbolTableForwardDecl() at the end.
void ModuleBitcodeWriter::writeModuleInfo() {
  // Emit various pieces of data attached to a module.
  if (!M.getTargetTriple().empty())
    writeStringRecord(Stream, bitc::MODULE_CODE_TRIPLE, M.getTargetTriple(),
                      0 /*TODO*/);
  const std::string &DL = M.getDataLayoutStr();
  if (!DL.empty())
    writeStringRecord(Stream, bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/);
  if (!M.getModuleInlineAsm().empty())
    writeStringRecord(Stream, bitc::MODULE_CODE_ASM, M.getModuleInlineAsm(),
                      0 /*TODO*/);

  // Emit information about sections and GC, computing how many there are. Also
  // compute the maximum alignment value.
  std::map<std::string, unsigned> SectionMap;
  std::map<std::string, unsigned> GCMap;
  unsigned MaxAlignment = 0;
  unsigned MaxGlobalType = 0;
  for (const GlobalValue &GV : M.globals()) {
    MaxAlignment = std::max(MaxAlignment, GV.getAlignment());
    MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getValueType()));
    if (GV.hasSection()) {
      // Give section names unique ID's.
      unsigned &Entry = SectionMap[GV.getSection()];
      // Entry == 0 means "not seen yet"; IDs are 1-based so 0 can mean
      // "no section" in the GLOBALVAR/FUNCTION records below.
      if (!Entry) {
        writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, GV.getSection(),
                          0 /*TODO*/);
        Entry = SectionMap.size();
      }
    }
  }
  for (const Function &F : M) {
    MaxAlignment = std::max(MaxAlignment, F.getAlignment());
    if (F.hasSection()) {
      // Give section names unique ID's.
      unsigned &Entry = SectionMap[F.getSection()];
      if (!Entry) {
        writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, F.getSection(),
                          0 /*TODO*/);
        Entry = SectionMap.size();
      }
    }
    if (F.hasGC()) {
      // Same for GC names.
      unsigned &Entry = GCMap[F.getGC()];
      if (!Entry) {
        writeStringRecord(Stream, bitc::MODULE_CODE_GCNAME, F.getGC(),
                          0 /*TODO*/);
        Entry = GCMap.size();
      }
    }
  }

  // Emit abbrev for globals, now that we know # sections and max alignment.
  unsigned SimpleGVarAbbrev = 0;
  if (!M.global_empty()) {
    // Add an abbrev for common globals with no visibility or thread localness.
    auto Abbv = std::make_shared<BitCodeAbbrev>();
    Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
                              Log2_32_Ceil(MaxGlobalType+1)));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // AddrSpace << 2
                                                           //| explicitType << 1
                                                           //| constant
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // Initializer.
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5)); // Linkage.
    if (MaxAlignment == 0)                                 // Alignment.
      Abbv->Add(BitCodeAbbrevOp(0));
    else {
      unsigned MaxEncAlignment = Log2_32(MaxAlignment)+1;
      Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
                               Log2_32_Ceil(MaxEncAlignment+1)));
    }
    if (SectionMap.empty())                                    // Section.
      Abbv->Add(BitCodeAbbrevOp(0));
    else
      Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
                               Log2_32_Ceil(SectionMap.size()+1)));
    // Don't bother emitting vis + thread local.
    SimpleGVarAbbrev = Stream.EmitAbbrev(std::move(Abbv));
  }

  SmallVector<unsigned, 64> Vals;
  // Emit the module's source file name.
  {
    StringEncoding Bits = getStringEncoding(M.getSourceFileName());
    BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
    if (Bits == SE_Char6)
      AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
    else if (Bits == SE_Fixed7)
      AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7);

    // MODULE_CODE_SOURCE_FILENAME: [namechar x N]
    auto Abbv = std::make_shared<BitCodeAbbrev>();
    Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
    Abbv->Add(AbbrevOpToUse);
    unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv));

    for (const auto P : M.getSourceFileName())
      Vals.push_back((unsigned char)P);

    // Emit the finished record.
    Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev);
    Vals.clear();
  }

  // Emit the global variable information.
  for (const GlobalVariable &GV : M.globals()) {
    unsigned AbbrevToUse = 0;

    // GLOBALVAR: [strtab offset, strtab size, type, isconst, initid,
    //             linkage, alignment, section, visibility, threadlocal,
    //             unnamed_addr, externally_initialized, dllstorageclass,
    //             comdat, attributes, DSO_Local]
    Vals.push_back(addToStrtab(GV.getName()));
    Vals.push_back(GV.getName().size());
    Vals.push_back(VE.getTypeID(GV.getValueType()));
    Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | GV.isConstant());
    Vals.push_back(GV.isDeclaration() ? 0 :
                   (VE.getValueID(GV.getInitializer()) + 1));
    Vals.push_back(getEncodedLinkage(GV));
    Vals.push_back(Log2_32(GV.getAlignment())+1);
    Vals.push_back(GV.hasSection() ? SectionMap[GV.getSection()] : 0);
    // The trailing fields are only emitted when any of them is non-default;
    // otherwise the short SimpleGVarAbbrev form is used.
    if (GV.isThreadLocal() ||
        GV.getVisibility() != GlobalValue::DefaultVisibility ||
        GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None ||
        GV.isExternallyInitialized() ||
        GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass ||
        GV.hasComdat() ||
        GV.hasAttributes() ||
        GV.isDSOLocal()) {
      Vals.push_back(getEncodedVisibility(GV));
      Vals.push_back(getEncodedThreadLocalMode(GV));
      Vals.push_back(getEncodedUnnamedAddr(GV));
      Vals.push_back(GV.isExternallyInitialized());
      Vals.push_back(getEncodedDLLStorageClass(GV));
      Vals.push_back(GV.hasComdat() ? VE.getComdatID(GV.getComdat()) : 0);

      auto AL = GV.getAttributesAsList(AttributeList::FunctionIndex);
      Vals.push_back(VE.getAttributeListID(AL));

      Vals.push_back(GV.isDSOLocal());
    } else {
      AbbrevToUse = SimpleGVarAbbrev;
    }

    Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals, AbbrevToUse);
    Vals.clear();
  }

  // Emit the function proto information.
  for (const Function &F : M) {
    // FUNCTION:  [strtab offset, strtab size, type, callingconv, isproto,
    //             linkage, paramattrs, alignment, section, visibility, gc,
    //             unnamed_addr, prologuedata, dllstorageclass, comdat,
    //             prefixdata, personalityfn, DSO_Local]
    Vals.push_back(addToStrtab(F.getName()));
    Vals.push_back(F.getName().size());
    Vals.push_back(VE.getTypeID(F.getFunctionType()));
    Vals.push_back(F.getCallingConv());
    Vals.push_back(F.isDeclaration());
    Vals.push_back(getEncodedLinkage(F));
    Vals.push_back(VE.getAttributeListID(F.getAttributes()));
    Vals.push_back(Log2_32(F.getAlignment())+1);
    Vals.push_back(F.hasSection() ? SectionMap[F.getSection()] : 0);
    Vals.push_back(getEncodedVisibility(F));
    Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0);
    Vals.push_back(getEncodedUnnamedAddr(F));
    // Value IDs below are biased by 1 so 0 can mean "absent".
    Vals.push_back(F.hasPrologueData() ? (VE.getValueID(F.getPrologueData()) + 1)
                                       : 0);
    Vals.push_back(getEncodedDLLStorageClass(F));
    Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0);
    Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
                                     : 0);
    Vals.push_back(
        F.hasPersonalityFn() ? (VE.getValueID(F.getPersonalityFn()) + 1) : 0);

    Vals.push_back(F.isDSOLocal());
    unsigned AbbrevToUse = 0;
    Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
    Vals.clear();
  }

  // Emit the alias information.
  for (const GlobalAlias &A : M.aliases()) {
    // ALIAS: [strtab offset, strtab size, alias type, aliasee val#, linkage,
    //         visibility, dllstorageclass, threadlocal, unnamed_addr,
    //         DSO_Local]
    Vals.push_back(addToStrtab(A.getName()));
    Vals.push_back(A.getName().size());
    Vals.push_back(VE.getTypeID(A.getValueType()));
    Vals.push_back(A.getType()->getAddressSpace());
    Vals.push_back(VE.getValueID(A.getAliasee()));
    Vals.push_back(getEncodedLinkage(A));
    Vals.push_back(getEncodedVisibility(A));
    Vals.push_back(getEncodedDLLStorageClass(A));
    Vals.push_back(getEncodedThreadLocalMode(A));
    Vals.push_back(getEncodedUnnamedAddr(A));
    Vals.push_back(A.isDSOLocal());

    unsigned AbbrevToUse = 0;
    Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals, AbbrevToUse);
    Vals.clear();
  }

  // Emit the ifunc information.
  for (const GlobalIFunc &I : M.ifuncs()) {
    // IFUNC: [strtab offset, strtab size, ifunc type, address space, resolver
    //         val#, linkage, visibility, DSO_Local]
    Vals.push_back(addToStrtab(I.getName()));
    Vals.push_back(I.getName().size());
    Vals.push_back(VE.getTypeID(I.getValueType()));
    Vals.push_back(I.getType()->getAddressSpace());
    Vals.push_back(VE.getValueID(I.getResolver()));
    Vals.push_back(getEncodedLinkage(I));
    Vals.push_back(getEncodedVisibility(I));
    Vals.push_back(I.isDSOLocal());
    Stream.EmitRecord(bitc::MODULE_CODE_IFUNC, Vals);
    Vals.clear();
  }

  writeValueSymbolTableForwardDecl();
}

/// Encode the wrap/exact/fast-math flags of V into a bitfield, or 0 if V
/// carries none of them.
static uint64_t getOptimizationFlags(const Value *V) {
  uint64_t Flags = 0;

  if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (OBO->hasNoSignedWrap())
      Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP;
    if (OBO->hasNoUnsignedWrap())
      Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP;
  } else if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V)) {
    if (PEO->isExact())
Flags |= 1 << bitc::PEO_EXACT;
  } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) {
    if (FPMO->hasAllowReassoc())
      Flags |= bitc::AllowReassoc;
    if (FPMO->hasNoNaNs())
      Flags |= bitc::NoNaNs;
    if (FPMO->hasNoInfs())
      Flags |= bitc::NoInfs;
    if (FPMO->hasNoSignedZeros())
      Flags |= bitc::NoSignedZeros;
    if (FPMO->hasAllowReciprocal())
      Flags |= bitc::AllowReciprocal;
    if (FPMO->hasAllowContract())
      Flags |= bitc::AllowContract;
    if (FPMO->hasApproxFunc())
      Flags |= bitc::ApproxFunc;
  }

  return Flags;
}

/// Emit a METADATA_VALUE record: [type id, value id].
void ModuleBitcodeWriter::writeValueAsMetadata(
    const ValueAsMetadata *MD, SmallVectorImpl<uint64_t> &Record) {
  // Mimic an MDNode with a value as one operand.
  Value *V = MD->getValue();
  Record.push_back(VE.getTypeID(V->getType()));
  Record.push_back(VE.getValueID(V));
  Stream.EmitRecord(bitc::METADATA_VALUE, Record, 0);
  Record.clear();
}

/// Emit a (possibly distinct) MDTuple as a list of operand metadata IDs
/// (0 encodes a null operand).
void ModuleBitcodeWriter::writeMDTuple(const MDTuple *N,
                                       SmallVectorImpl<uint64_t> &Record,
                                       unsigned Abbrev) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    Metadata *MD = N->getOperand(i);
    assert(!(MD && isa<LocalAsMetadata>(MD)) &&
           "Unexpected function-local metadata");
    Record.push_back(VE.getMetadataOrNullID(MD));
  }
  Stream.EmitRecord(N->isDistinct() ? bitc::METADATA_DISTINCT_NODE
                                    : bitc::METADATA_NODE,
                    Record, Abbrev);
  Record.clear();
}

unsigned ModuleBitcodeWriter::createDILocationAbbrev() {
  // Assume the column is usually under 128, and always output the inlined-at
  // location (it's never more expensive than building an array size 1).
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_LOCATION));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  return Stream.EmitAbbrev(std::move(Abbv));
}

/// Emit a METADATA_LOCATION record: [distinct, line, col, scope, inlinedAt].
/// Lazily creates the abbreviation on first use (Abbrev passed by reference).
void ModuleBitcodeWriter::writeDILocation(const DILocation *N,
                                          SmallVectorImpl<uint64_t> &Record,
                                          unsigned &Abbrev) {
  if (!Abbrev)
    Abbrev = createDILocationAbbrev();

  Record.push_back(N->isDistinct());
  Record.push_back(N->getLine());
  Record.push_back(N->getColumn());
  Record.push_back(VE.getMetadataID(N->getScope()));
  Record.push_back(VE.getMetadataOrNullID(N->getInlinedAt()));

  Stream.EmitRecord(bitc::METADATA_LOCATION, Record, Abbrev);
  Record.clear();
}

unsigned ModuleBitcodeWriter::createGenericDINodeAbbrev() {
  // Abbreviation for METADATA_GENERIC_DEBUG:
  // [distinct, tag, version, header, operands...].
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_GENERIC_DEBUG));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  return Stream.EmitAbbrev(std::move(Abbv));
}

/// Emit a GenericDINode: [distinct, tag, version(0), operand ids...].
void ModuleBitcodeWriter::writeGenericDINode(const GenericDINode *N,
                                             SmallVectorImpl<uint64_t> &Record,
                                             unsigned &Abbrev) {
  if (!Abbrev)
    Abbrev = createGenericDINodeAbbrev();

  Record.push_back(N->isDistinct());
  Record.push_back(N->getTag());
  Record.push_back(0); // Per-tag version field; unused for now.

  for (auto &I : N->operands())
    Record.push_back(VE.getMetadataOrNullID(I));

  Stream.EmitRecord(bitc::METADATA_GENERIC_DEBUG, Record, Abbrev);
  Record.clear();
}

/// Zig-zag encode a signed value so small magnitudes stay small when VBR'd:
/// n >= 0 -> 2n, n < 0 -> ~(2n) (odd values are negative).
static uint64_t rotateSign(int64_t I) {
  uint64_t U = I;
  return I < 0 ?
~(U << 1) : U << 1;
}

/// Emit METADATA_SUBRANGE. The record version (bit 1 of the first field)
/// distinguishes the count-as-metadata form from the legacy integer count.
void ModuleBitcodeWriter::writeDISubrange(const DISubrange *N,
                                          SmallVectorImpl<uint64_t> &Record,
                                          unsigned Abbrev) {
  const uint64_t Version = 1 << 1;
  Record.push_back((uint64_t)N->isDistinct() | Version);
  Record.push_back(VE.getMetadataOrNullID(N->getRawCountNode()));
  Record.push_back(rotateSign(N->getLowerBound()));

  Stream.EmitRecord(bitc::METADATA_SUBRANGE, Record, Abbrev);
  Record.clear();
}

/// Emit METADATA_ENUMERATOR: [isUnsigned|distinct, value, name].
void ModuleBitcodeWriter::writeDIEnumerator(const DIEnumerator *N,
                                            SmallVectorImpl<uint64_t> &Record,
                                            unsigned Abbrev) {
  Record.push_back((N->isUnsigned() << 1) | N->isDistinct());
  Record.push_back(rotateSign(N->getValue()));
  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));

  Stream.EmitRecord(bitc::METADATA_ENUMERATOR, Record, Abbrev);
  Record.clear();
}

/// Emit METADATA_BASIC_TYPE: [distinct, tag, name, size, align, encoding].
void ModuleBitcodeWriter::writeDIBasicType(const DIBasicType *N,
                                           SmallVectorImpl<uint64_t> &Record,
                                           unsigned Abbrev) {
  Record.push_back(N->isDistinct());
  Record.push_back(N->getTag());
  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
  Record.push_back(N->getSizeInBits());
  Record.push_back(N->getAlignInBits());
  Record.push_back(N->getEncoding());

  Stream.EmitRecord(bitc::METADATA_BASIC_TYPE, Record, Abbrev);
  Record.clear();
}

/// Emit METADATA_DERIVED_TYPE. Field order is part of the bitcode format.
void ModuleBitcodeWriter::writeDIDerivedType(const DIDerivedType *N,
                                             SmallVectorImpl<uint64_t> &Record,
                                             unsigned Abbrev) {
  Record.push_back(N->isDistinct());
  Record.push_back(N->getTag());
  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
  Record.push_back(N->getLine());
  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
  Record.push_back(VE.getMetadataOrNullID(N->getBaseType()));
  Record.push_back(N->getSizeInBits());
  Record.push_back(N->getAlignInBits());
  Record.push_back(N->getOffsetInBits());
  Record.push_back(N->getFlags());
  Record.push_back(VE.getMetadataOrNullID(N->getExtraData()));

  // DWARF address space is encoded as N->getDWARFAddressSpace() + 1. 0 means
  // that there is no DWARF address space associated with DIDerivedType.
  if (const auto &DWARFAddressSpace = N->getDWARFAddressSpace())
    Record.push_back(*DWARFAddressSpace + 1);
  else
    Record.push_back(0);

  Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev);
  Record.clear();
}

/// Emit METADATA_COMPOSITE_TYPE. The IsNotUsedInOldTypeRef bit marks that
/// this node does not participate in the legacy string-based type-ref scheme.
void ModuleBitcodeWriter::writeDICompositeType(
    const DICompositeType *N, SmallVectorImpl<uint64_t> &Record,
    unsigned Abbrev) {
  const unsigned IsNotUsedInOldTypeRef = 0x2;
  Record.push_back(IsNotUsedInOldTypeRef | (unsigned)N->isDistinct());
  Record.push_back(N->getTag());
  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
  Record.push_back(N->getLine());
  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
  Record.push_back(VE.getMetadataOrNullID(N->getBaseType()));
  Record.push_back(N->getSizeInBits());
  Record.push_back(N->getAlignInBits());
  Record.push_back(N->getOffsetInBits());
  Record.push_back(N->getFlags());
  Record.push_back(VE.getMetadataOrNullID(N->getElements().get()));
  Record.push_back(N->getRuntimeLang());
  Record.push_back(VE.getMetadataOrNullID(N->getVTableHolder()));
  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get()));
  Record.push_back(VE.getMetadataOrNullID(N->getRawIdentifier()));
  Record.push_back(VE.getMetadataOrNullID(N->getDiscriminator()));

  Stream.EmitRecord(bitc::METADATA_COMPOSITE_TYPE, Record, Abbrev);
  Record.clear();
}

/// Emit METADATA_SUBROUTINE_TYPE: [flags|distinct, diflags, types, cc].
void ModuleBitcodeWriter::writeDISubroutineType(
    const DISubroutineType *N, SmallVectorImpl<uint64_t> &Record,
    unsigned Abbrev) {
  const unsigned HasNoOldTypeRefs = 0x2;
  Record.push_back(HasNoOldTypeRefs | (unsigned)N->isDistinct());
  Record.push_back(N->getFlags());
  Record.push_back(VE.getMetadataOrNullID(N->getTypeArray().get()));
  Record.push_back(N->getCC());

  Stream.EmitRecord(bitc::METADATA_SUBROUTINE_TYPE, Record, Abbrev);
  Record.clear();
}

/// Emit METADATA_FILE: [distinct, filename, directory, checksum kind,
/// checksum value, (optional) source].
void ModuleBitcodeWriter::writeDIFile(const DIFile *N,
                                      SmallVectorImpl<uint64_t> &Record,
                                      unsigned Abbrev) {
  Record.push_back(N->isDistinct());
  Record.push_back(VE.getMetadataOrNullID(N->getRawFilename()));
  Record.push_back(VE.getMetadataOrNullID(N->getRawDirectory()));
  if (N->getRawChecksum()) {
    Record.push_back(N->getRawChecksum()->Kind);
    Record.push_back(VE.getMetadataOrNullID(N->getRawChecksum()->Value));
  } else {
    // Maintain backwards compatibility with the old internal representation of
    // CSK_None in ChecksumKind by writing nulls here when Checksum is None.
+    Record.push_back(0); +    Record.push_back(VE.getMetadataOrNullID(nullptr)); +  } +  auto Source = N->getRawSource(); +  if (Source) +    Record.push_back(VE.getMetadataOrNullID(*Source)); + +  Stream.EmitRecord(bitc::METADATA_FILE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDICompileUnit(const DICompileUnit *N, +                                             SmallVectorImpl<uint64_t> &Record, +                                             unsigned Abbrev) { +  assert(N->isDistinct() && "Expected distinct compile units"); +  Record.push_back(/* IsDistinct */ true); +  Record.push_back(N->getSourceLanguage()); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawProducer())); +  Record.push_back(N->isOptimized()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawFlags())); +  Record.push_back(N->getRuntimeVersion()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawSplitDebugFilename())); +  Record.push_back(N->getEmissionKind()); +  Record.push_back(VE.getMetadataOrNullID(N->getEnumTypes().get())); +  Record.push_back(VE.getMetadataOrNullID(N->getRetainedTypes().get())); +  Record.push_back(/* subprograms */ 0); +  Record.push_back(VE.getMetadataOrNullID(N->getGlobalVariables().get())); +  Record.push_back(VE.getMetadataOrNullID(N->getImportedEntities().get())); +  Record.push_back(N->getDWOId()); +  Record.push_back(VE.getMetadataOrNullID(N->getMacros().get())); +  Record.push_back(N->getSplitDebugInlining()); +  Record.push_back(N->getDebugInfoForProfiling()); +  Record.push_back(N->getGnuPubnames()); + +  Stream.EmitRecord(bitc::METADATA_COMPILE_UNIT, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDISubprogram(const DISubprogram *N, +                                            SmallVectorImpl<uint64_t> &Record, +                                            unsigned Abbrev) { +  uint64_t HasUnitFlag = 1 << 1; +  Record.push_back(N->isDistinct() 
| HasUnitFlag); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName())); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getType())); +  Record.push_back(N->isLocalToUnit()); +  Record.push_back(N->isDefinition()); +  Record.push_back(N->getScopeLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getContainingType())); +  Record.push_back(N->getVirtuality()); +  Record.push_back(N->getVirtualIndex()); +  Record.push_back(N->getFlags()); +  Record.push_back(N->isOptimized()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawUnit())); +  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get())); +  Record.push_back(VE.getMetadataOrNullID(N->getDeclaration())); +  Record.push_back(VE.getMetadataOrNullID(N->getRetainedNodes().get())); +  Record.push_back(N->getThisAdjustment()); +  Record.push_back(VE.getMetadataOrNullID(N->getThrownTypes().get())); + +  Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDILexicalBlock(const DILexicalBlock *N, +                                              SmallVectorImpl<uint64_t> &Record, +                                              unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getLine()); +  Record.push_back(N->getColumn()); + +  Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDILexicalBlockFile( +    const DILexicalBlockFile *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  
Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getDiscriminator()); + +  Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK_FILE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDINamespace(const DINamespace *N, +                                           SmallVectorImpl<uint64_t> &Record, +                                           unsigned Abbrev) { +  Record.push_back(N->isDistinct() | N->getExportSymbols() << 1); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); + +  Stream.EmitRecord(bitc::METADATA_NAMESPACE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIMacro(const DIMacro *N, +                                       SmallVectorImpl<uint64_t> &Record, +                                       unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(N->getMacinfoType()); +  Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawValue())); + +  Stream.EmitRecord(bitc::METADATA_MACRO, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIMacroFile(const DIMacroFile *N, +                                           SmallVectorImpl<uint64_t> &Record, +                                           unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(N->getMacinfoType()); +  Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(VE.getMetadataOrNullID(N->getElements().get())); + +  Stream.EmitRecord(bitc::METADATA_MACRO_FILE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIModule(const DIModule *N, +                                        SmallVectorImpl<uint64_t> &Record, +                                        unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  for (auto &I : 
N->operands()) +    Record.push_back(VE.getMetadataOrNullID(I)); + +  Stream.EmitRecord(bitc::METADATA_MODULE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDITemplateTypeParameter( +    const DITemplateTypeParameter *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getType())); + +  Stream.EmitRecord(bitc::METADATA_TEMPLATE_TYPE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDITemplateValueParameter( +    const DITemplateValueParameter *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(N->getTag()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getType())); +  Record.push_back(VE.getMetadataOrNullID(N->getValue())); + +  Stream.EmitRecord(bitc::METADATA_TEMPLATE_VALUE, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIGlobalVariable( +    const DIGlobalVariable *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  const uint64_t Version = 1 << 1; +  Record.push_back((uint64_t)N->isDistinct() | Version); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName())); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getType())); +  Record.push_back(N->isLocalToUnit()); +  Record.push_back(N->isDefinition()); +  Record.push_back(/* expr */ 0); +  Record.push_back(VE.getMetadataOrNullID(N->getStaticDataMemberDeclaration())); +  Record.push_back(N->getAlignInBits()); + +  Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR, Record, Abbrev); +  Record.clear(); +} + +void 
ModuleBitcodeWriter::writeDILocalVariable( +    const DILocalVariable *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  // In order to support all possible bitcode formats in BitcodeReader we need +  // to distinguish the following cases: +  // 1) Record has no artificial tag (Record[1]), +  //   has no obsolete inlinedAt field (Record[9]). +  //   In this case Record size will be 8, HasAlignment flag is false. +  // 2) Record has artificial tag (Record[1]), +  //   has no obsolete inlignedAt field (Record[9]). +  //   In this case Record size will be 9, HasAlignment flag is false. +  // 3) Record has both artificial tag (Record[1]) and +  //   obsolete inlignedAt field (Record[9]). +  //   In this case Record size will be 10, HasAlignment flag is false. +  // 4) Record has neither artificial tag, nor inlignedAt field, but +  //   HasAlignment flag is true and Record[8] contains alignment value. +  const uint64_t HasAlignmentFlag = 1 << 1; +  Record.push_back((uint64_t)N->isDistinct() | HasAlignmentFlag); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getType())); +  Record.push_back(N->getArg()); +  Record.push_back(N->getFlags()); +  Record.push_back(N->getAlignInBits()); + +  Stream.EmitRecord(bitc::METADATA_LOCAL_VAR, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDILabel( +    const DILabel *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  Record.push_back((uint64_t)N->isDistinct()); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getLine()); + +  Stream.EmitRecord(bitc::METADATA_LABEL, Record, Abbrev); +  Record.clear(); +} + +void 
ModuleBitcodeWriter::writeDIExpression(const DIExpression *N, +                                            SmallVectorImpl<uint64_t> &Record, +                                            unsigned Abbrev) { +  Record.reserve(N->getElements().size() + 1); +  const uint64_t Version = 3 << 1; +  Record.push_back((uint64_t)N->isDistinct() | Version); +  Record.append(N->elements_begin(), N->elements_end()); + +  Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIGlobalVariableExpression( +    const DIGlobalVariableExpression *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(VE.getMetadataOrNullID(N->getVariable())); +  Record.push_back(VE.getMetadataOrNullID(N->getExpression())); + +  Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR_EXPR, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIObjCProperty(const DIObjCProperty *N, +                                              SmallVectorImpl<uint64_t> &Record, +                                              unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getFile())); +  Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawSetterName())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawGetterName())); +  Record.push_back(N->getAttributes()); +  Record.push_back(VE.getMetadataOrNullID(N->getType())); + +  Stream.EmitRecord(bitc::METADATA_OBJC_PROPERTY, Record, Abbrev); +  Record.clear(); +} + +void ModuleBitcodeWriter::writeDIImportedEntity( +    const DIImportedEntity *N, SmallVectorImpl<uint64_t> &Record, +    unsigned Abbrev) { +  Record.push_back(N->isDistinct()); +  Record.push_back(N->getTag()); +  Record.push_back(VE.getMetadataOrNullID(N->getScope())); +  Record.push_back(VE.getMetadataOrNullID(N->getEntity())); + 
 Record.push_back(N->getLine()); +  Record.push_back(VE.getMetadataOrNullID(N->getRawName())); +  Record.push_back(VE.getMetadataOrNullID(N->getRawFile())); + +  Stream.EmitRecord(bitc::METADATA_IMPORTED_ENTITY, Record, Abbrev); +  Record.clear(); +} + +unsigned ModuleBitcodeWriter::createNamedMetadataAbbrev() { +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_NAME)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); +  return Stream.EmitAbbrev(std::move(Abbv)); +} + +void ModuleBitcodeWriter::writeNamedMetadata( +    SmallVectorImpl<uint64_t> &Record) { +  if (M.named_metadata_empty()) +    return; + +  unsigned Abbrev = createNamedMetadataAbbrev(); +  for (const NamedMDNode &NMD : M.named_metadata()) { +    // Write name. +    StringRef Str = NMD.getName(); +    Record.append(Str.bytes_begin(), Str.bytes_end()); +    Stream.EmitRecord(bitc::METADATA_NAME, Record, Abbrev); +    Record.clear(); + +    // Write named metadata operands. +    for (const MDNode *N : NMD.operands()) +      Record.push_back(VE.getMetadataID(N)); +    Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0); +    Record.clear(); +  } +} + +unsigned ModuleBitcodeWriter::createMetadataStringsAbbrev() { +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRINGS)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of strings +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // offset to chars +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); +  return Stream.EmitAbbrev(std::move(Abbv)); +} + +/// Write out a record for MDString. +/// +/// All the metadata strings in a metadata block are emitted in a single +/// record.  The sizes and strings themselves are shoved into a blob. 
+void ModuleBitcodeWriter::writeMetadataStrings( +    ArrayRef<const Metadata *> Strings, SmallVectorImpl<uint64_t> &Record) { +  if (Strings.empty()) +    return; + +  // Start the record with the number of strings. +  Record.push_back(bitc::METADATA_STRINGS); +  Record.push_back(Strings.size()); + +  // Emit the sizes of the strings in the blob. +  SmallString<256> Blob; +  { +    BitstreamWriter W(Blob); +    for (const Metadata *MD : Strings) +      W.EmitVBR(cast<MDString>(MD)->getLength(), 6); +    W.FlushToWord(); +  } + +  // Add the offset to the strings to the record. +  Record.push_back(Blob.size()); + +  // Add the strings to the blob. +  for (const Metadata *MD : Strings) +    Blob.append(cast<MDString>(MD)->getString()); + +  // Emit the final record. +  Stream.EmitRecordWithBlob(createMetadataStringsAbbrev(), Record, Blob); +  Record.clear(); +} + +// Generates an enum to use as an index in the Abbrev array of Metadata record. +enum MetadataAbbrev : unsigned { +#define HANDLE_MDNODE_LEAF(CLASS) CLASS##AbbrevID, +#include "llvm/IR/Metadata.def" +  LastPlusOne +}; + +void ModuleBitcodeWriter::writeMetadataRecords( +    ArrayRef<const Metadata *> MDs, SmallVectorImpl<uint64_t> &Record, +    std::vector<unsigned> *MDAbbrevs, std::vector<uint64_t> *IndexPos) { +  if (MDs.empty()) +    return; + +  // Initialize MDNode abbreviations. 
+#define HANDLE_MDNODE_LEAF(CLASS) unsigned CLASS##Abbrev = 0; +#include "llvm/IR/Metadata.def" + +  for (const Metadata *MD : MDs) { +    if (IndexPos) +      IndexPos->push_back(Stream.GetCurrentBitNo()); +    if (const MDNode *N = dyn_cast<MDNode>(MD)) { +      assert(N->isResolved() && "Expected forward references to be resolved"); + +      switch (N->getMetadataID()) { +      default: +        llvm_unreachable("Invalid MDNode subclass"); +#define HANDLE_MDNODE_LEAF(CLASS)                                              \ +  case Metadata::CLASS##Kind:                                                  \ +    if (MDAbbrevs)                                                             \ +      write##CLASS(cast<CLASS>(N), Record,                                     \ +                   (*MDAbbrevs)[MetadataAbbrev::CLASS##AbbrevID]);             \ +    else                                                                       \ +      write##CLASS(cast<CLASS>(N), Record, CLASS##Abbrev);                     \ +    continue; +#include "llvm/IR/Metadata.def" +      } +    } +    writeValueAsMetadata(cast<ValueAsMetadata>(MD), Record); +  } +} + +void ModuleBitcodeWriter::writeModuleMetadata() { +  if (!VE.hasMDs() && M.named_metadata_empty()) +    return; + +  Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 4); +  SmallVector<uint64_t, 64> Record; + +  // Emit all abbrevs upfront, so that the reader can jump in the middle of the +  // block and load any metadata. 
+  std::vector<unsigned> MDAbbrevs; + +  MDAbbrevs.resize(MetadataAbbrev::LastPlusOne); +  MDAbbrevs[MetadataAbbrev::DILocationAbbrevID] = createDILocationAbbrev(); +  MDAbbrevs[MetadataAbbrev::GenericDINodeAbbrevID] = +      createGenericDINodeAbbrev(); + +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_INDEX_OFFSET)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  unsigned OffsetAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_INDEX)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); +  unsigned IndexAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Emit MDStrings together upfront. +  writeMetadataStrings(VE.getMDStrings(), Record); + +  // We only emit an index for the metadata record if we have more than a given +  // (naive) threshold of metadatas, otherwise it is not worth it. +  if (VE.getNonMDStrings().size() > IndexThreshold) { +    // Write a placeholder value in for the offset of the metadata index, +    // which is written after the records, so that it can include +    // the offset of each entry. The placeholder offset will be +    // updated after all records are emitted. +    uint64_t Vals[] = {0, 0}; +    Stream.EmitRecord(bitc::METADATA_INDEX_OFFSET, Vals, OffsetAbbrev); +  } + +  // Compute and save the bit offset to the current position, which will be +  // patched when we emit the index later. We can simply subtract the 64-bit +  // fixed size from the current bit number to get the location to backpatch. +  uint64_t IndexOffsetRecordBitPos = Stream.GetCurrentBitNo(); + +  // This index will contain the bitpos for each individual record. 
+  std::vector<uint64_t> IndexPos; +  IndexPos.reserve(VE.getNonMDStrings().size()); + +  // Write all the records +  writeMetadataRecords(VE.getNonMDStrings(), Record, &MDAbbrevs, &IndexPos); + +  if (VE.getNonMDStrings().size() > IndexThreshold) { +    // Now that we have emitted all the records we will emit the index. But +    // first +    // backpatch the forward reference so that the reader can skip the records +    // efficiently. +    Stream.BackpatchWord64(IndexOffsetRecordBitPos - 64, +                           Stream.GetCurrentBitNo() - IndexOffsetRecordBitPos); + +    // Delta encode the index. +    uint64_t PreviousValue = IndexOffsetRecordBitPos; +    for (auto &Elt : IndexPos) { +      auto EltDelta = Elt - PreviousValue; +      PreviousValue = Elt; +      Elt = EltDelta; +    } +    // Emit the index record. +    Stream.EmitRecord(bitc::METADATA_INDEX, IndexPos, IndexAbbrev); +    IndexPos.clear(); +  } + +  // Write the named metadata now. +  writeNamedMetadata(Record); + +  auto AddDeclAttachedMetadata = [&](const GlobalObject &GO) { +    SmallVector<uint64_t, 4> Record; +    Record.push_back(VE.getValueID(&GO)); +    pushGlobalMetadataAttachment(Record, GO); +    Stream.EmitRecord(bitc::METADATA_GLOBAL_DECL_ATTACHMENT, Record); +  }; +  for (const Function &F : M) +    if (F.isDeclaration() && F.hasMetadata()) +      AddDeclAttachedMetadata(F); +  // FIXME: Only store metadata for declarations here, and move data for global +  // variable definitions to a separate block (PR28134). 
+  for (const GlobalVariable &GV : M.globals()) +    if (GV.hasMetadata()) +      AddDeclAttachedMetadata(GV); + +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::writeFunctionMetadata(const Function &F) { +  if (!VE.hasMDs()) +    return; + +  Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); +  SmallVector<uint64_t, 64> Record; +  writeMetadataStrings(VE.getMDStrings(), Record); +  writeMetadataRecords(VE.getNonMDStrings(), Record); +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::pushGlobalMetadataAttachment( +    SmallVectorImpl<uint64_t> &Record, const GlobalObject &GO) { +  // [n x [id, mdnode]] +  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; +  GO.getAllMetadata(MDs); +  for (const auto &I : MDs) { +    Record.push_back(I.first); +    Record.push_back(VE.getMetadataID(I.second)); +  } +} + +void ModuleBitcodeWriter::writeFunctionMetadataAttachment(const Function &F) { +  Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3); + +  SmallVector<uint64_t, 64> Record; + +  if (F.hasMetadata()) { +    pushGlobalMetadataAttachment(Record, F); +    Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); +    Record.clear(); +  } + +  // Write metadata attachments +  // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]] +  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; +  for (const BasicBlock &BB : F) +    for (const Instruction &I : BB) { +      MDs.clear(); +      I.getAllMetadataOtherThanDebugLoc(MDs); + +      // If no metadata, ignore instruction. 
+      if (MDs.empty()) continue; + +      Record.push_back(VE.getInstructionID(&I)); + +      for (unsigned i = 0, e = MDs.size(); i != e; ++i) { +        Record.push_back(MDs[i].first); +        Record.push_back(VE.getMetadataID(MDs[i].second)); +      } +      Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); +      Record.clear(); +    } + +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::writeModuleMetadataKinds() { +  SmallVector<uint64_t, 64> Record; + +  // Write metadata kinds +  // METADATA_KIND - [n x [id, name]] +  SmallVector<StringRef, 8> Names; +  M.getMDKindNames(Names); + +  if (Names.empty()) return; + +  Stream.EnterSubblock(bitc::METADATA_KIND_BLOCK_ID, 3); + +  for (unsigned MDKindID = 0, e = Names.size(); MDKindID != e; ++MDKindID) { +    Record.push_back(MDKindID); +    StringRef KName = Names[MDKindID]; +    Record.append(KName.begin(), KName.end()); + +    Stream.EmitRecord(bitc::METADATA_KIND, Record, 0); +    Record.clear(); +  } + +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::writeOperandBundleTags() { +  // Write metadata kinds +  // +  // OPERAND_BUNDLE_TAGS_BLOCK_ID : N x OPERAND_BUNDLE_TAG +  // +  // OPERAND_BUNDLE_TAG - [strchr x N] + +  SmallVector<StringRef, 8> Tags; +  M.getOperandBundleTags(Tags); + +  if (Tags.empty()) +    return; + +  Stream.EnterSubblock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID, 3); + +  SmallVector<uint64_t, 64> Record; + +  for (auto Tag : Tags) { +    Record.append(Tag.begin(), Tag.end()); + +    Stream.EmitRecord(bitc::OPERAND_BUNDLE_TAG, Record, 0); +    Record.clear(); +  } + +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::writeSyncScopeNames() { +  SmallVector<StringRef, 8> SSNs; +  M.getContext().getSyncScopeNames(SSNs); +  if (SSNs.empty()) +    return; + +  Stream.EnterSubblock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID, 2); + +  SmallVector<uint64_t, 64> Record; +  for (auto SSN : SSNs) { +    Record.append(SSN.begin(), SSN.end()); +    Stream.EmitRecord(bitc::SYNC_SCOPE_NAME, Record, 
0); +    Record.clear(); +  } + +  Stream.ExitBlock(); +} + +static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) { +  if ((int64_t)V >= 0) +    Vals.push_back(V << 1); +  else +    Vals.push_back((-V << 1) | 1); +} + +void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal, +                                         bool isGlobal) { +  if (FirstVal == LastVal) return; + +  Stream.EnterSubblock(bitc::CONSTANTS_BLOCK_ID, 4); + +  unsigned AggregateAbbrev = 0; +  unsigned String8Abbrev = 0; +  unsigned CString7Abbrev = 0; +  unsigned CString6Abbrev = 0; +  // If this is a constant pool for the module, emit module-specific abbrevs. +  if (isGlobal) { +    // Abbrev for CST_CODE_AGGREGATE. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_AGGREGATE)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(LastVal+1))); +    AggregateAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +    // Abbrev for CST_CODE_STRING. +    Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_STRING)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); +    String8Abbrev = Stream.EmitAbbrev(std::move(Abbv)); +    // Abbrev for CST_CODE_CSTRING. +    Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); +    CString7Abbrev = Stream.EmitAbbrev(std::move(Abbv)); +    // Abbrev for CST_CODE_CSTRING. 
+    Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); +    CString6Abbrev = Stream.EmitAbbrev(std::move(Abbv)); +  } + +  SmallVector<uint64_t, 64> Record; + +  const ValueEnumerator::ValueList &Vals = VE.getValues(); +  Type *LastTy = nullptr; +  for (unsigned i = FirstVal; i != LastVal; ++i) { +    const Value *V = Vals[i].first; +    // If we need to switch types, do so now. +    if (V->getType() != LastTy) { +      LastTy = V->getType(); +      Record.push_back(VE.getTypeID(LastTy)); +      Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record, +                        CONSTANTS_SETTYPE_ABBREV); +      Record.clear(); +    } + +    if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) { +      Record.push_back(unsigned(IA->hasSideEffects()) | +                       unsigned(IA->isAlignStack()) << 1 | +                       unsigned(IA->getDialect()&1) << 2); + +      // Add the asm string. +      const std::string &AsmStr = IA->getAsmString(); +      Record.push_back(AsmStr.size()); +      Record.append(AsmStr.begin(), AsmStr.end()); + +      // Add the constraint string. 
+      const std::string &ConstraintStr = IA->getConstraintString(); +      Record.push_back(ConstraintStr.size()); +      Record.append(ConstraintStr.begin(), ConstraintStr.end()); +      Stream.EmitRecord(bitc::CST_CODE_INLINEASM, Record); +      Record.clear(); +      continue; +    } +    const Constant *C = cast<Constant>(V); +    unsigned Code = -1U; +    unsigned AbbrevToUse = 0; +    if (C->isNullValue()) { +      Code = bitc::CST_CODE_NULL; +    } else if (isa<UndefValue>(C)) { +      Code = bitc::CST_CODE_UNDEF; +    } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) { +      if (IV->getBitWidth() <= 64) { +        uint64_t V = IV->getSExtValue(); +        emitSignedInt64(Record, V); +        Code = bitc::CST_CODE_INTEGER; +        AbbrevToUse = CONSTANTS_INTEGER_ABBREV; +      } else {                             // Wide integers, > 64 bits in size. +        // We have an arbitrary precision integer value to write whose +        // bit width is > 64. However, in canonical unsigned integer +        // format it is likely that the high bits are going to be zero. +        // So, we only write the number of active words. +        unsigned NWords = IV->getValue().getActiveWords(); +        const uint64_t *RawWords = IV->getValue().getRawData(); +        for (unsigned i = 0; i != NWords; ++i) { +          emitSignedInt64(Record, RawWords[i]); +        } +        Code = bitc::CST_CODE_WIDE_INTEGER; +      } +    } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { +      Code = bitc::CST_CODE_FLOAT; +      Type *Ty = CFP->getType(); +      if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) { +        Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue()); +      } else if (Ty->isX86_FP80Ty()) { +        // api needed to prevent premature destruction +        // bits are not in the same order as a normal i80 APInt, compensate. 
+        APInt api = CFP->getValueAPF().bitcastToAPInt(); +        const uint64_t *p = api.getRawData(); +        Record.push_back((p[1] << 48) | (p[0] >> 16)); +        Record.push_back(p[0] & 0xffffLL); +      } else if (Ty->isFP128Ty() || Ty->isPPC_FP128Ty()) { +        APInt api = CFP->getValueAPF().bitcastToAPInt(); +        const uint64_t *p = api.getRawData(); +        Record.push_back(p[0]); +        Record.push_back(p[1]); +      } else { +        assert(0 && "Unknown FP type!"); +      } +    } else if (isa<ConstantDataSequential>(C) && +               cast<ConstantDataSequential>(C)->isString()) { +      const ConstantDataSequential *Str = cast<ConstantDataSequential>(C); +      // Emit constant strings specially. +      unsigned NumElts = Str->getNumElements(); +      // If this is a null-terminated string, use the denser CSTRING encoding. +      if (Str->isCString()) { +        Code = bitc::CST_CODE_CSTRING; +        --NumElts;  // Don't encode the null, which isn't allowed by char6. 
+      } else { +        Code = bitc::CST_CODE_STRING; +        AbbrevToUse = String8Abbrev; +      } +      bool isCStr7 = Code == bitc::CST_CODE_CSTRING; +      bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING; +      for (unsigned i = 0; i != NumElts; ++i) { +        unsigned char V = Str->getElementAsInteger(i); +        Record.push_back(V); +        isCStr7 &= (V & 128) == 0; +        if (isCStrChar6) +          isCStrChar6 = BitCodeAbbrevOp::isChar6(V); +      } + +      if (isCStrChar6) +        AbbrevToUse = CString6Abbrev; +      else if (isCStr7) +        AbbrevToUse = CString7Abbrev; +    } else if (const ConstantDataSequential *CDS = +                  dyn_cast<ConstantDataSequential>(C)) { +      Code = bitc::CST_CODE_DATA; +      Type *EltTy = CDS->getType()->getElementType(); +      if (isa<IntegerType>(EltTy)) { +        for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) +          Record.push_back(CDS->getElementAsInteger(i)); +      } else { +        for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) +          Record.push_back( +              CDS->getElementAsAPFloat(i).bitcastToAPInt().getLimitedValue()); +      } +    } else if (isa<ConstantAggregate>(C)) { +      Code = bitc::CST_CODE_AGGREGATE; +      for (const Value *Op : C->operands()) +        Record.push_back(VE.getValueID(Op)); +      AbbrevToUse = AggregateAbbrev; +    } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { +      switch (CE->getOpcode()) { +      default: +        if (Instruction::isCast(CE->getOpcode())) { +          Code = bitc::CST_CODE_CE_CAST; +          Record.push_back(getEncodedCastOpcode(CE->getOpcode())); +          Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); +          Record.push_back(VE.getValueID(C->getOperand(0))); +          AbbrevToUse = CONSTANTS_CE_CAST_Abbrev; +        } else { +          assert(CE->getNumOperands() == 2 && "Unknown constant expr!"); +          Code = bitc::CST_CODE_CE_BINOP; +         
 Record.push_back(getEncodedBinaryOpcode(CE->getOpcode())); +          Record.push_back(VE.getValueID(C->getOperand(0))); +          Record.push_back(VE.getValueID(C->getOperand(1))); +          uint64_t Flags = getOptimizationFlags(CE); +          if (Flags != 0) +            Record.push_back(Flags); +        } +        break; +      case Instruction::GetElementPtr: { +        Code = bitc::CST_CODE_CE_GEP; +        const auto *GO = cast<GEPOperator>(C); +        Record.push_back(VE.getTypeID(GO->getSourceElementType())); +        if (Optional<unsigned> Idx = GO->getInRangeIndex()) { +          Code = bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX; +          Record.push_back((*Idx << 1) | GO->isInBounds()); +        } else if (GO->isInBounds()) +          Code = bitc::CST_CODE_CE_INBOUNDS_GEP; +        for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) { +          Record.push_back(VE.getTypeID(C->getOperand(i)->getType())); +          Record.push_back(VE.getValueID(C->getOperand(i))); +        } +        break; +      } +      case Instruction::Select: +        Code = bitc::CST_CODE_CE_SELECT; +        Record.push_back(VE.getValueID(C->getOperand(0))); +        Record.push_back(VE.getValueID(C->getOperand(1))); +        Record.push_back(VE.getValueID(C->getOperand(2))); +        break; +      case Instruction::ExtractElement: +        Code = bitc::CST_CODE_CE_EXTRACTELT; +        Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); +        Record.push_back(VE.getValueID(C->getOperand(0))); +        Record.push_back(VE.getTypeID(C->getOperand(1)->getType())); +        Record.push_back(VE.getValueID(C->getOperand(1))); +        break; +      case Instruction::InsertElement: +        Code = bitc::CST_CODE_CE_INSERTELT; +        Record.push_back(VE.getValueID(C->getOperand(0))); +        Record.push_back(VE.getValueID(C->getOperand(1))); +        Record.push_back(VE.getTypeID(C->getOperand(2)->getType())); +        
Record.push_back(VE.getValueID(C->getOperand(2))); +        break; +      case Instruction::ShuffleVector: +        // If the return type and argument types are the same, this is a +        // standard shufflevector instruction.  If the types are different, +        // then the shuffle is widening or truncating the input vectors, and +        // the argument type must also be encoded. +        if (C->getType() == C->getOperand(0)->getType()) { +          Code = bitc::CST_CODE_CE_SHUFFLEVEC; +        } else { +          Code = bitc::CST_CODE_CE_SHUFVEC_EX; +          Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); +        } +        Record.push_back(VE.getValueID(C->getOperand(0))); +        Record.push_back(VE.getValueID(C->getOperand(1))); +        Record.push_back(VE.getValueID(C->getOperand(2))); +        break; +      case Instruction::ICmp: +      case Instruction::FCmp: +        Code = bitc::CST_CODE_CE_CMP; +        Record.push_back(VE.getTypeID(C->getOperand(0)->getType())); +        Record.push_back(VE.getValueID(C->getOperand(0))); +        Record.push_back(VE.getValueID(C->getOperand(1))); +        Record.push_back(CE->getPredicate()); +        break; +      } +    } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) { +      Code = bitc::CST_CODE_BLOCKADDRESS; +      Record.push_back(VE.getTypeID(BA->getFunction()->getType())); +      Record.push_back(VE.getValueID(BA->getFunction())); +      Record.push_back(VE.getGlobalBasicBlockID(BA->getBasicBlock())); +    } else { +#ifndef NDEBUG +      C->dump(); +#endif +      llvm_unreachable("Unknown constant!"); +    } +    Stream.EmitRecord(Code, Record, AbbrevToUse); +    Record.clear(); +  } + +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::writeModuleConstants() { +  const ValueEnumerator::ValueList &Vals = VE.getValues(); + +  // Find the first constant to emit, which is the first non-globalvalue value. +  // We know globalvalues have been emitted by WriteModuleInfo. 
+  for (unsigned i = 0, e = Vals.size(); i != e; ++i) { +    if (!isa<GlobalValue>(Vals[i].first)) { +      writeConstants(i, Vals.size(), true); +      return; +    } +  } +} + +/// pushValueAndType - The file has to encode both the value and type id for +/// many values, because we need to know what type to create for forward +/// references.  However, most operands are not forward references, so this type +/// field is not needed. +/// +/// This function adds V's value ID to Vals.  If the value ID is higher than the +/// instruction ID, then it is a forward reference, and it also includes the +/// type ID.  The value ID that is written is encoded relative to the InstID. +bool ModuleBitcodeWriter::pushValueAndType(const Value *V, unsigned InstID, +                                           SmallVectorImpl<unsigned> &Vals) { +  unsigned ValID = VE.getValueID(V); +  // Make encoding relative to the InstID. +  Vals.push_back(InstID - ValID); +  if (ValID >= InstID) { +    Vals.push_back(VE.getTypeID(V->getType())); +    return true; +  } +  return false; +} + +void ModuleBitcodeWriter::writeOperandBundles(ImmutableCallSite CS, +                                              unsigned InstID) { +  SmallVector<unsigned, 64> Record; +  LLVMContext &C = CS.getInstruction()->getContext(); + +  for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) { +    const auto &Bundle = CS.getOperandBundleAt(i); +    Record.push_back(C.getOperandBundleTagID(Bundle.getTagName())); + +    for (auto &Input : Bundle.Inputs) +      pushValueAndType(Input, InstID, Record); + +    Stream.EmitRecord(bitc::FUNC_CODE_OPERAND_BUNDLE, Record); +    Record.clear(); +  } +} + +/// pushValue - Like pushValueAndType, but where the type of the value is +/// omitted (perhaps it was already encoded in an earlier operand). 
+// Emit V's value ID relative to InstID, with no type field.  Callers use this
+// only when the operand cannot be a forward reference (or its type was already
+// encoded), so the unsigned subtraction cannot wrap.
+void ModuleBitcodeWriter::pushValue(const Value *V, unsigned InstID,
+                                    SmallVectorImpl<unsigned> &Vals) {
+  unsigned ValID = VE.getValueID(V);
+  Vals.push_back(InstID - ValID);
+}
+
+// Like pushValue, but emits the relative ID as a signed quantity so that a
+// forward reference (ValID >= InstID) yields a negative delta instead of
+// wrapping.  Used for PHI incoming values, where forward references are
+// common (see the PHI case in writeInstruction).
+void ModuleBitcodeWriter::pushValueSigned(const Value *V, unsigned InstID,
+                                          SmallVectorImpl<uint64_t> &Vals) {
+  unsigned ValID = VE.getValueID(V);
+  int64_t diff = ((int32_t)InstID - (int32_t)ValID);
+  emitSignedInt64(Vals, diff);
+}
+
+/// WriteInstruction - Emit an instruction to the specified stream.
+/// \p InstID is the value ID that I's result will receive; operand IDs are
+/// encoded relative to it (see pushValueAndType).  \p Vals is caller-provided
+/// scratch storage and is left empty on return.
+void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
+                                           unsigned InstID,
+                                           SmallVectorImpl<unsigned> &Vals) {
+  unsigned Code = 0;
+  unsigned AbbrevToUse = 0;
+  VE.setInstructionID(&I);
+  switch (I.getOpcode()) {
+  default:
+    // All casts and binary operators funnel through the default case; any
+    // other unhandled opcode trips the assert below.
+    if (Instruction::isCast(I.getOpcode())) {
+      Code = bitc::FUNC_CODE_INST_CAST;
+      // The compact abbrev only applies when the operand is not a forward
+      // reference (pushValueAndType returned false => no type field emitted).
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+        AbbrevToUse = FUNCTION_INST_CAST_ABBREV;
+      Vals.push_back(VE.getTypeID(I.getType()));
+      Vals.push_back(getEncodedCastOpcode(I.getOpcode()));
+    } else {
+      assert(isa<BinaryOperator>(I) && "Unknown instruction!");
+      Code = bitc::FUNC_CODE_INST_BINOP;
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+        AbbrevToUse = FUNCTION_INST_BINOP_ABBREV;
+      pushValue(I.getOperand(1), InstID, Vals);
+      Vals.push_back(getEncodedBinaryOpcode(I.getOpcode()));
+      uint64_t Flags = getOptimizationFlags(&I);
+      if (Flags != 0) {
+        // Fast-math/wrap flags ride in a trailing field; switch to the
+        // flags-carrying abbrev if we were using the compact one.
+        if (AbbrevToUse == FUNCTION_INST_BINOP_ABBREV)
+          AbbrevToUse = FUNCTION_INST_BINOP_FLAGS_ABBREV;
+        Vals.push_back(Flags);
+      }
+    }
+    break;
+
+  case Instruction::GetElementPtr: {
+    Code = bitc::FUNC_CODE_INST_GEP;
+    AbbrevToUse = FUNCTION_INST_GEP_ABBREV;
+    auto &GEPInst = cast<GetElementPtrInst>(I);
+    Vals.push_back(GEPInst.isInBounds());
+    Vals.push_back(VE.getTypeID(GEPInst.getSourceElementType()));
+    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+      pushValueAndType(I.getOperand(i), InstID, Vals);
+    break;
+  }
+  case Instruction::ExtractValue: {
+    Code = bitc::FUNC_CODE_INST_EXTRACTVAL;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    const ExtractValueInst *EVI = cast<ExtractValueInst>(&I);
+    // Constant aggregate indices are appended directly (not value IDs).
+    Vals.append(EVI->idx_begin(), EVI->idx_end());
+    break;
+  }
+  case Instruction::InsertValue: {
+    Code = bitc::FUNC_CODE_INST_INSERTVAL;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValueAndType(I.getOperand(1), InstID, Vals);
+    const InsertValueInst *IVI = cast<InsertValueInst>(&I);
+    Vals.append(IVI->idx_begin(), IVI->idx_end());
+    break;
+  }
+  case Instruction::Select:
+    // Record order is (truevalue, falsevalue, condition), i.e. operands
+    // 1, 2, 0 -- not operand order.
+    Code = bitc::FUNC_CODE_INST_VSELECT;
+    pushValueAndType(I.getOperand(1), InstID, Vals);
+    pushValue(I.getOperand(2), InstID, Vals);
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    break;
+  case Instruction::ExtractElement:
+    Code = bitc::FUNC_CODE_INST_EXTRACTELT;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValueAndType(I.getOperand(1), InstID, Vals);
+    break;
+  case Instruction::InsertElement:
+    Code = bitc::FUNC_CODE_INST_INSERTELT;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValue(I.getOperand(1), InstID, Vals);
+    pushValueAndType(I.getOperand(2), InstID, Vals);
+    break;
+  case Instruction::ShuffleVector:
+    Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValue(I.getOperand(1), InstID, Vals);
+    pushValue(I.getOperand(2), InstID, Vals);
+    break;
+  case Instruction::ICmp:
+  case Instruction::FCmp: {
+    // compare returning Int1Ty or vector of Int1Ty
+    Code = bitc::FUNC_CODE_INST_CMP2;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValue(I.getOperand(1), InstID, Vals);
+    Vals.push_back(cast<CmpInst>(I).getPredicate());
+    // Optional trailing fast-math flags (FCmp only produces nonzero here).
+    uint64_t Flags = getOptimizationFlags(&I);
+    if (Flags != 0)
+      Vals.push_back(Flags);
+    break;
+  }
+
+  case Instruction::Ret:
+    {
+      Code = bitc::FUNC_CODE_INST_RET;
+      unsigned NumOperands = I.getNumOperands();
+      if (NumOperands == 0)
+        AbbrevToUse = FUNCTION_INST_RET_VOID_ABBREV;
+      else if (NumOperands == 1) {
+        if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+          AbbrevToUse = FUNCTION_INST_RET_VAL_ABBREV;
+      } else {
+        for (unsigned i = 0, e = NumOperands; i != e; ++i)
+          pushValueAndType(I.getOperand(i), InstID, Vals);
+      }
+    }
+    break;
+  case Instruction::Br:
+    {
+      Code = bitc::FUNC_CODE_INST_BR;
+      const BranchInst &II = cast<BranchInst>(I);
+      // Successor block IDs are absolute; only the condition is relative.
+      Vals.push_back(VE.getValueID(II.getSuccessor(0)));
+      if (II.isConditional()) {
+        Vals.push_back(VE.getValueID(II.getSuccessor(1)));
+        pushValue(II.getCondition(), InstID, Vals);
+      }
+    }
+    break;
+  case Instruction::Switch:
+    {
+      Code = bitc::FUNC_CODE_INST_SWITCH;
+      const SwitchInst &SI = cast<SwitchInst>(I);
+      Vals.push_back(VE.getTypeID(SI.getCondition()->getType()));
+      pushValue(SI.getCondition(), InstID, Vals);
+      Vals.push_back(VE.getValueID(SI.getDefaultDest()));
+      // Case constants and destinations are absolute value IDs.
+      for (auto Case : SI.cases()) {
+        Vals.push_back(VE.getValueID(Case.getCaseValue()));
+        Vals.push_back(VE.getValueID(Case.getCaseSuccessor()));
+      }
+    }
+    break;
+  case Instruction::IndirectBr:
+    Code = bitc::FUNC_CODE_INST_INDIRECTBR;
+    Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
+    // Encode the address operand as relative, but not the basic blocks.
+    pushValue(I.getOperand(0), InstID, Vals);
+    for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i)
+      Vals.push_back(VE.getValueID(I.getOperand(i)));
+    break;
+
+  case Instruction::Invoke: {
+    const InvokeInst *II = cast<InvokeInst>(&I);
+    const Value *Callee = II->getCalledValue();
+    FunctionType *FTy = II->getFunctionType();
+
+    // Operand bundles are written as separate records preceding the invoke.
+    if (II->hasOperandBundles())
+      writeOperandBundles(II, InstID);
+
+    Code = bitc::FUNC_CODE_INST_INVOKE;
+
+    Vals.push_back(VE.getAttributeListID(II->getAttributes()));
+    // NOTE(review): bit 13 presumably marks the explicit function-type field
+    // emitted below, mirroring CALL_EXPLICIT_TYPE in the Call case -- confirm
+    // against the bitcode reader.
+    Vals.push_back(II->getCallingConv() | 1 << 13);
+    Vals.push_back(VE.getValueID(II->getNormalDest()));
+    Vals.push_back(VE.getValueID(II->getUnwindDest()));
+    Vals.push_back(VE.getTypeID(FTy));
+    pushValueAndType(Callee, InstID, Vals);
+
+    // Emit value #'s for the fixed parameters.
+    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+      pushValue(I.getOperand(i), InstID, Vals); // fixed param.
+
+    // Emit type/value pairs for varargs params.
+    if (FTy->isVarArg()) {
+      for (unsigned i = FTy->getNumParams(), e = II->getNumArgOperands();
+           i != e; ++i)
+        pushValueAndType(I.getOperand(i), InstID, Vals); // vararg
+    }
+    break;
+  }
+  case Instruction::Resume:
+    Code = bitc::FUNC_CODE_INST_RESUME;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    break;
+  case Instruction::CleanupRet: {
+    Code = bitc::FUNC_CODE_INST_CLEANUPRET;
+    const auto &CRI = cast<CleanupReturnInst>(I);
+    pushValue(CRI.getCleanupPad(), InstID, Vals);
+    // Unwind destination is optional; its absence distinguishes
+    // "unwind to caller".
+    if (CRI.hasUnwindDest())
+      Vals.push_back(VE.getValueID(CRI.getUnwindDest()));
+    break;
+  }
+  case Instruction::CatchRet: {
+    Code = bitc::FUNC_CODE_INST_CATCHRET;
+    const auto &CRI = cast<CatchReturnInst>(I);
+    pushValue(CRI.getCatchPad(), InstID, Vals);
+    Vals.push_back(VE.getValueID(CRI.getSuccessor()));
+    break;
+  }
+  case Instruction::CleanupPad:
+  case Instruction::CatchPad: {
+    const auto &FuncletPad = cast<FuncletPadInst>(I);
+    Code = isa<CatchPadInst>(FuncletPad) ? bitc::FUNC_CODE_INST_CATCHPAD
+                                         : bitc::FUNC_CODE_INST_CLEANUPPAD;
+    pushValue(FuncletPad.getParentPad(), InstID, Vals);
+
+    // Arg operands are prefixed with their count so the reader knows how
+    // many type/value pairs follow.
+    unsigned NumArgOperands = FuncletPad.getNumArgOperands();
+    Vals.push_back(NumArgOperands);
+    for (unsigned Op = 0; Op != NumArgOperands; ++Op)
+      pushValueAndType(FuncletPad.getArgOperand(Op), InstID, Vals);
+    break;
+  }
+  case Instruction::CatchSwitch: {
+    Code = bitc::FUNC_CODE_INST_CATCHSWITCH;
+    const auto &CatchSwitch = cast<CatchSwitchInst>(I);
+
+    pushValue(CatchSwitch.getParentPad(), InstID, Vals);
+
+    unsigned NumHandlers = CatchSwitch.getNumHandlers();
+    Vals.push_back(NumHandlers);
+    for (const BasicBlock *CatchPadBB : CatchSwitch.handlers())
+      Vals.push_back(VE.getValueID(CatchPadBB));
+
+    // Optional trailing unwind destination, as with CleanupRet.
+    if (CatchSwitch.hasUnwindDest())
+      Vals.push_back(VE.getValueID(CatchSwitch.getUnwindDest()));
+    break;
+  }
+  case Instruction::Unreachable:
+    Code = bitc::FUNC_CODE_INST_UNREACHABLE;
+    AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
+    break;
+
+  case Instruction::PHI: {
+    const PHINode &PN = cast<PHINode>(I);
+    Code = bitc::FUNC_CODE_INST_PHI;
+    // With the newer instruction encoding, forward references could give
+    // negative valued IDs.  This is most common for PHIs, so we use
+    // signed VBRs.
+    SmallVector<uint64_t, 128> Vals64;
+    Vals64.push_back(VE.getTypeID(PN.getType()));
+    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
+      pushValueSigned(PN.getIncomingValue(i), InstID, Vals64);
+      Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i)));
+    }
+    // Emit a Vals64 vector and exit.
+    Stream.EmitRecord(Code, Vals64, AbbrevToUse);
+    Vals64.clear();
+    return;
+  }
+
+  case Instruction::LandingPad: {
+    const LandingPadInst &LP = cast<LandingPadInst>(I);
+    Code = bitc::FUNC_CODE_INST_LANDINGPAD;
+    Vals.push_back(VE.getTypeID(LP.getType()));
+    Vals.push_back(LP.isCleanup());
+    Vals.push_back(LP.getNumClauses());
+    for (unsigned I = 0, E = LP.getNumClauses(); I != E; ++I) {
+      // Each clause is tagged Catch or Filter, followed by its value.
+      if (LP.isCatch(I))
+        Vals.push_back(LandingPadInst::Catch);
+      else
+        Vals.push_back(LandingPadInst::Filter);
+      pushValueAndType(LP.getClause(I), InstID, Vals);
+    }
+    break;
+  }
+
+  case Instruction::Alloca: {
+    Code = bitc::FUNC_CODE_INST_ALLOCA;
+    const AllocaInst &AI = cast<AllocaInst>(I);
+    Vals.push_back(VE.getTypeID(AI.getAllocatedType()));
+    Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
+    Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
+    // AlignRecord layout: bits 0-4 = log2(align)+1, bit 5 = inalloca,
+    // bit 7 = swifterror.  Bit 6 is always set here -- presumably a flag for
+    // the explicit allocated-type field above; confirm against the reader.
+    unsigned AlignRecord = Log2_32(AI.getAlignment()) + 1;
+    assert(Log2_32(Value::MaximumAlignment) + 1 < 1 << 5 &&
+           "not enough bits for maximum alignment");
+    assert(AlignRecord < 1 << 5 && "alignment greater than 1 << 64");
+    AlignRecord |= AI.isUsedWithInAlloca() << 5;
+    AlignRecord |= 1 << 6;
+    AlignRecord |= AI.isSwiftError() << 7;
+    Vals.push_back(AlignRecord);
+    break;
+  }
+
+  case Instruction::Load:
+    if (cast<LoadInst>(I).isAtomic()) {
+      Code = bitc::FUNC_CODE_INST_LOADATOMIC;
+      pushValueAndType(I.getOperand(0), InstID, Vals);
+    } else {
+      Code = bitc::FUNC_CODE_INST_LOAD;
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals)) // ptr
+        AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
+    }
+    Vals.push_back(VE.getTypeID(I.getType()));
+    // Alignment is stored as log2(align)+1 throughout this format.
+    Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
+    Vals.push_back(cast<LoadInst>(I).isVolatile());
+    if (cast<LoadInst>(I).isAtomic()) {
+      Vals.push_back(getEncodedOrdering(cast<LoadInst>(I).getOrdering()));
+      Vals.push_back(getEncodedSyncScopeID(cast<LoadInst>(I).getSyncScopeID()));
+    }
+    break;
+  case Instruction::Store:
+    if (cast<StoreInst>(I).isAtomic())
+      Code = bitc::FUNC_CODE_INST_STOREATOMIC;
+    else
+      Code = bitc::FUNC_CODE_INST_STORE;
+    pushValueAndType(I.getOperand(1), InstID, Vals); // ptrty + ptr
+    pushValueAndType(I.getOperand(0), InstID, Vals); // valty + val
+    Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
+    Vals.push_back(cast<StoreInst>(I).isVolatile());
+    if (cast<StoreInst>(I).isAtomic()) {
+      Vals.push_back(getEncodedOrdering(cast<StoreInst>(I).getOrdering()));
+      Vals.push_back(
+          getEncodedSyncScopeID(cast<StoreInst>(I).getSyncScopeID()));
+    }
+    break;
+  case Instruction::AtomicCmpXchg:
+    Code = bitc::FUNC_CODE_INST_CMPXCHG;
+    pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr
+    pushValueAndType(I.getOperand(1), InstID, Vals); // cmp.
+    pushValue(I.getOperand(2), InstID, Vals);        // newval.
+    Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
+    Vals.push_back(
+        getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
+    Vals.push_back(
+        getEncodedSyncScopeID(cast<AtomicCmpXchgInst>(I).getSyncScopeID()));
+    Vals.push_back(
+        getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
+    Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
+    break;
+  case Instruction::AtomicRMW:
+    Code = bitc::FUNC_CODE_INST_ATOMICRMW;
+    pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr
+    pushValue(I.getOperand(1), InstID, Vals);        // val.
+    Vals.push_back(
+        getEncodedRMWOperation(cast<AtomicRMWInst>(I).getOperation()));
+    Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
+    Vals.push_back(getEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering()));
+    Vals.push_back(
+        getEncodedSyncScopeID(cast<AtomicRMWInst>(I).getSyncScopeID()));
+    break;
+  case Instruction::Fence:
+    Code = bitc::FUNC_CODE_INST_FENCE;
+    Vals.push_back(getEncodedOrdering(cast<FenceInst>(I).getOrdering()));
+    Vals.push_back(getEncodedSyncScopeID(cast<FenceInst>(I).getSyncScopeID()));
+    break;
+  case Instruction::Call: {
+    const CallInst &CI = cast<CallInst>(I);
+    FunctionType *FTy = CI.getFunctionType();
+
+    // Operand bundles are written as separate records preceding the call.
+    if (CI.hasOperandBundles())
+      writeOperandBundles(&CI, InstID);
+
+    Code = bitc::FUNC_CODE_INST_CALL;
+
+    Vals.push_back(VE.getAttributeListID(CI.getAttributes()));
+
+    // Pack cconv + tail-call kind + explicit-type + FMF-present into one
+    // field; the actual fast-math flags follow only when nonzero.
+    unsigned Flags = getOptimizationFlags(&I);
+    Vals.push_back(CI.getCallingConv() << bitc::CALL_CCONV |
+                   unsigned(CI.isTailCall()) << bitc::CALL_TAIL |
+                   unsigned(CI.isMustTailCall()) << bitc::CALL_MUSTTAIL |
+                   1 << bitc::CALL_EXPLICIT_TYPE |
+                   unsigned(CI.isNoTailCall()) << bitc::CALL_NOTAIL |
+                   unsigned(Flags != 0) << bitc::CALL_FMF);
+    if (Flags != 0)
+      Vals.push_back(Flags);
+
+    Vals.push_back(VE.getTypeID(FTy));
+    pushValueAndType(CI.getCalledValue(), InstID, Vals); // Callee
+
+    // Emit value #'s for the fixed parameters.
+    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+      // Check for labels (can happen with asm labels).
+      if (FTy->getParamType(i)->isLabelTy())
+        Vals.push_back(VE.getValueID(CI.getArgOperand(i)));
+      else
+        pushValue(CI.getArgOperand(i), InstID, Vals); // fixed param.
+    }
+
+    // Emit type/value pairs for varargs params.
+    if (FTy->isVarArg()) {
+      for (unsigned i = FTy->getNumParams(), e = CI.getNumArgOperands();
+           i != e; ++i)
+        pushValueAndType(CI.getArgOperand(i), InstID, Vals); // varargs
+    }
+    break;
+  }
+  case Instruction::VAArg:
+    Code = bitc::FUNC_CODE_INST_VAARG;
+    Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));   // valistty
+    pushValue(I.getOperand(0), InstID, Vals);                   // valist.
+    Vals.push_back(VE.getTypeID(I.getType())); // restype.
+    break;
+  }
+
+  Stream.EmitRecord(Code, Vals, AbbrevToUse);
+  Vals.clear();
+}
+
+/// Write a GlobalValue VST to the module. The purpose of this data structure is
+/// to allow clients to efficiently find the function body.
+void ModuleBitcodeWriter::writeGlobalValueSymbolTable(
+  DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) {
+  // Get the offset of the VST we are writing, and backpatch it into
+  // the VST forward declaration record.
+  uint64_t VSTOffset = Stream.GetCurrentBitNo();
+  // The BitcodeStartBit was the stream offset of the identification block.
+  VSTOffset -= bitcodeStartBit();
+  assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
+  // Note that we add 1 here because the offset is relative to one word
+  // before the start of the identification block, which was historically
+  // always the start of the regular bitcode header.
+  Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1);
+
+  Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+  // Abbreviation shared by all FNENTRY records in this block.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
+  unsigned FnEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  for (const Function &F : M) {
+    uint64_t Record[2];
+
+    // Declarations have no body to point at.
+    if (F.isDeclaration())
+      continue;
+
+    Record[0] = VE.getValueID(&F);
+
+    // Save the word offset of the function (from the start of the
+    // actual bitcode written to the stream).
+    uint64_t BitcodeIndex = FunctionToBitcodeIndex[&F] - bitcodeStartBit();
+    assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned");
+    // Note that we add 1 here because the offset is relative to one word
+    // before the start of the identification block, which was historically
+    // always the start of the regular bitcode header.
+    Record[1] = BitcodeIndex / 32 + 1;
+
+    Stream.EmitRecord(bitc::VST_CODE_FNENTRY, Record, FnEntryAbbrev);
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Emit names for arguments, instructions and basic blocks in a function.
+void ModuleBitcodeWriter::writeFunctionLevelValueSymbolTable(
+    const ValueSymbolTable &VST) {
+  if (VST.empty())
+    return;
+
+  Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+  // FIXME: Set up the abbrev, we know how many values there are!
+  // FIXME: We know if the type names can use 7-bit ascii.
+  SmallVector<uint64_t, 64> NameVals;
+
+  for (const ValueName &Name : VST) {
+    // Figure out the encoding to use for the name.
+    StringEncoding Bits = getStringEncoding(Name.getKey());
+
+    // Default to the widest (8-bit) encoding, then narrow below when the
+    // name fits char6 or fixed-7.
+    unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
+    NameVals.push_back(VE.getValueID(Name.getValue()));
+
+    // VST_CODE_ENTRY:   [valueid, namechar x N]
+    // VST_CODE_BBENTRY: [bbid, namechar x N]
+    unsigned Code;
+    if (isa<BasicBlock>(Name.getValue())) {
+      Code = bitc::VST_CODE_BBENTRY;
+      if (Bits == SE_Char6)
+        AbbrevToUse = VST_BBENTRY_6_ABBREV;
+    } else {
+      Code = bitc::VST_CODE_ENTRY;
+      if (Bits == SE_Char6)
+        AbbrevToUse = VST_ENTRY_6_ABBREV;
+      else if (Bits == SE_Fixed7)
+        AbbrevToUse = VST_ENTRY_7_ABBREV;
+    }
+
+    for (const auto P : Name.getKey())
+      NameVals.push_back((unsigned char)P);
+
+    // Emit the finished record.
+    Stream.EmitRecord(Code, NameVals, AbbrevToUse);
+    NameVals.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+// Emit one use-list shuffle record: the permutation followed by the value ID
+// it applies to.  BasicBlocks get their own record code.
+void ModuleBitcodeWriter::writeUseList(UseListOrder &&Order) {
+  assert(Order.Shuffle.size() >= 2 && "Shuffle too small");
+  unsigned Code;
+  if (isa<BasicBlock>(Order.V))
+    Code = bitc::USELIST_CODE_BB;
+  else
+    Code = bitc::USELIST_CODE_DEFAULT;
+
+  SmallVector<uint64_t, 64> Record(Order.Shuffle.begin(), Order.Shuffle.end());
+  Record.push_back(VE.getValueID(Order.V));
+  Stream.EmitRecord(Code, Record);
+}
+
+// Emit the USELIST block containing all pending use-list orders recorded for
+// \p F, consuming them from the back of VE.UseListOrders.  Emits no block at
+// all when there are none.
+void ModuleBitcodeWriter::writeUseListBlock(const Function *F) {
+  assert(VE.shouldPreserveUseListOrder() &&
+         "Expected to be preserving use-list order");
+
+  auto hasMore = [&]() {
+    return !VE.UseListOrders.empty() && VE.UseListOrders.back().F == F;
+  };
+  if (!hasMore())
+    // Nothing to do.
+    return;
+
+  Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3);
+  while (hasMore()) {
+    writeUseList(std::move(VE.UseListOrders.back()));
+    VE.UseListOrders.pop_back();
+  }
+  Stream.ExitBlock();
+}
+
+/// Emit a function body to the module stream.
+void ModuleBitcodeWriter::writeFunction( +    const Function &F, +    DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) { +  // Save the bitcode index of the start of this function block for recording +  // in the VST. +  FunctionToBitcodeIndex[&F] = Stream.GetCurrentBitNo(); + +  Stream.EnterSubblock(bitc::FUNCTION_BLOCK_ID, 4); +  VE.incorporateFunction(F); + +  SmallVector<unsigned, 64> Vals; + +  // Emit the number of basic blocks, so the reader can create them ahead of +  // time. +  Vals.push_back(VE.getBasicBlocks().size()); +  Stream.EmitRecord(bitc::FUNC_CODE_DECLAREBLOCKS, Vals); +  Vals.clear(); + +  // If there are function-local constants, emit them now. +  unsigned CstStart, CstEnd; +  VE.getFunctionConstantRange(CstStart, CstEnd); +  writeConstants(CstStart, CstEnd, false); + +  // If there is function-local metadata, emit it now. +  writeFunctionMetadata(F); + +  // Keep a running idea of what the instruction ID is. +  unsigned InstID = CstEnd; + +  bool NeedsMetadataAttachment = F.hasMetadata(); + +  DILocation *LastDL = nullptr; +  // Finally, emit all the instructions, in order. +  for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) +    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); +         I != E; ++I) { +      writeInstruction(*I, InstID, Vals); + +      if (!I->getType()->isVoidTy()) +        ++InstID; + +      // If the instruction has metadata, write a metadata attachment later. +      NeedsMetadataAttachment |= I->hasMetadataOtherThanDebugLoc(); + +      // If the instruction has a debug location, emit it. +      DILocation *DL = I->getDebugLoc(); +      if (!DL) +        continue; + +      if (DL == LastDL) { +        // Just repeat the same debug loc as last time. 
+        Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC_AGAIN, Vals); +        continue; +      } + +      Vals.push_back(DL->getLine()); +      Vals.push_back(DL->getColumn()); +      Vals.push_back(VE.getMetadataOrNullID(DL->getScope())); +      Vals.push_back(VE.getMetadataOrNullID(DL->getInlinedAt())); +      Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals); +      Vals.clear(); + +      LastDL = DL; +    } + +  // Emit names for all the instructions etc. +  if (auto *Symtab = F.getValueSymbolTable()) +    writeFunctionLevelValueSymbolTable(*Symtab); + +  if (NeedsMetadataAttachment) +    writeFunctionMetadataAttachment(F); +  if (VE.shouldPreserveUseListOrder()) +    writeUseListBlock(&F); +  VE.purgeFunction(); +  Stream.ExitBlock(); +} + +// Emit blockinfo, which defines the standard abbreviations etc. +void ModuleBitcodeWriter::writeBlockInfo() { +  // We only want to emit block info records for blocks that have multiple +  // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. +  // Other blocks can define their abbrevs inline. +  Stream.EnterBlockInfoBlock(); + +  { // 8-bit fixed-width VST_CODE_ENTRY/VST_CODE_BBENTRY strings. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); +    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) != +        VST_ENTRY_8_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  { // 7-bit fixed width VST_CODE_ENTRY strings. 
+    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); +    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) != +        VST_ENTRY_7_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // 6-bit char6 VST_CODE_ENTRY strings. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); +    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) != +        VST_ENTRY_6_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // 6-bit char6 VST_CODE_BBENTRY strings. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_BBENTRY)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); +    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) != +        VST_BBENTRY_6_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  { // SETTYPE abbrev for CONSTANTS_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, +                              VE.computeBitsRequiredForTypeIndicies())); +    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) != +        CONSTANTS_SETTYPE_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  { // INTEGER abbrev for CONSTANTS_BLOCK. 
+    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_INTEGER)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) != +        CONSTANTS_INTEGER_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  { // CE_CAST abbrev for CONSTANTS_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4));  // cast opc +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,       // typeid +                              VE.computeBitsRequiredForTypeIndicies())); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));    // value id + +    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) != +        CONSTANTS_CE_CAST_Abbrev) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // NULL abbrev for CONSTANTS_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_NULL)); +    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) != +        CONSTANTS_NULL_Abbrev) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  // FIXME: This should only use space for first class types! + +  { // INST_LOAD abbrev for FUNCTION_BLOCK. 
+    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,    // dest ty +                              VE.computeBitsRequiredForTypeIndicies())); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_LOAD_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // INST_BINOP abbrev for FUNCTION_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_BINOP_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // INST_BINOP_FLAGS abbrev for FUNCTION_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // flags +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_BINOP_FLAGS_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // INST_CAST abbrev for FUNCTION_BLOCK. 
+    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));    // OpVal +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,       // dest ty +                              VE.computeBitsRequiredForTypeIndicies())); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4));  // opc +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_CAST_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  { // INST_RET abbrev for FUNCTION_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET)); +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_RET_VOID_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // INST_RET abbrev for FUNCTION_BLOCK. +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ValID +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_RET_VAL_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { // INST_UNREACHABLE abbrev for FUNCTION_BLOCK. 
+    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNREACHABLE)); +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_UNREACHABLE_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } +  { +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_GEP)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty +                              Log2_32_Ceil(VE.getTypes().size() + 1))); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); +    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) != +        FUNCTION_INST_GEP_ABBREV) +      llvm_unreachable("Unexpected abbrev ordering!"); +  } + +  Stream.ExitBlock(); +} + +/// Write the module path strings, currently only used when generating +/// a combined index file. +void IndexBitcodeWriter::writeModStrings() { +  Stream.EnterSubblock(bitc::MODULE_STRTAB_BLOCK_ID, 3); + +  // TODO: See which abbrev sizes we actually need to emit + +  // 8-bit fixed-width MST_ENTRY strings. +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); +  unsigned Abbrev8Bit = Stream.EmitAbbrev(std::move(Abbv)); + +  // 7-bit fixed width MST_ENTRY strings. +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); +  unsigned Abbrev7Bit = Stream.EmitAbbrev(std::move(Abbv)); + +  // 6-bit char6 MST_ENTRY strings. 
+  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); +  unsigned Abbrev6Bit = Stream.EmitAbbrev(std::move(Abbv)); + +  // Module Hash, 160 bits SHA1. Optionally, emitted after each MST_CODE_ENTRY. +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_HASH)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); +  unsigned AbbrevHash = Stream.EmitAbbrev(std::move(Abbv)); + +  SmallVector<unsigned, 64> Vals; +  forEachModule( +      [&](const StringMapEntry<std::pair<uint64_t, ModuleHash>> &MPSE) { +        StringRef Key = MPSE.getKey(); +        const auto &Value = MPSE.getValue(); +        StringEncoding Bits = getStringEncoding(Key); +        unsigned AbbrevToUse = Abbrev8Bit; +        if (Bits == SE_Char6) +          AbbrevToUse = Abbrev6Bit; +        else if (Bits == SE_Fixed7) +          AbbrevToUse = Abbrev7Bit; + +        Vals.push_back(Value.first); +        Vals.append(Key.begin(), Key.end()); + +        // Emit the finished record. +        Stream.EmitRecord(bitc::MST_CODE_ENTRY, Vals, AbbrevToUse); + +        // Emit an optional hash for the module now +        const auto &Hash = Value.second; +        if (llvm::any_of(Hash, [](uint32_t H) { return H; })) { +          Vals.assign(Hash.begin(), Hash.end()); +          // Emit the hash record. 
+          Stream.EmitRecord(bitc::MST_CODE_HASH, Vals, AbbrevHash); +        } + +        Vals.clear(); +      }); +  Stream.ExitBlock(); +} + +/// Write the function type metadata related records that need to appear before +/// a function summary entry (whether per-module or combined). +static void writeFunctionTypeMetadataRecords( +    BitstreamWriter &Stream, FunctionSummary *FS, +    std::set<GlobalValue::GUID> &ReferencedTypeIds) { +  if (!FS->type_tests().empty()) { +    Stream.EmitRecord(bitc::FS_TYPE_TESTS, FS->type_tests()); +    for (auto &TT : FS->type_tests()) +      ReferencedTypeIds.insert(TT); +  } + +  SmallVector<uint64_t, 64> Record; + +  auto WriteVFuncIdVec = [&](uint64_t Ty, +                             ArrayRef<FunctionSummary::VFuncId> VFs) { +    if (VFs.empty()) +      return; +    Record.clear(); +    for (auto &VF : VFs) { +      Record.push_back(VF.GUID); +      Record.push_back(VF.Offset); +      ReferencedTypeIds.insert(VF.GUID); +    } +    Stream.EmitRecord(Ty, Record); +  }; + +  WriteVFuncIdVec(bitc::FS_TYPE_TEST_ASSUME_VCALLS, +                  FS->type_test_assume_vcalls()); +  WriteVFuncIdVec(bitc::FS_TYPE_CHECKED_LOAD_VCALLS, +                  FS->type_checked_load_vcalls()); + +  auto WriteConstVCallVec = [&](uint64_t Ty, +                                ArrayRef<FunctionSummary::ConstVCall> VCs) { +    for (auto &VC : VCs) { +      Record.clear(); +      Record.push_back(VC.VFunc.GUID); +      ReferencedTypeIds.insert(VC.VFunc.GUID); +      Record.push_back(VC.VFunc.Offset); +      Record.insert(Record.end(), VC.Args.begin(), VC.Args.end()); +      Stream.EmitRecord(Ty, Record); +    } +  }; + +  WriteConstVCallVec(bitc::FS_TYPE_TEST_ASSUME_CONST_VCALL, +                     FS->type_test_assume_const_vcalls()); +  WriteConstVCallVec(bitc::FS_TYPE_CHECKED_LOAD_CONST_VCALL, +                     FS->type_checked_load_const_vcalls()); +} + +static void writeWholeProgramDevirtResolutionByArg( +    SmallVector<uint64_t, 64> 
&NameVals, const std::vector<uint64_t> &args, +    const WholeProgramDevirtResolution::ByArg &ByArg) { +  NameVals.push_back(args.size()); +  NameVals.insert(NameVals.end(), args.begin(), args.end()); + +  NameVals.push_back(ByArg.TheKind); +  NameVals.push_back(ByArg.Info); +  NameVals.push_back(ByArg.Byte); +  NameVals.push_back(ByArg.Bit); +} + +static void writeWholeProgramDevirtResolution( +    SmallVector<uint64_t, 64> &NameVals, StringTableBuilder &StrtabBuilder, +    uint64_t Id, const WholeProgramDevirtResolution &Wpd) { +  NameVals.push_back(Id); + +  NameVals.push_back(Wpd.TheKind); +  NameVals.push_back(StrtabBuilder.add(Wpd.SingleImplName)); +  NameVals.push_back(Wpd.SingleImplName.size()); + +  NameVals.push_back(Wpd.ResByArg.size()); +  for (auto &A : Wpd.ResByArg) +    writeWholeProgramDevirtResolutionByArg(NameVals, A.first, A.second); +} + +static void writeTypeIdSummaryRecord(SmallVector<uint64_t, 64> &NameVals, +                                     StringTableBuilder &StrtabBuilder, +                                     const std::string &Id, +                                     const TypeIdSummary &Summary) { +  NameVals.push_back(StrtabBuilder.add(Id)); +  NameVals.push_back(Id.size()); + +  NameVals.push_back(Summary.TTRes.TheKind); +  NameVals.push_back(Summary.TTRes.SizeM1BitWidth); +  NameVals.push_back(Summary.TTRes.AlignLog2); +  NameVals.push_back(Summary.TTRes.SizeM1); +  NameVals.push_back(Summary.TTRes.BitMask); +  NameVals.push_back(Summary.TTRes.InlineBits); + +  for (auto &W : Summary.WPDRes) +    writeWholeProgramDevirtResolution(NameVals, StrtabBuilder, W.first, +                                      W.second); +} + +// Helper to emit a single function summary record. 
+void ModuleBitcodeWriterBase::writePerModuleFunctionSummaryRecord( +    SmallVector<uint64_t, 64> &NameVals, GlobalValueSummary *Summary, +    unsigned ValueID, unsigned FSCallsAbbrev, unsigned FSCallsProfileAbbrev, +    const Function &F) { +  NameVals.push_back(ValueID); + +  FunctionSummary *FS = cast<FunctionSummary>(Summary); +  std::set<GlobalValue::GUID> ReferencedTypeIds; +  writeFunctionTypeMetadataRecords(Stream, FS, ReferencedTypeIds); + +  NameVals.push_back(getEncodedGVSummaryFlags(FS->flags())); +  NameVals.push_back(FS->instCount()); +  NameVals.push_back(getEncodedFFlags(FS->fflags())); +  NameVals.push_back(FS->refs().size()); + +  for (auto &RI : FS->refs()) +    NameVals.push_back(VE.getValueID(RI.getValue())); + +  bool HasProfileData = +      F.hasProfileData() || ForceSummaryEdgesCold != FunctionSummary::FSHT_None; +  for (auto &ECI : FS->calls()) { +    NameVals.push_back(getValueId(ECI.first)); +    if (HasProfileData) +      NameVals.push_back(static_cast<uint8_t>(ECI.second.Hotness)); +    else if (WriteRelBFToSummary) +      NameVals.push_back(ECI.second.RelBlockFreq); +  } + +  unsigned FSAbbrev = (HasProfileData ? FSCallsProfileAbbrev : FSCallsAbbrev); +  unsigned Code = +      (HasProfileData ? bitc::FS_PERMODULE_PROFILE +                      : (WriteRelBFToSummary ? bitc::FS_PERMODULE_RELBF +                                             : bitc::FS_PERMODULE)); + +  // Emit the finished record. +  Stream.EmitRecord(Code, NameVals, FSAbbrev); +  NameVals.clear(); +} + +// Collect the global value references in the given variable's initializer, +// and emit them in a summary record. 
+void ModuleBitcodeWriterBase::writeModuleLevelReferences( +    const GlobalVariable &V, SmallVector<uint64_t, 64> &NameVals, +    unsigned FSModRefsAbbrev) { +  auto VI = Index->getValueInfo(V.getGUID()); +  if (!VI || VI.getSummaryList().empty()) { +    // Only declarations should not have a summary (a declaration might however +    // have a summary if the def was in module level asm). +    assert(V.isDeclaration()); +    return; +  } +  auto *Summary = VI.getSummaryList()[0].get(); +  NameVals.push_back(VE.getValueID(&V)); +  GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary); +  NameVals.push_back(getEncodedGVSummaryFlags(VS->flags())); + +  unsigned SizeBeforeRefs = NameVals.size(); +  for (auto &RI : VS->refs()) +    NameVals.push_back(VE.getValueID(RI.getValue())); +  // Sort the refs for determinism output, the vector returned by FS->refs() has +  // been initialized from a DenseSet. +  llvm::sort(NameVals.begin() + SizeBeforeRefs, NameVals.end()); + +  Stream.EmitRecord(bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS, NameVals, +                    FSModRefsAbbrev); +  NameVals.clear(); +} + +// Current version for the summary. +// This is bumped whenever we introduce changes in the way some record are +// interpreted, like flags for instance. +static const uint64_t INDEX_VERSION = 4; + +/// Emit the per-module summary section alongside the rest of +/// the module's bitcode. +void ModuleBitcodeWriterBase::writePerModuleGlobalValueSummary() { +  // By default we compile with ThinLTO if the module has a summary, but the +  // client can request full LTO with a module flag. +  bool IsThinLTO = true; +  if (auto *MD = +          mdconst::extract_or_null<ConstantInt>(M.getModuleFlag("ThinLTO"))) +    IsThinLTO = MD->getZExtValue(); +  Stream.EnterSubblock(IsThinLTO ? 
bitc::GLOBALVAL_SUMMARY_BLOCK_ID +                                 : bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID, +                       4); + +  Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION}); + +  if (Index->begin() == Index->end()) { +    Stream.ExitBlock(); +    return; +  } + +  for (const auto &GVI : valueIds()) { +    Stream.EmitRecord(bitc::FS_VALUE_GUID, +                      ArrayRef<uint64_t>{GVI.second, GVI.first}); +  } + +  // Abbrev for FS_PERMODULE_PROFILE. +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_PROFILE)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // fflags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs +  // numrefs x valueid, n x (valueid, hotness) +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  unsigned FSCallsProfileAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Abbrev for FS_PERMODULE or FS_PERMODULE_RELBF. 
+  Abbv = std::make_shared<BitCodeAbbrev>(); +  if (WriteRelBFToSummary) +    Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_RELBF)); +  else +    Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // fflags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs +  // numrefs x valueid, n x (valueid [, rel_block_freq]) +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  unsigned FSCallsAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Abbrev for FS_PERMODULE_GLOBALVAR_INIT_REFS. +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));  // valueids +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  unsigned FSModRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Abbrev for FS_ALIAS. +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_ALIAS)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  unsigned FSAliasAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  SmallVector<uint64_t, 64> NameVals; +  // Iterate over the list of functions instead of the Index to +  // ensure the ordering is stable. +  for (const Function &F : M) { +    // Summary emission does not support anonymous functions, they have to +    // renamed using the anonymous function renaming pass. 
+    if (!F.hasName()) +      report_fatal_error("Unexpected anonymous function when writing summary"); + +    ValueInfo VI = Index->getValueInfo(F.getGUID()); +    if (!VI || VI.getSummaryList().empty()) { +      // Only declarations should not have a summary (a declaration might +      // however have a summary if the def was in module level asm). +      assert(F.isDeclaration()); +      continue; +    } +    auto *Summary = VI.getSummaryList()[0].get(); +    writePerModuleFunctionSummaryRecord(NameVals, Summary, VE.getValueID(&F), +                                        FSCallsAbbrev, FSCallsProfileAbbrev, F); +  } + +  // Capture references from GlobalVariable initializers, which are outside +  // of a function scope. +  for (const GlobalVariable &G : M.globals()) +    writeModuleLevelReferences(G, NameVals, FSModRefsAbbrev); + +  for (const GlobalAlias &A : M.aliases()) { +    auto *Aliasee = A.getBaseObject(); +    if (!Aliasee->hasName()) +      // Nameless function don't have an entry in the summary, skip it. +      continue; +    auto AliasId = VE.getValueID(&A); +    auto AliaseeId = VE.getValueID(Aliasee); +    NameVals.push_back(AliasId); +    auto *Summary = Index->getGlobalValueSummary(A); +    AliasSummary *AS = cast<AliasSummary>(Summary); +    NameVals.push_back(getEncodedGVSummaryFlags(AS->flags())); +    NameVals.push_back(AliaseeId); +    Stream.EmitRecord(bitc::FS_ALIAS, NameVals, FSAliasAbbrev); +    NameVals.clear(); +  } + +  Stream.ExitBlock(); +} + +/// Emit the combined summary section into the combined index file. +void IndexBitcodeWriter::writeCombinedGlobalValueSummary() { +  Stream.EnterSubblock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID, 3); +  Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION}); + +  // Write the index flags. 
+  uint64_t Flags = 0; +  if (Index.withGlobalValueDeadStripping()) +    Flags |= 0x1; +  if (Index.skipModuleByDistributedBackend()) +    Flags |= 0x2; +  Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Flags}); + +  for (const auto &GVI : valueIds()) { +    Stream.EmitRecord(bitc::FS_VALUE_GUID, +                      ArrayRef<uint64_t>{GVI.second, GVI.first}); +  } + +  // Abbrev for FS_COMBINED. +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // fflags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs +  // numrefs x valueid, n x (valueid) +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  unsigned FSCallsAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Abbrev for FS_COMBINED_PROFILE. +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_PROFILE)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // fflags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs +  // numrefs x valueid, n x (valueid, hotness) +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  unsigned FSCallsProfileAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Abbrev for FS_COMBINED_GLOBALVAR_INIT_REFS. 
+  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_GLOBALVAR_INIT_REFS)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));    // valueids +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); +  unsigned FSModRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // Abbrev for FS_COMBINED_ALIAS. +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_ALIAS)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid +  unsigned FSAliasAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +  // The aliases are emitted as a post-pass, and will point to the value +  // id of the aliasee. Save them in a vector for post-processing. +  SmallVector<AliasSummary *, 64> Aliases; + +  // Save the value id for each summary for alias emission. +  DenseMap<const GlobalValueSummary *, unsigned> SummaryToValueIdMap; + +  SmallVector<uint64_t, 64> NameVals; + +  // Set that will be populated during call to writeFunctionTypeMetadataRecords +  // with the type ids referenced by this index file. +  std::set<GlobalValue::GUID> ReferencedTypeIds; + +  // For local linkage, we also emit the original name separately +  // immediately after the record. 
+  auto MaybeEmitOriginalName = [&](GlobalValueSummary &S) { +    if (!GlobalValue::isLocalLinkage(S.linkage())) +      return; +    NameVals.push_back(S.getOriginalName()); +    Stream.EmitRecord(bitc::FS_COMBINED_ORIGINAL_NAME, NameVals); +    NameVals.clear(); +  }; + +  forEachSummary([&](GVInfo I, bool IsAliasee) { +    GlobalValueSummary *S = I.second; +    assert(S); + +    auto ValueId = getValueId(I.first); +    assert(ValueId); +    SummaryToValueIdMap[S] = *ValueId; + +    // If this is invoked for an aliasee, we want to record the above +    // mapping, but then not emit a summary entry (if the aliasee is +    // to be imported, we will invoke this separately with IsAliasee=false). +    if (IsAliasee) +      return; + +    if (auto *AS = dyn_cast<AliasSummary>(S)) { +      // Will process aliases as a post-pass because the reader wants all +      // global to be loaded first. +      Aliases.push_back(AS); +      return; +    } + +    if (auto *VS = dyn_cast<GlobalVarSummary>(S)) { +      NameVals.push_back(*ValueId); +      NameVals.push_back(Index.getModuleId(VS->modulePath())); +      NameVals.push_back(getEncodedGVSummaryFlags(VS->flags())); +      for (auto &RI : VS->refs()) { +        auto RefValueId = getValueId(RI.getGUID()); +        if (!RefValueId) +          continue; +        NameVals.push_back(*RefValueId); +      } + +      // Emit the finished record. 
+      Stream.EmitRecord(bitc::FS_COMBINED_GLOBALVAR_INIT_REFS, NameVals, +                        FSModRefsAbbrev); +      NameVals.clear(); +      MaybeEmitOriginalName(*S); +      return; +    } + +    auto *FS = cast<FunctionSummary>(S); +    writeFunctionTypeMetadataRecords(Stream, FS, ReferencedTypeIds); + +    NameVals.push_back(*ValueId); +    NameVals.push_back(Index.getModuleId(FS->modulePath())); +    NameVals.push_back(getEncodedGVSummaryFlags(FS->flags())); +    NameVals.push_back(FS->instCount()); +    NameVals.push_back(getEncodedFFlags(FS->fflags())); +    // Fill in below +    NameVals.push_back(0); + +    unsigned Count = 0; +    for (auto &RI : FS->refs()) { +      auto RefValueId = getValueId(RI.getGUID()); +      if (!RefValueId) +        continue; +      NameVals.push_back(*RefValueId); +      Count++; +    } +    NameVals[5] = Count; + +    bool HasProfileData = false; +    for (auto &EI : FS->calls()) { +      HasProfileData |= +          EI.second.getHotness() != CalleeInfo::HotnessType::Unknown; +      if (HasProfileData) +        break; +    } + +    for (auto &EI : FS->calls()) { +      // If this GUID doesn't have a value id, it doesn't have a function +      // summary and we don't need to record any calls to it. +      GlobalValue::GUID GUID = EI.first.getGUID(); +      auto CallValueId = getValueId(GUID); +      if (!CallValueId) { +        // For SamplePGO, the indirect call targets for local functions will +        // have its original name annotated in profile. We try to find the +        // corresponding PGOFuncName as the GUID. +        GUID = Index.getGUIDFromOriginalID(GUID); +        if (GUID == 0) +          continue; +        CallValueId = getValueId(GUID); +        if (!CallValueId) +          continue; +        // The mapping from OriginalId to GUID may return a GUID +        // that corresponds to a static variable. Filter it out here. 
+        // This can happen when +        // 1) There is a call to a library function which does not have +        // a CallValidId; +        // 2) There is a static variable with the  OriginalGUID identical +        // to the GUID of the library function in 1); +        // When this happens, the logic for SamplePGO kicks in and +        // the static variable in 2) will be found, which needs to be +        // filtered out. +        auto *GVSum = Index.getGlobalValueSummary(GUID, false); +        if (GVSum && +            GVSum->getSummaryKind() == GlobalValueSummary::GlobalVarKind) +          continue; +      } +      NameVals.push_back(*CallValueId); +      if (HasProfileData) +        NameVals.push_back(static_cast<uint8_t>(EI.second.Hotness)); +    } + +    unsigned FSAbbrev = (HasProfileData ? FSCallsProfileAbbrev : FSCallsAbbrev); +    unsigned Code = +        (HasProfileData ? bitc::FS_COMBINED_PROFILE : bitc::FS_COMBINED); + +    // Emit the finished record. +    Stream.EmitRecord(Code, NameVals, FSAbbrev); +    NameVals.clear(); +    MaybeEmitOriginalName(*S); +  }); + +  for (auto *AS : Aliases) { +    auto AliasValueId = SummaryToValueIdMap[AS]; +    assert(AliasValueId); +    NameVals.push_back(AliasValueId); +    NameVals.push_back(Index.getModuleId(AS->modulePath())); +    NameVals.push_back(getEncodedGVSummaryFlags(AS->flags())); +    auto AliaseeValueId = SummaryToValueIdMap[&AS->getAliasee()]; +    assert(AliaseeValueId); +    NameVals.push_back(AliaseeValueId); + +    // Emit the finished record. 
+    Stream.EmitRecord(bitc::FS_COMBINED_ALIAS, NameVals, FSAliasAbbrev); +    NameVals.clear(); +    MaybeEmitOriginalName(*AS); +  } + +  if (!Index.cfiFunctionDefs().empty()) { +    for (auto &S : Index.cfiFunctionDefs()) { +      NameVals.push_back(StrtabBuilder.add(S)); +      NameVals.push_back(S.size()); +    } +    Stream.EmitRecord(bitc::FS_CFI_FUNCTION_DEFS, NameVals); +    NameVals.clear(); +  } + +  if (!Index.cfiFunctionDecls().empty()) { +    for (auto &S : Index.cfiFunctionDecls()) { +      NameVals.push_back(StrtabBuilder.add(S)); +      NameVals.push_back(S.size()); +    } +    Stream.EmitRecord(bitc::FS_CFI_FUNCTION_DECLS, NameVals); +    NameVals.clear(); +  } + +  if (!Index.typeIds().empty()) { +    for (auto &S : Index.typeIds()) { +      // Skip if not referenced in any GV summary within this index file. +      if (!ReferencedTypeIds.count(GlobalValue::getGUID(S.first))) +        continue; +      writeTypeIdSummaryRecord(NameVals, StrtabBuilder, S.first, S.second); +      Stream.EmitRecord(bitc::FS_TYPE_ID, NameVals); +      NameVals.clear(); +    } +  } + +  Stream.ExitBlock(); +} + +/// Create the "IDENTIFICATION_BLOCK_ID" containing a single string with the +/// current llvm version, and a record for the epoch number. 
+static void writeIdentificationBlock(BitstreamWriter &Stream) { +  Stream.EnterSubblock(bitc::IDENTIFICATION_BLOCK_ID, 5); + +  // Write the "user readable" string identifying the bitcode producer +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_STRING)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); +  auto StringAbbrev = Stream.EmitAbbrev(std::move(Abbv)); +  writeStringRecord(Stream, bitc::IDENTIFICATION_CODE_STRING, +                    "LLVM" LLVM_VERSION_STRING, StringAbbrev); + +  // Write the epoch version +  Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_EPOCH)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); +  auto EpochAbbrev = Stream.EmitAbbrev(std::move(Abbv)); +  SmallVector<unsigned, 1> Vals = {bitc::BITCODE_CURRENT_EPOCH}; +  Stream.EmitRecord(bitc::IDENTIFICATION_CODE_EPOCH, Vals, EpochAbbrev); +  Stream.ExitBlock(); +} + +void ModuleBitcodeWriter::writeModuleHash(size_t BlockStartPos) { +  // Emit the module's hash. +  // MODULE_CODE_HASH: [5*i32] +  if (GenerateHash) { +    uint32_t Vals[5]; +    Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&(Buffer)[BlockStartPos], +                                    Buffer.size() - BlockStartPos)); +    StringRef Hash = Hasher.result(); +    for (int Pos = 0; Pos < 20; Pos += 4) { +      Vals[Pos / 4] = support::endian::read32be(Hash.data() + Pos); +    } + +    // Emit the finished record. +    Stream.EmitRecord(bitc::MODULE_CODE_HASH, Vals); + +    if (ModHash) +      // Save the written hash value. 
+      std::copy(std::begin(Vals), std::end(Vals), std::begin(*ModHash)); +  } +} + +void ModuleBitcodeWriter::write() { +  writeIdentificationBlock(Stream); + +  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); +  size_t BlockStartPos = Buffer.size(); + +  writeModuleVersion(); + +  // Emit blockinfo, which defines the standard abbreviations etc. +  writeBlockInfo(); + +  // Emit information about attribute groups. +  writeAttributeGroupTable(); + +  // Emit information about parameter attributes. +  writeAttributeTable(); + +  // Emit information describing all of the types in the module. +  writeTypeTable(); + +  writeComdats(); + +  // Emit top-level description of module, including target triple, inline asm, +  // descriptors for global variables, and function prototype info. +  writeModuleInfo(); + +  // Emit constants. +  writeModuleConstants(); + +  // Emit metadata kind names. +  writeModuleMetadataKinds(); + +  // Emit metadata. +  writeModuleMetadata(); + +  // Emit module-level use-lists. +  if (VE.shouldPreserveUseListOrder()) +    writeUseListBlock(nullptr); + +  writeOperandBundleTags(); +  writeSyncScopeNames(); + +  // Emit function bodies. +  DenseMap<const Function *, uint64_t> FunctionToBitcodeIndex; +  for (Module::const_iterator F = M.begin(), E = M.end(); F != E; ++F) +    if (!F->isDeclaration()) +      writeFunction(*F, FunctionToBitcodeIndex); + +  // Need to write after the above call to WriteFunction which populates +  // the summary information in the index. 
+  if (Index) +    writePerModuleGlobalValueSummary(); + +  writeGlobalValueSymbolTable(FunctionToBitcodeIndex); + +  writeModuleHash(BlockStartPos); + +  Stream.ExitBlock(); +} + +static void writeInt32ToBuffer(uint32_t Value, SmallVectorImpl<char> &Buffer, +                               uint32_t &Position) { +  support::endian::write32le(&Buffer[Position], Value); +  Position += 4; +} + +/// If generating a bc file on darwin, we have to emit a +/// header and trailer to make it compatible with the system archiver.  To do +/// this we emit the following header, and then emit a trailer that pads the +/// file out to be a multiple of 16 bytes. +/// +/// struct bc_header { +///   uint32_t Magic;         // 0x0B17C0DE +///   uint32_t Version;       // Version, currently always 0. +///   uint32_t BitcodeOffset; // Offset to traditional bitcode file. +///   uint32_t BitcodeSize;   // Size of traditional bitcode file. +///   uint32_t CPUType;       // CPU specifier. +///   ... potentially more later ... +/// }; +static void emitDarwinBCHeaderAndTrailer(SmallVectorImpl<char> &Buffer, +                                         const Triple &TT) { +  unsigned CPUType = ~0U; + +  // Match x86_64-*, i[3-9]86-*, powerpc-*, powerpc64-*, arm-*, thumb-*, +  // armv[0-9]-*, thumbv[0-9]-*, armv5te-*, or armv6t2-*. The CPUType is a magic +  // number from /usr/include/mach/machine.h.  It is ok to reproduce the +  // specific constants here because they are implicitly part of the Darwin ABI. 
+  enum { +    DARWIN_CPU_ARCH_ABI64      = 0x01000000, +    DARWIN_CPU_TYPE_X86        = 7, +    DARWIN_CPU_TYPE_ARM        = 12, +    DARWIN_CPU_TYPE_POWERPC    = 18 +  }; + +  Triple::ArchType Arch = TT.getArch(); +  if (Arch == Triple::x86_64) +    CPUType = DARWIN_CPU_TYPE_X86 | DARWIN_CPU_ARCH_ABI64; +  else if (Arch == Triple::x86) +    CPUType = DARWIN_CPU_TYPE_X86; +  else if (Arch == Triple::ppc) +    CPUType = DARWIN_CPU_TYPE_POWERPC; +  else if (Arch == Triple::ppc64) +    CPUType = DARWIN_CPU_TYPE_POWERPC | DARWIN_CPU_ARCH_ABI64; +  else if (Arch == Triple::arm || Arch == Triple::thumb) +    CPUType = DARWIN_CPU_TYPE_ARM; + +  // Traditional Bitcode starts after header. +  assert(Buffer.size() >= BWH_HeaderSize && +         "Expected header size to be reserved"); +  unsigned BCOffset = BWH_HeaderSize; +  unsigned BCSize = Buffer.size() - BWH_HeaderSize; + +  // Write the magic and version. +  unsigned Position = 0; +  writeInt32ToBuffer(0x0B17C0DE, Buffer, Position); +  writeInt32ToBuffer(0, Buffer, Position); // Version. +  writeInt32ToBuffer(BCOffset, Buffer, Position); +  writeInt32ToBuffer(BCSize, Buffer, Position); +  writeInt32ToBuffer(CPUType, Buffer, Position); + +  // If the file is not a multiple of 16 bytes, insert dummy padding. +  while (Buffer.size() & 15) +    Buffer.push_back(0); +} + +/// Helper to write the header common to all bitcode files. +static void writeBitcodeHeader(BitstreamWriter &Stream) { +  // Emit the file header. 
+  Stream.Emit((unsigned)'B', 8); +  Stream.Emit((unsigned)'C', 8); +  Stream.Emit(0x0, 4); +  Stream.Emit(0xC, 4); +  Stream.Emit(0xE, 4); +  Stream.Emit(0xD, 4); +} + +BitcodeWriter::BitcodeWriter(SmallVectorImpl<char> &Buffer) +    : Buffer(Buffer), Stream(new BitstreamWriter(Buffer)) { +  writeBitcodeHeader(*Stream); +} + +BitcodeWriter::~BitcodeWriter() { assert(WroteStrtab); } + +void BitcodeWriter::writeBlob(unsigned Block, unsigned Record, StringRef Blob) { +  Stream->EnterSubblock(Block, 3); + +  auto Abbv = std::make_shared<BitCodeAbbrev>(); +  Abbv->Add(BitCodeAbbrevOp(Record)); +  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); +  auto AbbrevNo = Stream->EmitAbbrev(std::move(Abbv)); + +  Stream->EmitRecordWithBlob(AbbrevNo, ArrayRef<uint64_t>{Record}, Blob); + +  Stream->ExitBlock(); +} + +void BitcodeWriter::writeSymtab() { +  assert(!WroteStrtab && !WroteSymtab); + +  // If any module has module-level inline asm, we will require a registered asm +  // parser for the target so that we can create an accurate symbol table for +  // the module. +  for (Module *M : Mods) { +    if (M->getModuleInlineAsm().empty()) +      continue; + +    std::string Err; +    const Triple TT(M->getTargetTriple()); +    const Target *T = TargetRegistry::lookupTarget(TT.str(), Err); +    if (!T || !T->hasMCAsmParser()) +      return; +  } + +  WroteSymtab = true; +  SmallVector<char, 0> Symtab; +  // The irsymtab::build function may be unable to create a symbol table if the +  // module is malformed (e.g. it contains an invalid alias). Writing a symbol +  // table is not required for correctness, but we still want to be able to +  // write malformed modules to bitcode files, so swallow the error. 
+  if (Error E = irsymtab::build(Mods, Symtab, StrtabBuilder, Alloc)) { +    consumeError(std::move(E)); +    return; +  } + +  writeBlob(bitc::SYMTAB_BLOCK_ID, bitc::SYMTAB_BLOB, +            {Symtab.data(), Symtab.size()}); +} + +void BitcodeWriter::writeStrtab() { +  assert(!WroteStrtab); + +  std::vector<char> Strtab; +  StrtabBuilder.finalizeInOrder(); +  Strtab.resize(StrtabBuilder.getSize()); +  StrtabBuilder.write((uint8_t *)Strtab.data()); + +  writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, +            {Strtab.data(), Strtab.size()}); + +  WroteStrtab = true; +} + +void BitcodeWriter::copyStrtab(StringRef Strtab) { +  writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, Strtab); +  WroteStrtab = true; +} + +void BitcodeWriter::writeModule(const Module &M, +                                bool ShouldPreserveUseListOrder, +                                const ModuleSummaryIndex *Index, +                                bool GenerateHash, ModuleHash *ModHash) { +  assert(!WroteStrtab); + +  // The Mods vector is used by irsymtab::build, which requires non-const +  // Modules in case it needs to materialize metadata. But the bitcode writer +  // requires that the module is materialized, so we can cast to non-const here, +  // after checking that it is in fact materialized. +  assert(M.isMaterialized()); +  Mods.push_back(const_cast<Module *>(&M)); + +  ModuleBitcodeWriter ModuleWriter(M, Buffer, StrtabBuilder, *Stream, +                                   ShouldPreserveUseListOrder, Index, +                                   GenerateHash, ModHash); +  ModuleWriter.write(); +} + +void BitcodeWriter::writeIndex( +    const ModuleSummaryIndex *Index, +    const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) { +  IndexBitcodeWriter IndexWriter(*Stream, StrtabBuilder, *Index, +                                 ModuleToSummariesForIndex); +  IndexWriter.write(); +} + +/// Write the specified module to the specified output stream. 
+void llvm::WriteBitcodeToFile(const Module &M, raw_ostream &Out, +                              bool ShouldPreserveUseListOrder, +                              const ModuleSummaryIndex *Index, +                              bool GenerateHash, ModuleHash *ModHash) { +  SmallVector<char, 0> Buffer; +  Buffer.reserve(256*1024); + +  // If this is darwin or another generic macho target, reserve space for the +  // header. +  Triple TT(M.getTargetTriple()); +  if (TT.isOSDarwin() || TT.isOSBinFormatMachO()) +    Buffer.insert(Buffer.begin(), BWH_HeaderSize, 0); + +  BitcodeWriter Writer(Buffer); +  Writer.writeModule(M, ShouldPreserveUseListOrder, Index, GenerateHash, +                     ModHash); +  Writer.writeSymtab(); +  Writer.writeStrtab(); + +  if (TT.isOSDarwin() || TT.isOSBinFormatMachO()) +    emitDarwinBCHeaderAndTrailer(Buffer, TT); + +  // Write the generated bitstream to "Out". +  Out.write((char*)&Buffer.front(), Buffer.size()); +} + +void IndexBitcodeWriter::write() { +  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); + +  writeModuleVersion(); + +  // Write the module paths in the combined index. +  writeModStrings(); + +  // Write the summary combined index records. +  writeCombinedGlobalValueSummary(); + +  Stream.ExitBlock(); +} + +// Write the specified module summary index to the given raw output stream, +// where it will be written in a new bitcode block. This is used when +// writing the combined index file for ThinLTO. When writing a subset of the +// index for a distributed backend, provide a \p ModuleToSummariesForIndex map. 
+void llvm::WriteIndexToFile(
+    const ModuleSummaryIndex &Index, raw_ostream &Out,
+    const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
+  SmallVector<char, 0> Buffer;
+  Buffer.reserve(256 * 1024);
+
+  BitcodeWriter Writer(Buffer);
+  Writer.writeIndex(&Index, ModuleToSummariesForIndex);
+  Writer.writeStrtab();
+
+  Out.write((char *)&Buffer.front(), Buffer.size());
+}
+
+namespace {
+
+/// Class to manage the bitcode writing for a thin link bitcode file.
+class ThinLinkBitcodeWriter : public ModuleBitcodeWriterBase {
+  /// ModHash is for use in ThinLTO incremental build, generated while writing
+  /// the module bitcode file.
+  const ModuleHash *ModHash;
+
+public:
+  ThinLinkBitcodeWriter(const Module &M, StringTableBuilder &StrtabBuilder,
+                        BitstreamWriter &Stream,
+                        const ModuleSummaryIndex &Index,
+                        const ModuleHash &ModHash)
+      : ModuleBitcodeWriterBase(M, StrtabBuilder, Stream,
+                                /*ShouldPreserveUseListOrder=*/false, &Index),
+        ModHash(&ModHash) {}
+
+  void write();
+
+private:
+  void writeSimplifiedModuleInfo();
+};
+
+} // end anonymous namespace
+
+// This function writes a simplified module info for the thin link bitcode
+// file. It only contains the source file name along with the name (the offset
+// and size in strtab) and linkage for global values. For the global value info
+// entry, in order to keep linkage at offset 5, there are three zeros used
+// as padding.
+void ThinLinkBitcodeWriter::writeSimplifiedModuleInfo() {
+  SmallVector<unsigned, 64> Vals;
+  // Emit the module's source file name.
+  { +    StringEncoding Bits = getStringEncoding(M.getSourceFileName()); +    BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8); +    if (Bits == SE_Char6) +      AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6); +    else if (Bits == SE_Fixed7) +      AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7); + +    // MODULE_CODE_SOURCE_FILENAME: [namechar x N] +    auto Abbv = std::make_shared<BitCodeAbbrev>(); +    Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME)); +    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); +    Abbv->Add(AbbrevOpToUse); +    unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv)); + +    for (const auto P : M.getSourceFileName()) +      Vals.push_back((unsigned char)P); + +    Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev); +    Vals.clear(); +  } + +  // Emit the global variable information. +  for (const GlobalVariable &GV : M.globals()) { +    // GLOBALVAR: [strtab offset, strtab size, 0, 0, 0, linkage] +    Vals.push_back(StrtabBuilder.add(GV.getName())); +    Vals.push_back(GV.getName().size()); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(getEncodedLinkage(GV)); + +    Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals); +    Vals.clear(); +  } + +  // Emit the function proto information. +  for (const Function &F : M) { +    // FUNCTION:  [strtab offset, strtab size, 0, 0, 0, linkage] +    Vals.push_back(StrtabBuilder.add(F.getName())); +    Vals.push_back(F.getName().size()); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(getEncodedLinkage(F)); + +    Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals); +    Vals.clear(); +  } + +  // Emit the alias information. 
+  for (const GlobalAlias &A : M.aliases()) { +    // ALIAS: [strtab offset, strtab size, 0, 0, 0, linkage] +    Vals.push_back(StrtabBuilder.add(A.getName())); +    Vals.push_back(A.getName().size()); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(getEncodedLinkage(A)); + +    Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals); +    Vals.clear(); +  } + +  // Emit the ifunc information. +  for (const GlobalIFunc &I : M.ifuncs()) { +    // IFUNC: [strtab offset, strtab size, 0, 0, 0, linkage] +    Vals.push_back(StrtabBuilder.add(I.getName())); +    Vals.push_back(I.getName().size()); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(0); +    Vals.push_back(getEncodedLinkage(I)); + +    Stream.EmitRecord(bitc::MODULE_CODE_IFUNC, Vals); +    Vals.clear(); +  } +} + +void ThinLinkBitcodeWriter::write() { +  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); + +  writeModuleVersion(); + +  writeSimplifiedModuleInfo(); + +  writePerModuleGlobalValueSummary(); + +  // Write module hash. +  Stream.EmitRecord(bitc::MODULE_CODE_HASH, ArrayRef<uint32_t>(*ModHash)); + +  Stream.ExitBlock(); +} + +void BitcodeWriter::writeThinLinkBitcode(const Module &M, +                                         const ModuleSummaryIndex &Index, +                                         const ModuleHash &ModHash) { +  assert(!WroteStrtab); + +  // The Mods vector is used by irsymtab::build, which requires non-const +  // Modules in case it needs to materialize metadata. But the bitcode writer +  // requires that the module is materialized, so we can cast to non-const here, +  // after checking that it is in fact materialized. 
+  assert(M.isMaterialized()); +  Mods.push_back(const_cast<Module *>(&M)); + +  ThinLinkBitcodeWriter ThinLinkWriter(M, StrtabBuilder, *Stream, Index, +                                       ModHash); +  ThinLinkWriter.write(); +} + +// Write the specified thin link bitcode file to the given raw output stream, +// where it will be written in a new bitcode block. This is used when +// writing the per-module index file for ThinLTO. +void llvm::WriteThinLinkBitcodeToFile(const Module &M, raw_ostream &Out, +                                      const ModuleSummaryIndex &Index, +                                      const ModuleHash &ModHash) { +  SmallVector<char, 0> Buffer; +  Buffer.reserve(256 * 1024); + +  BitcodeWriter Writer(Buffer); +  Writer.writeThinLinkBitcode(M, Index, ModHash); +  Writer.writeSymtab(); +  Writer.writeStrtab(); + +  Out.write((char *)&Buffer.front(), Buffer.size()); +} diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp new file mode 100644 index 000000000000..41212e575f8e --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp @@ -0,0 +1,86 @@ +//===- BitcodeWriterPass.cpp - Bitcode writing pass -----------------------===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// BitcodeWriterPass implementation. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Bitcode/BitcodeWriterPass.h" +#include "llvm/Analysis/ModuleSummaryAnalysis.h" +#include "llvm/Bitcode/BitcodeWriter.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/PassManager.h" +#include "llvm/Pass.h" +using namespace llvm; + +PreservedAnalyses BitcodeWriterPass::run(Module &M, ModuleAnalysisManager &AM) { +  const ModuleSummaryIndex *Index = +      EmitSummaryIndex ? &(AM.getResult<ModuleSummaryIndexAnalysis>(M)) +                       : nullptr; +  WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder, Index, EmitModuleHash); +  return PreservedAnalyses::all(); +} + +namespace { +  class WriteBitcodePass : public ModulePass { +    raw_ostream &OS; // raw_ostream to print on +    bool ShouldPreserveUseListOrder; +    bool EmitSummaryIndex; +    bool EmitModuleHash; + +  public: +    static char ID; // Pass identification, replacement for typeid +    WriteBitcodePass() : ModulePass(ID), OS(dbgs()) { +      initializeWriteBitcodePassPass(*PassRegistry::getPassRegistry()); +    } + +    explicit WriteBitcodePass(raw_ostream &o, bool ShouldPreserveUseListOrder, +                              bool EmitSummaryIndex, bool EmitModuleHash) +        : ModulePass(ID), OS(o), +          ShouldPreserveUseListOrder(ShouldPreserveUseListOrder), +          EmitSummaryIndex(EmitSummaryIndex), EmitModuleHash(EmitModuleHash) { +      initializeWriteBitcodePassPass(*PassRegistry::getPassRegistry()); +    } + +    StringRef getPassName() const override { return "Bitcode Writer"; } + +    bool runOnModule(Module &M) override { +      const ModuleSummaryIndex *Index = +          EmitSummaryIndex +              ? 
&(getAnalysis<ModuleSummaryIndexWrapperPass>().getIndex()) +              : nullptr; +      WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder, Index, +                         EmitModuleHash); +      return false; +    } +    void getAnalysisUsage(AnalysisUsage &AU) const override { +      AU.setPreservesAll(); +      if (EmitSummaryIndex) +        AU.addRequired<ModuleSummaryIndexWrapperPass>(); +    } +  }; +} + +char WriteBitcodePass::ID = 0; +INITIALIZE_PASS_BEGIN(WriteBitcodePass, "write-bitcode", "Write Bitcode", false, +                      true) +INITIALIZE_PASS_DEPENDENCY(ModuleSummaryIndexWrapperPass) +INITIALIZE_PASS_END(WriteBitcodePass, "write-bitcode", "Write Bitcode", false, +                    true) + +ModulePass *llvm::createBitcodeWriterPass(raw_ostream &Str, +                                          bool ShouldPreserveUseListOrder, +                                          bool EmitSummaryIndex, bool EmitModuleHash) { +  return new WriteBitcodePass(Str, ShouldPreserveUseListOrder, +                              EmitSummaryIndex, EmitModuleHash); +} + +bool llvm::isBitcodeWriterPass(Pass *P) { +  return P->getPassID() == (llvm::AnalysisID)&WriteBitcodePass::ID; +} diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp new file mode 100644 index 000000000000..d473741e8ceb --- /dev/null +++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp @@ -0,0 +1,1041 @@ +//===- ValueEnumerator.cpp - Number values and types for bitcode writer ---===// +// +//                     The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ValueEnumerator class. 
+// +//===----------------------------------------------------------------------===// + +#include "ValueEnumerator.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/DebugInfoMetadata.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalAlias.h" +#include "llvm/IR/GlobalIFunc.h" +#include "llvm/IR/GlobalObject.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/IR/Use.h" +#include "llvm/IR/UseListOrder.h" +#include "llvm/IR/User.h" +#include "llvm/IR/Value.h" +#include "llvm/IR/ValueSymbolTable.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <iterator> +#include <tuple> +#include <utility> +#include <vector> + +using namespace llvm; + +namespace { + +struct OrderMap { +  DenseMap<const Value *, std::pair<unsigned, bool>> IDs; +  unsigned LastGlobalConstantID = 0; +  unsigned LastGlobalValueID = 0; + +  OrderMap() = default; + +  bool isGlobalConstant(unsigned ID) const { +    return ID <= LastGlobalConstantID; +  } + +  bool isGlobalValue(unsigned ID) const { +    return ID <= LastGlobalValueID && !isGlobalConstant(ID); +  } + +  unsigned size() const { return IDs.size(); } +  std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; } + +  std::pair<unsigned, bool> lookup(const Value *V) const { +    return IDs.lookup(V); +  } + +  void index(const Value *V) { +    // Explicitly sequence get-size and insert-value 
operations to avoid UB. +    unsigned ID = IDs.size() + 1; +    IDs[V].first = ID; +  } +}; + +} // end anonymous namespace + +static void orderValue(const Value *V, OrderMap &OM) { +  if (OM.lookup(V).first) +    return; + +  if (const Constant *C = dyn_cast<Constant>(V)) +    if (C->getNumOperands() && !isa<GlobalValue>(C)) +      for (const Value *Op : C->operands()) +        if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op)) +          orderValue(Op, OM); + +  // Note: we cannot cache this lookup above, since inserting into the map +  // changes the map's size, and thus affects the other IDs. +  OM.index(V); +} + +static OrderMap orderModule(const Module &M) { +  // This needs to match the order used by ValueEnumerator::ValueEnumerator() +  // and ValueEnumerator::incorporateFunction(). +  OrderMap OM; + +  // In the reader, initializers of GlobalValues are set *after* all the +  // globals have been read.  Rather than awkwardly modeling this behaviour +  // directly in predictValueUseListOrderImpl(), just assign IDs to +  // initializers of GlobalValues before GlobalValues themselves to model this +  // implicitly. +  for (const GlobalVariable &G : M.globals()) +    if (G.hasInitializer()) +      if (!isa<GlobalValue>(G.getInitializer())) +        orderValue(G.getInitializer(), OM); +  for (const GlobalAlias &A : M.aliases()) +    if (!isa<GlobalValue>(A.getAliasee())) +      orderValue(A.getAliasee(), OM); +  for (const GlobalIFunc &I : M.ifuncs()) +    if (!isa<GlobalValue>(I.getResolver())) +      orderValue(I.getResolver(), OM); +  for (const Function &F : M) { +    for (const Use &U : F.operands()) +      if (!isa<GlobalValue>(U.get())) +        orderValue(U.get(), OM); +  } +  OM.LastGlobalConstantID = OM.size(); + +  // Initializers of GlobalValues are processed in +  // BitcodeReader::ResolveGlobalAndAliasInits().  
Match the order there rather +  // than ValueEnumerator, and match the code in predictValueUseListOrderImpl() +  // by giving IDs in reverse order. +  // +  // Since GlobalValues never reference each other directly (just through +  // initializers), their relative IDs only matter for determining order of +  // uses in their initializers. +  for (const Function &F : M) +    orderValue(&F, OM); +  for (const GlobalAlias &A : M.aliases()) +    orderValue(&A, OM); +  for (const GlobalIFunc &I : M.ifuncs()) +    orderValue(&I, OM); +  for (const GlobalVariable &G : M.globals()) +    orderValue(&G, OM); +  OM.LastGlobalValueID = OM.size(); + +  for (const Function &F : M) { +    if (F.isDeclaration()) +      continue; +    // Here we need to match the union of ValueEnumerator::incorporateFunction() +    // and WriteFunction().  Basic blocks are implicitly declared before +    // anything else (by declaring their size). +    for (const BasicBlock &BB : F) +      orderValue(&BB, OM); +    for (const Argument &A : F.args()) +      orderValue(&A, OM); +    for (const BasicBlock &BB : F) +      for (const Instruction &I : BB) +        for (const Value *Op : I.operands()) +          if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) || +              isa<InlineAsm>(*Op)) +            orderValue(Op, OM); +    for (const BasicBlock &BB : F) +      for (const Instruction &I : BB) +        orderValue(&I, OM); +  } +  return OM; +} + +static void predictValueUseListOrderImpl(const Value *V, const Function *F, +                                         unsigned ID, const OrderMap &OM, +                                         UseListOrderStack &Stack) { +  // Predict use-list order for this one. +  using Entry = std::pair<const Use *, unsigned>; +  SmallVector<Entry, 64> List; +  for (const Use &U : V->uses()) +    // Check if this user will be serialized. 
+    if (OM.lookup(U.getUser()).first) +      List.push_back(std::make_pair(&U, List.size())); + +  if (List.size() < 2) +    // We may have lost some users. +    return; + +  bool IsGlobalValue = OM.isGlobalValue(ID); +  llvm::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) { +    const Use *LU = L.first; +    const Use *RU = R.first; +    if (LU == RU) +      return false; + +    auto LID = OM.lookup(LU->getUser()).first; +    auto RID = OM.lookup(RU->getUser()).first; + +    // Global values are processed in reverse order. +    // +    // Moreover, initializers of GlobalValues are set *after* all the globals +    // have been read (despite having earlier IDs).  Rather than awkwardly +    // modeling this behaviour here, orderModule() has assigned IDs to +    // initializers of GlobalValues before GlobalValues themselves. +    if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID)) +      return LID < RID; + +    // If ID is 4, then expect: 7 6 5 1 2 3. +    if (LID < RID) { +      if (RID <= ID) +        if (!IsGlobalValue) // GlobalValue uses don't get reversed. +          return true; +      return false; +    } +    if (RID < LID) { +      if (LID <= ID) +        if (!IsGlobalValue) // GlobalValue uses don't get reversed. +          return false; +      return true; +    } + +    // LID and RID are equal, so we have different operands of the same user. +    // Assume operands are added in order for all instructions. +    if (LID <= ID) +      if (!IsGlobalValue) // GlobalValue uses don't get reversed. +        return LU->getOperandNo() < RU->getOperandNo(); +    return LU->getOperandNo() > RU->getOperandNo(); +  }); + +  if (std::is_sorted( +          List.begin(), List.end(), +          [](const Entry &L, const Entry &R) { return L.second < R.second; })) +    // Order is already correct. +    return; + +  // Store the shuffle. 
+  Stack.emplace_back(V, F, List.size()); +  assert(List.size() == Stack.back().Shuffle.size() && "Wrong size"); +  for (size_t I = 0, E = List.size(); I != E; ++I) +    Stack.back().Shuffle[I] = List[I].second; +} + +static void predictValueUseListOrder(const Value *V, const Function *F, +                                     OrderMap &OM, UseListOrderStack &Stack) { +  auto &IDPair = OM[V]; +  assert(IDPair.first && "Unmapped value"); +  if (IDPair.second) +    // Already predicted. +    return; + +  // Do the actual prediction. +  IDPair.second = true; +  if (!V->use_empty() && std::next(V->use_begin()) != V->use_end()) +    predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack); + +  // Recursive descent into constants. +  if (const Constant *C = dyn_cast<Constant>(V)) +    if (C->getNumOperands()) // Visit GlobalValues. +      for (const Value *Op : C->operands()) +        if (isa<Constant>(Op)) // Visit GlobalValues. +          predictValueUseListOrder(Op, F, OM, Stack); +} + +static UseListOrderStack predictUseListOrder(const Module &M) { +  OrderMap OM = orderModule(M); + +  // Use-list orders need to be serialized after all the users have been added +  // to a value, or else the shuffles will be incomplete.  Store them per +  // function in a stack. +  // +  // Aside from function order, the order of values doesn't matter much here. +  UseListOrderStack Stack; + +  // We want to visit the functions backward now so we can list function-local +  // constants in the last Function they're used in.  Module-level constants +  // have already been visited above. 
+  for (auto I = M.rbegin(), E = M.rend(); I != E; ++I) { +    const Function &F = *I; +    if (F.isDeclaration()) +      continue; +    for (const BasicBlock &BB : F) +      predictValueUseListOrder(&BB, &F, OM, Stack); +    for (const Argument &A : F.args()) +      predictValueUseListOrder(&A, &F, OM, Stack); +    for (const BasicBlock &BB : F) +      for (const Instruction &I : BB) +        for (const Value *Op : I.operands()) +          if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues. +            predictValueUseListOrder(Op, &F, OM, Stack); +    for (const BasicBlock &BB : F) +      for (const Instruction &I : BB) +        predictValueUseListOrder(&I, &F, OM, Stack); +  } + +  // Visit globals last, since the module-level use-list block will be seen +  // before the function bodies are processed. +  for (const GlobalVariable &G : M.globals()) +    predictValueUseListOrder(&G, nullptr, OM, Stack); +  for (const Function &F : M) +    predictValueUseListOrder(&F, nullptr, OM, Stack); +  for (const GlobalAlias &A : M.aliases()) +    predictValueUseListOrder(&A, nullptr, OM, Stack); +  for (const GlobalIFunc &I : M.ifuncs()) +    predictValueUseListOrder(&I, nullptr, OM, Stack); +  for (const GlobalVariable &G : M.globals()) +    if (G.hasInitializer()) +      predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack); +  for (const GlobalAlias &A : M.aliases()) +    predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack); +  for (const GlobalIFunc &I : M.ifuncs()) +    predictValueUseListOrder(I.getResolver(), nullptr, OM, Stack); +  for (const Function &F : M) { +    for (const Use &U : F.operands()) +      predictValueUseListOrder(U.get(), nullptr, OM, Stack); +  } + +  return Stack; +} + +static bool isIntOrIntVectorValue(const std::pair<const Value*, unsigned> &V) { +  return V.first->getType()->isIntOrIntVectorTy(); +} + +ValueEnumerator::ValueEnumerator(const Module &M, +                                 bool 
ShouldPreserveUseListOrder) +    : ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) { +  if (ShouldPreserveUseListOrder) +    UseListOrders = predictUseListOrder(M); + +  // Enumerate the global variables. +  for (const GlobalVariable &GV : M.globals()) +    EnumerateValue(&GV); + +  // Enumerate the functions. +  for (const Function & F : M) { +    EnumerateValue(&F); +    EnumerateAttributes(F.getAttributes()); +  } + +  // Enumerate the aliases. +  for (const GlobalAlias &GA : M.aliases()) +    EnumerateValue(&GA); + +  // Enumerate the ifuncs. +  for (const GlobalIFunc &GIF : M.ifuncs()) +    EnumerateValue(&GIF); + +  // Remember what is the cutoff between globalvalue's and other constants. +  unsigned FirstConstant = Values.size(); + +  // Enumerate the global variable initializers and attributes. +  for (const GlobalVariable &GV : M.globals()) { +    if (GV.hasInitializer()) +      EnumerateValue(GV.getInitializer()); +    if (GV.hasAttributes()) +      EnumerateAttributes(GV.getAttributesAsList(AttributeList::FunctionIndex)); +  } + +  // Enumerate the aliasees. +  for (const GlobalAlias &GA : M.aliases()) +    EnumerateValue(GA.getAliasee()); + +  // Enumerate the ifunc resolvers. +  for (const GlobalIFunc &GIF : M.ifuncs()) +    EnumerateValue(GIF.getResolver()); + +  // Enumerate any optional Function data. +  for (const Function &F : M) +    for (const Use &U : F.operands()) +      EnumerateValue(U.get()); + +  // Enumerate the metadata type. +  // +  // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode +  // only encodes the metadata type when it's used as a value. +  EnumerateType(Type::getMetadataTy(M.getContext())); + +  // Insert constants and metadata that are named at module level into the slot +  // pool so that the module symbol table can refer to them... 
+  EnumerateValueSymbolTable(M.getValueSymbolTable()); +  EnumerateNamedMetadata(M); + +  SmallVector<std::pair<unsigned, MDNode *>, 8> MDs; +  for (const GlobalVariable &GV : M.globals()) { +    MDs.clear(); +    GV.getAllMetadata(MDs); +    for (const auto &I : MDs) +      // FIXME: Pass GV to EnumerateMetadata and arrange for the bitcode writer +      // to write metadata to the global variable's own metadata block +      // (PR28134). +      EnumerateMetadata(nullptr, I.second); +  } + +  // Enumerate types used by function bodies and argument lists. +  for (const Function &F : M) { +    for (const Argument &A : F.args()) +      EnumerateType(A.getType()); + +    // Enumerate metadata attached to this function. +    MDs.clear(); +    F.getAllMetadata(MDs); +    for (const auto &I : MDs) +      EnumerateMetadata(F.isDeclaration() ? nullptr : &F, I.second); + +    for (const BasicBlock &BB : F) +      for (const Instruction &I : BB) { +        for (const Use &Op : I.operands()) { +          auto *MD = dyn_cast<MetadataAsValue>(&Op); +          if (!MD) { +            EnumerateOperandType(Op); +            continue; +          } + +          // Local metadata is enumerated during function-incorporation. +          if (isa<LocalAsMetadata>(MD->getMetadata())) +            continue; + +          EnumerateMetadata(&F, MD->getMetadata()); +        } +        EnumerateType(I.getType()); +        if (const CallInst *CI = dyn_cast<CallInst>(&I)) +          EnumerateAttributes(CI->getAttributes()); +        else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) +          EnumerateAttributes(II->getAttributes()); + +        // Enumerate metadata attached with this instruction. 
+        MDs.clear(); +        I.getAllMetadataOtherThanDebugLoc(MDs); +        for (unsigned i = 0, e = MDs.size(); i != e; ++i) +          EnumerateMetadata(&F, MDs[i].second); + +        // Don't enumerate the location directly -- it has a special record +        // type -- but enumerate its operands. +        if (DILocation *L = I.getDebugLoc()) +          for (const Metadata *Op : L->operands()) +            EnumerateMetadata(&F, Op); +      } +  } + +  // Optimize constant ordering. +  OptimizeConstants(FirstConstant, Values.size()); + +  // Organize metadata ordering. +  organizeMetadata(); +} + +unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const { +  InstructionMapType::const_iterator I = InstructionMap.find(Inst); +  assert(I != InstructionMap.end() && "Instruction is not mapped!"); +  return I->second; +} + +unsigned ValueEnumerator::getComdatID(const Comdat *C) const { +  unsigned ComdatID = Comdats.idFor(C); +  assert(ComdatID && "Comdat not found!"); +  return ComdatID; +} + +void ValueEnumerator::setInstructionID(const Instruction *I) { +  InstructionMap[I] = InstructionCount++; +} + +unsigned ValueEnumerator::getValueID(const Value *V) const { +  if (auto *MD = dyn_cast<MetadataAsValue>(V)) +    return getMetadataID(MD->getMetadata()); + +  ValueMapType::const_iterator I = ValueMap.find(V); +  assert(I != ValueMap.end() && "Value not in slotcalculator!"); +  return I->second-1; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void ValueEnumerator::dump() const { +  print(dbgs(), ValueMap, "Default"); +  dbgs() << '\n'; +  print(dbgs(), MetadataMap, "MetaData"); +  dbgs() << '\n'; +} +#endif + +void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map, +                            const char *Name) const { +  OS << "Map Name: " << Name << "\n"; +  OS << "Size: " << Map.size() << "\n"; +  for (ValueMapType::const_iterator I = Map.begin(), +         E = Map.end(); I != E; ++I) { +    const Value 
*V = I->first; +    if (V->hasName()) +      OS << "Value: " << V->getName(); +    else +      OS << "Value: [null]\n"; +    V->print(errs()); +    errs() << '\n'; + +    OS << " Uses(" << V->getNumUses() << "):"; +    for (const Use &U : V->uses()) { +      if (&U != &*V->use_begin()) +        OS << ","; +      if(U->hasName()) +        OS << " " << U->getName(); +      else +        OS << " [null]"; + +    } +    OS <<  "\n\n"; +  } +} + +void ValueEnumerator::print(raw_ostream &OS, const MetadataMapType &Map, +                            const char *Name) const { +  OS << "Map Name: " << Name << "\n"; +  OS << "Size: " << Map.size() << "\n"; +  for (auto I = Map.begin(), E = Map.end(); I != E; ++I) { +    const Metadata *MD = I->first; +    OS << "Metadata: slot = " << I->second.ID << "\n"; +    OS << "Metadata: function = " << I->second.F << "\n"; +    MD->print(OS); +    OS << "\n"; +  } +} + +/// OptimizeConstants - Reorder constant pool for denser encoding. +void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) { +  if (CstStart == CstEnd || CstStart+1 == CstEnd) return; + +  if (ShouldPreserveUseListOrder) +    // Optimizing constants makes the use-list order difficult to predict. +    // Disable it for now when trying to preserve the order. +    return; + +  std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd, +                   [this](const std::pair<const Value *, unsigned> &LHS, +                          const std::pair<const Value *, unsigned> &RHS) { +    // Sort by plane. +    if (LHS.first->getType() != RHS.first->getType()) +      return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType()); +    // Then by frequency. +    return LHS.second > RHS.second; +  }); + +  // Ensure that integer and vector of integer constants are at the start of the +  // constant pool.  This is important so that GEP structure indices come before +  // gep constant exprs. 
+  std::stable_partition(Values.begin() + CstStart, Values.begin() + CstEnd,
+                        isIntOrIntVectorValue);
+
+  // Rebuild the modified portion of ValueMap (IDs remain biased by one).
+  for (; CstStart != CstEnd; ++CstStart)
+    ValueMap[Values[CstStart].first] = CstStart+1;
+}
+
+/// EnumerateValueSymbolTable - Insert all of the values in the specified symbol
+/// table into the values table.
+void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) {
+  for (ValueSymbolTable::const_iterator VI = VST.begin(), VE = VST.end();
+       VI != VE; ++VI)
+    EnumerateValue(VI->getValue());
+}
+
+/// Insert all of the values referenced by named metadata in the specified
+/// module.
+void ValueEnumerator::EnumerateNamedMetadata(const Module &M) {
+  for (const auto &I : M.named_metadata())
+    EnumerateNamedMDNode(&I);
+}
+
+/// Enumerate every operand of a named metadata node as module-level metadata
+/// (null function => tag 0).
+void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
+  for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
+    EnumerateMetadata(nullptr, MD->getOperand(i));
+}
+
+/// Map a function to its metadata "function tag": 0 for module-level
+/// metadata, otherwise the function's value ID plus one.
+unsigned ValueEnumerator::getMetadataFunctionID(const Function *F) const {
+  return F ? getValueID(F) + 1 : 0;
+}
+
+void ValueEnumerator::EnumerateMetadata(const Function *F, const Metadata *MD) {
+  EnumerateMetadata(getMetadataFunctionID(F), MD);
+}
+
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+    const Function &F, const LocalAsMetadata *Local) {
+  EnumerateFunctionLocalMetadata(getMetadataFunctionID(&F), Local);
+}
+
+/// Clear the function tag on \p FirstMD and, transitively, on all of its
+/// already-mapped MDNode operands, demoting them to module-level metadata.
+void ValueEnumerator::dropFunctionFromMetadata(
+    MetadataMapType::value_type &FirstMD) {
+  SmallVector<const MDNode *, 64> Worklist;
+  auto push = [&Worklist](MetadataMapType::value_type &MD) {
+    auto &Entry = MD.second;
+
+    // Nothing to do if this metadata isn't tagged.
+    if (!Entry.F)
+      return;
+
+    // Drop the function tag.
+    Entry.F = 0;
+
+    // If this has an ID and is an MDNode, then its operands have entries as
+    // well.  We need to drop the function from them too.
+    if (Entry.ID)
+      if (auto *N = dyn_cast<MDNode>(MD.first))
+        Worklist.push_back(N);
+  };
+  push(FirstMD);
+  while (!Worklist.empty())
+    for (const Metadata *Op : Worklist.pop_back_val()->operands()) {
+      if (!Op)
+        continue;
+      auto MD = MetadataMap.find(Op);
+      if (MD != MetadataMap.end())
+        push(*MD);
+    }
+}
+
+/// Enumerate the transitive metadata graph rooted at MD under function tag F,
+/// assigning IDs so that uniqued subgraphs appear in post-order.
+void ValueEnumerator::EnumerateMetadata(unsigned F, const Metadata *MD) {
+  // It's vital for reader efficiency that uniqued subgraphs are done in
+  // post-order; it's expensive when their operands have forward references.
+  // If a distinct node is referenced from a uniqued node, it'll be delayed
+  // until the uniqued subgraph has been completely traversed.
+  SmallVector<const MDNode *, 32> DelayedDistinctNodes;
+
+  // Start by enumerating MD, and then work through its transitive operands in
+  // post-order.  This requires a depth-first search.
+  SmallVector<std::pair<const MDNode *, MDNode::op_iterator>, 32> Worklist;
+  if (const MDNode *N = enumerateMetadataImpl(F, MD))
+    Worklist.push_back(std::make_pair(N, N->op_begin()));
+
+  while (!Worklist.empty()) {
+    const MDNode *N = Worklist.back().first;
+
+    // Enumerate operands until we hit a new node.  We need to traverse these
+    // nodes' operands before visiting the rest of N's operands.
+    MDNode::op_iterator I = std::find_if(
+        Worklist.back().second, N->op_end(),
+        [&](const Metadata *MD) { return enumerateMetadataImpl(F, MD); });
+    if (I != N->op_end()) {
+      auto *Op = cast<MDNode>(*I);
+      // Remember where to resume scanning N's operands.
+      Worklist.back().second = ++I;
+
+      // Delay traversing Op if it's a distinct node and N is uniqued.
+      if (Op->isDistinct() && !N->isDistinct())
+        DelayedDistinctNodes.push_back(Op);
+      else
+        Worklist.push_back(std::make_pair(Op, Op->op_begin()));
+      continue;
+    }
+
+    // All the operands have been visited.  Now assign an ID.
+    Worklist.pop_back();
+    MDs.push_back(N);
+    MetadataMap[N].ID = MDs.size();
+
+    // Flush out any delayed distinct nodes; these are all the distinct nodes
+    // that are leaves in last uniqued subgraph.
+    if (Worklist.empty() || Worklist.back().first->isDistinct()) {
+      for (const MDNode *N : DelayedDistinctNodes)
+        Worklist.push_back(std::make_pair(N, N->op_begin()));
+      DelayedDistinctNodes.clear();
+    }
+  }
+}
+
+/// Mark \p MD as visited under function tag \p F.  Returns \p MD if it is an
+/// MDNode whose operands still need traversal (no ID assigned yet); otherwise
+/// assigns an ID immediately and returns null.
+const MDNode *ValueEnumerator::enumerateMetadataImpl(unsigned F, const Metadata *MD) {
+  if (!MD)
+    return nullptr;
+
+  assert(
+      (isa<MDNode>(MD) || isa<MDString>(MD) || isa<ConstantAsMetadata>(MD)) &&
+      "Invalid metadata kind");
+
+  auto Insertion = MetadataMap.insert(std::make_pair(MD, MDIndex(F)));
+  MDIndex &Entry = Insertion.first->second;
+  if (!Insertion.second) {
+    // Already mapped.  If F doesn't match the function tag, drop it.
+    if (Entry.hasDifferentFunction(F))
+      dropFunctionFromMetadata(*Insertion.first);
+    return nullptr;
+  }
+
+  // Don't assign IDs to metadata nodes.
+  if (auto *N = dyn_cast<MDNode>(MD))
+    return N;
+
+  // Save the metadata.
+  MDs.push_back(MD);
+  Entry.ID = MDs.size();
+
+  // Enumerate the constant, if any.
+  if (auto *C = dyn_cast<ConstantAsMetadata>(MD))
+    EnumerateValue(C->getValue());
+
+  return nullptr;
+}
+
+/// EnumerateFunctionLocalMetadata - Incorporate function-local metadata
+/// information reachable from the metadata.
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+    unsigned F, const LocalAsMetadata *Local) {
+  assert(F && "Expected a function");
+
+  // Check to see if it's already in!
+  MDIndex &Index = MetadataMap[Local];
+  if (Index.ID) {
+    assert(Index.F == F && "Expected the same function");
+    return;
+  }
+
+  // First sighting: record it with the function tag and the next ID.
+  MDs.push_back(Local);
+  Index.F = F;
+  Index.ID = MDs.size();
+
+  EnumerateValue(Local->getValue());
+}
+
+/// Rank metadata kinds for the partition in organizeMetadata(); lower ranks
+/// are emitted earlier.
+static unsigned getMetadataTypeOrder(const Metadata *MD) {
+  // Strings are emitted in bulk and must come first.
+  if (isa<MDString>(MD))
+    return 0;
+
+  // ConstantAsMetadata doesn't reference anything.  We may as well shuffle it
+  // to the front since we can detect it.
+  auto *N = dyn_cast<MDNode>(MD);
+  if (!N)
+    return 1;
+
+  // The reader is fast at forward references to distinct node operands, but
+  // slow when uniqued operands are unresolved.
+  return N->isDistinct() ? 2 : 3;
+}
+
+void ValueEnumerator::organizeMetadata() {
+  assert(MetadataMap.size() == MDs.size() &&
+         "Metadata map and vector out of sync");
+
+  if (MDs.empty())
+    return;
+
+  // Copy out the index information from MetadataMap in order to choose a new
+  // order.
+  SmallVector<MDIndex, 64> Order;
+  Order.reserve(MetadataMap.size());
+  for (const Metadata *MD : MDs)
+    Order.push_back(MetadataMap.lookup(MD));
+
+  // Partition:
+  //   - by function, then
+  //   - by isa<MDString>
+  // and then sort by the original/current ID.  Since the IDs are guaranteed to
+  // be unique, the result of std::sort will be deterministic.  There's no need
+  // for std::stable_sort.
+  llvm::sort(Order.begin(), Order.end(), [this](MDIndex LHS, MDIndex RHS) {
+    return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) <
+           std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID);
+  });
+
+  // Rebuild MDs, index the metadata ranges for each function in FunctionMDs,
+  // and fix up MetadataMap.
+  std::vector<const Metadata *> OldMDs = std::move(MDs);
+  MDs.reserve(OldMDs.size());
+  // Module-level metadata (function tag 0) sorts first; re-number it in place.
+  for (unsigned I = 0, E = Order.size(); I != E && !Order[I].F; ++I) {
+    auto *MD = Order[I].get(OldMDs);
+    MDs.push_back(MD);
+    MetadataMap[MD].ID = I + 1;
+    if (isa<MDString>(MD))
+      ++NumMDStrings;
+  }
+
+  // Return early if there's nothing for the functions.
+  if (MDs.size() == Order.size())
+    return;
+
+  // Build the function metadata ranges.
+  MDRange R;
+  FunctionMDs.reserve(OldMDs.size());
+  unsigned PrevF = 0;
+  for (unsigned I = MDs.size(), E = Order.size(), ID = MDs.size(); I != E;
+       ++I) {
+    unsigned F = Order[I].F;
+    if (!PrevF) {
+      PrevF = F;
+    } else if (PrevF != F) {
+      // Finished one function's range; record it and start the next.
+      R.Last = FunctionMDs.size();
+      std::swap(R, FunctionMDInfo[PrevF]);
+      R.First = FunctionMDs.size();
+
+      // IDs restart after the module-level prefix for each function.
+      ID = MDs.size();
+      PrevF = F;
+    }
+
+    auto *MD = Order[I].get(OldMDs);
+    FunctionMDs.push_back(MD);
+    MetadataMap[MD].ID = ++ID;
+    if (isa<MDString>(MD))
+      ++R.NumStrings;
+  }
+  R.Last = FunctionMDs.size();
+  FunctionMDInfo[PrevF] = R;
+}
+
+/// Splice the given function's slice of FunctionMDs onto the end of MDs for
+/// the duration of the function block.
+void ValueEnumerator::incorporateFunctionMetadata(const Function &F) {
+  NumModuleMDs = MDs.size();
+
+  auto R = FunctionMDInfo.lookup(getValueID(&F) + 1);
+  NumMDStrings = R.NumStrings;
+  MDs.insert(MDs.end(), FunctionMDs.begin() + R.First,
+             FunctionMDs.begin() + R.Last);
+}
+
+void ValueEnumerator::EnumerateValue(const Value *V) {
+  assert(!V->getType()->isVoidTy() && "Can't insert void values!");
+  assert(!isa<MetadataAsValue>(V) && "EnumerateValue doesn't handle Metadata!");
+
+  // Check to see if it's already in!  (IDs are biased by one; 0 == absent.)
+  unsigned &ValueID = ValueMap[V];
+  if (ValueID) {
+    // Increment use count.
+    Values[ValueID-1].second++;
+    return;
+  }
+
+  if (auto *GO = dyn_cast<GlobalObject>(V))
+    if (const Comdat *C = GO->getComdat())
+      Comdats.insert(C);
+
+  // Enumerate the type of this value.
+  EnumerateType(V->getType());
+
+  if (const Constant *C = dyn_cast<Constant>(V)) {
+    if (isa<GlobalValue>(C)) {
+      // Initializers for globals are handled explicitly elsewhere.
+    } else if (C->getNumOperands()) {
+      // If a constant has operands, enumerate them.  This makes sure that if a
+      // constant has uses (for example an array of const ints), that they are
+      // inserted also.
+
+      // We prefer to enumerate them with values before we enumerate the user
+      // itself.  This makes it more likely that we can avoid forward references
+      // in the reader.  We know that there can be no cycles in the constants
+      // graph that don't go through a global variable.
+      for (User::const_op_iterator I = C->op_begin(), E = C->op_end();
+           I != E; ++I)
+        if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress.
+          EnumerateValue(*I);
+
+      // Finally, add the value.  Doing this could make the ValueID reference be
+      // dangling, don't reuse it.
+      Values.push_back(std::make_pair(V, 1U));
+      ValueMap[V] = Values.size();
+      return;
+    }
+  }
+
+  // Add the value.
+  Values.push_back(std::make_pair(V, 1U));
+  ValueID = Values.size();
+}
+
+
+void ValueEnumerator::EnumerateType(Type *Ty) {
+  unsigned *TypeID = &TypeMap[Ty];
+
+  // We've already seen this type.
+  if (*TypeID)
+    return;
+
+  // If it is a non-anonymous struct, mark the type as being visited so that we
+  // don't recursively visit it.  This is safe because we allow forward
+  // references of these in the bitcode reader.
+  if (StructType *STy = dyn_cast<StructType>(Ty))
+    if (!STy->isLiteral())
+      *TypeID = ~0U;
+
+  // Enumerate all of the subtypes before we enumerate this type.  This ensures
+  // that the type will be enumerated in an order that can be directly built.
+  for (Type *SubTy : Ty->subtypes())
+    EnumerateType(SubTy);
+
+  // Refresh the TypeID pointer in case the table rehashed.
+  TypeID = &TypeMap[Ty];
+
+  // Check to see if we got the pointer another way.  This can happen when
+  // enumerating recursive types that hit the base case deeper than they start.
+  //
+  // If this is actually a struct that we are treating as forward ref'able,
+  // then emit the definition now that all of its contents are available.
+  if (*TypeID && *TypeID != ~0U)
+    return;
+
+  // Add this type now that its contents are all happily enumerated.
+  Types.push_back(Ty);
+
+  *TypeID = Types.size();
+}
+
+// Enumerate the types for the specified value.  If the value is a constant,
+// walk through it, enumerating the types of the constant.
+void ValueEnumerator::EnumerateOperandType(const Value *V) {
+  EnumerateType(V->getType());
+
+  assert(!isa<MetadataAsValue>(V) && "Unexpected metadata operand");
+
+  const Constant *C = dyn_cast<Constant>(V);
+  if (!C)
+    return;
+
+  // If this constant is already enumerated, ignore it, we know its type must
+  // be enumerated.
+  if (ValueMap.count(C))
+    return;
+
+  // This constant may have operands, make sure to enumerate the types in
+  // them.
+  for (const Value *Op : C->operands()) {
+    // Don't enumerate basic blocks here, this happens as operands to
+    // blockaddress.
+    if (isa<BasicBlock>(Op))
+      continue;
+
+    EnumerateOperandType(Op);
+  }
+}
+
+/// Register an attribute list and each of its non-empty attribute groups,
+/// assigning 1-based IDs on first sight.
+void ValueEnumerator::EnumerateAttributes(AttributeList PAL) {
+  if (PAL.isEmpty()) return;  // null is always 0.
+
+  // Do a lookup.
+  unsigned &Entry = AttributeListMap[PAL];
+  if (Entry == 0) {
+    // Never saw this before, add it.
+    AttributeLists.push_back(PAL);
+    Entry = AttributeLists.size();
+  }
+
+  // Do lookups for all attribute groups.
+  for (unsigned i = PAL.index_begin(), e = PAL.index_end(); i != e; ++i) {
+    AttributeSet AS = PAL.getAttributes(i);
+    if (!AS.hasAttributes())
+      continue;
+    IndexAndAttrSet Pair = {i, AS};
+    unsigned &Entry = AttributeGroupMap[Pair];
+    if (Entry == 0) {
+      AttributeGroups.push_back(Pair);
+      Entry = AttributeGroups.size();
+    }
+  }
+}
+
+/// Extend the numbering with everything local to \p F: arguments, constants,
+/// basic blocks, instructions, then function-local metadata.  Undone by
+/// purgeFunction().
+void ValueEnumerator::incorporateFunction(const Function &F) {
+  InstructionCount = 0;
+  NumModuleValues = Values.size();
+
+  // Add global metadata to the function block.  This doesn't include
+  // LocalAsMetadata.
+  incorporateFunctionMetadata(F);
+
+  // Adding function arguments to the value table.
+  for (const auto &I : F.args())
+    EnumerateValue(&I);
+
+  FirstFuncConstantID = Values.size();
+
+  // Add all function-level constants to the value table.
+  for (const BasicBlock &BB : F) {
+    for (const Instruction &I : BB)
+      for (const Use &OI : I.operands()) {
+        if ((isa<Constant>(OI) && !isa<GlobalValue>(OI)) || isa<InlineAsm>(OI))
+          EnumerateValue(OI);
+      }
+    BasicBlocks.push_back(&BB);
+    ValueMap[&BB] = BasicBlocks.size();
+  }
+
+  // Optimize the constant layout.
+  OptimizeConstants(FirstFuncConstantID, Values.size());
+
+  // Add the function's parameter attributes so they are available for use in
+  // the function's instruction.
+  EnumerateAttributes(F.getAttributes());
+
+  FirstInstID = Values.size();
+
+  SmallVector<LocalAsMetadata *, 8> FnLocalMDVector;
+  // Add all of the instructions.
+  for (const BasicBlock &BB : F) {
+    for (const Instruction &I : BB) {
+      for (const Use &OI : I.operands()) {
+        if (auto *MD = dyn_cast<MetadataAsValue>(&OI))
+          if (auto *Local = dyn_cast<LocalAsMetadata>(MD->getMetadata()))
+            // Enumerate metadata after the instructions they might refer to.
+            FnLocalMDVector.push_back(Local);
+      }
+
+      if (!I.getType()->isVoidTy())
+        EnumerateValue(&I);
+    }
+  }
+
+  // Add all of the function-local metadata.
+  for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i) {
+    // At this point, every local value has been incorporated, so we shouldn't
+    // have a metadata operand that references a value that hasn't been seen.
+    assert(ValueMap.count(FnLocalMDVector[i]->getValue()) &&
+           "Missing value for metadata operand");
+    EnumerateFunctionLocalMetadata(F, FnLocalMDVector[i]);
+  }
+}
+
+/// Roll the numbering back to the module-level state recorded by
+/// incorporateFunction().
+void ValueEnumerator::purgeFunction() {
+  /// Remove purged values from the ValueMap.
+  for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
+    ValueMap.erase(Values[i].first);
+  for (unsigned i = NumModuleMDs, e = MDs.size(); i != e; ++i)
+    MetadataMap.erase(MDs[i]);
+  for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
+    ValueMap.erase(BasicBlocks[i]);
+
+  Values.resize(NumModuleValues);
+  MDs.resize(NumModuleMDs);
+  BasicBlocks.clear();
+  NumMDStrings = 0;
+}
+
+/// Populate \p IDMap with 1-based IDs for every basic block in \p F.
+static void IncorporateFunctionInfoGlobalBBIDs(const Function *F,
+                                 DenseMap<const BasicBlock*, unsigned> &IDMap) {
+  unsigned Counter = 0;
+  for (const BasicBlock &BB : *F)
+    IDMap[&BB] = ++Counter;
+}
+
+/// getGlobalBasicBlockID - This returns the function-specific ID for the
+/// specified basic block.  This is relatively expensive information, so it
+/// should only be used by rare constructs such as address-of-label.
+unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
+  // IDs are memoized biased by one, so zero means "not computed yet".
+  unsigned &Idx = GlobalBasicBlockIDs[BB];
+  if (Idx != 0)
+    return Idx-1;
+
+  // Number every block in the parent function, then retry the lookup.
+  IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs);
+  return getGlobalBasicBlockID(BB);
+}
+
+// NOTE(review): "Indicies" is a long-standing spelling quirk; the name is
+// kept as-is because it is part of the declared interface used by callers.
+uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const {
+  return Log2_32_Ceil(getTypes().size() + 1);
+}
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
new file mode 100644
index 000000000000..011356c32601
--- /dev/null
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -0,0 +1,304 @@
+//===- Bitcode/Writer/ValueEnumerator.h - Number values ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class gives values and types Unique ID's.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
+#define LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/UniqueVector.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/UseListOrder.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class Comdat;
+class Function;
+class Instruction;
+class LocalAsMetadata;
+class MDNode;
+class Metadata;
+class Module;
+class NamedMDNode;
+class raw_ostream;
+class Type;
+class Value;
+class ValueSymbolTable;
+
+class ValueEnumerator {
+public:
+  using TypeList = std::vector<Type *>;
+
+  // For each value, we remember its Value* and occurrence frequency.
+  using ValueList = std::vector<std::pair<const Value *, unsigned>>;
+
+  /// Attribute groups as encoded in bitcode are almost AttributeSets, but they
+  /// include the AttributeList index, so we have to track that in our map.
+  using IndexAndAttrSet = std::pair<unsigned, AttributeSet>;
+
+  UseListOrderStack UseListOrders;
+
+private:
+  using TypeMapType = DenseMap<Type *, unsigned>;
+  TypeMapType TypeMap;
+  TypeList Types;
+
+  using ValueMapType = DenseMap<const Value *, unsigned>;
+  ValueMapType ValueMap;
+  ValueList Values;
+
+  using ComdatSetType = UniqueVector<const Comdat *>;
+  ComdatSetType Comdats;
+
+  // Enumerated metadata in emission order: the module-level prefix, plus the
+  // currently incorporated function's slice (see incorporateFunctionMetadata).
+  std::vector<const Metadata *> MDs;
+  // All function-local metadata, partitioned per function by FunctionMDInfo.
+  std::vector<const Metadata *> FunctionMDs;
+
+  /// Index of information about a piece of metadata.
+  struct MDIndex {
+    unsigned F = 0;  ///< The ID of the function for this metadata, if any.
+    unsigned ID = 0; ///< The implicit ID of this metadata in bitcode.
+
+    MDIndex() = default;
+    explicit MDIndex(unsigned F) : F(F) {}
+
+    /// Check if this has a function tag, and it's different from NewF.
+    bool hasDifferentFunction(unsigned NewF) const { return F && F != NewF; }
+
+    /// Fetch the MD this references out of the given metadata array.
+    const Metadata *get(ArrayRef<const Metadata *> MDs) const {
+      assert(ID && "Expected non-zero ID");
+      assert(ID <= MDs.size() && "Expected valid ID");
+      return MDs[ID - 1];
+    }
+  };
+
+  using MetadataMapType = DenseMap<const Metadata *, MDIndex>;
+  MetadataMapType MetadataMap;
+
+  /// Range of metadata IDs, as a half-open range.
+  struct MDRange {
+    unsigned First = 0;
+    unsigned Last = 0;
+
+    /// Number of strings in the prefix of the metadata range.
+    unsigned NumStrings = 0;
+
+    MDRange() = default;
+    explicit MDRange(unsigned First) : First(First) {}
+  };
+  // Maps a function tag (value ID + 1) to its slice of FunctionMDs.
+  SmallDenseMap<unsigned, MDRange, 1> FunctionMDInfo;
+
+  bool ShouldPreserveUseListOrder;
+
+  using AttributeGroupMapType = DenseMap<IndexAndAttrSet, unsigned>;
+  AttributeGroupMapType AttributeGroupMap;
+  std::vector<IndexAndAttrSet> AttributeGroups;
+
+  using AttributeListMapType = DenseMap<AttributeList, unsigned>;
+  AttributeListMapType AttributeListMap;
+  std::vector<AttributeList> AttributeLists;
+
+  /// GlobalBasicBlockIDs - This map memoizes the basic block ID's referenced by
+  /// the "getGlobalBasicBlockID" method.
+  mutable DenseMap<const BasicBlock*, unsigned> GlobalBasicBlockIDs;
+
+  using InstructionMapType = DenseMap<const Instruction *, unsigned>;
+  InstructionMapType InstructionMap;
+  unsigned InstructionCount;
+
+  /// BasicBlocks - This contains all the basic blocks for the currently
+  /// incorporated function.  Their reverse mapping is stored in ValueMap.
+  std::vector<const BasicBlock*> BasicBlocks;
+
+  /// When a function is incorporated, this is the size of the Values list
+  /// before incorporation.
+  unsigned NumModuleValues;
+
+  /// When a function is incorporated, this is the size of the Metadatas list
+  /// before incorporation.
+  unsigned NumModuleMDs = 0;
+  /// Number of MDStrings at the front of the current metadata block.
+  unsigned NumMDStrings = 0;
+
+  unsigned FirstFuncConstantID;
+  unsigned FirstInstID;
+
+public:
+  ValueEnumerator(const Module &M, bool ShouldPreserveUseListOrder);
+  ValueEnumerator(const ValueEnumerator &) = delete;
+  ValueEnumerator &operator=(const ValueEnumerator &) = delete;
+
+  void dump() const;
+  void print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const;
+  void print(raw_ostream &OS, const MetadataMapType &Map,
+             const char *Name) const;
+
+  unsigned getValueID(const Value *V) const;
+
+  unsigned getMetadataID(const Metadata *MD) const {
+    auto ID = getMetadataOrNullID(MD);
+    assert(ID != 0 && "Metadata not in slotcalculator!");
+    return ID - 1;
+  }
+
+  unsigned getMetadataOrNullID(const Metadata *MD) const {
+    return MetadataMap.lookup(MD).ID;
+  }
+
+  unsigned numMDs() const { return MDs.size(); }
+
+  bool shouldPreserveUseListOrder() const { return ShouldPreserveUseListOrder; }
+
+  unsigned getTypeID(Type *T) const {
+    TypeMapType::const_iterator I = TypeMap.find(T);
+    assert(I != TypeMap.end() && "Type not in ValueEnumerator!");
+    return I->second-1;
+  }
+
+  unsigned getInstructionID(const Instruction *I) const;
+  void setInstructionID(const Instruction *I);
+
+  unsigned getAttributeListID(AttributeList PAL) const {
+    if (PAL.isEmpty()) return 0;  // Null maps to zero.
+    AttributeListMapType::const_iterator I = AttributeListMap.find(PAL);
+    assert(I != AttributeListMap.end() && "Attribute not in ValueEnumerator!");
+    return I->second;
+  }
+
+  unsigned getAttributeGroupID(IndexAndAttrSet Group) const {
+    if (!Group.second.hasAttributes())
+      return 0; // Null maps to zero.
+    AttributeGroupMapType::const_iterator I = AttributeGroupMap.find(Group);
+    assert(I != AttributeGroupMap.end() && "Attribute not in ValueEnumerator!");
+    return I->second;
+  }
+
+  /// getFunctionConstantRange - Return the range of values that corresponds to
+  /// function-local constants.
+  void getFunctionConstantRange(unsigned &Start, unsigned &End) const {
+    Start = FirstFuncConstantID;
+    End = FirstInstID;
+  }
+
+  const ValueList &getValues() const { return Values; }
+
+  /// Check whether the current block has any metadata to emit.
+  bool hasMDs() const { return NumModuleMDs < MDs.size(); }
+
+  /// Get the MDString metadata for this block.
+  ArrayRef<const Metadata *> getMDStrings() const {
+    return makeArrayRef(MDs).slice(NumModuleMDs, NumMDStrings);
+  }
+
+  /// Get the non-MDString metadata for this block.
+  ArrayRef<const Metadata *> getNonMDStrings() const {
+    return makeArrayRef(MDs).slice(NumModuleMDs).slice(NumMDStrings);
+  }
+
+  const TypeList &getTypes() const { return Types; }
+
+  const std::vector<const BasicBlock*> &getBasicBlocks() const {
+    return BasicBlocks;
+  }
+
+  const std::vector<AttributeList> &getAttributeLists() const { return AttributeLists; }
+
+  const std::vector<IndexAndAttrSet> &getAttributeGroups() const {
+    return AttributeGroups;
+  }
+
+  const ComdatSetType &getComdats() const { return Comdats; }
+  unsigned getComdatID(const Comdat *C) const;
+
+  /// getGlobalBasicBlockID - This returns the function-specific ID for the
+  /// specified basic block.  This is relatively expensive information, so it
+  /// should only be used by rare constructs such as address-of-label.
+  unsigned getGlobalBasicBlockID(const BasicBlock *BB) const;
+
+  /// incorporateFunction/purgeFunction - If you'd like to deal with a function,
+  /// use these two methods to get its data into the ValueEnumerator!
+  void incorporateFunction(const Function &F);
+
+  void purgeFunction();
+  uint64_t computeBitsRequiredForTypeIndicies() const;
+
+private:
+  void OptimizeConstants(unsigned CstStart, unsigned CstEnd);
+
+  /// Reorder the reachable metadata.
+  ///
+  /// This is not just an optimization, but is mandatory for emitting MDString
+  /// correctly.
+  void organizeMetadata();
+
+  /// Drop the function tag from the transitive operands of the given node.
+  void dropFunctionFromMetadata(MetadataMapType::value_type &FirstMD);
+
+  /// Incorporate the function metadata.
+  ///
+  /// This should be called before enumerating LocalAsMetadata for the
+  /// function.
+  void incorporateFunctionMetadata(const Function &F);
+
+  /// Enumerate a single instance of metadata with the given function tag.
+  ///
+  /// If \c MD has already been enumerated, check that \c F matches its
+  /// function tag.  If not, call \a dropFunctionFromMetadata().
+  ///
+  /// Otherwise, mark \c MD as visited.  Assign it an ID, or just return it if
+  /// it's an \a MDNode.
+  const MDNode *enumerateMetadataImpl(unsigned F, const Metadata *MD);
+
+  unsigned getMetadataFunctionID(const Function *F) const;
+
+  /// Enumerate reachable metadata in (almost) post-order.
+  ///
+  /// Enumerate all the metadata reachable from MD.  We want to minimize the
+  /// cost of reading bitcode records, and so the primary consideration is that
+  /// operands of uniqued nodes are resolved before the nodes are read.  This
+  /// avoids re-uniquing them on the context and factors away RAUW support.
+  ///
+  /// This algorithm guarantees that subgraphs of uniqued nodes are in
+  /// post-order.  Distinct subgraphs reachable only from a single uniqued node
+  /// will be in post-order.
+  ///
+  /// \note The relative order of a distinct and uniqued node is irrelevant.
+  /// \a organizeMetadata() will later partition distinct nodes ahead of
+  /// uniqued ones.
+  ///{
+  void EnumerateMetadata(const Function *F, const Metadata *MD);
+  void EnumerateMetadata(unsigned F, const Metadata *MD);
+  ///}
+
+  void EnumerateFunctionLocalMetadata(const Function &F,
+                                      const LocalAsMetadata *Local);
+  void EnumerateFunctionLocalMetadata(unsigned F, const LocalAsMetadata *Local);
+  void EnumerateNamedMDNode(const NamedMDNode *NMD);
+  void EnumerateValue(const Value *V);
+  void EnumerateType(Type *T);
+  void EnumerateOperandType(const Value *V);
+  void EnumerateAttributes(AttributeList PAL);
+
+  void EnumerateValueSymbolTable(const ValueSymbolTable &ST);
+  void EnumerateNamedMetadata(const Module &M);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
