Diffstat (limited to 'llvm/lib/MC/MCAssembler.cpp')
-rw-r--r-- | llvm/lib/MC/MCAssembler.cpp | 1155
1 file changed, 1155 insertions, 0 deletions
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp new file mode 100644 index 000000000000..cf42fe85b8e5 --- /dev/null +++ b/llvm/lib/MC/MCAssembler.cpp @@ -0,0 +1,1155 @@ +//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/MC/MCAssembler.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCAsmLayout.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCCodeView.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCDwarf.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCFragment.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSection.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LEB128.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> +#include <cstdint> +#include <cstring> +#include <tuple> +#include <utility> + +using namespace llvm; + +#define DEBUG_TYPE "assembler" + +namespace { +namespace stats { + +STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total"); +STATISTIC(EmittedRelaxableFragments, +          "Number of emitted assembler fragments - relaxable"); +STATISTIC(EmittedDataFragments, +          "Number of emitted assembler fragments - data"); +STATISTIC(EmittedCompactEncodedInstFragments, +          "Number of emitted assembler fragments - compact encoded inst"); +STATISTIC(EmittedAlignFragments, +          "Number of emitted assembler fragments - align"); +STATISTIC(EmittedFillFragments, +          "Number of emitted assembler fragments - fill"); +STATISTIC(EmittedOrgFragments, +          "Number of emitted assembler fragments - org"); +STATISTIC(evaluateFixup, "Number of evaluated fixups"); +STATISTIC(FragmentLayouts, "Number of fragment layouts"); +STATISTIC(ObjectBytes, "Number of emitted object file bytes"); +STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps"); +STATISTIC(RelaxedInstructions, "Number of relaxed instructions"); +STATISTIC(PaddingFragmentsRelaxations, +          "Number of Padding Fragments relaxations"); +STATISTIC(PaddingFragmentsBytes, +          "Total size of all padding from adding Fragments"); + +} // end namespace stats +} // end anonymous namespace + +// FIXME FIXME FIXME: There are number of places in this file where we convert +// what is a 64-bit assembler value used for computation into a value in the +// object file, which may truncate it. We should detect that truncation where +// invalid and report errors back. 
+ +/* *** */ + +MCAssembler::MCAssembler(MCContext &Context, +                         std::unique_ptr<MCAsmBackend> Backend, +                         std::unique_ptr<MCCodeEmitter> Emitter, +                         std::unique_ptr<MCObjectWriter> Writer) +    : Context(Context), Backend(std::move(Backend)), +      Emitter(std::move(Emitter)), Writer(std::move(Writer)), +      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false), +      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) { +  VersionInfo.Major = 0; // Major version == 0 for "none specified" +} + +MCAssembler::~MCAssembler() = default; + +void MCAssembler::reset() { +  Sections.clear(); +  Symbols.clear(); +  IndirectSymbols.clear(); +  DataRegions.clear(); +  LinkerOptions.clear(); +  FileNames.clear(); +  ThumbFuncs.clear(); +  BundleAlignSize = 0; +  RelaxAll = false; +  SubsectionsViaSymbols = false; +  IncrementalLinkerCompatible = false; +  ELFHeaderEFlags = 0; +  LOHContainer.reset(); +  VersionInfo.Major = 0; +  VersionInfo.SDKVersion = VersionTuple(); + +  // reset objects owned by us +  if (getBackendPtr()) +    getBackendPtr()->reset(); +  if (getEmitterPtr()) +    getEmitterPtr()->reset(); +  if (getWriterPtr()) +    getWriterPtr()->reset(); +  getLOHContainer().reset(); +} + +bool MCAssembler::registerSection(MCSection &Section) { +  if (Section.isRegistered()) +    return false; +  Sections.push_back(&Section); +  Section.setIsRegistered(true); +  return true; +} + +bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const { +  if (ThumbFuncs.count(Symbol)) +    return true; + +  if (!Symbol->isVariable()) +    return false; + +  const MCExpr *Expr = Symbol->getVariableValue(); + +  MCValue V; +  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr)) +    return false; + +  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None) +    return false; + +  const MCSymbolRefExpr *Ref = V.getSymA(); +  if (!Ref) +    return false; + +  if (Ref->getKind() != MCSymbolRefExpr::VK_None) +    return false; + +  const MCSymbol &Sym = Ref->getSymbol(); +  if (!isThumbFunc(&Sym)) +    return false; + +  ThumbFuncs.insert(Symbol); // Cache it. +  return true; +} + +bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const { +  // Non-temporary labels should always be visible to the linker. +  if (!Symbol.isTemporary()) +    return true; + +  // Absolute temporary labels are never visible. +  if (!Symbol.isInSection()) +    return false; + +  if (Symbol.isUsedInReloc()) +    return true; + +  return false; +} + +const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const { +  // Linker visible symbols define atoms. +  if (isSymbolLinkerVisible(S)) +    return &S; + +  // Absolute and undefined symbols have no defining atom. +  if (!S.isInSection()) +    return nullptr; + +  // Non-linker visible symbols in sections which can't be atomized have no +  // defining atom. +  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols( +          *S.getFragment()->getParent())) +    return nullptr; + +  // Otherwise, return the atom for the containing fragment. +  return S.getFragment()->getAtom(); +} + +bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, +                                const MCFixup &Fixup, const MCFragment *DF, +                                MCValue &Target, uint64_t &Value, +                                bool &WasForced) const { +  ++stats::evaluateFixup; + +  // FIXME: This code has some duplication with recordRelocation. 
We should +  // probably merge the two into a single callback that tries to evaluate a +  // fixup and records a relocation if one is needed. + +  // On error claim to have completely evaluated the fixup, to prevent any +  // further processing from being done. +  const MCExpr *Expr = Fixup.getValue(); +  MCContext &Ctx = getContext(); +  Value = 0; +  WasForced = false; +  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) { +    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression"); +    return true; +  } +  if (const MCSymbolRefExpr *RefB = Target.getSymB()) { +    if (RefB->getKind() != MCSymbolRefExpr::VK_None) { +      Ctx.reportError(Fixup.getLoc(), +                      "unsupported subtraction of qualified symbol"); +      return true; +    } +  } + +  assert(getBackendPtr() && "Expected assembler backend"); +  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags & +                 MCFixupKindInfo::FKF_IsPCRel; + +  bool IsResolved = false; +  if (IsPCRel) { +    if (Target.getSymB()) { +      IsResolved = false; +    } else if (!Target.getSymA()) { +      IsResolved = false; +    } else { +      const MCSymbolRefExpr *A = Target.getSymA(); +      const MCSymbol &SA = A->getSymbol(); +      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) { +        IsResolved = false; +      } else if (auto *Writer = getWriterPtr()) { +        IsResolved = Writer->isSymbolRefDifferenceFullyResolvedImpl( +            *this, SA, *DF, false, true); +      } +    } +  } else { +    IsResolved = Target.isAbsolute(); +  } + +  Value = Target.getConstant(); + +  if (const MCSymbolRefExpr *A = Target.getSymA()) { +    const MCSymbol &Sym = A->getSymbol(); +    if (Sym.isDefined()) +      Value += Layout.getSymbolOffset(Sym); +  } +  if (const MCSymbolRefExpr *B = Target.getSymB()) { +    const MCSymbol &Sym = B->getSymbol(); +    if (Sym.isDefined()) +      Value -= Layout.getSymbolOffset(Sym); +  } + +  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags & +                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits; +  assert((ShouldAlignPC ? IsPCRel : true) && +    "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!"); + +  if (IsPCRel) { +    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset(); + +    // A number of ARM fixups in Thumb mode require that the effective PC +    // address be determined as the 32-bit aligned version of the actual offset. +    if (ShouldAlignPC) Offset &= ~0x3; +    Value -= Offset; +  } + +  // Let the backend force a relocation if needed. 
+  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) { +    IsResolved = false; +    WasForced = true; +  } + +  return IsResolved; +} + +uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout, +                                          const MCFragment &F) const { +  assert(getBackendPtr() && "Requires assembler backend"); +  switch (F.getKind()) { +  case MCFragment::FT_Data: +    return cast<MCDataFragment>(F).getContents().size(); +  case MCFragment::FT_Relaxable: +    return cast<MCRelaxableFragment>(F).getContents().size(); +  case MCFragment::FT_CompactEncodedInst: +    return cast<MCCompactEncodedInstFragment>(F).getContents().size(); +  case MCFragment::FT_Fill: { +    auto &FF = cast<MCFillFragment>(F); +    int64_t NumValues = 0; +    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) { +      getContext().reportError(FF.getLoc(), +                               "expected assembly-time absolute expression"); +      return 0; +    } +    int64_t Size = NumValues * FF.getValueSize(); +    if (Size < 0) { +      getContext().reportError(FF.getLoc(), "invalid number of bytes"); +      return 0; +    } +    return Size; +  } + +  case MCFragment::FT_LEB: +    return cast<MCLEBFragment>(F).getContents().size(); + +  case MCFragment::FT_Padding: +    return cast<MCPaddingFragment>(F).getSize(); + +  case MCFragment::FT_SymbolId: +    return 4; + +  case MCFragment::FT_Align: { +    const MCAlignFragment &AF = cast<MCAlignFragment>(F); +    unsigned Offset = Layout.getFragmentOffset(&AF); +    unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment())); + +    // Insert extra Nops for code alignment if the target define +    // shouldInsertExtraNopBytesForCodeAlign target hook. +    if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() && +        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size)) +      return Size; + +    // If we are padding with nops, force the padding to be larger than the +    // minimum nop size. 
+    if (Size > 0 && AF.hasEmitNops()) { +      while (Size % getBackend().getMinimumNopSize()) +        Size += AF.getAlignment(); +    } +    if (Size > AF.getMaxBytesToEmit()) +      return 0; +    return Size; +  } + +  case MCFragment::FT_Org: { +    const MCOrgFragment &OF = cast<MCOrgFragment>(F); +    MCValue Value; +    if (!OF.getOffset().evaluateAsValue(Value, Layout)) { +      getContext().reportError(OF.getLoc(), +                               "expected assembly-time absolute expression"); +        return 0; +    } + +    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF); +    int64_t TargetLocation = Value.getConstant(); +    if (const MCSymbolRefExpr *A = Value.getSymA()) { +      uint64_t Val; +      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) { +        getContext().reportError(OF.getLoc(), "expected absolute expression"); +        return 0; +      } +      TargetLocation += Val; +    } +    int64_t Size = TargetLocation - FragmentOffset; +    if (Size < 0 || Size >= 0x40000000) { +      getContext().reportError( +          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) + +                           "' (at offset '" + Twine(FragmentOffset) + "')"); +      return 0; +    } +    return Size; +  } + +  case MCFragment::FT_Dwarf: +    return cast<MCDwarfLineAddrFragment>(F).getContents().size(); +  case MCFragment::FT_DwarfFrame: +    return cast<MCDwarfCallFrameFragment>(F).getContents().size(); +  case MCFragment::FT_CVInlineLines: +    return cast<MCCVInlineLineTableFragment>(F).getContents().size(); +  case MCFragment::FT_CVDefRange: +    return cast<MCCVDefRangeFragment>(F).getContents().size(); +  case MCFragment::FT_Dummy: +    llvm_unreachable("Should not have been added"); +  } + +  llvm_unreachable("invalid fragment kind"); +} + +void MCAsmLayout::layoutFragment(MCFragment *F) { +  MCFragment *Prev = F->getPrevNode(); + +  // We should never try to recompute something which is valid. +  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!"); +  // We should never try to compute the fragment layout if its predecessor +  // isn't valid. +  assert((!Prev || isFragmentValid(Prev)) && +         "Attempt to compute fragment before its predecessor!"); + +  ++stats::FragmentLayouts; + +  // Compute fragment offset and size. +  if (Prev) +    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev); +  else +    F->Offset = 0; +  LastValidFragment[F->getParent()] = F; + +  // If bundling is enabled and this fragment has instructions in it, it has to +  // obey the bundling restrictions. With padding, we'll have: +  // +  // +  //        BundlePadding +  //             ||| +  // ------------------------------------- +  //   Prev  |##########|       F        | +  // ------------------------------------- +  //                    ^ +  //                    | +  //                    F->Offset +  // +  // The fragment's offset will point to after the padding, and its computed +  // size won't include the padding. +  // +  // When the -mc-relax-all flag is used, we optimize bundling by writting the +  // padding directly into fragments when the instructions are emitted inside +  // the streamer. When the fragment is larger than the bundle size, we need to +  // ensure that it's bundle aligned. This means that if we end up with +  // multiple fragments, we must emit bundle padding between fragments. +  // +  // ".align N" is an example of a directive that introduces multiple +  // fragments. 
We could add a special case to handle ".align N" by emitting +  // within-fragment padding (which would produce less padding when N is less +  // than the bundle size), but for now we don't. +  // +  if (Assembler.isBundlingEnabled() && F->hasInstructions()) { +    assert(isa<MCEncodedFragment>(F) && +           "Only MCEncodedFragment implementations have instructions"); +    MCEncodedFragment *EF = cast<MCEncodedFragment>(F); +    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF); + +    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize()) +      report_fatal_error("Fragment can't be larger than a bundle size"); + +    uint64_t RequiredBundlePadding = +        computeBundlePadding(Assembler, EF, EF->Offset, FSize); +    if (RequiredBundlePadding > UINT8_MAX) +      report_fatal_error("Padding cannot exceed 255 bytes"); +    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding)); +    EF->Offset += RequiredBundlePadding; +  } +} + +void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) { +  bool New = !Symbol.isRegistered(); +  if (Created) +    *Created = New; +  if (New) { +    Symbol.setIsRegistered(true); +    Symbols.push_back(&Symbol); +  } +} + +void MCAssembler::writeFragmentPadding(raw_ostream &OS, +                                       const MCEncodedFragment &EF, +                                       uint64_t FSize) const { +  assert(getBackendPtr() && "Expected assembler backend"); +  // Should NOP padding be written out before this fragment? +  unsigned BundlePadding = EF.getBundlePadding(); +  if (BundlePadding > 0) { +    assert(isBundlingEnabled() && +           "Writing bundle padding with disabled bundling"); +    assert(EF.hasInstructions() && +           "Writing bundle padding for a fragment without instructions"); + +    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize); +    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) { +      // If the padding itself crosses a bundle boundary, it must be emitted +      // in 2 pieces, since even nop instructions must not cross boundaries. +      //             v--------------v   <- BundleAlignSize +      //        v---------v             <- BundlePadding +      // ---------------------------- +      // | Prev |####|####|    F    | +      // ---------------------------- +      //        ^-------------------^   <- TotalLength +      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize(); +      if (!getBackend().writeNopData(OS, DistanceToBoundary)) +        report_fatal_error("unable to write NOP sequence of " + +                           Twine(DistanceToBoundary) + " bytes"); +      BundlePadding -= DistanceToBoundary; +    } +    if (!getBackend().writeNopData(OS, BundlePadding)) +      report_fatal_error("unable to write NOP sequence of " + +                         Twine(BundlePadding) + " bytes"); +  } +} + +/// Write the fragment \p F to the output file. +static void writeFragment(raw_ostream &OS, const MCAssembler &Asm, +                          const MCAsmLayout &Layout, const MCFragment &F) { +  // FIXME: Embed in fragments instead? +  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F); + +  support::endianness Endian = Asm.getBackend().Endian; + +  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F)) +    Asm.writeFragmentPadding(OS, *EF, FragmentSize); + +  // This variable (and its dummy usage) is to participate in the assert at +  // the end of the function. 
+  uint64_t Start = OS.tell(); +  (void) Start; + +  ++stats::EmittedFragments; + +  switch (F.getKind()) { +  case MCFragment::FT_Align: { +    ++stats::EmittedAlignFragments; +    const MCAlignFragment &AF = cast<MCAlignFragment>(F); +    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!"); + +    uint64_t Count = FragmentSize / AF.getValueSize(); + +    // FIXME: This error shouldn't actually occur (the front end should emit +    // multiple .align directives to enforce the semantics it wants), but is +    // severe enough that we want to report it. How to handle this? +    if (Count * AF.getValueSize() != FragmentSize) +      report_fatal_error("undefined .align directive, value size '" + +                        Twine(AF.getValueSize()) + +                        "' is not a divisor of padding size '" + +                        Twine(FragmentSize) + "'"); + +    // See if we are aligning with nops, and if so do that first to try to fill +    // the Count bytes.  Then if that did not fill any bytes or there are any +    // bytes left to fill use the Value and ValueSize to fill the rest. +    // If we are aligning with nops, ask that target to emit the right data. +    if (AF.hasEmitNops()) { +      if (!Asm.getBackend().writeNopData(OS, Count)) +        report_fatal_error("unable to write nop sequence of " + +                          Twine(Count) + " bytes"); +      break; +    } + +    // Otherwise, write out in multiples of the value size. +    for (uint64_t i = 0; i != Count; ++i) { +      switch (AF.getValueSize()) { +      default: llvm_unreachable("Invalid size!"); +      case 1: OS << char(AF.getValue()); break; +      case 2: +        support::endian::write<uint16_t>(OS, AF.getValue(), Endian); +        break; +      case 4: +        support::endian::write<uint32_t>(OS, AF.getValue(), Endian); +        break; +      case 8: +        support::endian::write<uint64_t>(OS, AF.getValue(), Endian); +        break; +      } +    } +    break; +  } + +  case MCFragment::FT_Data: +    ++stats::EmittedDataFragments; +    OS << cast<MCDataFragment>(F).getContents(); +    break; + +  case MCFragment::FT_Relaxable: +    ++stats::EmittedRelaxableFragments; +    OS << cast<MCRelaxableFragment>(F).getContents(); +    break; + +  case MCFragment::FT_CompactEncodedInst: +    ++stats::EmittedCompactEncodedInstFragments; +    OS << cast<MCCompactEncodedInstFragment>(F).getContents(); +    break; + +  case MCFragment::FT_Fill: { +    ++stats::EmittedFillFragments; +    const MCFillFragment &FF = cast<MCFillFragment>(F); +    uint64_t V = FF.getValue(); +    unsigned VSize = FF.getValueSize(); +    const unsigned MaxChunkSize = 16; +    char Data[MaxChunkSize]; +    // Duplicate V into Data as byte vector to reduce number of +    // writes done. As such, do endian conversion here. +    for (unsigned I = 0; I != VSize; ++I) { +      unsigned index = Endian == support::little ? I : (VSize - I - 1); +      Data[I] = uint8_t(V >> (index * 8)); +    } +    for (unsigned I = VSize; I < MaxChunkSize; ++I) +      Data[I] = Data[I - VSize]; + +    // Set to largest multiple of VSize in Data. +    const unsigned NumPerChunk = MaxChunkSize / VSize; +    // Set ChunkSize to largest multiple of VSize in Data +    const unsigned ChunkSize = VSize * NumPerChunk; + +    // Do copies by chunk. +    StringRef Ref(Data, ChunkSize); +    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I) +      OS << Ref; + +    // do remainder if needed. 
+    unsigned TrailingCount = FragmentSize % ChunkSize; +    if (TrailingCount) +      OS.write(Data, TrailingCount); +    break; +  } + +  case MCFragment::FT_LEB: { +    const MCLEBFragment &LF = cast<MCLEBFragment>(F); +    OS << LF.getContents(); +    break; +  } + +  case MCFragment::FT_Padding: { +    if (!Asm.getBackend().writeNopData(OS, FragmentSize)) +      report_fatal_error("unable to write nop sequence of " + +                         Twine(FragmentSize) + " bytes"); +    break; +  } + +  case MCFragment::FT_SymbolId: { +    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F); +    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian); +    break; +  } + +  case MCFragment::FT_Org: { +    ++stats::EmittedOrgFragments; +    const MCOrgFragment &OF = cast<MCOrgFragment>(F); + +    for (uint64_t i = 0, e = FragmentSize; i != e; ++i) +      OS << char(OF.getValue()); + +    break; +  } + +  case MCFragment::FT_Dwarf: { +    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F); +    OS << OF.getContents(); +    break; +  } +  case MCFragment::FT_DwarfFrame: { +    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F); +    OS << CF.getContents(); +    break; +  } +  case MCFragment::FT_CVInlineLines: { +    const auto &OF = cast<MCCVInlineLineTableFragment>(F); +    OS << OF.getContents(); +    break; +  } +  case MCFragment::FT_CVDefRange: { +    const auto &DRF = cast<MCCVDefRangeFragment>(F); +    OS << DRF.getContents(); +    break; +  } +  case MCFragment::FT_Dummy: +    llvm_unreachable("Should not have been added"); +  } + +  assert(OS.tell() - Start == FragmentSize && +         "The stream should advance by fragment size"); +} + +void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec, +                                   const MCAsmLayout &Layout) const { +  assert(getBackendPtr() && "Expected assembler backend"); + +  // Ignore virtual sections. +  if (Sec->isVirtualSection()) { +    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!"); + +    // Check that contents are only things legal inside a virtual section. +    for (const MCFragment &F : *Sec) { +      switch (F.getKind()) { +      default: llvm_unreachable("Invalid fragment in virtual section!"); +      case MCFragment::FT_Data: { +        // Check that we aren't trying to write a non-zero contents (or fixups) +        // into a virtual section. This is to support clients which use standard +        // directives to fill the contents of virtual sections. +        const MCDataFragment &DF = cast<MCDataFragment>(F); +        if (DF.fixup_begin() != DF.fixup_end()) +          report_fatal_error("cannot have fixups in virtual section!"); +        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) +          if (DF.getContents()[i]) { +            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec)) +              report_fatal_error("non-zero initializer found in section '" + +                  ELFSec->getSectionName() + "'"); +            else +              report_fatal_error("non-zero initializer found in virtual section"); +          } +        break; +      } +      case MCFragment::FT_Align: +        // Check that we aren't trying to write a non-zero value into a virtual +        // section. 
+        assert((cast<MCAlignFragment>(F).getValueSize() == 0 || +                cast<MCAlignFragment>(F).getValue() == 0) && +               "Invalid align in virtual section!"); +        break; +      case MCFragment::FT_Fill: +        assert((cast<MCFillFragment>(F).getValue() == 0) && +               "Invalid fill in virtual section!"); +        break; +      } +    } + +    return; +  } + +  uint64_t Start = OS.tell(); +  (void)Start; + +  for (const MCFragment &F : *Sec) +    writeFragment(OS, *this, Layout, F); + +  assert(OS.tell() - Start == Layout.getSectionAddressSize(Sec)); +} + +std::tuple<MCValue, uint64_t, bool> +MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F, +                         const MCFixup &Fixup) { +  // Evaluate the fixup. +  MCValue Target; +  uint64_t FixedValue; +  bool WasForced; +  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue, +                                  WasForced); +  if (!IsResolved) { +    // The fixup was unresolved, we need a relocation. Inform the object +    // writer of the relocation, and give it an opportunity to adjust the +    // fixup value if need be. +    if (Target.getSymA() && Target.getSymB() && +        getBackend().requiresDiffExpressionRelocations()) { +      // The fixup represents the difference between two symbols, which the +      // backend has indicated must be resolved at link time. Split up the fixup +      // into two relocations, one for the add, and one for the sub, and emit +      // both of these. The constant will be associated with the add half of the +      // expression. +      MCFixup FixupAdd = MCFixup::createAddFor(Fixup); +      MCValue TargetAdd = +          MCValue::get(Target.getSymA(), nullptr, Target.getConstant()); +      getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd, +                                   FixedValue); +      MCFixup FixupSub = MCFixup::createSubFor(Fixup); +      MCValue TargetSub = MCValue::get(Target.getSymB()); +      getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub, +                                   FixedValue); +    } else { +      getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, +                                   FixedValue); +    } +  } +  return std::make_tuple(Target, FixedValue, IsResolved); +} + +void MCAssembler::layout(MCAsmLayout &Layout) { +  assert(getBackendPtr() && "Expected assembler backend"); +  DEBUG_WITH_TYPE("mc-dump", { +      errs() << "assembler backend - pre-layout\n--\n"; +      dump(); }); + +  // Create dummy fragments and assign section ordinals. +  unsigned SectionIndex = 0; +  for (MCSection &Sec : *this) { +    // Create dummy fragments to eliminate any empty sections, this simplifies +    // layout. +    if (Sec.getFragmentList().empty()) +      new MCDataFragment(&Sec); + +    Sec.setOrdinal(SectionIndex++); +  } + +  // Assign layout order indices to sections and fragments. +  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) { +    MCSection *Sec = Layout.getSectionOrder()[i]; +    Sec->setLayoutOrder(i); + +    unsigned FragmentIndex = 0; +    for (MCFragment &Frag : *Sec) +      Frag.setLayoutOrder(FragmentIndex++); +  } + +  // Layout until everything fits. +  while (layoutOnce(Layout)) +    if (getContext().hadError()) +      return; + +  DEBUG_WITH_TYPE("mc-dump", { +      errs() << "assembler backend - post-relaxation\n--\n"; +      dump(); }); + +  // Finalize the layout, including fragment lowering. 
+  finishLayout(Layout); + +  DEBUG_WITH_TYPE("mc-dump", { +      errs() << "assembler backend - final-layout\n--\n"; +      dump(); }); + +  // Allow the object writer a chance to perform post-layout binding (for +  // example, to set the index fields in the symbol data). +  getWriter().executePostLayoutBinding(*this, Layout); + +  // Evaluate and apply the fixups, generating relocation entries as necessary. +  for (MCSection &Sec : *this) { +    for (MCFragment &Frag : Sec) { +      // Data and relaxable fragments both have fixups.  So only process +      // those here. +      // FIXME: Is there a better way to do this?  MCEncodedFragmentWithFixups +      // being templated makes this tricky. +      if (isa<MCEncodedFragment>(&Frag) && +          isa<MCCompactEncodedInstFragment>(&Frag)) +        continue; +      if (!isa<MCEncodedFragment>(&Frag) && !isa<MCCVDefRangeFragment>(&Frag) && +          !isa<MCAlignFragment>(&Frag)) +        continue; +      ArrayRef<MCFixup> Fixups; +      MutableArrayRef<char> Contents; +      const MCSubtargetInfo *STI = nullptr; +      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(&Frag)) { +        Fixups = FragWithFixups->getFixups(); +        Contents = FragWithFixups->getContents(); +        STI = FragWithFixups->getSubtargetInfo(); +        assert(!FragWithFixups->hasInstructions() || STI != nullptr); +      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(&Frag)) { +        Fixups = FragWithFixups->getFixups(); +        Contents = FragWithFixups->getContents(); +        STI = FragWithFixups->getSubtargetInfo(); +        assert(!FragWithFixups->hasInstructions() || STI != nullptr); +      } else if (auto *FragWithFixups = dyn_cast<MCCVDefRangeFragment>(&Frag)) { +        Fixups = FragWithFixups->getFixups(); +        Contents = FragWithFixups->getContents(); +      } else if (auto *FragWithFixups = dyn_cast<MCDwarfLineAddrFragment>(&Frag)) { +        Fixups = FragWithFixups->getFixups(); +        Contents = FragWithFixups->getContents(); +      } else if (auto *AF = dyn_cast<MCAlignFragment>(&Frag)) { +        // Insert fixup type for code alignment if the target define +        // shouldInsertFixupForCodeAlign target hook. +        if (Sec.UseCodeAlign() && AF->hasEmitNops()) { +          getBackend().shouldInsertFixupForCodeAlign(*this, Layout, *AF); +        } +        continue; +      } else if (auto *FragWithFixups = +                     dyn_cast<MCDwarfCallFrameFragment>(&Frag)) { +        Fixups = FragWithFixups->getFixups(); +        Contents = FragWithFixups->getContents(); +      } else +        llvm_unreachable("Unknown fragment with fixups!"); +      for (const MCFixup &Fixup : Fixups) { +        uint64_t FixedValue; +        bool IsResolved; +        MCValue Target; +        std::tie(Target, FixedValue, IsResolved) = +            handleFixup(Layout, Frag, Fixup); +        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue, +                                IsResolved, STI); +      } +    } +  } +} + +void MCAssembler::Finish() { +  // Create the layout object. +  MCAsmLayout Layout(*this); +  layout(Layout); + +  // Write the object file. 
+  stats::ObjectBytes += getWriter().writeObject(*this, Layout); +} + +bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup, +                                       const MCRelaxableFragment *DF, +                                       const MCAsmLayout &Layout) const { +  assert(getBackendPtr() && "Expected assembler backend"); +  MCValue Target; +  uint64_t Value; +  bool WasForced; +  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced); +  if (Target.getSymA() && +      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 && +      Fixup.getKind() == FK_Data_1) +    return false; +  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF, +                                                   Layout, WasForced); +} + +bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F, +                                          const MCAsmLayout &Layout) const { +  assert(getBackendPtr() && "Expected assembler backend"); +  // If this inst doesn't ever need relaxation, ignore it. This occurs when we +  // are intentionally pushing out inst fragments, or because we relaxed a +  // previous instruction to one that doesn't need relaxation. +  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo())) +    return false; + +  for (const MCFixup &Fixup : F->getFixups()) +    if (fixupNeedsRelaxation(Fixup, F, Layout)) +      return true; + +  return false; +} + +bool MCAssembler::relaxInstruction(MCAsmLayout &Layout, +                                   MCRelaxableFragment &F) { +  assert(getEmitterPtr() && +         "Expected CodeEmitter defined for relaxInstruction"); +  if (!fragmentNeedsRelaxation(&F, Layout)) +    return false; + +  ++stats::RelaxedInstructions; + +  // FIXME-PERF: We could immediately lower out instructions if we can tell +  // they are fully resolved, to avoid retesting on later passes. + +  // Relax the fragment. + +  MCInst Relaxed; +  getBackend().relaxInstruction(F.getInst(), *F.getSubtargetInfo(), Relaxed); + +  // Encode the new instruction. +  // +  // FIXME-PERF: If it matters, we could let the target do this. It can +  // probably do so more efficiently in many cases. +  SmallVector<MCFixup, 4> Fixups; +  SmallString<256> Code; +  raw_svector_ostream VecOS(Code); +  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo()); + +  // Update the fragment. 
+  F.setInst(Relaxed); +  F.getContents() = Code; +  F.getFixups() = Fixups; + +  return true; +} + +bool MCAssembler::relaxPaddingFragment(MCAsmLayout &Layout, +                                       MCPaddingFragment &PF) { +  assert(getBackendPtr() && "Expected assembler backend"); +  uint64_t OldSize = PF.getSize(); +  if (!getBackend().relaxFragment(&PF, Layout)) +    return false; +  uint64_t NewSize = PF.getSize(); + +  ++stats::PaddingFragmentsRelaxations; +  stats::PaddingFragmentsBytes += NewSize; +  stats::PaddingFragmentsBytes -= OldSize; +  return true; +} + +bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) { +  uint64_t OldSize = LF.getContents().size(); +  int64_t Value; +  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout); +  if (!Abs) +    report_fatal_error("sleb128 and uleb128 expressions must be absolute"); +  SmallString<8> &Data = LF.getContents(); +  Data.clear(); +  raw_svector_ostream OSE(Data); +  // The compiler can generate EH table assembly that is impossible to assemble +  // without either adding padding to an LEB fragment or adding extra padding +  // to a later alignment fragment. To accommodate such tables, relaxation can +  // only increase an LEB fragment size here, not decrease it. See PR35809. +  if (LF.isSigned()) +    encodeSLEB128(Value, OSE, OldSize); +  else +    encodeULEB128(Value, OSE, OldSize); +  return OldSize != LF.getContents().size(); +} + +bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout, +                                     MCDwarfLineAddrFragment &DF) { +  MCContext &Context = Layout.getAssembler().getContext(); +  uint64_t OldSize = DF.getContents().size(); +  int64_t AddrDelta; +  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); +  assert(Abs && "We created a line delta with an invalid expression"); +  (void)Abs; +  int64_t LineDelta; +  LineDelta = DF.getLineDelta(); +  SmallVectorImpl<char> &Data = DF.getContents(); +  Data.clear(); +  raw_svector_ostream OSE(Data); +  DF.getFixups().clear(); + +  if (!getBackend().requiresDiffExpressionRelocations()) { +    MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta, +                            AddrDelta, OSE); +  } else { +    uint32_t Offset; +    uint32_t Size; +    bool SetDelta = MCDwarfLineAddr::FixedEncode(Context, +                                                 getDWARFLinetableParams(), +                                                 LineDelta, AddrDelta, +                                                 OSE, &Offset, &Size); +    // Add Fixups for address delta or new address. 
+    const MCExpr *FixupExpr; +    if (SetDelta) { +      FixupExpr = &DF.getAddrDelta(); +    } else { +      const MCBinaryExpr *ABE = cast<MCBinaryExpr>(&DF.getAddrDelta()); +      FixupExpr = ABE->getLHS(); +    } +    DF.getFixups().push_back( +        MCFixup::create(Offset, FixupExpr, +                        MCFixup::getKindForSize(Size, false /*isPCRel*/))); +  } + +  return OldSize != Data.size(); +} + +bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout, +                                              MCDwarfCallFrameFragment &DF) { +  MCContext &Context = Layout.getAssembler().getContext(); +  uint64_t OldSize = DF.getContents().size(); +  int64_t AddrDelta; +  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); +  assert(Abs && "We created call frame with an invalid expression"); +  (void) Abs; +  SmallVectorImpl<char> &Data = DF.getContents(); +  Data.clear(); +  raw_svector_ostream OSE(Data); +  DF.getFixups().clear(); + +  if (getBackend().requiresDiffExpressionRelocations()) { +    uint32_t Offset; +    uint32_t Size; +    MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE, &Offset, +                                          &Size); +    if (Size) { +      DF.getFixups().push_back(MCFixup::create( +          Offset, &DF.getAddrDelta(), +          MCFixup::getKindForSizeInBits(Size /*In bits.*/, false /*isPCRel*/))); +    } +  } else { +    MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE); +  } + +  return OldSize != Data.size(); +} + +bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout, +                                         MCCVInlineLineTableFragment &F) { +  unsigned OldSize = F.getContents().size(); +  getContext().getCVContext().encodeInlineLineTable(Layout, F); +  return OldSize != F.getContents().size(); +} + +bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout, +                                  MCCVDefRangeFragment &F) { +  unsigned OldSize = F.getContents().size(); +  getContext().getCVContext().encodeDefRange(Layout, F); +  return OldSize != F.getContents().size(); +} + +bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) { +  // Holds the first fragment which needed relaxing during this layout. It will +  // remain NULL if none were relaxed. +  // When a fragment is relaxed, all the fragments following it should get +  // invalidated because their offset is going to change. +  MCFragment *FirstRelaxedFragment = nullptr; + +  // Attempt to relax all the fragments in the section. +  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) { +    // Check if this is a fragment that needs relaxation. 
+    bool RelaxedFrag = false; +    switch(I->getKind()) { +    default: +      break; +    case MCFragment::FT_Relaxable: +      assert(!getRelaxAll() && +             "Did not expect a MCRelaxableFragment in RelaxAll mode"); +      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I)); +      break; +    case MCFragment::FT_Dwarf: +      RelaxedFrag = relaxDwarfLineAddr(Layout, +                                       *cast<MCDwarfLineAddrFragment>(I)); +      break; +    case MCFragment::FT_DwarfFrame: +      RelaxedFrag = +        relaxDwarfCallFrameFragment(Layout, +                                    *cast<MCDwarfCallFrameFragment>(I)); +      break; +    case MCFragment::FT_LEB: +      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I)); +      break; +    case MCFragment::FT_Padding: +      RelaxedFrag = relaxPaddingFragment(Layout, *cast<MCPaddingFragment>(I)); +      break; +    case MCFragment::FT_CVInlineLines: +      RelaxedFrag = +          relaxCVInlineLineTable(Layout, *cast<MCCVInlineLineTableFragment>(I)); +      break; +    case MCFragment::FT_CVDefRange: +      RelaxedFrag = relaxCVDefRange(Layout, *cast<MCCVDefRangeFragment>(I)); +      break; +    } +    if (RelaxedFrag && !FirstRelaxedFragment) +      FirstRelaxedFragment = &*I; +  } +  if (FirstRelaxedFragment) { +    Layout.invalidateFragmentsFrom(FirstRelaxedFragment); +    return true; +  } +  return false; +} + +bool MCAssembler::layoutOnce(MCAsmLayout &Layout) { +  ++stats::RelaxationSteps; + +  bool WasRelaxed = false; +  for (iterator it = begin(), ie = end(); it != ie; ++it) { +    MCSection &Sec = *it; +    while (layoutSectionOnce(Layout, Sec)) +      WasRelaxed = true; +  } + +  return WasRelaxed; +} + +void MCAssembler::finishLayout(MCAsmLayout &Layout) { +  assert(getBackendPtr() && "Expected assembler backend"); +  // The layout is done. Mark every fragment as valid. +  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { +    MCSection &Section = *Layout.getSectionOrder()[i]; +    Layout.getFragmentOffset(&*Section.rbegin()); +    computeFragmentSize(Layout, *Section.rbegin()); +  } +  getBackend().finishLayout(*this, Layout); +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void MCAssembler::dump() const{ +  raw_ostream &OS = errs(); + +  OS << "<MCAssembler\n"; +  OS << "  Sections:[\n    "; +  for (const_iterator it = begin(), ie = end(); it != ie; ++it) { +    if (it != begin()) OS << ",\n    "; +    it->dump(); +  } +  OS << "],\n"; +  OS << "  Symbols:["; + +  for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) { +    if (it != symbol_begin()) OS << ",\n           "; +    OS << "("; +    it->dump(); +    OS << ", Index:" << it->getIndex() << ", "; +    OS << ")"; +  } +  OS << "]>\n"; +} +#endif  | 
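A note on the FT_Align case of computeFragmentSize() above: the fragment's size is the distance from its layout offset to the next alignment boundary, grown in whole alignment steps until it is a multiple of the backend's minimum nop size (when padding with nops), and dropped to zero if it exceeds the max-bytes-to-emit budget. The sketch below restates that arithmetic outside the MC classes; offsetToAlignmentSketch, MinNopSize and MaxBytesToEmit are stand-ins for llvm::offsetToAlignment(), MCAsmBackend::getMinimumNopSize() and MCAlignFragment::getMaxBytesToEmit(), not real API.

#include <cstdint>

// Bytes needed to advance Offset to the next multiple of Alignment
// (what llvm::offsetToAlignment computes).
static uint64_t offsetToAlignmentSketch(uint64_t Offset, uint64_t Alignment) {
  return (Alignment - (Offset % Alignment)) % Alignment;
}

// Size of an align fragment that pads with nops, mirroring the FT_Align
// case above: grow the padding in whole alignment steps until it is a
// multiple of the minimum nop size, and emit nothing if the result would
// exceed the max-bytes-to-emit budget.
static uint64_t alignFragmentSizeSketch(uint64_t Offset, uint64_t Alignment,
                                        uint64_t MinNopSize,
                                        uint64_t MaxBytesToEmit) {
  uint64_t Size = offsetToAlignmentSketch(Offset, Alignment);
  if (Size > 0) {
    while (Size % MinNopSize != 0)
      Size += Alignment;
  }
  return Size > MaxBytesToEmit ? 0 : Size;
}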
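relaxLEB() above passes the fragment's old size as the pad-to width so that an LEB fragment can only grow across relaxation passes (the PR35809 comment). A minimal sketch of that growth-only encoding, assuming the same padding convention as the PadTo parameter of llvm::encodeULEB128 (redundant 0x80 continuation bytes followed by a final 0x00):

#include <cstdint>
#include <vector>

// Encode Value as ULEB128, padded with redundant continuation bytes so the
// result is at least PadTo bytes long. Re-encoding a value with the previous
// size as PadTo can therefore never shrink the fragment.
static std::vector<uint8_t> encodeULEB128Padded(uint64_t Value, size_t PadTo) {
  std::vector<uint8_t> Out;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    // Keep the continuation bit set while more payload or padding remains.
    if (Value != 0 || Out.size() + 1 < PadTo)
      Byte |= 0x80;
    Out.push_back(Byte);
  } while (Value != 0);
  // Fill with 0x80 bytes and terminate with 0x00 until PadTo is reached.
  while (Out.size() < PadTo)
    Out.push_back(Out.size() + 1 < PadTo ? 0x80 : 0x00);
  return Out;
}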
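The PC-relative branch of evaluateFixup() above reduces to a small piece of arithmetic: the addend plus the A-symbol offset minus the B-symbol offset, minus the address of the fixup itself, with the PC optionally aligned down to 32 bits for certain Thumb fixups. A sketch under that reading; the parameter names are hypothetical stand-ins for Layout.getSymbolOffset(), Layout.getFragmentOffset() and Fixup.getOffset():

#include <cstdint>

// Resolved value of a PC-relative fixup: target minus the (possibly
// 32-bit-aligned) location being fixed up. Like the code above, the
// location is deliberately kept in 32 bits.
static uint64_t pcRelFixupValueSketch(int64_t Addend, uint64_t SymAOffset,
                                      uint64_t SymBOffset,
                                      uint64_t FragmentOffset,
                                      uint64_t FixupOffset,
                                      bool AlignPCDownTo32Bits) {
  uint64_t Value = uint64_t(Addend) + SymAOffset - SymBOffset;
  uint32_t PC = uint32_t(FragmentOffset + FixupOffset);
  // Some ARM fixups in Thumb mode measure from the 32-bit aligned PC
  // (the FKF_IsAlignedDownTo32Bits flag).
  if (AlignPCDownTo32Bits)
    PC &= ~UINT32_C(3);
  return Value - PC;
}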
