Diffstat (limited to 'COFF/Chunks.cpp')
-rw-r--r--  COFF/Chunks.cpp | 99
1 file changed, 67 insertions(+), 32 deletions(-)
diff --git a/COFF/Chunks.cpp b/COFF/Chunks.cpp
index 7d93c28c86c8..557b02654426 100644
--- a/COFF/Chunks.cpp
+++ b/COFF/Chunks.cpp
@@ -8,10 +8,10 @@
//===----------------------------------------------------------------------===//
#include "Chunks.h"
-#include "Error.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "Writer.h"
+#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
@@ -29,17 +29,14 @@ using llvm::support::ulittle32_t;
namespace lld {
namespace coff {
-SectionChunk::SectionChunk(ObjectFile *F, const coff_section *H)
+SectionChunk::SectionChunk(ObjFile *F, const coff_section *H)
: Chunk(SectionKind), Repl(this), Header(H), File(F),
Relocs(File->getCOFFObj()->getRelocations(Header)),
NumRelocs(std::distance(Relocs.begin(), Relocs.end())) {
// Initialize SectionName.
File->getCOFFObj()->getSectionName(Header, SectionName);
- Align = Header->getAlignment();
-
- // Chunks may be discarded during comdat merging.
- Discarded = false;
+ Alignment = Header->getAlignment();
// If linker GC is disabled, every chunk starts out alive. If linker GC is
// enabled, treat non-comdat sections as roots. Generally optimized object
@@ -62,7 +59,10 @@ static void applySecRel(const SectionChunk *Sec, uint8_t *Off,
fatal("SECREL relocation cannot be applied to absolute symbols");
}
uint64_t SecRel = S - OS->getRVA();
- assert(SecRel < INT32_MAX && "overflow in SECREL relocation");
+ if (SecRel > UINT32_MAX) {
+ error("overflow in SECREL relocation in section: " + Sec->getSectionName());
+ return;
+ }
add32(Off, SecRel);
}
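A minimal standalone sketch of the new SECREL bound check, using made-up RVAs for the symbol (S) and its output section; the real code obtains the section RVA from OS->getRVA().

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t S = 0x2340;           // hypothetical symbol RVA
  uint64_t SectionRVA = 0x2000;  // hypothetical output section RVA
  uint64_t SecRel = S - SectionRVA;
  // The SECREL field is 32 bits wide, so values above UINT32_MAX cannot be
  // encoded; report an error instead of asserting.
  if (SecRel > UINT32_MAX)
    std::printf("overflow in SECREL relocation\n");
  else
    std::printf("SECREL value: 0x%llx\n", (unsigned long long)SecRel);
  return 0;
}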
@@ -119,7 +119,7 @@ static uint16_t readMOV(uint8_t *Off) {
return Imm;
}
-static void applyMOV32T(uint8_t *Off, uint32_t V) {
+void applyMOV32T(uint8_t *Off, uint32_t V) {
uint16_t ImmW = readMOV(Off); // read MOVW operand
uint16_t ImmT = readMOV(Off + 4); // read MOVT operand
uint32_t Imm = ImmW | (ImmT << 16);
@@ -129,6 +129,8 @@ static void applyMOV32T(uint8_t *Off, uint32_t V) {
}
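For context, applyMOV32T recombines the two 16-bit halves carried by a Thumb-2 MOVW/MOVT pair. A sketch of that split, using a hypothetical 32-bit target value:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t V = 0x00401A2C;                 // hypothetical 32-bit target value
  uint16_t ImmW = V & 0xFFFF;              // low half, carried by the MOVW
  uint16_t ImmT = V >> 16;                 // high half, carried by the MOVT
  uint32_t Reassembled = ImmW | ((uint32_t)ImmT << 16);
  std::printf("MOVW=0x%04x MOVT=0x%04x -> 0x%08x\n", (unsigned)ImmW,
              (unsigned)ImmT, (unsigned)Reassembled);
  return 0;
}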
static void applyBranch20T(uint8_t *Off, int32_t V) {
+ if (!isInt<21>(V))
+ fatal("relocation out of range");
uint32_t S = V < 0 ? 1 : 0;
uint32_t J1 = (V >> 19) & 1;
uint32_t J2 = (V >> 18) & 1;
@@ -136,7 +138,7 @@ static void applyBranch20T(uint8_t *Off, int32_t V) {
or16(Off + 2, (J1 << 13) | (J2 << 11) | ((V >> 1) & 0x7ff));
}
-static void applyBranch24T(uint8_t *Off, int32_t V) {
+void applyBranch24T(uint8_t *Off, int32_t V) {
if (!isInt<25>(V))
fatal("relocation out of range");
uint32_t S = V < 0 ? 1 : 0;
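Both branch fixups now reject displacements that do not fit their encodings (21 bits for Branch20T, 25 bits for Branch24T). A sketch of an equivalent signed-range test, with fitsSigned standing in for llvm::isInt:

#include <cstdint>
#include <cstdio>

// Stand-in for llvm::isInt<N>(V): does V fit in an N-bit signed immediate?
template <unsigned N> bool fitsSigned(int64_t V) {
  return V >= -(int64_t(1) << (N - 1)) && V < (int64_t(1) << (N - 1));
}

int main() {
  std::printf("%d\n", fitsSigned<21>(1 << 20));       // 0: one past the top
  std::printf("%d\n", fitsSigned<21>((1 << 20) - 2)); // 1: in range
  std::printf("%d\n", fitsSigned<25>(-(1 << 24)));    // 1: most negative value
  return 0;
}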
@@ -167,36 +169,61 @@ void SectionChunk::applyRelARM(uint8_t *Off, uint16_t Type, OutputSection *OS,
}
}
-static void applyArm64Addr(uint8_t *Off, uint64_t Imm) {
+// Interpret the existing immediate value as a byte offset to the
+// target symbol, then update the instruction with the immediate as
+// the page offset from the current instruction to the target.
+static void applyArm64Addr(uint8_t *Off, uint64_t S, uint64_t P) {
+ uint32_t Orig = read32le(Off);
+ uint64_t Imm = ((Orig >> 29) & 0x3) | ((Orig >> 3) & 0x1FFFFC);
+ S += Imm;
+ Imm = (S >> 12) - (P >> 12);
uint32_t ImmLo = (Imm & 0x3) << 29;
uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
- write32le(Off, (read32le(Off) & ~Mask) | ImmLo | ImmHi);
+ write32le(Off, (Orig & ~Mask) | ImmLo | ImmHi);
}
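A self-contained sketch of the rewritten page-delta computation, assuming a plain adrp x0, #0 instruction word and hypothetical RVAs for the target (S) and the instruction (P):

#include <cstdint>
#include <cstdio>

// Re-encode the ADRP page delta, mirroring the ImmLo/ImmHi placement above.
uint32_t encodeAdrp(uint32_t Orig, uint64_t S, uint64_t P) {
  // Existing immediate in the instruction, interpreted as a byte addend.
  uint64_t Existing = ((Orig >> 29) & 0x3) | ((Orig >> 3) & 0x1FFFFC);
  uint64_t Imm = ((S + Existing) >> 12) - (P >> 12); // page delta
  uint32_t ImmLo = (Imm & 0x3) << 29;
  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
  return (uint32_t)((Orig & ~Mask) | ImmLo | ImmHi);
}

int main() {
  uint32_t Adrp = 0x90000000;                          // adrp x0, #0
  uint32_t Patched = encodeAdrp(Adrp, 0x5000, 0x1000); // hypothetical S and P
  std::printf("patched adrp: 0x%08x\n", (unsigned)Patched); // page delta of 4
  return 0;
}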
// Update the immediate field in an AArch64 ldr, str, or add instruction.
-static void applyArm64Imm(uint8_t *Off, uint64_t Imm) {
+// Optionally limit the range of the written immediate by one or more bits
+// (RangeLimit).
+static void applyArm64Imm(uint8_t *Off, uint64_t Imm, uint32_t RangeLimit) {
uint32_t Orig = read32le(Off);
Imm += (Orig >> 10) & 0xFFF;
Orig &= ~(0xFFF << 10);
- write32le(Off, Orig | ((Imm & 0xFFF) << 10));
+ write32le(Off, Orig | ((Imm & (0xFFF >> RangeLimit)) << 10));
}
+// Add the 12-bit page offset to the existing immediate.
+// Ldr/str instructions store the opcode immediate scaled
+// by the load/store size, giving a larger range for larger
+// loads/stores. The immediate is stored scaled this way both
+// before and after the relocation is applied.
+// Even though larger loads/stores have a larger range, limit the
+// effective offset to 12 bits, since it is intended to be a
+// page offset.
static void applyArm64Ldr(uint8_t *Off, uint64_t Imm) {
- int Size = read32le(Off) >> 30;
- Imm >>= Size;
- applyArm64Imm(Off, Imm);
+ uint32_t Orig = read32le(Off);
+ uint32_t Size = Orig >> 30;
+ // 0x04000000 indicates SIMD/FP registers
+ // 0x00800000 indicates 128 bit
+ if ((Orig & 0x4800000) == 0x4800000)
+ Size += 4;
+ if ((Imm & ((1 << Size) - 1)) != 0)
+ fatal("misaligned ldr/str offset");
+ applyArm64Imm(Off, Imm >> Size, Size);
}
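A sketch of the scaled ldr/str immediate handling described above, covering only the integer-register case and assuming an ldr x1, [x0] instruction word (size field 3, so the stored immediate is the byte offset divided by 8) plus a hypothetical page offset:

#include <cstdint>
#include <cstdio>

int main() {
  // ldr x1, [x0, #0]: size field (bits 31:30) is 3, so the 12-bit immediate
  // is stored as the byte offset divided by 8.
  uint32_t Ldr = 0xF9400001;
  uint64_t PageOff = 0x7F8;                 // hypothetical page offset
  uint32_t Size = Ldr >> 30;                // 3 for a 64-bit integer load
  if ((PageOff & ((1u << Size) - 1)) != 0) {
    std::printf("misaligned ldr/str offset\n");
    return 1;
  }
  uint32_t Scaled = (uint32_t)(PageOff >> Size);           // 0x7F8 / 8 = 0xFF
  uint32_t Patched = (Ldr & ~(0xFFFu << 10)) | (Scaled << 10);
  std::printf("patched ldr: 0x%08x\n", (unsigned)Patched); // imm12 is now 0xFF
  return 0;
}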
void SectionChunk::applyRelARM64(uint8_t *Off, uint16_t Type, OutputSection *OS,
uint64_t S, uint64_t P) const {
switch (Type) {
- case IMAGE_REL_ARM64_PAGEBASE_REL21: applyArm64Addr(Off, (S >> 12) - (P >> 12)); break;
- case IMAGE_REL_ARM64_PAGEOFFSET_12A: applyArm64Imm(Off, S & 0xfff); break;
+ case IMAGE_REL_ARM64_PAGEBASE_REL21: applyArm64Addr(Off, S, P); break;
+ case IMAGE_REL_ARM64_PAGEOFFSET_12A: applyArm64Imm(Off, S & 0xfff, 0); break;
case IMAGE_REL_ARM64_PAGEOFFSET_12L: applyArm64Ldr(Off, S & 0xfff); break;
case IMAGE_REL_ARM64_BRANCH26: or32(Off, ((S - P) & 0x0FFFFFFC) >> 2); break;
case IMAGE_REL_ARM64_ADDR32: add32(Off, S + Config->ImageBase); break;
+ case IMAGE_REL_ARM64_ADDR32NB: add32(Off, S); break;
case IMAGE_REL_ARM64_ADDR64: add64(Off, S + Config->ImageBase); break;
+ case IMAGE_REL_ARM64_SECREL: applySecRel(this, Off, OS, S); break;
default:
fatal("unsupported relocation type 0x" + Twine::utohexstr(Type));
}
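The newly handled IMAGE_REL_ARM64_ADDR32NB stores a plain RVA, while ADDR32 stores a full virtual address. A sketch with assumed values (the image base here is illustrative, not the ARM64 default):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t ImageBase = 0x400000;  // assumed image base, purely illustrative
  uint64_t S = 0x2010;            // hypothetical symbol RVA
  // ADDR32 stores a 32-bit virtual address; ADDR32NB ("no base") stores
  // only the RVA, so it needs no base relocation.
  uint32_t Addr32 = (uint32_t)(S + ImageBase);
  uint32_t Addr32NB = (uint32_t)S;
  std::printf("ADDR32=0x%08x ADDR32NB=0x%08x\n", (unsigned)Addr32,
              (unsigned)Addr32NB);
  return 0;
}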
@@ -224,8 +251,19 @@ void SectionChunk::writeTo(uint8_t *Buf) const {
// Get the output section of the symbol for this relocation. The output
// section is needed to compute SECREL and SECTION relocations used in debug
// info.
- SymbolBody *Body = File->getSymbolBody(Rel.SymbolTableIndex);
- Defined *Sym = cast<Defined>(Body);
+ auto *Sym =
+ dyn_cast_or_null<Defined>(File->getSymbol(Rel.SymbolTableIndex));
+ if (!Sym) {
+ if (isCodeView() || isDWARF())
+ continue;
+ // Symbols in early discarded sections are represented using null pointers,
+ // so we need to retrieve the name from the object file.
+ COFFSymbolRef Sym =
+ check(File->getCOFFObj()->getSymbol(Rel.SymbolTableIndex));
+ StringRef Name;
+ File->getCOFFObj()->getSymbolName(Sym, Name);
+ fatal("relocation against symbol in discarded section: " + Name);
+ }
Chunk *C = Sym->getChunk();
OutputSection *OS = C ? C->getOutputSection() : nullptr;
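A toy model of the fallback above: the resolved-symbol lookup returns null for a symbol defined in an early-discarded section, so the diagnostic name is fetched from the object file's own symbol table instead. The names and table layout here are hypothetical.

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Resolved-symbol table: the entry for a symbol from a discarded comdat
  // is null.
  std::string Main = "main";
  std::vector<std::string *> Resolved = {&Main, nullptr};
  // Raw symbol names as the object file records them (hypothetical).
  std::vector<std::string> RawNames = {"main", "sym_in_discarded_comdat"};
  for (std::size_t I = 0; I < Resolved.size(); ++I) {
    if (Resolved[I]) {
      std::printf("relocation against %s\n", Resolved[I]->c_str());
      continue;
    }
    // Fall back to the object file's own symbol table for the error text.
    std::printf("relocation against symbol in discarded section: %s\n",
                RawNames[I].c_str());
  }
  return 0;
}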
@@ -301,8 +339,8 @@ void SectionChunk::getBaserels(std::vector<Baserel> *Res) {
uint8_t Ty = getBaserelType(Rel);
if (Ty == IMAGE_REL_BASED_ABSOLUTE)
continue;
- SymbolBody *Body = File->getSymbolBody(Rel.SymbolTableIndex);
- if (isa<DefinedAbsolute>(Body))
+ Symbol *Target = File->getSymbol(Rel.SymbolTableIndex);
+ if (!Target || isa<DefinedAbsolute>(Target))
continue;
Res->emplace_back(RVA + Rel.VirtualAddress, Ty);
}
@@ -323,12 +361,8 @@ bool SectionChunk::isCOMDAT() const {
void SectionChunk::printDiscardedMessage() const {
// Removed by dead-stripping. If it's removed by ICF, ICF already
// printed out the name, so don't repeat that here.
- if (Sym && this == Repl) {
- if (Discarded)
- message("Discarded comdat symbol " + Sym->getName());
- else if (!Live)
- message("Discarded " + Sym->getName());
- }
+ if (Sym && this == Repl)
+ message("Discarded " + Sym->getName());
}
StringRef SectionChunk::getDebugName() {
@@ -351,7 +385,7 @@ void SectionChunk::replace(SectionChunk *Other) {
CommonChunk::CommonChunk(const COFFSymbolRef S) : Sym(S) {
// Common symbols are aligned on natural boundaries up to 32 bytes.
// This is what MSVC link.exe does.
- Align = std::min(uint64_t(32), PowerOf2Ceil(Sym.getValue()));
+ Alignment = std::min(uint64_t(32), PowerOf2Ceil(Sym.getValue()));
}
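A sketch of the alignment rule stated in the comment, with powerOf2Ceil standing in for llvm::PowerOf2Ceil and a few made-up common-symbol sizes:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stand-in for llvm::PowerOf2Ceil: round up to the next power of two.
uint64_t powerOf2Ceil(uint64_t V) {
  uint64_t P = 1;
  while (P < V)
    P <<= 1;
  return P;
}

int main() {
  // Hypothetical common-symbol sizes and the alignment they would receive.
  for (uint64_t Size : {1, 3, 7, 12, 48, 100}) {
    uint64_t Align = std::min(uint64_t(32), powerOf2Ceil(Size));
    std::printf("size %3llu -> alignment %llu\n", (unsigned long long)Size,
                (unsigned long long)Align);
  }
  return 0;
}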
uint32_t CommonChunk::getPermissions() const {
@@ -366,7 +400,7 @@ void StringChunk::writeTo(uint8_t *Buf) const {
ImportThunkChunkX64::ImportThunkChunkX64(Defined *S) : ImpSymbol(S) {
// Intel Optimization Manual says that all branch targets
// should be 16-byte aligned. MSVC linker does this too.
- Align = 16;
+ Alignment = 16;
}
void ImportThunkChunkX64::writeTo(uint8_t *Buf) const {
@@ -397,10 +431,9 @@ void ImportThunkChunkARM::writeTo(uint8_t *Buf) const {
}
void ImportThunkChunkARM64::writeTo(uint8_t *Buf) const {
- int64_t PageOff = (ImpSymbol->getRVA() >> 12) - (RVA >> 12);
int64_t Off = ImpSymbol->getRVA() & 0xfff;
memcpy(Buf + OutputSectionOff, ImportThunkARM64, sizeof(ImportThunkARM64));
- applyArm64Addr(Buf + OutputSectionOff, PageOff);
+ applyArm64Addr(Buf + OutputSectionOff, ImpSymbol->getRVA(), RVA);
applyArm64Ldr(Buf + OutputSectionOff + 4, Off);
}
@@ -488,8 +521,10 @@ void BaserelChunk::writeTo(uint8_t *Buf) const {
uint8_t Baserel::getDefaultType() {
switch (Config->Machine) {
case AMD64:
+ case ARM64:
return IMAGE_REL_BASED_DIR64;
case I386:
+ case ARMNT:
return IMAGE_REL_BASED_HIGHLOW;
default:
llvm_unreachable("unknown machine type");
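A sketch of the machine-to-baserel mapping above, using stand-in enums rather than the llvm::COFF definitions (the numeric values are the PE base relocation type codes):

#include <cstdio>

enum Machine { AMD64, ARM64, I386, ARMNT };
enum BaserelType { IMAGE_REL_BASED_HIGHLOW = 3, IMAGE_REL_BASED_DIR64 = 10 };

BaserelType defaultBaserel(Machine M) {
  switch (M) {
  case AMD64:
  case ARM64:
    return IMAGE_REL_BASED_DIR64;   // 64-bit targets: full 64-bit fixups
  case I386:
  case ARMNT:
    return IMAGE_REL_BASED_HIGHLOW; // 32-bit targets: 32-bit fixups
  }
  return IMAGE_REL_BASED_HIGHLOW;
}

int main() {
  std::printf("ARM64 -> %d, ARMNT -> %d\n", defaultBaserel(ARM64),
              defaultBaserel(ARMNT));
  return 0;
}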