Diffstat (limited to 'lib/Transforms/Instrumentation/HWAddressSanitizer.cpp')
-rw-r--r--  lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 594
1 file changed, 379 insertions, 215 deletions
diff --git a/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index d04c2b76288f..90a9f4955a4b 100644
--- a/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1,9 +1,8 @@
//===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,6 +11,7 @@
/// based on tagged addressing.
//===----------------------------------------------------------------------===//
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -21,6 +21,7 @@
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
@@ -125,10 +126,10 @@ static cl::opt<bool> ClEnableKhwasan(
// is accessed. The shadow mapping looks like:
// Shadow = (Mem >> scale) + offset
-static cl::opt<unsigned long long> ClMappingOffset(
- "hwasan-mapping-offset",
- cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
- cl::init(0));
+static cl::opt<uint64_t>
+ ClMappingOffset("hwasan-mapping-offset",
+ cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
+ cl::Hidden, cl::init(0));
static cl::opt<bool>
ClWithIfunc("hwasan-with-ifunc",
@@ -148,42 +149,46 @@ static cl::opt<bool>
"in a thread-local ring buffer"),
cl::Hidden, cl::init(true));
static cl::opt<bool>
- ClCreateFrameDescriptions("hwasan-create-frame-descriptions",
- cl::desc("create static frame descriptions"),
- cl::Hidden, cl::init(true));
-
-static cl::opt<bool>
ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
cl::desc("instrument memory intrinsics"),
cl::Hidden, cl::init(true));
+
+static cl::opt<bool>
+ ClInstrumentLandingPads("hwasan-instrument-landing-pads",
+ cl::desc("instrument landing pads"), cl::Hidden,
+ cl::init(true));
+
+static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
+ cl::desc("inline all checks"),
+ cl::Hidden, cl::init(false));
+
namespace {
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
-class HWAddressSanitizer : public FunctionPass {
+class HWAddressSanitizer {
public:
- // Pass identification, replacement for typeid.
- static char ID;
-
- explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
- : FunctionPass(ID) {
+ explicit HWAddressSanitizer(Module &M, bool CompileKernel = false,
+ bool Recover = false) {
this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
ClEnableKhwasan : CompileKernel;
- }
- StringRef getPassName() const override { return "HWAddressSanitizer"; }
+ initializeModule(M);
+ }
- bool runOnFunction(Function &F) override;
- bool doInitialization(Module &M) override;
+ bool sanitizeFunction(Function &F);
+ void initializeModule(Module &M);
void initializeCallbacks(Module &M);
+ Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
Value *getDynamicShadowNonTls(IRBuilder<> &IRB);
void untagPointerOperand(Instruction *I, Value *Addr);
- Value *memToShadow(Value *Shadow, Type *Ty, IRBuilder<> &IRB);
- void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
+ Value *shadowBase();
+ Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
+ void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
unsigned AccessSizeIndex,
Instruction *InsertBefore);
void instrumentMemIntrinsic(MemIntrinsic *MI);
@@ -193,11 +198,15 @@ public:
Value **MaybeMask);
bool isInterestingAlloca(const AllocaInst &AI);
- bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
+ bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
- bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
- SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
+ bool instrumentStack(
+ SmallVectorImpl<AllocaInst *> &Allocas,
+ DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> &AllocaDeclareMap,
+ SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
+ Value *readRegister(IRBuilder<> &IRB, StringRef Name);
+ bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
Value *getNextTagWithCall(IRBuilder<> &IRB);
Value *getStackBaseTag(IRBuilder<> &IRB);
Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
@@ -205,31 +214,14 @@ public:
Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
- Value *emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
+ void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
private:
LLVMContext *C;
std::string CurModuleUniqueId;
Triple TargetTriple;
- Function *HWAsanMemmove, *HWAsanMemcpy, *HWAsanMemset;
-
- // Frame description is a way to pass names/sizes of local variables
- // to the run-time w/o adding extra executable code in every function.
- // We do this by creating a separate section with {PC,Descr} pairs and passing
- // the section beg/end to __hwasan_init_frames() at module init time.
- std::string createFrameString(ArrayRef<AllocaInst*> Allocas);
- void createFrameGlobal(Function &F, const std::string &FrameString);
- // Get the section name for frame descriptions. Currently ELF-only.
- const char *getFrameSection() { return "__hwasan_frames"; }
- const char *getFrameSectionBeg() { return "__start___hwasan_frames"; }
- const char *getFrameSectionEnd() { return "__stop___hwasan_frames"; }
- GlobalVariable *createFrameSectionBound(Module &M, Type *Ty,
- const char *Name) {
- auto GV = new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
- nullptr, Name);
- GV->setVisibility(GlobalValue::HiddenVisibility);
- return GV;
- }
+ FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
+ FunctionCallee HWAsanHandleVfork;
/// This struct defines the shadow mapping using the rule:
/// shadow = (mem >> Scale) + Offset.
@@ -253,48 +245,95 @@ private:
Type *IntptrTy;
Type *Int8PtrTy;
Type *Int8Ty;
+ Type *Int32Ty;
bool CompileKernel;
bool Recover;
Function *HwasanCtorFunction;
- Function *HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
- Function *HwasanMemoryAccessCallbackSized[2];
+ FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
+ FunctionCallee HwasanMemoryAccessCallbackSized[2];
- Function *HwasanTagMemoryFunc;
- Function *HwasanGenerateTagFunc;
- Function *HwasanThreadEnterFunc;
+ FunctionCallee HwasanTagMemoryFunc;
+ FunctionCallee HwasanGenerateTagFunc;
+ FunctionCallee HwasanThreadEnterFunc;
Constant *ShadowGlobal;
Value *LocalDynamicShadow = nullptr;
+ Value *StackBaseTag = nullptr;
GlobalValue *ThreadPtrGlobal = nullptr;
};
+class HWAddressSanitizerLegacyPass : public FunctionPass {
+public:
+ // Pass identification, replacement for typeid.
+ static char ID;
+
+ explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
+ bool Recover = false)
+ : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover) {}
+
+ StringRef getPassName() const override { return "HWAddressSanitizer"; }
+
+ bool doInitialization(Module &M) override {
+ HWASan = llvm::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover);
+ return true;
+ }
+
+ bool runOnFunction(Function &F) override {
+ return HWASan->sanitizeFunction(F);
+ }
+
+ bool doFinalization(Module &M) override {
+ HWASan.reset();
+ return false;
+ }
+
+private:
+ std::unique_ptr<HWAddressSanitizer> HWASan;
+ bool CompileKernel;
+ bool Recover;
+};
+
} // end anonymous namespace
-char HWAddressSanitizer::ID = 0;
+char HWAddressSanitizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(
- HWAddressSanitizer, "hwasan",
+ HWAddressSanitizerLegacyPass, "hwasan",
"HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
false)
INITIALIZE_PASS_END(
- HWAddressSanitizer, "hwasan",
+ HWAddressSanitizerLegacyPass, "hwasan",
"HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
false)
-FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
- bool Recover) {
+FunctionPass *llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel,
+ bool Recover) {
assert(!CompileKernel || Recover);
- return new HWAddressSanitizer(CompileKernel, Recover);
+ return new HWAddressSanitizerLegacyPass(CompileKernel, Recover);
+}
+
+HWAddressSanitizerPass::HWAddressSanitizerPass(bool CompileKernel, bool Recover)
+ : CompileKernel(CompileKernel), Recover(Recover) {}
+
+PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
+ ModuleAnalysisManager &MAM) {
+ HWAddressSanitizer HWASan(M, CompileKernel, Recover);
+ bool Modified = false;
+ for (Function &F : M)
+ Modified |= HWASan.sanitizeFunction(F);
+ if (Modified)
+ return PreservedAnalyses::none();
+ return PreservedAnalyses::all();
}
/// Module-level initialization.
///
/// inserts a call to __hwasan_init to the module's constructor list.
-bool HWAddressSanitizer::doInitialization(Module &M) {
+void HWAddressSanitizer::initializeModule(Module &M) {
LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
auto &DL = M.getDataLayout();
@@ -308,47 +347,35 @@ bool HWAddressSanitizer::doInitialization(Module &M) {
IntptrTy = IRB.getIntPtrTy(DL);
Int8PtrTy = IRB.getInt8PtrTy();
Int8Ty = IRB.getInt8Ty();
+ Int32Ty = IRB.getInt32Ty();
HwasanCtorFunction = nullptr;
if (!CompileKernel) {
std::tie(HwasanCtorFunction, std::ignore) =
- createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
- kHwasanInitName,
- /*InitArgTypes=*/{},
- /*InitArgs=*/{});
- Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
- HwasanCtorFunction->setComdat(CtorComdat);
- appendToGlobalCtors(M, HwasanCtorFunction, 0, HwasanCtorFunction);
-
- // Create a zero-length global in __hwasan_frame so that the linker will
- // always create start and stop symbols.
- //
- // N.B. If we ever start creating associated metadata in this pass this
- // global will need to be associated with the ctor.
- Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
- auto GV =
- new GlobalVariable(M, Int8Arr0Ty, /*isConstantGlobal*/ true,
- GlobalVariable::PrivateLinkage,
- Constant::getNullValue(Int8Arr0Ty), "__hwasan");
- GV->setSection(getFrameSection());
- GV->setComdat(CtorComdat);
- appendToCompilerUsed(M, GV);
-
- IRBuilder<> IRBCtor(HwasanCtorFunction->getEntryBlock().getTerminator());
- IRBCtor.CreateCall(
- declareSanitizerInitFunction(M, "__hwasan_init_frames",
- {Int8PtrTy, Int8PtrTy}),
- {createFrameSectionBound(M, Int8Ty, getFrameSectionBeg()),
- createFrameSectionBound(M, Int8Ty, getFrameSectionEnd())});
+ getOrCreateSanitizerCtorAndInitFunctions(
+ M, kHwasanModuleCtorName, kHwasanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{},
+ // This callback is invoked when the functions are created the first
+ // time. Hook them into the global ctors list in that case:
+ [&](Function *Ctor, FunctionCallee) {
+ Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
+ Ctor->setComdat(CtorComdat);
+ appendToGlobalCtors(M, Ctor, 0, Ctor);
+ });
}
- if (!TargetTriple.isAndroid())
- appendToCompilerUsed(
- M, ThreadPtrGlobal = new GlobalVariable(
- M, IntptrTy, false, GlobalVariable::ExternalLinkage, nullptr,
- "__hwasan_tls", nullptr, GlobalVariable::InitialExecTLSModel));
-
- return true;
+ if (!TargetTriple.isAndroid()) {
+ Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
+ auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
+ GlobalValue::ExternalLinkage, nullptr,
+ "__hwasan_tls", nullptr,
+ GlobalVariable::InitialExecTLSModel);
+ appendToCompilerUsed(M, GV);
+ return GV;
+ });
+ ThreadPtrGlobal = cast<GlobalVariable>(C);
+ }
}
void HWAddressSanitizer::initializeCallbacks(Module &M) {
@@ -357,44 +384,55 @@ void HWAddressSanitizer::initializeCallbacks(Module &M) {
const std::string TypeStr = AccessIsWrite ? "store" : "load";
const std::string EndingStr = Recover ? "_noabort" : "";
- HwasanMemoryAccessCallbackSized[AccessIsWrite] =
- checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
- FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false)));
+ HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
+ FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
- checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + TypeStr +
itostr(1ULL << AccessSizeIndex) + EndingStr,
- FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false)));
+ FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
}
}
- HwasanTagMemoryFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy));
- HwasanGenerateTagFunc = checkSanitizerInterfaceFunction(
- M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty));
+ HwasanTagMemoryFunc = M.getOrInsertFunction(
+ "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
+ HwasanGenerateTagFunc =
+ M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
- if (Mapping.InGlobal)
- ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
- ArrayType::get(IRB.getInt8Ty(), 0));
+ ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
+ ArrayType::get(IRB.getInt8Ty(), 0));
const std::string MemIntrinCallbackPrefix =
CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
- HWAsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
- HWAsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy));
- HWAsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy));
-
- HwasanThreadEnterFunc = checkSanitizerInterfaceFunction(
- M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy()));
+ HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy);
+ HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy);
+ HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt32Ty(), IntptrTy);
+
+ HWAsanHandleVfork =
+ M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
+
+ HwasanThreadEnterFunc =
+ M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy());
+}
+
+Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
+ // An empty inline asm with input reg == output reg.
+ // An opaque no-op cast, basically.
+ InlineAsm *Asm = InlineAsm::get(
+ FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false),
+ StringRef(""), StringRef("=r,0"),
+ /*hasSideEffects=*/false);
+ return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
}
Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
@@ -403,18 +441,12 @@ Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
return nullptr;
if (Mapping.InGlobal) {
- // An empty inline asm with input reg == output reg.
- // An opaque pointer-to-int cast, basically.
- InlineAsm *Asm = InlineAsm::get(
- FunctionType::get(IntptrTy, {ShadowGlobal->getType()}, false),
- StringRef(""), StringRef("=r,0"),
- /*hasSideEffects=*/false);
- return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
+ return getDynamicShadowIfunc(IRB);
} else {
Value *GlobalDynamicAddress =
IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
- kHwasanShadowMemoryDynamicAddress, IntptrTy);
- return IRB.CreateLoad(GlobalDynamicAddress);
+ kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
+ return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
}
}
@@ -506,29 +538,44 @@ void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}
-Value *HWAddressSanitizer::memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB) {
+Value *HWAddressSanitizer::shadowBase() {
+ if (LocalDynamicShadow)
+ return LocalDynamicShadow;
+ return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset),
+ Int8PtrTy);
+}
+
+Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
// Mem >> Scale
Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
if (Mapping.Offset == 0)
- return Shadow;
+ return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
// (Mem >> Scale) + Offset
- Value *ShadowBase;
- if (LocalDynamicShadow)
- ShadowBase = LocalDynamicShadow;
- else
- ShadowBase = ConstantInt::get(Ty, Mapping.Offset);
- return IRB.CreateAdd(Shadow, ShadowBase);
+ return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow);
}
-void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
+void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
unsigned AccessSizeIndex,
Instruction *InsertBefore) {
+ const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
IRBuilder<> IRB(InsertBefore);
+
+ if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
+ TargetTriple.isOSBinFormatELF() && !Recover) {
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
+ IRB.CreateCall(
+ Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
+ {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
+ return;
+ }
+
+ Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
IRB.getInt8Ty());
Value *AddrLong = untagPointer(IRB, PtrLong);
- Value *ShadowLong = memToShadow(AddrLong, PtrLong->getType(), IRB);
- Value *MemTag = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowLong, Int8PtrTy));
+ Value *Shadow = memToShadow(AddrLong, IRB);
+ Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
@@ -540,11 +587,35 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
}
Instruction *CheckTerm =
- SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
+ SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
MDBuilder(*C).createBranchWeights(1, 100000));
IRB.SetInsertPoint(CheckTerm);
- const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
+ Value *OutOfShortGranuleTagRange =
+ IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
+ Instruction *CheckFailTerm =
+ SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
+ MDBuilder(*C).createBranchWeights(1, 100000));
+
+ IRB.SetInsertPoint(CheckTerm);
+ Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
+ PtrLowBits = IRB.CreateAdd(
+ PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
+ Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
+ SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
+ MDBuilder(*C).createBranchWeights(1, 100000),
+ nullptr, nullptr, CheckFailTerm->getParent());
+
+ IRB.SetInsertPoint(CheckTerm);
+ Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
+ InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
+ Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
+ Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
+ SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
+ MDBuilder(*C).createBranchWeights(1, 100000),
+ nullptr, nullptr, CheckFailTerm->getParent());
+
+ IRB.SetInsertPoint(CheckFailTerm);
InlineAsm *Asm;
switch (TargetTriple.getArch()) {
case Triple::x86_64:
@@ -568,6 +639,8 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
report_fatal_error("unsupported architecture");
}
IRB.CreateCall(Asm, PtrLong);
+ if (Recover)
+ cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
}
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
@@ -610,7 +683,6 @@ bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
return false; //FIXME
IRBuilder<> IRB(I);
- Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (isPowerOf2_64(TypeSize) &&
(TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
(Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
@@ -618,13 +690,14 @@ bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (ClInstrumentWithCalls) {
IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
- AddrLong);
+ IRB.CreatePointerCast(Addr, IntptrTy));
} else {
- instrumentMemAccessInline(AddrLong, IsWrite, AccessSizeIndex, I);
+ instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
}
} else {
IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
- {AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8)});
+ {IRB.CreatePointerCast(Addr, IntptrTy),
+ ConstantInt::get(IntptrTy, TypeSize / 8)});
}
untagPointerOperand(I, Addr);
@@ -644,27 +717,33 @@ static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
}
bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
- Value *Tag) {
- size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
- ~(Mapping.getAllocaAlignment() - 1);
+ Value *Tag, size_t Size) {
+ size_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
if (ClInstrumentWithCalls) {
IRB.CreateCall(HwasanTagMemoryFunc,
{IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
- ConstantInt::get(IntptrTy, Size)});
+ ConstantInt::get(IntptrTy, AlignedSize)});
} else {
size_t ShadowSize = Size >> Mapping.Scale;
- Value *ShadowPtr = IRB.CreateIntToPtr(
- memToShadow(IRB.CreatePointerCast(AI, IntptrTy), AI->getType(), IRB),
- Int8PtrTy);
+ Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
// If this memset is not inlined, it will be intercepted in the hwasan
// runtime library. That's OK, because the interceptor skips the checks if
// the address is in the shadow region.
// FIXME: the interceptor is not as fast as real memset. Consider lowering
// llvm.memset right here into either a sequence of stores, or a call to
// hwasan_tag_memory.
- IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
+ if (ShadowSize)
+ IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
+ if (Size != AlignedSize) {
+ IRB.CreateStore(
+ ConstantInt::get(Int8Ty, Size % Mapping.getAllocaAlignment()),
+ IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
+ IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
+ Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
+ AlignedSize - 1));
+ }
}
return true;
}
@@ -674,10 +753,16 @@ static unsigned RetagMask(unsigned AllocaNo) {
// x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
// masks.
// The list does not include the value 255, which is used for UAR.
- static unsigned FastMasks[] = {
- 0, 1, 2, 3, 4, 6, 7, 8, 12, 14, 15, 16, 24,
- 28, 30, 31, 32, 48, 56, 60, 62, 63, 64, 96, 112, 120,
- 124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
+ //
+ // Because we are more likely to use earlier elements of this list than later
+ // ones, it is sorted in increasing order of probability of collision with a
+ // mask allocated (temporally) nearby. The program that generated this list
+ // can be found at:
+ // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
+ static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240,
+ 48, 16, 120, 248, 56, 24, 8, 124, 252,
+ 60, 28, 12, 4, 126, 254, 62, 30, 14,
+ 6, 2, 127, 63, 31, 15, 7, 3, 1};
return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}
@@ -688,6 +773,8 @@ Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
if (ClGenerateTagsWithCalls)
return getNextTagWithCall(IRB);
+ if (StackBaseTag)
+ return StackBaseTag;
// FIXME: use addressofreturnaddress (but implement it in aarch64 backend
// first).
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
@@ -763,7 +850,8 @@ Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
Function *ThreadPointerFunc =
Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
Value *SlotPtr = IRB.CreatePointerCast(
- IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), 0x30),
+ IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
+ IRB.CreateCall(ThreadPointerFunc), 0x30),
Ty->getPointerTo(0));
return SlotPtr;
}
@@ -774,45 +862,21 @@ Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
return nullptr;
}
-// Creates a string with a description of the stack frame (set of Allocas).
-// The string is intended to be human readable.
-// The current form is: Size1 Name1; Size2 Name2; ...
-std::string
-HWAddressSanitizer::createFrameString(ArrayRef<AllocaInst *> Allocas) {
- std::ostringstream Descr;
- for (auto AI : Allocas)
- Descr << getAllocaSizeInBytes(*AI) << " " << AI->getName().str() << "; ";
- return Descr.str();
-}
-
-// Creates a global in the frame section which consists of two pointers:
-// the function PC and the frame string constant.
-void HWAddressSanitizer::createFrameGlobal(Function &F,
- const std::string &FrameString) {
- Module &M = *F.getParent();
- auto DescrGV = createPrivateGlobalForString(M, FrameString, true);
- auto PtrPairTy = StructType::get(F.getType(), DescrGV->getType());
- auto GV = new GlobalVariable(
- M, PtrPairTy, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
- ConstantStruct::get(PtrPairTy, (Constant *)&F, (Constant *)DescrGV),
- "__hwasan");
- GV->setSection(getFrameSection());
- appendToCompilerUsed(M, GV);
- // Put GV into the F's Comadat so that if F is deleted GV can be deleted too.
- if (auto Comdat =
- GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
- GV->setComdat(Comdat);
-}
+void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
+ if (!Mapping.InTls) {
+ LocalDynamicShadow = getDynamicShadowNonTls(IRB);
+ return;
+ }
-Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
- bool WithFrameRecord) {
- if (!Mapping.InTls)
- return getDynamicShadowNonTls(IRB);
+ if (!WithFrameRecord && TargetTriple.isAndroid()) {
+ LocalDynamicShadow = getDynamicShadowIfunc(IRB);
+ return;
+ }
Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
assert(SlotPtr);
- Instruction *ThreadLong = IRB.CreateLoad(SlotPtr);
+ Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
Function *F = IRB.GetInsertBlock()->getParent();
if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
@@ -826,7 +890,7 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
// FIXME: This should call a new runtime function with a custom calling
// convention to avoid needing to spill all arguments here.
IRB.CreateCall(HwasanThreadEnterFunc);
- LoadInst *ReloadThreadLong = IRB.CreateLoad(SlotPtr);
+ LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
@@ -840,15 +904,21 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
if (WithFrameRecord) {
+ StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
+
// Prepare ring buffer data.
- auto PC = IRB.CreatePtrToInt(F, IntptrTy);
+ Value *PC;
+ if (TargetTriple.getArch() == Triple::aarch64)
+ PC = readRegister(IRB, "pc");
+ else
+ PC = IRB.CreatePtrToInt(F, IntptrTy);
auto GetStackPointerFn =
Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
Value *SP = IRB.CreatePtrToInt(
IRB.CreateCall(GetStackPointerFn,
{Constant::getNullValue(IRB.getInt32Ty())}),
IntptrTy);
- // Mix SP and PC. TODO: also add the tag to the mix.
+ // Mix SP and PC.
// Assumptions:
// PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
// SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
@@ -879,16 +949,38 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
// Get shadow base address by aligning RecordPtr up.
// Note: this is not correct if the pointer is already aligned.
// Runtime library will make sure this never happens.
- Value *ShadowBase = IRB.CreateAdd(
+ LocalDynamicShadow = IRB.CreateAdd(
IRB.CreateOr(
ThreadLongMaybeUntagged,
ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
- return ShadowBase;
+ LocalDynamicShadow = IRB.CreateIntToPtr(LocalDynamicShadow, Int8PtrTy);
+}
+
+Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ Function *ReadRegister =
+ Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
+ MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
+ Value *Args[] = {MetadataAsValue::get(*C, MD)};
+ return IRB.CreateCall(ReadRegister, Args);
+}
+
+bool HWAddressSanitizer::instrumentLandingPads(
+ SmallVectorImpl<Instruction *> &LandingPadVec) {
+ for (auto *LP : LandingPadVec) {
+ IRBuilder<> IRB(LP->getNextNode());
+ IRB.CreateCall(
+ HWAsanHandleVfork,
+ {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
+ : "sp")});
+ }
+ return true;
}
bool HWAddressSanitizer::instrumentStack(
SmallVectorImpl<AllocaInst *> &Allocas,
+ DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> &AllocaDeclareMap,
SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
// Ideally, we want to calculate tagged stack base pointer, and rewrite all
// alloca addresses using that. Unfortunately, offsets are not known yet
@@ -913,14 +1005,22 @@ bool HWAddressSanitizer::instrumentStack(
U.set(Replacement);
}
- tagAlloca(IRB, AI, Tag);
+ for (auto *DDI : AllocaDeclareMap.lookup(AI)) {
+ DIExpression *OldExpr = DDI->getExpression();
+ DIExpression *NewExpr = DIExpression::append(
+ OldExpr, {dwarf::DW_OP_LLVM_tag_offset, RetagMask(N)});
+ DDI->setArgOperand(2, MetadataAsValue::get(*C, NewExpr));
+ }
+
+ size_t Size = getAllocaSizeInBytes(*AI);
+ tagAlloca(IRB, AI, Tag, Size);
for (auto RI : RetVec) {
IRB.SetInsertPoint(RI);
// Re-tag alloca memory with the special UAR tag.
Value *Tag = getUARTag(IRB, StackTag);
- tagAlloca(IRB, AI, Tag);
+ tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getAllocaAlignment()));
}
}
@@ -943,7 +1043,7 @@ bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
!AI.isSwiftError());
}
-bool HWAddressSanitizer::runOnFunction(Function &F) {
+bool HWAddressSanitizer::sanitizeFunction(Function &F) {
if (&F == HwasanCtorFunction)
return false;
@@ -955,15 +1055,12 @@ bool HWAddressSanitizer::runOnFunction(Function &F) {
SmallVector<Instruction*, 16> ToInstrument;
SmallVector<AllocaInst*, 8> AllocasToInstrument;
SmallVector<Instruction*, 8> RetVec;
+ SmallVector<Instruction*, 8> LandingPadVec;
+ DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> AllocaDeclareMap;
for (auto &BB : F) {
for (auto &Inst : BB) {
if (ClInstrumentStack)
if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
- // Realign all allocas. We don't want small uninteresting allocas to
- // hide in instrumented alloca's padding.
- if (AI->getAlignment() < Mapping.getAllocaAlignment())
- AI->setAlignment(Mapping.getAllocaAlignment());
- // Instrument some of them.
if (isInterestingAlloca(*AI))
AllocasToInstrument.push_back(AI);
continue;
@@ -973,6 +1070,13 @@ bool HWAddressSanitizer::runOnFunction(Function &F) {
isa<CleanupReturnInst>(Inst))
RetVec.push_back(&Inst);
+ if (auto *DDI = dyn_cast<DbgDeclareInst>(&Inst))
+ if (auto *Alloca = dyn_cast_or_null<AllocaInst>(DDI->getAddress()))
+ AllocaDeclareMap[Alloca].push_back(DDI);
+
+ if (ClInstrumentLandingPads && isa<LandingPadInst>(Inst))
+ LandingPadVec.push_back(&Inst);
+
Value *MaybeMask = nullptr;
bool IsWrite;
unsigned Alignment;
@@ -984,33 +1088,93 @@ bool HWAddressSanitizer::runOnFunction(Function &F) {
}
}
- if (AllocasToInstrument.empty() && ToInstrument.empty())
- return false;
+ initializeCallbacks(*F.getParent());
- if (ClCreateFrameDescriptions && !AllocasToInstrument.empty())
- createFrameGlobal(F, createFrameString(AllocasToInstrument));
+ if (!LandingPadVec.empty())
+ instrumentLandingPads(LandingPadVec);
- initializeCallbacks(*F.getParent());
+ if (AllocasToInstrument.empty() && ToInstrument.empty())
+ return false;
assert(!LocalDynamicShadow);
Instruction *InsertPt = &*F.getEntryBlock().begin();
IRBuilder<> EntryIRB(InsertPt);
- LocalDynamicShadow = emitPrologue(EntryIRB,
- /*WithFrameRecord*/ ClRecordStackHistory &&
- !AllocasToInstrument.empty());
+ emitPrologue(EntryIRB,
+ /*WithFrameRecord*/ ClRecordStackHistory &&
+ !AllocasToInstrument.empty());
bool Changed = false;
if (!AllocasToInstrument.empty()) {
Value *StackTag =
ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
- Changed |= instrumentStack(AllocasToInstrument, RetVec, StackTag);
+ Changed |= instrumentStack(AllocasToInstrument, AllocaDeclareMap, RetVec,
+ StackTag);
+ }
+
+ // Pad and align each of the allocas that we instrumented to stop small
+ // uninteresting allocas from hiding in instrumented alloca's padding and so
+ // that we have enough space to store real tags for short granules.
+ DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
+ for (AllocaInst *AI : AllocasToInstrument) {
+ uint64_t Size = getAllocaSizeInBytes(*AI);
+ uint64_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
+ AI->setAlignment(std::max(AI->getAlignment(), 16u));
+ if (Size != AlignedSize) {
+ Type *AllocatedType = AI->getAllocatedType();
+ if (AI->isArrayAllocation()) {
+ uint64_t ArraySize =
+ cast<ConstantInt>(AI->getArraySize())->getZExtValue();
+ AllocatedType = ArrayType::get(AllocatedType, ArraySize);
+ }
+ Type *TypeWithPadding = StructType::get(
+ AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
+ auto *NewAI = new AllocaInst(
+ TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
+ NewAI->takeName(AI);
+ NewAI->setAlignment(AI->getAlignment());
+ NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
+ NewAI->setSwiftError(AI->isSwiftError());
+ NewAI->copyMetadata(*AI);
+ auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
+ AI->replaceAllUsesWith(Bitcast);
+ AllocaToPaddedAllocaMap[AI] = NewAI;
+ }
+ }
+
+ if (!AllocaToPaddedAllocaMap.empty()) {
+ for (auto &BB : F)
+ for (auto &Inst : BB)
+ if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst))
+ if (auto *AI =
+ dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation()))
+ if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
+ DVI->setArgOperand(
+ 0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI)));
+ for (auto &P : AllocaToPaddedAllocaMap)
+ P.first->eraseFromParent();
+ }
+
+ // If we split the entry block, move any allocas that were originally in the
+ // entry block back into the entry block so that they aren't treated as
+ // dynamic allocas.
+ if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
+ InsertPt = &*F.getEntryBlock().begin();
+ for (auto II = EntryIRB.GetInsertBlock()->begin(),
+ IE = EntryIRB.GetInsertBlock()->end();
+ II != IE;) {
+ Instruction *I = &*II++;
+ if (auto *AI = dyn_cast<AllocaInst>(I))
+ if (isa<ConstantInt>(AI->getArraySize()))
+ I->moveBefore(InsertPt);
+ }
}
for (auto Inst : ToInstrument)
Changed |= instrumentMemAccess(Inst);
LocalDynamicShadow = nullptr;
+ StackBaseTag = nullptr;
return Changed;
}