Diffstat (limited to 'lib/asan/asan_poisoning.cc')
 lib/asan/asan_poisoning.cc | 47 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 34 insertions(+), 13 deletions(-)
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index 50877ae591151..abb75ab3bf929 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -64,12 +64,9 @@ struct ShadowSegmentEndpoint {
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may not be
- // page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
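
The rewritten function hands the raw [MemToShadow(p), MemToShadow(p + size)) range straight to ReleaseMemoryPagesToOS, moving the page-alignment bookkeeping out of the caller. A minimal sketch of the inward rounding that call plausibly performs, assuming a POSIX madvise() backend; kPageSize and the helper names here are illustrative, the runtime itself queries the page size via GetPageSizeCached():

#include <stdint.h>
#include <sys/mman.h>

static const uintptr_t kPageSize = 4096;  // assumed; real code asks the OS

static uintptr_t RoundUpTo(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }
static uintptr_t RoundDownTo(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }

// Release only the fully page-aligned portion of [beg, end) back to the OS;
// partial pages at either edge stay mapped, matching the old call-site logic.
static void ReleasePagesSketch(uintptr_t beg, uintptr_t end) {
  uintptr_t beg_aligned = RoundUpTo(beg, kPageSize);
  uintptr_t end_aligned = RoundDownTo(end, kPageSize);
  if (beg_aligned < end_aligned)
    madvise((void *)beg_aligned, end_aligned - beg_aligned, MADV_DONTNEED);
}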
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@@ -117,9 +114,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@@ -131,7 +128,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
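
Switching CHECK(a < b) to CHECK_LT(a, b) here is more than style: the two-argument sanitizer macros evaluate each operand once, cast it to u64, and hand both values to the failure reporter, so a failing check prints the actual operands rather than only the stringified expression. A simplified sketch of that shape (the real macros live in sanitizer_common and route through CheckFailed):

#include <cstdio>
#include <cstdlib>

// Simplified stand-in for a sanitizer-style CHECK_LT: on failure it reports
// both operand values, which a plain CHECK(a < b) cannot.
#define CHECK_LT_SKETCH(a, b)                                        \
  do {                                                               \
    unsigned long long v1 = (unsigned long long)(a);                 \
    unsigned long long v2 = (unsigned long long)(b);                 \
    if (!(v1 < v2)) {                                                \
      std::fprintf(stderr, "Check failed: %s < %s (%llu vs %llu)\n", \
                   #a, #b, v1, v2);                                  \
      std::abort();                                                  \
    }                                                                \
  } while (0)

int main() {
  CHECK_LT_SKETCH(1, 2);  // passes silently
  CHECK_LT_SKETCH(3, 2);  // aborts and reports "3 vs 2"
  return 0;
}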
@@ -157,9 +154,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We unpoison memory bytes up to end.offset if they are not
// unpoisoned already.
if (value != 0) {
@@ -167,7 +164,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
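
Both endpoint paths above depend on ASan's partial-shadow encoding: a shadow byte of 0 means the whole 8-byte granule is addressable, a positive value k in 1..7 means only the first k bytes are, and the negative magic values mean the granule is fully poisoned, which is why the code compares signed values against end.offset. A small self-contained decode of that encoding (8-byte granules assumed, as in the default mapping):

#include <cstdio>

// How many leading bytes of an 8-byte granule are addressable, given its
// shadow byte, under ASan's partial-shadow encoding.
static int AddressableBytes(signed char shadow) {
  if (shadow == 0) return 8;                    // whole granule addressable
  if (shadow > 0 && shadow < 8) return shadow;  // first k bytes addressable
  return 0;                                     // poison magic: none addressable
}

int main() {
  std::printf("shadow=5    -> %d bytes addressable\n", AddressableBytes(5));
  std::printf("shadow=0xf1 -> %d bytes addressable\n",
              AddressableBytes((signed char)0xf1));  // stack left redzone
  return 0;
}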
@@ -314,6 +311,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0, size);
+}
+
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf1, size);
+}
+
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf2, size);
+}
+
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf3, size);
+}
+
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf5, size);
+}
+
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf8, size);
+}
+
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
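
The new __asan_set_shadow_* entry points bulk-fill shadow with REAL(memset), bypassing the memset interceptor; the suffixes are ASan's stack shadow magic bytes: 0xf1 left redzone, 0xf2 mid redzone, 0xf3 right redzone, 0xf5 after-return, 0xf8 use-after-scope. An illustrative sketch of how generated code might carve one frame's shadow with them; the frame layout, the MemToShadow constants (x86_64 defaults), and the helper names are assumptions for the example, not part of this patch:

#include <stdint.h>

extern "C" {  // entry points added by this patch
void __asan_set_shadow_00(uintptr_t addr, uintptr_t size);
void __asan_set_shadow_f1(uintptr_t addr, uintptr_t size);
void __asan_set_shadow_f3(uintptr_t addr, uintptr_t size);
}

// Default x86_64 mapping (assumed): shadow = (addr >> 3) + 0x7fff8000.
static uintptr_t MemToShadowSketch(uintptr_t a) { return (a >> 3) + 0x7fff8000; }

// Hypothetical frame: 32-byte left redzone, 64 addressable bytes, 32-byte
// right redzone; one shadow byte covers 8 application bytes.
static void PoisonFrameSketch(uintptr_t frame) {
  __asan_set_shadow_f1(MemToShadowSketch(frame), 32 / 8);
  __asan_set_shadow_00(MemToShadowSketch(frame + 32), 64 / 8);
  __asan_set_shadow_f3(MemToShadowSketch(frame + 96), 32 / 8);
}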
@@ -388,7 +409,7 @@ const void *__sanitizer_contiguous_container_find_bad_address(
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
- uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
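
The one-character fix matters: r1 is the probe window at the start of the container, so its end must be clamped to beg + kMaxRangeToCheck (and to mid), not end + kMaxRangeToCheck, which always exceeds mid and would have made the first window scan all the way to mid. A self-contained sketch of the three-window check this function performs, with a caller-supplied predicate standing in for the shadow lookup (helper names are illustrative):

#include <algorithm>
#include <cstdint>

using uptr = uintptr_t;
using IsBadByteFn = bool (*)(uptr addr);

// Scan [beg, end); return the first bad address, or 0 if the range is clean.
static uptr FindBadAddress(uptr beg, uptr end, IsBadByteFn is_bad) {
  for (uptr p = beg; p < end; p++)
    if (is_bad(p)) return p;
  return 0;
}

// Probe three small windows instead of the whole container, mirroring the
// r1/r2/r3 ranges above: the start, the area around mid (the boundary
// between used and unused capacity), and the tail.
uptr QuickContainerCheck(uptr beg, uptr mid, uptr end, IsBadByteFn is_bad) {
  const uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = std::min(beg + kMaxRangeToCheck, mid);  // the fixed clamp
  uptr r2_beg = std::max(beg, mid - kMaxRangeToCheck);  // wraps if mid < 32,
  uptr r2_end = std::min(end, mid + kMaxRangeToCheck);  // leaving r2 empty
  uptr r3_beg = std::max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  if (uptr bad = FindBadAddress(r1_beg, r1_end, is_bad)) return bad;
  if (uptr bad = FindBadAddress(r2_beg, r2_end, is_bad)) return bad;
  return FindBadAddress(r3_beg, r3_end, is_bad);
}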