summaryrefslogtreecommitdiff
path: root/lib/sanitizer_common/sanitizer_procmaps_mac.cc
diff options
context:
space:
mode:
Diffstat (limited to 'lib/sanitizer_common/sanitizer_procmaps_mac.cc')
-rw-r--r--lib/sanitizer_common/sanitizer_procmaps_mac.cc163
1 file changed, 115 insertions, 48 deletions
diff --git a/lib/sanitizer_common/sanitizer_procmaps_mac.cc b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
index 560451a16d90..0167ab18ba13 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_mac.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -36,6 +36,51 @@
namespace __sanitizer {
+// Contains information used to iterate through sections.
+struct MemoryMappedSegmentData {
+ char name[kMaxSegName];
+ uptr nsects;
+ const char *current_load_cmd_addr;
+ u32 lc_type;
+ uptr base_virt_addr;
+ uptr addr_mask;
+};
+
+template <typename Section>
+static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
+ bool isWritable) {
+ const Section *sc = (const Section *)data->current_load_cmd_addr;
+ data->current_load_cmd_addr += sizeof(Section);
+
+ uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+ uptr sec_end = sec_start + sc->size;
+ module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
+ sc->sectname);
+}
+
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // Don't iterate over sections when the caller hasn't set up the
+ // data pointer, when there are no sections, or when the segment
+ // is executable. Avoid iterating over executable sections because
+ // it will confuse libignore, and because the extra granularity
+ // of information is not needed by any sanitizers.
+ if (!data_ || !data_->nsects || IsExecutable()) {
+ module->addAddressRange(start, end, IsExecutable(), IsWritable(),
+ data_ ? data_->name : nullptr);
+ return;
+ }
+
+ do {
+ if (data_->lc_type == LC_SEGMENT) {
+ NextSectionLoad<struct section>(module, data_, IsWritable());
+#ifdef MH_MAGIC_64
+ } else if (data_->lc_type == LC_SEGMENT_64) {
+ NextSectionLoad<struct section_64>(module, data_, IsWritable());
+#endif
+ }
+ } while (--data_->nsects);
+}
+
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
}
@@ -63,13 +108,13 @@ void MemoryMappingLayout::Reset() {
// _dyld_image_count is thread-unsafe. We need to register callbacks for
// adding and removing images which will invalidate the MemoryMappingLayout
// state.
- current_image_ = _dyld_image_count();
- current_load_cmd_count_ = -1;
- current_load_cmd_addr_ = 0;
- current_magic_ = 0;
- current_filetype_ = 0;
- current_arch_ = kModuleArchUnknown;
- internal_memset(current_uuid_, 0, kModuleUUIDSize);
+ data_.current_image = _dyld_image_count();
+ data_.current_load_cmd_count = -1;
+ data_.current_load_cmd_addr = 0;
+ data_.current_magic = 0;
+ data_.current_filetype = 0;
+ data_.current_arch = kModuleArchUnknown;
+ internal_memset(data_.current_uuid, 0, kModuleUUIDSize);
}
// The dyld load address should be unchanged throughout process execution,
@@ -138,39 +183,57 @@ const mach_header *get_dyld_hdr() {
// segment.
// Note that the segment addresses are not necessarily sorted.
template <u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
- const char *lc = current_load_cmd_addr_;
- current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+static bool NextSegmentLoad(MemoryMappedSegment *segment,
+MemoryMappedSegmentData *seg_data, MemoryMappingLayoutData &layout_data) {
+ const char *lc = layout_data.current_load_cmd_addr;
+ layout_data.current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
-
- if (current_image_ == kDyldImageIdx) {
+ uptr base_virt_addr, addr_mask;
+ if (layout_data.current_image == kDyldImageIdx) {
+ base_virt_addr = (uptr)get_dyld_hdr();
// vmaddr is masked with 0xfffff because on macOS versions < 10.12,
// it contains an absolute address rather than an offset for dyld.
// To make matters even more complicated, this absolute address
// isn't actually the absolute segment address, but the offset portion
// of the address is accurate when combined with the dyld base address,
// and the mask will give just this offset.
- segment->start = (sc->vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
- segment->end = (sc->vmaddr & 0xfffff) + sc->vmsize + (uptr)get_dyld_hdr();
+ addr_mask = 0xfffff;
} else {
- const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
- segment->start = sc->vmaddr + dlloff;
- segment->end = sc->vmaddr + sc->vmsize + dlloff;
+ base_virt_addr =
+ (uptr)_dyld_get_image_vmaddr_slide(layout_data.current_image);
+ addr_mask = ~0;
+ }
+
+ segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+ segment->end = segment->start + sc->vmsize;
+ // Most callers don't need section information, so only fill this struct
+ // when required.
+ if (seg_data) {
+ seg_data->nsects = sc->nsects;
+ seg_data->current_load_cmd_addr =
+ (const char *)lc + sizeof(SegmentCommand);
+ seg_data->lc_type = kLCSegment;
+ seg_data->base_virt_addr = base_virt_addr;
+ seg_data->addr_mask = addr_mask;
+ internal_strncpy(seg_data->name, sc->segname,
+ ARRAY_SIZE(seg_data->name));
}
// Return the initial protection.
segment->protection = sc->initprot;
- segment->offset =
- (current_filetype_ == /*MH_EXECUTE*/ 0x2) ? sc->vmaddr : sc->fileoff;
+ segment->offset = (layout_data.current_filetype ==
+ /*MH_EXECUTE*/ 0x2)
+ ? sc->vmaddr
+ : sc->fileoff;
if (segment->filename) {
- const char *src = (current_image_ == kDyldImageIdx)
+ const char *src = (layout_data.current_image == kDyldImageIdx)
? kDyldPath
- : _dyld_get_image_name(current_image_);
+ : _dyld_get_image_name(layout_data.current_image);
internal_strncpy(segment->filename, src, segment->filename_size);
}
- segment->arch = current_arch_;
- internal_memcpy(segment->uuid, current_uuid_, kModuleUUIDSize);
+ segment->arch = layout_data.current_arch;
+ internal_memcpy(segment->uuid, layout_data.current_uuid, kModuleUUIDSize);
return true;
}
return false;
@@ -202,7 +265,7 @@ ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
}
static const load_command *NextCommand(const load_command *lc) {
- return (const load_command *)((char *)lc + lc->cmdsize);
+ return (const load_command *)((const char *)lc + lc->cmdsize);
}
static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
@@ -232,50 +295,53 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
- for (; current_image_ >= kDyldImageIdx; current_image_--) {
- const mach_header *hdr = (current_image_ == kDyldImageIdx)
+ for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
? get_dyld_hdr()
- : _dyld_get_image_header(current_image_);
+ : _dyld_get_image_header(data_.current_image);
if (!hdr) continue;
- if (current_load_cmd_count_ < 0) {
+ if (data_.current_load_cmd_count < 0) {
// Set up for this image;
- current_load_cmd_count_ = hdr->ncmds;
- current_magic_ = hdr->magic;
- current_filetype_ = hdr->filetype;
- current_arch_ = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
- switch (current_magic_) {
+ data_.current_load_cmd_count = hdr->ncmds;
+ data_.current_magic = hdr->magic;
+ data_.current_filetype = hdr->filetype;
+ data_.current_arch = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
+ switch (data_.current_magic) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ data_.current_load_cmd_addr =
+ (const char *)hdr + sizeof(mach_header_64);
break;
}
#endif
case MH_MAGIC: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ data_.current_load_cmd_addr = (const char *)hdr + sizeof(mach_header);
break;
}
default: {
continue;
}
}
- FindUUID((const load_command *)current_load_cmd_addr_, &current_uuid_[0]);
- current_instrumented_ =
- IsModuleInstrumented((const load_command *)current_load_cmd_addr_);
+ FindUUID((const load_command *)data_.current_load_cmd_addr,
+ data_.current_uuid);
+ data_.current_instrumented = IsModuleInstrumented(
+ (const load_command *)data_.current_load_cmd_addr);
}
- for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
- switch (current_magic_) {
- // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+ for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ switch (data_.current_magic) {
+ // data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- segment))
+ segment, segment->data_, data_))
return true;
break;
}
#endif
case MH_MAGIC: {
- if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(segment))
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ segment, segment->data_, data_))
return true;
break;
}
@@ -288,11 +354,13 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
void MemoryMappingLayout::DumpListOfModules(
- InternalMmapVector<LoadedModule> *modules) {
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
InternalScopedString module_name(kMaxPathLength);
MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
- for (uptr i = 0; Next(&segment); i++) {
+ MemoryMappedSegmentData data;
+ segment.data_ = &data;
+ while (Next(&segment)) {
if (segment.filename[0] == '\0') continue;
LoadedModule *cur_module = nullptr;
if (!modules->empty() &&
@@ -302,10 +370,9 @@ void MemoryMappingLayout::DumpListOfModules(
modules->push_back(LoadedModule());
cur_module = &modules->back();
cur_module->set(segment.filename, segment.start, segment.arch,
- segment.uuid, current_instrumented_);
+ segment.uuid, data_.current_instrumented);
}
- cur_module->addAddressRange(segment.start, segment.end,
- segment.IsExecutable(), segment.IsWritable());
+ segment.AddAddressRanges(cur_module);
}
}