Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r--  sys/vm/vm_map.c  413
1 file changed, 241 insertions(+), 172 deletions(-)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index b66b770110fd9..2ec6926b5baf8 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.c,v 1.57.2.2 1997/01/26 03:14:59 dyson Exp $
+ * $Id: vm_map.c,v 1.57.2.3 1997/01/31 04:17:20 dyson Exp $
  */
 
 /*
@@ -153,6 +153,7 @@ vm_size_t kentry_data_size;
 static vm_map_entry_t kentry_free;
 static vm_map_t kmap_free;
 extern char kstack[];
+extern int inmprotect;
 
 static int kentry_count;
 static vm_offset_t mapvm_start, mapvm, mapvmmax;
@@ -170,7 +171,6 @@ static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
         vm_map_entry_t));
-static void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
 
 void
 vm_map_startup()
@@ -606,6 +606,7 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
     register vm_map_entry_t prev_entry;
     vm_map_entry_t temp_entry;
     vm_object_t prev_object;
+    u_char protoeflags;
 
     if ((object != NULL) && (cow & MAP_NOFAULT)) {
         panic("vm_map_insert: paradoxical MAP_NOFAULT request");
@@ -637,48 +638,61 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
         (prev_entry->next->start < end))
         return (KERN_NO_SPACE);
 
-    if ((prev_entry != &map->header) &&
-        (prev_entry->end == start) &&
-        (object == NULL) &&
-        (prev_entry->is_a_map == FALSE) &&
-        (prev_entry->is_sub_map == FALSE) &&
-        (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
-        (prev_entry->protection == prot) &&
-        (prev_entry->max_protection == max) &&
-        (prev_entry->wired_count == 0)) {
+    protoeflags = 0;
+    if (cow & MAP_COPY_NEEDED)
+        protoeflags |= MAP_ENTRY_NEEDS_COPY;
+
+    if (cow & MAP_COPY_ON_WRITE)
+        protoeflags |= MAP_ENTRY_COW;
+
+    if (cow & MAP_NOFAULT)
+        protoeflags |= MAP_ENTRY_NOFAULT;
-
     /*
      * See if we can avoid creating a new entry by extending one of our
-     * neighbors.
+     * neighbors.  Or at least extend the object.
      */
-    u_char needs_copy = (cow & MAP_COPY_NEEDED) != 0;
-    u_char copy_on_write = (cow & MAP_COPY_ON_WRITE) != 0;
-    u_char nofault = (cow & MAP_NOFAULT) != 0;
-
-    if ((needs_copy == prev_entry->needs_copy) &&
-        (copy_on_write == prev_entry->copy_on_write) &&
-        (nofault == prev_entry->nofault) &&
-        (nofault || vm_object_coalesce(prev_entry->object.vm_object,
-            OFF_TO_IDX(prev_entry->offset),
-            (vm_size_t) (prev_entry->end
-                - prev_entry->start),
-            (vm_size_t) (end - prev_entry->end)))) {
-        /*
-         * Coalesced the two objects - can extend the
-         * previous map entry to include the new
-         * range.
-         */
+    if ((object == NULL) &&
+        (prev_entry != &map->header) &&
+        ((prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
+        (prev_entry->end == start) &&
+        (prev_entry->wired_count == 0)) {
+
+        if ((protoeflags == prev_entry->eflags) &&
+            ((cow & MAP_NOFAULT) ||
+             vm_object_coalesce(prev_entry->object.vm_object,
+                OFF_TO_IDX(prev_entry->offset),
+                (vm_size_t) (prev_entry->end - prev_entry->start),
+                (vm_size_t) (end - prev_entry->end)))) {
+
+            /*
+             * Coalesced the two objects.  Can we extend the
+             * previous map entry to include the new range?
+             */
+            if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
+                (prev_entry->protection == prot) &&
+                (prev_entry->max_protection == max)) {
                 map->size += (end - prev_entry->end);
                 prev_entry->end = end;
-                if (!nofault) {
+                if ((cow & MAP_NOFAULT) == 0) {
                     prev_object = prev_entry->object.vm_object;
                     default_pager_convert_to_swapq(prev_object);
                 }
                 return (KERN_SUCCESS);
             }
+            else {
+                object = prev_entry->object.vm_object;
+                offset = prev_entry->offset + (prev_entry->end -
+                    prev_entry->start);
+
+                vm_object_reference(object);
+            }
+        }
     }
+
     /*
      * Create a new entry
      */
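The core of this revision: the five per-entry booleans (is_a_map, is_sub_map, needs_copy, copy_on_write, nofault) collapse into a single u_char eflags bit mask, which is why vm_map_insert() can now compare every entry property in one shot (protoeflags == prev_entry->eflags above). A standalone sketch of the pattern; the flag values here are invented for illustration and are not the constants from <vm/vm_map.h>:

    #include <stdio.h>

    /* Illustrative flag values -- assumptions for this sketch only. */
    #define MAP_ENTRY_IS_A_MAP   0x01
    #define MAP_ENTRY_IS_SUB_MAP 0x02
    #define MAP_ENTRY_COW        0x04
    #define MAP_ENTRY_NEEDS_COPY 0x08
    #define MAP_ENTRY_NOFAULT    0x10

    /* Hypothetical stand-ins for the cow bits vm_map_insert() receives. */
    #define MAP_COPY_NEEDED      0x01
    #define MAP_COPY_ON_WRITE    0x02
    #define MAP_NOFAULT          0x04

    int
    main(void)
    {
        unsigned char protoeflags = 0;
        int cow = MAP_COPY_NEEDED | MAP_COPY_ON_WRITE;

        /* The same translation the hunk above performs. */
        if (cow & MAP_COPY_NEEDED)
            protoeflags |= MAP_ENTRY_NEEDS_COPY;
        if (cow & MAP_COPY_ON_WRITE)
            protoeflags |= MAP_ENTRY_COW;
        if (cow & MAP_NOFAULT)
            protoeflags |= MAP_ENTRY_NOFAULT;

        /* One integer compare now covers several boolean tests. */
        printf("protoeflags = 0x%02x\n", protoeflags);
        return (0);
    }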
@@ -687,26 +701,10 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
     new_entry->start = start;
     new_entry->end = end;
 
-    new_entry->is_a_map = FALSE;
-    new_entry->is_sub_map = FALSE;
+    new_entry->eflags = protoeflags;
     new_entry->object.vm_object = object;
     new_entry->offset = offset;
 
-    if (cow & MAP_COPY_NEEDED)
-        new_entry->needs_copy = TRUE;
-    else
-        new_entry->needs_copy = FALSE;
-
-    if (cow & MAP_COPY_ON_WRITE)
-        new_entry->copy_on_write = TRUE;
-    else
-        new_entry->copy_on_write = FALSE;
-
-    if (cow & MAP_NOFAULT)
-        new_entry->nofault = TRUE;
-    else
-        new_entry->nofault = FALSE;
-
     if (map->is_main_map) {
         new_entry->inheritance = VM_INHERIT_DEFAULT;
         new_entry->protection = prot;
@@ -838,28 +836,19 @@ vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
 }
 
 /*
- * vm_map_simplify_entry:   [ internal use only ]
+ * vm_map_simplify_entry:
  *
- * Simplify the given map entry by:
- *      removing extra sharing maps
- *      [XXX maybe later] merging with a neighbor
+ *      Simplify the given map entry by merging with either neighbor.
  */
-static void
+void
 vm_map_simplify_entry(map, entry)
     vm_map_t map;
     vm_map_entry_t entry;
 {
     vm_map_entry_t next, prev;
-    vm_size_t nextsize, prevsize, esize;
-
-    /*
-     * If this entry corresponds to a sharing map, then see if we can
-     * remove the level of indirection. If it's not a sharing map, then it
-     * points to a VM object, so see if we can merge with either of our
-     * neighbors.
-     */
+    vm_size_t prevsize, esize;
 
-    if (entry->is_sub_map || entry->is_a_map || entry->wired_count)
+    if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
         return;
 
     prev = entry->prev;
@@ -870,14 +859,11 @@ vm_map_simplify_entry(map, entry)
         (!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
         (!prev->object.vm_object || (prev->offset + prevsize == entry->offset)) &&
-        (prev->needs_copy == entry->needs_copy) &&
-        (prev->copy_on_write == entry->copy_on_write) &&
+        (prev->eflags == entry->eflags) &&
         (prev->protection == entry->protection) &&
         (prev->max_protection == entry->max_protection) &&
         (prev->inheritance == entry->inheritance) &&
-        (prev->is_a_map == FALSE) &&
-        (prev->is_sub_map == FALSE) &&
-        (prev->wired_count == 0)) {
+        (prev->wired_count == entry->wired_count)) {
         if (map->first_free == prev)
             map->first_free = entry;
         if (map->hint == prev)
@@ -893,21 +879,17 @@ vm_map_simplify_entry(map, entry)
     next = entry->next;
     if (next != &map->header) {
-        nextsize = next->end - next->start;
         esize = entry->end - entry->start;
         if ((entry->end == next->start) &&
             (next->object.vm_object == entry->object.vm_object) &&
             (!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
             (!entry->object.vm_object || (entry->offset + esize == next->offset)) &&
-            (next->needs_copy == entry->needs_copy) &&
-            (next->copy_on_write == entry->copy_on_write) &&
+            (next->eflags == entry->eflags) &&
             (next->protection == entry->protection) &&
             (next->max_protection == entry->max_protection) &&
             (next->inheritance == entry->inheritance) &&
-            (next->is_a_map == FALSE) &&
-            (next->is_sub_map == FALSE) &&
-            (next->wired_count == 0)) {
+            (next->wired_count == entry->wired_count)) {
             if (map->first_free == next)
                 map->first_free = entry;
             if (map->hint == next)
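With the booleans packed into eflags, the neighbor-merge test in vm_map_simplify_entry() reduces to a short run of equality checks, including the relaxed rule that wired counts must merely match (they no longer have to be zero). A toy model of the predicate, using a simplified struct rather than the kernel's vm_map_entry:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for just the fields the merge test compares. */
    struct toy_entry {
        unsigned long start, end;     /* address range */
        void *object;                 /* backing object */
        unsigned long offset;         /* offset into that object */
        unsigned char eflags;
        int prot, max_prot, inherit, wired;
    };

    /* Mirrors the prev/entry test above: ranges abut, same object,
     * offsets are contiguous, and every attribute word matches. */
    static bool
    can_merge(const struct toy_entry *prev, const struct toy_entry *e)
    {
        unsigned long prevsize = prev->end - prev->start;

        return (prev->end == e->start &&
            prev->object == e->object &&
            (prev->object == NULL || prev->offset + prevsize == e->offset) &&
            prev->eflags == e->eflags &&
            prev->prot == e->prot && prev->max_prot == e->max_prot &&
            prev->inherit == e->inherit && prev->wired == e->wired);
    }

    int
    main(void)
    {
        struct toy_entry a = { 0x1000, 0x2000, (void *)1, 0x0,    0, 3, 7, 0, 0 };
        struct toy_entry b = { 0x2000, 0x3000, (void *)1, 0x1000, 0, 3, 7, 0, 0 };

        printf("mergeable: %s\n", can_merge(&a, &b) ? "yes" : "no");
        return (0);
    }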
@@ -962,7 +944,7 @@ _vm_map_clip_start(map, entry, start)
 
     vm_map_entry_link(map, entry->prev, new_entry);
 
-    if (entry->is_a_map || entry->is_sub_map)
+    if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
         vm_map_reference(new_entry->object.share_map);
     else
         vm_object_reference(new_entry->object.vm_object);
@@ -1006,7 +988,7 @@ _vm_map_clip_end(map, entry, end)
 
     vm_map_entry_link(map, entry, new_entry);
 
-    if (entry->is_a_map || entry->is_sub_map)
+    if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
         vm_map_reference(new_entry->object.share_map);
     else
         vm_object_reference(new_entry->object.vm_object);
@@ -1068,11 +1050,9 @@ vm_map_submap(map, start, end, submap)
         vm_map_clip_end(map, entry, end);
 
         if ((entry->start == start) && (entry->end == end) &&
-            (!entry->is_a_map) &&
-            (entry->object.vm_object == NULL) &&
-            (!entry->copy_on_write)) {
-            entry->is_a_map = FALSE;
-            entry->is_sub_map = TRUE;
+            ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
+            (entry->object.vm_object == NULL)) {
+            entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
             vm_map_reference(entry->object.sub_map = submap);
             result = KERN_SUCCESS;
         }
@@ -1106,8 +1086,9 @@ vm_map_protect(map, start, end, new_prot, set_max)
     if (vm_map_lookup_entry(map, start, &entry)) {
         vm_map_clip_start(map, entry, start);
-    } else
+    } else {
         entry = entry->next;
+    }
 
     /*
      * Make a first pass to check for protection violations.
      */
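Several hunks above lean on vm_map_clip_start()/vm_map_clip_end(), which split one entry in two at a boundary so an attribute change lands on exactly the requested range. The arithmetic in miniature, on a hypothetical simplified structure:

    #include <stdio.h>

    struct range {
        unsigned long start, end, offset;
    };

    /* Split r at addr and return the head piece [start, addr); r keeps
     * [addr, end).  The offset advances so both halves still map the same
     * bytes of the backing object -- the adjustment _vm_map_clip_start()
     * makes before linking in the new entry. */
    static struct range
    clip_start(struct range *r, unsigned long addr)
    {
        struct range head = { r->start, addr, r->offset };

        r->offset += addr - r->start;
        r->start = addr;
        return (head);
    }

    int
    main(void)
    {
        struct range r = { 0x1000, 0x4000, 0 };
        struct range head = clip_start(&r, 0x2000);

        printf("head [%#lx,%#lx) off %#lx; tail [%#lx,%#lx) off %#lx\n",
            head.start, head.end, head.offset, r.start, r.end, r.offset);
        return (0);
    }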
@@ -1115,7 +1096,7 @@
     current = entry;
     while ((current != &map->header) && (current->start < end)) {
-        if (current->is_sub_map) {
+        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
             vm_map_unlock(map);
             return (KERN_INVALID_ARGUMENT);
         }
@@ -1152,11 +1133,11 @@
      */
 
     if (current->protection != old_prot) {
-#define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \
+#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                         VM_PROT_ALL)
 #define max(a,b)    ((a) > (b) ? (a) : (b))
 
-        if (current->is_a_map) {
+        if (current->eflags & MAP_ENTRY_IS_A_MAP) {
             vm_map_entry_t share_entry;
             vm_offset_t share_end;
@@ -1229,9 +1210,22 @@ vm_map_madvise(map, pmap, start, end, advise)
     for(current = entry;
         (current != &map->header) && (current->start < end);
         current = current->next) {
-        if (current->is_a_map || current->is_sub_map) {
+        vm_size_t size = current->end - current->start;
+
+        if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
             continue;
         }
+
+        /*
+         * Create an object if needed
+         */
+        if (current->object.vm_object == NULL) {
+            vm_object_t object;
+            object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
+            current->object.vm_object = object;
+            current->offset = 0;
+        }
+
         vm_map_clip_end(map, current, end);
         switch (advise) {
         case MADV_NORMAL:
@@ -1252,7 +1246,7 @@
         {
             vm_pindex_t pindex;
             int count;
-            vm_size_t size = current->end - current->start;
+            size = current->end - current->start;
             pindex = OFF_TO_IDX(entry->offset);
             count = OFF_TO_IDX(size);
             /*
@@ -1268,7 +1262,7 @@
         {
             vm_pindex_t pindex;
             int count;
-            vm_size_t size = current->end - current->start;
+            size = current->end - current->start;
             pindex = OFF_TO_IDX(current->offset);
             count = OFF_TO_IDX(size);
             vm_object_madvise(current->object.vm_object,
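vm_map_madvise() now allocates a default VM object up front when an entry lacks one, so the advice always has an object to act on. From userland these paths are reached through madvise(2); a minimal exercise of the advice values dispatched above (error handling deliberately thin, and the wiring from the syscall down to this function is assumed, not shown in this diff):

    #include <sys/mman.h>
    #include <stdio.h>

    int
    main(void)
    {
        size_t len = 16 * 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return (1);
        }
        p[0] = 1;                       /* fault one page in */

        /* Two of the hints handled by the switch above. */
        if (madvise(p, len, MADV_WILLNEED) == -1)
            perror("madvise(WILLNEED)");
        if (madvise(p, len, MADV_DONTNEED) == -1)
            perror("madvise(DONTNEED)");

        munmap(p, len);
        return (0);
    }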
@@ -1341,6 +1335,137 @@ vm_map_inherit(map, start, end, new_inheritance)
 }
 
 /*
+ * Implement the semantics of mlock
+ */
+int
+vm_map_user_pageable(map, start, end, new_pageable)
+    register vm_map_t map;
+    register vm_offset_t start;
+    register vm_offset_t end;
+    register boolean_t new_pageable;
+{
+    register vm_map_entry_t entry;
+    vm_map_entry_t start_entry;
+    register vm_offset_t failed = 0;
+    int rv;
+
+    vm_map_lock(map);
+    VM_MAP_RANGE_CHECK(map, start, end);
+
+    if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
+        vm_map_unlock(map);
+        return (KERN_INVALID_ADDRESS);
+    }
+
+    if (new_pageable) {
+
+        entry = start_entry;
+        vm_map_clip_start(map, entry, start);
+
+        /*
+         * Now decrement the wiring count for each region. If a region
+         * becomes completely unwired, unwire its physical pages and
+         * mappings.
+         */
+        lock_set_recursive(&map->lock);
+
+        entry = start_entry;
+        while ((entry != &map->header) && (entry->start < end)) {
+            if (entry->eflags & MAP_ENTRY_USER_WIRED) {
+                vm_map_clip_end(map, entry, end);
+                entry->eflags &= ~MAP_ENTRY_USER_WIRED;
+                entry->wired_count--;
+                if (entry->wired_count == 0)
+                    vm_fault_unwire(map, entry->start, entry->end);
+            }
+            entry = entry->next;
+        }
+        vm_map_simplify_entry(map, start_entry);
+        lock_clear_recursive(&map->lock);
+    } else {
+
+        /*
+         * Because of the possibility of blocking, etc.  We restart
+         * through the process's map entries from beginning so that
+         * we don't end up depending on a map entry that could have
+         * changed.
+         */
+    rescan:
+
+        entry = start_entry;
+
+        while ((entry != &map->header) && (entry->start < end)) {
+
+            if (entry->eflags & MAP_ENTRY_USER_WIRED) {
+                entry = entry->next;
+                continue;
+            }
+
+            if (entry->wired_count != 0) {
+                entry->wired_count++;
+                entry->eflags |= MAP_ENTRY_USER_WIRED;
+                entry = entry->next;
+                continue;
+            }
+
+            /* Here on entry being newly wired */
+
+            if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+                int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
+                if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
+
+                    vm_object_shadow(&entry->object.vm_object,
+                        &entry->offset,
+                        OFF_TO_IDX(entry->end
+                            - entry->start));
+                    entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
+
+                } else if (entry->object.vm_object == NULL) {
+
+                    entry->object.vm_object =
+                        vm_object_allocate(OBJT_DEFAULT,
+                            OFF_TO_IDX(entry->end - entry->start));
+                    entry->offset = (vm_offset_t) 0;
+
+                }
+                default_pager_convert_to_swapq(entry->object.vm_object);
+            }
+
+            vm_map_clip_start(map, entry, start);
+            vm_map_clip_end(map, entry, end);
+
+            entry->wired_count++;
+            entry->eflags |= MAP_ENTRY_USER_WIRED;
+
+            /* First we need to allow map modifications */
+            lock_set_recursive(&map->lock);
+            lock_write_to_read(&map->lock);
+
+            rv = vm_fault_user_wire(map, entry->start, entry->end);
+            if (rv) {
+
+                entry->wired_count--;
+                entry->eflags &= ~MAP_ENTRY_USER_WIRED;
+
+                lock_clear_recursive(&map->lock);
+                vm_map_unlock(map);
+
+                (void) vm_map_user_pageable(map, start, entry->start, TRUE);
+                return rv;
+            }
+
+            lock_clear_recursive(&map->lock);
+            vm_map_unlock(map);
+            vm_map_lock(map);
+
+            goto rescan;
+        }
+    }
+    vm_map_unlock(map);
+    return KERN_SUCCESS;
+}
+
+/*
  * vm_map_pageable:
  *
  *      Sets the pageability of the specified address
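vm_map_user_pageable() carries the semantics of mlock(2)/munlock(2): MAP_ENTRY_USER_WIRED records that the wiring belongs to the user rather than the kernel, so munlock cannot release a kernel wiring, and the rescan loop restarts from scratch after any fault that may have blocked and invalidated the entry list. The userland view, as a sketch (mlock can fail without privilege or against the memorylocked resource limit):

    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        static char buf[4 * 4096];

        /* Wires the pages: each map entry covering buf gets
         * MAP_ENTRY_USER_WIRED and a wired_count bump in
         * vm_map_user_pageable() above. */
        if (mlock(buf, sizeof(buf)) == -1) {
            perror("mlock");
            return (1);
        }
        memset(buf, 1, sizeof(buf));    /* touching it cannot fault now */

        /* Drops the user wiring; the pages become pageable again once
         * wired_count falls to zero. */
        if (munlock(buf, sizeof(buf)) == -1)
            perror("munlock");
        return (0);
    }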
@@ -1467,8 +1592,8 @@ vm_map_pageable(map, start, end, new_pageable)
                  * point to sharing maps, because we won't
                  * hold the lock on the sharing map.
                  */
-                if (!entry->is_a_map && !entry->is_sub_map) {
-                    int copyflag = entry->needs_copy;
+                if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
+                    int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
                     if (copyflag &&
                         ((entry->protection & VM_PROT_WRITE) != 0)) {
@@ -1476,7 +1601,7 @@
                             &entry->offset,
                             OFF_TO_IDX(entry->end
                                 - entry->start));
-                        entry->needs_copy = FALSE;
+                        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                     } else if (entry->object.vm_object == NULL) {
                         entry->object.vm_object =
                             vm_object_allocate(OBJT_DEFAULT,
@@ -1568,6 +1693,7 @@
             (void) vm_map_pageable(map, start, failed, TRUE);
             return (rv);
         }
+        vm_map_simplify_entry(map, start_entry);
     }
 
     vm_map_unlock(map);
@@ -1608,7 +1734,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
      * Make a first pass to check for holes.
      */
     for (current = entry; current->start < end; current = current->next) {
-        if (current->is_sub_map) {
+        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
             vm_map_unlock_read(map);
             return (KERN_INVALID_ARGUMENT);
         }
@@ -1627,7 +1753,7 @@
     for (current = entry; current->start < end; current = current->next) {
         offset = current->offset + (start - current->start);
         size = (end <= current->end ? end : current->end) - start;
-        if (current->is_a_map || current->is_sub_map) {
+        if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
             register vm_map_t smap;
             vm_map_entry_t tentry;
             vm_size_t tsize;
@@ -1718,7 +1844,7 @@ vm_map_entry_delete(map, entry)
     vm_map_entry_unlink(map, entry);
     map->size -= entry->end - entry->start;
 
-    if (entry->is_a_map || entry->is_sub_map) {
+    if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
         vm_map_deallocate(entry->object.share_map);
     } else {
         vm_object_deallocate(entry->object.vm_object);
@@ -1915,7 +2041,8 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
     vm_map_t src_map, dst_map;
     register vm_map_entry_t src_entry, dst_entry;
 {
-    if (src_entry->is_sub_map || dst_entry->is_sub_map)
+    if ((dst_entry->eflags|src_entry->eflags) &
+        (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
         return;
 
     if (src_entry->wired_count == 0) {
@@ -1924,7 +2051,7 @@
         /*
          * If the source entry is marked needs_copy, it is already
          * write-protected.
         */
-        if (!src_entry->needs_copy) {
+        if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
 
             boolean_t su;
 
@@ -1957,11 +2084,8 @@
                 src_entry->object.vm_object->type == OBJT_SWAP))
                 vm_object_collapse(src_entry->object.vm_object);
             ++src_entry->object.vm_object->ref_count;
-            src_entry->copy_on_write = TRUE;
-            src_entry->needs_copy = TRUE;
-
-            dst_entry->needs_copy = TRUE;
-            dst_entry->copy_on_write = TRUE;
+            src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
+            dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
             dst_entry->object.vm_object = src_entry->object.vm_object;
             dst_entry->offset = src_entry->offset;
@@ -2015,14 +2139,14 @@ vmspace_fork(vm1)
     old_entry = old_map->header.next;
     while (old_entry != &old_map->header) {
-        if (old_entry->is_sub_map)
+        if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
             panic("vm_map_fork: encountered a submap");
 
         switch (old_entry->inheritance) {
         case VM_INHERIT_NONE:
             break;
 
-        case VM_INHERIT_SHARE:
+        case VM_INHERIT_SHARE:
             /*
              * Clone the entry, creating the shared object if necessary.
             */
@@ -2033,13 +2157,13 @@
                     old_entry->start));
                 old_entry->object.vm_object = object;
                 old_entry->offset = (vm_offset_t) 0;
-            } else if (old_entry->needs_copy) {
+            } else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                 vm_object_shadow(&old_entry->object.vm_object,
                     &old_entry->offset,
                     OFF_TO_IDX(old_entry->end -
-                        old_entry->start));
+                    old_entry->start));
 
-                old_entry->needs_copy = 0;
+                old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                 object = old_entry->object.vm_object;
             }
@@ -2077,7 +2201,7 @@
             *new_entry = *old_entry;
             new_entry->wired_count = 0;
             new_entry->object.vm_object = NULL;
-            new_entry->is_a_map = FALSE;
+            new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
             vm_map_entry_link(new_map, new_map->header.prev,
                 new_entry);
             vm_map_copy_entry(old_map, new_map, old_entry,
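The vmspace_fork() hunks are where MAP_ENTRY_COW and MAP_ENTRY_NEEDS_COPY get set in tandem on both the parent's and the child's entries. The observable effect from userland is ordinary copy-on-write fork behavior, sketched here:

    #include <sys/mman.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        /* A private anonymous mapping, inherited copy-on-write. */
        int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        pid_t pid;

        if (p == MAP_FAILED)
            return (1);
        *p = 1;

        /* fork() runs vmspace_fork(): both sides share the object
         * read-only until one of them writes. */
        pid = fork();
        if (pid == 0) {
            *p = 2;                     /* triggers the COW fault */
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent still sees %d\n", *p);   /* prints 1 */
        return (0);
    }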
@@ -2173,11 +2297,12 @@ RetryLookup:;
 
         entry = tmp_entry;
         *out_entry = entry;
     }
+
     /*
      * Handle submaps.
      */
 
-    if (entry->is_sub_map) {
+    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
         vm_map_t old_map = map;
 
         *var_map = map = entry->object.sub_map;
@@ -2205,7 +2330,7 @@ RetryLookup:;
     /*
      * If we don't already have a VM object, track it down.
      */
 
-    su = !entry->is_a_map;
+    su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
     if (su) {
         share_map = map;
         share_offset = vaddr;
@@ -2237,7 +2362,7 @@
     /*
      * If the entry was copy-on-write, we either ...
      */
 
-    if (entry->needs_copy) {
+    if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
         /*
          * If we want to write the page, we may as well handle that
          * now since we've got the sharing map locked.
@@ -2264,7 +2389,7 @@
                 &entry->offset,
                 OFF_TO_IDX(entry->end - entry->start));
 
-            entry->needs_copy = FALSE;
+            entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 
             lock_write_to_read(&share_map->lock);
         } else {
@@ -2333,7 +2458,7 @@ vm_map_lookup_done(map, entry)
     /*
      * If this entry references a map, unlock it first.
      */
 
-    if (entry->is_a_map)
+    if (entry->eflags & MAP_ENTRY_IS_A_MAP)
         vm_map_unlock_read(entry->object.share_map);
 
     /*
@@ -2343,62 +2468,6 @@
     vm_map_unlock_read(map);
 }
 
-/*
- *  Routine:    vm_map_simplify
- *  Purpose:
- *      Attempt to simplify the map representation in
- *      the vicinity of the given starting address.
- *  Note:
- *      This routine is intended primarily to keep the
- *      kernel maps more compact -- they generally don't
- *      benefit from the "expand a map entry" technology
- *      at allocation time because the adjacent entry
- *      is often wired down.
- */
-void
-vm_map_simplify(map, start)
-    vm_map_t map;
-    vm_offset_t start;
-{
-    vm_map_entry_t this_entry;
-    vm_map_entry_t prev_entry;
-
-    vm_map_lock(map);
-    if ((vm_map_lookup_entry(map, start, &this_entry)) &&
-        ((prev_entry = this_entry->prev) != &map->header) &&
-        (prev_entry->end == start) &&
-        (prev_entry->object.vm_object == this_entry->object.vm_object) &&
-        ((prev_entry->offset + (prev_entry->end - prev_entry->start))
-            == this_entry->offset) &&
-
-        (map->is_main_map) &&
-
-        (prev_entry->is_a_map == FALSE) &&
-        (prev_entry->is_sub_map == FALSE) &&
-
-        (this_entry->is_a_map == FALSE) &&
-        (this_entry->is_sub_map == FALSE) &&
-
-        (prev_entry->inheritance == this_entry->inheritance) &&
-        (prev_entry->protection == this_entry->protection) &&
-        (prev_entry->max_protection == this_entry->max_protection) &&
-        (prev_entry->wired_count == this_entry->wired_count) &&
-
-        (prev_entry->copy_on_write == this_entry->copy_on_write) &&
-        (prev_entry->needs_copy == this_entry->needs_copy)) {
-        if (map->first_free == this_entry)
-            map->first_free = prev_entry;
-        if (map->hint == this_entry)
-            SAVE_HINT(map, prev_entry);
-        vm_map_entry_unlink(map, this_entry);
-        prev_entry->end = this_entry->end;
-        if (this_entry->object.vm_object)
-            vm_object_deallocate(this_entry->object.vm_object);
-        vm_map_entry_dispose(map, this_entry);
-    }
-    vm_map_unlock(map);
-}
-
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
"needed" : "done"); db_printf("\n"); if ((entry->prev == &map->header) || - (entry->prev->is_a_map) || + (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) || (entry->prev->object.vm_object != entry->object.vm_object)) { db_indent += 2; |