aboutsummaryrefslogtreecommitdiff
path: root/sys/vm
diff options
context:
space:
mode:
authorTor Egge <tegge@FreeBSD.org>2001-11-10 22:27:09 +0000
committerTor Egge <tegge@FreeBSD.org>2001-11-10 22:27:09 +0000
commit7c654931979b516f7892b3935be02efef4fd31f2 (patch)
treebf106c86cd0a21d895f558fbd95339a12bae7065 /sys/vm
parentdd9d9112ed5434d8dec0a34de64c8f898c2542a7 (diff)
Notes
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_map.c35
1 file changed, 32 insertions, 3 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 1725f9789b2c..76eebf5c60d1 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1288,6 +1288,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
vm_map_entry_t entry;
vm_map_entry_t start_entry;
vm_offset_t estart;
+ vm_offset_t eend;
int rv;
vm_map_lock(map);
@@ -1365,6 +1366,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
entry->wired_count++;
entry->eflags |= MAP_ENTRY_USER_WIRED;
estart = entry->start;
+ eend = entry->end;
/* First we need to allow map modifications */
vm_map_set_recursive(map);
@@ -1379,8 +1381,15 @@ vm_map_user_pageable(map, start, end, new_pageable)
vm_map_clear_recursive(map);
vm_map_unlock(map);
-
- (void) vm_map_user_pageable(map, start, entry->start, TRUE);
+
+ /*
+ * At this point, the map is unlocked, and
+ * entry might no longer be valid. Use copy
+ * of entry start value obtained while entry
+ * was valid.
+ */
+ (void) vm_map_user_pageable(map, start, estart,
+ TRUE);
return rv;
}
@@ -1390,9 +1399,15 @@ vm_map_user_pageable(map, start, end, new_pageable)
if (vm_map_lookup_entry(map, estart, &entry)
== FALSE) {
vm_map_unlock(map);
+	/*
+	 * vm_fault_user_wire succeeded, thus
+	 * the area between start and eend
+	 * is wired and has to be unwired
+	 * here as part of the cleanup.
+	 */
(void) vm_map_user_pageable(map,
start,
- estart,
+ eend,
TRUE);
return (KERN_INVALID_ADDRESS);
}
@@ -1627,6 +1642,20 @@ vm_map_pageable(map, start, end, new_pageable)
(void) vm_map_pageable(map, start, failed, TRUE);
return (rv);
}
+ /*
+ * An exclusive lock on the map is needed in order to call
+ * vm_map_simplify_entry(). If the current lock on the map
+ * is only a shared lock, an upgrade is needed.
+ */
+ if (vm_map_pmap(map) != kernel_pmap &&
+ vm_map_lock_upgrade(map)) {
+ vm_map_lock(map);
+ if (vm_map_lookup_entry(map, start, &start_entry) ==
+ FALSE) {
+ vm_map_unlock(map);
+ return KERN_SUCCESS;
+ }
+ }
vm_map_simplify_entry(map, start_entry);
}