summaryrefslogtreecommitdiff
path: root/sys/vm
diff options
context:
space:
mode:
authorKonstantin Belousov <kib@FreeBSD.org>2009-06-23 20:45:22 +0000
committerKonstantin Belousov <kib@FreeBSD.org>2009-06-23 20:45:22 +0000
commit3364c323e6ad143e0e95e2d1c7c3c3b880515860 (patch)
treeefadbd0bda4d9f0ec36869d4d465b2cabf2dcd1b /sys/vm
parent224fbf9fd641d4f4b44cc3d6a44c7eb1b272968a (diff)
Notes
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/default_pager.c25
-rw-r--r--sys/vm/device_pager.c5
-rw-r--r--sys/vm/phys_pager.c2
-rw-r--r--sys/vm/swap_pager.c159
-rw-r--r--sys/vm/vm.h7
-rw-r--r--sys/vm/vm_extern.h2
-rw-r--r--sys/vm/vm_fault.c6
-rw-r--r--sys/vm/vm_kern.c9
-rw-r--r--sys/vm/vm_map.c312
-rw-r--r--sys/vm/vm_map.h3
-rw-r--r--sys/vm/vm_mmap.c7
-rw-r--r--sys/vm/vm_object.c73
-rw-r--r--sys/vm/vm_object.h5
-rw-r--r--sys/vm/vm_pager.c8
-rw-r--r--sys/vm/vm_pager.h6
-rw-r--r--sys/vm/vnode_pager.c7
16 files changed, 575 insertions, 61 deletions
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 485571b5b904..ceb2c7761271 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
+#include <sys/resourcevar.h>
#include <sys/mutex.h>
#include <vm/vm.h>
@@ -53,7 +54,7 @@ __FBSDID("$FreeBSD$");
#include <vm/swap_pager.h>
static vm_object_t default_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
- vm_ooffset_t);
+ vm_ooffset_t, struct ucred *);
static void default_pager_dealloc(vm_object_t);
static int default_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void default_pager_putpages(vm_object_t, vm_page_t *, int,
@@ -76,12 +77,28 @@ struct pagerops defaultpagerops = {
*/
static vm_object_t
default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
- vm_ooffset_t offset)
+ vm_ooffset_t offset, struct ucred *cred)
{
+ vm_object_t object;
+ struct uidinfo *uip;
+
if (handle != NULL)
panic("default_pager_alloc: handle specified");
-
- return vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(round_page(offset + size)));
+ if (cred != NULL) {
+ uip = cred->cr_ruidinfo;
+ if (!swap_reserve_by_uid(size, uip))
+ return (NULL);
+ uihold(uip);
+ }
+ object = vm_object_allocate(OBJT_DEFAULT,
+ OFF_TO_IDX(round_page(offset + size)));
+ if (cred != NULL) {
+ VM_OBJECT_LOCK(object);
+ object->uip = uip;
+ object->charge = size;
+ VM_OBJECT_UNLOCK(object);
+ }
+ return (object);
}
/*
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index afcde979d16c..69d6dcabf23c 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
- vm_ooffset_t);
+ vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
@@ -97,7 +97,8 @@ dev_pager_init()
* MPSAFE
*/
static vm_object_t
-dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff)
+dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred)
{
struct cdev *dev;
vm_object_t object, object1;
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index fe50ff84c44c..42cdab3ebeca 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -60,7 +60,7 @@ phys_pager_init(void)
*/
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
- vm_ooffset_t foff)
+ vm_ooffset_t foff, struct ucred *cred)
{
vm_object_t object, object1;
vm_pindex_t pindex;
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 769a11d9c152..bcc414916639 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -86,6 +86,8 @@ __FBSDID("$FreeBSD$");
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
+#include <sys/resource.h>
+#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
@@ -152,6 +154,127 @@ static int nswapdev; /* Number of swap devices */
int swap_pager_avail;
static int swdev_syscall_active = 0; /* serialize swap(on|off) */
+static vm_ooffset_t swap_total;
+SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0, "");
+static vm_ooffset_t swap_reserved;
+SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0, "");
+static int overcommit = 0;
+SYSCTL_INT(_vm, OID_AUTO, overcommit, CTLFLAG_RW, &overcommit, 0, "");
+
+/* bits from overcommit */
+#define SWAP_RESERVE_FORCE_ON (1 << 0)
+#define SWAP_RESERVE_RLIMIT_ON (1 << 1)
+#define SWAP_RESERVE_ALLOW_NONWIRED (1 << 2)
+
+int
+swap_reserve(vm_ooffset_t incr)
+{
+
+ return (swap_reserve_by_uid(incr, curthread->td_ucred->cr_ruidinfo));
+}
+
+int
+swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip)
+{
+ vm_ooffset_t r, s, max;
+ int res, error;
+ static int curfail;
+ static struct timeval lastfail;
+
+ if (incr & PAGE_MASK)
+ panic("swap_reserve: & PAGE_MASK");
+
+ res = 0;
+ error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA);
+ mtx_lock(&sw_dev_mtx);
+ r = swap_reserved + incr;
+ if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
+ s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count;
+ s *= PAGE_SIZE;
+ } else
+ s = 0;
+ s += swap_total;
+ if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
+ (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
+ res = 1;
+ swap_reserved = r;
+ }
+ mtx_unlock(&sw_dev_mtx);
+
+ if (res) {
+ PROC_LOCK(curproc);
+ UIDINFO_VMSIZE_LOCK(uip);
+ error = priv_check(curthread, PRIV_VM_SWAP_NORLIMIT);
+ max = (error != 0) ? lim_cur(curproc, RLIMIT_SWAP) : 0;
+ if (max != 0 && uip->ui_vmsize + incr > max &&
+ (overcommit & SWAP_RESERVE_RLIMIT_ON) != 0)
+ res = 0;
+ else
+ uip->ui_vmsize += incr;
+ UIDINFO_VMSIZE_UNLOCK(uip);
+ PROC_UNLOCK(curproc);
+ if (!res) {
+ mtx_lock(&sw_dev_mtx);
+ swap_reserved -= incr;
+ mtx_unlock(&sw_dev_mtx);
+ }
+ }
+	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
+		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
+		uip->ui_uid, curproc->p_pid, (intmax_t)incr);
+	}
+
+ return (res);
+}
+
+void
+swap_reserve_force(vm_ooffset_t incr)
+{
+ struct uidinfo *uip;
+
+ mtx_lock(&sw_dev_mtx);
+ swap_reserved += incr;
+ mtx_unlock(&sw_dev_mtx);
+
+ uip = curthread->td_ucred->cr_ruidinfo;
+ PROC_LOCK(curproc);
+ UIDINFO_VMSIZE_LOCK(uip);
+ uip->ui_vmsize += incr;
+ UIDINFO_VMSIZE_UNLOCK(uip);
+ PROC_UNLOCK(curproc);
+}
+
+void
+swap_release(vm_ooffset_t decr)
+{
+ struct uidinfo *uip;
+
+ PROC_LOCK(curproc);
+ uip = curthread->td_ucred->cr_ruidinfo;
+ swap_release_by_uid(decr, uip);
+ PROC_UNLOCK(curproc);
+}
+
+void
+swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip)
+{
+
+ if (decr & PAGE_MASK)
+ panic("swap_release: & PAGE_MASK");
+
+ mtx_lock(&sw_dev_mtx);
+ if (swap_reserved < decr)
+ panic("swap_reserved < decr");
+ swap_reserved -= decr;
+ mtx_unlock(&sw_dev_mtx);
+
+ UIDINFO_VMSIZE_LOCK(uip);
+ if (uip->ui_vmsize < decr)
+ printf("negative vmsize for uid = %d\n", uip->ui_uid);
+ uip->ui_vmsize -= decr;
+ UIDINFO_VMSIZE_UNLOCK(uip);
+}
+
static void swapdev_strategy(struct buf *, struct swdevt *sw);
#define SWM_FREE 0x02 /* free, period */
@@ -198,7 +321,7 @@ static struct vm_object swap_zone_obj;
*/
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size,
- vm_prot_t prot, vm_ooffset_t offset);
+ vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void swap_pager_dealloc(vm_object_t object);
static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
@@ -440,13 +563,13 @@ swap_pager_swap_init(void)
*/
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
- vm_ooffset_t offset)
+ vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
vm_pindex_t pindex;
+ struct uidinfo *uip;
pindex = OFF_TO_IDX(offset + PAGE_MASK + size);
-
if (handle) {
mtx_lock(&Giant);
/*
@@ -457,21 +580,41 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
sx_xlock(&sw_alloc_sx);
object = vm_pager_object_lookup(NOBJLIST(handle), handle);
-
if (object == NULL) {
+ if (cred != NULL) {
+ uip = cred->cr_ruidinfo;
+ if (!swap_reserve_by_uid(size, uip)) {
+ sx_xunlock(&sw_alloc_sx);
+ mtx_unlock(&Giant);
+ return (NULL);
+ }
+ uihold(uip);
+ }
object = vm_object_allocate(OBJT_DEFAULT, pindex);
- object->handle = handle;
-
VM_OBJECT_LOCK(object);
+ object->handle = handle;
+ if (cred != NULL) {
+ object->uip = uip;
+ object->charge = size;
+ }
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
}
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
} else {
+ if (cred != NULL) {
+ uip = cred->cr_ruidinfo;
+ if (!swap_reserve_by_uid(size, uip))
+ return (NULL);
+ uihold(uip);
+ }
object = vm_object_allocate(OBJT_DEFAULT, pindex);
-
VM_OBJECT_LOCK(object);
+ if (cred != NULL) {
+ object->uip = uip;
+ object->charge = size;
+ }
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
}
@@ -2039,6 +2182,7 @@ swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strateg
TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
nswapdev++;
swap_pager_avail += nblks;
+ swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
swp_sizecheck();
mtx_unlock(&sw_dev_mtx);
}
@@ -2143,6 +2287,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
swap_pager_avail -= blist_fill(sp->sw_blist,
dvbase, dmmax);
}
+ swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
mtx_unlock(&sw_dev_mtx);
/*
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 1629146403be..57f216398e2f 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -133,5 +133,12 @@ struct kva_md_info {
extern struct kva_md_info kmi;
extern void vm_ksubmap_init(struct kva_md_info *);
+struct uidinfo;
+int swap_reserve(vm_ooffset_t incr);
+int swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip);
+void swap_reserve_force(vm_ooffset_t incr);
+void swap_release(vm_ooffset_t decr);
+void swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip);
+
#endif /* VM_H */
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 3c0ede17d531..69bf64c85a02 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -63,7 +63,7 @@ void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
-struct vmspace *vmspace_fork(struct vmspace *);
+struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 74b0df4798ac..43743f494f02 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1163,7 +1163,11 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
VM_OBJECT_LOCK(dst_object);
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
-
+ if (dst_entry->uip != NULL) {
+ dst_object->uip = dst_entry->uip;
+ dst_object->charge = dst_entry->end - dst_entry->start;
+ dst_entry->uip = NULL;
+ }
prot = dst_entry->max_protection;
/*
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 96fe61b4b151..90065721da8a 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -235,7 +235,8 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
*min = vm_map_min(parent);
ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
- VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
+ VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
+ MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
*max = *min + size;
@@ -422,6 +423,8 @@ kmem_alloc_wait(map, size)
vm_offset_t addr;
size = round_page(size);
+ if (!swap_reserve(size))
+ return (0);
for (;;) {
/*
@@ -434,12 +437,14 @@ kmem_alloc_wait(map, size)
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
vm_map_unlock(map);
+ swap_release(size);
return (0);
}
map->needs_wakeup = TRUE;
vm_map_unlock_and_wait(map, 0);
}
- vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
+ vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
+ VM_PROT_ALL, MAP_ACC_CHARGED);
vm_map_unlock(map);
return (addr);
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index d6775b78630d..82d37e604c40 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -149,6 +149,10 @@ static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
+#define ENTRY_CHARGED(e) ((e)->uip != NULL || \
+ ((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
+ !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
+
/*
* PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
* stable.
@@ -1076,6 +1080,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
+ struct uidinfo *uip;
+ boolean_t charge_prev_obj;
VM_MAP_ASSERT_LOCKED(map);
@@ -1103,6 +1109,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
return (KERN_NO_SPACE);
protoeflags = 0;
+ charge_prev_obj = FALSE;
if (cow & MAP_COPY_ON_WRITE)
protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
@@ -1118,6 +1125,27 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if (cow & MAP_DISABLE_COREDUMP)
protoeflags |= MAP_ENTRY_NOCOREDUMP;
+ uip = NULL;
+ KASSERT((object != kmem_object && object != kernel_object) ||
+ ((object == kmem_object || object == kernel_object) &&
+ !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
+ ("kmem or kernel object and cow"));
+ if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
+ goto charged;
+ if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
+ ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
+ if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
+ return (KERN_RESOURCE_SHORTAGE);
+		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
+		object->uip == NULL,
+		("OVERCOMMIT: vm_map_insert o %p", object));
+ uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(uip);
+ if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
+ charge_prev_obj = TRUE;
+ }
+
+charged:
if (object != NULL) {
/*
* OBJ_ONEMAPPING must be cleared unless this mapping
@@ -1135,11 +1163,13 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
(prev_entry->eflags == protoeflags) &&
(prev_entry->end == start) &&
(prev_entry->wired_count == 0) &&
- ((prev_entry->object.vm_object == NULL) ||
- vm_object_coalesce(prev_entry->object.vm_object,
- prev_entry->offset,
- (vm_size_t)(prev_entry->end - prev_entry->start),
- (vm_size_t)(end - prev_entry->end)))) {
+ (prev_entry->uip == uip ||
+ (prev_entry->object.vm_object != NULL &&
+ (prev_entry->object.vm_object->uip == uip))) &&
+ vm_object_coalesce(prev_entry->object.vm_object,
+ prev_entry->offset,
+ (vm_size_t)(prev_entry->end - prev_entry->start),
+ (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
/*
* We were able to extend the object. Determine if we
* can extend the previous map entry to include the
@@ -1152,6 +1182,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
prev_entry->end = end;
vm_map_entry_resize_free(map, prev_entry);
vm_map_simplify_entry(map, prev_entry);
+ if (uip != NULL)
+ uifree(uip);
return (KERN_SUCCESS);
}
@@ -1165,6 +1197,12 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
offset = prev_entry->offset +
(prev_entry->end - prev_entry->start);
vm_object_reference(object);
+ if (uip != NULL && object != NULL && object->uip != NULL &&
+ !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ /* Object already accounts for this uid. */
+ uifree(uip);
+ uip = NULL;
+ }
}
/*
@@ -1179,6 +1217,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry = vm_map_entry_create(map);
new_entry->start = start;
new_entry->end = end;
+ new_entry->uip = NULL;
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
@@ -1190,6 +1229,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->max_protection = max;
new_entry->wired_count = 0;
+ KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
+ ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
+ new_entry->uip = uip;
+
/*
* Insert the new entry into the list
*/
@@ -1398,7 +1441,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
(prev->protection == entry->protection) &&
(prev->max_protection == entry->max_protection) &&
(prev->inheritance == entry->inheritance) &&
- (prev->wired_count == entry->wired_count)) {
+ (prev->wired_count == entry->wired_count) &&
+ (prev->uip == entry->uip)) {
vm_map_entry_unlink(map, prev);
entry->start = prev->start;
entry->offset = prev->offset;
@@ -1416,6 +1460,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
*/
if (prev->object.vm_object)
vm_object_deallocate(prev->object.vm_object);
+ if (prev->uip != NULL)
+ uifree(prev->uip);
vm_map_entry_dispose(map, prev);
}
}
@@ -1431,7 +1477,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
(next->protection == entry->protection) &&
(next->max_protection == entry->max_protection) &&
(next->inheritance == entry->inheritance) &&
- (next->wired_count == entry->wired_count)) {
+ (next->wired_count == entry->wired_count) &&
+ (next->uip == entry->uip)) {
vm_map_entry_unlink(map, next);
entry->end = next->end;
vm_map_entry_resize_free(map, entry);
@@ -1441,6 +1488,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
*/
if (next->object.vm_object)
vm_object_deallocate(next->object.vm_object);
+ if (next->uip != NULL)
+ uifree(next->uip);
vm_map_entry_dispose(map, next);
}
}
@@ -1489,6 +1538,21 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
+ if (entry->uip != NULL) {
+ object->uip = entry->uip;
+ object->charge = entry->end - entry->start;
+ entry->uip = NULL;
+ }
+ } else if (entry->object.vm_object != NULL &&
+ ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
+ entry->uip != NULL) {
+ VM_OBJECT_LOCK(entry->object.vm_object);
+ KASSERT(entry->object.vm_object->uip == NULL,
+ ("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
+ entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->charge = entry->end - entry->start;
+ VM_OBJECT_UNLOCK(entry->object.vm_object);
+ entry->uip = NULL;
}
new_entry = vm_map_entry_create(map);
@@ -1497,6 +1561,8 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
new_entry->end = start;
entry->offset += (start - entry->start);
entry->start = start;
+ if (new_entry->uip != NULL)
+ uihold(entry->uip);
vm_map_entry_link(map, entry->prev, new_entry);
@@ -1542,6 +1608,21 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
+ if (entry->uip != NULL) {
+ object->uip = entry->uip;
+ object->charge = entry->end - entry->start;
+ entry->uip = NULL;
+ }
+ } else if (entry->object.vm_object != NULL &&
+ ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
+ entry->uip != NULL) {
+ VM_OBJECT_LOCK(entry->object.vm_object);
+ KASSERT(entry->object.vm_object->uip == NULL,
+ ("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
+ entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->charge = entry->end - entry->start;
+ VM_OBJECT_UNLOCK(entry->object.vm_object);
+ entry->uip = NULL;
}
/*
@@ -1552,6 +1633,8 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
new_entry->start = entry->end = end;
new_entry->offset += (end - entry->start);
+ if (new_entry->uip != NULL)
+ uihold(entry->uip);
vm_map_entry_link(map, entry, new_entry);
@@ -1724,6 +1807,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
{
vm_map_entry_t current;
vm_map_entry_t entry;
+ vm_object_t obj;
+ struct uidinfo *uip;
vm_map_lock(map);
@@ -1751,6 +1836,61 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
current = current->next;
}
+
+ /*
+ * Do an accounting pass for private read-only mappings that
+ * now will do cow due to allowed write (e.g. debugger sets
+ * breakpoint on text segment)
+ */
+ for (current = entry; (current != &map->header) &&
+ (current->start < end); current = current->next) {
+
+ vm_map_clip_end(map, current, end);
+
+ if (set_max ||
+ ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
+ ENTRY_CHARGED(current)) {
+ continue;
+ }
+
+ uip = curthread->td_ucred->cr_ruidinfo;
+ obj = current->object.vm_object;
+
+ if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ if (!swap_reserve(current->end - current->start)) {
+ vm_map_unlock(map);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ uihold(uip);
+ current->uip = uip;
+ continue;
+ }
+
+ VM_OBJECT_LOCK(obj);
+ if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
+ VM_OBJECT_UNLOCK(obj);
+ continue;
+ }
+
+ /*
+ * Charge for the whole object allocation now, since
+ * we cannot distinguish between non-charged and
+ * charged clipped mapping of the same object later.
+ */
+ KASSERT(obj->charge == 0,
+ ("vm_map_protect: object %p overcharged\n", obj));
+ if (!swap_reserve(ptoa(obj->size))) {
+ VM_OBJECT_UNLOCK(obj);
+ vm_map_unlock(map);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+
+ uihold(uip);
+ obj->uip = uip;
+ obj->charge = ptoa(obj->size);
+ VM_OBJECT_UNLOCK(obj);
+ }
+
/*
* Go back and fix up protections. [Note that clipping is not
* necessary the second time.]
@@ -1759,8 +1899,6 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
while ((current != &map->header) && (current->start < end)) {
vm_prot_t old_prot;
- vm_map_clip_end(map, current, end);
-
old_prot = current->protection;
if (set_max)
current->protection =
@@ -2470,14 +2608,25 @@ static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
vm_object_t object;
- vm_pindex_t offidxstart, offidxend, count;
+ vm_pindex_t offidxstart, offidxend, count, size1;
+ vm_ooffset_t size;
vm_map_entry_unlink(map, entry);
- map->size -= entry->end - entry->start;
+ object = entry->object.vm_object;
+ size = entry->end - entry->start;
+ map->size -= size;
+
+ if (entry->uip != NULL) {
+ swap_release_by_uid(size, entry->uip);
+ uifree(entry->uip);
+ }
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
- (object = entry->object.vm_object) != NULL) {
- count = OFF_TO_IDX(entry->end - entry->start);
+ (object != NULL)) {
+ KASSERT(entry->uip == NULL || object->uip == NULL ||
+ (entry->eflags & MAP_ENTRY_NEEDS_COPY),
+ ("OVERCOMMIT vm_map_entry_delete: both uip %p", entry));
+ count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
VM_OBJECT_LOCK(object);
@@ -2489,8 +2638,17 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
if (object->type == OBJT_SWAP)
swap_pager_freespace(object, offidxstart, count);
if (offidxend >= object->size &&
- offidxstart < object->size)
+ offidxstart < object->size) {
+ size1 = object->size;
object->size = offidxstart;
+ if (object->uip != NULL) {
+ size1 -= object->size;
+ KASSERT(object->charge >= ptoa(size1),
+ ("vm_map_entry_delete: object->charge < 0"));
+ swap_release_by_uid(ptoa(size1), object->uip);
+ object->charge -= ptoa(size1);
+ }
+ }
}
VM_OBJECT_UNLOCK(object);
} else
@@ -2664,9 +2822,13 @@ vm_map_copy_entry(
vm_map_t src_map,
vm_map_t dst_map,
vm_map_entry_t src_entry,
- vm_map_entry_t dst_entry)
+ vm_map_entry_t dst_entry,
+ vm_ooffset_t *fork_charge)
{
vm_object_t src_object;
+ vm_offset_t size;
+ struct uidinfo *uip;
+ int charged;
VM_MAP_ASSERT_LOCKED(dst_map);
@@ -2689,8 +2851,10 @@ vm_map_copy_entry(
/*
* Make a copy of the object.
*/
+ size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
VM_OBJECT_LOCK(src_object);
+ charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
src_object->type == OBJT_SWAP)) {
@@ -2702,14 +2866,39 @@ vm_map_copy_entry(
}
vm_object_reference_locked(src_object);
vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
+ if (src_entry->uip != NULL &&
+ !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ KASSERT(src_object->uip == NULL,
+ ("OVERCOMMIT: vm_map_copy_entry: uip %p",
+ src_object));
+ src_object->uip = src_entry->uip;
+ src_object->charge = size;
+ }
VM_OBJECT_UNLOCK(src_object);
dst_entry->object.vm_object = src_object;
+ if (charged) {
+ uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(uip);
+ dst_entry->uip = uip;
+ *fork_charge += size;
+ if (!(src_entry->eflags &
+ MAP_ENTRY_NEEDS_COPY)) {
+ uihold(uip);
+ src_entry->uip = uip;
+ *fork_charge += size;
+ }
+ }
src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->offset = src_entry->offset;
} else {
dst_entry->object.vm_object = NULL;
dst_entry->offset = 0;
+ if (src_entry->uip != NULL) {
+ dst_entry->uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(dst_entry->uip);
+ *fork_charge += size;
+ }
}
pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
@@ -2766,7 +2955,7 @@ vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
* The source map must not be locked.
*/
struct vmspace *
-vmspace_fork(struct vmspace *vm1)
+vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
{
struct vmspace *vm2;
vm_map_t old_map = &vm1->vm_map;
@@ -2777,7 +2966,6 @@ vmspace_fork(struct vmspace *vm1)
int locked;
vm_map_lock(old_map);
-
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
if (vm2 == NULL)
goto unlock_and_return;
@@ -2809,6 +2997,12 @@ vmspace_fork(struct vmspace *vm1)
atop(old_entry->end - old_entry->start));
old_entry->object.vm_object = object;
old_entry->offset = 0;
+ if (old_entry->uip != NULL) {
+ object->uip = old_entry->uip;
+ object->charge = old_entry->end -
+ old_entry->start;
+ old_entry->uip = NULL;
+ }
}
/*
@@ -2835,6 +3029,12 @@ vmspace_fork(struct vmspace *vm1)
}
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
+ if (old_entry->uip != NULL) {
+ KASSERT(object->uip == NULL, ("vmspace_fork both uip"));
+ object->uip = old_entry->uip;
+ object->charge = old_entry->end - old_entry->start;
+ old_entry->uip = NULL;
+ }
VM_OBJECT_UNLOCK(object);
/*
@@ -2877,7 +3077,7 @@ vmspace_fork(struct vmspace *vm1)
new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
vm_map_copy_entry(old_map, new_map, old_entry,
- new_entry);
+ new_entry, fork_charge);
break;
}
old_entry = old_entry->next;
@@ -3005,6 +3205,7 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
size_t grow_amount, max_grow;
rlim_t stacklim, vmemlim;
int is_procstack, rv;
+ struct uidinfo *uip;
Retry:
PROC_LOCK(p);
@@ -3170,13 +3371,17 @@ Retry:
}
grow_amount = addr - stack_entry->end;
-
+ uip = stack_entry->uip;
+ if (uip == NULL && stack_entry->object.vm_object != NULL)
+ uip = stack_entry->object.vm_object->uip;
+ if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip))
+ rv = KERN_NO_SPACE;
/* Grow the underlying object if applicable. */
- if (stack_entry->object.vm_object == NULL ||
- vm_object_coalesce(stack_entry->object.vm_object,
- stack_entry->offset,
- (vm_size_t)(stack_entry->end - stack_entry->start),
- (vm_size_t)grow_amount)) {
+ else if (stack_entry->object.vm_object == NULL ||
+ vm_object_coalesce(stack_entry->object.vm_object,
+ stack_entry->offset,
+ (vm_size_t)(stack_entry->end - stack_entry->start),
+ (vm_size_t)grow_amount, uip != NULL)) {
map->size += (addr - stack_entry->end);
/* Update the current entry. */
stack_entry->end = addr;
@@ -3249,12 +3454,18 @@ vmspace_unshare(struct proc *p)
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
+ vm_ooffset_t fork_charge;
if (oldvmspace->vm_refcnt == 1)
return (0);
- newvmspace = vmspace_fork(oldvmspace);
+ fork_charge = 0;
+ newvmspace = vmspace_fork(oldvmspace, &fork_charge);
if (newvmspace == NULL)
return (ENOMEM);
+ if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
+ vmspace_free(newvmspace);
+ return (ENOMEM);
+ }
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
@@ -3300,6 +3511,9 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_map_t map = *var_map;
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
+ vm_object_t eobject;
+ struct uidinfo *uip;
+ vm_ooffset_t size;
RetryLookup:;
@@ -3356,7 +3570,7 @@ RetryLookup:;
*wired = (entry->wired_count != 0);
if (*wired)
prot = fault_type = entry->protection;
-
+ size = entry->end - entry->start;
/*
* If the entry was copy-on-write, we either ...
*/
@@ -3378,11 +3592,40 @@ RetryLookup:;
if (vm_map_lock_upgrade(map))
goto RetryLookup;
+ if (entry->uip == NULL) {
+ /*
+ * The debugger owner is charged for
+ * the memory.
+ */
+ uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(uip);
+ if (!swap_reserve_by_uid(size, uip)) {
+ uifree(uip);
+ vm_map_unlock(map);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ entry->uip = uip;
+ }
vm_object_shadow(
&entry->object.vm_object,
&entry->offset,
- atop(entry->end - entry->start));
+ atop(size));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
+ eobject = entry->object.vm_object;
+ if (eobject->uip != NULL) {
+ /*
+ * The object was not shadowed.
+ */
+ swap_release_by_uid(size, entry->uip);
+ uifree(entry->uip);
+ entry->uip = NULL;
+ } else if (entry->uip != NULL) {
+ VM_OBJECT_LOCK(eobject);
+ eobject->uip = entry->uip;
+ eobject->charge = size;
+ VM_OBJECT_UNLOCK(eobject);
+ entry->uip = NULL;
+ }
vm_map_lock_downgrade(map);
} else {
@@ -3402,8 +3645,15 @@ RetryLookup:;
if (vm_map_lock_upgrade(map))
goto RetryLookup;
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
- atop(entry->end - entry->start));
+ atop(size));
entry->offset = 0;
+ if (entry->uip != NULL) {
+ VM_OBJECT_LOCK(entry->object.vm_object);
+ entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->charge = size;
+ VM_OBJECT_UNLOCK(entry->object.vm_object);
+ entry->uip = NULL;
+ }
vm_map_lock_downgrade(map);
}
@@ -3583,9 +3833,15 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent -= 2;
}
} else {
+ if (entry->uip != NULL)
+ db_printf(", uip %d", entry->uip->ui_uid);
db_printf(", object=%p, offset=0x%jx",
(void *)entry->object.vm_object,
(uintmax_t)entry->offset);
+ if (entry->object.vm_object && entry->object.vm_object->uip)
+ db_printf(", obj uip %d charge %jx",
+ entry->object.vm_object->uip->ui_uid,
+ (uintmax_t)entry->object.vm_object->charge);
if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 70c3a0bb2463..0c586ab2ae58 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -114,6 +114,7 @@ struct vm_map_entry {
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
vm_pindex_t lastr; /* last read */
+ struct uidinfo *uip; /* tmp storage for creator ref */
};
#define MAP_ENTRY_NOSYNC 0x0001
@@ -310,6 +311,8 @@ long vmspace_wired_count(struct vmspace *vmspace);
#define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */
#define MAP_STACK_GROWS_DOWN 0x1000
#define MAP_STACK_GROWS_UP 0x2000
+#define MAP_ACC_CHARGED 0x4000
+#define MAP_ACC_NO_CHARGE 0x8000
/*
* vm_fault option flags
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index fb9bb5f12a9d..cc7a0f42c049 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -633,6 +633,8 @@ mprotect(td, uap)
return (0);
case KERN_PROTECTION_FAILURE:
return (EACCES);
+ case KERN_RESOURCE_SHORTAGE:
+ return (ENOMEM);
}
return (EINVAL);
}
@@ -1208,7 +1210,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
objsize = round_page(va.va_size);
if (va.va_nlink == 0)
flags |= MAP_NOSYNC;
- obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff);
+ obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
if (obj == NULL) {
error = ENOMEM;
goto done;
@@ -1289,7 +1291,8 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
dev_relthread(cdev);
if (error != ENODEV)
return (error);
- obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff);
+ obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
+ td->td_ucred);
if (obj == NULL)
return (EINVAL);
*objp = obj;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index c73882c776b1..9e65cb48bbfd 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
+#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@@ -222,6 +223,8 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->generation = 1;
object->ref_count = 1;
object->flags = 0;
+ object->uip = NULL;
+ object->charge = 0;
if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
object->flags = OBJ_ONEMAPPING;
object->pg_color = 0;
@@ -609,6 +612,20 @@ vm_object_destroy(vm_object_t object)
mtx_unlock(&vm_object_list_mtx);
/*
+ * Release the allocation charge.
+ */
+ if (object->uip != NULL) {
+ KASSERT(object->type == OBJT_DEFAULT ||
+ object->type == OBJT_SWAP,
+ ("vm_object_terminate: non-swap obj %p has uip",
+ object));
+ swap_release_by_uid(object->charge, object->uip);
+ object->charge = 0;
+ uifree(object->uip);
+ object->uip = NULL;
+ }
+
+ /*
* Free the space for the object.
*/
uma_zfree(obj_zone, object);
@@ -1347,6 +1364,14 @@ vm_object_split(vm_map_entry_t entry)
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
}
+ if (orig_object->uip != NULL) {
+ new_object->uip = orig_object->uip;
+ uihold(orig_object->uip);
+ new_object->charge = ptoa(size);
+ KASSERT(orig_object->charge >= ptoa(size),
+ ("orig_object->charge < 0"));
+ orig_object->charge -= ptoa(size);
+ }
retry:
if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
if (m->pindex < offidxstart) {
@@ -1757,6 +1782,13 @@ vm_object_collapse(vm_object_t object)
* and no object references within it, all that is
* necessary is to dispose of it.
*/
+ if (backing_object->uip != NULL) {
+ swap_release_by_uid(backing_object->charge,
+ backing_object->uip);
+ backing_object->charge = 0;
+ uifree(backing_object->uip);
+ backing_object->uip = NULL;
+ }
KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
VM_OBJECT_UNLOCK(backing_object);
@@ -1994,13 +2026,15 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
* prev_offset Offset into prev_object
* prev_size Size of reference to prev_object
* next_size Size of reference to the second object
+ * reserved Indicator that extension region has
+ * swap accounted for
*
* Conditions:
* The object must *not* be locked.
*/
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
- vm_size_t prev_size, vm_size_t next_size)
+ vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
vm_pindex_t next_pindex;
@@ -2039,6 +2073,28 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
}
/*
+ * Account for the charge.
+ */
+ if (prev_object->uip != NULL) {
+
+ /*
+	 * If prev_object was charged, then this mapping,
+	 * although not charged now, may become writable
+	 * later. Non-NULL uip in the object would prevent
+	 * swap reservation during enabling of the write
+	 * access, so reserve swap now. A failed reservation
+	 * causes allocation of a separate object for the map
+	 * entry, and swap reservation for this entry is
+	 * managed at the appropriate time.
+ */
+ if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
+ prev_object->uip)) {
+ return (FALSE);
+ }
+ prev_object->charge += ptoa(next_size);
+ }
+
+ /*
* Remove any pages that may still be in the object from a previous
* deallocation.
*/
@@ -2049,6 +2105,16 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (prev_object->type == OBJT_SWAP)
swap_pager_freespace(prev_object,
next_pindex, next_size);
+#if 0
+ if (prev_object->uip != NULL) {
+ KASSERT(prev_object->charge >=
+ ptoa(prev_object->size - next_pindex),
+ ("object %p overcharged 1 %jx %jx", prev_object,
+ (uintmax_t)next_pindex, (uintmax_t)next_size));
+ prev_object->charge -= ptoa(prev_object->size -
+ next_pindex);
+ }
+#endif
}
/*
@@ -2198,9 +2264,10 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
return;
db_iprintf(
- "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
+ "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x uip %d charge %jx\n",
object, (int)object->type, (uintmax_t)object->size,
- object->resident_page_count, object->ref_count, object->flags);
+ object->resident_page_count, object->ref_count, object->flags,
+ object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
object->shadow_count,
object->backing_object ? object->backing_object->ref_count : 0,
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 99a2a5815be6..bf88cfd6c405 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -133,6 +133,8 @@ struct vm_object {
int swp_bcount;
} swp;
} un_pager;
+ struct uidinfo *uip;
+ vm_ooffset_t charge;
};
/*
@@ -198,7 +200,8 @@ void vm_object_pip_wait(vm_object_t object, char *waitid);
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
-boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t);
+boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
+ boolean_t);
void vm_object_collapse (vm_object_t);
void vm_object_deallocate (vm_object_t);
void vm_object_destroy (vm_object_t);
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6bb5b3bf99c0..3758d2bda39b 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -88,7 +88,7 @@ int cluster_pbuf_freecnt = -1; /* unlimited to begin with */
static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
- vm_ooffset_t);
+ vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
@@ -105,7 +105,7 @@ dead_pager_getpages(obj, ma, count, req)
static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
- vm_ooffset_t off)
+ vm_ooffset_t off, struct ucred *cred)
{
return NULL;
}
@@ -227,14 +227,14 @@ vm_pager_bufferinit()
*/
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
- vm_prot_t prot, vm_ooffset_t off)
+ vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
vm_object_t ret;
struct pagerops *ops;
ops = pagertab[type];
if (ops)
- ret = (*ops->pgo_alloc) (handle, size, prot, off);
+ ret = (*ops->pgo_alloc) (handle, size, prot, off, cred);
else
ret = NULL;
return (ret);
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index 7acc04e99e10..7c8aebac36a1 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -47,7 +47,8 @@
TAILQ_HEAD(pagerlst, vm_object);
typedef void pgo_init_t(void);
-typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t,
+ struct ucred *);
typedef void pgo_dealloc_t(vm_object_t);
typedef int pgo_getpages_t(vm_object_t, vm_page_t *, int, int);
typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *);
@@ -100,7 +101,8 @@ extern vm_map_t pager_map;
extern struct pagerops *pagertab[];
extern struct mtx pbuf_mtx;
-vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t,
+ vm_ooffset_t, struct ucred *);
void vm_pager_bufferinit(void);
void vm_pager_deallocate(vm_object_t);
static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 47177e32c404..09223b70bf07 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -83,7 +83,8 @@ static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
-static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
+ vm_ooffset_t, struct ucred *cred);
struct pagerops vnodepagerops = {
.pgo_alloc = vnode_pager_alloc,
@@ -128,7 +129,7 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
}
}
- object = vnode_pager_alloc(vp, size, 0, 0);
+ object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
/*
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
@@ -185,7 +186,7 @@ vnode_destroy_vobject(struct vnode *vp)
*/
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
- vm_ooffset_t offset)
+ vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
struct vnode *vp;