Diffstat (limited to 'sys/vm')
-rw-r--r--   sys/vm/uma_core.c     22
-rw-r--r--   sys/vm/vm_glue.c      24
-rw-r--r--   sys/vm/vm_meter.c      2
-rw-r--r--   sys/vm/vm_object.c    12
-rw-r--r--   sys/vm/vm_pageout.c    4
5 files changed, 22 insertions, 42 deletions
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 679b2e20e88b..b80b5cc781f7 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -4009,21 +4009,15 @@ restart:
/*
* Use the keg's policy if upper layers haven't already specified a
* domain (as happens with first-touch zones).
- *
- * To avoid races we run the iterator with the keg lock held, but that
- * means that we cannot allow the vm_domainset layer to sleep. Thus,
- * clear M_WAITOK and handle low memory conditions locally.
*/
rr = rdomain == UMA_ANYDOMAIN;
+ aflags = flags;
if (rr) {
- aflags = (flags & ~M_WAITOK) | M_NOWAIT;
if (vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
&aflags) != 0)
return (NULL);
- } else {
- aflags = flags;
+ } else
domain = rdomain;
- }
for (;;) {
slab = keg_fetch_free_slab(keg, domain, rr, flags);
@@ -4053,13 +4047,8 @@ restart:
if ((flags & M_WAITOK) == 0)
break;
vm_wait_domain(domain);
- } else if (vm_domainset_iter_policy(&di, &domain) != 0) {
- if ((flags & M_WAITOK) != 0) {
- vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
- goto restart;
- }
+ } else if (vm_domainset_iter_policy(&di, &domain) != 0)
break;
- }
}
/*
@@ -5245,7 +5234,7 @@ uma_prealloc(uma_zone_t zone, int items)
KEG_GET(zone, keg);
slabs = howmany(items, keg->uk_ipers);
while (slabs-- > 0) {
- aflags = M_NOWAIT;
+ aflags = M_WAITOK;
if (vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
&aflags) != 0)
panic("%s: Domainset is empty", __func__);
@@ -5266,7 +5255,8 @@ uma_prealloc(uma_zone_t zone, int items)
break;
}
if (vm_domainset_iter_policy(&di, &domain) != 0)
- vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
+ panic("%s: Cannot allocate from any domain",
+ __func__);
}
}
}
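
Taken together, the uma_core.c hunks drop the old workaround of clearing M_WAITOK before running the keg's domainset iterator and handling low-memory waits locally (the removed vm_wait_doms()/restart path): keg_fetch_slab() now hands the caller's flags to vm_domainset_iter_policy_ref_init() unchanged and simply stops when the iterator reports that every allowed domain has been visited, while uma_prealloc() requests M_WAITOK allocations and panics if no domain can satisfy them. A minimal sketch of the resulting idiom follows; it is illustrative only, and try_alloc() is a hypothetical stand-in for keg_fetch_free_slab()/keg_alloc_slab():

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_domainset.h>

/* Hypothetical per-domain allocation attempt, for illustration only. */
static void *try_alloc(int domain, int flags);

/*
 * Illustrative only: walk the domains named by a domainset policy,
 * letting the iterator see the caller's M_WAITOK/M_NOWAIT flags
 * instead of masking them to M_NOWAIT.
 */
static void *
alloc_from_policy(struct domainset_ref *dr, int flags)
{
	struct vm_domainset_iter di;
	void *item;
	int aflags, domain;

	aflags = flags;		/* no longer forced to M_NOWAIT */
	if (vm_domainset_iter_policy_ref_init(&di, dr, &domain,
	    &aflags) != 0)
		return (NULL);	/* empty domainset */
	do {
		item = try_alloc(domain, aflags);
		if (item != NULL)
			return (item);
	} while (vm_domainset_iter_policy(&di, &domain) == 0);
	return (NULL);
}

Exhaustion of the allowed domains is reported through the iterator's return value, which is why the explicit restart label and the separate vm_wait_doms() call are no longer needed in keg_fetch_slab().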
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index e0f1807a1b32..18d789c59281 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -441,19 +441,16 @@ vm_thread_kstack_arena_release(void *arena, vmem_addr_t addr, vmem_size_t size)
* Create the kernel stack for a new thread.
*/
static vm_offset_t
-vm_thread_stack_create(struct domainset *ds, int pages)
+vm_thread_stack_create(struct domainset *ds, int pages, int flags)
{
vm_page_t ma[KSTACK_MAX_PAGES];
struct vm_domainset_iter di;
- int req = VM_ALLOC_NORMAL;
- vm_object_t obj;
+ int req;
vm_offset_t ks;
int domain, i;
- obj = vm_thread_kstack_size_to_obj(pages);
- if (vm_ndomains > 1)
- obj->domain.dr_policy = ds;
- vm_domainset_iter_page_init(&di, obj, 0, &domain, &req);
+ vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
+ req = malloc2vm_flags(flags);
do {
/*
* Get a kernel virtual address for this thread's kstack.
@@ -480,7 +477,7 @@ vm_thread_stack_create(struct domainset *ds, int pages)
vm_page_valid(ma[i]);
pmap_qenter(ks, ma, pages);
return (ks);
- } while (vm_domainset_iter_page(&di, obj, &domain, NULL) == 0);
+ } while (vm_domainset_iter_policy(&di, &domain) == 0);
return (0);
}
@@ -532,15 +529,9 @@ vm_thread_new(struct thread *td, int pages)
ks = 0;
if (pages == kstack_pages && kstack_cache != NULL)
ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);
-
- /*
- * Ensure that kstack objects can draw pages from any memory
- * domain. Otherwise a local memory shortage can block a process
- * swap-in.
- */
if (ks == 0)
ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
- pages);
+ pages, M_NOWAIT);
if (ks == 0)
return (0);
@@ -660,7 +651,8 @@ kstack_import(void *arg, void **store, int cnt, int domain, int flags)
ds = DOMAINSET_PREF(domain);
for (i = 0; i < cnt; i++) {
- store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
+ store[i] = (void *)vm_thread_stack_create(ds, kstack_pages,
+ flags);
if (store[i] == NULL)
break;
}
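
The vm_glue.c change gives vm_thread_stack_create() an explicit malloc-style flags argument and iterates the caller's domainset directly with vm_domainset_iter_policy_init(), rather than temporarily rewriting the kstack object's dr_policy; malloc2vm_flags() derives the VM_ALLOC_* page-allocation request bits from the wait flags. vm_thread_new() now passes M_NOWAIT for its fast path, and kstack_import() forwards the flags it receives from UMA. A sketch of that iteration idiom, with alloc_stack_in_domain() as a hypothetical per-domain allocation step:

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_domainset.h>
#include <vm/vm_extern.h>

/* Hypothetical per-domain allocation step, for illustration only. */
static vm_offset_t alloc_stack_in_domain(int domain, int pages, int req);

/*
 * Illustrative sketch of the new shape of vm_thread_stack_create():
 * iterate the policy's domains, trying one allocation per domain until
 * one succeeds or the iterator is exhausted.
 */
static vm_offset_t
stack_create_sketch(struct domainset *ds, int pages, int flags)
{
	struct vm_domainset_iter di;
	vm_offset_t ks;
	int domain, req;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	/* Derive VM_ALLOC_* bits from the (possibly adjusted) wait flags. */
	req = malloc2vm_flags(flags);
	do {
		ks = alloc_stack_in_domain(domain, pages, req);
		if (ks != 0)
			return (ks);
	} while (vm_domainset_iter_policy(&di, &domain) == 0);
	return (0);
}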
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index fef28bb883e4..fee50f49c844 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -96,7 +96,7 @@ struct vmmeter __read_mostly vm_cnt = {
u_long __exclusive_cache_line vm_user_wire_count;
static void
-vmcounter_startup(void)
+vmcounter_startup(void *dummy __unused)
{
counter_u64_t *cnt = (counter_u64_t *)&vm_cnt;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 6d9ea8bf9d93..5b4517d2bf0c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -2522,15 +2522,13 @@ vm_object_list_handler(struct sysctl_req *req, bool swap_only)
continue;
}
mtx_unlock(&vm_object_list_mtx);
+
+ memset(kvo, 0, sizeof(*kvo));
kvo->kvo_size = ptoa(obj->size);
kvo->kvo_resident = obj->resident_page_count;
kvo->kvo_ref_count = obj->ref_count;
kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count);
kvo->kvo_memattr = obj->memattr;
- kvo->kvo_active = 0;
- kvo->kvo_inactive = 0;
- kvo->kvo_laundry = 0;
- kvo->kvo_flags = 0;
if (!swap_only) {
vm_page_iter_init(&pages, obj);
VM_RADIX_FOREACH(m, &pages) {
@@ -2549,12 +2547,12 @@ vm_object_list_handler(struct sysctl_req *req, bool swap_only)
kvo->kvo_inactive++;
else if (vm_page_in_laundry(m))
kvo->kvo_laundry++;
+
+ if (vm_page_wired(m))
+ kvo->kvo_wired++;
}
}
- kvo->kvo_vn_fileid = 0;
- kvo->kvo_vn_fsid = 0;
- kvo->kvo_vn_fsid_freebsd11 = 0;
freepath = NULL;
fullpath = "";
vp = NULL;
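
In vm_object.c, the exported record is now cleared with one memset() before its fields are filled in, so newly added members (such as kvo_wired counted here) start out zeroed without having to list each one, and the existing page walk additionally counts wired pages. A small illustrative helper showing that counting shape, using the stock vm_page_*() predicates (the counter pointers are placeholders, not the real kinfo_vmobject fields):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Illustrative counting of a single page, mirroring the handler's
 * walk: each page falls into at most one of the queue buckets, while
 * the wired count is kept separately from that classification.
 */
static void
count_page(vm_page_t m, uint64_t *active, uint64_t *inactive,
    uint64_t *laundry, uint64_t *wired)
{
	if (vm_page_active(m))
		(*active)++;
	else if (vm_page_inactive(m))
		(*inactive)++;
	else if (vm_page_in_laundry(m))
		(*laundry)++;
	if (vm_page_wired(m))
		(*wired)++;
}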
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 3f1be78342c9..418a9cff8abf 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -120,7 +120,7 @@
/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
-static void vm_pageout_init(void);
+static void vm_pageout_init(void *);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
@@ -2333,7 +2333,7 @@ vm_pageout_init_domain(int domain)
}
static void
-vm_pageout_init(void)
+vm_pageout_init(void *dummy __unused)
{
u_long freecount;
int i;
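
Finally, the vm_meter.c and vm_pageout.c hunks change the SYSINIT handlers from zero-argument functions to functions taking an unused void * argument, the shape SYSINIT handlers are actually invoked with, so the registrations no longer depend on a mismatched function-pointer cast. A minimal sketch of that convention (the subsystem and order values here are placeholders):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

/*
 * SYSINIT handlers are invoked with the registered udata pointer; a
 * handler that ignores it takes a void * and marks it __unused rather
 * than being declared with no parameters and cast at registration time.
 */
static void
example_init(void *dummy __unused)
{
	printf("example subsystem initialized\n");
}
SYSINIT(example, SI_SUB_VM_CONF, SI_ORDER_ANY, example_init, NULL);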