| author | Mateusz Guzik <mjg@FreeBSD.org> | 2020-01-16 21:45:21 +0000 |
|---|---|---|
| committer | Mateusz Guzik <mjg@FreeBSD.org> | 2020-01-16 21:45:21 +0000 |
| commit | 66f67d5e5e0046a8e90af295ae9b36f3e3ba4b64 | |
| tree | ca140546d770664e263f017c453bf5747a6c0442 | |
| parent | b7f50b9ad1d567dfffbc99072d073c2a5650e8b1 | |
| -rw-r--r-- | sys/kern/vfs_subr.c | 29 |

1 file changed, 23 insertions(+), 6 deletions(-)
```diff
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index de199c7acd85..fe916a2661dc 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1497,21 +1497,22 @@ vtryrecycle(struct vnode *vp)
  * The routine can try to free a vnode or stall for up to 1 second waiting for
  * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation.
  */
-static struct vnode *
-vn_alloc(struct mount *mp)
+static u_long vn_alloc_cyclecount;
+
+static struct vnode * __noinline
+vn_alloc_hard(struct mount *mp)
 {
 	u_long rnumvnodes, rfreevnodes;
-	static u_long cyclecount;
 
 	mtx_lock(&vnode_list_mtx);
 	rnumvnodes = atomic_load_long(&numvnodes);
 	if (rnumvnodes + 1 < desiredvnodes) {
-		cyclecount = 0;
+		vn_alloc_cyclecount = 0;
 		goto alloc;
 	}
 	rfreevnodes = atomic_load_long(&freevnodes);
-	if (cyclecount++ >= rfreevnodes) {
-		cyclecount = 0;
+	if (vn_alloc_cyclecount++ >= rfreevnodes) {
+		vn_alloc_cyclecount = 0;
 		vstir = 1;
 	}
 	/*
@@ -1546,6 +1547,22 @@ alloc:
 	return (uma_zalloc(vnode_zone, M_WAITOK));
 }
 
+static struct vnode *
+vn_alloc(struct mount *mp)
+{
+	u_long rnumvnodes;
+
+	if (__predict_false(vn_alloc_cyclecount != 0))
+		return (vn_alloc_hard(mp));
+	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
+	if (__predict_false(vnlru_under(rnumvnodes, vlowat))) {
+		atomic_subtract_long(&numvnodes, 1);
+		return (vn_alloc_hard(mp));
+	}
+
+	return (uma_zalloc(vnode_zone, M_WAITOK));
+}
+
 static void
 vn_free(struct vnode *vp)
 {
```
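For context, the change splits vnode allocation into a lock-free fast path and a `__noinline` slow path: the common case claims a slot in `numvnodes` with a single `atomic_fetchadd_long()`, and only falls back to `vn_alloc_hard()` (which takes `vnode_list_mtx`) when the new count crosses the low watermark or a previous slow-path cycle is still in progress. Below is a minimal standalone C11 sketch of the same optimistic increment-then-check pattern; all names (`counter`, `limit`, `alloc_fast`, `alloc_slow`, `release`) are hypothetical stand-ins for illustration, not kernel APIs.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_long counter;	/* stand-in for numvnodes */
static const long limit = 4;	/* stand-in for the vnlru low watermark */

/*
 * Slow path: in the kernel this takes vnode_list_mtx, may stall waiting
 * for vnlru, and ultimately allocates with M_WAITOK. Here it simply
 * re-claims the slot and allocates.
 */
static void *
alloc_slow(void)
{
	atomic_fetch_add(&counter, 1);
	return (malloc(64));
}

/*
 * Fast path: optimistically claim a slot with one atomic increment and
 * only back out and fall over to the slow path if the limit check fails.
 */
static void *
alloc_fast(void)
{
	long n;

	n = atomic_fetch_add(&counter, 1) + 1;
	if (n > limit) {
		atomic_fetch_sub(&counter, 1);	/* undo the optimistic claim */
		return (alloc_slow());
	}
	return (malloc(64));
}

static void
release(void *p)
{
	free(p);
	atomic_fetch_sub(&counter, 1);	/* mirror vn_free() dropping the count */
}

int
main(void)
{
	void *p;

	for (int i = 0; i < 8; i++) {
		p = alloc_fast();
		release(p);
	}
	printf("counter: %ld\n", atomic_load(&counter));
	return (0);
}
```

Backing out the increment before taking the slow path keeps the counter exact even when the limit check fails, which is why the patch does `atomic_subtract_long(&numvnodes, 1)` before calling `vn_alloc_hard()` rather than letting the slow path inherit the stale increment.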
