Diffstat (limited to 'sys/fs/nullfs/null_subr.c')
-rw-r--r--	sys/fs/nullfs/null_subr.c	94
1 file changed, 63 insertions, 31 deletions
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 053614b6910d..bb0ff9966dfd 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -36,14 +36,19 @@
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
-#include <sys/rwlock.h>
 #include <sys/malloc.h>
 #include <sys/mount.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/smr.h>
 #include <sys/vnode.h>
 
 #include <fs/nullfs/null.h>
 
+#include <vm/uma.h>
+
+VFS_SMR_DECLARE;
+
 /*
  * Null layer cache:
  * Each cache entry holds a reference to the lower vnode
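
The include churn above is not cosmetic: <sys/rwlock.h> moves into alphabetical order, and <sys/smr.h> plus <vm/uma.h> bring in safe memory reclamation (SMR) and the SMR-aware zone allocator. VFS_SMR_DECLARE makes the VFS-wide SMR domain, vfs_smr, visible to this file. Its reader side is a short non-sleeping critical section; a minimal kernel-side sketch of the shape (illustrative only, not a standalone compilation unit):

	vfs_smr_enter();	/* enter read-side section; sleeping is forbidden */
	/* ... walk structures whose frees go through an SMR zone ... */
	vfs_smr_exit();		/* leave; deferred frees may now be recycled */

Memory passed to uma_zfree_smr() is not reused until every thread that was inside such a section at the time of the free has left it, which is what makes the lock-free hash walk added below safe.
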
@@ -54,12 +59,12 @@
 #define	NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])
 
-static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+static CK_SLIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
 static struct rwlock null_hash_lock;
 static u_long null_hash_mask;
 
 static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
-MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");
+uma_zone_t __read_mostly null_node_zone;
 
 static void null_hashins(struct mount *, struct null_node *);
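
The head type changes from LIST (doubly linked, <sys/queue.h>) to CK_SLIST (singly linked, via the ConcurrencyKit shims in <sys/ck.h>). The CK variants publish link updates with atomic stores, so an unlocked reader walking a bucket sees either the old or the new list, never a half-written pointer. A rough userland analogy of that publish step, using C11 atomics (all names here are invented for illustration, and the sketch simplifies to a single writer and an atomic head, where ck_slist makes every link update atomic):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int key;
		struct node *next;
	};

	static _Atomic(struct node *) head;	/* bucket head, atomically published */

	/*
	 * Writer: fill in the node first, then publish it with a release store
	 * so a reader that observes the new head also observes its fields.
	 */
	static void
	insert_head(int key)
	{
		struct node *n;

		n = malloc(sizeof(*n));
		n->key = key;
		n->next = atomic_load_explicit(&head, memory_order_relaxed);
		atomic_store_explicit(&head, n, memory_order_release);
	}

	/* Reader: safe against a concurrent insert_head(); takes no lock. */
	static int
	contains(int key)
	{
		struct node *n;

		for (n = atomic_load_explicit(&head, memory_order_acquire);
		    n != NULL; n = n->next)
			if (n->key == key)
				return (1);
		return (0);
	}

	int
	main(void)
	{
		insert_head(1);
		insert_head(2);
		printf("%d %d\n", contains(2), contains(3));	/* prints: 1 0 */
		return (0);
	}

Head insertion is the only concurrent write the new lookup path has to tolerate; removals still happen under the hash write lock, and freed nodes stay dereferenceable until the SMR grace period expires.
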
@@ -73,6 +78,10 @@ nullfs_init(struct vfsconf *vfsp)
 	null_node_hashtbl = hashinit(desiredvnodes, M_NULLFSHASH,
 	    &null_hash_mask);
 	rw_init(&null_hash_lock, "nullhs");
+	null_node_zone = uma_zcreate("nullfs node", sizeof(struct null_node),
+	    NULL, NULL, NULL, NULL, 0, UMA_ZONE_ZINIT);
+	VFS_SMR_ZONE_SET(null_node_zone);
+
 	return (0);
 }
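
The zone setup follows the usual pattern for SMR-managed UMA zones: create the zone (UMA_ZONE_ZINIT arranges for items to start out zeroed; the four NULLs decline the per-item and import-time constructor/destructor hooks), then attach it to the VFS SMR domain. Allocation and free must then go through the _smr variants. A condensed kernel-side sketch of the life cycle (illustrative, not a standalone unit):

	zone = uma_zcreate("example", size, NULL, NULL, NULL, NULL, 0,
	    UMA_ZONE_ZINIT);
	VFS_SMR_ZONE_SET(zone);			/* tie reclamation to vfs_smr */

	p = uma_zalloc_smr(zone, M_WAITOK);	/* as in null_nodeget() below */
	/* ... */
	uma_zfree_smr(zone, p);	/* reuse deferred past current vfs_smr readers */

Mixing plain uma_zalloc()/uma_zfree() with the _smr variants on the same zone is not permitted, hence the consistent use of the _smr calls throughout this change.
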
@@ -80,6 +89,7 @@ int
 nullfs_uninit(struct vfsconf *vfsp)
 {
+	uma_zdestroy(null_node_zone);
 	rw_destroy(&null_hash_lock);
 	hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_hash_mask);
 	return (0);
@@ -96,7 +106,7 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
 	struct null_node *a;
 	struct vnode *vp;
 
-	ASSERT_VOP_LOCKED(lowervp, "null_hashget");
+	ASSERT_VOP_LOCKED(lowervp, __func__);
 	rw_assert(&null_hash_lock, RA_LOCKED);
 
 	/*
@@ -106,18 +116,21 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
 	 * reference count (but NOT the lower vnode's VREF counter).
 	 */
 	hd = NULL_NHASH(lowervp);
-	LIST_FOREACH(a, hd, null_hash) {
-		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
-			/*
-			 * Since we have the lower node locked the nullfs
-			 * node can not be in the process of recycling. If
-			 * it had been recycled before we grabbed the lower
-			 * lock it would not have been found on the hash.
-			 */
-			vp = NULLTOV(a);
-			vref(vp);
-			return (vp);
-		}
+	CK_SLIST_FOREACH(a, hd, null_hash) {
+		if (a->null_lowervp != lowervp)
+			continue;
+		/*
+		 * Since we have the lower node locked the nullfs
+		 * node can not be in the process of recycling. If
+		 * it had been recycled before we grabbed the lower
+		 * lock it would not have been found on the hash.
+		 */
+		vp = NULLTOV(a);
+		VNPASS(!VN_IS_DOOMED(vp), vp);
+		if (vp->v_mount != mp)
+			continue;
+		vref(vp);
+		return (vp);
 	}
 	return (NULL);
 }
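
Worth noting: the old code tested the lower vnode and the mount in one expression, while the new loop splits them so the VNPASS assertion can sit between the two filters. Both this walk and the lockless one below apply the same match logic; expressed as a predicate it would look like the following (null_node_matches is a hypothetical helper for illustration; the patch open-codes the test in both loops):

	/* Hypothetical helper; not part of the patch. */
	static bool
	null_node_matches(struct null_node *a, struct mount *mp,
	    struct vnode *lowervp)
	{
		return (a->null_lowervp == lowervp &&
		    NULLTOV(a)->v_mount == mp);
	}

The assertion leans on the caller's lock on lowervp: nullfs vnodes share their lock with the lower vnode, so a node found in the hash cannot be midway through reclamation here, and a fully reclaimed one would already have been unhashed, exactly as the comment argues.
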
@@ -126,17 +139,34 @@ struct vnode *
 null_hashget(struct mount *mp, struct vnode *lowervp)
 {
 	struct null_node_hashhead *hd;
+	struct null_node *a;
 	struct vnode *vp;
+	enum vgetstate vs;
 
-	hd = NULL_NHASH(lowervp);
-	if (LIST_EMPTY(hd))
-		return (NULL);
-
-	rw_rlock(&null_hash_lock);
-	vp = null_hashget_locked(mp, lowervp);
-	rw_runlock(&null_hash_lock);
+	ASSERT_VOP_LOCKED(lowervp, __func__);
+	rw_assert(&null_hash_lock, RA_UNLOCKED);
 
-	return (vp);
+	vfs_smr_enter();
+	hd = NULL_NHASH(lowervp);
+	CK_SLIST_FOREACH(a, hd, null_hash) {
+		if (a->null_lowervp != lowervp)
+			continue;
+		/*
+		 * See null_hashget_locked as to why the nullfs vnode can't be
+		 * doomed here.
+		 */
+		vp = NULLTOV(a);
+		VNPASS(!VN_IS_DOOMED(vp), vp);
+		if (vp->v_mount != mp)
+			continue;
+		vs = vget_prep_smr(vp);
+		vfs_smr_exit();
+		VNPASS(vs != VGET_NONE, vp);
+		vget_finish_ref(vp, vs);
+		return (vp);
+	}
+	vfs_smr_exit();
+	return (NULL);
 }
 
 static void
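
The lockless variant follows FreeBSD's standard SMR lookup shape: find the candidate inside the SMR section, convert the bare pointer into a stable hold with vget_prep_smr() while still inside, leave the section, and only then finish acquiring the reference. In generic code the result must be checked, because vget_prep_smr() returns VGET_NONE when it loses the race against the vnode being freed; here that outcome is impossible (the lower vnode lock pins the node) and is asserted instead. The generic shape, as a sketch (smr_lookup_helper is a placeholder name):

	vfs_smr_enter();
	vp = smr_lookup_helper();	/* hypothetical: find a candidate */
	vs = vget_prep_smr(vp);		/* grab a hold while still in SMR */
	vfs_smr_exit();
	if (vs == VGET_NONE) {
		/* vnode was being freed from under us; caller retries */
	} else {
		vget_finish_ref(vp, vs);	/* turn the hold into a ref */
	}

vget_finish_ref() only acquires a reference; callers that also need the vnode lock use vget_finish() with the desired lock flags instead, as done in e.g. the name cache.
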
@@ -151,7 +181,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
 	hd = NULL_NHASH(xp->null_lowervp);
 #ifdef INVARIANTS
-	LIST_FOREACH(oxp, hd, null_hash) {
+	CK_SLIST_FOREACH(oxp, hd, null_hash) {
 		if (oxp->null_lowervp == xp->null_lowervp &&
 		    NULLTOV(oxp)->v_mount == mp) {
 			VNASSERT(0, NULLTOV(oxp),
@@ -159,7 +189,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
 		}
 	}
 #endif
-	LIST_INSERT_HEAD(hd, xp, null_hash);
+	CK_SLIST_INSERT_HEAD(hd, xp, null_hash);
 }
static void
@@ -174,7 +204,7 @@ null_destroy_proto(struct vnode *vp, void *xp)
 	VI_UNLOCK(vp);
 	vgone(vp);
 	vput(vp);
-	free(xp, M_NULLFSNODE);
+	uma_zfree_smr(null_node_zone, xp);
 }
 
 /*
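
This is the release-side half of the scheme. With the old malloc-backed node, freeing here while another CPU was still traversing the bucket would have become a use-after-free once lookups stopped taking the hash lock; the SMR free defers reuse instead:

	free(xp, M_NULLFSNODE);			/* old: memory reusable at once */
	uma_zfree_smr(null_node_zone, xp);	/* new: reuse held off until all
						   current vfs_smr readers exit */

A concurrent null_hashget() that already holds a pointer to this null_node can therefore keep following its null_hash linkage without ever touching recycled memory.
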
@@ -208,12 +238,12 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 	 * Note that duplicate can only appear in hash if the lowervp is
 	 * locked LK_SHARED.
 	 */
-	xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);
+	xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
 
 	error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
 	if (error) {
 		vput(lowervp);
-		free(xp, M_NULLFSNODE);
+		uma_zfree_smr(null_node_zone, xp);
 		return (error);
 	}
@@ -261,8 +291,8 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 		return (error);
 	}
 
-	null_hashins(mp, xp);
 	vn_set_state(vp, VSTATE_CONSTRUCTED);
+	null_hashins(mp, xp);
 	rw_wunlock(&null_hash_lock);
 	*vpp = vp;
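
The swap of these two lines is load-bearing for the lockless lookup: once null_hashins() runs, the vnode is visible to null_hashget() without any lock held, so everything the lookup relies on, including the state set by vn_set_state(), has to be in place first. Schematically:

	/* before: window where an unlocked reader could find a
	 * not-yet-constructed vnode */
	null_hashins(mp, xp);
	vn_set_state(vp, VSTATE_CONSTRUCTED);

	/* after: finish construction, then publish */
	vn_set_state(vp, VSTATE_CONSTRUCTED);
	null_hashins(mp, xp);

This is the same init-then-publish discipline as the release store in the earlier list sketch, with CK_SLIST_INSERT_HEAD providing the atomic publish.
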
@@ -275,9 +305,11 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
 void
 null_hashrem(struct null_node *xp)
 {
+	struct null_node_hashhead *hd;
+
+	hd = NULL_NHASH(xp->null_lowervp);
 	rw_wlock(&null_hash_lock);
-	LIST_REMOVE(xp, null_hash);
+	CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
 	rw_wunlock(&null_hash_lock);
 }
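
One trade-off of moving to a singly linked list: LIST_REMOVE was O(1) because each element carried back-linkage, while CK_SLIST_REMOVE has to walk the bucket from hd to find the predecessor of xp, hence the new NULL_NHASH() computation above. The two shapes compare as:

	LIST_REMOVE(xp, null_hash);			/* O(1), uses prev linkage */
	CK_SLIST_REMOVE(hd, xp, null_node, null_hash);	/* walks hd to unlink xp */

Buckets stay short because the table is sized from desiredvnodes in nullfs_init(), and in exchange readers get a list they can traverse with no lock at all; removal itself still runs under the hash write lock, so writers never race each other.
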