author     Konstantin Belousov <kib@FreeBSD.org>   2008-08-01 09:46:19 +0000
committer  Konstantin Belousov <kib@FreeBSD.org>   2008-08-01 09:46:19 +0000
commit     cb917b839b94adf81f241d51bcd2a1ce82161618 (patch)
tree       20f5445f82a8aac528ac6803d9bf5f1b4b31ed62 /sys/kern
parent     5e38927c8c78b301b63e4c30508cf75fd60fa0e4 (diff)
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_lockf.c    99
-rw-r--r--  sys/kern/vfs_default.c   41
-rw-r--r--  sys/kern/vfs_subr.c       5
3 files changed, 142 insertions, 3 deletions
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 059d4984b020..8cab502a6f82 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -570,6 +570,11 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* the vnode interlock.
*/
VI_LOCK(vp);
+ if (vp->v_iflag & VI_DOOMED) {
+ VI_UNLOCK(vp);
+ lf_free_lock(lock);
+ return (ENOENT);
+ }
/*
* Allocate a state structure if necessary.
@@ -595,6 +600,16 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
* trying to allocate memory.
*/
VI_LOCK(vp);
+ if (vp->v_iflag & VI_DOOMED) {
+ VI_UNLOCK(vp);
+ sx_xlock(&lf_lock_states_lock);
+ LIST_REMOVE(ls, ls_link);
+ sx_xunlock(&lf_lock_states_lock);
+ sx_destroy(&ls->ls_lock);
+ free(ls, M_LOCKF);
+ lf_free_lock(lock);
+ return (ENOENT);
+ }
if ((*statep) == NULL) {
state = *statep = ls;
VI_UNLOCK(vp);
@@ -687,6 +702,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
VI_LOCK(vp);
state->ls_threads--;
+ wakeup(state);
if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
KASSERT(LIST_EMPTY(&state->ls_pending),
("freeing state with pending locks"));
@@ -722,6 +738,77 @@ lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
return (lf_advlockasync(&a, statep, size));
}
+void
+lf_purgelocks(struct vnode *vp, struct lockf **statep)
+{
+ struct lockf *state;
+ struct lockf_entry *lock, *nlock;
+
+ /*
+ * For this to work correctly, the caller must ensure that no
+ * other threads enter the locking system for this vnode,
+ * e.g. by checking VI_DOOMED. We wake up any threads that are
+ * sleeping waiting for locks on this vnode and then free all
+ * the remaining locks.
+ */
+ VI_LOCK(vp);
+ state = *statep;
+ if (state) {
+ state->ls_threads++;
+ VI_UNLOCK(vp);
+
+ sx_xlock(&state->ls_lock);
+ sx_xlock(&lf_owner_graph_lock);
+ LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
+ LIST_REMOVE(lock, lf_link);
+ lf_remove_outgoing(lock);
+ lf_remove_incoming(lock);
+
+ /*
+ * If it's an async lock, we can just free it
+ * here, otherwise we let the sleeping thread
+ * free it.
+ */
+ if (lock->lf_async_task) {
+ lf_free_lock(lock);
+ } else {
+ lock->lf_flags |= F_INTR;
+ wakeup(lock);
+ }
+ }
+ sx_xunlock(&lf_owner_graph_lock);
+ sx_xunlock(&state->ls_lock);
+
+ /*
+ * Wait for all other threads, sleeping and otherwise
+ * to leave.
+ */
+ VI_LOCK(vp);
+ while (state->ls_threads > 1)
+ msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
+ *statep = 0;
+ VI_UNLOCK(vp);
+
+ /*
+ * We can just free all the active locks since they
+ * will have no dependencies (we removed them all
+ * above). We don't need to bother locking since we
+ * are the last thread using this state structure.
+ */
+ LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
+ LIST_REMOVE(lock, lf_link);
+ lf_free_lock(lock);
+ }
+ sx_xlock(&lf_lock_states_lock);
+ LIST_REMOVE(state, ls_link);
+ sx_xunlock(&lf_lock_states_lock);
+ sx_destroy(&state->ls_lock);
+ free(state, M_LOCKF);
+ } else {
+ VI_UNLOCK(vp);
+ }
+}
+
/*
* Return non-zero if locks 'x' and 'y' overlap.
*/
@@ -1347,7 +1434,10 @@ lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
* remove our lock graph edges) and/or by another
* process releasing a lock (in which case our edges
* have already been removed and we have been moved to
- * the active list).
+ * the active list). We may also have been woken by
+ * lf_purgelocks which we report to the caller as
+ * EINTR. In that case, lf_purgelocks will have
+ * removed our lock graph edges.
*
* Note that it is possible to receive a signal after
* we were successfully woken (and moved to the active
@@ -1359,6 +1449,11 @@ lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
* may now have incoming edges from some newer lock
* which is waiting behind us in the queue.
*/
+ if (lock->lf_flags & F_INTR) {
+ error = EINTR;
+ lf_free_lock(lock);
+ goto out;
+ }
if (LIST_EMPTY(&lock->lf_outedges)) {
error = 0;
} else {
@@ -2295,7 +2390,7 @@ lf_printlist(char *tag, struct lockf_entry *lock)
printf("%s: Lock list for ino %ju on dev <%s>:\n",
tag, (uintmax_t)lock->lf_inode->i_number,
devtoname(lock->lf_inode->i_dev));
- LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) {
+ LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
printf("\tlock %p for ",(void *)lf);
lf_print_owner(lock->lf_owner);
printf(", %s, start %jd, end %jd",
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 171ac20136a5..68e6c6b9035e 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
+#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
@@ -75,7 +76,8 @@ struct vop_vector default_vnodeops = {
.vop_default = NULL,
.vop_bypass = VOP_EOPNOTSUPP,
- .vop_advlock = VOP_EINVAL,
+ .vop_advlock = vop_stdadvlock,
+ .vop_advlockasync = vop_stdadvlockasync,
.vop_bmap = vop_stdbmap,
.vop_close = VOP_NULL,
.vop_fsync = VOP_NULL,
@@ -201,6 +203,43 @@ vop_nostrategy (struct vop_strategy_args *ap)
}
/*
+ * Advisory record locking support
+ */
+int
+vop_stdadvlock(struct vop_advlock_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct thread *td = curthread;
+ struct vattr vattr;
+ int error;
+
+ vn_lock(vp, LK_SHARED | LK_RETRY, td);
+ error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
+ if (error)
+ return (error);
+
+ return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
+}
+
+int
+vop_stdadvlockasync(struct vop_advlockasync_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct thread *td = curthread;
+ struct vattr vattr;
+ int error;
+
+ vn_lock(vp, LK_SHARED | LK_RETRY, td);
+ error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
+ VOP_UNLOCK(vp, 0, td);
+ if (error)
+ return (error);
+
+ return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
+}
+
+/*
* vop_stdpathconf:
*
* Standard implementation of POSIX pathconf, to get information about limits
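With vop_stdadvlock and vop_stdadvlockasync installed in default_vnodeops, a filesystem that defines no VOP_ADVLOCK of its own now gets byte-range locking through lf_advlock()/lf_advlockasync(). What userland exercises through that path is ordinary POSIX record locking via fcntl(); a minimal sketch follows, with an illustrative file path.

/*
 * Userland sketch: the request path served by these default VOPs is
 * ordinary POSIX byte-range locking via fcntl(); the file path below
 * is illustrative.
 */
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl;
	int fd;

	fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);
	if (fd == -1)
		err(1, "open");

	/* Exclusive advisory lock on the first 100 bytes of the file. */
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 100;

	/*
	 * F_SETLKW sleeps until the range is free; this is the sleep that
	 * the F_INTR path above interrupts, surfacing as EINTR.
	 */
	if (fcntl(fd, F_SETLKW, &fl) == -1)
		err(1, "F_SETLKW");

	/* ... work on the protected byte range ... */

	fl.l_type = F_UNLCK;
	if (fcntl(fd, F_SETLK, &fl) == -1)
		err(1, "F_UNLCK");
	close(fd);
	return (0);
}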
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 835cb29af636..4bdbb47d52c3 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
+#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
@@ -2537,6 +2538,10 @@ vgonel(struct vnode *vp)
VNASSERT(vp->v_object == NULL, vp,
("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
/*
+ * Clear the advisory locks and wake up waiting threads.
+ */
+ lf_purgelocks(vp, &(vp->v_lockf));
+ /*
* Delete from old mount point vnode list.
*/
delmntque(vp);