diff options
| author | Konstantin Belousov <kib@FreeBSD.org> | 2008-07-25 11:22:25 +0000 |
|---|---|---|
| committer | Konstantin Belousov <kib@FreeBSD.org> | 2008-07-25 11:22:25 +0000 |
| commit | 96f1567fa702fc5b8f39d035376caae7e26e1ff1 (patch) | |
| tree | a0872ededb23ed0db45cf012b98a5909015d6e4e /sys/kern/kern_lock.c | |
| parent | c5783846f6900418974e4d06540e78a7a629bc75 (diff) | |
Notes
Diffstat (limited to 'sys/kern/kern_lock.c')
| -rw-r--r-- | sys/kern/kern_lock.c | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c index da6af57f9985..b3f0e25847fe 100644 --- a/sys/kern/kern_lock.c +++ b/sys/kern/kern_lock.c @@ -392,12 +392,12 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, &contested, &waittime); /* - * If the lock is alredy held by curthread in + * If the lock is already held by curthread in * exclusive way avoid a deadlock. */ if (LK_HOLDER(x) == tid) { LOCK_LOG2(lk, - "%s: %p alredy held in exclusive mode", + "%s: %p already held in exclusive mode", __func__, lk); error = EDEADLK; break; @@ -504,7 +504,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, LOP_EXCLUSIVE, file, line); /* - * If curthread alredy holds the lock and this one is + * If curthread already holds the lock and this one is * allowed to recurse, simply recurse on it. */ if (lockmgr_xlocked(lk)) { @@ -722,7 +722,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, LOP_EXCLUSIVE, file, line); /* - * Trying to drain a lock we alredy own will result in a + * Trying to drain a lock we already own will result in a * deadlock. */ if (lockmgr_xlocked(lk)) { @@ -863,7 +863,7 @@ _lockmgr_disown(struct lock *lk, const char *file, int line) _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); /* - * If the owner is alredy LK_KERNPROC just skip the whole operation. + * If the owner is already LK_KERNPROC just skip the whole operation. */ if (LK_HOLDER(lk->lk_lock) != tid) return; |
