author    David Teigland <teigland@redhat.com>    2007-01-15 10:34:52 -0600
committer Steven Whitehouse <swhiteho@redhat.com> 2007-02-05 13:36:55 -0500
commit    a1bc86e6bddd34362ca08a3a4d898eb4b5c15215 (patch)
tree      92b30f8f9400c5a1b71a2e3a17397b9d0b2cc2f2 /fs/dlm
parent    1d6e8131cf0064ef5ab5f3411a82b800afbfadee (diff)
[DLM] fix user unlocking

When a user process exits, we clear all the locks it holds. There is a
problem, though, with locks that the process had begun unlocking before
it exited. We couldn't find the lkb's that were in the process of being
unlocked remotely, to flag that they are DEAD. To solve this, we move
lkb's being unlocked onto a new list in the per-process structure that
tracks what locks the process is holding. We can then go through this
list to flag the necessary lkb's when clearing locks for a process when
it exits.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
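
[Editor's note] The fix is a small list-management pattern: an lkb whose
unlock has started is moved, not deleted, so the exit path can still reach
it. Below is a minimal userspace sketch of that pattern, assuming a
simplified doubly-linked list and a pthread mutex standing in for
locks_spin; the names (struct proc_locks, begin_unlock, clear_proc_locks)
are illustrative, not the kernel's:

    /*
     * Userspace sketch (illustrative names, not kernel code): an entry
     * whose unlock has begun is moved from "locks" to "unlocking" under
     * the same lock, so the exit path can still find it and mark it DEAD.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct entry {
            struct entry *prev, *next;
            int dead;
    };

    static void list_init(struct entry *h) { h->prev = h->next = h; }
    static int  list_empty(const struct entry *h) { return h->next == h; }

    static void list_del_init(struct entry *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
            list_init(e);
    }

    static void list_add(struct entry *e, struct entry *h)
    {
            e->next = h->next;
            e->prev = h;
            h->next->prev = e;
            h->next = e;
    }

    /* like list_move(): unlink from the current list, add to a new head */
    static void list_move(struct entry *e, struct entry *h)
    {
            list_del_init(e);
            list_add(e, h);
    }

    struct proc_locks {
            struct entry locks;       /* locks the process holds */
            struct entry unlocking;   /* unlocks in progress */
            pthread_mutex_t spin;     /* stands in for locks_spin */
    };

    /* unlock path: park the entry instead of deleting it outright */
    static void begin_unlock(struct proc_locks *p, struct entry *e)
    {
            pthread_mutex_lock(&p->spin);
            if (!list_empty(e))       /* may already be off the list */
                    list_move(e, &p->unlocking);
            pthread_mutex_unlock(&p->spin);
    }

    /* exit path: every in-progress unlock can now be flagged DEAD */
    static void clear_proc_locks(struct proc_locks *p)
    {
            pthread_mutex_lock(&p->spin);
            struct entry *e = p->unlocking.next, *n;
            for (; e != &p->unlocking; e = n) {
                    n = e->next;
                    list_del_init(e);
                    e->dead = 1;
            }
            pthread_mutex_unlock(&p->spin);
    }

    int main(void)
    {
            struct proc_locks p = { .spin = PTHREAD_MUTEX_INITIALIZER };
            struct entry e;

            list_init(&p.locks);
            list_init(&p.unlocking);
            list_init(&e);
            e.dead = 0;
            list_add(&e, &p.locks);

            begin_unlock(&p, &e);     /* unlock starts, then process exits */
            clear_proc_locks(&p);
            printf("entry dead: %d\n", e.dead);   /* prints 1 */
            return 0;
    }

The !list_empty() guard in begin_unlock() mirrors the race the patch
itself comments on: dlm_user_add_ast() may already have taken the lkb off
the proc list.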
Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/dlm_internal.h |  1
-rw-r--r--  fs/dlm/lock.c         | 30
-rw-r--r--  fs/dlm/user.c         |  9
3 files changed, 28 insertions, 12 deletions
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index ee993c5c230..61d93201e1b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -526,6 +526,7 @@ struct dlm_user_proc {
         spinlock_t              asts_spin;
         struct list_head        locks;
         spinlock_t              locks_spin;
+        struct list_head        unlocking;
         wait_queue_head_t       wait;
 };
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 5bac9827ded..6ad2b8eb96a 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -3772,12 +3772,10 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
                 goto out_put;

         spin_lock(&ua->proc->locks_spin);
-        list_del_init(&lkb->lkb_ownqueue);
+        /* dlm_user_add_ast() may have already taken lkb off the proc list */
+        if (!list_empty(&lkb->lkb_ownqueue))
+                list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
         spin_unlock(&ua->proc->locks_spin);
-
-        /* this removes the reference for the proc->locks list added by
-           dlm_user_request */
-        unhold_lkb(lkb);
 out_put:
         dlm_put_lkb(lkb);
 out:
@@ -3817,9 +3815,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
         /* this lkb was removed from the WAITING queue */
         if (lkb->lkb_grmode == DLM_LOCK_IV) {
                 spin_lock(&ua->proc->locks_spin);
-                list_del_init(&lkb->lkb_ownqueue);
+                list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
                 spin_unlock(&ua->proc->locks_spin);
-                unhold_lkb(lkb);
         }
 out_put:
         dlm_put_lkb(lkb);
@@ -3880,11 +3877,6 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
         mutex_lock(&ls->ls_clear_proc_locks);

         list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
-                if (lkb->lkb_ast_type) {
-                        list_del(&lkb->lkb_astqueue);
-                        unhold_lkb(lkb);
-                }
-
                 list_del_init(&lkb->lkb_ownqueue);

                 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
@@ -3901,6 +3893,20 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
                 dlm_put_lkb(lkb);
         }

+        /* in-progress unlocks */
+        list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
+                list_del_init(&lkb->lkb_ownqueue);
+                lkb->lkb_flags |= DLM_IFL_DEAD;
+                dlm_put_lkb(lkb);
+        }
+
+        list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
+                list_del(&lkb->lkb_astqueue);
+                dlm_put_lkb(lkb);
+        }
+
         mutex_unlock(&ls->ls_clear_proc_locks);
         unlock_recovery(ls);
 }
+
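[Editor's note] Both new cleanup loops in dlm_clear_proc_locks() use
list_for_each_entry_safe() because the loop body unlinks the entry it is
visiting; the _safe variant caches the next pointer before the body runs.
A standalone sketch of that idiom, with a simplified singly-linked list
and illustrative names:

    #include <stdio.h>

    struct node { struct node *next; int id; };

    int main(void)
    {
            struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
            struct node *cur = &a, *n;

            /* "safe" traversal: capture n = cur->next before unlinking
               cur, which is what list_for_each_entry_safe() does for you */
            while (cur) {
                    n = cur->next;
                    cur->next = NULL;             /* the "delete" step */
                    printf("cleared node %d\n", cur->id);
                    cur = n;
            }
            return 0;
    }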
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index c37e93e4f2d..d378b7fe2a1 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -180,6 +180,14 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
             ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
                 remove_ownqueue = 1;

+        /* unlocks or cancels of waiting requests need to be removed from the
+           proc's unlocking list, again there must be a better way... */
+
+        if (ua->lksb.sb_status == -DLM_EUNLOCK ||
+            (ua->lksb.sb_status == -DLM_ECANCEL &&
+             lkb->lkb_grmode == DLM_LOCK_IV))
+                remove_ownqueue = 1;
+
         /* We want to copy the lvb to userspace when the completion
            ast is read if the status is 0, the lock has an lvb and
            lvb_ops says we should.  We could probably have set_lvb_lock()
@@ -523,6 +531,7 @@ static int device_open(struct inode *inode, struct file *file)
         proc->lockspace = ls->ls_local_handle;
         INIT_LIST_HEAD(&proc->asts);
         INIT_LIST_HEAD(&proc->locks);
+        INIT_LIST_HEAD(&proc->unlocking);
         spin_lock_init(&proc->asts_spin);
         spin_lock_init(&proc->locks_spin);
         init_waitqueue_head(&proc->wait);