| author | Andrew Morton <akpm@osdl.org> | 2003-08-20 10:29:16 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.osdl.org> | 2003-08-20 10:29:16 -0700 |
| commit | 7d33101cd55d08cdc51ba5abe9290984cd4198d6 | |
| tree | 7802dc579c66aa8c090087927b90528e24071e59 | |
| parent | e3e0c299196a8dac91d2bd8e8dbb748d3a7b0f24 | |
[PATCH] fix /proc mm_struct refcounting bug
From: Suparna Bhattacharya <suparna@in.ibm.com>
The /proc code's bare atomic_inc(&mm->mm_users) is racy against __exit_mm()'s
mmput() on another CPU: __exit_mm() calls mmput() outside task_lock(tsk), and
task_lock() wouldn't be the appropriate lock for this anyway.
So what happens is:
    CPU0                                      CPU1
    mmput()
      ->atomic_dec_and_lock(mm->mm_users)
                                              atomic_inc(mm->mm_users)
      ->list_del(mm->mmlist)
                                              mmput()
                                                ->atomic_dec_and_lock(mm->mm_users)
                                                ->list_del(mm->mmlist)
And the double list_del() of course goes splat.
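For context, this is roughly the mmput() path being raced with -- a sketch
reconstructed from the trace above, not quoted verbatim from kernel/fork.c,
with the actual teardown work elided:

```c
void mmput(struct mm_struct *mm)
{
	/*
	 * atomic_dec_and_lock() drops mm_users and returns true holding
	 * mmlist_lock exactly when the count reaches zero, so the unlink
	 * below happens under the lock.  But a bare atomic_inc() elsewhere
	 * can still resurrect a count that has already hit zero -- which
	 * is precisely the bug.
	 */
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
		/* ... tear down the address space and free the mm ... */
	}
}
```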
So we use mmlist_lock to synchronise these steps: the final decrement and the
list_del() in mmput() both happen under mmlist_lock, so checking and bumping
mm_users under the same lock guarantees the count can't be resurrected once it
has reached zero.
The patch implements a new mmgrab() routine which increments mm_users only if
the mm isn't already going away, and changes get_task_mm() and proc_pid_stat()
to call mmgrab() instead of a bare atomic_inc(&mm->mm_users).
Hugh, there's some cruft in swapoff which looks like it should be using
mmgrab()...
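For concreteness, a minimal sketch of the pattern both converted call sites
follow (this mirrors the get_task_mm() hunk below; the trailing mmput() is the
caller's responsibility and is not part of the patch):

```c
struct mm_struct *mm;

task_lock(task);		/* stabilises the task->mm pointer itself */
mm = task->mm;
if (mm)
	mm = mmgrab(mm);	/* NULL if mm_users had already hit zero */
task_unlock(task);

if (mm) {
	/* ... the mm can be used safely here ... */
	mmput(mm);		/* drop the reference mmgrab() took */
}
```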
-rw-r--r--  fs/proc/array.c        |  2 +-
-rw-r--r--  include/linux/sched.h  |  4 +++-
-rw-r--r--  kernel/fork.c          | 17 +++++++++++++++++
3 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 643ccd392f23..a7a3bdaac4ec 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -300,7 +300,7 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
 	task_lock(task);
 	mm = task->mm;
 	if(mm)
-		atomic_inc(&mm->mm_users);
+		mm = mmgrab(mm);
 	if (task->tty) {
 		tty_pgrp = task->tty->pgrp;
 		tty_nr = task->tty->device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 61ec12b5b77a..054168543a45 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -638,6 +638,8 @@ static inline void mmdrop(struct mm_struct * mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+/* Grab a reference to the mm if its not already going away */
+extern struct mm_struct *mmgrab(struct mm_struct *);
 
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
@@ -745,7 +747,7 @@ static inline struct mm_struct * get_task_mm(struct task_struct * task)
 	task_lock(task);
 	mm = task->mm;
 	if (mm)
-		atomic_inc(&mm->mm_users);
+		mm = mmgrab(mm);
 	task_unlock(task);
 
 	return mm;
diff --git a/kernel/fork.c b/kernel/fork.c
index b65c19fe2dce..690b8d77a04b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -398,6 +398,23 @@ void mmput(struct mm_struct *mm)
 	}
 }
 
+/*
+ * Checks if the use count of an mm is non-zero and if so
+ * returns a reference to it after bumping up the use count.
+ * If the use count is zero, it means this mm is going away,
+ * so return NULL.
+ */
+struct mm_struct *mmgrab(struct mm_struct *mm)
+{
+	spin_lock(&mmlist_lock);
+	if (!atomic_read(&mm->mm_users))
+		mm = NULL;
+	else
+		atomic_inc(&mm->mm_users);
+	spin_unlock(&mmlist_lock);
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
