author     Patrick Mochel <mochel@osdl.org>   2003-09-24 18:36:16 -0700
committer  Patrick Mochel <mochel@osdl.org>   2003-09-24 18:36:16 -0700
commit     b71aebdf785c99a97e9cfdb704da4acc7df601a3 (patch)
tree       35b512be5f0e003e72b1327b9cf546402a234fee /kernel
parent     53f2011513be7d4cf8316fe0c00a75d54558b5f2 (diff)
parent     4cad6adc26c3ef2f5ec45dd014aab3cf7b8d87dd (diff)
Merge osdl.org:/home/mochel/src/kernel/linux-2.5-virgin
into osdl.org:/home/mochel/src/kernel/linux-2.5-power
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c     | 117
-rw-r--r--  kernel/resource.c  | 100
2 files changed, 128 insertions(+), 89 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 6691f6475869..db0a81a810dd 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -33,7 +33,7 @@
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
@@ -44,6 +44,7 @@
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
+ * Don't rearrange members without looking at hash_futex().
*/
union futex_key {
struct {
@@ -79,9 +80,15 @@ struct futex_q {
struct file *filp;
};
-/* The key for the hash is the address + index + offset within page */
-static struct list_head futex_queues[1<<FUTEX_HASHBITS];
-static spinlock_t futex_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Split the global futex_lock into every hash list lock.
+ */
+struct futex_hash_bucket {
+ spinlock_t lock;
+ struct list_head chain;
+};
+
+static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;
@@ -89,11 +96,12 @@ static struct vfsmount *futex_mnt;
/*
* We hash on the keys returned from get_futex_key (see below).
*/
-static inline struct list_head *hash_futex(union futex_key *key)
+static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
- return &futex_queues[hash_long(key->both.word
- + (unsigned long) key->both.ptr
- + key->both.offset, FUTEX_HASHBITS)];
+ u32 hash = jhash2((u32*)&key->both.word,
+ (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+ key->both.offset);
+ return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
/*
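The hunk above drops the single global futex_lock in favour of an array of hash buckets, each with its own spinlock, and picks a bucket by hashing the whole key and masking with the (power-of-two) table size. Below is a minimal user-space sketch of that structure; it is not part of the patch, the key layout is simplified, and a crude multiplicative mix stands in for the kernel's jhash2(). All names are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define HASHBITS 8                      /* table size is a power of two */

struct key {
    unsigned long word;                 /* e.g. page index or address  */
    void *ptr;                          /* e.g. inode or mm pointer    */
    int offset;                         /* offset within the page      */
};

struct bucket {
    pthread_mutex_t lock;               /* per-bucket lock, not global */
    int nr_queued;                      /* a real table chains waiters here */
};

static struct bucket buckets[1 << HASHBITS];

static struct bucket *hash_key(const struct key *k)
{
    /* crude mix of all three fields; the kernel uses jhash2() instead */
    uint64_t h = (k->word + (uint64_t)k->offset) * 0x9E3779B97F4A7C15ull;
    h ^= (uint64_t)(uintptr_t)k->ptr * 0xC2B2AE3D27D4EB4Full;
    return &buckets[(h >> 32) & ((1u << HASHBITS) - 1)];
}

int main(void)
{
    struct key k = { .word = 0x1000, .ptr = (void *)&k, .offset = 12 };
    struct bucket *b;
    size_t i;

    for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
        pthread_mutex_init(&buckets[i].lock, NULL);

    b = hash_key(&k);
    pthread_mutex_lock(&b->lock);       /* contention is per bucket now */
    b->nr_queued++;
    pthread_mutex_unlock(&b->lock);

    printf("key hashed to bucket %ld\n", (long)(b - buckets));
    return 0;
}

With a global lock every futex operation serialized against every other; with per-bucket locks only operations that hash to the same bucket contend.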
@@ -214,6 +222,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
static int futex_wake(unsigned long uaddr, int num)
{
struct list_head *i, *next, *head;
+ struct futex_hash_bucket *bh;
union futex_key key;
int ret;
@@ -223,9 +232,10 @@ static int futex_wake(unsigned long uaddr, int num)
if (unlikely(ret != 0))
goto out;
- head = hash_futex(&key);
+ bh = hash_futex(&key);
+ spin_lock(&bh->lock);
+ head = &bh->chain;
- spin_lock(&futex_lock);
list_for_each_safe(i, next, head) {
struct futex_q *this = list_entry(i, struct futex_q, list);
@@ -239,7 +249,7 @@ static int futex_wake(unsigned long uaddr, int num)
break;
}
}
- spin_unlock(&futex_lock);
+ spin_unlock(&bh->lock);
out:
up_read(&current->mm->mmap_sem);
@@ -254,6 +264,7 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
int nr_wake, int nr_requeue)
{
struct list_head *i, *next, *head1, *head2;
+ struct futex_hash_bucket *bh1, *bh2;
union futex_key key1, key2;
int ret;
@@ -266,10 +277,19 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
if (unlikely(ret != 0))
goto out;
- head1 = hash_futex(&key1);
- head2 = hash_futex(&key2);
+ bh1 = hash_futex(&key1);
+ bh2 = hash_futex(&key2);
+ if (bh1 < bh2) {
+ spin_lock(&bh1->lock);
+ spin_lock(&bh2->lock);
+ } else {
+ spin_lock(&bh2->lock);
+ if (bh1 > bh2)
+ spin_lock(&bh1->lock);
+ }
+ head1 = &bh1->chain;
+ head2 = &bh2->chain;
- spin_lock(&futex_lock);
list_for_each_safe(i, next, head1) {
struct futex_q *this = list_entry(i, struct futex_q, list);
@@ -291,8 +311,14 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
}
}
}
- spin_unlock(&futex_lock);
-
+ if (bh1 < bh2) {
+ spin_unlock(&bh2->lock);
+ spin_unlock(&bh1->lock);
+ } else {
+ if (bh1 > bh2)
+ spin_unlock(&bh1->lock);
+ spin_unlock(&bh2->lock);
+ }
out:
up_read(&current->mm->mmap_sem);
return ret;
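Because futex_requeue() now needs two bucket locks at once, the hunk above imposes an ordering, lock the lower-addressed bucket first, and takes only one lock when both keys hash to the same bucket. The following user-space sketch, again not part of the patch and with illustrative names, shows that ABBA-deadlock-avoidance pattern with pthread mutexes.

#include <pthread.h>

struct bucket { pthread_mutex_t lock; };

static void lock_two(struct bucket *a, struct bucket *b)
{
    if (a < b) {
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    } else {
        pthread_mutex_lock(&b->lock);
        if (a > b)                      /* a == b: same bucket, lock once */
            pthread_mutex_lock(&a->lock);
    }
}

static void unlock_two(struct bucket *a, struct bucket *b)
{
    if (a < b) {
        pthread_mutex_unlock(&b->lock);
        pthread_mutex_unlock(&a->lock);
    } else {
        if (a > b)
            pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
    }
}

int main(void)
{
    static struct bucket x = { PTHREAD_MUTEX_INITIALIZER };
    static struct bucket y = { PTHREAD_MUTEX_INITIALIZER };

    lock_two(&x, &y);                   /* safe in either argument order */
    unlock_two(&x, &y);
    lock_two(&x, &x);                   /* same bucket: locked only once */
    unlock_two(&x, &x);
    return 0;
}

Since every path that takes both locks acquires them in the same (address) order, two requeue callers working on the same pair of buckets cannot deadlock against each other.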
@@ -301,28 +327,30 @@ out:
static inline void queue_me(struct futex_q *q, union futex_key *key,
int fd, struct file *filp)
{
- struct list_head *head = hash_futex(key);
+ struct futex_hash_bucket *bh = hash_futex(key);
+ struct list_head *head = &bh->chain;
q->key = *key;
q->fd = fd;
q->filp = filp;
- spin_lock(&futex_lock);
+ spin_lock(&bh->lock);
list_add_tail(&q->list, head);
- spin_unlock(&futex_lock);
+ spin_unlock(&bh->lock);
}
/* Return 1 if we were still queued (ie. 0 means we were woken) */
static inline int unqueue_me(struct futex_q *q)
{
+ struct futex_hash_bucket *bh = hash_futex(&q->key);
int ret = 0;
- spin_lock(&futex_lock);
+ spin_lock(&bh->lock);
if (!list_empty(&q->list)) {
list_del(&q->list);
ret = 1;
}
- spin_unlock(&futex_lock);
+ spin_unlock(&bh->lock);
return ret;
}
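The queue_me()/unqueue_me() hunk keeps the existing discipline, enqueue under the (now per-bucket) lock, and on unqueue report whether the entry was still on the list, since an already-empty entry means a waker removed and woke us. A compact user-space sketch of that discipline, with a hand-rolled intrusive list standing in for the kernel's list_head and all names assumed, follows.

#include <pthread.h>

struct node { struct node *next, *prev; };

struct bucket {
    pthread_mutex_t lock;
    struct node chain;                  /* circular list head */
};

static void list_init(struct node *n) { n->next = n->prev = n; }
static int  list_empty(const struct node *n) { return n->next == n; }

static void list_add_tail(struct node *n, struct node *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

static void list_del_init(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    list_init(n);
}

static void queue_me(struct bucket *b, struct node *q)
{
    pthread_mutex_lock(&b->lock);
    list_add_tail(q, &b->chain);
    pthread_mutex_unlock(&b->lock);
}

/* Returns 1 if we were still queued; 0 means a waker already removed us. */
static int unqueue_me(struct bucket *b, struct node *q)
{
    int ret = 0;

    pthread_mutex_lock(&b->lock);
    if (!list_empty(q)) {
        list_del_init(q);
        ret = 1;
    }
    pthread_mutex_unlock(&b->lock);
    return ret;
}

int main(void)
{
    struct bucket b = { PTHREAD_MUTEX_INITIALIZER };
    struct node q;

    list_init(&b.chain);
    queue_me(&b, &q);
    /* A waker would do list_del_init(&q) under b.lock; nobody did here, */
    /* so unqueue_me() reports that we were still queued.                */
    return unqueue_me(&b, &q) ? 0 : 1;
}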
@@ -332,8 +360,8 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
int ret, curval;
union futex_key key;
struct futex_q q;
+ struct futex_hash_bucket *bh = NULL;
- try_again:
init_waitqueue_head(&q.waiters);
down_read(&current->mm->mmap_sem);
@@ -367,25 +395,26 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
/*
* There might have been scheduling since the queue_me(), as we
* cannot hold a spinlock across the get_user() in case it
- * faults. So we cannot just set TASK_INTERRUPTIBLE state when
+ * faults, and we cannot just set TASK_INTERRUPTIBLE state when
* queueing ourselves into the futex hash. This code thus has to
- * rely on the futex_wake() code doing a wakeup after removing
- * the waiter from the list.
+ * rely on the futex_wake() code removing us from hash when it
+ * wakes us up.
*/
add_wait_queue(&q.waiters, &wait);
- spin_lock(&futex_lock);
+ bh = hash_futex(&key);
+ spin_lock(&bh->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (unlikely(list_empty(&q.list))) {
/*
* We were woken already.
*/
- spin_unlock(&futex_lock);
+ spin_unlock(&bh->lock);
set_current_state(TASK_RUNNING);
return 0;
}
- spin_unlock(&futex_lock);
+ spin_unlock(&bh->lock);
time = schedule_timeout(time);
set_current_state(TASK_RUNNING);
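The comment in the hunk above is about a lost-wakeup window: the waiter may be woken between queue_me() and going to sleep, so it must re-check "am I still queued?" with the bucket lock held before it actually sleeps. A rough user-space analogue, with assumed names and pthread_cond_wait() playing the role that set_current_state() plus schedule_timeout() play in the kernel (release the lock and block atomically), is sketched below.

#include <pthread.h>

struct waiter {
    pthread_mutex_t lock;               /* stands in for the bucket lock       */
    pthread_cond_t  wake;               /* stands in for the futex wait queue  */
    int             queued;             /* stands in for !list_empty(&q.list)  */
};

static void wait_for_wakeup(struct waiter *w)
{
    pthread_mutex_lock(&w->lock);
    while (w->queued)                   /* already dequeued by a waker? then don't sleep */
        pthread_cond_wait(&w->wake, &w->lock);
    pthread_mutex_unlock(&w->lock);
}

static void wake(struct waiter *w)
{
    pthread_mutex_lock(&w->lock);
    w->queued = 0;                      /* the waker dequeues, then wakes */
    pthread_cond_signal(&w->wake);
    pthread_mutex_unlock(&w->lock);
}

int main(void)
{
    struct waiter w = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1 };

    wake(&w);                           /* wakeup arrives before we ever sleep...   */
    wait_for_wakeup(&w);                /* ...and is not lost: queued is rechecked  */
    return 0;
}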
@@ -394,26 +423,17 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
* we are the only user of it.
*/
- /*
- * Were we woken or interrupted for a valid reason?
- */
- ret = unqueue_me(&q);
- if (ret == 0)
+ /* If we were woken (and unqueued), we succeeded, whatever. */
+ if (!unqueue_me(&q))
return 0;
if (time == 0)
return -ETIMEDOUT;
- if (signal_pending(current))
- return -EINTR;
-
- /*
- * No, it was a spurious wakeup. Try again. Should never happen. :)
- */
- goto try_again;
+ /* A spurious wakeup should never happen. */
+ WARN_ON(!signal_pending(current));
+ return -EINTR;
out_unqueue:
- /*
- * Were we unqueued anyway?
- */
+ /* If we were woken (and unqueued), we succeeded, whatever. */
if (!unqueue_me(&q))
ret = 0;
out_release_sem:
@@ -435,13 +455,14 @@ static unsigned int futex_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct futex_q *q = filp->private_data;
+ struct futex_hash_bucket *bh = hash_futex(&q->key);
int ret = 0;
poll_wait(filp, &q->waiters, wait);
- spin_lock(&futex_lock);
+ spin_lock(&bh->lock);
if (list_empty(&q->list))
ret = POLLIN | POLLRDNORM;
- spin_unlock(&futex_lock);
+ spin_unlock(&bh->lock);
return ret;
}
@@ -587,8 +608,10 @@ static int __init init(void)
register_filesystem(&futex_fs_type);
futex_mnt = kern_mount(&futex_fs_type);
- for (i = 0; i < ARRAY_SIZE(futex_queues); i++)
- INIT_LIST_HEAD(&futex_queues[i]);
+ for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ INIT_LIST_HEAD(&futex_queues[i].chain);
+ futex_queues[i].lock = SPIN_LOCK_UNLOCKED;
+ }
return 0;
}
__initcall(init);
diff --git a/kernel/resource.c b/kernel/resource.c
index 58fdf2c88b32..f8ce88129fd2 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,75 +38,91 @@ static rwlock_t resource_lock = RW_LOCK_UNLOCKED;
#ifdef CONFIG_PROC_FS
-#define MAX_IORES_LEVEL 5
+enum { MAX_IORES_LEVEL = 5 };
-/*
- * do_resource_list():
- * for reports of /proc/ioports and /proc/iomem;
- * do current entry, then children, then siblings;
- */
-static int do_resource_list(struct seq_file *m, struct resource *res, const char *fmt, int level)
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
- while (res) {
- const char *name;
-
- name = res->name ? res->name : "<BAD>";
- if (level > MAX_IORES_LEVEL)
- level = MAX_IORES_LEVEL;
- seq_printf (m, fmt + 2 * MAX_IORES_LEVEL - 2 * level,
- res->start, res->end, name);
-
- if (res->child)
- do_resource_list(m, res->child, fmt, level + 1);
+ struct resource *p = v;
+ (*pos)++;
+ if (p->child)
+ return p->child;
+ while (!p->sibling && p->parent)
+ p = p->parent;
+ return p->sibling;
+}
- res = res->sibling;
- }
+static void *r_start(struct seq_file *m, loff_t *pos)
+{
+ struct resource *p = m->private;
+ loff_t l = 0;
+ read_lock(&resource_lock);
+ for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
+ ;
+ return p;
+}
- return 0;
+static void r_stop(struct seq_file *m, void *v)
+{
+ read_unlock(&resource_lock);
}
-static int ioresources_show(struct seq_file *m, void *v)
+static int r_show(struct seq_file *m, void *v)
{
struct resource *root = m->private;
- char *fmt;
- int retval;
+ struct resource *r = v, *p;
+ int width = root->end < 0x10000 ? 4 : 8;
+ int depth;
- fmt = root->end < 0x10000
- ? " %04lx-%04lx : %s\n"
- : " %08lx-%08lx : %s\n";
- read_lock(&resource_lock);
- retval = do_resource_list(m, root->child, fmt, 0);
- read_unlock(&resource_lock);
- return retval;
+ for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
+ if (p->parent == root)
+ break;
+ seq_printf(m, "%*s%0*lx-%0*lx : %s\n",
+ depth * 2, "",
+ width, r->start,
+ width, r->end,
+ r->name ? r->name : "<BAD>");
+ return 0;
}
-static int ioresources_open(struct file *file, struct resource *root)
+struct seq_operations resource_op = {
+ .start = r_start,
+ .next = r_next,
+ .stop = r_stop,
+ .show = r_show,
+};
+
+static int ioports_open(struct inode *inode, struct file *file)
{
- return single_open(file, ioresources_show, root);
+ int res = seq_open(file, &resource_op);
+ if (!res) {
+ struct seq_file *m = file->private_data;
+ m->private = &ioport_resource;
+ }
+ return res;
}
-static int ioports_open(struct inode *inode, struct file *file)
+static int iomem_open(struct inode *inode, struct file *file)
{
- return ioresources_open(file, &ioport_resource);
+ int res = seq_open(file, &resource_op);
+ if (!res) {
+ struct seq_file *m = file->private_data;
+ m->private = &iomem_resource;
+ }
+ return res;
}
static struct file_operations proc_ioports_operations = {
.open = ioports_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = seq_release,
};
-static int iomem_open(struct inode *inode, struct file *file)
-{
- return ioresources_open(file, &iomem_resource);
-}
-
static struct file_operations proc_iomem_operations = {
.open = iomem_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = seq_release,
};
static int __init ioresources_init(void)
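The resource.c side of the merge replaces the recursive do_resource_list() with a seq_file iterator, and the interesting piece is r_next(): it resumes a pre-order walk (node, then children, then siblings, climbing back up through ->parent when a subtree is exhausted) from any point in the tree without recursion. The stand-alone sketch below reproduces that traversal over a hypothetical parent/child/sibling node type; it is illustrative only and not taken from the patch.

#include <stdio.h>
#include <stddef.h>

struct node {
    const char *name;
    struct node *parent, *child, *sibling;
};

static struct node *next_node(struct node *p)
{
    if (p->child)                       /* descend first */
        return p->child;
    while (!p->sibling && p->parent)    /* subtree done: climb until a sibling exists */
        p = p->parent;
    return p->sibling;                  /* NULL once the whole tree is walked */
}

int main(void)
{
    struct node root = { "root", NULL, NULL, NULL };
    struct node a    = { "a",  &root, NULL, NULL };
    struct node a1   = { "a1", &a,    NULL, NULL };
    struct node b    = { "b",  &root, NULL, NULL };

    root.child = &a;
    a.child    = &a1;
    a.sibling  = &b;

    for (struct node *p = root.child; p; p = next_node(p))
        printf("%s\n", p->name);        /* prints: a, a1, b */
    return 0;
}

This is exactly the shape the converted /proc/ioports and /proc/iomem code relies on: r_start() skips forward to *pos by calling the same step function, so seq_read() can restart the walk at any offset while holding resource_lock only for the duration of each read.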