summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@home.transmeta.com>2002-05-28 19:10:12 -0700
committerLinus Torvalds <torvalds@home.transmeta.com>2002-05-28 19:10:12 -0700
commit18c16b3368c0cdfd35255243af2a93aac274ac9a (patch)
tree9ce59682b45512a8d439efb4572b519889bdffa3
parentbd7c535870da5a8c957625bcd2f7e6fa688b4e13 (diff)
parentf1c83c72e38afc861f8ebf3430c347f9778d541c (diff)
Merge http://linux-isdn.bkbits.net/linux-2.5.make-drivers
into home.transmeta.com:/home/torvalds/v2.5/linux
-rw-r--r--arch/i386/kernel/entry.S5
-rw-r--r--arch/i386/kernel/i8259.c2
-rw-r--r--drivers/block/ll_rw_blk.c91
-rw-r--r--fs/fat/inode.c3
-rw-r--r--include/asm-i386/hw_irq.h1
-rw-r--r--include/linux/blkdev.h8
-rw-r--r--kernel/sched.c2
7 files changed, 59 insertions, 53 deletions
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index fa40f95cf963..375961512766 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -395,6 +395,11 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+
+#ifdef CONFIG_X86_MCE_P4THERMAL
+BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
+#endif
+
#endif
ENTRY(divide_error)
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index a6f73e4d4249..1cbfe2e706b5 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -407,7 +407,7 @@ void __init init_IRQ(void)
/* thermal monitor LVT interrupt */
#ifdef CONFIG_X86_MCE_P4THERMAL
- set_intr_gate(THERMAL_APIC_VECTOR, smp_thermal_interrupt);
+ set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#endif
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 64de70ca26da..f34dde5dcfcb 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -50,13 +50,7 @@ extern int mac_floppy_init(void);
static kmem_cache_t *request_cachep;
static struct list_head blk_plug_list;
-static spinlock_t blk_plug_lock = SPIN_LOCK_UNLOCKED;
-
-/*
- * The "disk" task queue is used to start the actual requests
- * after a plug
- */
-DECLARE_TASK_QUEUE(tq_disk);
+static spinlock_t blk_plug_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
/* blk_dev_struct is:
* request_queue
@@ -794,12 +788,11 @@ void blk_plug_device(request_queue_t *q)
if (!elv_queue_empty(q))
return;
- if (test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
- return;
-
- spin_lock(&blk_plug_lock);
- list_add_tail(&q->plug.list, &blk_plug_list);
- spin_unlock(&blk_plug_lock);
+ if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+ spin_lock(&blk_plug_lock);
+ list_add_tail(&q->plug_list, &blk_plug_list);
+ spin_unlock(&blk_plug_lock);
+ }
}
/*
@@ -813,10 +806,8 @@ static inline void __generic_unplug_device(request_queue_t *q)
if (!__test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
return;
- if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) {
- printk("queue was stopped\n");
+ if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
return;
- }
/*
* was plugged, fire request_fn if queue has stuff to do
@@ -834,22 +825,12 @@ static inline void __generic_unplug_device(request_queue_t *q)
* the device have at them. If a queue is plugged, the I/O scheduler
* is still adding and merging requests on the queue. Once the queue
* gets unplugged (either by manually calling this function, or by
- * running the tq_disk task queue), the request_fn defined for the
+ * calling blk_run_queues()), the request_fn defined for the
* queue is invoked and transfers started.
**/
void generic_unplug_device(void *data)
{
request_queue_t *q = data;
-
- tasklet_schedule(&q->plug.task);
-}
-
-/*
- * the plug tasklet
- */
-static void blk_task_run(unsigned long data)
-{
- request_queue_t *q = (request_queue_t *) data;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -858,23 +839,26 @@ static void blk_task_run(unsigned long data)
}
/*
- * clear top flag and schedule tasklet for execution
+ * clear stop flag and run queue
*/
void blk_start_queue(request_queue_t *q)
{
- if (test_and_clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
- tasklet_enable(&q->plug.task);
+ if (test_and_clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) {
+ unsigned long flags;
- tasklet_schedule(&q->plug.task);
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!elv_queue_empty(q))
+ q->request_fn(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
}
/*
- * set stop bit and disable any pending tasklet
+ * set stop bit, queue won't be run until blk_start_queue() is called
*/
void blk_stop_queue(request_queue_t *q)
{
- if (!test_and_set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
- tasklet_disable(&q->plug.task);
+ set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
/*
@@ -882,22 +866,44 @@ void blk_stop_queue(request_queue_t *q)
*/
void blk_run_queues(void)
{
- struct list_head *tmp, *n;
+ struct list_head *n, *tmp, local_plug_list;
unsigned long flags;
+ INIT_LIST_HEAD(&local_plug_list);
+
/*
- * we could splice to the stack prior to running
+ * this will happen fairly often
*/
spin_lock_irqsave(&blk_plug_lock, flags);
- list_for_each_safe(tmp, n, &blk_plug_list) {
- request_queue_t *q = list_entry(tmp, request_queue_t,plug.list);
+ if (list_empty(&blk_plug_list)) {
+ spin_unlock_irqrestore(&blk_plug_lock, flags);
+ return;
+ }
+
+ list_splice(&blk_plug_list, &local_plug_list);
+ INIT_LIST_HEAD(&blk_plug_list);
+ spin_unlock_irqrestore(&blk_plug_lock, flags);
+
+ /*
+ * local_plug_list is now a private copy we can traverse lockless
+ */
+ list_for_each_safe(n, tmp, &local_plug_list) {
+ request_queue_t *q = list_entry(n, request_queue_t, plug_list);
if (!test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) {
- list_del(&q->plug.list);
- tasklet_schedule(&q->plug.task);
+ list_del(&q->plug_list);
+ generic_unplug_device(q);
}
}
- spin_unlock_irqrestore(&blk_plug_lock, flags);
+
+ /*
+ * add any remaining queue back to plug list
+ */
+ if (!list_empty(&local_plug_list)) {
+ spin_lock_irqsave(&blk_plug_lock, flags);
+ list_splice(&local_plug_list, &blk_plug_list);
+ spin_unlock_irqrestore(&blk_plug_lock, flags);
+ }
}
static int __blk_cleanup_queue(struct request_list *list)
@@ -1050,8 +1056,7 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock)
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
- INIT_LIST_HEAD(&q->plug.list);
- tasklet_init(&q->plug.task, blk_task_run, (unsigned long) q);
+ INIT_LIST_HEAD(&q->plug_list);
return 0;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 29e650b39597..69ca844884b6 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -872,7 +872,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
if (!silent)
printk("FAT: Using codepage %s\n", sbi->nls_disk->charset);
- if (sbi->options.isvfat && !sbi->options.utf8) {
+ /* FIXME: utf8 is using iocharset for upper/lower conversion */
+ if (sbi->options.isvfat) {
if (sbi->options.iocharset != NULL) {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 8e20a941e5a9..aad2d80cefff 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -28,6 +28,7 @@ extern int irq_vector[NR_IRQS];
extern void (*interrupt[NR_IRQS])(void);
+extern asmlinkage void thermal_interrupt(void);
extern asmlinkage void smp_thermal_interrupt(struct pt_regs);
#ifdef CONFIG_SMP
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b9972fe4fc70..407b176d7dad 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,7 +8,6 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
-#include <linux/interrupt.h>
#include <asm/scatterlist.h>
@@ -137,11 +136,6 @@ struct blk_queue_tag {
int max_depth;
};
-struct blk_plug {
- struct list_head list;
- struct tasklet_struct task;
-};
-
/*
* Default nr free requests per queue, ll_rw_blk will scale it down
* according to available RAM at init time
@@ -183,7 +177,7 @@ struct request_queue
unsigned long bounce_pfn;
int bounce_gfp;
- struct blk_plug plug;
+ struct list_head plug_list;
/*
* various queue flags, see QUEUE_* below
diff --git a/kernel/sched.c b/kernel/sched.c
index b736d163db8f..bbff0f9ecf07 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -13,7 +13,7 @@
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Additional code by Davide
- * Libenzi, Robert Love, and Rusty Russel.
+ * Libenzi, Robert Love, and Rusty Russell.
*/
#include <linux/mm.h>