diff options
| author | Ingo Molnar <mingo@elte.hu> | 2002-09-30 22:17:42 -0700 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2002-09-30 22:17:42 -0700 |
| commit | 6ed12ff83c765aeda7d38d3bf9df7d46d24bfb11 (patch) | |
| tree | d2dd4a9cefd38743d3e51fbbab3d79920bb19ae1 /drivers/message | |
| parent | 7570df54ef8cc5b42500d26562ff50fcbe265aa2 (diff) | |
[PATCH] Workqueue Abstraction
This is the next iteration of the workqueue abstraction.
The framework includes:
- per-CPU queueing support.
on SMP there is a per-CPU worker thread (bound to its CPU) and per-CPU
work queues - this feature is completely transparent to workqueue-users.
keventd automatically uses this feature. XFS can now update to work-queues
and have the same per-CPU performance as it had with its per-CPU worker
threads.
- delayed work submission
there's a new queue_delayed_work(wq, work, delay) function and a new
schedule_delayed_work(work, delay) function. The latter one is used to
correctly fix former tq_timer users. I've reverted those changes in 2.5.40
that changed tq_timer uses to schedule_work() - eg. in the case of
random.c or the tty flip queue it was definitely the wrong thing to do.
delayed work means a timer embedded in struct work_struct. I considered
using split struct work_struct and delayed_work_struct types, but lots
of code actively uses task-queues in both delayed and non-delayed mode,
so i went for the more generic approach that allows both methods of work
submission. Delayed timers do not cause any other overhead in the
normal submission path otherwise.
- multithreaded run_workqueue() implementation
the run_workqueue() function can now be called from multiple contexts, and
a worker thread will only use up a single entry - this property is used
by the flushing code, and can potentially be used in the future to extend
the number of per-CPU worker threads.
- more reliable flushing
there's now a 'pending work' counter, which is used to accurately detect
when the last work-function has finished execution. It's also used to
correctly flush against timed requests. I'm not convinced whether the old
keventd implementation got this detail right.
- i switched the arguments of the queueing function(s) per Jeff's
suggestion, it's more straightforward this way.
Driver fixes:
i have converted almost every affected driver to the new framework. This
cleaned up tons of code. I also fixed a number of drivers that were still
using BHs (these drivers did not compile in 2.5.40).
while this means lots of changes, it might ease the QA decision whether to
put this patch into 2.5.
The patch converts roughly 80% of all tqueue-using code to workqueues - and
all the places that are not converted to workqueues yet are places that do
not compile in vanilla 2.5.40 anyway, due to unrelated changes. I've
converted a fair number of drivers that do not compile in 2.5.40, and i
think i've managed to convert every driver that compiles under 2.5.40.
Diffstat (limited to 'drivers/message')
| -rw-r--r-- | drivers/message/fusion/mptlan.c | 12 | ||||
| -rw-r--r-- | drivers/message/fusion/mptscsih.c | 18 | ||||
| -rw-r--r-- | drivers/message/fusion/mptscsih.h | 5 | ||||
| -rw-r--r-- | drivers/message/i2o/i2o_lan.c | 20 | ||||
| -rw-r--r-- | drivers/message/i2o/i2o_lan.h | 2 |
5 files changed, 24 insertions, 33 deletions
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index f4484ed7ef37..8d3e9a9284bf 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -132,7 +132,7 @@ struct mpt_lan_priv { u32 total_received; struct net_device_stats stats; /* Per device statistics */ - struct tq_struct post_buckets_task; + struct work_struct post_buckets_task; unsigned long post_buckets_active; }; @@ -876,10 +876,9 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { if (priority) { - queue_task(&priv->post_buckets_task, &tq_immediate); - mark_bh(IMMEDIATE_BH); + schedule_work(&priv->post_buckets_task); } else { - queue_task(&priv->post_buckets_task, &tq_timer); + schedule_delayed_work(&priv->post_buckets_task, 1); dioprintk((KERN_INFO MYNAM ": post_buckets queued on " "timer.\n")); } @@ -1365,9 +1364,8 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) priv->mpt_dev = mpt_dev; priv->pnum = pnum; - memset(&priv->post_buckets_task, 0, sizeof(struct tq_struct)); - priv->post_buckets_task.routine = mpt_lan_post_receive_buckets; - priv->post_buckets_task.data = dev; + memset(&priv->post_buckets_task, 0, sizeof(struct work_struct)); + INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev); priv->post_buckets_active = 0; dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index bc0403f85849..63a8ae08a703 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -76,6 +76,7 @@ #include <linux/delay.h> /* for mdelay */ #include <linux/interrupt.h> /* needed for in_interrupt() proto */ #include <linux/reboot.h> /* notifier code */ +#include <linux/workqueue.h> #include "../../scsi/scsi.h" #include "../../scsi/hosts.h" #include "../../scsi/sd.h" @@ -244,7 +245,7 @@ static struct proc_dir_entry proc_mpt_scsihost = */ static 
spinlock_t mytaskQ_lock = SPIN_LOCK_UNLOCKED; static int mytaskQ_bh_active = 0; -static struct tq_struct mptscsih_ptaskfoo; +static struct work_struct mptscsih_ptaskfoo; static atomic_t mpt_taskQdepth; #endif @@ -255,7 +256,7 @@ static atomic_t mpt_taskQdepth; static spinlock_t dvtaskQ_lock = SPIN_LOCK_UNLOCKED; static int dvtaskQ_active = 0; static int dvtaskQ_release = 0; -static struct tq_struct mptscsih_dvTask; +static struct work_struct mptscsih_dvTask; #endif /* @@ -2019,10 +2020,7 @@ mptscsih_qcmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) if (!dvtaskQ_active) { dvtaskQ_active = 1; spin_unlock_irqrestore(&dvtaskQ_lock, lflags); - mptscsih_dvTask.sync = 0; - mptscsih_dvTask.routine = mptscsih_domainValidation; - mptscsih_dvTask.data = (void *) hd; - + INIT_WORK(&mptscsih_dvTask, mptscsih_domainValidation, (void *) hd); SCHEDULE_TASK(&mptscsih_dvTask); } else { spin_unlock_irqrestore(&dvtaskQ_lock, lflags); @@ -3048,7 +3046,7 @@ mptscsih_old_abort(Scsi_Cmnd *SCpnt) { MPT_SCSI_HOST *hd; MPT_FRAME_HDR *mf; - struct tq_struct *ptaskfoo; + struct work_struct *ptaskfoo; unsigned long flags; int scpnt_idx; @@ -3156,7 +3154,7 @@ mptscsih_old_abort(Scsi_Cmnd *SCpnt) * Oh how cute, no alloc/free/mgmt needed if we use * (bottom/unused portion of) MPT request frame. */ - ptaskfoo = (struct tq_struct *) &mptscsih_ptaskfoo; + ptaskfoo = (struct work_struct *) &mptscsih_ptaskfoo; ptaskfoo->sync = 0; ptaskfoo->routine = mptscsih_taskmgmt_bh; ptaskfoo->data = SCpnt; @@ -3184,7 +3182,7 @@ mptscsih_old_reset(Scsi_Cmnd *SCpnt, unsigned int reset_flags) { MPT_SCSI_HOST *hd; MPT_FRAME_HDR *mf; - struct tq_struct *ptaskfoo; + struct work_struct *ptaskfoo; unsigned long flags; int scpnt_idx; @@ -3286,7 +3284,7 @@ mptscsih_old_reset(Scsi_Cmnd *SCpnt, unsigned int reset_flags) * Oh how cute, no alloc/free/mgmt needed if we use * (bottom/unused portion of) MPT request frame. 
*/ - ptaskfoo = (struct tq_struct *) &mptscsih_ptaskfoo; + ptaskfoo = (struct work_struct *) &mptscsih_ptaskfoo; ptaskfoo->sync = 0; ptaskfoo->routine = mptscsih_taskmgmt_bh; ptaskfoo->data = SCpnt; diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 42f29799ce86..768f7e1ce70b 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -157,12 +157,11 @@ struct mptscsih_driver_setup #ifdef HAVE_TQ_SCHED #define SCHEDULE_TASK(x) \ /*MOD_INC_USE_COUNT*/; \ - (x)->next = NULL; \ - queue_task(x, &tq_scheduler) + schedule_work(x) #else #define SCHEDULE_TASK(x) \ /*MOD_INC_USE_COUNT*/; \ - if (schedule_task(x) == 0) { \ + if (schedule_work(x) == 0) { \ /*MOD_DEC_USE_COUNT*/; \ } #endif diff --git a/drivers/message/i2o/i2o_lan.c b/drivers/message/i2o/i2o_lan.c index 7b117be03ad1..5e19d5388d63 100644 --- a/drivers/message/i2o/i2o_lan.c +++ b/drivers/message/i2o/i2o_lan.c @@ -43,7 +43,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/tqueue.h> +#include <linux/workqueue.h> #include <asm/io.h> #include <linux/errno.h> @@ -116,11 +116,8 @@ static struct i2o_handler i2o_lan_handler = { }; static int lan_context; -DECLARE_TASK_QUEUE(i2o_post_buckets_task); -struct tq_struct run_i2o_post_buckets_task = { - routine: (void (*)(void *)) run_task_queue, - data: (void *) 0 -}; +struct DECLARE_WORK(run_i2o_post_buckets_task, + (void (*)(void *)) run_task_queue, NULL); /* Functions to handle message failures and transaction errors: ==============================================================*/ @@ -386,8 +383,7 @@ static void i2o_lan_receive_post_reply(struct i2o_handler *h, if (atomic_read(&priv->buckets_out) <= priv->max_buckets_out - priv->bucket_thresh) { run_i2o_post_buckets_task.data = (void *)dev; - queue_task(&run_i2o_post_buckets_task, &tq_immediate); - mark_bh(IMMEDIATE_BH); + schedule_work(&run_i2o_post_buckets_task); } return; @@ -908,7 +904,7 @@ static int 
i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev) if ((priv->tx_batch_mode & 0x01) && !priv->send_active) { priv->send_active = 1; MOD_INC_USE_COUNT; - if (schedule_task(&priv->i2o_batch_send_task) == 0) + if (schedule_work(&priv->i2o_batch_send_task) == 0) MOD_DEC_USE_COUNT; } } else { /* Add new SGL element to the previous message frame */ @@ -996,7 +992,7 @@ static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev) if ((priv->tx_batch_mode & 0x01) && !priv->send_active) { priv->send_active = 1; MOD_INC_USE_COUNT; - if (schedule_task(&priv->i2o_batch_send_task) == 0) + if (schedule_work(&priv->i2o_batch_send_task) == 0) MOD_DEC_USE_COUNT; } } else { /* Add new SGL element to the previous message frame */ @@ -1409,8 +1405,8 @@ struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev) INIT_LIST_HEAD(&priv->i2o_batch_send_task.list); priv->i2o_batch_send_task.sync = 0; - priv->i2o_batch_send_task.routine = (void *)i2o_lan_batch_send; - priv->i2o_batch_send_task.data = (void *)dev; + INIT_WORK(&priv->i2o_batch_send_task, (void *)i2o_lan_batch_send, + (void *)dev); dev->open = i2o_lan_open; dev->stop = i2o_lan_close; diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h index 075f6438d4e7..561d63304d7e 100644 --- a/drivers/message/i2o/i2o_lan.h +++ b/drivers/message/i2o/i2o_lan.h @@ -136,7 +136,7 @@ struct i2o_lan_local { u8 sgl_max; /* max SGLs in one message frame */ u32 m; /* IOP address of the batch msg frame */ - struct tq_struct i2o_batch_send_task; + struct work_struct i2o_batch_send_task; int send_active; struct sk_buff **i2o_fbl; /* Free bucket list (to reuse skbs) */ int i2o_fbl_tail; |
