author     Corey Minyard <cminyard@mvista.com>            2003-04-04 04:09:33 -0800
committer  Linus Torvalds <torvalds@home.transmeta.com>   2003-04-04 04:09:33 -0800
commit     b1ffe92e755607843bffc422d509e5043f115bbb (patch)
tree       551fa005cf081cdd74b7d2d1d496a23154988ca1 /drivers
parent     856eeb53b42ae2b01e22e4cc359471c4831cda2e (diff)
[PATCH] IPMI driver version 19 release
This fixes some performance problems. Some vendors implement firmware updates over IPMI, and this speeds up that process quite a bit.

* Improve the "send - wait for response - send - wait for response - etc." performance when using high-res timers. Before, an ~10ms delay was added to each message, because the timer was not restarted if nothing was happening when a new message was started.

* Add some checking for leaked messages.
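The leak check added to ipmi_msghandler.c below is simply an in-use counter: it is incremented on every allocation, decremented in the message's free callback, and reported at module cleanup if it is nonzero. Here is a minimal userspace sketch of the same pattern, using C11 atomics instead of the kernel's atomic_t; the names msg_inuse_count, alloc_msg and free_msg are illustrative only and are not part of the driver.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* In-use counter: bumped on allocation, dropped on free. */
static atomic_int msg_inuse_count = 0;

struct msg {
	int id;
};

static struct msg *alloc_msg(void)
{
	struct msg *m = malloc(sizeof(*m));
	if (m)
		atomic_fetch_add(&msg_inuse_count, 1);
	return m;
}

static void free_msg(struct msg *m)
{
	atomic_fetch_sub(&msg_inuse_count, 1);
	free(m);
}

int main(void)
{
	struct msg *a = alloc_msg();
	struct msg *b = alloc_msg();

	free_msg(a);
	(void)b;	/* 'b' is deliberately leaked so the check fires. */

	int count = atomic_load(&msg_inuse_count);
	if (count != 0)
		printf("message count %d at exit (leak)\n", count);
	return 0;
}

With both messages freed the count reads 0 at exit; leaving one allocated prints the warning, which is the same check cleanup_ipmi() performs with smi_msg_inuse_count and recv_msg_inuse_count in the patch below.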
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/char/ipmi/ipmi_kcs_intf.c     69
-rw-r--r--   drivers/char/ipmi/ipmi_msghandler.c   25
2 files changed, 80 insertions, 14 deletions
diff --git a/drivers/char/ipmi/ipmi_kcs_intf.c b/drivers/char/ipmi/ipmi_kcs_intf.c
index ab162ed8aff0..5042798ec70b 100644
--- a/drivers/char/ipmi/ipmi_kcs_intf.c
+++ b/drivers/char/ipmi/ipmi_kcs_intf.c
@@ -61,6 +61,14 @@
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
+/* Timing parameters. Call every 10 ms when not doing anything,
+ otherwise call every KCS_SHORT_TIMEOUT_USEC microseconds. */
+#define KCS_TIMEOUT_TIME_USEC 10000
+#define KCS_USEC_PER_JIFFY (1000000/HZ)
+#define KCS_TIMEOUT_JIFFIES (KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
+#define KCS_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
+                                      short timeout */
+
#ifdef CONFIG_IPMI_KCS
/* This forces a dependency to the config file for this option. */
#endif
@@ -132,6 +140,8 @@ struct kcs_info
int interrupt_disabled;
};
+static void kcs_restart_short_timer(struct kcs_info *kcs_info);
+
static void deliver_recv_msg(struct kcs_info *kcs_info, struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer with the lock
@@ -309,6 +319,9 @@ static void handle_transaction_done(struct kcs_info *kcs_info)
#endif
switch (kcs_info->kcs_state) {
case KCS_NORMAL:
+ if (!kcs_info->curr_msg)
+ break;
+
kcs_info->curr_msg->rsp_size
= kcs_get_result(kcs_info->kcs_sm,
kcs_info->curr_msg->rsp,
@@ -563,8 +576,9 @@ static void sender(void *send_info,
spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
result = kcs_event_handler(kcs_info, 0);
while (result != KCS_SM_IDLE) {
- udelay(500);
- result = kcs_event_handler(kcs_info, 500);
+ udelay(KCS_SHORT_TIMEOUT_USEC);
+ result = kcs_event_handler(kcs_info,
+ KCS_SHORT_TIMEOUT_USEC);
}
spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
return;
@@ -582,6 +596,7 @@ static void sender(void *send_info,
&& (kcs_info->curr_msg == NULL))
{
start_next_msg(kcs_info);
+ kcs_restart_short_timer(kcs_info);
}
spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
@@ -598,8 +613,9 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
if (i_run_to_completion) {
result = kcs_event_handler(kcs_info, 0);
while (result != KCS_SM_IDLE) {
- udelay(500);
- result = kcs_event_handler(kcs_info, 500);
+ udelay(KCS_SHORT_TIMEOUT_USEC);
+ result = kcs_event_handler(kcs_info,
+ KCS_SHORT_TIMEOUT_USEC);
}
}
@@ -613,14 +629,42 @@ static void request_events(void *send_info)
atomic_set(&kcs_info->req_events, 1);
}
-/* Call every 10 ms. */
-#define KCS_TIMEOUT_TIME_USEC 10000
-#define KCS_USEC_PER_JIFFY (1000000/HZ)
-#define KCS_TIMEOUT_JIFFIES (KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
-#define KCS_SHORT_TIMEOUT_USEC 500 /* .5ms when the SM request a
- short timeout */
static int initialized = 0;
+/* Must be called with interrupts off and with the kcs_lock held. */
+static void kcs_restart_short_timer(struct kcs_info *kcs_info)
+{
+ if (del_timer(&(kcs_info->kcs_timer))) {
+#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned long jiffies_now;
+
+ /* If we don't delete the timer, then it will go off
+ immediately, anyway. So we only process if we
+ actually delete the timer. */
+
+ /* We already have irqsave on, so no need for it
+ here. */
+ read_lock(&xtime_lock);
+ jiffies_now = jiffies;
+ kcs_info->kcs_timer.expires = jiffies_now;
+
+ kcs_info->kcs_timer.sub_expires
+ = quick_update_jiffies_sub(jiffies_now);
+ read_unlock(&xtime_lock);
+
+ kcs_info->kcs_timer.sub_expires
+ += usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
+ while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
+ kcs_info->kcs_timer.expires++;
+ kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
+ }
+#else
+ kcs_info->kcs_timer.expires = jiffies + 1;
+#endif
+ add_timer(&(kcs_info->kcs_timer));
+ }
+}
+
static void kcs_timeout(unsigned long data)
{
struct kcs_info *kcs_info = (struct kcs_info *) data;
@@ -643,12 +687,11 @@ static void kcs_timeout(unsigned long data)
printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
jiffies_now = jiffies;
+
time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies)
* KCS_USEC_PER_JIFFY);
kcs_result = kcs_event_handler(kcs_info, time_diff);
- spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
-
kcs_info->last_timeout_jiffies = jiffies_now;
if ((kcs_info->irq) && (! kcs_info->interrupt_disabled)) {
@@ -669,6 +712,7 @@ static void kcs_timeout(unsigned long data)
}
} else {
kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
+ kcs_info->kcs_timer.sub_expires = 0;
}
#else
/* If requested, take the shortest delay possible */
@@ -681,6 +725,7 @@ static void kcs_timeout(unsigned long data)
do_add_timer:
add_timer(&(kcs_info->kcs_timer));
+ spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 09692da0f027..b8c3834bc141 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1765,9 +1765,13 @@ static void ipmi_timeout(unsigned long data)
}
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
/* FIXME - convert these to slabs. */
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
+ atomic_dec(&smi_msg_inuse_count);
kfree(msg);
}
@@ -1775,13 +1779,16 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
struct ipmi_smi_msg *rv;
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
- if (rv)
+ if (rv) {
rv->done = free_smi_msg;
+ atomic_inc(&smi_msg_inuse_count);
+ }
return rv;
}
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
+ atomic_dec(&recv_msg_inuse_count);
kfree(msg);
}
@@ -1790,8 +1797,10 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
struct ipmi_recv_msg *rv;
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv)
+ if (rv) {
rv->done = free_recv_msg;
+ atomic_inc(&recv_msg_inuse_count);
+ }
return rv;
}
@@ -1924,6 +1933,8 @@ static __init int ipmi_init_msghandler(void)
static __exit void cleanup_ipmi(void)
{
+ int count;
+
if (!initialized)
return;
@@ -1940,6 +1951,16 @@ static __exit void cleanup_ipmi(void)
}
initialized = 0;
+
+ /* Check for buffer leaks. */
+ count = atomic_read(&smi_msg_inuse_count);
+ if (count != 0)
+ printk("ipmi_msghandler: SMI message count %d at exit\n",
+ count);
+ count = atomic_read(&recv_msg_inuse_count);
+ if (count != 0)
+ printk("ipmi_msghandler: recv message count %d at exit\n",
+ count);
}
module_exit(cleanup_ipmi);