summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid S. Miller <davem@nuts.ninka.net>2002-10-11 04:22:34 -0700
committerDavid S. Miller <davem@nuts.ninka.net>2002-10-11 04:22:34 -0700
commit2da2a587ff702fc975ad0607fdbb606af7c8e56a (patch)
tree447e836adecfd49d248bbc07c1450f0afbdb7745
parent5d344d1ac9069dde4b815364c43f7184ea810789 (diff)
parentac27bdc817c512a48237d7b6d9764d85ebedcc1d (diff)
Merge master.kernel.org:/home/bcrl/net-2.5
into nuts.ninka.net:/home/davem/src/BK/aio-2.5
-rw-r--r--Documentation/networking/ewrk3.txt1
-rw-r--r--drivers/char/keyboard.c31
-rw-r--r--drivers/char/vt_ioctl.c6
-rw-r--r--drivers/input/keyboard/atkbd.c5
-rw-r--r--drivers/input/misc/uinput.c132
-rw-r--r--drivers/input/serio/i8042.c48
-rw-r--r--drivers/net/3c509.c182
-rw-r--r--drivers/net/bonding.c82
-rw-r--r--drivers/net/e1000/e1000_main.c22
-rw-r--r--drivers/net/ewrk3.c86
-rw-r--r--drivers/net/irda/Config.help33
-rw-r--r--drivers/net/irda/Config.in1
-rw-r--r--drivers/net/irda/Makefile3
-rw-r--r--drivers/net/irda/donauboe.c1850
-rw-r--r--drivers/net/irda/donauboe.h363
-rw-r--r--drivers/net/irda/vlsi_ir.c2109
-rw-r--r--drivers/net/mii.c2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c253
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/sbus/char/uctrl.c2
-rw-r--r--drivers/scsi/Config.in5
-rw-r--r--drivers/scsi/NCR53C9x.c149
-rw-r--r--drivers/scsi/NCR53C9x.h11
-rw-r--r--drivers/scsi/aic7xxx_old.c1754
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx.h1
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx_proc.c107
-rw-r--r--drivers/scsi/esp.c167
-rw-r--r--drivers/scsi/esp.h17
-rw-r--r--drivers/scsi/ips.c33
-rw-r--r--drivers/scsi/ips.h4
-rw-r--r--drivers/scsi/scsi.c33
-rw-r--r--drivers/scsi/scsi.h17
-rw-r--r--drivers/scsi/scsi_ioctl.c8
-rw-r--r--drivers/scsi/scsi_scan.c22
-rw-r--r--drivers/usb/input/hid-input.c2
-rw-r--r--fs/nls/Config.in2
-rw-r--r--include/linux/kd.h3
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/uinput.h4
-rw-r--r--include/net/flow.h70
-rw-r--r--include/net/ip6_fib.h8
-rw-r--r--include/net/ip6_fw.h54
-rw-r--r--include/net/ip_fib.h23
-rw-r--r--include/net/irda/crc.h2
-rw-r--r--include/net/irda/ircomm_tty.h5
-rw-r--r--include/net/irda/vlsi_ir.h324
-rw-r--r--include/net/route.h41
-rw-r--r--include/net/tcp.h14
-rw-r--r--kernel/sched.c22
-rw-r--r--net/atm/clip.c3
-rw-r--r--net/core/netfilter.c18
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/arp.c12
-rw-r--r--net/ipv4/fib_frontend.c31
-rw-r--r--net/ipv4/fib_hash.c12
-rw-r--r--net/ipv4/fib_rules.c20
-rw-r--r--net/ipv4/fib_semantics.c29
-rw-r--r--net/ipv4/icmp.c32
-rw-r--r--net/ipv4/igmp.c14
-rw-r--r--net/ipv4/ip_gre.c42
-rw-r--r--net/ipv4/ip_nat_dumb.c20
-rw-r--r--net/ipv4/ip_output.c32
-rw-r--r--net/ipv4/ipip.c31
-rw-r--r--net/ipv4/ipmr.c15
-rw-r--r--net/ipv4/netfilter/ip_fw_compat_masq.c3
-rw-r--r--net/ipv4/netfilter/ip_nat_core.c3
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c23
-rw-r--r--net/ipv4/netfilter/ipt_MIRROR.c7
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c30
-rw-r--r--net/ipv4/raw.c10
-rw-r--r--net/ipv4/route.c326
-rw-r--r--net/ipv4/syncookies.c19
-rw-r--r--net/ipv4/sysctl_net_ipv4.c2
-rw-r--r--net/ipv4/tcp_input.c126
-rw-r--r--net/ipv4/tcp_ipv4.c25
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/Config.in3
-rw-r--r--net/ipv6/Makefile1
-rw-r--r--net/ipv6/ip6_fib.c1
-rw-r--r--net/ipv6/ip6_fw.c390
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c2
-rw-r--r--net/ipv6/route.c301
-rw-r--r--net/ipv6/sit.c20
-rw-r--r--net/irda/crc.c2
-rw-r--r--net/irda/ircomm/ircomm_param.c11
-rw-r--r--net/irda/ircomm/ircomm_tty.c53
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c19
-rw-r--r--net/irda/irlmp_event.c32
-rw-r--r--net/irda/irsyms.c2
-rw-r--r--net/irda/irsysctl.c8
-rw-r--r--net/irda/parameters.c20
-rw-r--r--net/irda/qos.c38
-rw-r--r--net/sched/cls_route.c2
-rw-r--r--net/sctp/protocol.c14
97 files changed, 6333 insertions, 3609 deletions
diff --git a/Documentation/networking/ewrk3.txt b/Documentation/networking/ewrk3.txt
index 0427c6c8fc25..90e9e5f16e6b 100644
--- a/Documentation/networking/ewrk3.txt
+++ b/Documentation/networking/ewrk3.txt
@@ -24,6 +24,7 @@ sequences). To utilise this ability, you have to do 8 things:
kernel with the ewrk3 configuration turned off and reboot.
5) insmod ewrk3.o
[Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ [Adam Kropelin: Multiple cards now supported by irq=x1,x2 io=y1,y2]
6) run the net startup bits for your new eth?? interface manually
(usually /etc/rc.inet[12] at boot time).
7) enjoy!
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 4a9878fa4a4e..7ad9b8d0253b 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -264,23 +264,34 @@ void kd_mksound(unsigned int hz, unsigned int ticks)
/*
* Setting the keyboard rate.
*/
+static inline unsigned int ms_to_jiffies(unsigned int ms) {
+ unsigned int j;
+
+ j = (ms * HZ + 500) / 1000;
+ return (j > 0) ? j : 1;
+}
+
int kbd_rate(struct kbd_repeat *rep)
{
- struct list_head * node;
-
- if (rep->rate < 0 || rep->delay < 0)
- return -EINVAL;
+ struct list_head *node;
+ unsigned int d = 0;
+ unsigned int p = 0;
list_for_each(node,&kbd_handler.h_list) {
struct input_handle *handle = to_handle_h(node);
- if (test_bit(EV_REP, handle->dev->evbit)) {
- if (rep->rate > HZ) rep->rate = HZ;
- handle->dev->rep[REP_PERIOD] = rep->rate ? (HZ / rep->rate) : 0;
- handle->dev->rep[REP_DELAY] = rep->delay * HZ / 1000;
- if (handle->dev->rep[REP_DELAY] < handle->dev->rep[REP_PERIOD])
- handle->dev->rep[REP_DELAY] = handle->dev->rep[REP_PERIOD];
+ struct input_dev *dev = handle->dev;
+
+ if (test_bit(EV_REP, dev->evbit)) {
+ if (rep->delay > 0)
+ dev->rep[REP_DELAY] = ms_to_jiffies(rep->delay);
+ if (rep->period > 0)
+ dev->rep[REP_PERIOD] = ms_to_jiffies(rep->period);
+ d = dev->rep[REP_DELAY] * 1000 / HZ;
+ p = dev->rep[REP_PERIOD] * 1000 / HZ;
}
}
+ rep->delay = d;
+ rep->period = p;
return 0;
}
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 5470d57aa33f..fcdb5208acea 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -430,6 +430,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
case KDKBDREP:
{
struct kbd_repeat kbrep;
+ int err;
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
@@ -437,8 +438,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
if (copy_from_user(&kbrep, (void *)arg,
sizeof(struct kbd_repeat)))
return -EFAULT;
- if ((i = kbd_rate( &kbrep )))
- return i;
+ err = kbd_rate(&kbrep);
+ if (err)
+ return err;
if (copy_to_user((void *)arg, &kbrep,
sizeof(struct kbd_repeat)))
return -EFAULT;
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 8fa0d7957e49..f8e2b5876387 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -244,8 +244,9 @@ static int atkbd_command(struct atkbd *atkbd, unsigned char *param, int command)
while (atkbd->cmdcnt && timeout--) udelay(10);
- for (i = 0; i < receive; i++)
- param[i] = atkbd->cmdbuf[(receive - 1) - i];
+ if (param)
+ for (i = 0; i < receive; i++)
+ param[i] = atkbd->cmdbuf[(receive - 1) - i];
if (atkbd->cmdcnt)
return (atkbd->cmdcnt = 0) - 1;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 80ae77dfeb00..4593d82bd282 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -49,7 +49,7 @@ static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned i
udev = (struct uinput_device *)dev->private;
- udev->head = (udev->head + 1) & 0xF;
+ udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;
udev->buff[udev->head].type = type;
udev->buff[udev->head].code = code;
udev->buff[udev->head].value = value;
@@ -87,14 +87,14 @@ static int uinput_create_device(struct uinput_device *udev)
input_register_device(udev->dev);
- udev->state |= UIST_CREATED;
+ set_bit(UIST_CREATED, &(udev->state));
return 0;
}
static int uinput_destroy_device(struct uinput_device *udev)
{
- if (!(udev->state & UIST_CREATED)) {
+ if (!test_bit(UIST_CREATED, &(udev->state))) {
printk(KERN_WARNING "%s: create the device first\n", UINPUT_NAME);
return -EINVAL;
}
@@ -135,6 +135,39 @@ error:
return -ENOMEM;
}
+static int uinput_validate_absbits(struct input_dev *dev)
+{
+ unsigned int cnt;
+ int retval = 0;
+
+ for (cnt = 0; cnt < ABS_MAX; cnt++) {
+ if (!test_bit(cnt, dev->absbit))
+ continue;
+
+ if (/*!dev->absmin[cnt] || !dev->absmax[cnt] || */
+ (dev->absmax[cnt] <= dev->absmin[cnt])) {
+ printk(KERN_DEBUG
+ "%s: invalid abs[%02x] min:%d max:%d\n",
+ UINPUT_NAME, cnt,
+ dev->absmin[cnt], dev->absmax[cnt]);
+ retval = -EINVAL;
+ break;
+ }
+
+ if ((dev->absflat[cnt] < dev->absmin[cnt]) ||
+ (dev->absflat[cnt] > dev->absmax[cnt])) {
+ printk(KERN_DEBUG
+ "%s: absflat[%02x] out of range: %d "
+ "(min:%d/max:%d)\n",
+ UINPUT_NAME, cnt, dev->absflat[cnt],
+ dev->absmin[cnt], dev->absmax[cnt]);
+ retval = -EINVAL;
+ break;
+ }
+ }
+ return retval;
+}
+
static int uinput_alloc_device(struct file *file, const char *buffer, size_t count)
{
struct uinput_user_dev user_dev;
@@ -145,14 +178,17 @@ static int uinput_alloc_device(struct file *file, const char *buffer, size_t cou
retval = count;
+ udev = (struct uinput_device *)file->private_data;
+ dev = udev->dev;
+
if (copy_from_user(&user_dev, buffer, sizeof(struct uinput_user_dev))) {
retval = -EFAULT;
goto exit;
}
- udev = (struct uinput_device *)file->private_data;
- dev = udev->dev;
-
+ if (NULL != dev->name)
+ kfree(dev->name);
+
size = strnlen(user_dev.name, UINPUT_MAX_NAME_SIZE);
dev->name = kmalloc(size + 1, GFP_KERNEL);
if (!dev->name) {
@@ -168,7 +204,7 @@ static int uinput_alloc_device(struct file *file, const char *buffer, size_t cou
dev->id.version = user_dev.id.version;
dev->ff_effects_max = user_dev.ff_effects_max;
- size = sizeof(unsigned long) * NBITS(ABS_MAX + 1);
+ size = sizeof(int) * (ABS_MAX + 1);
memcpy(dev->absmax, user_dev.absmax, size);
memcpy(dev->absmin, user_dev.absmin, size);
memcpy(dev->absfuzz, user_dev.absfuzz, size);
@@ -177,33 +213,20 @@ static int uinput_alloc_device(struct file *file, const char *buffer, size_t cou
/* check if absmin/absmax/absfuzz/absflat are filled as
* told in Documentation/input/input-programming.txt */
if (test_bit(EV_ABS, dev->evbit)) {
- unsigned int cnt;
- for (cnt = 1; cnt < ABS_MAX; cnt++)
- if (test_bit(cnt, dev->absbit) &&
- (!dev->absmin[cnt] ||
- !dev->absmax[cnt] ||
- !dev->absfuzz[cnt] ||
- !dev->absflat[cnt])) {
- printk(KERN_DEBUG "%s: set abs fields "
- "first\n", UINPUT_NAME);
- retval = -EINVAL;
- goto free_name;
- }
+ retval = uinput_validate_absbits(dev);
+ if (retval < 0)
+ kfree(dev->name);
}
exit:
return retval;
-free_name:
- kfree(dev->name);
- goto exit;
}
static int uinput_write(struct file *file, const char *buffer, size_t count, loff_t *ppos)
{
struct uinput_device *udev = file->private_data;
-
- if (udev->state & UIST_CREATED) {
+ if (test_bit(UIST_CREATED, &(udev->state))) {
struct input_event ev;
if (copy_from_user(&ev, buffer, sizeof(struct input_event)))
@@ -220,23 +243,28 @@ static ssize_t uinput_read(struct file *file, char *buffer, size_t count, loff_t
{
struct uinput_device *udev = file->private_data;
int retval = 0;
+
+ if (!test_bit(UIST_CREATED, &(udev->state)))
+ return -ENODEV;
- if (udev->head == udev->tail && (udev->state & UIST_CREATED) && (file->f_flags & O_NONBLOCK))
+ if ((udev->head == udev->tail) && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
retval = wait_event_interruptible(udev->waitq,
- udev->head != udev->tail && (udev->state & UIST_CREATED));
-
+ (udev->head != udev->tail) ||
+ !test_bit(UIST_CREATED, &(udev->state)));
+
if (retval)
return retval;
- if (!(udev->state & UIST_CREATED))
+ if (!test_bit(UIST_CREATED, &(udev->state)))
return -ENODEV;
- while (udev->head != udev->tail && retval + sizeof(struct uinput_device) <= count) {
+ while ((udev->head != udev->tail) &&
+ (retval + sizeof(struct uinput_device) <= count)) {
if (copy_to_user(buffer + retval, &(udev->buff[udev->tail]),
sizeof(struct input_event))) return -EFAULT;
- udev->tail = (udev->tail + 1) % (UINPUT_BUFFER_SIZE - 1);
+ udev->tail = (udev->tail + 1) % UINPUT_BUFFER_SIZE;
retval += sizeof(struct input_event);
}
@@ -245,7 +273,7 @@ static ssize_t uinput_read(struct file *file, char *buffer, size_t count, loff_t
static unsigned int uinput_poll(struct file *file, poll_table *wait)
{
- struct uinput_device *udev = file->private_data;
+ struct uinput_device *udev = file->private_data;
poll_wait(file, &udev->waitq, wait);
@@ -257,7 +285,7 @@ static unsigned int uinput_poll(struct file *file, poll_table *wait)
static int uinput_burn_device(struct uinput_device *udev)
{
- if (udev->state & UIST_CREATED)
+ if (test_bit(UIST_CREATED, &(udev->state)))
uinput_destroy_device(udev);
kfree(udev->dev);
@@ -282,50 +310,52 @@ static int uinput_ioctl(struct inode *inode, struct file *file, unsigned int cmd
udev = (struct uinput_device *)file->private_data;
- if (cmd >= UI_SET_EVBIT && (udev->state & UIST_CREATED))
+ /* device attributes can not be changed after the device is created */
+ if (cmd >= UI_SET_EVBIT && test_bit(UIST_CREATED, &(udev->state)))
return -EINVAL;
switch (cmd) {
case UI_DEV_CREATE:
retval = uinput_create_device(udev);
-
break;
+
case UI_DEV_DESTROY:
retval = uinput_destroy_device(udev);
-
break;
+
+
case UI_SET_EVBIT:
set_bit(arg, udev->dev->evbit);
-
- break;
+ break;
+
case UI_SET_KEYBIT:
set_bit(arg, udev->dev->keybit);
-
- break;
+ break;
+
case UI_SET_RELBIT:
set_bit(arg, udev->dev->relbit);
-
- break;
+ break;
+
case UI_SET_ABSBIT:
set_bit(arg, udev->dev->absbit);
-
- break;
+ break;
+
case UI_SET_MSCBIT:
set_bit(arg, udev->dev->mscbit);
-
- break;
+ break;
+
case UI_SET_LEDBIT:
set_bit(arg, udev->dev->ledbit);
-
- break;
+ break;
+
case UI_SET_SNDBIT:
set_bit(arg, udev->dev->sndbit);
-
- break;
+ break;
+
case UI_SET_FFBIT:
set_bit(arg, udev->dev->ffbit);
-
- break;
+ break;
+
default:
retval = -EFAULT;
}
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index e6952acd9e1d..7dc627496db1 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -26,14 +26,16 @@ MODULE_DESCRIPTION("i8042 keyboard and mouse controller driver");
MODULE_LICENSE("GPL");
MODULE_PARM(i8042_noaux, "1i");
+MODULE_PARM(i8042_nomux, "1i");
MODULE_PARM(i8042_unlock, "1i");
MODULE_PARM(i8042_reset, "1i");
MODULE_PARM(i8042_direct, "1i");
MODULE_PARM(i8042_dumbkbd, "1i");
+static int i8042_reset;
static int i8042_noaux;
+static int i8042_nomux;
static int i8042_unlock;
-static int i8042_reset;
static int i8042_direct;
static int i8042_dumbkbd;
@@ -220,7 +222,6 @@ static int i8042_aux_write(struct serio *port, unsigned char c)
return retval;
}
-
/*
* i8042_open() is called when a port is open by the higher layer.
* It allocates the interrupt and enables in in the chip.
@@ -323,8 +324,8 @@ static struct serio i8042_aux_port =
static struct i8042_values i8042_mux_values[4];
static struct serio i8042_mux_port[4];
-static char i8042_mux_names[4][16];
-static char i8042_mux_short[4][8];
+static char i8042_mux_names[4][32];
+static char i8042_mux_short[4][16];
static char i8042_mux_phys[4][32];
/*
@@ -364,15 +365,15 @@ static void i8042_interrupt(int irq, void *dev_id, struct pt_regs *regs)
dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
- if (i8042_mux_values[0].exists && (buffer[i].str & I8042_STR_AUXDATA)) {
+ if (i8042_mux_values[0].exists && (str & I8042_STR_AUXDATA)) {
- if (buffer[i].str & I8042_STR_MUXERR) {
- switch (buffer[i].data) {
+ if (str & I8042_STR_MUXERR) {
+ switch (data) {
case 0xfd:
case 0xfe: dfl = SERIO_TIMEOUT; break;
case 0xff: dfl = SERIO_PARITY; break;
}
- buffer[i].data = 0xfe;
+ data = 0xfe;
} else dfl = 0;
dbg("%02x <- i8042 (interrupt, aux%d, %d%s%s)",
@@ -380,8 +381,7 @@ static void i8042_interrupt(int irq, void *dev_id, struct pt_regs *regs)
dfl & SERIO_PARITY ? ", bad parity" : "",
dfl & SERIO_TIMEOUT ? ", timeout" : "");
- if (i8042_mux_values[(str >> 6)].exists)
- serio_interrupt(i8042_mux_port + (str >> 6), buffer[i].data, dfl);
+ serio_interrupt(i8042_mux_port + ((str >> 6) & 3), data, dfl);
continue;
}
@@ -390,8 +390,8 @@ static void i8042_interrupt(int irq, void *dev_id, struct pt_regs *regs)
dfl & SERIO_PARITY ? ", bad parity" : "",
dfl & SERIO_TIMEOUT ? ", timeout" : "");
- if (i8042_aux_values.exists && (buffer[i].str & I8042_STR_AUXDATA)) {
- serio_interrupt(&i8042_aux_port, buffer[i].data, dfl);
+ if (i8042_aux_values.exists && (str & I8042_STR_AUXDATA)) {
+ serio_interrupt(&i8042_aux_port, data, dfl);
continue;
}
@@ -602,8 +602,14 @@ static int __init i8042_check_mux(struct i8042_values *values)
if (i8042_command(&param, I8042_CMD_AUX_LOOP) || param == 0x5b)
return -1;
- printk(KERN_INFO "i8042.c: Detected active multiplexing controller, rev%d.%d.\n",
- ~param >> 4, ~param & 0xf);
+ printk(KERN_INFO "i8042.c: Detected active multiplexing controller, rev %d.%d.\n",
+ (~param >> 4) & 0xf, ~param & 0xf);
+
+/*
+ * In MUX mode the keyboard translation seems to be always off.
+ */
+
+ i8042_direct = 1;
/*
* Disable all muxed ports by disabling AUX.
@@ -742,6 +748,12 @@ static int __init i8042_setup_reset(char *str)
static int __init i8042_setup_noaux(char *str)
{
i8042_noaux = 1;
+ i8042_nomux = 1;
+ return 1;
+}
+static int __init i8042_setup_nomux(char *str)
+{
+ i8042_nomux = 1;
return 1;
}
static int __init i8042_setup_unlock(char *str)
@@ -762,6 +774,7 @@ static int __init i8042_setup_dumbkbd(char *str)
__setup("i8042_reset", i8042_setup_reset);
__setup("i8042_noaux", i8042_setup_noaux);
+__setup("i8042_nomux", i8042_setup_nomux);
__setup("i8042_unlock", i8042_setup_unlock);
__setup("i8042_direct", i8042_setup_direct);
__setup("i8042_dumbkbd", i8042_setup_dumbkbd);
@@ -796,6 +809,7 @@ static void __init i8042_init_mux_values(struct i8042_values *values, struct ser
sprintf(i8042_mux_short[index], "AUX%d", index);
port->name = i8042_mux_names[index];
port->phys = i8042_mux_phys[index];
+ port->driver = values;
values->name = i8042_mux_short[index];
values->mux = index;
}
@@ -809,8 +823,8 @@ int __init i8042_init(void)
if (i8042_platform_init())
return -EBUSY;
- i8042_aux_values.irq = I8042_AUX_IRQ;
- i8042_kbd_values.irq = I8042_KBD_IRQ;
+ i8042_aux_values.irq = I8042_AUX_IRQ;
+ i8042_kbd_values.irq = I8042_KBD_IRQ;
if (i8042_controller_init())
return -ENODEV;
@@ -821,7 +835,7 @@ int __init i8042_init(void)
for (i = 0; i < 4; i++)
i8042_init_mux_values(i8042_mux_values + i, i8042_mux_port + i, i);
- if (!i8042_noaux && !i8042_check_mux(&i8042_aux_values))
+ if (!i8042_nomux && !i8042_check_mux(&i8042_aux_values))
for (i = 0; i < 4; i++)
i8042_port_register(i8042_mux_values + i, i8042_mux_port + i);
else
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 277913d5810a..2590f42719c8 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -49,11 +49,13 @@
- Power Management support
v1.18c 1Mar2002 David Ruggiero <jdr@farfalle.com>
- Full duplex support
+ v1.19 16Oct2002 Zwane Mwaikambo <zwane@linuxpower.ca>
+ - Additional ethtool features
*/
#define DRV_NAME "3c509"
-#define DRV_VERSION "1.18c"
-#define DRV_RELDATE "1Mar2002"
+#define DRV_VERSION "1.19"
+#define DRV_RELDATE "16Oct2002"
/* A few values that may be tweaked. */
@@ -140,9 +142,11 @@ enum RxFilter {
#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */
+#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
-#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */
#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */
@@ -981,6 +985,119 @@ el3_close(struct net_device *dev)
return 0;
}
+static int
+el3_link_ok(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u16 tmp;
+
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ return tmp & (1<<11);
+}
+
+static int
+el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ /* obtain current tranceiver via WN4_MEDIA? */
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ ecmd->transceiver = XCVR_INTERNAL;
+ switch (tmp >> 14) {
+ case 0:
+ ecmd->port = PORT_TP;
+ break;
+ case 1:
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 3:
+ ecmd->port = PORT_BNC;
+ default:
+ break;
+ }
+
+ ecmd->duplex = DUPLEX_HALF;
+ ecmd->supported = 0;
+ tmp = inw(ioaddr + WN0_CONF_CTRL);
+ if (tmp & (1<<13))
+ ecmd->supported |= SUPPORTED_AUI;
+ if (tmp & (1<<12))
+ ecmd->supported |= SUPPORTED_BNC;
+ if (tmp & (1<<9)) {
+ ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full; /* hmm... */
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_NETDIAG);
+ if (tmp & FD_ENABLE)
+ ecmd->duplex = DUPLEX_FULL;
+ }
+
+ ecmd->speed = SPEED_10;
+ EL3WINDOW(1);
+ return 0;
+}
+
+static int
+el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ int ioaddr = dev->base_addr;
+
+ if (ecmd->speed != SPEED_10)
+ return -EINVAL;
+ if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL))
+ return -EINVAL;
+
+ /* change XCVR type */
+ EL3WINDOW(0);
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ switch (ecmd->port) {
+ case PORT_TP:
+ tmp &= ~(3<<14);
+ dev->if_port = 0;
+ break;
+ case PORT_AUI:
+ tmp |= (1<<14);
+ dev->if_port = 1;
+ break;
+ case PORT_BNC:
+ tmp |= (3<<14);
+ dev->if_port = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ outw(tmp, ioaddr + WN0_ADDR_CONF);
+ if (dev->if_port == 3) {
+ /* fire up the DC-DC convertor if BNC gets enabled */
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ if (tmp & (3 << 14)) {
+ outw(StartCoax, ioaddr + EL3_CMD);
+ udelay(800);
+ } else
+ return -EIO;
+ }
+
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_NETDIAG);
+ if (ecmd->duplex == DUPLEX_FULL)
+ tmp |= FD_ENABLE;
+ else
+ tmp &= ~FD_ENABLE;
+ outw(tmp, ioaddr + WN4_NETDIAG);
+ EL3WINDOW(1);
+
+ return 0;
+}
+
/**
* netdev_ethtool_ioctl: Handle network interface SIOCETHTOOL ioctls
* @dev: network interface on which out-of-band action is to be performed
@@ -989,9 +1106,11 @@ el3_close(struct net_device *dev)
* Process the various commands of the SIOCETHTOOL interface.
*/
-static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
+static int
+netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
{
u32 ethcmd;
+ struct el3_private *lp = dev->priv;
/* dev_ioctl() in ../../net/core/dev.c has already checked
capable(CAP_NET_ADMIN), so don't bother with that here. */
@@ -1010,6 +1129,41 @@ static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
return 0;
}
+ /* get settings */
+ case ETHTOOL_GSET: {
+ int ret;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ spin_lock_irq(&lp->lock);
+ ret = el3_netdev_get_ecmd(dev, &ecmd);
+ spin_unlock_irq(&lp->lock);
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return ret;
+ }
+
+ /* set settings */
+ case ETHTOOL_SSET: {
+ int ret;
+ struct ethtool_cmd ecmd;
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
+ spin_lock_irq(&lp->lock);
+ ret = el3_netdev_set_ecmd(dev, &ecmd);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+ }
+
+ /* get link status */
+ case ETHTOOL_GLINK: {
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+ spin_lock_irq(&lp->lock);
+ edata.data = el3_link_ok(dev);
+ spin_unlock_irq(&lp->lock);
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+
/* get message-level */
case ETHTOOL_GMSGLVL: {
struct ethtool_value edata = {ETHTOOL_GMSGLVL};
@@ -1043,7 +1197,8 @@ static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
* Process the various out-of-band ioctls passed to this driver.
*/
-static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+static int
+netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
int rc = 0;
@@ -1060,7 +1215,8 @@ static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-static void el3_down(struct net_device *dev)
+static void
+el3_down(struct net_device *dev)
{
int ioaddr = dev->base_addr;
@@ -1077,7 +1233,7 @@ static void el3_down(struct net_device *dev)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
else if (dev->if_port == 0) {
- /* Disable link beat and jabber, if_port may change ere next open(). */
+ /* Disable link beat and jabber, if_port may change here next open(). */
EL3WINDOW(4);
outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
}
@@ -1087,7 +1243,8 @@ static void el3_down(struct net_device *dev)
update_stats(dev);
}
-static void el3_up(struct net_device *dev)
+static void
+el3_up(struct net_device *dev)
{
int i, sw_info, net_diag;
int ioaddr = dev->base_addr;
@@ -1176,7 +1333,8 @@ static void el3_up(struct net_device *dev)
/* Power Management support functions */
#ifdef CONFIG_PM
-static int el3_suspend(struct pm_dev *pdev)
+static int
+el3_suspend(struct pm_dev *pdev)
{
unsigned long flags;
struct net_device *dev;
@@ -1202,7 +1360,8 @@ static int el3_suspend(struct pm_dev *pdev)
return 0;
}
-static int el3_resume(struct pm_dev *pdev)
+static int
+el3_resume(struct pm_dev *pdev)
{
unsigned long flags;
struct net_device *dev;
@@ -1228,7 +1387,8 @@ static int el3_resume(struct pm_dev *pdev)
return 0;
}
-static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
+static int
+el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
{
switch (rqst) {
case PM_SUSPEND:
diff --git a/drivers/net/bonding.c b/drivers/net/bonding.c
index 2fd61abaad88..68042f989e99 100644
--- a/drivers/net/bonding.c
+++ b/drivers/net/bonding.c
@@ -186,6 +186,11 @@
* also added text to distinguish type of load balancing (rr or xor)
* - change arp_ip_target module param from "1-12s" (array of 12 ptrs)
* to "s" (a single ptr)
+ *
+ * 2002/09/18 - Jay Vosburgh <fubar at us dot ibm dot com>
+ * - Fixed up bond_check_dev_link() (and callers): removed some magic
+ * numbers, banished local MII_ defines, wrapped ioctl calls to
+ * prevent EFAULT errors
*/
#include <linux/config.h>
@@ -228,15 +233,6 @@
#define BOND_LINK_MON_INTERV 0
#endif
-#undef MII_LINK_UP
-#define MII_LINK_UP 0x04
-
-#undef MII_ENDOF_NWAY
-#define MII_ENDOF_NWAY 0x20
-
-#undef MII_LINK_READY
-#define MII_LINK_READY (MII_LINK_UP)
-
#ifndef BOND_LINK_ARP_INTERV
#define BOND_LINK_ARP_INTERV 0
#endif
@@ -386,13 +382,25 @@ static slave_t *bond_detach_slave(bonding_t *bond, slave_t *slave)
return slave;
}
+/*
+ * Less bad way to call ioctl from within the kernel; this needs to be
+ * done some other way to get the call out of interrupt context.
+ * Needs "ioctl" variable to be supplied by calling context.
+ */
+#define IOCTL(dev, arg, cmd) ({ \
+ int ret; \
+ mm_segment_t fs = get_fs(); \
+ set_fs(get_ds()); \
+ ret = ioctl(dev, arg, cmd); \
+ set_fs(fs); \
+ ret; })
+
/*
- * if <dev> supports MII link status reporting, check its link
- * and report it as a bit field in a short int :
- * - 0x04 means link is up,
- * - 0x20 means end of autonegociation
- * If the device doesn't support MII, then we only report 0x24,
- * meaning that the link is up and running since we can't check it.
+ * if <dev> supports MII link status reporting, check its link status.
+ *
+ * Return either BMSR_LSTATUS, meaning that the link is up (or we
+ * can't tell and just pretend it is), or 0, meaning that the link is
+ * down.
*/
static u16 bond_check_dev_link(struct net_device *dev)
{
@@ -401,7 +409,8 @@ static u16 bond_check_dev_link(struct net_device *dev)
struct mii_ioctl_data *mii;
struct ethtool_value etool;
- if ((ioctl = dev->do_ioctl) != NULL) { /* ioctl to access MII */
+ ioctl = dev->do_ioctl;
+ if (ioctl) {
/* TODO: set pointer to correct ioctl on a per team member */
/* bases to make this more efficient. that is, once */
/* we determine the correct ioctl, we will always */
@@ -415,9 +424,9 @@ static u16 bond_check_dev_link(struct net_device *dev)
/* effect... */
etool.cmd = ETHTOOL_GLINK;
ifr.ifr_data = (char*)&etool;
- if (ioctl(dev, &ifr, SIOCETHTOOL) == 0) {
+ if (IOCTL(dev, &ifr, SIOCETHTOOL) == 0) {
if (etool.data == 1) {
- return(MII_LINK_READY);
+ return BMSR_LSTATUS;
}
else {
return(0);
@@ -431,21 +440,17 @@ static u16 bond_check_dev_link(struct net_device *dev)
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
mii = (struct mii_ioctl_data *)&ifr.ifr_data;
- if (ioctl(dev, &ifr, SIOCGMIIPHY) != 0) {
- return MII_LINK_READY; /* can't tell */
+ if (IOCTL(dev, &ifr, SIOCGMIIPHY) != 0) {
+ return BMSR_LSTATUS; /* can't tell */
}
- mii->reg_num = 1;
- if (ioctl(dev, &ifr, SIOCGMIIREG) == 0) {
- /*
- * mii->val_out contains MII reg 1, BMSR
- * 0x0004 means link established
- */
- return mii->val_out;
+ mii->reg_num = MII_BMSR;
+ if (IOCTL(dev, &ifr, SIOCGMIIREG) == 0) {
+ return mii->val_out & BMSR_LSTATUS;
}
}
- return MII_LINK_READY; /* spoof link up ( we can't check it) */
+ return BMSR_LSTATUS; /* spoof link up ( we can't check it) */
}
static u16 bond_check_mii_link(bonding_t *bond)
@@ -459,7 +464,7 @@ static u16 bond_check_mii_link(bonding_t *bond)
read_unlock(&bond->ptrlock);
read_unlock_irqrestore(&bond->lock, flags);
- return (has_active_interface ? MII_LINK_READY : 0);
+ return (has_active_interface ? BMSR_LSTATUS : 0);
}
static int bond_open(struct net_device *dev)
@@ -797,8 +802,8 @@ static int bond_enslave(struct net_device *master_dev,
new_slave->link_failure_count = 0;
/* check for initial state */
- if ((miimon <= 0) || ((bond_check_dev_link(slave_dev) & MII_LINK_READY)
- == MII_LINK_READY)) {
+ if ((miimon <= 0) ||
+ (bond_check_dev_link(slave_dev) == BMSR_LSTATUS)) {
#ifdef BONDING_DEBUG
printk(KERN_CRIT "Initial state of slave_dev is BOND_LINK_UP\n");
#endif
@@ -1220,7 +1225,7 @@ static void bond_mii_monitor(struct net_device *master)
switch (slave->link) {
case BOND_LINK_UP: /* the link was up */
- if ((link_state & MII_LINK_UP) == MII_LINK_UP) {
+ if (link_state == BMSR_LSTATUS) {
/* link stays up, tell that this one
is immediately available */
if (IS_UP(dev) && (mindelay > -2)) {
@@ -1256,7 +1261,7 @@ static void bond_mii_monitor(struct net_device *master)
ensure proper action to be taken
*/
case BOND_LINK_FAIL: /* the link has just gone down */
- if ((link_state & MII_LINK_UP) == 0) {
+ if (link_state != BMSR_LSTATUS) {
/* link stays down */
if (slave->delay <= 0) {
/* link down for too long time */
@@ -1285,7 +1290,7 @@ static void bond_mii_monitor(struct net_device *master)
} else {
slave->delay--;
}
- } else if ((link_state & MII_LINK_READY) == MII_LINK_READY) {
+ } else {
/* link up again */
slave->link = BOND_LINK_UP;
printk(KERN_INFO
@@ -1304,7 +1309,7 @@ static void bond_mii_monitor(struct net_device *master)
}
break;
case BOND_LINK_DOWN: /* the link was down */
- if ((link_state & MII_LINK_READY) != MII_LINK_READY) {
+ if (link_state != BMSR_LSTATUS) {
/* the link stays down, nothing more to do */
break;
} else { /* link going up */
@@ -1326,7 +1331,7 @@ static void bond_mii_monitor(struct net_device *master)
case there's something to do.
*/
case BOND_LINK_BACK: /* the link has just come back */
- if ((link_state & MII_LINK_UP) == 0) {
+ if (link_state != BMSR_LSTATUS) {
/* link down again */
slave->link = BOND_LINK_DOWN;
printk(KERN_INFO
@@ -1335,8 +1340,7 @@ static void bond_mii_monitor(struct net_device *master)
master->name,
(updelay - slave->delay) * miimon,
dev->name);
- }
- else if ((link_state & MII_LINK_READY) == MII_LINK_READY) {
+ } else {
/* link stays up */
if (slave->delay == 0) {
/* now the link has been up for long time enough */
@@ -2110,7 +2114,7 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length)
len += sprintf(buf + len, "MII Status: ");
len += sprintf(buf + len,
- link == MII_LINK_READY ? "up\n" : "down\n");
+ link == BMSR_LSTATUS ? "up\n" : "down\n");
len += sprintf(buf + len, "MII Polling Interval (ms): %d\n",
miimon);
len += sprintf(buf + len, "Up Delay (ms): %d\n", updelay);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ed1826de999b..e6a6409dd31a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -135,7 +135,7 @@ static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
-static void e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter);
@@ -415,7 +415,8 @@ e1000_probe(struct pci_dev *pdev,
/* setup the private structure */
- e1000_sw_init(adapter);
+ if(e1000_sw_init(adapter))
+ goto err_sw_init;
if(adapter->hw.mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
@@ -500,6 +501,7 @@ e1000_probe(struct pci_dev *pdev,
cards_found++;
return 0;
+err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
@@ -555,7 +557,7 @@ e1000_remove(struct pci_dev *pdev)
* OS network device settings (MTU size).
**/
-static void __devinit
+static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -564,11 +566,11 @@ e1000_sw_init(struct e1000_adapter *adapter)
/* PCI config space info */
- pci_read_config_word(pdev, PCI_VENDOR_ID, &hw->vendor_id);
- pci_read_config_word(pdev, PCI_DEVICE_ID, &hw->device_id);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID,
- &hw->subsystem_vendor_id);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &hw->subsystem_id);
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
@@ -581,7 +583,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_set_mac_type(hw)) {
E1000_ERR("Unknown MAC Type\n");
- BUG();
+ return -1;
}
/* flow control settings */
@@ -622,6 +624,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->stats_lock);
+
+ return 0;
}
/**
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 5dbbed1cae47..b89e3be6d14a 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -76,6 +76,7 @@
kernel with the ewrk3 configuration turned off and reboot.
5) insmod ewrk3.o
[Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards]
6) run the net startup bits for your new eth?? interface manually
(usually /etc/rc.inet[12] at boot time).
7) enjoy!
@@ -130,10 +131,12 @@
Add new multicasting code.
0.41 20-Jan-96 Fix IRQ set up problem reported by
<kenneth@bbs.sas.ntu.ac.sg>.
- 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
- 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
+ 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+ 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
+ 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
+ 0.46 10-Oct-02 cli/sti removal <VDA@port.imtp.ilyichevsk.odessa.ua>
+ Multiple NIC support when module <akropel1@rochester.rr.com>
=========================================================================
*/
@@ -167,7 +170,7 @@
#include "ewrk3.h"
static char version[] __initdata =
-"ewrk3.c:v0.43a 2001/02/04 davies@maniac.ultranet.com\n";
+"ewrk3.c:v0.46 2002/10/09 davies@maniac.ultranet.com\n";
#ifdef EWRK3_DEBUG
static int ewrk3_debug = EWRK3_DEBUG;
@@ -196,6 +199,7 @@ static int ewrk3_debug = 1;
#define EWRK3_IOP_INC 0x20 /* I/O address increment */
#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
+/* If you change this, remember to also change MODULE_PARM array limits */
#ifndef MAX_NUM_EWRK3S
#define MAX_NUM_EWRK3S 21
#endif
@@ -1716,6 +1720,11 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case EWRK3_SET_MCA: /* Set a multicast address */
if (capable(CAP_NET_ADMIN)) {
+ if (ioc->len > 1024)
+ {
+ status = -EINVAL;
+ break;
+ }
if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) {
status = -EFAULT;
break;
@@ -1843,35 +1852,62 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
#ifdef MODULE
-static struct net_device thisEthwrk;
-static int io = 0x300; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */
-static int irq = 5; /* or use the insmod io= irq= options */
+static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
+static int ndevs;
+static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, }; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int irq[MAX_NUM_EWRK3S+1] = { 5, 0, }; /* or use the insmod io= irq= options */
-MODULE_PARM(io, "i");
-MODULE_PARM(irq, "i");
-MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address");
-MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number");
+/* '21' below should really be 'MAX_NUM_EWRK3S' */
+MODULE_PARM(io, "0-21i");
+MODULE_PARM(irq, "0-21i");
+MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
int init_module(void)
{
- thisEthwrk.base_addr = io;
- thisEthwrk.irq = irq;
- thisEthwrk.init = ewrk3_probe;
- if (register_netdev(&thisEthwrk) != 0)
- return -EIO;
- return 0;
+ int i=0;
+
+ while( io[i] && irq[i] ) {
+ ewrk3_devs[ndevs] = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+ if (!ewrk3_devs[ndevs])
+ goto error;
+ memset(ewrk3_devs[ndevs], 0, sizeof(struct net_device));
+ ewrk3_devs[ndevs]->base_addr = io[i];
+ ewrk3_devs[ndevs]->irq = irq[i];
+ ewrk3_devs[ndevs]->init = ewrk3_probe;
+
+ if (register_netdev(ewrk3_devs[ndevs]) == 0)
+ ndevs++;
+ else
+ kfree(ewrk3_devs[ndevs]);
+
+ i++;
+ }
+
+ return ndevs ? 0 : -EIO;
+
+error:
+ cleanup_module();
+ return -ENOMEM;
}
void cleanup_module(void)
{
- unregister_netdev(&thisEthwrk);
- if (thisEthwrk.priv) {
- kfree(thisEthwrk.priv);
- thisEthwrk.priv = NULL;
- }
- thisEthwrk.irq = 0;
+ int i;
+
+ for( i=0; i<ndevs; i++ )
+ {
+ unregister_netdev(ewrk3_devs[i]);
+ if (ewrk3_devs[i]->priv) {
+ kfree(ewrk3_devs[i]->priv);
+ ewrk3_devs[i]->priv = NULL;
+ }
+ ewrk3_devs[i]->irq = 0;
- release_region(thisEthwrk.base_addr, EWRK3_TOTAL_SIZE);
+ release_region(ewrk3_devs[i]->base_addr, EWRK3_TOTAL_SIZE);
+ kfree(ewrk3_devs[i]);
+ ewrk3_devs[i] = NULL;
+ }
}
#endif /* MODULE */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/Config.help b/drivers/net/irda/Config.help
index 295357525c8c..e8ec1552be78 100644
--- a/drivers/net/irda/Config.help
+++ b/drivers/net/irda/Config.help
@@ -55,12 +55,22 @@ CONFIG_NSC_FIR
<file:Documentation/modules.txt>. The module will be called
nsc-ircc.o.
-CONFIG_TOSHIBA_FIR
+CONFIG_TOSHIBA_OLD
Say Y here if you want to build support for the Toshiba Type-O IR
chipset. This chipset is used by the Toshiba Libretto 100CT, and
- many more laptops. If you want to compile it as a module, say M
- here and read <file:Documentation/modules.txt>. The module will be
- called toshoboe.o.
+ many more laptops. This driver is obsolete, will no more be
+ maintained and will be removed in favor of the new driver.
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>.
+ The module will be called toshoboe.o.
+
+CONFIG_TOSHIBA_FIR
+ Say Y here if you want to build support for the Toshiba Type-O IR
+ and Donau oboe chipsets. These chipsets are used by the Toshiba
+ Libretto 100/110CT, Tecra 8100, Portege 7020 and many more laptops.
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>.
+ The module will be called donauboe.o.
CONFIG_SMC_IRCC_FIR
Say Y here if you want to build support for the SMC Infrared
@@ -165,3 +175,18 @@ CONFIG_ACT200L_DONGLE
the normal 9-pin serial port connector, and can currently only be
used by IrTTY. To activate support for ACTiSYS IR-200L dongles
you will have to start irattach like this: "irattach -d act200l".
+
+Mobile Action MA600 dongle (Experimental)
+CONFIG_MA600_DONGLE
+ Say Y here if you want to build support for the Mobile Action MA600
+ dongle. If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. The MA600 dongle attaches to
+ the normal 9-pin serial port connector, and can currently only be
+ tested on IrCOMM. To activate support for MA600 dongles you will
+ have to insert "irattach -d ma600" in the /etc/irda/drivers script.
+ Note: irutils 0.9.15 requires no modification. irutils 0.9.9 needs
+ modification. For more information, download the following tar gzip
+ file.
+
+ There is a pre-compiled module on
+ <http://engsvr.ust.hk/~eetwl95/download/ma600-2.4.x.tar.gz>
diff --git a/drivers/net/irda/Config.in b/drivers/net/irda/Config.in
index 19f98192ae06..34da5d77e4c8 100644
--- a/drivers/net/irda/Config.in
+++ b/drivers/net/irda/Config.in
@@ -28,6 +28,7 @@ comment 'FIR device drivers'
dep_tristate 'IrDA USB dongles (EXPERIMENTAL)' CONFIG_USB_IRDA $CONFIG_IRDA $CONFIG_USB $CONFIG_EXPERIMENTAL
dep_tristate 'NSC PC87108/PC87338' CONFIG_NSC_FIR $CONFIG_IRDA
dep_tristate 'Winbond W83977AF (IR)' CONFIG_WINBOND_FIR $CONFIG_IRDA
+dep_tristate 'Toshiba Type-O IR Port (old driver)' CONFIG_TOSHIBA_OLD $CONFIG_IRDA
dep_tristate 'Toshiba Type-O IR Port' CONFIG_TOSHIBA_FIR $CONFIG_IRDA
if [ "$CONFIG_EXPERIMENTAL" != "n" ]; then
dep_tristate 'SMC IrCC (EXPERIMENTAL)' CONFIG_SMC_IRCC_FIR $CONFIG_IRDA
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 7f005a2fe9a3..a35468d16ed7 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_USB_IRDA) += irda-usb.o
obj-$(CONFIG_NSC_FIR) += nsc-ircc.o
obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o
obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o
-obj-$(CONFIG_TOSHIBA_FIR) += toshoboe.o
+obj-$(CONFIG_TOSHIBA_OLD) += toshoboe.o
+obj-$(CONFIG_TOSHIBA_FIR) += donauboe.o
obj-$(CONFIG_SMC_IRCC_FIR) += smc-ircc.o irport.o
obj-$(CONFIG_ALI_FIR) += ali-ircc.o
obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
new file mode 100644
index 000000000000..ce76c33a8352
--- /dev/null
+++ b/drivers/net/irda/donauboe.c
@@ -0,0 +1,1850 @@
+/*****************************************************************
+ *
+ * Filename: donauboe.c
+ * Version: 2.17
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: Paul Bristow <paul.bristow@technologist.com>
+ * Modified: Mon Nov 11 19:10:05 1999
+ * Modified: James McKenzie <james@fishsoup.dhs.org>
+ * Modified: Thu Mar 16 12:49:00 2000 (Substantial rewrite)
+ * Modified: Sat Apr 29 00:23:03 2000 (Added DONAUOBOE support)
+ * Modified: Wed May 24 23:45:02 2000 (Fixed chipio_t structure)
+ * Modified: 2.13 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.13 dim jan 07 21:57:39 2001 (tested with kernel 2.4 & irnet/ppp)
+ * Modified: 2.14 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.14 lun fev 05 17:55:59 2001 (adapted to patch-2.4.1-pre8-irda1)
+ * Modified: 2.15 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.15 Fri Jun 21 20:40:59 2002 (sync with 2.4.18, substantial fixes)
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (fix freeregion, default to verbose)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (save_flags();cli(); replaced by spinlocks)
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ ********************************************************************/
+
+/* Look at toshoboe.h (currently in include/net/irda) for details of */
+/* Where to get documentation on the chip */
+
+
+static char *rcsid =
+ "$Id: donauboe.c V2.17 jeu sep 12 08:50:20 2002 $";
+
+/* See below for a description of the logic in this driver */
+
+/* Is irda_crc16_table[] exported? not yet */
+/* define this if you get errors about multiple defns of irda_crc16_table */
+#undef CRC_EXPORTED
+
+/* User serviceable parts */
+/* Enable the code which probes the chip and does a few tests */
+/* Probe code is very useful for understanding how the hardware works */
+/* Use it with various combinations of TT_LEN, RX_LEN */
+/* Strongly recommended, disable if the probe fails on your machine */
+/* and send me <james@fishsoup.dhs.org> the output of dmesg */
+#define DO_PROBE 1
+
+/* Trace Transmit ring, interrupts, Receive ring or not ? */
+#define PROBE_VERBOSE 1
+
+/* Debug option, examine sent and received raw data */
+/* Irdadump is better, but does not see all packets. enable it if you want. */
+#undef DUMP_PACKETS
+
+/* MIR mode has not been tested. Some behaviour is different */
+/* Seems to work against an Ericsson R520 for me. -Martin */
+#define USE_MIR
+
+/* Schedule back to back hardware transmits wherever possible, otherwise */
+/* we need an interrupt for every frame, unset if oboe works for a bit and */
+/* then hangs */
+#define OPTIMIZE_TX
+
+/* Set the number of slots in the rings */
+/* If you get rx/tx fifo overflows at high bitrates, you can try increasing */
+/* these */
+
+#define RING_SIZE (OBOE_RING_SIZE_RX8 | OBOE_RING_SIZE_TX8)
+#define TX_SLOTS 8
+#define RX_SLOTS 8
+
+
+/* Less user serviceable parts below here */
+
+/* Test, Transmit and receive buffer sizes, adjust at your peril */
+/* remarks: nfs usually needs 1k blocks */
+/* remarks: in SIR mode, CRC is received, -> RX_LEN=TX_LEN+2 */
+/* remarks: test accepts large blocks. Standard is 0x80 */
+/* When TT_LEN > RX_LEN (SIR mode) data is stored in successive slots. */
+/* When 3 or more slots are needed for each test packet, */
+/* data received in the first slots is overwritten, even */
+/* if OBOE_CTL_RX_HW_OWNS is not set, without any error! */
+#define TT_LEN 0x80
+#define TX_LEN 0xc00
+#define RX_LEN 0xc04
+/* Real transmitted length (SIR mode) is about 14+(2%*TX_LEN) more */
+/* longer than the user-defined length (see async_wrap_skb) and is less than 4K */
+/* Real received length is (max RX_LEN) differs from user-defined */
+/* length only by the CRC (2 or 4 bytes) */
+#define BUF_SAFETY 0x7a
+#define RX_BUF_SZ (RX_LEN)
+#define TX_BUF_SZ (TX_LEN+BUF_SAFETY)
+
+
+/* Logic of the netdev part of this driver */
+
+/* The RX ring is filled with buffers, when a packet arrives */
+/* it is DMA'd into the buffer which is marked used and RxDone called */
+/* RxDone forms an skb (and checks the CRC if in SIR mode) and ships */
+/* the packet off upstairs */
+
+/* The transmitter on the oboe chip can work in one of two modes */
+/* for each ring->tx[] the transmitter can either */
+/* a) transmit the packet, leave the transmitter enabled and proceed to */
+/* the next ring */
+/* OR */
+/* b) transmit the packet, switch off the transmitter and issue TxDone */
+
+/* All packets are entered into the ring in mode b), if the ring was */
+/* empty the transmitter is started. */
+
+/* If OPTIMIZE_TX is defined then in TxDone if the ring contains */
+/* more than one packet, all but the last are set to mode a) [HOWEVER */
+/* the hardware may not notice this, this is why we start in mode b) ] */
+/* then restart the transmitter */
+
+/* If OPTIMIZE_TX is not defined then we just restart the transmitter */
+/* if the ring isn't empty */
+
+/* Speed changes are delayed until the TxRing is empty */
+/* mtt is handled by generating packets with bad CRCs, before the data */
+
+/* TODO: */
+/* check the mtt works ok */
+/* finish the watchdog */
+
+/* No user serviceable parts below here */
+
+#define STATIC static
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+//#include <net/irda/irmod.h>
+//#include <net/irda/irlap_frame.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/crc.h>
+
+#include "donauboe.h"
+
+#define INB(port) inb_p(port)
+#define OUTB(val,port) outb_p(val,port)
+#define OUTBP(val,port) outb_p(val,port)
+
+#define PROMPT OUTB(OBOE_PROMPT_BIT,OBOE_PROMPT);
+
+#if PROBE_VERBOSE
+#define PROBE_DEBUG(args...) (printk (args))
+#else
+#define PROBE_DEBUG(args...) ;
+#endif
+
+/* Set the DMA to be byte at a time */
+#define CONFIG0H_DMA_OFF OBOE_CONFIG0H_RCVANY
+#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
+#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
+
+static struct pci_device_id toshoboe_pci_tbl[] __initdata = {
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, toshoboe_pci_tbl);
+
+#define DRIVER_NAME "toshoboe"
+static char *driver_name = DRIVER_NAME;
+
+static int max_baud = 4000000;
+static int do_probe = DO_PROBE;
+
+
+/**********************************************************************/
+/* Fcs code */
+
+#ifdef CRC_EXPORTED
+extern __u16 const irda_crc16_table[];
+#else
+static __u16 const irda_crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+ 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+ 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+ 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+ 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+ 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+ 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+ 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+ 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+ 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+ 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+ 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+ 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+ 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+ 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+ 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+ 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+ 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+ 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+ 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+ 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+ 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+ 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+ 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+ 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+ 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+ 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+ 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+ 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+ 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+ 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+ 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+};
+#endif
+
+STATIC int
+toshoboe_checkfcs (unsigned char *buf, int len)
+{
+ int i;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ fcs.value = INIT_FCS;
+
+ for (i = 0; i < len; ++i)
+ fcs.value = irda_fcs (fcs.value, *(buf++));
+
+ return (fcs.value == GOOD_FCS);
+}
+
+/***********************************************************************/
+/* Generic chip handling code */
+#ifdef DUMP_PACKETS
+static unsigned char dump[50];
+STATIC void
+_dumpbufs (unsigned char *data, int len, char tete)
+{
+int i,j;
+char head=tete;
+for (i=0;i<len;i+=16) {
+ for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); }
+ dump [3*j]=0;
+ IRDA_DEBUG (2, "%c%s\n",head , dump);
+ head='+';
+ }
+}
+#endif
+
+/* Dump the registers */
+STATIC void
+toshoboe_dumpregs (struct toshoboe_cb *self)
+{
+ __u32 ringbase;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ ringbase = INB (OBOE_RING_BASE0) << 10;
+ ringbase |= INB (OBOE_RING_BASE1) << 18;
+ ringbase |= INB (OBOE_RING_BASE2) << 26;
+
+ printk (KERN_ERR DRIVER_NAME ": Register dump:\n");
+ printk (KERN_ERR "Interrupts: Tx:%d Rx:%d TxUnder:%d RxOver:%d Sip:%d\n",
+ self->int_tx, self->int_rx, self->int_txunder, self->int_rxover,
+ self->int_sip);
+ printk (KERN_ERR "RX %02x TX %02x RingBase %08x\n",
+ INB (OBOE_RXSLOT), INB (OBOE_TXSLOT), ringbase);
+ printk (KERN_ERR "RING_SIZE %02x IER %02x ISR %02x\n",
+ INB (OBOE_RING_SIZE), INB (OBOE_IER), INB (OBOE_ISR));
+ printk (KERN_ERR "CONFIG1 %02x STATUS %02x\n",
+ INB (OBOE_CONFIG1), INB (OBOE_STATUS));
+ printk (KERN_ERR "CONFIG0 %02x%02x ENABLE %02x%02x\n",
+ INB (OBOE_CONFIG0H), INB (OBOE_CONFIG0L),
+ INB (OBOE_ENABLEH), INB (OBOE_ENABLEL));
+ printk (KERN_ERR "NEW_PCONFIG %02x%02x CURR_PCONFIG %02x%02x\n",
+ INB (OBOE_NEW_PCONFIGH), INB (OBOE_NEW_PCONFIGL),
+ INB (OBOE_CURR_PCONFIGH), INB (OBOE_CURR_PCONFIGL));
+ printk (KERN_ERR "MAXLEN %02x%02x RXCOUNT %02x%02x\n",
+ INB (OBOE_MAXLENH), INB (OBOE_MAXLENL),
+ INB (OBOE_RXCOUNTL), INB (OBOE_RXCOUNTH));
+
+ if (self->ring)
+ {
+ int i;
+ ringbase = virt_to_bus (self->ring);
+ printk (KERN_ERR "Ring at %08x:\n", ringbase);
+ printk (KERN_ERR "RX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ printk ("\n");
+ printk (KERN_ERR "TX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ printk ("\n");
+ }
+}
+
+/*Don't let the chip look at memory */
+STATIC void
+toshoboe_disablebm (struct toshoboe_cb *self)
+{
+ __u8 command;
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_byte (self->pdev, PCI_COMMAND, command);
+
+}
+
+/* Shutdown the chip and point the taskfile reg somewhere else */
+STATIC void
+toshoboe_stopchip (struct toshoboe_cb *self)
+{
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ /*Disable interrupts */
+ OUTB (0x0, OBOE_IER);
+ /*Disable DMA, Disable Rx, Disable Tx */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ /*Disable SIR MIR FIR, Tx and Rx */
+ OUTB (0x00, OBOE_ENABLEH);
+ /*Point the ring somewhere safe */
+ OUTB (0x3f, OBOE_RING_BASE2);
+ OUTB (0xff, OBOE_RING_BASE1);
+ OUTB (0xff, OBOE_RING_BASE0);
+
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+ /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Why */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*switch it off */
+ OUTB (OBOE_CONFIG1_OFF, OBOE_CONFIG1);
+
+ toshoboe_disablebm (self);
+}
+
+/* Transmitter initialization */
+STATIC void
+toshoboe_start_DMA (struct toshoboe_cb *self, int opts)
+{
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON | opts, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+}
+
+/*Set the baud rate */
+STATIC void
+toshoboe_setbaud (struct toshoboe_cb *self)
+{
+ __u16 pconfig = 0;
+ __u8 config0l = 0;
+
+ IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed);
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+#ifdef USE_MIR
+ case 1152000:
+#endif
+ case 4000000:
+ break;
+ default:
+
+ printk (KERN_ERR DRIVER_NAME ": switch to unsupported baudrate %d\n",
+ self->speed);
+ return;
+ }
+
+ switch (self->speed)
+ {
+ /* For SIR the preamble is done by adding XBOFs */
+ /* to the packet */
+ /* set to filtered SIR mode, filter looks for BOF and EOF */
+ case 2400:
+ pconfig |= 47 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 4800:
+ pconfig |= 23 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 9600:
+ pconfig |= 11 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 19200:
+ pconfig |= 5 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 38400:
+ pconfig |= 2 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 57600:
+ pconfig |= 1 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 115200:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ default:
+ /*Set to packet based reception */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ break;
+ }
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ config0l = OBOE_CONFIG0L_ENSIR;
+ if (self->async)
+ {
+ /*Set to character based reception */
+ /*System will lock if MAXLEN=0 */
+ /*so have to be careful */
+ OUTB (0x01, OBOE_MAXLENH);
+ OUTB (0x01, OBOE_MAXLENL);
+ OUTB (0x00, OBOE_MAXLENH);
+ }
+ else
+ {
+ /*Set to packet based reception */
+ config0l |= OBOE_CONFIG0L_ENSIRF;
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ }
+ break;
+
+#ifdef USE_MIR
+ /* MIR mode */
+ /* Set for 16 bit CRC and enable MIR */
+ /* Preamble now handled by the chip */
+ case 1152000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 8 << OBOE_PCONFIG_WIDTHSHIFT;
+ pconfig |= 1 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_CRC16 | OBOE_CONFIG0L_ENMIR;
+ break;
+#endif
+ /* FIR mode */
+ /* Set for 32 bit CRC and enable FIR */
+ /* Preamble handled by the chip */
+ case 4000000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ /* Documentation says 14, but toshiba use 15 in their drivers */
+ pconfig |= 15 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_ENFIR;
+ break;
+ }
+
+ /* Copy into new PHY config buffer */
+ OUTBP (pconfig >> 8, OBOE_NEW_PCONFIGH);
+ OUTB (pconfig & 0xff, OBOE_NEW_PCONFIGL);
+ OUTB (config0l, OBOE_CONFIG0L);
+
+ /* Now make OBOE copy from new PHY to current PHY */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+
+ /* speed change executed */
+ self->new_speed = 0;
+ self->io.speed = self->speed;
+}
+
+/*Let the chip look at memory */
+STATIC void
+toshoboe_enablebm (struct toshoboe_cb *self)
+{
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+ pci_set_master (self->pdev);
+}
+
+/*setup the ring */
+STATIC void
+toshoboe_initring (struct toshoboe_cb *self)
+{
+ int i;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->ring->tx[i].len = 0;
+ self->ring->tx[i].control = 0x00;
+ self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]);
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->ring->rx[i].len = RX_LEN;
+ self->ring->rx[i].len = 0;
+ self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]);
+ self->ring->rx[i].control = OBOE_CTL_RX_HW_OWNS;
+ }
+}
+
+STATIC void
+toshoboe_resetptrs (struct toshoboe_cb *self)
+{
+ /* Can reset pointers by twiddling DMA */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTBP (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->rxs = inb_p (OBOE_RXSLOT) & OBOE_SLOT_MASK;
+ self->txs = inb_p (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+}
+
+/* Called in locked state */
+STATIC void
+toshoboe_initptrs (struct toshoboe_cb *self)
+{
+
+ /* spin_lock_irqsave(self->spinlock, flags); */
+ /* save_flags (flags); */
+
+ /* Can reset pointers by twiddling DMA */
+ toshoboe_resetptrs (self);
+
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->txpending = 0;
+
+ /* spin_unlock_irqrestore(self->spinlock, flags); */
+ /* restore_flags (flags); */
+}
+
+/* Wake the chip up and get it looking at the rings */
+/* Called in locked state */
+STATIC void
+toshoboe_startchip (struct toshoboe_cb *self)
+{
+ __u32 physaddr;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ toshoboe_initring (self);
+ toshoboe_enablebm (self);
+ OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
+ OUTBP (OBOE_CONFIG1_ON, OBOE_CONFIG1);
+
+ /* Stop the clocks */
+ OUTB (0, OBOE_ENABLEH);
+
+ /*Set size of rings */
+ OUTB (RING_SIZE, OBOE_RING_SIZE);
+
+ /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Enable ints */
+ OUTB (OBOE_INT_TXDONE | OBOE_INT_RXDONE |
+ OBOE_INT_TXUNDER | OBOE_INT_RXOVER | OBOE_INT_SIP , OBOE_IER);
+
+ /*Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Set the maximum packet length to 0xfff (4095) */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+ /*Shutdown DMA */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+
+ /*Find out where the rings live */
+ physaddr = virt_to_bus (self->ring);
+
+ ASSERT ((physaddr & 0x3ff) == 0,
+ printk (KERN_ERR DRIVER_NAME "ring not correctly aligned\n");
+ return;);
+
+ OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0);
+ OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1);
+ OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2);
+
+ /*Enable DMA controller in byte mode and RX */
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+
+ /* Start up the clocks */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*set to sensible speed */
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+}
+
+STATIC void
+toshoboe_isntstuck (struct toshoboe_cb *self)
+{
+}
+
+STATIC void
+toshoboe_checkstuck (struct toshoboe_cb *self)
+{
+ unsigned long flags;
+
+ if (0)
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ /* This will reset the chip completely */
+ printk (KERN_ERR DRIVER_NAME ": Resetting chip\n");
+
+ toshoboe_stopchip (self);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+}
+
+/*Generate packet of about mtt us long */
+STATIC int
+toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
+{
+ int xbofs;
+
+ xbofs = ((int) (mtt/100)) * (int) (self->speed);
+ xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
+ xbofs++;
+
+ IRDA_DEBUG (2, DRIVER_NAME
+ ": generated mtt of %d bytes for %d us at %d baud\n"
+ , xbofs,mtt,self->speed);
+
+ if (xbofs > TX_LEN)
+ {
+ printk (KERN_ERR DRIVER_NAME ": wanted %d bytes MTT but TX_LEN is %d\n",
+ xbofs, TX_LEN);
+ xbofs = TX_LEN;
+ }
+
+ /*xbofs will do for SIR, MIR and FIR,SIR mode doesn't generate a checksum anyway */
+ memset (buf, XBOF, xbofs);
+
+ return xbofs;
+}
+
+/***********************************************************************/
+/* Probe code */
+
+STATIC void
+toshoboe_dumptx (struct toshoboe_cb *self)
+{
+ int i;
+ PROBE_DEBUG(KERN_WARNING "TX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ PROBE_DEBUG(" [%d]\n",self->speed);
+}
+
+STATIC void
+toshoboe_dumprx (struct toshoboe_cb *self, int score)
+{
+ int i;
+ PROBE_DEBUG(" %d\nRX:",score);
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ PROBE_DEBUG("\n");
+}
+
+static inline int
+stuff_byte (__u8 byte, __u8 * buf)
+{
+ switch (byte)
+ {
+ case BOF: /* FALLTHROUGH */
+ case EOF: /* FALLTHROUGH */
+ case CE:
+ /* Insert transparently coded */
+ buf[0] = CE; /* Send link escape */
+ buf[1] = byte ^ IRDA_TRANS; /* Complement bit 5 */
+ return 2;
+ /* break; */
+ default:
+ /* Non-special value, no transparency required */
+ buf[0] = byte;
+ return 1;
+ /* break; */
+ }
+}
+
+STATIC int toshoboe_invalid_dev(int irq)
+{
+ printk (KERN_WARNING DRIVER_NAME ": irq %d for unknown device.\n", irq);
+ return 1;
+}
+
+STATIC void
+toshoboe_probeinterrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
+ __u8 irqstat;
+
+ if (self == NULL && toshoboe_invalid_dev(irq))
+ return;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp;
+
+ self->int_tx++;
+ PROBE_DEBUG("T");
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ self->int_tx+=100;
+ PROBE_DEBUG("S");
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+ }
+ }
+
+ if (irqstat & OBOE_INT_RXDONE) {
+ self->int_rx++;
+ PROBE_DEBUG("R"); }
+ if (irqstat & OBOE_INT_TXUNDER) {
+ self->int_txunder++;
+ PROBE_DEBUG("U"); }
+ if (irqstat & OBOE_INT_RXOVER) {
+ self->int_rxover++;
+ PROBE_DEBUG("O"); }
+ if (irqstat & OBOE_INT_SIP) {
+ self->int_sip++;
+ PROBE_DEBUG("I"); }
+}
+
+STATIC int
+toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
+{
+ int i;
+ int len = 0;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ if (fir)
+ {
+ memset (buf, 0, TT_LEN);
+ return (TT_LEN);
+ }
+
+ fcs.value = INIT_FCS;
+
+ memset (buf, XBOF, 10);
+ len += 10;
+ buf[len++] = BOF;
+
+ for (i = 0; i < TT_LEN; ++i)
+ {
+ len += stuff_byte (i, buf + len);
+ fcs.value = irda_fcs (fcs.value, i);
+ }
+
+ len += stuff_byte (fcs.bytes[0] ^ badcrc, buf + len);
+ len += stuff_byte (fcs.bytes[1] ^ badcrc, buf + len);
+ buf[len++] = EOF;
+ len++;
+ return len;
+}
+
+STATIC int
+toshoboe_probefail (struct toshoboe_cb *self, char *msg)
+{
+ printk (KERN_ERR DRIVER_NAME "probe(%d) failed %s\n",self-> speed, msg);
+ toshoboe_dumpregs (self);
+ toshoboe_stopchip (self);
+ free_irq (self->io.irq, (void *) self);
+ return 0;
+}
+
+STATIC int
+toshoboe_numvalidrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if ((self->ring->rx[i].control & 0xe0) == 0)
+ ret++;
+
+ return ret;
+}
+
+STATIC int
+toshoboe_numrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if (!(self->ring->rx[i].control & OBOE_CTL_RX_HW_OWNS))
+ ret++;
+
+ return ret;
+}
+
+STATIC int
+toshoboe_probe (struct toshoboe_cb *self)
+{
+ int i, j, n;
+#ifdef USE_MIR
+ int bauds[] = { 9600, 115200, 4000000, 1152000 };
+#else
+ int bauds[] = { 9600, 115200, 4000000 };
+#endif
+ unsigned long flags;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if (request_irq (self->io.irq, toshoboe_probeinterrupt,
+ self->io.irqflags, "toshoboe", (void *) self))
+ {
+ printk (KERN_ERR DRIVER_NAME ": probe failed to allocate irq %d\n",
+ self->io.irq);
+ return 0;
+ }
+
+ /* test 1: SIR filter and back to back */
+
+ for (j = 0; j < (sizeof (bauds) / sizeof (int)); ++j)
+ {
+ int fir = (j > 1);
+ toshoboe_stopchip (self);
+
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ /*Address is already setup */
+ toshoboe_startchip (self);
+ self->int_rx = self->int_tx = 0;
+ self->speed = bauds[j];
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->ring->tx[self->txs].control =
+/* (FIR only) OBOE_CTL_TX_SIP needed for switching to next slot */
+/* MIR: all received data is stored in one slot */
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_SIP
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ | OBOE_CTL_TX_SIP | OBOE_CTL_TX_BAD_CRC
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ toshoboe_dumptx (self);
+ /* Turn on TX and RX and loopback */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ n = fir ? 1 : 4;
+ while (toshoboe_numvalidrcvs (self) != n)
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "filter test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+
+ n = fir ? 203 : 102;
+ while ((toshoboe_numrcvs(self) != self->int_rx) || (self->int_tx != n))
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "interrupt test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ }
+
+ /* test 2: SIR in char at a time */
+
+ toshoboe_stopchip (self);
+ self->int_rx = self->int_tx = 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->async = 1;
+ self->speed = 115200;
+ toshoboe_setbaud (self);
+ self->ring->tx[self->txs].control =
+ OBOE_CTL_TX_RTCENTX | OBOE_CTL_TX_HW_OWNS;
+ self->ring->tx[self->txs].len = 4;
+
+ ((unsigned char *) self->tx_bufs[self->txs])[0] = 'f';
+ ((unsigned char *) self->tx_bufs[self->txs])[1] = 'i';
+ ((unsigned char *) self->tx_bufs[self->txs])[2] = 's';
+ ((unsigned char *) self->tx_bufs[self->txs])[3] = 'h';
+ toshoboe_dumptx (self);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ while (toshoboe_numvalidrcvs (self) != 4)
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async test");
+ udelay (100);
+ i++;
+ }
+
+ while ((toshoboe_numrcvs (self) != self->int_rx) || (self->int_tx != 1))
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async interrupt test");
+ udelay (100);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ self->async = 0;
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_stopchip (self);
+
+ free_irq (self->io.irq, (void *) self);
+
+ printk (KERN_WARNING DRIVER_NAME ": Self test passed ok\n");
+
+ return 1;
+}
+
+/******************************************************************/
+/* Netdev style code */
+
+/* Transmit something */
+STATIC int
+toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ __s32 speed;
+ int mtt, len, ctl;
+ unsigned long flags;
+ struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
+
+ self = (struct toshoboe_cb *) dev->priv;
+
+ ASSERT (self != NULL, return 0; );
+
+ IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__
+ ,skb->len,self->txpending,INB (OBOE_ENABLEH));
+ if (!cb->magic) {
+ IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic);
+#ifdef DUMP_PACKETS
+ _dumpbufs(skb->data,skb->len,'>');
+#endif
+ }
+
+ /* change speed pending, wait for its execution */
+ if (self->new_speed)
+ return -EBUSY;
+
+ /* device stopped (apm) wait for restart */
+ if (self->stopped)
+ return -EBUSY;
+
+ toshoboe_checkstuck (self);
+
+ /* Check if we need to change the speed */
+ /* But not now. Wait after transmission if mtt not required */
+ speed=irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1))
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending || skb->len)
+ {
+ self->new_speed = speed;
+ IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
+ __FUNCTION__, speed);
+ /* if no data, that's all! */
+ if (!skb->len)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return 0;
+ }
+ /* True packet, go on, but */
+ /* do not accept anything before change speed execution */
+ netif_stop_queue(dev);
+ /* ready to process TxDone interrupt */
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+ else
+ {
+ /* idle and no data, change speed now */
+ self->speed = speed;
+ toshoboe_setbaud (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return 0;
+ }
+
+ }
+
+ if ((mtt = irda_get_mtt(skb)))
+ {
+ /* This is fair since the queue should be empty anyway */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -EBUSY;
+ }
+
+ /* If in SIR mode we need to generate a string of XBOFs */
+ /* In MIR and FIR we need to generate a string of data */
+ /* which we will add a wrong checksum to */
+
+ mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
+ IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__
+ ,skb->len,mtt,self->txpending);
+ if (mtt)
+ {
+ self->ring->tx[self->txs].len = mtt & 0xfff;
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC | OBOE_CTL_TX_SIP ;
+ }
+#ifdef USE_MIR
+ else if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_MIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC;
+ }
+#endif
+ self->ring->tx[self->txs].control = ctl;
+
+ OUTB (0x0, OBOE_ENABLEH);
+ /* It is only a timer. Do not send mtt packet outside! */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ }
+ else
+ {
+ printk(KERN_ERR DRIVER_NAME ": problem with mtt packet - ignored\n");
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+
+#ifdef DUMP_PACKETS
+dumpbufs(skb->data,skb->len,'>');
+#endif
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__
+ ,skb->len, self->ring->tx[self->txs].control, self->txpending);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return -EBUSY;
+ }
+
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
+ {
+ len = async_wrap_skb (skb, self->tx_bufs[self->txs], TX_BUF_SZ);
+ }
+ else
+ {
+ len = skb->len;
+ memcpy (self->tx_bufs[self->txs], skb->data, len);
+ }
+ self->ring->tx[self->txs].len = len & 0x0fff;
+
+ /*Sometimes the HW doesn't see us assert RTCENTX in the interrupt code */
+ /*later this plays safe, we guarantee the last packet to be transmitted */
+ /*has RTCENTX set */
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_SIP ;
+ }
+ self->ring->tx[self->txs].control = ctl;
+
+ /* If transmitter is idle start in one-shot mode */
+
+ if (!self->txpending)
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+
+ return 0;
+}
+
+/*interrupt handler */
+STATIC void
+toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
+ __u8 irqstat;
+ struct sk_buff *skb = NULL;
+
+ if (self == NULL && toshoboe_invalid_dev(irq))
+ return;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ toshoboe_isntstuck (self);
+
+/* Txdone */
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp, txpc;
+ int i;
+
+ txp = self->txpending;
+ self->txpending = 0;
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
+ self->txpending++;
+ }
+ IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__
+ ,irqstat,txp,self->txpending);
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+
+ /* Got anything queued ? start it together */
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txpc = txp;
+#ifdef OPTIMIZE_TX
+ while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txp = txpc;
+ txpc++;
+ txpc %= TX_SLOTS;
+ self->stats.tx_packets++;
+ if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
+ }
+ self->stats.tx_packets--;
+#else
+ self->stats.tx_packets++;
+#endif
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ }
+
+ if ((!self->txpending) && (self->new_speed))
+ {
+ self->speed = self->new_speed;
+ IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
+ __FUNCTION__, self->speed);
+ toshoboe_setbaud (self);
+ }
+
+ /* Tell network layer that we want more frames */
+ if (!self->new_speed)
+ netif_wake_queue(self->netdev);
+ }
+
+ if (irqstat & OBOE_INT_RXDONE)
+ {
+ while (!(self->ring->rx[self->rxs].control & OBOE_CTL_RX_HW_OWNS))
+ {
+ int len = self->ring->rx[self->rxs].len;
+ skb = NULL;
+ IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__
+ ,len,self->ring->rx[self->rxs].control);
+
+#ifdef DUMP_PACKETS
+dumpbufs(self->rx_bufs[self->rxs],len,'<');
+#endif
+
+ if (self->ring->rx[self->rxs].control == 0)
+ {
+ __u8 enable = INB (OBOE_ENABLEH);
+
+ /* In SIR mode we need to check the CRC as this */
+ /* hasn't been done by the hardware */
+ if (enable & OBOE_ENABLEH_SIRON)
+ {
+ if (!toshoboe_checkfcs (self->rx_bufs[self->rxs], len))
+ len = 0;
+ /*Trim off the CRC */
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable);
+ }
+
+#ifdef USE_MIR
+ else if (enable & OBOE_ENABLEH_MIRON)
+ {
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable);
+ }
+#endif
+ else if (enable & OBOE_ENABLEH_FIRON)
+ {
+ if (len > 3)
+ len -= 4; /*FIXME: check this */
+ else
+ len = 0;
+ IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable);
+ }
+ else
+ IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable);
+
+ if (len)
+ {
+ skb = dev_alloc_skb (len + 1);
+ if (skb)
+ {
+ skb_reserve (skb, 1);
+
+ skb_put (skb, len);
+ memcpy (skb->data, self->rx_bufs[self->rxs], len);
+
+ self->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons (ETH_P_IRDA);
+ }
+ else
+ {
+ printk (KERN_INFO
+ "%s(), memory squeeze, dropping frame.\n",
+ __FUNCTION__);
+ }
+ }
+ }
+ else
+ {
+ /* TODO: =========================================== */
+ /* if OBOE_CTL_RX_LENGTH, our buffers are too small */
+ /* (MIR or FIR) data is lost. */
+ /* (SIR) data is split into several slots. */
+ /* we have to join all the buffers received */
+ /*in a large buffer before checking CRC. */
+ IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__
+ ,len,self->ring->rx[self->rxs].control);
+ }
+
+ self->ring->rx[self->rxs].len = 0x0;
+ self->ring->rx[self->rxs].control = OBOE_CTL_RX_HW_OWNS;
+
+ self->rxs++;
+ self->rxs %= RX_SLOTS;
+
+ if (skb)
+ netif_rx (skb);
+
+ }
+ }
+
+ if (irqstat & OBOE_INT_TXUNDER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n");
+ }
+ if (irqstat & OBOE_INT_RXOVER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": rx fifo overflow\n");
+ }
+/* This must be useful for something... */
+ if (irqstat & OBOE_INT_SIP)
+ {
+ self->int_sip++;
+ IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__
+ ,self->int_sip,irqstat,self->txpending);
+ }
+}
+
+STATIC int
+toshoboe_net_init (struct net_device *dev)
+{
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ /* Setup to be a normal IrDA network device driver */
+ irda_device_setup (dev);
+
+ /* Insert overrides below this line! */
+ return 0;
+}
+
+STATIC int
+toshoboe_net_open (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ unsigned long flags;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ ASSERT (dev != NULL, return -1; );
+ self = (struct toshoboe_cb *) dev->priv;
+
+ ASSERT (self != NULL, return 0; );
+
+ if (self->async)
+ return -EBUSY;
+
+ if (self->stopped)
+ return 0;
+
+ if (request_irq (self->io.irq, toshoboe_interrupt,
+ SA_SHIRQ | SA_INTERRUPT, dev->name, (void *) self))
+ {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open (dev, &self->qos, driver_name);
+
+ self->irdad = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+STATIC int
+toshoboe_net_close (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ ASSERT (dev != NULL, return -1; );
+ self = (struct toshoboe_cb *) dev->priv;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close (self->irlap);
+ self->irlap = NULL;
+
+ self->irdad = 0;
+
+ free_irq (self->io.irq, (void *) self);
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ * Function toshoboe_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+STATIC int
+toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct toshoboe_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ ASSERT (dev != NULL, return -1; );
+
+ self = dev->priv;
+
+ ASSERT (self != NULL, return -1; );
+
+ IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ switch (cmd)
+ {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ /* This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+ IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __FUNCTION__
+ ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
+ if (!in_interrupt () && !capable (CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* self->speed=irq->ifr_baudrate; */
+ /* toshoboe_setbaud(self); */
+ /* Just change speed once - inserted by Paul Bristow */
+ self->new_speed = irq->ifr_baudrate;
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__
+ ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
+ if (!capable (CAP_NET_ADMIN))
+ return -EPERM;
+ irda_device_set_media_busy (self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
+ IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__
+ ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
+ break;
+ default:
+ IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return ret;
+
+}
+
+MODULE_DESCRIPTION("Toshiba OBOE IrDA Device Driver");
+MODULE_AUTHOR("James McKenzie <james@fishsoup.dhs.org>");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM (max_baud, "i");
+MODULE_PARM_DESC(max_baud, "Maximum baud rate");
+
+MODULE_PARM (do_probe, "i");
+MODULE_PARM_DESC(do_probe, "Enable/disable chip probing and self-test");
+
+STATIC void
+toshoboe_close (struct pci_dev *pci_dev)
+{
+ int i;
+ struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ ASSERT (self != NULL, return; );
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ kfree (self->tx_bufs[i]);
+ self->tx_bufs[i] = NULL;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ kfree (self->rx_bufs[i]);
+ self->rx_bufs[i] = NULL;
+ }
+
+ if (self->netdev)
+ {
+ /* Remove netdevice */
+ rtnl_lock ();
+ unregister_netdevice (self->netdev);
+ rtnl_unlock ();
+ }
+
+ kfree (self->ringbuf);
+ self->ringbuf = NULL;
+ self->ring = NULL;
+
+ return;
+}
+
+STATIC int
+toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
+{
+ struct toshoboe_cb *self;
+ struct net_device *dev;
+ int i = 0;
+ int ok = 0;
+ int err;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if ((err=pci_enable_device(pci_dev)))
+ return err;
+
+ self = kmalloc (sizeof (struct toshoboe_cb), GFP_KERNEL);
+
+ if (self == NULL)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate memory for "
+ "IrDA control block\n");
+ return -ENOMEM;
+ }
+
+ memset (self, 0, sizeof (struct toshoboe_cb));
+
+ self->pdev = pci_dev;
+ self->base = pci_resource_start(pci_dev,0);
+
+ self->io.fir_base = self->base;
+ self->io.fir_ext = OBOE_IO_EXTENT;
+ self->io.irq = pci_dev->irq;
+ self->io.irqflags = SA_SHIRQ | SA_INTERRUPT;
+
+ self->speed = self->io.speed = 9600;
+ self->async = 0;
+
+ /* Lock the port that we need */
+ if (NULL==request_region (self->io.fir_base, self->io.fir_ext, driver_name))
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't get iobase of 0x%03x\n"
+ ,self->io.fir_base);
+ err = -EBUSY;
+ goto freeself;
+ }
+
+ spin_lock_init(&self->spinlock);
+
+ irda_init_max_qos_capabilies (&self->qos);
+ self->qos.baud_rate.bits = 0;
+
+ if (max_baud >= 2400)
+ self->qos.baud_rate.bits |= IR_2400;
+ /*if (max_baud>=4800) idev->qos.baud_rate.bits|=IR_4800; */
+ if (max_baud >= 9600)
+ self->qos.baud_rate.bits |= IR_9600;
+ if (max_baud >= 19200)
+ self->qos.baud_rate.bits |= IR_19200;
+ if (max_baud >= 115200)
+ self->qos.baud_rate.bits |= IR_115200;
+#ifdef USE_MIR
+ if (max_baud >= 1152000)
+ {
+ self->qos.baud_rate.bits |= IR_1152000;
+ self->flags |= IFF_MIR;
+ }
+#endif
+ if (max_baud >= 4000000)
+ {
+ self->qos.baud_rate.bits |= (IR_4000000 << 8);
+ self->flags |= IFF_FIR;
+ }
+
+ /*FIXME: work this out... */
+ self->qos.min_turn_time.bits = 0xff;
+
+ irda_qos_bits_to_value (&self->qos);
+
+ self->flags = IFF_SIR | IFF_DMA | IFF_PIO;
+
+ /* Allocate twice the size to guarantee alignment */
+ self->ringbuf = (void *) kmalloc (OBOE_RING_LEN << 1, GFP_KERNEL);
+ if (!self->ringbuf)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
+ err = -ENOMEM;
+ goto freeregion;
+ }
+
+ /*We need to align the taskfile on a taskfile size boundary */
+ {
+ __u32 addr;
+
+ addr = (__u32) self->ringbuf;
+ addr &= ~(OBOE_RING_LEN - 1);
+ addr += OBOE_RING_LEN;
+ self->ring = (struct OboeRing *) addr;
+ }
+
+ memset (self->ring, 0, OBOE_RING_LEN);
+ self->io.mem_base = (__u32) self->ring;
+
+ ok = 1;
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->tx_bufs[i] = kmalloc (TX_BUF_SZ, GFP_KERNEL);
+ if (!self->tx_bufs[i])
+ ok = 0;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->rx_bufs[i] = kmalloc (RX_BUF_SZ, GFP_KERNEL);
+ if (!self->rx_bufs[i])
+ ok = 0;
+ }
+
+ if (!ok)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate rx/tx buffers\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+
+ if (do_probe)
+ if (!toshoboe_probe (self))
+ {
+ err = -ENODEV;
+ goto freebufs;
+ }
+
+ if (!(dev = dev_alloc ("irda%d", &err)))
+ {
+ printk (KERN_ERR DRIVER_NAME ": dev_alloc() failed\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+
+ dev->priv = (void *) self;
+ self->netdev = dev;
+
+ printk (KERN_INFO "IrDA: Registered device %s\n", dev->name);
+
+ dev->init = toshoboe_net_init;
+ dev->hard_start_xmit = toshoboe_hard_xmit;
+ dev->open = toshoboe_net_open;
+ dev->stop = toshoboe_net_close;
+ dev->do_ioctl = toshoboe_net_ioctl;
+
+ rtnl_lock ();
+ err = register_netdevice (dev);
+ rtnl_unlock ();
+ if (err)
+ {
+ printk (KERN_ERR DRIVER_NAME ": register_netdev() failed\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+
+ pci_set_drvdata(pci_dev,self);
+
+ printk (KERN_INFO DRIVER_NAME ": Using multiple tasks, version %s\n", rcsid);
+
+ return 0;
+
+freebufs:
+ for (i = 0; i < TX_SLOTS; ++i)
+ if (self->tx_bufs[i])
+ kfree (self->tx_bufs[i]);
+ for (i = 0; i < RX_SLOTS; ++i)
+ if (self->rx_bufs[i])
+ kfree (self->rx_bufs[i]);
+ kfree(self->ringbuf);
+
+freeregion:
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+freeself:
+ kfree (self);
+
+ return err;
+}
+
+STATIC int
+toshoboe_gotosleep (struct pci_dev *pci_dev, u32 crap)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ unsigned long flags;
+ int i = 10;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if (!self || self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+/* Flush all packets */
+ while ((i--) && (self->txpending))
+ udelay (10000);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_stopchip (self);
+ self->stopped = 1;
+ self->txpending = 0;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+STATIC int
+toshoboe_wakeup (struct pci_dev *pci_dev)
+{
+ struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ unsigned long flags;
+
+ IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
+
+ if (!self || !self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_startchip (self);
+ self->stopped = 0;
+
+ netif_wake_queue(self->netdev);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static struct pci_driver toshoboe_pci_driver = {
+ name : "toshoboe",
+ id_table : toshoboe_pci_tbl,
+ probe : toshoboe_open,
+ remove : toshoboe_close,
+ suspend : toshoboe_gotosleep,
+ resume : toshoboe_wakeup
+};
+
+int __init
+toshoboe_init (void)
+{
+ return pci_module_init(&toshoboe_pci_driver);
+}
+
+STATIC void __exit
+toshoboe_cleanup (void)
+{
+ pci_unregister_driver(&toshoboe_pci_driver);
+}
+
+module_init(toshoboe_init);
+module_exit(toshoboe_cleanup);
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
new file mode 100644
index 000000000000..2ab173d9a0e4
--- /dev/null
+++ b/drivers/net/irda/donauboe.h
@@ -0,0 +1,363 @@
+/*********************************************************************
+ *
+ * Filename: toshoboe.h
+ * Version: 2.16
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (sync headers)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (add lock to be used by spinlocks)
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ * IrDA chip set list from Toshiba Computer Engineering Corp.
+ * model method maker controler Version
+ * Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
+ * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3020CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 7020CT FIR,SIR ? ?
+ *
+ * Satell. 4090XCDT FIR,SIR ? ?
+ *
+ * Libretto 100CT FIR,SIR Toshiba Oboe
+ * Libretto 1000CT FIR,SIR Toshiba Oboe
+ *
+ * TECRA750DVD FIR,SIR Toshiba Oboe(Triangle) REV ID=14h
+ * TECRA780 FIR,SIR Toshiba Oboe(Sandlot) REV ID=32h,33h
+ * TECRA750CDT FIR,SIR Toshiba Oboe(Triangle) REV ID=13h,14h
+ * TECRA8000 FIR,SIR Toshiba Oboe(ISKUR) REV ID=23h
+ *
+ ********************************************************************/
+
+/* The documentation for this chip is allegedly released */
+/* However I have not seen it, nor have I managed to contact   */
+/* anyone who has. HOWEVER the chip bears a striking resemblance */
+/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
+/* the documentation for this is freely available at */
+/* http://www.toshiba.com/taec/components/Generic/TMPR3922.shtml */
+/* The mapping between the registers in that document and the */
+/* Registers in the 701 oboe chip are as follows */
+
+
+/* 3922 reg 701 regs, by bit numbers */
+/* 7- 0 15- 8 24-16 31-25 */
+/* $28 0x0 0x1 */
+/* $2c SEE NOTE 1 */
+/* $30 0x6 0x7 */
+/* $34 0x8 0x9 SEE NOTE 2 */
+/* $38 0x10 0x11 */
+/* $3C 0xe SEE NOTE 3 */
+/* $40 0x12 0x13 */
+/* $44 0x14 0x15 */
+/* $48 0x16 0x17 */
+/* $4c 0x18 0x19 */
+/* $50 0x1a 0x1b */
+
+/* FIXME: could be 0x1b 0x1a here */
+
+/* $54 0x1d 0x1c */
+/* $5C 0xf SEE NOTE 4 */
+/* $130 SEE NOTE 5 */
+/* $134 SEE NOTE 6 */
+/* */
+/* NOTES: */
+/* 1. The pointer to ring is packed in most unceremoniusly */
+/* 701 Register Address bits (A9-A0 must be zero) */
+/* 0x4: A17 A16 A15 A14 A13 A12 A11 A10 */
+/* 0x5: A25 A24 A23 A22 A21 A20 A19 A18 */
+/* 0x2: 0 0 A31 A30 A29 A28 A27 A26 */
+/* */
+/* 2. The M$ drivers do a write 0x1 to 0x9, however the 3922 */
+/* documentation would suggest that a write of 0x1 to 0x8 */
+/* would be more appropriate. */
+/* */
+/* 3. This assignment is tenuous at best, register 0xe seems to */
+/* have bits arranged 0 0 0 R/W R/W R/W R/W R/W */
+/* if either of the lower two bits are set the chip seems to */
+/* switch off */
+/* */
+/* 4. Bits 7-4 seem to be different 4 seems just to be generic */
+/* receiver busy flag */
+/* */
+/* 5. and 6. The IER and ISR have a different bit assignment */
+/* The lower three bits of both read back as ones */
+/* ISR is register 0xc, IER is register 0xd */
+/* 7 6 5 4 3 2 1 0 */
+/* 0xc: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* 0xd: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* TxDone xmit done (generated only if generate interrupt bit  */
+/* is set in the ring) */
+/* RxDone recv completed (or other recv condition if you set it */
+/* up */
+/* TxUnder underflow in Transmit FIFO */
+/* RxOver overflow in Recv FIFO */
+/* SipRcv received serial gap (or other condition you set) */
+/* Interrupts are enabled by writing a one to the IER register */
+/* Interrupts are cleared by writing a one to the ISR register */
+/* */
+/* 6. The remaining registers: 0x6 and 0x3 appear to be */
+/* reserved parts of 16 or 32 bit registers; the remainder     */
+/* 0xa 0xb 0x1e 0x1f could possibly be (by their behaviour) */
+/* the Unicast Filter register at $58. */
+/* */
+/* 7. While the core obviously expects 32 bit accesses all the */
+/* M$ drivers do 8 bit accesses, in fact the Miniport ones     */
+/* write and read back the byte several times (why?)           */
+
+
+#ifndef TOSHOBOE_H
+#define TOSHOBOE_H
+
+/* Registers */
+
+#define OBOE_IO_EXTENT 0x1f
+
+/*Receive and transmit slot pointers */
+#define OBOE_REG(i) (i+(self->base))
+#define OBOE_RXSLOT OBOE_REG(0x0)
+#define OBOE_TXSLOT OBOE_REG(0x1)
+#define OBOE_SLOT_MASK 0x3f
+
+#define OBOE_TXRING_OFFSET 0x200
+#define OBOE_TXRING_OFFSET_IN_SLOTS 0x40
+
+/*pointer to the ring */
+#define OBOE_RING_BASE0 OBOE_REG(0x4)
+#define OBOE_RING_BASE1 OBOE_REG(0x5)
+#define OBOE_RING_BASE2 OBOE_REG(0x2)
+#define OBOE_RING_BASE3 OBOE_REG(0x3)
+
+/*Number of slots in the ring */
+#define OBOE_RING_SIZE OBOE_REG(0x7)
+#define OBOE_RING_SIZE_RX4 0x00
+#define OBOE_RING_SIZE_RX8 0x01
+#define OBOE_RING_SIZE_RX16 0x03
+#define OBOE_RING_SIZE_RX32 0x07
+#define OBOE_RING_SIZE_RX64 0x0f
+#define OBOE_RING_SIZE_TX4 0x00
+#define OBOE_RING_SIZE_TX8 0x10
+#define OBOE_RING_SIZE_TX16 0x30
+#define OBOE_RING_SIZE_TX32 0x70
+#define OBOE_RING_SIZE_TX64 0xf0
+
+#define OBOE_RING_MAX_SIZE 64
+
+/*Causes the gubbins to re-examine the ring */
+#define OBOE_PROMPT OBOE_REG(0x9)
+#define OBOE_PROMPT_BIT 0x1
+
+/* Interrupt Status Register */
+#define OBOE_ISR OBOE_REG(0xc)
+/* Interrupt Enable Register */
+#define OBOE_IER OBOE_REG(0xd)
+/* Interrupt bits for IER and ISR */
+#define OBOE_INT_TXDONE 0x80
+#define OBOE_INT_RXDONE 0x40
+#define OBOE_INT_TXUNDER 0x20
+#define OBOE_INT_RXOVER 0x10
+#define OBOE_INT_SIP 0x08
+#define OBOE_INT_MASK 0xf8
+
+/*Reset Register */
+#define OBOE_CONFIG1 OBOE_REG(0xe)
+#define OBOE_CONFIG1_RST 0x01
+#define OBOE_CONFIG1_DISABLE 0x02
+#define OBOE_CONFIG1_4 0x08
+#define OBOE_CONFIG1_8 0x08
+
+#define OBOE_CONFIG1_ON 0x8
+#define OBOE_CONFIG1_RESET 0xf
+#define OBOE_CONFIG1_OFF 0xe
+
+#define OBOE_STATUS OBOE_REG(0xf)
+#define OBOE_STATUS_RXBUSY 0x10
+#define OBOE_STATUS_FIRRX 0x04
+#define OBOE_STATUS_MIRRX 0x02
+#define OBOE_STATUS_SIRRX 0x01
+
+
+/*Speed control registers */
+#define OBOE_CONFIG0L OBOE_REG(0x10)
+#define OBOE_CONFIG0H OBOE_REG(0x11)
+
+#define OBOE_CONFIG0H_TXONLOOP 0x80 /*Transmit when looping (dangerous) */
+#define OBOE_CONFIG0H_LOOP 0x40 /*Loopback Tx->Rx */
+#define OBOE_CONFIG0H_ENTX 0x10 /*Enable Tx */
+#define OBOE_CONFIG0H_ENRX 0x08 /*Enable Rx */
+#define OBOE_CONFIG0H_ENDMAC 0x04 /*Enable/reset* the DMA controller */
+#define OBOE_CONFIG0H_RCVANY 0x02 /*DMA mode 1=bytes, 0=dwords */
+
+#define OBOE_CONFIG0L_CRC16 0x80 /*CRC 1=16 bit 0=32 bit */
+#define OBOE_CONFIG0L_ENFIR 0x40 /*Enable FIR */
+#define OBOE_CONFIG0L_ENMIR 0x20 /*Enable MIR */
+#define OBOE_CONFIG0L_ENSIR 0x10 /*Enable SIR */
+#define OBOE_CONFIG0L_ENSIRF 0x08 /*Enable SIR framer */
+#define OBOE_CONFIG0L_SIRTEST 0x04 /*Enable SIR framer in MIR and FIR */
+#define OBOE_CONFIG0L_INVERTTX 0x02 /*Invert Tx Line */
+#define OBOE_CONFIG0L_INVERTRX 0x01 /*Invert Rx Line */
+
+#define OBOE_BOF OBOE_REG(0x12)
+#define OBOE_EOF OBOE_REG(0x13)
+
+#define OBOE_ENABLEL OBOE_REG(0x14)
+#define OBOE_ENABLEH OBOE_REG(0x15)
+
+#define OBOE_ENABLEH_PHYANDCLOCK 0x80 /*Toggle low to copy config in */
+#define OBOE_ENABLEH_CONFIGERR 0x40
+#define OBOE_ENABLEH_FIRON 0x20
+#define OBOE_ENABLEH_MIRON 0x10
+#define OBOE_ENABLEH_SIRON 0x08
+#define OBOE_ENABLEH_ENTX 0x04
+#define OBOE_ENABLEH_ENRX 0x02
+#define OBOE_ENABLEH_CRC16 0x01
+
+#define OBOE_ENABLEL_BROADCAST 0x01
+
+#define OBOE_CURR_PCONFIGL OBOE_REG(0x16) /*Current config */
+#define OBOE_CURR_PCONFIGH OBOE_REG(0x17)
+
+#define OBOE_NEW_PCONFIGL OBOE_REG(0x18)
+#define OBOE_NEW_PCONFIGH OBOE_REG(0x19)
+
+#define OBOE_PCONFIGH_BAUDMASK 0xfc
+#define OBOE_PCONFIGH_WIDTHMASK 0x04
+#define OBOE_PCONFIGL_WIDTHMASK 0xe0
+#define OBOE_PCONFIGL_PREAMBLEMASK 0x1f
+
+#define OBOE_PCONFIG_BAUDMASK 0xfc00
+#define OBOE_PCONFIG_BAUDSHIFT 10
+#define OBOE_PCONFIG_WIDTHMASK 0x04e0
+#define OBOE_PCONFIG_WIDTHSHIFT 5
+#define OBOE_PCONFIG_PREAMBLEMASK 0x001f
+#define OBOE_PCONFIG_PREAMBLESHIFT 0
+
+#define OBOE_MAXLENL OBOE_REG(0x1a)
+#define OBOE_MAXLENH OBOE_REG(0x1b)
+
+#define OBOE_RXCOUNTH OBOE_REG(0x1c) /*Reset on receipt */
+#define OBOE_RXCOUNTL OBOE_REG(0x1d) /*of whole packet */
+
+/* The PCI ID of the OBOE chip */
+#ifndef PCI_DEVICE_ID_FIR701
+#define PCI_DEVICE_ID_FIR701 0x0701
+#endif
+
+#ifndef PCI_DEVICE_ID_FIRD01
+#define PCI_DEVICE_ID_FIRD01 0x0d01
+#endif
+
+struct OboeSlot
+{
+  __u16 len;                    /*Twelve bits of packet length */
+ __u8 unused;
+ __u8 control; /*Slot control/status see below */
+ __u32 address; /*Slot buffer address */
+}
+__attribute__ ((packed));
+
+#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
+
+struct OboeRing
+{
+ struct OboeSlot rx[OBOE_NTASKS];
+ struct OboeSlot tx[OBOE_NTASKS];
+};
+
+#define OBOE_RING_LEN (sizeof(struct OboeRing))
+
+
+#define OBOE_CTL_TX_HW_OWNS 0x80 /*W/R This slot owned by the hardware */
+#define OBOE_CTL_TX_DISTX_CRC 0x40 /*W Disable CRC generation for [FM]IR */
+#define OBOE_CTL_TX_BAD_CRC 0x20 /*W Generate bad CRC */
+#define OBOE_CTL_TX_SIP	  	0x10 /*W Generate an SIP after transmission */
+#define OBOE_CTL_TX_MKUNDER 0x08 /*W Generate an underrun error */
+#define OBOE_CTL_TX_RTCENTX 0x04 /*W Enable receiver and generate TXdone */
+ /* After this slot is processed */
+#define OBOE_CTL_TX_UNDER 0x01 /*R Set by hardware to indicate underrun */
+
+
+#define OBOE_CTL_RX_HW_OWNS 0x80 /*W/R This slot owned by hardware */
+#define OBOE_CTL_RX_PHYERR      0x40 /*R Decoder error on reception */
+#define OBOE_CTL_RX_CRCERR 0x20 /*R CRC error only set for [FM]IR */
+#define OBOE_CTL_RX_LENGTH 0x10 /*R Packet > max Rx length */
+#define OBOE_CTL_RX_OVER 0x08 /*R set to indicate an overflow */
+#define OBOE_CTL_RX_SIRBAD 0x04 /*R SIR had BOF in packet or ABORT sequence */
+#define OBOE_CTL_RX_RXEOF 0x02 /*R Finished receiving on this slot */
+
+
+struct toshoboe_cb
+{
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct net_device_stats stats;
+ struct tty_driver ttydev;
+
+  struct irlap_cb *irlap;       /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ __u32 flags; /* Interface flags */
+
+ struct pci_dev *pdev; /*PCI device */
+ int base; /*IO base */
+
+
+ int txpending; /*how many tx's are pending */
+ int txs, rxs; /*Which slots are we at */
+
+ int irdad; /*Driver under control of netdev end */
+ int async; /*Driver under control of async end */
+
+
+ int stopped; /*Stopped by some or other APM stuff */
+
+ int filter; /*In SIR mode do we want to receive
+ frames or byte ranges */
+
+ void *ringbuf; /*The ring buffer */
+ struct OboeRing *ring; /*The ring */
+
+ void *tx_bufs[OBOE_RING_MAX_SIZE]; /*The buffers */
+ void *rx_bufs[OBOE_RING_MAX_SIZE];
+
+
+ int speed; /*Current setting of the speed */
+ int new_speed; /*Set to request a speed change */
+
+/* The spinlock protect critical parts of the driver.
+ * Locking is done like this :
+ * spin_lock_irqsave(&self->spinlock, flags);
+ * Releasing the lock :
+ * spin_unlock_irqrestore(&self->spinlock, flags);
+ */
+ spinlock_t spinlock;
+ /* Used for the probe and diagnostics code */
+ int int_rx;
+ int int_tx;
+ int int_txunder;
+ int int_rxover;
+ int int_sip;
+};
+
+
+#endif
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 73a080b5af50..51d1caed60f2 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -2,9 +2,7 @@
*
* vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
*
- * Version: 0.3a, Nov 10, 2001
- *
- * Copyright (c) 2001 Martin Diehl
+ * Copyright (c) 2001-2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -25,6 +23,17 @@
#include <linux/module.h>
+MODULE_DESCRIPTION("IrDA SIR/MIR/FIR driver for VLSI 82C147");
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_LICENSE("GPL");
+EXPORT_NO_SYMBOLS;
+
+#define DRIVER_NAME "vlsi_ir"
+#define DRIVER_VERSION "v0.4"
+
+/********************************************************/
+
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -33,6 +42,9 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/time.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
@@ -40,17 +52,9 @@
#include <net/irda/vlsi_ir.h>
-
/********************************************************/
-
-MODULE_DESCRIPTION("IrDA SIR/MIR/FIR driver for VLSI 82C147");
-MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
-MODULE_LICENSE("GPL");
-
-
-static /* const */ char drivername[] = "vlsi_ir";
-
+static /* const */ char drivername[] = DRIVER_NAME;
#define PCI_CLASS_WIRELESS_IRDA 0x0d00
@@ -64,13 +68,8 @@ static struct pci_device_id vlsi_irda_table [] __devinitdata = { {
MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
-
/********************************************************/
-
-MODULE_PARM(clksrc, "i");
-MODULE_PARM_DESC(clksrc, "clock input source selection");
-
/* clksrc: which clock source to be used
* 0: auto - try PLL, fallback to 40MHz XCLK
* 1: on-chip 48MHz PLL
@@ -78,12 +77,10 @@ MODULE_PARM_DESC(clksrc, "clock input source selection");
* 3: external 40MHz XCLK (HP OB-800)
*/
+MODULE_PARM(clksrc, "i");
+MODULE_PARM_DESC(clksrc, "clock input source selection");
static int clksrc = 0; /* default is 0(auto) */
-
-MODULE_PARM(ringsize, "1-2i");
-MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
-
/* ringsize: size of the tx and rx descriptor rings
* independent for tx and rx
* specify as ringsize=tx[,rx]
@@ -92,11 +89,9 @@ MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
* there should be no gain when using rings larger than 8
*/
-static int ringsize[] = {8,8}; /* default is tx=rx=8 */
-
-
-MODULE_PARM(sirpulse, "i");
-MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
+MODULE_PARM(ringsize, "1-2i");
+MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
+static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */
/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
* 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
@@ -107,323 +102,750 @@ MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
* pulse width saves more than 90% of the transmitted IR power.
*/
+MODULE_PARM(sirpulse, "i");
+MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
static int sirpulse = 1; /* default is 3/16 bittime */
-
-MODULE_PARM(qos_mtt_bits, "i");
-MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
-
/* qos_mtt_bits: encoded min-turn-time value we require the peer device
* to use before transmitting to us. "Type 1" (per-station)
* bitfield according to IrLAP definition (section 6.6.8)
- * The HP HDLS-1100 requires 1 msec - don't even know
- * if this is the one which is used by my OB800
+ * Don't know which transceiver is used by my OB800 - the
+ * pretty common HP HDLS-1100 requires 1 msec - so lets use this.
*/
+MODULE_PARM(qos_mtt_bits, "i");
+MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
static int qos_mtt_bits = 0x04; /* default is 1 ms */
-
/********************************************************/
-
-/* some helpers for operations on ring descriptors */
-
-
-static inline int rd_is_active(struct vlsi_ring *r, unsigned i)
+static void vlsi_reg_debug(unsigned iobase, const char *s)
{
- return ((r->hw[i].rd_status & RD_STAT_ACTIVE) != 0);
-}
+ int i;
-static inline void rd_activate(struct vlsi_ring *r, unsigned i)
-{
- r->hw[i].rd_status |= RD_STAT_ACTIVE;
+ printk(KERN_DEBUG "%s: ", s);
+ for (i = 0; i < 0x20; i++)
+ printk("%02x", (unsigned)inb((iobase+i)));
+ printk("\n");
}
-static inline void rd_set_addr_status(struct vlsi_ring *r, unsigned i, dma_addr_t a, u8 s)
+static void vlsi_ring_debug(struct vlsi_ring *r)
{
- struct ring_descr *rd = r->hw +i;
+ struct ring_descr *rd;
+ unsigned i;
- /* ordering is important for two reasons:
- * - overlayed: writing addr overwrites status
- * - we want to write status last so we have valid address in
- * case status has RD_STAT_ACTIVE set
- */
+ printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__,
+ atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i);
+ printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
+ __FUNCTION__, (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
- if ((a & ~DMA_MASK_MSTRPAGE) != MSTRPAGE_VALUE)
- BUG();
+/********************************************************/
- a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
- * to status - just in case MSTRPAGE_VALUE!=0
- */
- rd->rd_addr = a;
- wmb();
- rd->rd_status = s; /* potentially passes ownership to the hardware */
-}
+#ifdef CONFIG_PROC_FS
-static inline void rd_set_status(struct vlsi_ring *r, unsigned i, u8 s)
+static int vlsi_proc_pdev(struct pci_dev *pdev, char *buf, int len)
{
- r->hw[i].rd_status = s;
-}
+ unsigned iobase = pci_resource_start(pdev, 0);
+ unsigned i;
+ char *out = buf;
-static inline void rd_set_count(struct vlsi_ring *r, unsigned i, u16 c)
-{
- r->hw[i].rd_count = c;
-}
+ if (len < 500)
+ return 0;
-static inline u8 rd_get_status(struct vlsi_ring *r, unsigned i)
-{
- return r->hw[i].rd_status;
+ out += sprintf(out, "\n%s (vid/did: %04x/%04x)\n",
+ pdev->name, (int)pdev->vendor, (int)pdev->device);
+ out += sprintf(out, "pci-power-state: %u\n", (unsigned) pdev->current_state);
+ out += sprintf(out, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
+ pdev->irq, (unsigned)pci_resource_start(pdev, 0), (u64)pdev->dma_mask);
+ out += sprintf(out, "hw registers: ");
+ for (i = 0; i < 0x20; i++)
+ out += sprintf(out, "%02x", (unsigned)inb((iobase+i)));
+ out += sprintf(out, "\n");
+ return out - buf;
}
-
-static inline dma_addr_t rd_get_addr(struct vlsi_ring *r, unsigned i)
+
+static int vlsi_proc_ndev(struct net_device *ndev, char *buf, int len)
{
- dma_addr_t a;
+ vlsi_irda_dev_t *idev = ndev->priv;
+ char *out = buf;
+ u8 byte;
+ u16 word;
+ unsigned delta1, delta2;
+ struct timeval now;
+ unsigned iobase = ndev->base_addr;
+
+ if (len < 1000)
+ return 0;
- a = (r->hw[i].rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
- return a;
+ out += sprintf(out, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
+ netif_device_present(ndev) ? "attached" : "detached",
+ netif_running(ndev) ? "running" : "not running",
+ netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
+ netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
+ if (!netif_running(ndev))
+ return out - buf;
+
+ out += sprintf(out, "\nhw-state:\n");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
+ out += sprintf(out, "IRMISC:%s%s%s UART%s",
+ (byte&IRMISC_IRRAIL) ? " irrail" : "",
+ (byte&IRMISC_IRPD) ? " irpd" : "",
+ (byte&IRMISC_UARTTST) ? " uarttest" : "",
+ (byte&IRMISC_UARTEN) ? "" : " disabled\n");
+ if (byte&IRMISC_UARTEN) {
+ out += sprintf(out, "@0x%s\n",
+ (byte&2) ? ((byte&1) ? "3e8" : "2e8")
+ : ((byte&1) ? "3f8" : "2f8"));
+ }
+ pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
+ out += sprintf(out, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
+ (byte&CLKCTL_PD_INV) ? "powered" : "down",
+ (byte&CLKCTL_LOCK) ? " locked" : "",
+ (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
+ (byte&CLKCTL_CLKSTP) ? "stopped" : "running",
+ (byte&CLKCTL_WAKE) ? "enabled" : "disabled");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
+ out += sprintf(out, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
+
+ byte = inb(iobase+VLSI_PIO_IRINTR);
+ out += sprintf(out, "IRINTR:%s%s%s%s%s%s%s%s\n",
+ (byte&IRINTR_ACTEN) ? " ACTEN" : "",
+ (byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
+ (byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
+ (byte&IRINTR_OE_EN) ? " OE_EN" : "",
+ (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
+ (byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
+ (byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
+ (byte&IRINTR_OE_INT) ? " OE_INT" : "");
+ word = inw(iobase+VLSI_PIO_RINGPTR);
+ out += sprintf(out, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
+ word = inw(iobase+VLSI_PIO_RINGBASE);
+ out += sprintf(out, "RINGBASE: busmap=0x%08x\n",
+ ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
+ word = inw(iobase+VLSI_PIO_RINGSIZE);
+ out += sprintf(out, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
+ RINGSIZE_TO_TXSIZE(word));
+
+ word = inw(iobase+VLSI_PIO_IRCFG);
+ out += sprintf(out, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (word&IRCFG_LOOP) ? " LOOP" : "",
+ (word&IRCFG_ENTX) ? " ENTX" : "",
+ (word&IRCFG_ENRX) ? " ENRX" : "",
+ (word&IRCFG_MSTR) ? " MSTR" : "",
+ (word&IRCFG_RXANY) ? " RXANY" : "",
+ (word&IRCFG_CRC16) ? " CRC16" : "",
+ (word&IRCFG_FIR) ? " FIR" : "",
+ (word&IRCFG_MIR) ? " MIR" : "",
+ (word&IRCFG_SIR) ? " SIR" : "",
+ (word&IRCFG_SIRFILT) ? " SIRFILT" : "",
+ (word&IRCFG_SIRTEST) ? " SIRTEST" : "",
+ (word&IRCFG_TXPOL) ? " TXPOL" : "",
+ (word&IRCFG_RXPOL) ? " RXPOL" : "");
+ word = inw(iobase+VLSI_PIO_IRENABLE);
+ out += sprintf(out, "IRENABLE:%s%s%s%s%s%s%s%s\n",
+ (word&IRENABLE_IREN) ? " IRENABLE" : "",
+ (word&IRENABLE_CFGER) ? " CFGERR" : "",
+ (word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
+ (word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
+ (word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
+ (word&IRENABLE_ENTXST) ? " ENTXST" : "",
+ (word&IRENABLE_ENRXST) ? " ENRXST" : "",
+ (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
+ word = inw(iobase+VLSI_PIO_PHYCTL);
+ out += sprintf(out, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_NPHYCTL);
+ out += sprintf(out, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_MAXPKT);
+ out += sprintf(out, "MAXPKT: max. rx packet size = %u\n", word);
+ word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ out += sprintf(out, "RCVBCNT: rx-fifo filling level = %u\n", word);
+
+ out += sprintf(out, "\nsw-state:\n");
+ out += sprintf(out, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
+ (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
+ do_gettimeofday(&now);
+ if (now.tv_usec >= idev->last_rx.tv_usec) {
+ delta2 = now.tv_usec - idev->last_rx.tv_usec;
+ delta1 = 0;
+ }
+ else {
+ delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
+ delta1 = 1;
+ }
+ out += sprintf(out, "last rx: %lu.%06u sec\n",
+ now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
+
+ out += sprintf(out, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
+ idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
+ idev->stats.rx_dropped);
+ out += sprintf(out, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
+ idev->stats.rx_over_errors, idev->stats.rx_length_errors,
+ idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
+ out += sprintf(out, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
+ idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
+ idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
+
+ return out - buf;
}
-
-static inline u16 rd_get_count(struct vlsi_ring *r, unsigned i)
+
+static int vlsi_proc_ring(struct vlsi_ring *r, char *buf, int len)
{
- return r->hw[i].rd_count;
-}
+ struct ring_descr *rd;
+ unsigned i, j;
+ int h, t;
+ char *out = buf;
-/* producer advances r->head when descriptor was added for processing by hw */
+ if (len < 3000)
+ return 0;
-static inline void ring_put(struct vlsi_ring *r)
-{
- r->head = (r->head + 1) & r->mask;
+ out += sprintf(out, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ h = atomic_read(&r->head) & r->mask;
+ t = atomic_read(&r->tail) & r->mask;
+ out += sprintf(out, "head = %d / tail = %d ", h, t);
+ if (h == t)
+ out += sprintf(out, "(empty)\n");
+ else {
+ if (((t+1)&r->mask) == h)
+ out += sprintf(out, "(full)\n");
+ else
+ out += sprintf(out, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
+ rd = &r->rd[h];
+ j = (unsigned) rd_get_count(rd);
+ out += sprintf(out, "current: rd = %d / status = %02x / len = %u\n",
+ h, (unsigned)rd_get_status(rd), j);
+ if (j > 0) {
+ out += sprintf(out, " data:");
+ if (j > 20)
+ j = 20;
+ for (i = 0; i < j; i++)
+ out += sprintf(out, " %02x", (unsigned)((unsigned char *)rd->buf)[i]);
+ out += sprintf(out, "\n");
+ }
+ }
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ out += sprintf(out, "> ring descr %u: ", i);
+ out += sprintf(out, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ out += sprintf(out, " hw: status=%02x count=%u busaddr=0x%08x\n",
+ (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+ return out - buf;
}
-/* consumer advances r->tail when descriptor was removed after getting processed by hw */
-
-static inline void ring_get(struct vlsi_ring *r)
+static int vlsi_proc_print(struct net_device *ndev, char *buf, int len)
{
- r->tail = (r->tail + 1) & r->mask;
-}
+ vlsi_irda_dev_t *idev;
+ unsigned long flags;
+ char *out = buf;
+ if (!ndev || !ndev->priv) {
+ printk(KERN_ERR "%s: invalid ptr!\n", __FUNCTION__);
+ return 0;
+ }
-/********************************************************/
+ idev = ndev->priv;
+
+ if (len < 8000)
+ return 0;
-/* the memory required to hold the 2 descriptor rings */
+ out += sprintf(out, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
+ out += sprintf(out, "clksrc: %s\n",
+ (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
+ : ((clksrc==1)?"48MHz PLL":"autodetect"));
+ out += sprintf(out, "ringsize: tx=%d / rx=%d\n",
+ ringsize[0], ringsize[1]);
+ out += sprintf(out, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
+ out += sprintf(out, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
-#define RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr))
+ spin_lock_irqsave(&idev->lock, flags);
+ if (idev->pdev != NULL) {
+ out += vlsi_proc_pdev(idev->pdev, out, len - (out-buf));
+ if (idev->pdev->current_state == 0)
+ out += vlsi_proc_ndev(ndev, out, len - (out-buf));
+ else
+ out += sprintf(out, "\nPCI controller down - resume_ok = %d\n",
+ idev->resume_ok);
+ if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
+ out += sprintf(out, "\n--------- RX ring -----------\n\n");
+ out += vlsi_proc_ring(idev->rx_ring, out, len - (out-buf));
+ out += sprintf(out, "\n--------- TX ring -----------\n\n");
+ out += vlsi_proc_ring(idev->tx_ring, out, len - (out-buf));
+ }
+ }
+ out += sprintf(out, "\n");
+ spin_unlock_irqrestore(&idev->lock, flags);
-/* the memory required to hold the rings' buffer entries */
+ return out - buf;
+}
-#define RING_ENTRY_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_entry))
+static struct proc_dir_entry *vlsi_proc_root = NULL;
-/********************************************************/
+struct vlsi_proc_data {
+ int size;
+ char *data;
+};
-/* just dump all registers */
+/* most of the proc-fops code borrowed from usb/uhci */
-static void vlsi_reg_debug(unsigned iobase, const char *s)
+static int vlsi_proc_open(struct inode *inode, struct file *file)
{
- int i;
+ const struct proc_dir_entry *pde = PDE(inode);
+ struct net_device *ndev = pde->data;
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct vlsi_proc_data *procdata;
+ const int maxdata = 8000;
- mb();
- printk(KERN_DEBUG "%s: ", s);
- for (i = 0; i < 0x20; i++)
- printk("%02x", (unsigned)inb((iobase+i)));
- printk("\n");
-}
+ lock_kernel();
+ procdata = kmalloc(sizeof(*procdata), GFP_KERNEL);
+ if (!procdata) {
+ unlock_kernel();
+ return -ENOMEM;
+ }
+ procdata->data = kmalloc(maxdata, GFP_KERNEL);
+ if (!procdata->data) {
+ kfree(procdata);
+ unlock_kernel();
+ return -ENOMEM;
+ }
-/********************************************************/
+ down(&idev->sem);
+ procdata->size = vlsi_proc_print(ndev, procdata->data, maxdata);
+ up(&idev->sem);
+
+ file->private_data = procdata;
+ return 0;
+}
-static int vlsi_set_clock(struct pci_dev *pdev)
+static loff_t vlsi_proc_lseek(struct file *file, loff_t off, int whence)
{
- u8 clkctl, lock;
- int i, count;
+ struct vlsi_proc_data *procdata;
+ loff_t new = -1;
+
+ lock_kernel();
+ procdata = file->private_data;
+
+ switch (whence) {
+ case 0:
+ new = off;
+ break;
+ case 1:
+ new = file->f_pos + off;
+ break;
+ }
+ if (new < 0 || new > procdata->size) {
+ unlock_kernel();
+ return -EINVAL;
+ }
+ unlock_kernel();
+ return (file->f_pos = new);
+}
- if (clksrc < 2) { /* auto or PLL: try PLL */
- clkctl = CLKCTL_NO_PD | CLKCTL_CLKSTP;
- pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+static ssize_t vlsi_proc_read(struct file *file, char *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct vlsi_proc_data *procdata = file->private_data;
+ unsigned int pos;
+ unsigned int size;
- /* procedure to detect PLL lock synchronisation:
- * after 0.5 msec initial delay we expect to find 3 PLL lock
- * indications within 10 msec for successful PLL detection.
- */
- udelay(500);
- count = 0;
- for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
- pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
- if (lock&CLKCTL_LOCK) {
- if (++count >= 3)
- break;
- }
- udelay(50);
- }
- if (count < 3) {
- if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
- printk(KERN_ERR "%s: no PLL or failed to lock!\n",
- __FUNCTION__);
- clkctl = CLKCTL_CLKSTP;
- pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
- return -1;
- }
- else /* was: clksrc=0(auto) */
- clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
+ pos = *ppos;
+ size = procdata->size;
+ if (pos >= size)
+ return 0;
+ if (nbytes >= size)
+ nbytes = size;
+ if (pos + nbytes > size)
+ nbytes = size - pos;
- printk(KERN_INFO "%s: PLL not locked, fallback to clksrc=%d\n",
- __FUNCTION__, clksrc);
- }
- else { /* got successful PLL lock */
- clksrc = 1;
- return 0;
- }
- }
+ if (!access_ok(VERIFY_WRITE, buf, nbytes))
+ return -EINVAL;
- /* we get here if either no PLL detected in auto-mode or
- the external clock source was explicitly specified */
+ copy_to_user(buf, procdata->data + pos, nbytes);
- clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
- if (clksrc == 3)
- clkctl |= CLKCTL_XCKSEL;
- pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ *ppos += nbytes;
+
+ return nbytes;
+}
- /* no way to test for working XCLK */
+static int vlsi_proc_release(struct inode *inode, struct file *file)
+{
+ struct vlsi_proc_data *procdata = file->private_data;
+
+ kfree(procdata->data);
+ kfree(procdata);
return 0;
}
+static struct file_operations vlsi_proc_fops = {
+ open: vlsi_proc_open,
+ llseek: vlsi_proc_lseek,
+ read: vlsi_proc_read,
+ release: vlsi_proc_release,
+};
+#endif
+
+/********************************************************/
-static void vlsi_start_clock(struct pci_dev *pdev)
+static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
+ unsigned size, unsigned len, int dir)
{
- u8 clkctl;
-
- printk(KERN_INFO "%s: start clock using %s as input\n", __FUNCTION__,
- (clksrc&2)?((clksrc&1)?"40MHz XCLK":"48MHz XCLK"):"48MHz PLL");
- pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
- clkctl &= ~CLKCTL_CLKSTP;
- pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ struct vlsi_ring *r;
+ struct ring_descr *rd;
+ unsigned i, j;
+ dma_addr_t busaddr;
+
+ if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */
+ return NULL;
+
+ r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
+ if (!r)
+ return NULL;
+ memset(r, 0, sizeof(*r));
+
+ r->pdev = pdev;
+ r->dir = dir;
+ r->len = len;
+ r->rd = (struct ring_descr *)(r+1);
+ r->mask = size - 1;
+ r->size = size;
+ atomic_set(&r->head, 0);
+ atomic_set(&r->tail, 0);
+
+ for (i = 0; i < size; i++) {
+ rd = r->rd + i;
+ memset(rd, 0, sizeof(*rd));
+ rd->hw = hwmap + i;
+ rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
+ if (rd->buf == NULL) {
+ for (j = 0; j < i; j++) {
+ rd = r->rd + j;
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ if (busaddr)
+ pci_unmap_single(pdev, busaddr, len, dir);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ kfree(r);
+ return NULL;
+ }
+ busaddr = pci_map_single(pdev, rd->buf, len, dir);
+ if (!busaddr) {
+ printk(KERN_ERR "%s: failed to create PCI-MAP for %p",
+ __FUNCTION__, rd->buf);
+ BUG();
+ }
+ rd_set_addr_status(rd, busaddr, 0);
+ pci_dma_sync_single(pdev, busaddr, len, dir);
+ /* initially, the dma buffer is owned by the CPU */
+ rd->skb = NULL;
+ }
+ return r;
}
-
-static void vlsi_stop_clock(struct pci_dev *pdev)
+static int vlsi_free_ring(struct vlsi_ring *r)
{
- u8 clkctl;
+ struct ring_descr *rd;
+ unsigned i;
+ dma_addr_t busaddr;
- pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
- clkctl |= CLKCTL_CLKSTP;
- pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ for (i = 0; i < r->size; i++) {
+ rd = r->rd + i;
+ if (rd->skb)
+ dev_kfree_skb_any(rd->skb);
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ if (busaddr)
+ pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
+ if (rd->buf)
+ kfree(rd->buf);
+ }
+ kfree(r);
+ return 0;
}
-
-static void vlsi_unset_clock(struct pci_dev *pdev)
+static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
{
- u8 clkctl;
+ char *ringarea;
+ struct ring_descr_hw *hwmap;
- pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
- if (!(clkctl&CLKCTL_CLKSTP))
- /* make sure clock is already stopped */
- vlsi_stop_clock(pdev);
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
- clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_NO_PD);
- pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
+ if (!ringarea) {
+ printk(KERN_ERR "%s: insufficient memory for descriptor rings\n",
+ __FUNCTION__);
+ goto out;
+ }
+ memset(ringarea, 0, HW_RING_AREA_SIZE);
+
+ hwmap = (struct ring_descr_hw *)ringarea;
+ idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
+ XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (idev->rx_ring == NULL)
+ goto out_unmap;
+
+ hwmap += MAX_RING_DESCR;
+ idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
+ XFER_BUF_SIZE, PCI_DMA_TODEVICE);
+ if (idev->tx_ring == NULL)
+ goto out_free_rx;
+
+ idev->virtaddr = ringarea;
+ return 0;
+
+out_free_rx:
+ vlsi_free_ring(idev->rx_ring);
+out_unmap:
+ idev->rx_ring = idev->tx_ring = NULL;
+ pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
+ idev->busaddr = 0;
+out:
+ return -ENOMEM;
}
-/********************************************************/
+static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
+{
+ vlsi_free_ring(idev->rx_ring);
+ vlsi_free_ring(idev->tx_ring);
+ idev->rx_ring = idev->tx_ring = NULL;
+ if (idev->busaddr)
+ pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
-/* ### FIXME: don't use old virt_to_bus() anymore! */
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ return 0;
+}
+/********************************************************/
-static void vlsi_arm_rx(struct vlsi_ring *r)
+static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
{
- unsigned i;
- dma_addr_t ba;
+ u16 status;
+ int crclen, len = 0;
+ struct sk_buff *skb;
+ int ret = 0;
+ struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
+ vlsi_irda_dev_t *idev = ndev->priv;
- for (i = 0; i < r->size; i++) {
- if (r->buf[i].data == NULL)
- BUG();
- ba = virt_to_bus(r->buf[i].data);
- rd_set_addr_status(r, i, ba, RD_STAT_ACTIVE);
+ pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_RX_ERROR) {
+ if (status & RD_RX_OVER)
+ ret |= VLSI_RX_OVER;
+ if (status & RD_RX_LENGTH)
+ ret |= VLSI_RX_LENGTH;
+ if (status & RD_RX_PHYERR)
+ ret |= VLSI_RX_FRAME;
+ if (status & RD_RX_CRCERR)
+ ret |= VLSI_RX_CRC;
+ }
+ else {
+ len = rd_get_count(rd);
+ crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
+ len -= crclen; /* remove trailing CRC */
+ if (len <= 0) {
+ printk(KERN_ERR "%s: strange frame (len=%d)\n",
+ __FUNCTION__, len);
+ ret |= VLSI_RX_DROP;
+ }
+ else if (!rd->skb) {
+ printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
+ ret |= VLSI_RX_DROP;
+ }
+ else {
+ skb = rd->skb;
+ rd->skb = NULL;
+ skb->dev = ndev;
+ memcpy(skb_put(skb,len), rd->buf, len);
+ skb->mac.raw = skb->data;
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+ ndev->last_rx = jiffies;
+ }
}
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ /* buffer still owned by CPU */
+
+ return (ret) ? -ret : len;
}
-static int vlsi_alloc_ringbuf(struct vlsi_ring *r)
+static void vlsi_fill_rx(struct vlsi_ring *r)
{
- unsigned i, j;
+ struct ring_descr *rd;
- r->head = r->tail = 0;
- r->mask = r->size - 1;
- for (i = 0; i < r->size; i++) {
- r->buf[i].skb = NULL;
- r->buf[i].data = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
- if (r->buf[i].data == NULL) {
- for (j = 0; j < i; j++) {
- kfree(r->buf[j].data);
- r->buf[j].data = NULL;
+ for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
+ if (rd_is_active(rd)) {
+ BUG();
+ break;
+ }
+ if (!rd->skb) {
+ rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
+ if (rd->skb) {
+ skb_reserve(rd->skb,1);
+ rd->skb->protocol = htons(ETH_P_IRDA);
}
- return -ENOMEM;
+ else
+ break; /* probably not worth logging? */
}
+ /* give dma buffer back to busmaster */
+ pci_dma_prep_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ rd_activate(rd);
}
- return 0;
}
-static void vlsi_free_ringbuf(struct vlsi_ring *r)
+static void vlsi_rx_interrupt(struct net_device *ndev)
{
- unsigned i;
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
- for (i = 0; i < r->size; i++) {
- if (r->buf[i].data == NULL)
- continue;
- if (r->buf[i].skb) {
- dev_kfree_skb(r->buf[i].skb);
- r->buf[i].skb = NULL;
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ idev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ idev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ idev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ idev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ idev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ idev->stats.rx_packets++;
+ idev->stats.rx_bytes += ret;
}
- else
- kfree(r->buf[i].data);
- r->buf[i].data = NULL;
}
-}
+ do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
-static int vlsi_init_ring(vlsi_irda_dev_t *idev)
-{
- char *ringarea;
+ vlsi_fill_rx(r);
- ringarea = pci_alloc_consistent(idev->pdev, RING_AREA_SIZE, &idev->busaddr);
- if (!ringarea) {
- printk(KERN_ERR "%s: insufficient memory for descriptor rings\n",
- __FUNCTION__);
- return -ENOMEM;
+ if (ring_first(r) == NULL) {
+ /* we are in big trouble, if this should ever happen */
+ printk(KERN_ERR "%s: rx ring exhausted!\n", __FUNCTION__);
+ vlsi_ring_debug(r);
}
- memset(ringarea, 0, RING_AREA_SIZE);
+ else
+ outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
+}
-#if 0
- printk(KERN_DEBUG "%s: (%d,%d)-ring %p / %p\n", __FUNCTION__,
- ringsize[0], ringsize[1], ringarea,
- (void *)(unsigned)idev->busaddr);
-#endif
+/* caller must have stopped the controller from busmastering */
- idev->rx_ring.size = ringsize[1];
- idev->rx_ring.hw = (struct ring_descr *)ringarea;
- if (!vlsi_alloc_ringbuf(&idev->rx_ring)) {
- idev->tx_ring.size = ringsize[0];
- idev->tx_ring.hw = idev->rx_ring.hw + MAX_RING_DESCR;
- if (!vlsi_alloc_ringbuf(&idev->tx_ring)) {
- idev->virtaddr = ringarea;
- return 0;
+static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
+{
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ if (rd_get_count(rd)) {
+ printk(KERN_INFO "%s - dropping rx packet\n", __FUNCTION__);
+ ret = -VLSI_RX_DROP;
+ }
+ rd_set_count(rd, 0);
+ pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
}
- vlsi_free_ringbuf(&idev->rx_ring);
- }
+ else
+ ret = vlsi_process_rx(r, rd);
- pci_free_consistent(idev->pdev, RING_AREA_SIZE,
- ringarea, idev->busaddr);
- printk(KERN_ERR "%s: insufficient memory for ring buffers\n",
- __FUNCTION__);
- return -1;
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ idev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ idev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ idev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ idev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ idev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ idev->stats.rx_packets++;
+ idev->stats.rx_bytes += ret;
+ }
+ }
}
+/********************************************************/
+static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int len;
+ int ret;
+
+ pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_TX_UNDRN)
+ ret = VLSI_TX_FIFO;
+ else
+ ret = 0;
+ rd_set_status(rd, 0);
-/********************************************************/
+ if (rd->skb) {
+ len = rd->skb->len;
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ else /* tx-skb already freed? - should never happen */
+ len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */
+ rd_set_count(rd, 0);
+ /* dma buffer still owned by the CPU */
+ return (ret) ? -ret : len;
+}
-static int vlsi_set_baud(struct net_device *ndev)
+static int vlsi_set_baud(struct net_device *ndev, int dolock)
{
vlsi_irda_dev_t *idev = ndev->priv;
unsigned long flags;
@@ -431,18 +853,16 @@ static int vlsi_set_baud(struct net_device *ndev)
unsigned iobase;
u16 config;
unsigned mode;
+ unsigned idle_retry;
int ret;
int baudrate;
+ int fifocnt = 0; /* Keep compiler happy */
baudrate = idev->new_baud;
iobase = ndev->base_addr;
-
+#if 0
printk(KERN_DEBUG "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
-
- spin_lock_irqsave(&idev->lock, flags);
-
- outw(0, iobase+VLSI_PIO_IRENABLE);
-
+#endif
if (baudrate == 4000000) {
mode = IFF_FIR;
config = IRCFG_FIR;
@@ -455,7 +875,7 @@ static int vlsi_set_baud(struct net_device *ndev)
}
else {
mode = IFF_SIR;
- config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
+ config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
switch(baudrate) {
default:
printk(KERN_ERR "%s: undefined baudrate %d - fallback to 9600!\n",
@@ -473,6 +893,32 @@ static int vlsi_set_baud(struct net_device *ndev)
}
}
+ if (dolock)
+ spin_lock_irqsave(&idev->lock, flags);
+ else
+ flags = 0xdead; /* prevent bogus warning about possible uninitialized use */
+
+ for (idle_retry=0; idle_retry < 100; idle_retry++) {
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt == 0)
+ break;
+ if (!idle_retry)
+ printk(KERN_WARNING "%s: waiting for rx fifo to become empty(%d)\n",
+ __FUNCTION__, fifocnt);
+ if (dolock) {
+ spin_unlock_irqrestore(&idev->lock, flags);
+ udelay(100);
+ spin_lock_irqsave(&idev->lock, flags);
+ }
+ else
+ udelay(100);
+ }
+ if (fifocnt != 0)
+ printk(KERN_ERR "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
+
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ wmb();
+
config |= IRCFG_MSTR | IRCFG_ENRX;
outw(config, iobase+VLSI_PIO_IRCFG);
@@ -480,10 +926,12 @@ static int vlsi_set_baud(struct net_device *ndev)
outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
+ mb();
- /* chip fetches IRCFG on next rising edge of its 8MHz clock */
+ udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */
+
+ /* read back settings for validation */
- mb();
config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
if (mode == IFF_FIR)
@@ -493,7 +941,6 @@ static int vlsi_set_baud(struct net_device *ndev)
else
config ^= IRENABLE_SIR_ON;
-
if (config != (IRENABLE_IREN|IRENABLE_ENRXST)) {
printk(KERN_ERR "%s: failed to set %s mode!\n", __FUNCTION__,
(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
@@ -512,7 +959,8 @@ static int vlsi_set_baud(struct net_device *ndev)
ret = 0;
}
}
- spin_unlock_irqrestore(&idev->lock, flags);
+ if (dolock)
+ spin_unlock_irqrestore(&idev->lock, flags);
if (ret)
vlsi_reg_debug(iobase,__FUNCTION__);
@@ -520,278 +968,396 @@ static int vlsi_set_baud(struct net_device *ndev)
return ret;
}
+static inline int vlsi_set_baud_lock(struct net_device *ndev)
+{
+ return vlsi_set_baud(ndev, 1);
+}
+static inline int vlsi_set_baud_nolock(struct net_device *ndev)
+{
+ return vlsi_set_baud(ndev, 0);
+}
-static int vlsi_init_chip(struct net_device *ndev)
+static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
- unsigned iobase;
- u16 ptr;
-
- iobase = ndev->base_addr;
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned long flags;
+ unsigned iobase = ndev->base_addr;
+ u8 status;
+ u16 config;
+ int mtt;
+ int len, speed;
+ struct timeval now, ready;
- outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
+ speed = irda_get_next_speed(skb);
+ if (speed != -1 && speed != idev->baud) {
+ netif_stop_queue(ndev);
+ idev->new_baud = speed;
+ if (!skb->len) {
+ dev_kfree_skb_any(skb);
+
+ /* due to the completely asynch tx operation we might have
+ * IrLAP racing with the hardware here, f.e. if the controller
+ * is just sending the last packet with current speed while
+ * the LAP is already switching the speed using synchronous
+ * len=0 packet. Immediate execution would lead to hw lockup
+ * requiring a powercycle to reset. Good candidate to trigger
+ * this is the final UA:RSP packet after receiving a DISC:CMD
+ * when getting the LAP down.
+ * Note that we are not protected by the queue_stop approach
+ * because the final UA:RSP arrives _without_ request to apply
+ * new-speed-after-this-packet - hence the driver doesn't know
+ * this was the last packet and doesn't stop the queue. So the
+ * forced switch to default speed from LAP gets through as fast
+ * as only some 10 usec later while the UA:RSP is still processed
+ * by the hardware and we would get screwed.
+ * Note: no locking required since we (netdev->xmit) are the only
+ * supplier for tx and the network layer provides serialization
+ */
+ spin_lock_irqsave(&idev->lock, flags);
+ if (ring_first(idev->tx_ring) == NULL) {
+ /* no race - tx-ring already empty */
+ vlsi_set_baud_nolock(ndev);
+ netif_wake_queue(ndev);
+ }
+ else
+ ; /* keep the speed change pending like it would
+ * for any len>0 packet. tx completion interrupt
+ * will apply it when the tx ring becomes empty.
+ */
+ spin_unlock_irqrestore(&idev->lock, flags);
+ return 0;
+ }
+ status = RD_TX_CLRENTX; /* stop tx-ring after this frame */
+ }
+ else
+ status = 0;
- outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
+ if (skb->len == 0) {
+ printk(KERN_ERR "%s: dropping len=0 packet\n", __FUNCTION__);
+ goto drop;
+ }
- /* disable everything, particularly IRCFG_MSTR - which resets the RING_PTR */
+ /* sanity checks - should never happen!
+ * simply BUGging the violation and dropping the packet
+ */
- outw(0, iobase+VLSI_PIO_IRCFG);
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
+ rd = ring_last(r);
+ if (!rd) { /* ring full - queue should have been stopped! */
+ BUG();
+ goto drop;
+ }
- mb();
+ if (rd_is_active(rd)) { /* entry still owned by hw! */
+ BUG();
+ goto drop;
+ }
- outw(0, iobase+VLSI_PIO_IRENABLE);
+ if (!rd->buf) { /* no memory for this tx entry - weird! */
+ BUG();
+ goto drop;
+ }
- outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
+ if (rd->skb) { /* hm, associated old skb still there */
+ BUG();
+ goto drop;
+ }
- outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
+ /* tx buffer already owned by CPU due to pci_dma_sync_single() either
+ * after initial pci_map_single or after subsequent tx-completion
+ */
- outw(TX_RX_TO_RINGSIZE(idev->tx_ring.size, idev->rx_ring.size),
- iobase+VLSI_PIO_RINGSIZE);
+ if (idev->mode == IFF_SIR) {
+ status |= RD_TX_DISCRC; /* no hw-crc creation */
+ len = async_wrap_skb(skb, rd->buf, r->len);
+
+ /* Some rare worst case situation in SIR mode might lead to
+ * potential buffer overflow. The wrapper detects this, returns
+ * with a shortened frame (without FCS/EOF) but doesn't provide
+ * any error indication about the invalid packet which we are
+ * going to transmit.
+ * Therefore we log if the buffer got filled to the point, where the
+ * wrapper would abort, i.e. when there are less than 5 bytes left to
+ * allow appending the FCS/EOF.
+ */
- ptr = inw(iobase+VLSI_PIO_RINGPTR);
- idev->rx_ring.head = idev->rx_ring.tail = RINGPTR_GET_RX(ptr);
- idev->tx_ring.head = idev->tx_ring.tail = RINGPTR_GET_TX(ptr);
+ if (len >= r->len-5)
+ printk(KERN_WARNING "%s: possible buffer overflow with SIR wrapping!\n",
+ __FUNCTION__);
+ }
+ else {
+ /* hw deals with MIR/FIR mode wrapping */
+ status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */
+ len = skb->len;
+ if (len > r->len) {
+ printk(KERN_ERR "%s: no space - skb too big (%d)\n",
+ __FUNCTION__, skb->len);
+ goto drop;
+ }
+ else
+ memcpy(rd->buf, skb->data, len);
+ }
- outw(IRCFG_MSTR, iobase+VLSI_PIO_IRCFG); /* ready for memory access */
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
+ /* do mtt delay before we need to disable interrupts! */
- mb();
+ if ((mtt = irda_get_mtt(skb)) > 0) {
+
+ ready.tv_usec = idev->last_rx.tv_usec + mtt;
+ ready.tv_sec = idev->last_rx.tv_sec;
+ if (ready.tv_usec >= 1000000) {
+ ready.tv_usec -= 1000000;
+ ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
+ }
+ for(;;) {
+ do_gettimeofday(&now);
+ if (now.tv_sec > ready.tv_sec
+ || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
+ break;
+ udelay(100);
+ /* must not sleep here - we are called under xmit_lock! */
+ }
+ }
- idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
- vlsi_set_baud(ndev);
+ rd->skb = skb; /* remember skb for tx-complete stats */
- outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
- wmb();
+ rd_set_count(rd, len);
+ rd_set_status(rd, status); /* not yet active! */
- /* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
- * basically every received pulse fires an ACTIVITY-INT
- * leading to >>1000 INT's per second instead of few 10
+ /* give dma buffer back to busmaster-hw (flush caches to make
+ * CPU-driven changes visible from the pci bus).
*/
- outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
- wmb();
-
- return 0;
-}
-
-
-/**************************************************************/
+ pci_dma_prep_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
+/*
+ * We need to disable IR output in order to switch to TX mode.
+ * Better not do this blindly anytime we want to transmit something
+ * because TX may already run. However we are racing with the controller
+ * which may stop TX at any time when fetching an inactive descriptor
+ * or one with CLR_ENTX set. So we switch on TX only, if TX was not running
+ * _after_ the new descriptor was activated on the ring. This ensures
+ * we will either find TX already stopped or we can be sure, there
+ * will be a TX-complete interrupt even if the chip stopped doing
+ * TX just after we found it still running. The ISR will then find
+ * the non-empty ring and restart TX processing. The enclosing
+ * spinlock provides the correct serialization to prevent race with isr.
+ */
-static void vlsi_refill_rx(struct vlsi_ring *r)
-{
- do {
- if (rd_is_active(r, r->head))
- BUG();
- rd_activate(r, r->head);
- ring_put(r);
- } while (r->head != r->tail);
-}
+ spin_lock_irqsave(&idev->lock,flags);
+ rd_activate(rd);
-static int vlsi_rx_interrupt(struct net_device *ndev)
-{
- vlsi_irda_dev_t *idev = ndev->priv;
- struct vlsi_ring *r;
- int len;
- u8 status;
- struct sk_buff *skb;
- int crclen;
+ if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
- r = &idev->rx_ring;
- while (!rd_is_active(r, r->tail)) {
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0)
+ printk(KERN_WARNING "%s: rx fifo not empty(%d)\n",
+ __FUNCTION__, fifocnt);
- status = rd_get_status(r, r->tail);
- if (status & RX_STAT_ERROR) {
- idev->stats.rx_errors++;
- if (status & RX_STAT_OVER)
- idev->stats.rx_over_errors++;
- if (status & RX_STAT_LENGTH)
- idev->stats.rx_length_errors++;
- if (status & RX_STAT_PHYERR)
- idev->stats.rx_frame_errors++;
- if (status & RX_STAT_CRCERR)
- idev->stats.rx_crc_errors++;
- }
- else {
- len = rd_get_count(r, r->tail);
- crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
- if (len < crclen)
- printk(KERN_ERR "%s: strange frame (len=%d)\n",
- __FUNCTION__, len);
- else
- len -= crclen; /* remove trailing CRC */
-
- skb = dev_alloc_skb(len+1);
- if (skb) {
- skb->dev = ndev;
- skb_reserve(skb,1);
- memcpy(skb_put(skb,len), r->buf[r->tail].data, len);
- idev->stats.rx_packets++;
- idev->stats.rx_bytes += len;
- skb->mac.raw = skb->data;
- skb->protocol = htons(ETH_P_IRDA);
- netif_rx(skb);
- ndev->last_rx = jiffies;
- }
- else {
- idev->stats.rx_dropped++;
- printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
- }
- }
- rd_set_count(r, r->tail, 0);
- rd_set_status(r, r->tail, 0);
- ring_get(r);
- if (r->tail == r->head) {
- printk(KERN_WARNING "%s: rx ring exhausted\n", __FUNCTION__);
- break;
- }
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ rmb();
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ mb();
+ outw(0, iobase+VLSI_PIO_PROMPT);
}
+ ndev->trans_start = jiffies;
- do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
-
- vlsi_refill_rx(r);
-
- mb();
- outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
+ if (ring_put(r) == NULL) {
+ netif_stop_queue(ndev);
+ printk(KERN_DEBUG "%s: tx ring full - queue stopped\n", __FUNCTION__);
+ }
+ spin_unlock_irqrestore(&idev->lock, flags);
return 0;
-}
+drop:
+ dev_kfree_skb_any(skb);
+ idev->stats.tx_errors++;
+ idev->stats.tx_dropped++;
+ return 1;
+}
-static int vlsi_tx_interrupt(struct net_device *ndev)
+static void vlsi_tx_interrupt(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
- struct vlsi_ring *r;
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
unsigned iobase;
int ret;
u16 config;
- u16 status;
- r = &idev->tx_ring;
- while (!rd_is_active(r, r->tail)) {
- if (r->tail == r->head)
- break; /* tx ring empty - nothing to send anymore */
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_tx(r, rd);
- status = rd_get_status(r, r->tail);
- if (status & TX_STAT_UNDRN) {
+ if (ret < 0) {
+ ret = -ret;
idev->stats.tx_errors++;
- idev->stats.tx_fifo_errors++;
+ if (ret & VLSI_TX_DROP)
+ idev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ idev->stats.tx_fifo_errors++;
}
- else {
+ else if (ret > 0){
idev->stats.tx_packets++;
- idev->stats.tx_bytes += rd_get_count(r, r->tail); /* not correct for SIR */
- }
- rd_set_count(r, r->tail, 0);
- rd_set_status(r, r->tail, 0);
- if (r->buf[r->tail].skb) {
- rd_set_addr_status(r, r->tail, 0, 0);
- dev_kfree_skb(r->buf[r->tail].skb);
- r->buf[r->tail].skb = NULL;
- r->buf[r->tail].data = NULL;
+ idev->stats.tx_bytes += ret;
}
- ring_get(r);
}
- ret = 0;
- iobase = ndev->base_addr;
+ if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */
+ vlsi_set_baud_lock(ndev);
- if (r->head == r->tail) { /* tx ring empty: re-enable rx */
+ iobase = ndev->base_addr;
+ config = inw(iobase+VLSI_PIO_IRCFG);
- outw(0, iobase+VLSI_PIO_IRENABLE);
- config = inw(iobase+VLSI_PIO_IRCFG);
- mb();
+ if (rd == NULL) /* tx ring empty: re-enable rx */
outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
+
+ else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0)
+ printk(KERN_WARNING "%s: rx fifo not empty(%d)\n",
+ __FUNCTION__, fifocnt);
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
}
- else
- ret = 1; /* no speed-change-check */
- mb();
outw(0, iobase+VLSI_PIO_PROMPT);
- if (netif_queue_stopped(ndev)) {
+ if (netif_queue_stopped(ndev) && !idev->new_baud) {
netif_wake_queue(ndev);
printk(KERN_DEBUG "%s: queue awoken\n", __FUNCTION__);
}
- return ret;
}
+/* caller must have stopped the controller from busmastering */
-#if 0 /* disable ACTIVITY handling for now */
-
-static int vlsi_act_interrupt(struct net_device *ndev)
+static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
{
- printk(KERN_DEBUG "%s\n", __FUNCTION__);
- return 0;
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ printk(KERN_INFO "%s - dropping tx packet\n", __FUNCTION__);
+ ret = -VLSI_TX_DROP;
+ }
+ else
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ idev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ idev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ idev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ idev->stats.tx_packets++;
+ idev->stats.tx_bytes += ret;
+ }
+ }
+
}
-#endif
-static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
-{
- struct net_device *ndev = dev_instance;
- vlsi_irda_dev_t *idev = ndev->priv;
- unsigned iobase;
- u8 irintr;
- int boguscount = 32;
- int no_speed_check = 0;
- unsigned got_act;
- unsigned long flags;
+/********************************************************/
- got_act = 0;
- iobase = ndev->base_addr;
- spin_lock_irqsave(&idev->lock,flags);
- do {
- irintr = inb(iobase+VLSI_PIO_IRINTR);
- rmb();
- outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
- wmb();
+static int vlsi_start_clock(struct pci_dev *pdev)
+{
+ u8 clkctl, lock;
+ int i, count;
- if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
- break;
+ if (clksrc < 2) { /* auto or PLL: try PLL */
+ clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
-// vlsi_reg_debug(iobase,__FUNCTION__);
+ /* procedure to detect PLL lock synchronisation:
+ * after 0.5 msec initial delay we expect to find 3 PLL lock
+ * indications within 10 msec for successful PLL detection.
+ */
+ udelay(500);
+ count = 0;
+ for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
+ if (lock&CLKCTL_LOCK) {
+ if (++count >= 3)
+ break;
+ }
+ udelay(50);
+ }
+ if (count < 3) {
+ if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
+ printk(KERN_ERR "%s: no PLL or failed to lock!\n",
+ __FUNCTION__);
+ clkctl = CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ return -1;
+ }
+ else /* was: clksrc=0(auto) */
+ clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
- if (irintr&IRINTR_RPKTINT)
- no_speed_check |= vlsi_rx_interrupt(ndev);
+ printk(KERN_INFO "%s: PLL not locked, fallback to clksrc=%d\n",
+ __FUNCTION__, clksrc);
+ }
+ else
+ clksrc = 1; /* got successful PLL lock */
+ }
- if (irintr&IRINTR_TPKTINT)
- no_speed_check |= vlsi_tx_interrupt(ndev);
+ if (clksrc != 1) {
+ /* we get here if either no PLL detected in auto-mode or
+ an external clock source was explicitly specified */
-#if 0 /* disable ACTIVITY handling for now */
+ clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
+ if (clksrc == 3)
+ clkctl |= CLKCTL_XCKSEL;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
- if (got_act && irintr==IRINTR_ACTIVITY) /* nothing new */
- break;
+ /* no way to test for working XCLK */
+ }
+ else
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
- if ((irintr&IRINTR_ACTIVITY) && !(irintr^IRINTR_ACTIVITY) ) {
- no_speed_check |= vlsi_act_interrupt(ndev);
- got_act = 1;
- }
-#endif
- if (irintr & ~(IRINTR_RPKTINT|IRINTR_TPKTINT|IRINTR_ACTIVITY))
- printk(KERN_DEBUG "%s: IRINTR = %02x\n",
- __FUNCTION__, (unsigned)irintr);
-
- } while (--boguscount > 0);
- spin_unlock_irqrestore(&idev->lock,flags);
+ /* ok, now going to connect the chip with the clock source */
- if (boguscount <= 0)
- printk(KERN_ERR "%s: too much work in interrupt!\n", __FUNCTION__);
+ clkctl &= ~CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
- else if (!no_speed_check) {
- if (idev->new_baud)
- vlsi_set_baud(ndev);
- }
+ return 0;
}
+static void vlsi_stop_clock(struct pci_dev *pdev)
+{
+ u8 clkctl;
+
+ /* disconnect chip from clock source */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+ clkctl |= CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
-/**************************************************************/
+ /* disable all clock sources */
+ clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+}
+/********************************************************/
/* writing all-zero to the VLSI PCI IO register area seems to prevent
* some occasional situations where the hardware fails (symptoms are
@@ -811,135 +1377,119 @@ static inline void vlsi_clear_regs(unsigned iobase)
outw(0, iobase + i);
}
-
-static int vlsi_open(struct net_device *ndev)
+static int vlsi_init_chip(struct pci_dev *pdev)
{
+ struct net_device *ndev = pci_get_drvdata(pdev);
vlsi_irda_dev_t *idev = ndev->priv;
- struct pci_dev *pdev = idev->pdev;
- int err;
- char hwname[32];
-
- if (pci_request_regions(pdev,drivername)) {
- printk(KERN_ERR "%s: io resource busy\n", __FUNCTION__);
- return -EAGAIN;
- }
-
- /* under some rare occasions the chip apparently comes up
- * with IRQ's pending. So we get interrupts invoked much too early
- * which will immediately kill us again :-(
- * so we better w/c pending IRQ and disable them all
- */
-
- outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+ unsigned iobase;
+ u16 ptr;
- if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
- drivername, ndev)) {
- printk(KERN_ERR "%s: couldn't get IRQ: %d\n",
- __FUNCTION__, ndev->irq);
- pci_release_regions(pdev);
- return -EAGAIN;
- }
- printk(KERN_INFO "%s: got resources for %s - irq=%d / io=%04lx\n",
- __FUNCTION__, ndev->name, ndev->irq, ndev->base_addr );
+ /* start the clock and clean the registers */
- if (vlsi_set_clock(pdev)) {
+ if (vlsi_start_clock(pdev)) {
printk(KERN_ERR "%s: no valid clock source\n",
__FUNCTION__);
- free_irq(ndev->irq,ndev);
- pci_release_regions(pdev);
- return -EIO;
+ pci_disable_device(pdev);
+ return -1;
}
+ iobase = ndev->base_addr;
+ vlsi_clear_regs(iobase);
- vlsi_start_clock(pdev);
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
- vlsi_clear_regs(ndev->base_addr);
+ outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
- err = vlsi_init_ring(idev);
- if (err) {
- vlsi_unset_clock(pdev);
- free_irq(ndev->irq,ndev);
- pci_release_regions(pdev);
- return err;
- }
+ /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
- vlsi_init_chip(ndev);
+ outw(0, iobase+VLSI_PIO_IRCFG);
+ wmb();
- printk(KERN_INFO "%s: IrPHY setup: %d baud (%s), %s SIR-pulses\n",
- __FUNCTION__, idev->baud,
- (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"),
- (sirpulse)?"3/16 bittime":"short");
+ outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
- vlsi_arm_rx(&idev->rx_ring);
+ outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
- do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+ outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
+ iobase+VLSI_PIO_RINGSIZE);
- sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
- idev->irlap = irlap_open(ndev,&idev->qos,hwname);
+ ptr = inw(iobase+VLSI_PIO_RINGPTR);
+ atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
+ atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
- netif_start_queue(ndev);
- outw(0, ndev->base_addr+VLSI_PIO_PROMPT); /* kick hw state machine */
+ vlsi_set_baud_lock(ndev); /* idev->new_baud used as provided by caller */
- printk(KERN_INFO "%s: device %s operational using (%d,%d) tx,rx-ring\n",
- __FUNCTION__, ndev->name, ringsize[0], ringsize[1]);
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
+ wmb();
+
+ /* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
+ * basically every received pulse fires an ACTIVITY-INT
+ * leading to >>1000 INT's per second instead of few 10
+ */
+
+ outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
return 0;
}
-
-static int vlsi_close(struct net_device *ndev)
+static int vlsi_start_hw(vlsi_irda_dev_t *idev)
{
- vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
- u8 cmd;
- unsigned iobase;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ u8 byte;
+ /* we don't use the legacy UART, disable its address decoding */
- iobase = ndev->base_addr;
- netif_stop_queue(ndev);
+ pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
+ byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
+ pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
- if (idev->irlap)
- irlap_close(idev->irlap);
- idev->irlap = NULL;
+ /* enable PCI busmaster access to our 16MB page */
- outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending + disable further IRQ */
- wmb();
- outw(0, iobase+VLSI_PIO_IRENABLE);
- outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
- mb(); /* ... from now on */
+ pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
+ pci_set_master(pdev);
- outw(0, iobase+VLSI_PIO_IRENABLE);
- wmb();
+ vlsi_init_chip(pdev);
- vlsi_clear_regs(ndev->base_addr);
+ vlsi_fill_rx(idev->rx_ring);
- vlsi_stop_clock(pdev);
+ do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
- vlsi_unset_clock(pdev);
+ outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
- free_irq(ndev->irq,ndev);
+ return 0;
+}
- vlsi_free_ringbuf(&idev->rx_ring);
- vlsi_free_ringbuf(&idev->tx_ring);
+static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ unsigned long flags;
- if (idev->busaddr)
- pci_free_consistent(idev->pdev,RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
+ spin_lock_irqsave(&idev->lock,flags);
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
+ wmb();
- idev->virtaddr = NULL;
- idev->busaddr = 0;
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending + disable further IRQ */
+ mb();
+ spin_unlock_irqrestore(&idev->lock,flags);
- pci_read_config_byte(pdev, PCI_COMMAND, &cmd);
- cmd &= ~PCI_COMMAND_MASTER;
- pci_write_config_byte(pdev, PCI_COMMAND, cmd);
+ vlsi_unarm_tx(idev);
+ vlsi_unarm_rx(idev);
- pci_release_regions(pdev);
+ vlsi_clear_regs(iobase);
+ vlsi_stop_clock(pdev);
- printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);
+ pci_disable_device(pdev);
return 0;
}
+/**************************************************************/
+
static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
@@ -947,147 +1497,30 @@ static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
return &idev->stats;
}
-static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void vlsi_tx_timeout(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
- struct vlsi_ring *r;
- unsigned long flags;
- unsigned iobase;
- u8 status;
- u16 config;
- int mtt;
- int len, speed;
- struct timeval now, ready;
-
-
- status = 0;
-
- speed = irda_get_next_speed(skb);
-
- if (speed != -1 && speed != idev->baud) {
- idev->new_baud = speed;
- if (!skb->len) {
- dev_kfree_skb(skb);
- vlsi_set_baud(ndev);
- return 0;
- }
- status = TX_STAT_CLRENTX; /* stop tx-ring after this frame */
- }
-
- if (skb->len == 0) {
- printk(KERN_ERR "%s: blocking 0-size packet???\n",
- __FUNCTION__);
- dev_kfree_skb(skb);
- return 0;
- }
-
- r = &idev->tx_ring;
-
- if (rd_is_active(r, r->head))
- BUG();
-
- if (idev->mode == IFF_SIR) {
- status |= TX_STAT_DISCRC;
- len = async_wrap_skb(skb, r->buf[r->head].data, XFER_BUF_SIZE);
- }
- else { /* hw deals with MIR/FIR mode */
- len = skb->len;
- memcpy(r->buf[r->head].data, skb->data, len);
- }
-
- rd_set_count(r, r->head, len);
- rd_set_addr_status(r, r->head, virt_to_bus(r->buf[r->head].data), status);
-
- /* new entry not yet activated! */
-
-#if 0
- printk(KERN_DEBUG "%s: dump entry %d: %u %02x %08x\n",
- __FUNCTION__, r->head,
- idev->ring_hw[r->head].rd_count,
- (unsigned)idev->ring_hw[r->head].rd_status,
- idev->ring_hw[r->head].rd_addr & 0xffffffff);
- vlsi_reg_debug(iobase,__FUNCTION__);
-#endif
- /* let mtt delay pass before we need to acquire the spinlock! */
+ vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
+ vlsi_ring_debug(idev->tx_ring);
- if ((mtt = irda_get_mtt(skb)) > 0) {
-
- ready.tv_usec = idev->last_rx.tv_usec + mtt;
- ready.tv_sec = idev->last_rx.tv_sec;
- if (ready.tv_usec >= 1000000) {
- ready.tv_usec -= 1000000;
- ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
- }
- for(;;) {
- do_gettimeofday(&now);
- if (now.tv_sec > ready.tv_sec
- || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
- break;
- udelay(100);
- }
- }
-
-/*
- * race window ahead, due to concurrent controller processing!
- *
- * We need to disable IR output in order to switch to TX mode.
- * Better not do this blindly anytime we want to transmit something
- * because TX may already run. However the controller may stop TX
- * at any time when fetching an inactive descriptor or one with
- * CLR_ENTX set. So we switch on TX only, if TX was not running
- * _after_ the new descriptor was activated on the ring. This ensures
- * we will either find TX already stopped or we can be sure, there
- * will be a TX-complete interrupt even if the chip stopped doing
- * TX just after we found it still running. The ISR will then find
- * the non-empty ring and restart TX processing. The enclosing
- * spinlock is required to get serialization with the ISR right.
- */
-
-
- iobase = ndev->base_addr;
-
- spin_lock_irqsave(&idev->lock,flags);
-
- rd_activate(r, r->head);
- ring_put(r);
-
- if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
-
- outw(0, iobase+VLSI_PIO_IRENABLE);
-
- config = inw(iobase+VLSI_PIO_IRCFG);
- rmb();
- outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
- mb();
- outw(0, iobase+VLSI_PIO_PROMPT);
- wmb();
- }
-
- if (r->head == r->tail) {
+ if (netif_running(ndev))
netif_stop_queue(ndev);
- printk(KERN_DEBUG "%s: tx ring full - queue stopped: %d/%d\n",
- __FUNCTION__, r->head, r->tail);
-#if 0
- printk(KERN_INFO "%s: dump stalled entry %d: %u %02x %08x\n",
- __FUNCTION__, r->tail,
- r->hw[r->tail].rd_count,
- (unsigned)r->hw[r->tail].rd_status,
- r->hw[r->tail].rd_addr & 0xffffffff);
-#endif
- vlsi_reg_debug(iobase,__FUNCTION__);
- }
- spin_unlock_irqrestore(&idev->lock, flags);
+ vlsi_stop_hw(idev);
- dev_kfree_skb(skb);
+ /* now simply restart the whole thing */
- return 0;
-}
+ if (!idev->new_baud)
+ idev->new_baud = idev->baud; /* keep current baudrate */
+ if (vlsi_start_hw(idev))
+ printk(KERN_CRIT "%s: failed to restart hw - %s(%s) unusable!\n",
+ __FUNCTION__, idev->pdev->name, ndev->name);
+ else
+ netif_start_queue(ndev);
+}
static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
@@ -1097,14 +1530,20 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
u16 fifocnt;
int ret = 0;
- spin_lock_irqsave(&idev->lock,flags);
switch (cmd) {
case SIOCSBANDWIDTH:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
+ spin_lock_irqsave(&idev->lock, flags);
idev->new_baud = irq->ifr_baudrate;
+ /* when called from userland there might be a minor race window here
+ * if the stack tries to change speed concurrently - which would be
+ * pretty strange anyway with the userland having full control...
+ */
+ vlsi_set_baud_nolock(ndev);
+ spin_unlock_irqrestore(&idev->lock, flags);
break;
case SIOCSMEDIABUSY:
if (!capable(CAP_NET_ADMIN)) {
@@ -1116,7 +1555,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
case SIOCGRECEIVING:
/* the best we can do: check whether there are any bytes in rx fifo.
* The trustable window (in case some data arrives just afterwards)
- * may be as short as 1usec or so at 4Mbps - no way for future-telling.
+ * may be as short as 1usec or so at 4Mbps.
*/
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
@@ -1126,42 +1565,159 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
__FUNCTION__, cmd);
ret = -EOPNOTSUPP;
}
- spin_unlock_irqrestore(&idev->lock,flags);
return ret;
}
+/********************************************************/
+static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *ndev = dev_instance;
+ vlsi_irda_dev_t *idev = ndev->priv;
+ unsigned iobase;
+ u8 irintr;
+ int boguscount = 32;
+ unsigned got_act;
+ unsigned long flags;
+
+ got_act = 0;
+ iobase = ndev->base_addr;
+ do {
+ spin_lock_irqsave(&idev->lock,flags);
+ irintr = inb(iobase+VLSI_PIO_IRINTR);
+ rmb();
+ outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
+ break;
+
+ if (irintr&IRINTR_RPKTINT)
+ vlsi_rx_interrupt(ndev);
+
+ if (irintr&IRINTR_TPKTINT)
+ vlsi_tx_interrupt(ndev);
+
+ if (!(irintr & ~IRINTR_ACTIVITY))
+ break; /* done if only activity remaining */
+
+ if (irintr & ~(IRINTR_RPKTINT|IRINTR_TPKTINT|IRINTR_ACTIVITY)) {
+ printk(KERN_DEBUG "%s: IRINTR = %02x\n",
+ __FUNCTION__, (unsigned)irintr);
+ vlsi_reg_debug(iobase,__FUNCTION__);
+ }
+ } while (--boguscount > 0);
-int vlsi_irda_init(struct net_device *ndev)
+ if (boguscount <= 0)
+ printk(KERN_WARNING "%s: too much work in interrupt!\n", __FUNCTION__);
+
+}
+
+/********************************************************/
+
+static int vlsi_open(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
- struct pci_dev *pdev = idev->pdev;
- u8 byte;
+ int err = -EAGAIN;
+ char hwname[32];
+
+ if (pci_request_regions(idev->pdev, drivername)) {
+ printk(KERN_ERR "%s: io resource busy\n", __FUNCTION__);
+ goto errout;
+ }
+ ndev->base_addr = pci_resource_start(idev->pdev,0);
+ ndev->irq = idev->pdev->irq;
+
+ /* under some rare occasions the chip apparently comes up with
+ * IRQ's pending. We better w/c pending IRQ and disable them all
+ */
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+ if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
+ drivername, ndev)) {
+ printk(KERN_ERR "%s: couldn't get IRQ: %d\n",
+ __FUNCTION__, ndev->irq);
+ goto errout_io;
+ }
+
+ if ((err = vlsi_create_hwif(idev)) != 0)
+ goto errout_irq;
+
+ sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
+ idev->irlap = irlap_open(ndev,&idev->qos,hwname);
+ if (!idev->irlap)
+ goto errout_free_ring;
+
+ do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+
+ idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
+
+ if ((err = vlsi_start_hw(idev)) != 0)
+ goto errout_close_irlap;
+
+ netif_start_queue(ndev);
+
+ printk(KERN_INFO "%s: device %s operational\n", __FUNCTION__, ndev->name);
+
+ return 0;
+
+errout_close_irlap:
+ irlap_close(idev->irlap);
+errout_free_ring:
+ vlsi_destroy_hwif(idev);
+errout_irq:
+ free_irq(ndev->irq,ndev);
+errout_io:
+ pci_release_regions(idev->pdev);
+errout:
+ return err;
+}
+
+static int vlsi_close(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+
+ netif_stop_queue(ndev);
+
+ if (idev->irlap)
+ irlap_close(idev->irlap);
+ idev->irlap = NULL;
+
+ vlsi_stop_hw(idev);
+
+ vlsi_destroy_hwif(idev);
+
+ free_irq(ndev->irq,ndev);
+
+ pci_release_regions(idev->pdev);
+
+ printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);
+
+ return 0;
+}
+
+static int vlsi_irda_init(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = ndev->priv;
+ struct pci_dev *pdev = idev->pdev;
SET_MODULE_OWNER(ndev);
ndev->irq = pdev->irq;
ndev->base_addr = pci_resource_start(pdev,0);
- /* PCI busmastering - see include file for details! */
+ /* PCI busmastering
+ * see include file for details why we need these 2 masks, in this order!
+ */
- if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)) {
+ if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
+ || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
printk(KERN_ERR "%s: aborting due to PCI BM-DMA address limitations\n",
__FUNCTION__);
return -1;
}
- pci_set_master(pdev);
- pdev->dma_mask = DMA_MASK_MSTRPAGE;
- pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
-
- /* we don't use the legacy UART, disable its address decoding */
-
- pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
- byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
- pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
-
irda_init_max_qos_capabilies(&idev->qos);
@@ -1187,6 +1743,8 @@ int vlsi_irda_init(struct net_device *ndev)
ndev->get_stats = vlsi_get_stats;
ndev->hard_start_xmit = vlsi_hard_start_xmit;
ndev->do_ioctl = vlsi_ioctl;
+ ndev->tx_timeout = vlsi_tx_timeout;
+ ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */
return 0;
}
@@ -1203,6 +1761,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
goto out;
+ else
+ pdev->current_state = 0; /* hw must be running now */
printk(KERN_INFO "%s: IrDA PCI controller %s detected\n",
drivername, pdev->name);
@@ -1228,6 +1788,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ndev->priv = (void *) idev;
spin_lock_init(&idev->lock);
+ init_MUTEX(&idev->sem);
+ down(&idev->sem);
idev->pdev = pdev;
ndev->init = vlsi_irda_init;
strcpy(ndev->name,"irda%d");
@@ -1236,13 +1798,36 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
__FUNCTION__);
goto out_freedev;
}
+
+#ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *ent;
+
+ ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
+ if (!ent) {
+ printk(KERN_ERR "%s: failed to create proc entry\n", __FUNCTION__);
+ goto out_unregister;
+ }
+ ent->data = ndev;
+ ent->proc_fops = &vlsi_proc_fops;
+ ent->size = 0;
+ idev->proc_entry = ent;
+ }
+#endif
+
printk(KERN_INFO "%s: registered device %s\n", drivername, ndev->name);
pci_set_drvdata(pdev, ndev);
+ up(&idev->sem);
return 0;
+out_unregister:
+ up(&idev->sem);
+ unregister_netdev(ndev);
+ goto out_disable;
out_freedev:
+ up(&idev->sem);
kfree(ndev);
out_disable:
pci_disable_device(pdev);
@@ -1254,37 +1839,145 @@ out:
static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
- if (ndev) {
- printk(KERN_INFO "%s: unregister device %s\n",
- drivername, ndev->name);
-
- unregister_netdev(ndev);
- /* do not free - async completed by unregister_netdev()
- * ndev->destructor called (if present) when going to free
- */
-
- }
- else
+ if (!ndev) {
printk(KERN_CRIT "%s: lost netdevice?\n", drivername);
- pci_set_drvdata(pdev, NULL);
+ return;
+ }
+ idev = ndev->priv;
+ down(&idev->sem);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
- printk(KERN_INFO "%s: %s disabled\n", drivername, pdev->name);
+#ifdef CONFIG_PROC_FS
+ if (idev->proc_entry) {
+ remove_proc_entry(ndev->name, vlsi_proc_root);
+ idev->proc_entry = NULL;
+ }
+#endif
+ up(&idev->sem);
+
+ unregister_netdev(ndev);
+ /* do not free - async completed by unregister_netdev()
+ * ndev->destructor called (if present) when going to free
+ */
+
+ printk(KERN_INFO "%s: %s removed\n", drivername, pdev->name);
+}
+
+#ifdef CONFIG_PM
+
+/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs.
+ * Some of the Linux PCI-PM code however depends on this, for example in
+ * pci_set_power_state(). So we have to take care to perform the required
+ * operations on our own (particularly reflecting the pdev->current_state)
+ * otherwise we might get cheated by pci-pm.
+ */
+
+static int vlsi_irda_save_state(struct pci_dev *pdev, u32 state)
+{
+ if (state < 1 || state > 3 ) {
+ printk( KERN_ERR "%s - %s: invalid pm state request: %u\n",
+ __FUNCTION__, pdev->name, state);
+ return -1;
+ }
+ return 0;
}
static int vlsi_irda_suspend(struct pci_dev *pdev, u32 state)
{
- printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (state < 1 || state > 3 ) {
+ printk( KERN_ERR "%s - %s: invalid pm state request: %u\n",
+ __FUNCTION__, pdev->name, state);
+ return 0;
+ }
+ if (!ndev) {
+ printk(KERN_ERR "%s - %s: no netdevice \n", __FUNCTION__, pdev->name);
+ return 0;
+ }
+ idev = ndev->priv;
+ down(&idev->sem);
+ if (pdev->current_state != 0) { /* already suspended */
+ if (state > pdev->current_state) { /* simply go deeper */
+ pci_set_power_state(pdev,state);
+ pdev->current_state = state;
+ }
+ else
+ printk(KERN_ERR "%s - %s: invalid suspend request %u -> %u\n",
+ __FUNCTION__, pdev->name, pdev->current_state, state);
+ up(&idev->sem);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ vlsi_stop_hw(idev);
+ pci_save_state(pdev, idev->cfg_space);
+ if (!idev->new_baud)
+ /* remember speed settings to restore on resume */
+ idev->new_baud = idev->baud;
+ }
+
+ pci_set_power_state(pdev,state);
+ pdev->current_state = state;
+ idev->resume_ok = 1;
+ up(&idev->sem);
return 0;
}
static int vlsi_irda_resume(struct pci_dev *pdev)
{
- printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ printk(KERN_ERR "%s - %s: no netdevice \n", __FUNCTION__, pdev->name);
+ return 0;
+ }
+ idev = ndev->priv;
+ down(&idev->sem);
+ if (pdev->current_state == 0) {
+ up(&idev->sem);
+ printk(KERN_ERR "%s - %s: already resumed\n", __FUNCTION__, pdev->name);
+ return 0;
+ }
+
+ pci_set_power_state(pdev, 0);
+ pdev->current_state = 0;
+
+ if (!idev->resume_ok) {
+ /* should be obsolete now - but used to happen due to:
+ * - pci layer initially setting pdev->current_state = 4 (unknown)
+ * - pci layer did not walk the save_state-tree (might be APM problem)
+ * so we could not refuse to suspend from undefined state
+ * - vlsi_irda_suspend detected invalid state and refused to save
+ * configuration for resume - but was too late to stop suspending
+ * - vlsi_irda_resume got screwed when trying to resume from garbage
+ *
+ * now we explicitly set pdev->current_state = 0 after enabling the
+ * device and independently resume_ok should catch any garbage config.
+ */
+ printk(KERN_ERR "%s - hm, nothing to resume?\n", __FUNCTION__);
+ up(&idev->sem);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ pci_restore_state(pdev, idev->cfg_space);
+ vlsi_start_hw(idev);
+ netif_device_attach(ndev);
+ }
+ idev->resume_ok = 0;
+ up(&idev->sem);
return 0;
}
+#endif /* CONFIG_PM */
+
/*********************************************************/
static struct pci_driver vlsi_irda_driver = {
@@ -1292,13 +1985,20 @@ static struct pci_driver vlsi_irda_driver = {
.id_table = vlsi_irda_table,
.probe = vlsi_irda_probe,
.remove = __devexit_p(vlsi_irda_remove),
+#ifdef CONFIG_PM
+ .save_state = vlsi_irda_save_state,
.suspend = vlsi_irda_suspend,
.resume = vlsi_irda_resume,
+#endif
};
+#ifdef CONFIG_PROC_FS
+#define PROC_DIR ("driver/" DRIVER_NAME)
+#endif
+
static int __init vlsi_mod_init(void)
{
- int i;
+ int i, ret;
if (clksrc < 0 || clksrc > 3) {
printk(KERN_ERR "%s: invalid clksrc=%d\n", drivername, clksrc);
@@ -1324,14 +2024,27 @@ static int __init vlsi_mod_init(void)
sirpulse = !!sirpulse;
- return pci_module_init(&vlsi_irda_driver);
+#ifdef CONFIG_PROC_FS
+ vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, 0);
+ if (!vlsi_proc_root)
+ return -ENOMEM;
+#endif
+
+ ret = pci_module_init(&vlsi_irda_driver);
+
+#ifdef CONFIG_PROC_FS
+ if (ret)
+ remove_proc_entry(PROC_DIR, 0);
+#endif
+ return ret;
+
}
static void __exit vlsi_mod_exit(void)
{
pci_unregister_driver(&vlsi_irda_driver);
+ remove_proc_entry(PROC_DIR, 0);
}
module_init(vlsi_mod_init);
module_exit(vlsi_mod_exit);
-
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index b19e6a3e544a..79da9a0efa91 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -243,7 +243,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
/* figure out media and duplex from advertise and LPA values */
media = mii_nway_result(lpa & advertise);
- duplex = (media & (ADVERTISE_100FULL | ADVERTISE_10FULL)) ? 1 : 0;
+ duplex = (media & ADVERTISE_FULL) ? 1 : 0;
if (ok_to_print)
printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 8b6d3851fe3d..aabba8122077 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -8,7 +8,7 @@
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
- smc91c92_cs.c 1.113 2001/10/13 00:08:53
+ smc91c92_cs.c 1.2 2002/09/28 15:00:00
This driver contains code written by Donald Becker
(becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au),
@@ -37,12 +37,15 @@
#include <linux/crc32.h>
#include <asm/io.h>
#include <asm/system.h>
+#include <asm/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <pcmcia/version.h>
#include <pcmcia/cs_types.h>
@@ -88,6 +91,9 @@ static const char *version =
#define DEBUG(n, args...)
#endif
+#define DRV_NAME "smc91c92_cs"
+#define DRV_VERSION "1.2"
+
/*====================================================================*/
/* Operational parameter that usually are not changed. */
@@ -109,6 +115,7 @@ static dev_link_t *dev_list;
struct smc_private {
dev_link_t link;
struct net_device dev;
+ spinlock_t lock;
u_short manfid;
u_short cardid;
struct net_device_stats stats;
@@ -122,7 +129,7 @@ struct smc_private {
u_short media_status;
u_short fast_poll;
u_short link_status;
- int phy_id;
+ struct mii_if_info mii_if;
};
/* Special definitions for Megahertz multifunction cards */
@@ -292,9 +299,11 @@ static int s9k_config(struct net_device *dev, struct ifmap *map);
static void smc_set_xcvr(struct net_device *dev, int if_port);
static void smc_reset(struct net_device *dev);
static void media_check(u_long arg);
-static void mdio_sync(ioaddr_t addr);
-static int mdio_read(ioaddr_t addr, int phy_id, int loc);
-static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value);
+static void smc_mdio_sync(ioaddr_t addr);
+static int smc_mdio_read(struct net_device *dev, int phy_id, int loc);
+static void smc_mdio_write(struct net_device *dev, int phy_id, int loc, int value);
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int smc_link_ok(struct net_device *dev);
/*======================================================================
@@ -346,7 +355,7 @@ static dev_link_t *smc91c92_attach(void)
if (!smc) return NULL;
memset(smc, 0, sizeof(struct smc_private));
link = &smc->link; dev = &smc->dev;
-
+ spin_lock_init(&smc->lock);
link->release.function = &smc91c92_release;
link->release.data = (u_long)link;
link->io.NumPorts1 = 16;
@@ -369,6 +378,7 @@ static dev_link_t *smc91c92_attach(void)
dev->get_stats = &smc91c92_get_stats;
dev->set_config = &s9k_config;
dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &smc_ioctl;
ether_setup(dev);
dev->open = &smc91c92_open;
dev->stop = &smc91c92_close;
@@ -377,6 +387,12 @@ static dev_link_t *smc91c92_attach(void)
dev->watchdog_timeo = TX_TIMEOUT;
#endif
dev->priv = link->priv = link->irq.Instance = smc;
+
+ smc->mii_if.dev = dev;
+ smc->mii_if.mdio_read = smc_mdio_read;
+ smc->mii_if.mdio_write = smc_mdio_write;
+ smc->mii_if.phy_id_mask = 0x1f;
+ smc->mii_if.reg_num_mask = 0x1f;
/* Register with Card Services */
link->next = dev_list;
@@ -1044,10 +1060,10 @@ static void smc91c92_config(dev_link_t *link)
SMC_SELECT_BANK(3);
for (i = 0; i < 32; i++) {
- j = mdio_read(dev->base_addr + MGMT, i, 1);
+ j = smc_mdio_read(dev, i, 1);
if ((j != 0) && (j != 0xffff)) break;
}
- smc->phy_id = (i < 32) ? i : -1;
+ smc->mii_if.phy_id = (i < 32) ? i : -1;
if (i < 32) {
DEBUG(0, " MII transceiver at index %d, status %x.\n", i, j);
} else {
@@ -1190,7 +1206,7 @@ static int smc91c92_event(event_t event, int priority,
#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
#define MDIO_DATA_READ 0x02
-static void mdio_sync(ioaddr_t addr)
+static void smc_mdio_sync(ioaddr_t addr)
{
int bits;
for (bits = 0; bits < 32; bits++) {
@@ -1199,12 +1215,13 @@ static void mdio_sync(ioaddr_t addr)
}
}
-static int mdio_read(ioaddr_t addr, int phy_id, int loc)
+static int smc_mdio_read(struct net_device *dev, int phy_id, int loc)
{
+ ioaddr_t addr = dev->base_addr + MGMT;
u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
int i, retval = 0;
- mdio_sync(addr);
+ smc_mdio_sync(addr);
for (i = 13; i >= 0; i--) {
int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
outb(dat, addr);
@@ -1218,12 +1235,13 @@ static int mdio_read(ioaddr_t addr, int phy_id, int loc)
return (retval>>1) & 0xffff;
}
-static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value)
+static void smc_mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
+ ioaddr_t addr = dev->base_addr + MGMT;
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i;
- mdio_sync(addr);
+ smc_mdio_sync(addr);
for (i = 31; i >= 0; i--) {
int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
outb(dat, addr);
@@ -1777,6 +1795,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
static void set_rx_mode(struct net_device *dev)
{
ioaddr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = dev->priv;
u_int multicast_table[ 2 ] = { 0, };
unsigned long flags;
u_short rx_cfg_setting;
@@ -1795,16 +1814,15 @@ static void set_rx_mode(struct net_device *dev)
}
/* Load MC table and Rx setting into the chip without interrupts. */
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
outl(multicast_table[0], ioaddr + MULTICAST0);
outl(multicast_table[1], ioaddr + MULTICAST4);
SMC_SELECT_BANK(0);
outw(rx_cfg_setting, ioaddr + RCR);
SMC_SELECT_BANK(2);
- restore_flags(flags);
-
+ spin_unlock_irqrestore(&smc->lock, flags);
+
return;
}
@@ -1917,11 +1935,11 @@ static void smc_reset(struct net_device *dev)
SMC_SELECT_BANK(3);
/* Reset MII */
- mdio_write(ioaddr + MGMT, smc->phy_id, 0, 0x8000);
+ smc_mdio_write(dev, smc->mii_if.phy_id, 0, 0x8000);
/* Restart MII autonegotiation */
- mdio_write(ioaddr + MGMT, smc->phy_id, 0, 0x0000);
- mdio_write(ioaddr + MGMT, smc->phy_id, 0, 0x1200);
+ smc_mdio_write(dev, smc->mii_if.phy_id, 0, 0x0000);
+ smc_mdio_write(dev, smc->mii_if.phy_id, 0, 0x1200);
}
/* Enable interrupts. */
@@ -1942,7 +1960,6 @@ static void media_check(u_long arg)
struct net_device *dev = &smc->dev;
ioaddr_t ioaddr = dev->base_addr;
u_short i, media, saved_bank;
- ioaddr_t mii_addr = dev->base_addr + MGMT;
u_short link;
saved_bank = inw(ioaddr + BANK_SELECT);
@@ -1974,20 +1991,20 @@ static void media_check(u_long arg)
}
if (smc->cfg & CFG_MII_SELECT) {
- if (smc->phy_id < 0)
+ if (smc->mii_if.phy_id < 0)
goto reschedule;
SMC_SELECT_BANK(3);
- link = mdio_read(mii_addr, smc->phy_id, 1);
+ link = smc_mdio_read(dev, smc->mii_if.phy_id, 1);
if (!link || (link == 0xffff)) {
printk(KERN_INFO "%s: MII is missing!\n", dev->name);
- smc->phy_id = -1;
+ smc->mii_if.phy_id = -1;
goto reschedule;
}
link &= 0x0004;
if (link != smc->link_status) {
- u_short p = mdio_read(mii_addr, smc->phy_id, 5);
+ u_short p = smc_mdio_read(dev, smc->mii_if.phy_id, 5);
printk(KERN_INFO "%s: %s link beat\n", dev->name,
(link) ? "found" : "lost");
if (link) {
@@ -2043,6 +2060,191 @@ reschedule:
SMC_SELECT_BANK(saved_bank);
}
+static int smc_link_ok(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = dev->priv;
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ return mii_link_ok(&smc->mii_if);
+ } else {
+ SMC_SELECT_BANK(0);
+ return inw(ioaddr + EPH) & EPH_LINK_OK;
+ }
+}
+
+static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
+
+ SMC_SELECT_BANK(1);
+ tmp = inw(ioaddr + CONFIG);
+ ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->speed = SPEED_10;
+ ecmd->phy_address = ioaddr + MGMT;
+
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	u16 tmp;
+	ioaddr_t ioaddr = dev->base_addr;
+
+	if (ecmd->speed != SPEED_10)
+		return -EINVAL;	/* chip is 10Mbit only */
+	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+		return -EINVAL;
+	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
+		return -EINVAL;
+	if (ecmd->transceiver != XCVR_INTERNAL)
+		return -EINVAL;
+
+	if (ecmd->port == PORT_AUI)
+		smc_set_xcvr(dev, 1);
+	else
+		smc_set_xcvr(dev, 0);
+
+	SMC_SELECT_BANK(0);
+	tmp = inw(ioaddr + TCR);
+	if (ecmd->duplex == DUPLEX_FULL)
+		tmp |= TCR_FDUPLX;
+	else
+		tmp &= ~TCR_FDUPLX;
+	outw(tmp, ioaddr + TCR);	/* outw(value, port) — args were swapped */
+
+	return 0;
+}
+
+static int smc_ethtool_ioctl (struct net_device *dev, void *useraddr)
+{
+ u32 ethcmd;
+ struct smc_private *smc = dev->priv;
+
+ if (get_user(ethcmd, (u32 *)useraddr))
+ return -EFAULT;
+
+ switch (ethcmd) {
+
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* get settings */
+ case ETHTOOL_GSET: {
+ int ret;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ spin_lock_irq(&smc->lock);
+ if (smc->cfg & CFG_MII_SELECT)
+ ret = mii_ethtool_gset(&smc->mii_if, &ecmd);
+ else
+ ret = smc_netdev_get_ecmd(dev, &ecmd);
+ spin_unlock_irq(&smc->lock);
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return ret;
+ }
+
+ /* set settings */
+ case ETHTOOL_SSET: {
+ int ret;
+ struct ethtool_cmd ecmd;
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
+ spin_lock_irq(&smc->lock);
+ if (smc->cfg & CFG_MII_SELECT)
+ ret = mii_ethtool_sset(&smc->mii_if, &ecmd);
+ else
+ ret = smc_netdev_set_ecmd(dev, &ecmd);
+ spin_unlock_irq(&smc->lock);
+ return ret;
+ }
+
+ /* get link status */
+ case ETHTOOL_GLINK: {
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+ spin_lock_irq(&smc->lock);
+ edata.data = smc_link_ok(dev);
+ spin_unlock_irq(&smc->lock);
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+
+#ifdef PCMCIA_DEBUG
+ /* get message-level */
+ case ETHTOOL_GMSGLVL: {
+ struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+ edata.data = pc_debug;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* set message-level */
+ case ETHTOOL_SMSGLVL: {
+ struct ethtool_value edata;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ pc_debug = edata.data;
+ return 0;
+ }
+#endif
+ /* restart autonegotiation */
+ case ETHTOOL_NWAY_RST: {
+ if (smc->cfg & CFG_MII_SELECT)
+ return mii_nway_restart(&smc->mii_if);
+ else
+ return -EOPNOTSUPP;
+ }
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct smc_private *smc = dev->priv;
+ struct mii_ioctl_data *mii;
+ int rc = 0;
+
+ mii = (struct mii_ioctl_data *) &rq->ifr_data;
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ rc = smc_ethtool_ioctl(dev, (void *) rq->ifr_data);
+ break;
+
+ default:
+ spin_lock_irq(&smc->lock);
+ rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
+ spin_unlock_irq(&smc->lock);
+ break;
+ }
+
+ return rc;
+}
+
+
/*====================================================================*/
static int __init init_smc91c92_cs(void)
@@ -2069,3 +2271,4 @@ static void __exit exit_smc91c92_cs(void)
module_init(init_smc91c92_cs);
module_exit(exit_smc91c92_cs);
+
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index bcaeeb7e893b..1123956f675a 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -253,7 +253,7 @@ static int __init tms_pci_init (void)
return 0;
}
-static void __devexit tms_pci_rmmod (void)
+static void __exit tms_pci_rmmod (void)
{
pci_unregister_driver (&tms_pci_driver);
}
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 425d172a8381..3396145d0246 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -107,7 +107,7 @@ struct uctrl_status {
u8 speaker_volume; /* 0x23 */
u8 control_tft_brightness; /* 0x24 */
u8 control_kbd_repeat_delay; /* 0x28 */
- u8 control_kbd_repeat_rate; /* 0x29 */
+ u8 control_kbd_repeat_period; /* 0x29 */
u8 control_screen_contrast; /* 0x2F */
};
diff --git a/drivers/scsi/Config.in b/drivers/scsi/Config.in
index 0d96c68c4c65..8ad12993d81a 100644
--- a/drivers/scsi/Config.in
+++ b/drivers/scsi/Config.in
@@ -57,11 +57,6 @@ fi
source drivers/scsi/aic7xxx/Config.in
if [ "$CONFIG_SCSI_AIC7XXX" != "y" ]; then
dep_tristate 'Old Adaptec AIC7xxx support' CONFIG_SCSI_AIC7XXX_OLD $CONFIG_SCSI
- if [ "$CONFIG_SCSI_AIC7XXX_OLD" != "n" ]; then
- bool ' Enable Tagged Command Queueing (TCQ) by default' CONFIG_AIC7XXX_OLD_TCQ_ON_BY_DEFAULT
- int ' Maximum number of TCQ commands per device' CONFIG_AIC7XXX_OLD_CMDS_PER_DEVICE 8
- bool ' Collect statistics to report in /proc' CONFIG_AIC7XXX_OLD_PROC_STATS
- fi
fi
# All the I2O code and drivers do not seem to be 64bit safe.
if [ "$CONFIG_X86_64" != "y" ]; then
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 24ea90a7db77..a83d865e925d 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -869,17 +869,19 @@ static int esp_host_info(struct NCR_ESP *esp, char *ptr, off_t offset, int len)
for(i = 0; i < 15; i++) {
if(esp->targets_present & (1 << i)) {
Scsi_Device *SDptr = esp->ehost->host_queue;
+ struct esp_device *esp_dev;
while((SDptr->host != esp->ehost) &&
(SDptr->id != i) &&
(SDptr->next))
SDptr = SDptr->next;
+ esp_dev = SDptr->hostdata;
copy_info(&info, "%d\t\t", i);
copy_info(&info, "%08lx\t", esp->config3[i]);
- copy_info(&info, "[%02lx,%02lx]\t\t\t", SDptr->sync_max_offset,
- SDptr->sync_min_period);
- copy_info(&info, "%s\n", SDptr->disconnect ? "yes" : "no");
+ copy_info(&info, "[%02lx,%02lx]\t\t\t", esp_dev->sync_max_offset,
+ esp_dev->sync_min_period);
+ copy_info(&info, "%s\n", esp_dev->disconnect ? "yes" : "no");
}
}
@@ -1012,6 +1014,7 @@ static inline void build_sync_nego_msg(struct NCR_ESP *esp, int period, int offs
static void esp_exec_cmd(struct NCR_ESP *esp)
{
struct ESP_regs *eregs = esp->eregs;
+ struct esp_device *esp_dev;
Scsi_Cmnd *SCptr;
Scsi_Device *SDptr;
volatile unchar *cmdp = esp->esp_command;
@@ -1033,9 +1036,29 @@ static void esp_exec_cmd(struct NCR_ESP *esp)
panic("esp: esp_exec_cmd and issue queue is NULL");
SDptr = SCptr->device;
+ esp_dev = SDptr->hostdata;
lun = SCptr->lun;
target = SCptr->target;
+ /*
+ * If esp_dev == NULL then we need to allocate a struct for our data
+ */
+ if (!esp_dev) {
+ esp_dev = kmalloc(sizeof(struct esp_device), GFP_ATOMIC);
+ if (!esp_dev) {
+ /* We're SOL. Print a message and bail */
+ printk(KERN_WARNING "esp: no mem for esp_device %d/%d\n",
+ target, lun);
+ esp->current_SC = NULL;
+ SCptr->result = DID_ERROR << 16;
+ SCptr->done(SCptr);
+ return;
+ }
+ memset(esp_dev, 0, sizeof(struct esp_device));
+ SDptr->hostdata = esp_dev;
+ }
+
+
esp->snip = 0;
esp->msgout_len = 0;
@@ -1071,12 +1094,12 @@ static void esp_exec_cmd(struct NCR_ESP *esp)
* selections should not confuse SCSI-1 we hope.
*/
- if(SDptr->sync) {
+ if(esp_dev->sync) {
/* this targets sync is known */
#ifdef CONFIG_SCSI_MAC_ESP
do_sync_known:
#endif
- if(SDptr->disconnect)
+ if(esp_dev->disconnect)
*cmdp++ = IDENTIFY(1, lun);
else
*cmdp++ = IDENTIFY(0, lun);
@@ -1088,7 +1111,7 @@ do_sync_known:
the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
esp_advance_phase(SCptr, in_slct_norm);
}
- } else if(!(esp->targets_present & (1<<target)) || !(SDptr->disconnect)) {
+ } else if(!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
/* After the bootup SCSI code sends both the
* TEST_UNIT_READY and INQUIRY commands we want
* to at least attempt allowing the device to
@@ -1096,8 +1119,8 @@ do_sync_known:
*/
ESPMISC(("esp: Selecting device for first time. target=%d "
"lun=%d\n", target, SCptr->lun));
- if(!SDptr->borken && !SDptr->disconnect)
- SDptr->disconnect = 1;
+ if(!SDptr->borken && !esp_dev->disconnect)
+ esp_dev->disconnect = 1;
*cmdp++ = IDENTIFY(0, lun);
esp->prevmsgout = NOP;
@@ -1105,8 +1128,8 @@ do_sync_known:
the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
/* Take no chances... */
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
} else {
int toshiba_cdrom_hwbug_wkaround = 0;
@@ -1115,10 +1138,10 @@ do_sync_known:
* Macintosh. Well, maybe later when we figured out how to
* do DMA on the machines that support it ...
*/
- SDptr->disconnect = 1;
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
- SDptr->sync = 1;
+ esp_dev->disconnect = 1;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync = 1;
esp->snip = 0;
goto do_sync_known;
#endif
@@ -1140,7 +1163,7 @@ do_sync_known:
} else {
build_sync_nego_msg(esp, 0, 0);
}
- SDptr->sync = 1;
+ esp_dev->sync = 1;
esp->snip = 1;
/* A fix for broken SCSI1 targets, when they disconnect
@@ -1170,7 +1193,7 @@ do_sync_known:
toshiba_cdrom_hwbug_wkaround || SDptr->borken) {
ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
"lun %d\n", esp->esp_id, SCptr->target, SCptr->lun));
- SDptr->disconnect = 0;
+ esp_dev->disconnect = 0;
*cmdp++ = IDENTIFY(0, lun);
} else {
*cmdp++ = IDENTIFY(1, lun);
@@ -1192,13 +1215,13 @@ do_sync_known:
*cmdp++ = SCptr->cmnd[i];
esp_write(eregs->esp_busid, (target & 7));
- if (esp->prev_soff != SDptr->sync_max_offset ||
- esp->prev_stp != SDptr->sync_min_period ||
+ if (esp->prev_soff != esp_dev->sync_max_offset ||
+ esp->prev_stp != esp_dev->sync_min_period ||
(esp->erev > esp100a &&
esp->prev_cfg3 != esp->config3[target])) {
- esp->prev_soff = SDptr->sync_max_offset;
+ esp->prev_soff = esp_dev->sync_max_offset;
esp_write(eregs->esp_soff, esp->prev_soff);
- esp->prev_stp = SDptr->sync_min_period;
+ esp->prev_stp = esp_dev->sync_min_period;
esp_write(eregs->esp_stp, esp->prev_stp);
if(esp->erev > esp100a) {
esp->prev_cfg3 = esp->config3[target];
@@ -1697,14 +1720,15 @@ static inline void esp_connect(struct NCR_ESP *esp, struct ESP_regs *eregs,
Scsi_Cmnd *sp)
{
Scsi_Device *dp = sp->device;
+ struct esp_device *esp_dev = dp->hostdata;
- if(esp->prev_soff != dp->sync_max_offset ||
- esp->prev_stp != dp->sync_min_period ||
+ if(esp->prev_soff != esp_dev->sync_max_offset ||
+ esp->prev_stp != esp_dev->sync_min_period ||
(esp->erev > esp100a &&
esp->prev_cfg3 != esp->config3[sp->target])) {
- esp->prev_soff = dp->sync_max_offset;
+ esp->prev_soff = esp_dev->sync_max_offset;
esp_write(eregs->esp_soff, esp->prev_soff);
- esp->prev_stp = dp->sync_min_period;
+ esp->prev_stp = esp_dev->sync_min_period;
esp_write(eregs->esp_stp, esp->prev_stp);
if(esp->erev > esp100a) {
esp->prev_cfg3 = esp->config3[sp->target];
@@ -1837,8 +1861,8 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
else
ESPDATA(( /*"\n"*/ "\r"));
#endif
- save_flags(flags);
#if 0
+ save_flags(flags);
cli();
#endif
if(thisphase == in_datain) {
@@ -1929,7 +1953,9 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
if (hmuch == 0)
ESPDATA(("done! \n"));
+#if 0
restore_flags(flags);
+#endif
/* check new bus phase */
if (newphase != oldphase && i < esp->current_transfer_size) {
@@ -1981,6 +2007,7 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
struct ESP_regs *eregs)
{
Scsi_Cmnd *SCptr = esp->current_SC;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
if(esp->dma_led_off)
@@ -2031,7 +2058,7 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
ecount, fifocnt));
/* If we were in synchronous mode, check for peculiarities. */
- if(SCptr->device->sync_max_offset)
+ if(esp_dev->sync_max_offset)
bogus_data = esp100_sync_hwbug(esp, eregs, SCptr, fifocnt);
else
esp_cmd(esp, eregs, ESP_CMD_FLUSH);
@@ -2102,7 +2129,7 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id,
SCptr->target));
SCptr->device->borken = 1;
- SCptr->device->sync = 0;
+ esp_dev->sync = 0;
bytes_sent = 0;
}
@@ -2201,6 +2228,7 @@ static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
esp->msgout_len = 0;
esp->prevmsgout = NOP;
if(esp->prevmsgin == COMMAND_COMPLETE) {
+ struct esp_device *esp_dev = SCptr->device->hostdata;
/* Normal end of nexus. */
if(esp->disconnected_SC)
esp_cmd(esp, eregs, ESP_CMD_ESEL);
@@ -2208,8 +2236,7 @@ static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
if(SCptr->SCp.Status != GOOD &&
SCptr->SCp.Status != CONDITION_GOOD &&
((1<<SCptr->target) & esp->targets_present) &&
- SCptr->device->sync &&
- SCptr->device->sync_max_offset) {
+ esp_dev->sync && esp_dev->sync_max_offset) {
/* SCSI standard says that the synchronous capabilities
* should be renegotiated at this point. Most likely
* we are about to request sense from this target
@@ -2227,7 +2254,7 @@ static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
* loading up a tape.
*/
if(esp_should_clear_sync(SCptr) != 0)
- SCptr->device->sync = 0;
+ esp_dev->sync = 0;
}
ESPDISC(("F<%02x,%02x>", SCptr->target, SCptr->lun));
esp_done(esp, ((SCptr->SCp.Status & 0xff) |
@@ -2476,7 +2503,7 @@ static int esp_disconnect_amidst_phases(struct NCR_ESP *esp,
struct ESP_regs *eregs)
{
Scsi_Cmnd *sp = esp->current_SC;
- Scsi_Device *dp = sp->device;
+ struct esp_device *esp_dev = sp->device->hostdata;
/* This means real problems if we see this
* here. Unless we were actually trying
@@ -2500,9 +2527,9 @@ static int esp_disconnect_amidst_phases(struct NCR_ESP *esp,
case BUS_DEVICE_RESET:
ESPLOG(("device reset successful\n"));
- dp->sync_max_offset = 0;
- dp->sync_min_period = 0;
- dp->sync = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync = 0;
esp_advance_phase(sp, in_resetdev);
esp_done(esp, (DID_RESET << 16));
break;
@@ -2575,7 +2602,7 @@ static int esp_do_phase_determine(struct NCR_ESP *esp,
static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
{
Scsi_Cmnd *SCptr = esp->current_SC;
- Scsi_Device *SDptr = SCptr->device;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
int cmd_bytes_sent, fcnt;
fcnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
@@ -2604,7 +2631,7 @@ static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
/* What if the target ignores the sdtr? */
if(esp->snip)
- SDptr->sync = 1;
+ esp_dev->sync = 1;
/* See how far, if at all, we got in getting
* the information out to the target.
@@ -2693,7 +2720,7 @@ static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
*/
if(!fcnt && /* Fifo is empty and... */
/* either we are not doing synchronous transfers or... */
- (!SDptr->sync_max_offset ||
+ (!esp_dev->sync_max_offset ||
/* We are not going into data in phase. */
((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
esp_cmd(esp, eregs, ESP_CMD_FLUSH); /* flush is safe */
@@ -2754,9 +2781,9 @@ static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
esp->snip = 0;
ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
"lun %d\n", esp->esp_id, SCptr->target, SCptr->lun));
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
- SDptr->sync = 1; /* so we don't negotiate again */
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync = 1; /* so we don't negotiate again */
/* Run the command again, this time though we
* won't try to negotiate for synchronous transfers.
@@ -2904,16 +2931,16 @@ static int check_singlebyte_msg(struct NCR_ESP *esp,
case MESSAGE_REJECT:
ESPMISC(("msg reject, "));
if(esp->prevmsgout == EXTENDED_MESSAGE) {
- Scsi_Device *SDptr = esp->current_SC->device;
+ struct esp_device *esp_dev = esp->current_SC->device->hostdata;
/* Doesn't look like this target can
* do synchronous or WIDE transfers.
*/
ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
- SDptr->sync = 1;
- SDptr->wide = 1;
- SDptr->sync_min_period = 0;
- SDptr->sync_max_offset = 0;
+ esp_dev->sync = 1;
+ esp_dev->wide = 1;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync_max_offset = 0;
return 0;
} else {
ESPMISC(("not sync nego, sending ABORT\n"));
@@ -2929,13 +2956,13 @@ static int check_singlebyte_msg(struct NCR_ESP *esp,
*/
static int target_with_ants_in_pants(struct NCR_ESP *esp,
Scsi_Cmnd *SCptr,
- Scsi_Device *SDptr)
+ struct esp_device *esp_dev)
{
- if(SDptr->sync || SDptr->borken) {
+ if(esp_dev->sync || SCptr->device->borken) {
/* sorry, no can do */
ESPSDTR(("forcing to async, "));
build_sync_nego_msg(esp, 0, 0);
- SDptr->sync = 1;
+ esp_dev->sync = 1;
esp->snip = 1;
ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
esp_advance_phase(SCptr, in_the_dark);
@@ -2984,7 +3011,7 @@ static int check_multibyte_msg(struct NCR_ESP *esp,
struct ESP_regs *eregs)
{
Scsi_Cmnd *SCptr = esp->current_SC;
- Scsi_Device *SDptr = SCptr->device;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
unchar regval = 0;
int message_out = 0;
@@ -3000,7 +3027,7 @@ static int check_multibyte_msg(struct NCR_ESP *esp,
/* Target negotiates first! */
ESPSDTR(("target jumps the gun, "));
message_out = EXTENDED_MESSAGE; /* we must respond */
- rval = target_with_ants_in_pants(esp, SCptr, SDptr);
+ rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
if(rval)
return rval;
}
@@ -3045,8 +3072,8 @@ static int check_multibyte_msg(struct NCR_ESP *esp,
if(offset) {
unchar bit;
- SDptr->sync_min_period = (regval & 0x1f);
- SDptr->sync_max_offset = (offset | esp->radelay);
+ esp_dev->sync_min_period = (regval & 0x1f);
+ esp_dev->sync_max_offset = (offset | esp->radelay);
if(esp->erev > esp236) {
if(esp->erev == fas100a)
bit = ESP_CONFIG3_FAST;
@@ -3059,24 +3086,24 @@ static int check_multibyte_msg(struct NCR_ESP *esp,
esp->prev_cfg3 = esp->config3[SCptr->target];
esp_write(eregs->esp_cfg3, esp->prev_cfg3);
}
- esp->prev_soff = SDptr->sync_min_period;
+ esp->prev_soff = esp_dev->sync_min_period;
esp_write(eregs->esp_soff, esp->prev_soff);
- esp->prev_stp = SDptr->sync_max_offset;
+ esp->prev_stp = esp_dev->sync_max_offset;
esp_write(eregs->esp_stp, esp->prev_stp);
ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
- SDptr->sync_max_offset,
- SDptr->sync_min_period,
+ esp_dev->sync_max_offset,
+ esp_dev->sync_min_period,
esp->config3[SCptr->target]));
esp->snip = 0;
- } else if(SDptr->sync_max_offset) {
+ } else if(esp_dev->sync_max_offset) {
unchar bit;
/* back to async mode */
ESPSDTR(("unaccaptable sync nego, forcing async\n"));
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
esp->prev_soff = 0;
esp_write(eregs->esp_soff, 0);
esp->prev_stp = 0;
@@ -3095,7 +3122,7 @@ static int check_multibyte_msg(struct NCR_ESP *esp,
sync_report(esp);
ESPSDTR(("chk multibyte msg: sync is known, "));
- SDptr->sync = 1;
+ esp_dev->sync = 1;
if(message_out) {
ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
@@ -3318,7 +3345,7 @@ static int esp_do_msgoutdone(struct NCR_ESP *esp,
default:
if(!fcount(esp, eregs) &&
- !(esp->current_SC->device->sync_max_offset))
+ !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
esp_cmd(esp, eregs, ESP_CMD_FLUSH);
break;
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
index e481ad366a09..d8e161840e1a 100644
--- a/drivers/scsi/NCR53C9x.h
+++ b/drivers/scsi/NCR53C9x.h
@@ -283,6 +283,17 @@ enum esp_rev {
espunknown = 0x09
};
+/* We allocate one of these for each scsi device and attach it to
+ * SDptr->hostdata for use in the driver
+ */
+struct esp_device {
+ unsigned char sync_min_period;
+ unsigned char sync_max_offset;
+ unsigned sync:1;
+ unsigned wide:1;
+ unsigned disconnect:1;
+};
+
/* We get one of these for each ESP probed. */
struct NCR_ESP {
struct NCR_ESP *next; /* Next ESP on probed or NULL */
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index f2eae55861ec..f06776fd8571 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -253,7 +253,7 @@
#include <linux/config.h> /* for CONFIG_PCI */
-#define AIC7XXX_C_VERSION "5.2.4"
+#define AIC7XXX_C_VERSION "5.2.5"
#define NUMBER(arr) (sizeof(arr) / sizeof(arr[0]))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
@@ -274,45 +274,11 @@
# define MMAPIO
#endif
-# if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
-# define cpuid smp_processor_id()
-# define DRIVER_LOCK_INIT \
- spin_lock_init(&p->spin_lock);
-# define DRIVER_LOCK \
- if(!p->cpu_lock_count[cpuid]) { \
- spin_lock_irqsave(&p->spin_lock, cpu_flags); \
- p->cpu_lock_count[cpuid]++; \
- } else { \
- p->cpu_lock_count[cpuid]++; \
- }
-# define DRIVER_UNLOCK \
- if(--p->cpu_lock_count[cpuid] == 0) \
- spin_unlock_irqrestore(&p->spin_lock, cpu_flags);
-# else
-# define DRIVER_LOCK_INIT
-# define DRIVER_LOCK
-# define DRIVER_UNLOCK
-# endif
-
/*
* You can try raising me if tagged queueing is enabled, or lowering
* me if you only have 4 SCBs.
*/
-#ifdef CONFIG_AIC7XXX_OLD_CMDS_PER_DEVICE
-#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_OLD_CMDS_PER_DEVICE
-#else
#define AIC7XXX_CMDS_PER_DEVICE 32
-#endif
-
-/*
- * Control collection of SCSI transfer statistics for the /proc filesystem.
- *
- * NOTE: Do NOT enable this when running on kernels version 1.2.x and below.
- * NOTE: This does affect performance since it has to maintain statistics.
- */
-#ifdef CONFIG_AIC7XXX_OLD_PROC_STATS
-#define AIC7XXX_PROC_STATS
-#endif
/*
* *** Determining commands per LUN ***
@@ -334,13 +300,8 @@ typedef struct
* Make a define that will tell the driver not to use tagged queueing
* by default.
*/
-#ifdef CONFIG_AIC7XXX_OLD_TCQ_ON_BY_DEFAULT
#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0}
-#else
-#define DEFAULT_TAG_COMMANDS {255, 255, 255, 255, 255, 255, 255, 255,\
- 255, 255, 255, 255, 255, 255, 255, 255}
-#endif
/*
* Modify this as you see fit for your system. By setting tag_commands
@@ -640,6 +601,11 @@ struct seeprom_config {
#define aic7xxx_mapping(cmd) ((cmd)->SCp.phase)
/*
+ * Get out private data area from a scsi cmd pointer
+ */
+#define AIC_DEV(cmd) ((struct aic_dev_data *)(cmd)->device->hostdata)
+
+/*
* So we can keep track of our host structs
*/
static struct aic7xxx_host *first_aic7xxx = NULL;
@@ -718,7 +684,8 @@ typedef enum {
SCB_MSGOUT_WDTR,
SCB_QUEUED_ABORT = 0x1000,
SCB_QUEUED_FOR_DONE = 0x2000,
- SCB_WAS_BUSY = 0x4000
+ SCB_WAS_BUSY = 0x4000,
+ SCB_QUEUE_FULL = 0x8000
} scb_flag_type;
typedef enum {
@@ -901,20 +868,52 @@ struct target_cmd {
#define AHC_TRANS_USER 0x0008
#define AHC_TRANS_QUITE 0x0010
typedef struct {
- unsigned char cur_width;
- unsigned char goal_width;
- unsigned char cur_period;
- unsigned char goal_period;
- unsigned char cur_offset;
- unsigned char goal_offset;
- unsigned char cur_options;
- unsigned char goal_options;
- unsigned char user_width;
- unsigned char user_period;
- unsigned char user_offset;
- unsigned char user_options;
+ unsigned char width;
+ unsigned char period;
+ unsigned char offset;
+ unsigned char options;
} transinfo_type;
+struct aic_dev_data {
+ volatile scb_queue_type delayed_scbs;
+ volatile unsigned short temp_q_depth;
+ unsigned short max_q_depth;
+ volatile unsigned char active_cmds;
+ unsigned char cmds_sent;
+ /*
+ * Statistics Kept:
+ *
+ * Total Xfers (count for each command that has a data xfer),
+ * broken down by reads && writes.
+ *
+ * Further sorted into a few bins for keeping tabs on how many commands
+ * we get of various sizes.
+ *
+ */
+ long w_total; /* total writes */
+ long r_total; /* total reads */
+ long w_bins[6]; /* binned write */
+ long r_bins[6]; /* binned reads */
+ transinfo_type cur;
+ transinfo_type goal;
+#define BUS_DEVICE_RESET_PENDING 0x01
+#define DEVICE_RESET_DELAY 0x02
+#define DEVICE_PRINT_DTR 0x04
+#define DEVICE_WAS_BUSY 0x08
+#define DEVICE_DTR_SCANNED 0x10
+#define DEVICE_SCSI_3 0x20
+ volatile unsigned char flags;
+ unsigned needppr:1;
+ unsigned needppr_copy:1;
+ unsigned needsdtr:1;
+ unsigned needsdtr_copy:1;
+ unsigned needwdtr:1;
+ unsigned needwdtr_copy:1;
+ unsigned dtr_pending:1;
+ struct scsi_device *SDptr;
+ struct list_head list;
+};
+
/*
* Define a structure used for each host adapter. Note, in order to avoid
* problems with architectures I can't test on (because I don't have one,
@@ -941,10 +940,6 @@ struct aic7xxx_host {
unsigned long isr_count; /* Interrupt count */
unsigned long spurious_int;
scb_data_type *scb_data;
- volatile unsigned short needppr;
- volatile unsigned short needsdtr;
- volatile unsigned short needwdtr;
- volatile unsigned short dtr_pending;
struct aic7xxx_cmd_queue {
Scsi_Cmnd *head;
Scsi_Cmnd *tail;
@@ -954,9 +949,6 @@ struct aic7xxx_host {
* Things read/written on nearly every entry into aic7xxx_queue()
*/
volatile scb_queue_type waiting_scbs;
- unsigned short discenable; /* Targets allowed to disconnect */
- unsigned short tagenable; /* Targets using tagged I/O */
- unsigned short orderedtag; /* Ordered Q tags allowed */
unsigned char unpause; /* unpause value for HCNTRL */
unsigned char pause; /* pause value for HCNTRL */
volatile unsigned char qoutfifonext;
@@ -967,33 +959,13 @@ struct aic7xxx_host {
volatile unsigned char *qoutfifo;
volatile unsigned char *qinfifo;
-#define DEVICE_PRESENT 0x01
-#define BUS_DEVICE_RESET_PENDING 0x02
-#define DEVICE_RESET_DELAY 0x04
-#define DEVICE_PRINT_DTR 0x08
-#define DEVICE_WAS_BUSY 0x10
-#define DEVICE_SCSI_3 0x20
-#define DEVICE_DTR_SCANNED 0x40
- volatile unsigned char dev_flags[MAX_TARGETS];
- volatile unsigned char dev_active_cmds[MAX_TARGETS];
- volatile unsigned short dev_temp_queue_depth[MAX_TARGETS];
- unsigned char dev_commands_sent[MAX_TARGETS];
-
- unsigned int dev_timer_active; /* Which devs have a timer set */
- struct timer_list dev_timer;
- unsigned long dev_expires[MAX_TARGETS];
-
- spinlock_t spin_lock;
- volatile unsigned char cpu_lock_count[NR_CPUS];
-
unsigned char dev_last_queue_full[MAX_TARGETS];
unsigned char dev_last_queue_full_count[MAX_TARGETS];
- unsigned char dev_lun_queue_depth[MAX_TARGETS];
- unsigned short dev_scbs_needed[MAX_TARGETS];
- unsigned short dev_max_queue_depth[MAX_TARGETS];
-
- volatile scb_queue_type delayed_scbs[MAX_TARGETS];
-
+ unsigned short ultraenb; /* Gets downloaded to card as a
+ bitmap */
+ unsigned short discenable; /* Gets downloaded to card as a
+ bitmap */
+ transinfo_type user[MAX_TARGETS];
unsigned char msg_buf[13]; /* The message for the target */
unsigned char msg_type;
@@ -1002,7 +974,6 @@ struct aic7xxx_host {
#define MSG_TYPE_INITIATOR_MSGIN 0x02
unsigned char msg_len; /* Length of message */
unsigned char msg_index; /* Index into msg_buf array */
- transinfo_type transinfo[MAX_TARGETS];
/*
@@ -1018,10 +989,6 @@ struct aic7xxx_host {
int scsi_id_b; /* channel B for twin adapters */
unsigned int bios_address;
int board_name_index;
- unsigned short needppr_copy; /* default config */
- unsigned short needsdtr_copy; /* default config */
- unsigned short needwdtr_copy; /* default config */
- unsigned short ultraenb; /* Ultra mode target list */
unsigned short bios_control; /* bios control - SEEPROM */
unsigned short adapter_control; /* adapter control - SEEPROM */
struct pci_dev *pdev;
@@ -1032,46 +999,12 @@ struct aic7xxx_host {
unsigned short sc_size;
struct aic7xxx_host *next; /* allow for multiple IRQs */
struct Scsi_Host *host; /* pointer to scsi host */
+ struct list_head aic_devs; /* all aic devs on host */
int host_no; /* SCSI host number */
unsigned long mbase; /* I/O memory address */
ahc_chip chip; /* chip type */
ahc_bugs bugs;
dma_addr_t fifo_dma; /* DMA handle for fifo arrays */
- Scsi_Device *Scsi_Dev[MAX_TARGETS][MAX_LUNS];
-
- /*
- * Statistics Kept:
- *
- * Total Xfers (count for each command that has a data xfer),
- * broken down further by reads && writes.
- *
- * Binned sizes, writes && reads:
- * < 512, 512, 1-2K, 2-4K, 4-8K, 8-16K, 16-32K, 32-64K, 64K-128K, > 128K
- *
- * Total amounts read/written above 512 bytes (amts under ignored)
- *
- * NOTE: Enabling this feature is likely to cause a noticeable performance
- * decrease as the accesses into the stats structures blows apart multiple
- * cache lines and is CPU time consuming.
- *
- * NOTE: Since it doesn't really buy us much, but consumes *tons* of RAM
- * and blows apart all sorts of cache lines, I modified this so that we
- * no longer look at the LUN. All LUNs now go into the same bin on each
- * device for stats purposes.
- */
- struct aic7xxx_xferstats {
- long w_total; /* total writes */
- long r_total; /* total reads */
-#ifdef AIC7XXX_PROC_STATS
- long w_bins[8]; /* binned write */
- long r_bins[8]; /* binned reads */
-#endif /* AIC7XXX_PROC_STATS */
- } stats[MAX_TARGETS]; /* [(channel << 3)|target] */
-
-#if 0
- struct target_cmd *targetcmds;
- unsigned int num_targetcmds;
-#endif
};
@@ -1309,14 +1242,6 @@ static int aic7xxx_seltime = 0x10;
#ifdef MODULE
static char * aic7xxx = NULL;
MODULE_PARM(aic7xxx, "s");
-
-/*
- * Just in case someone uses commas to separate items on the insmod
- * command line, we define a dummy buffer here to avoid having insmod
- * write wild stuff into our code segment
- */
-static char dummy_buffer[60] = "Please don't trounce on me insmod!!\n";
-
#endif
#define VERBOSE_NORMAL 0x0000
@@ -1349,6 +1274,13 @@ static int aic7xxx_verbose = VERBOSE_NORMAL | VERBOSE_NEGOTIATION |
*
***************************************************************************/
+static void aic7xxx_set_syncrate(struct aic7xxx_host *p,
+ struct aic7xxx_syncrate *syncrate, int target, int channel,
+ unsigned int period, unsigned int offset, unsigned char options,
+ unsigned int type, struct aic_dev_data *aic_dev);
+static void aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel,
+ int lun, unsigned int width, unsigned int type,
+ struct aic_dev_data *aic_dev);
static void aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd);
static void aic7xxx_print_card(struct aic7xxx_host *p);
static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p);
@@ -2141,7 +2073,7 @@ aic7xxx_validate_offset(struct aic7xxx_host *p,
static void
aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
int target, int channel, unsigned int period, unsigned int offset,
- unsigned char options, unsigned int type)
+ unsigned char options, unsigned int type, struct aic_dev_data *aic_dev)
{
unsigned char tindex;
unsigned short target_mask;
@@ -2158,9 +2090,9 @@ aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
offset = 0;
}
- old_period = p->transinfo[tindex].cur_period;
- old_offset = p->transinfo[tindex].cur_offset;
- old_options = p->transinfo[tindex].cur_options;
+ old_period = aic_dev->cur.period;
+ old_offset = aic_dev->cur.offset;
+ old_options = aic_dev->cur.options;
if (type & AHC_TRANS_CUR)
@@ -2223,12 +2155,12 @@ aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
aic_outb(p, scsirate, SCSIRATE);
}
aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
- p->transinfo[tindex].cur_period = period;
- p->transinfo[tindex].cur_offset = offset;
- p->transinfo[tindex].cur_options = options;
+ aic_dev->cur.period = period;
+ aic_dev->cur.offset = offset;
+ aic_dev->cur.options = options;
if ( !(type & AHC_TRANS_QUITE) &&
(aic7xxx_verbose & VERBOSE_NEGOTIATION) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
+ (aic_dev->flags & DEVICE_PRINT_DTR) )
{
if (offset)
{
@@ -2243,22 +2175,22 @@ aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
printk(INFO_LEAD "Using asynchronous transfers.\n",
p->host_no, channel, target, lun);
}
- p->dev_flags[tindex] &= ~DEVICE_PRINT_DTR;
+ aic_dev->flags &= ~DEVICE_PRINT_DTR;
}
}
if (type & AHC_TRANS_GOAL)
{
- p->transinfo[tindex].goal_period = period;
- p->transinfo[tindex].goal_offset = offset;
- p->transinfo[tindex].goal_options = options;
+ aic_dev->goal.period = period;
+ aic_dev->goal.offset = offset;
+ aic_dev->goal.options = options;
}
if (type & AHC_TRANS_USER)
{
- p->transinfo[tindex].user_period = period;
- p->transinfo[tindex].user_offset = offset;
- p->transinfo[tindex].user_options = options;
+ p->user[tindex].period = period;
+ p->user[tindex].offset = offset;
+ p->user[tindex].options = options;
}
}
@@ -2271,7 +2203,7 @@ aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
*-F*************************************************************************/
static void
aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
- unsigned int width, unsigned int type)
+ unsigned int width, unsigned int type, struct aic_dev_data *aic_dev)
{
unsigned char tindex;
unsigned short target_mask;
@@ -2280,7 +2212,7 @@ aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
tindex = target | (channel << 3);
target_mask = 1 << tindex;
- old_width = p->transinfo[tindex].cur_width;
+ old_width = aic_dev->cur.width;
if (type & AHC_TRANS_CUR)
{
@@ -2297,11 +2229,11 @@ aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
if (type & AHC_TRANS_ACTIVE)
aic_outb(p, scsirate, SCSIRATE);
- p->transinfo[tindex].cur_width = width;
+ aic_dev->cur.width = width;
if ( !(type & AHC_TRANS_QUITE) &&
(aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
+ (aic_dev->flags & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target,
lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" );
@@ -2309,23 +2241,23 @@ aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
}
if (type & AHC_TRANS_GOAL)
- p->transinfo[tindex].goal_width = width;
+ aic_dev->goal.width = width;
if (type & AHC_TRANS_USER)
- p->transinfo[tindex].user_width = width;
+ p->user[tindex].width = width;
- if (p->transinfo[tindex].goal_offset)
+ if (aic_dev->goal.offset)
{
if (p->features & AHC_ULTRA2)
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
}
else if (width == MSG_EXT_WDTR_BUS_16_BIT)
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ aic_dev->goal.offset = MAX_OFFSET_16BIT;
}
else
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ aic_dev->goal.offset = MAX_OFFSET_8BIT;
}
}
}
@@ -2356,16 +2288,10 @@ scbq_init(volatile scb_queue_type *queue)
static inline void
scbq_insert_head(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags;
-#endif
-
- DRIVER_LOCK
scb->q_next = queue->head;
queue->head = scb;
if (queue->tail == NULL) /* If list was empty, update tail. */
queue->tail = queue->head;
- DRIVER_UNLOCK
}
/*+F*************************************************************************
@@ -2380,17 +2306,12 @@ static inline struct aic7xxx_scb *
scbq_remove_head(volatile scb_queue_type *queue)
{
struct aic7xxx_scb * scbp;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags;
-#endif
- DRIVER_LOCK
scbp = queue->head;
if (queue->head != NULL)
queue->head = queue->head->q_next;
if (queue->head == NULL) /* If list is now empty, update tail. */
queue->tail = NULL;
- DRIVER_UNLOCK
return(scbp);
}
@@ -2405,11 +2326,6 @@ scbq_remove_head(volatile scb_queue_type *queue)
static inline void
scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags;
-#endif
-
- DRIVER_LOCK
if (queue->head == scb)
{
/* At beginning of queue, remove from head. */
@@ -2438,7 +2354,6 @@ scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
}
}
}
- DRIVER_UNLOCK
}
/*+F*************************************************************************
@@ -2452,18 +2367,12 @@ scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
static inline void
scbq_insert_tail(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags;
-#endif
-
- DRIVER_LOCK
scb->q_next = NULL;
if (queue->tail != NULL) /* Add the scb at the end of the list. */
queue->tail->q_next = scb;
queue->tail = scb; /* Update the tail. */
if (queue->head == NULL) /* If list was empty, update head. */
queue->head = queue->tail;
- DRIVER_UNLOCK
}
/*+F*************************************************************************
@@ -2743,11 +2652,7 @@ static void
aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
{
Scsi_Cmnd *cmd;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned int cpu_flags = 0;
-#endif
- DRIVER_LOCK
while (p->completeq.head != NULL)
{
cmd = p->completeq.head;
@@ -2755,7 +2660,6 @@ aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
cmd->host_scribble = NULL;
cmd->scsi_done(cmd);
}
- DRIVER_UNLOCK
}
/*+F*************************************************************************
@@ -2792,6 +2696,7 @@ static void
aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
Scsi_Cmnd *cmd = scb->cmd;
+ struct aic_dev_data *aic_dev = cmd->device->hostdata;
int tindex = TARGET_INDEX(cmd);
struct aic7xxx_scb *scbp;
unsigned char queue_depth;
@@ -2846,7 +2751,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
if (message_error)
{
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
+ (aic_dev->flags & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Device failed to complete Wide Negotiation "
"processing and\n", p->host_no, CTL_OF_SCB(scb));
@@ -2855,8 +2760,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no,
CTL_OF_SCB(scb));
}
- p->needwdtr &= ~mask;
- p->needwdtr_copy &= ~mask;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
}
}
if (scb->flags & SCB_MSGOUT_SDTR)
@@ -2864,7 +2768,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
if (message_error)
{
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
+ (aic_dev->flags & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Device failed to complete Sync Negotiation "
"processing and\n", p->host_no, CTL_OF_SCB(scb));
@@ -2872,10 +2776,9 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
"disabling future\n", p->host_no, CTL_OF_SCB(scb));
printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no,
CTL_OF_SCB(scb));
- p->dev_flags[tindex] &= ~DEVICE_PRINT_DTR;
+ aic_dev->flags &= ~DEVICE_PRINT_DTR;
}
- p->needsdtr &= ~mask;
- p->needsdtr_copy &= ~mask;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
}
}
if (scb->flags & SCB_MSGOUT_PPR)
@@ -2883,7 +2786,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
if(message_error)
{
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
+ (aic_dev->flags & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Device failed to complete Parallel Protocol "
"Request processing and\n", p->host_no, CTL_OF_SCB(scb));
@@ -2895,20 +2798,17 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
/*
* Disable PPR negotiation and revert back to WDTR and SDTR setup
*/
- p->needppr &= ~mask;
- p->needppr_copy &= ~mask;
- p->needsdtr |= mask;
- p->needsdtr_copy |= mask;
- p->needwdtr |= mask;
- p->needwdtr_copy |= mask;
+ aic_dev->needppr = aic_dev->needppr_copy = 0;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
}
}
}
- queue_depth = p->dev_temp_queue_depth[tindex];
- if (queue_depth >= p->dev_active_cmds[tindex])
+ queue_depth = aic_dev->temp_q_depth;
+ if (queue_depth >= aic_dev->active_cmds)
{
- scbp = scbq_remove_head(&p->delayed_scbs[tindex]);
+ scbp = scbq_remove_head(&aic_dev->delayed_scbs);
if (scbp)
{
if (queue_depth == 1)
@@ -2930,9 +2830,9 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
printk(INFO_LEAD "Moving SCB from delayed to waiting queue.\n",
p->host_no, CTL_OF_SCB(scbp));
#endif
- if (queue_depth > p->dev_active_cmds[tindex])
+ if (queue_depth > aic_dev->active_cmds)
{
- scbp = scbq_remove_head(&p->delayed_scbs[tindex]);
+ scbp = scbq_remove_head(&aic_dev->delayed_scbs);
if (scbp)
scbq_insert_tail(&p->waiting_scbs, scbp);
}
@@ -2942,92 +2842,46 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun,
/* unbusy */ TRUE);
- if (p->tagenable & (1<<tindex))
+ if (cmd->device->simple_tags)
{
- p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
+ aic_dev->temp_q_depth = aic_dev->max_q_depth;
}
}
if(scb->flags & SCB_DTR_SCB)
{
- p->dtr_pending &= ~(1 << tindex);
+ aic_dev->dtr_pending = 0;
}
- p->dev_active_cmds[tindex]--;
+ aic_dev->active_cmds--;
p->activescbs--;
+ if ((scb->sg_length >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK))
{
- int actual;
+ long *ptr;
+ int x, i;
- /*
- * XXX: we should actually know how much actually transferred
- * XXX: for each command, but apparently that's too difficult.
- *
- * We set a lower limit of 512 bytes on the transfer length. We
- * ignore anything less than this because we don't have a real
- * reason to count it. Read/Writes to tapes are usually about 20K
- * and disks are a minimum of 512 bytes unless you want to count
- * non-read/write commands (such as TEST_UNIT_READY) which we don't
- */
- actual = scb->sg_length;
- if ((actual >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK))
- {
- struct aic7xxx_xferstats *sp;
-#ifdef AIC7XXX_PROC_STATS
- long *ptr;
- int x;
-#endif /* AIC7XXX_PROC_STATS */
-
- sp = &p->stats[TARGET_INDEX(cmd)];
- /*
- * For block devices, cmd->request.cmd is always == either READ or
- * WRITE. For character devices, this isn't always set properly, so
- * we check data_cmnd[0]. This catches the conditions for st.c, but
- * I'm still not sure if request.cmd is valid for sg devices.
- */
- if ( (rq_data_dir(cmd->request) == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
- (cmd->data_cmnd[0] == WRITE_FILEMARKS) )
- {
- sp->w_total++;
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if ( (sp->w_total > 16) && (aic7xxx_verbose > 0xffff) )
- aic7xxx_verbose &= 0xffff;
-#endif
-#ifdef AIC7XXX_PROC_STATS
- ptr = sp->w_bins;
-#endif /* AIC7XXX_PROC_STATS */
- }
- else
- {
- sp->r_total++;
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if ( (sp->r_total > 16) && (aic7xxx_verbose > 0xffff) )
- aic7xxx_verbose &= 0xffff;
-#endif
-#ifdef AIC7XXX_PROC_STATS
- ptr = sp->r_bins;
-#endif /* AIC7XXX_PROC_STATS */
- }
-#ifdef AIC7XXX_PROC_STATS
- x = -11;
- while(actual)
- {
- actual >>= 1;
- x++;
- }
- if (x < 0)
- {
- ptr[0]++;
- }
- else if (x > 7)
- {
- ptr[7]++;
- }
- else
- {
- ptr[x]++;
+ if (rq_data_dir(cmd->request) == WRITE)
+ {
+ aic_dev->w_total++;
+ ptr = aic_dev->w_bins;
+ }
+ else
+ {
+ aic_dev->r_total++;
+ ptr = aic_dev->r_bins;
+ }
+ x = scb->sg_length;
+ x >>= 10;
+ for(i=0; i<6; i++)
+ {
+ x >>= 2;
+ if(!x) {
+ ptr[i]++;
+ break;
}
-#endif /* AIC7XXX_PROC_STATS */
}
+ if(i == 6 && x)
+ ptr[5]++;
}
aic7xxx_free_scb(p, scb);
aic7xxx_queue_cmd_complete(p, cmd);
@@ -3054,18 +2908,25 @@ aic7xxx_run_done_queue(struct aic7xxx_host *p, /*complete*/ int complete)
scb = p->scb_data->scb_array[i];
if (scb->flags & SCB_QUEUED_FOR_DONE)
{
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Aborting scb %d\n",
- p->host_no, CTL_OF_SCB(scb), scb->hscb->tag);
+ if (scb->flags & SCB_QUEUE_FULL)
+ {
+ scb->cmd->result = QUEUE_FULL << 1;
+ }
+ else
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Aborting scb %d\n",
+ p->host_no, CTL_OF_SCB(scb), scb->hscb->tag);
+ /*
+ * Clear any residual information since the normal aic7xxx_done() path
+ * doesn't touch the residuals.
+ */
+ scb->hscb->residual_SG_segment_count = 0;
+ scb->hscb->residual_data_count[0] = 0;
+ scb->hscb->residual_data_count[1] = 0;
+ scb->hscb->residual_data_count[2] = 0;
+ }
found++;
- /*
- * Clear any residual information since the normal aic7xxx_done() path
- * doesn't touch the residuals.
- */
- scb->hscb->residual_SG_segment_count = 0;
- scb->hscb->residual_data_count[0] = 0;
- scb->hscb->residual_data_count[1] = 0;
- scb->hscb->residual_data_count[2] = 0;
aic7xxx_done(p, scb);
}
}
@@ -3166,12 +3027,12 @@ aic7xxx_search_qinfifo(struct aic7xxx_host *p, int target, int channel,
{
scbq_remove(queue, scbp);
scbq_remove(&p->waiting_scbs, scbp);
- scbq_remove(&p->delayed_scbs[TARGET_INDEX(scbp->cmd)], scbp);
- p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
+ AIC_DEV(scbp->cmd)->active_cmds++;
p->activescbs++;
}
scbq_insert_tail(queue, scbp);
- p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]--;
+ AIC_DEV(scbp->cmd)->active_cmds--;
p->activescbs--;
scbp->flags |= SCB_WAITINGQ;
if ( !(scbp->tag_action & TAG_ENB) )
@@ -3268,9 +3129,12 @@ static void
aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
int lun, unsigned char tag)
{
- struct aic7xxx_scb *scbp;
+ struct aic7xxx_scb *scbp, *prev_scbp;
+ struct scsi_device *sd;
unsigned char active_scb, tcl;
- int i = 0, j, init_lists = FALSE;
+ int i = 0, init_lists = FALSE;
+ struct list_head *list_item, *list_head;
+ struct aic_dev_data *aic_dev;
/*
* Restore this when we're done
@@ -3297,107 +3161,48 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
/*
* Deal with the busy target and linked next issues.
*/
+ list_head = &p->aic_devs;
+ list_for_each(list_item, list_head)
{
- int min_target, max_target;
- struct aic7xxx_scb *scbp, *prev_scbp;
+ aic_dev = list_entry(list_item, struct aic_dev_data, list);
+ sd = aic_dev->SDptr;
- /* Make all targets 'relative' to bus A. */
- if (target == ALL_TARGETS)
- {
- switch (channel)
- {
- case 0:
- min_target = 0;
- max_target = (p->features & AHC_WIDE) ? 15 : 7;
- break;
- case 1:
- min_target = 8;
- max_target = 15;
- break;
- case ALL_CHANNELS:
- default:
- min_target = 0;
- max_target = (p->features & (AHC_TWIN|AHC_WIDE)) ? 15 : 7;
- break;
- }
- }
- else
- {
- min_target = target | (channel << 3);
- max_target = min_target;
- }
-
-
- for (i = min_target; i <= max_target; i++)
+ if((target != ALL_TARGETS && target != sd->id) ||
+ (channel != ALL_CHANNELS && channel != sd->channel))
+ continue;
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning up status information "
+ "and delayed_scbs.\n", p->host_no, sd->channel, sd->id, sd->lun);
+ aic_dev->flags &= ~BUS_DEVICE_RESET_PENDING;
+ if ( tag == SCB_LIST_NULL )
+ {
+ aic_dev->dtr_pending = 0;
+ aic_dev->needppr = aic_dev->needppr_copy;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy;
+ aic_dev->flags = DEVICE_PRINT_DTR;
+ aic_dev->temp_q_depth = aic_dev->max_q_depth;
+ }
+ tcl = (sd->id << 4) | (sd->channel << 3) | sd->lun;
+ if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) ||
+ (tag == SCB_LIST_NULL) )
+ aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE);
+ prev_scbp = NULL;
+ scbp = aic_dev->delayed_scbs.head;
+ while (scbp != NULL)
{
- if ( i == p->scsi_id )
+ prev_scbp = scbp;
+ scbp = scbp->q_next;
+ if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
{
- continue;
- }
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- printk(INFO_LEAD "Cleaning up status information "
- "and delayed_scbs.\n", p->host_no, channel, i, lun);
- p->dev_flags[i] &= ~BUS_DEVICE_RESET_PENDING;
- if ( tag == SCB_LIST_NULL )
- {
- p->dev_flags[i] |= DEVICE_PRINT_DTR | DEVICE_RESET_DELAY;
- p->dev_expires[i] = jiffies + (1 * HZ);
- p->dev_timer_active |= (0x01 << i);
- p->dev_last_queue_full_count[i] = 0;
- p->dev_last_queue_full[i] = 0;
- p->dev_temp_queue_depth[i] =
- p->dev_max_queue_depth[i];
- }
- for(j=0; j<MAX_LUNS; j++)
- {
- if (channel == 1)
- tcl = ((i << 4) & 0x70) | (channel << 3) | j;
- else
- tcl = (i << 4) | (channel << 3) | j;
- if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) ||
- (tag == SCB_LIST_NULL) )
- aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE);
- }
- j = 0;
- prev_scbp = NULL;
- scbp = p->delayed_scbs[i].head;
- while ( (scbp != NULL) && (j++ <= (p->scb_data->numscbs + 1)) )
- {
- prev_scbp = scbp;
- scbp = scbp->q_next;
- if ( prev_scbp == scbp )
- {
- if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
- printk(WARN_LEAD "Yikes!! scb->q_next == scb "
- "in the delayed_scbs queue!\n", p->host_no, channel, i, lun);
- scbp = NULL;
- prev_scbp->q_next = NULL;
- p->delayed_scbs[i].tail = prev_scbp;
- }
- if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
+ scbq_remove(&aic_dev->delayed_scbs, prev_scbp);
+ if (prev_scbp->flags & SCB_WAITINGQ)
{
- scbq_remove(&p->delayed_scbs[i], prev_scbp);
- if (prev_scbp->flags & SCB_WAITINGQ)
- {
- p->dev_active_cmds[i]++;
- p->activescbs++;
- }
- prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ aic_dev->active_cmds++;
+ p->activescbs++;
}
- }
- if ( j > (p->scb_data->numscbs + 1) )
- {
- if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
- printk(WARN_LEAD "Yikes!! There's a loop in the "
- "delayed_scbs queue!\n", p->host_no, channel, i, lun);
- scbq_init(&p->delayed_scbs[i]);
- }
- if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
- time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
- {
- mod_timer(&p->dev_timer, p->dev_expires[i]);
- p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
}
}
}
@@ -3417,41 +3222,24 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
{
struct aic7xxx_scb *scbp, *prev_scbp;
- j = 0;
prev_scbp = NULL;
scbp = p->waiting_scbs.head;
- while ( (scbp != NULL) && (j++ <= (p->scb_data->numscbs + 1)) )
+ while (scbp != NULL)
{
prev_scbp = scbp;
scbp = scbp->q_next;
- if ( prev_scbp == scbp )
- {
- if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
- printk(WARN_LEAD "Yikes!! scb->q_next == scb "
- "in the waiting_scbs queue!\n", p->host_no, CTL_OF_SCB(scbp));
- scbp = NULL;
- prev_scbp->q_next = NULL;
- p->waiting_scbs.tail = prev_scbp;
- }
if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
{
scbq_remove(&p->waiting_scbs, prev_scbp);
if (prev_scbp->flags & SCB_WAITINGQ)
{
- p->dev_active_cmds[TARGET_INDEX(prev_scbp->cmd)]++;
+ AIC_DEV(prev_scbp->cmd)->active_cmds++;
p->activescbs++;
}
prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
}
}
- if ( j > (p->scb_data->numscbs + 1) )
- {
- if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
- printk(WARN_LEAD "Yikes!! There's a loop in the "
- "waiting_scbs queue!\n", p->host_no, channel, target, lun);
- scbq_init(&p->waiting_scbs);
- }
}
@@ -3466,8 +3254,7 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
next = aic_inb(p, WAITING_SCBH); /* Start at head of list. */
prev = SCB_LIST_NULL;
- j = 0;
- while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ while (next != SCB_LIST_NULL)
{
aic_outb(p, next, SCBPTR);
scb_index = aic_inb(p, SCB_TAG);
@@ -3491,7 +3278,7 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
next = aic7xxx_abort_waiting_scb(p, scbp, next, prev);
if (scbp->flags & SCB_WAITINGQ)
{
- p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ AIC_DEV(scbp->cmd)->active_cmds++;
p->activescbs++;
}
scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
@@ -3515,12 +3302,6 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
}
}
}
- if ( j > (p->scb_data->maxscbs + 1) )
- {
- printk(WARN_LEAD "Yikes!! There is a loop in the waiting for "
- "selection list!\n", p->host_no, channel, target, lun);
- init_lists = TRUE;
- }
}
/*
@@ -3536,8 +3317,7 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
next = aic_inb(p, DISCONNECTED_SCBH);
prev = SCB_LIST_NULL;
- j = 0;
- while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ while (next != SCB_LIST_NULL)
{
aic_outb(p, next, SCBPTR);
scb_index = aic_inb(p, SCB_TAG);
@@ -3556,7 +3336,7 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
if (scbp->flags & SCB_WAITINGQ)
{
- p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ AIC_DEV(scbp->cmd)->active_cmds++;
p->activescbs++;
}
scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
@@ -3570,12 +3350,6 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
}
}
}
- if ( j > (p->scb_data->maxscbs + 1) )
- {
- printk(WARN_LEAD "Yikes!! There is a loop in the disconnected list!\n",
- p->host_no, channel, target, lun);
- init_lists = TRUE;
- }
}
/*
@@ -3586,16 +3360,8 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
{
unsigned char next;
- j = 0;
next = aic_inb(p, FREE_SCBH);
- if ( (next >= p->scb_data->maxhscbs) && (next != SCB_LIST_NULL) )
- {
- printk(WARN_LEAD "Bogus FREE_SCBH!.\n", p->host_no, channel,
- target, lun);
- init_lists = TRUE;
- next = SCB_LIST_NULL;
- }
- while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ while (next != SCB_LIST_NULL)
{
aic_outb(p, next, SCBPTR);
if (aic_inb(p, SCB_TAG) < p->scb_data->numscbs)
@@ -3612,12 +3378,6 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
next = aic_inb(p, SCB_NEXT);
}
}
- if ( j > (p->scb_data->maxscbs + 1) )
- {
- printk(WARN_LEAD "Yikes!! There is a loop in the free list!\n",
- p->host_no, channel, target, lun);
- init_lists = TRUE;
- }
}
/*
@@ -3678,8 +3438,8 @@ aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
if (scbp->flags & SCB_WAITINGQ)
{
scbq_remove(&p->waiting_scbs, scbp);
- scbq_remove(&p->delayed_scbs[TARGET_INDEX(scbp->cmd)], scbp);
- p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
+ AIC_DEV(scbp->cmd)->active_cmds++;
p->activescbs++;
}
scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
@@ -3774,8 +3534,6 @@ aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset)
if (channel == 1)
{
- p->needsdtr |= (p->needsdtr_copy & 0xFF00);
- p->dtr_pending &= 0x00FF;
offset_min = 8;
offset_max = 16;
}
@@ -3784,17 +3542,11 @@ aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset)
if (p->features & AHC_TWIN)
{
/* Channel A */
- p->needsdtr |= (p->needsdtr_copy & 0x00FF);
- p->dtr_pending &= 0xFF00;
offset_min = 0;
offset_max = 8;
}
else
{
- p->needppr = p->needppr_copy;
- p->needsdtr = p->needsdtr_copy;
- p->needwdtr = p->needwdtr_copy;
- p->dtr_pending = 0x0;
offset_min = 0;
if (p->features & AHC_WIDE)
{
@@ -3897,11 +3649,8 @@ static void
aic7xxx_run_waiting_queues(struct aic7xxx_host *p)
{
struct aic7xxx_scb *scb;
- int tindex;
+ struct aic_dev_data *aic_dev;
int sent;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags = 0;
-#endif
if (p->waiting_scbs.head == NULL)
@@ -3912,25 +3661,21 @@ aic7xxx_run_waiting_queues(struct aic7xxx_host *p)
/*
* First handle SCBs that are waiting but have been assigned a slot.
*/
- DRIVER_LOCK
while ((scb = scbq_remove_head(&p->waiting_scbs)) != NULL)
{
- tindex = TARGET_INDEX(scb->cmd);
- if ( !scb->tag_action && (p->tagenable & (1<<tindex)) )
+ aic_dev = scb->cmd->device->hostdata;
+ if ( !scb->tag_action )
{
- p->dev_temp_queue_depth[tindex] = 1;
+ aic_dev->temp_q_depth = 1;
}
- if ( (p->dev_active_cmds[tindex] >=
- p->dev_temp_queue_depth[tindex]) ||
- (p->dev_flags[tindex] & (DEVICE_RESET_DELAY|DEVICE_WAS_BUSY)) ||
- (p->flags & AHC_RESET_DELAY) )
+ if ( aic_dev->active_cmds >= aic_dev->temp_q_depth)
{
- scbq_insert_tail(&p->delayed_scbs[tindex], scb);
+ scbq_insert_tail(&aic_dev->delayed_scbs, scb);
}
else
{
scb->flags &= ~SCB_WAITINGQ;
- p->dev_active_cmds[tindex]++;
+ aic_dev->active_cmds++;
p->activescbs++;
if ( !(scb->tag_action) )
{
@@ -3953,7 +3698,6 @@ aic7xxx_run_waiting_queues(struct aic7xxx_host *p)
if (p->activescbs > p->max_activescbs)
p->max_activescbs = p->activescbs;
}
- DRIVER_UNLOCK
}
#ifdef CONFIG_PCI
@@ -4014,81 +3758,6 @@ aic7xxx_pci_intr(struct aic7xxx_host *p)
/*+F*************************************************************************
* Function:
- * aic7xxx_timer
- *
- * Description:
- * Take expired extries off of delayed queues and place on waiting queue
- * then run waiting queue to start commands.
- ***************************************************************************/
-static void
-aic7xxx_timer(struct aic7xxx_host *p)
-{
- int i, j;
- unsigned long cpu_flags = 0;
- struct aic7xxx_scb *scb;
-
- spin_lock_irqsave(p->host->host_lock, cpu_flags);
- p->dev_timer_active &= ~(0x01 << MAX_TARGETS);
- if ( (p->dev_timer_active & (0x01 << p->scsi_id)) &&
- time_after_eq(jiffies, p->dev_expires[p->scsi_id]) )
- {
- p->flags &= ~AHC_RESET_DELAY;
- p->dev_timer_active &= ~(0x01 << p->scsi_id);
- }
- for(i=0; i<MAX_TARGETS; i++)
- {
- if ( (i != p->scsi_id) &&
- (p->dev_timer_active & (0x01 << i)) &&
- time_after_eq(jiffies, p->dev_expires[i]) )
- {
- p->dev_timer_active &= ~(0x01 << i);
- p->dev_flags[i] &= ~(DEVICE_RESET_DELAY|DEVICE_WAS_BUSY);
- p->dev_temp_queue_depth[i] = p->dev_max_queue_depth[i];
- j = 0;
- while ( ((scb = scbq_remove_head(&p->delayed_scbs[i])) != NULL) &&
- (j++ < p->scb_data->numscbs) )
- {
- scbq_insert_tail(&p->waiting_scbs, scb);
- }
- if (j == p->scb_data->numscbs)
- {
- printk(INFO_LEAD "timer: Yikes, loop in delayed_scbs list.\n",
- p->host_no, 0, i, -1);
- scbq_init(&p->delayed_scbs[i]);
- scbq_init(&p->waiting_scbs);
- /*
- * Well, things are screwed now, wait for a reset to clean the junk
- * out.
- */
- }
- }
- else if ( p->dev_timer_active & (0x01 << i) )
- {
- if ( p->dev_timer_active & (0x01 << MAX_TARGETS) )
- {
- if ( time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
- {
- p->dev_timer.expires = p->dev_expires[i];
- }
- }
- else
- {
- p->dev_timer.expires = p->dev_expires[i];
- p->dev_timer_active |= (0x01 << MAX_TARGETS);
- }
- }
- }
- if ( p->dev_timer_active & (0x01 << MAX_TARGETS) )
- {
- add_timer(&p->dev_timer);
- }
-
- aic7xxx_run_waiting_queues(p);
- spin_unlock_irqrestore(p->host->host_lock, cpu_flags);
-}
-
-/*+F*************************************************************************
- * Function:
* aic7xxx_construct_ppr
*
* Description:
@@ -4098,16 +3767,14 @@ aic7xxx_timer(struct aic7xxx_host *p)
static void
aic7xxx_construct_ppr(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
- int tindex = TARGET_INDEX(scb->cmd);
-
p->msg_buf[p->msg_index++] = MSG_EXTENDED;
p->msg_buf[p->msg_index++] = MSG_EXT_PPR_LEN;
p->msg_buf[p->msg_index++] = MSG_EXT_PPR;
- p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_period;
+ p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.period;
p->msg_buf[p->msg_index++] = 0;
- p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_offset;
- p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_width;
- p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_options;
+ p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.offset;
+ p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.width;
+ p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.options;
p->msg_len += 8;
}
@@ -4205,11 +3872,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* the mid layer didn't check residual data counts to see if the
* command needs retried.
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
cmd->resid = scb->sg_length - actual;
-#else
- aic7xxx_error(cmd) = DID_RETRY_COMMAND;
-#endif
aic7xxx_status(cmd) = hscb->target_status;
}
}
@@ -4234,19 +3897,13 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
static void
aic7xxx_handle_device_reset(struct aic7xxx_host *p, int target, int channel)
{
- unsigned short targ_mask;
unsigned char tindex = target;
tindex |= ((channel & 0x01) << 3);
- targ_mask = (0x01 << tindex);
/*
* Go back to async/narrow transfers and renegotiate.
*/
- p->needppr |= (p->needppr_copy & targ_mask);
- p->needsdtr |= (p->needsdtr_copy & targ_mask);
- p->needwdtr |= (p->needwdtr_copy & targ_mask);
- p->dtr_pending &= ~targ_mask;
aic_outb(p, 0, TARG_SCSIRATE + tindex);
if (p->features & AHC_ULTRA2)
aic_outb(p, 0, TARG_OFFSET + tindex);
@@ -4268,6 +3925,7 @@ static void
aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
{
struct aic7xxx_scb *scb;
+ struct aic_dev_data *aic_dev;
unsigned short target_mask;
unsigned char target, lun, tindex;
unsigned char queue_flag = FALSE;
@@ -4387,6 +4045,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
scb_index = aic_inb(p, SCB_TAG);
scb = p->scb_data->scb_array[scb_index];
+ aic_dev = AIC_DEV(scb->cmd);
last_msg = aic_inb(p, LAST_MSG);
if ( (last_msg == MSG_IDENTIFYFLAG) &&
@@ -4401,7 +4060,8 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
* disable ordered tag commands and go on with life just like
* normal.
*/
- p->orderedtag &= ~target_mask;
+ scsi_adjust_queue_depth(scb->cmd->device, MSG_SIMPLE_TAG,
+ scb->cmd->device->new_queue_depth);
scb->tag_action = MSG_SIMPLE_Q_TAG;
scb->hscb->control &= ~SCB_TAG_TYPE;
scb->hscb->control |= MSG_SIMPLE_Q_TAG;
@@ -4416,21 +4076,15 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
}
else if (scb->tag_action == MSG_SIMPLE_Q_TAG)
{
- unsigned char i, reset = 0;
+ unsigned char i;
struct aic7xxx_scb *scbp;
int old_verbose;
/*
- * Hmmmm....the device is flaking out on tagged commands. The
- * bad thing is that we already have tagged commands enabled in
- * the device struct in the mid level code. We also have a queue
- * set according to the tagged queue depth. Gonna have to live
- * with it by controlling our queue depth internally and making
- * sure we don't set the tagged command flag any more.
+ * Hmmmm....the device is flaking out on tagged commands.
*/
- p->tagenable &= ~target_mask;
- p->orderedtag &= ~target_mask;
- p->dev_max_queue_depth[tindex] =
- p->dev_temp_queue_depth[tindex] = 1;
+ scsi_adjust_queue_depth(scb->cmd->device, 0 /* untagged */,
+ p->host->cmd_per_lun);
+ aic_dev->max_q_depth = aic_dev->temp_q_depth = 1;
/*
* We set this command up as a bus device reset. However, we have
* to clear the tag type as it's causing us problems. We shouldnt
@@ -4448,7 +4102,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
old_verbose = aic7xxx_verbose;
aic7xxx_verbose &= ~(VERBOSE_RESET|VERBOSE_ABORT);
- for (i=0; i!=p->scb_data->numscbs; i++)
+ for (i=0; i < p->scb_data->numscbs; i++)
{
scbp = p->scb_data->scb_array[i];
if ((scbp->flags & SCB_ACTIVE) && (scbp != scb))
@@ -4456,11 +4110,10 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
if (aic7xxx_match_scb(p, scbp, target, channel, lun, i))
{
aic7xxx_reset_device(p, target, channel, lun, i);
- reset++;
}
- aic7xxx_run_done_queue(p, TRUE);
}
}
+ aic7xxx_run_done_queue(p, TRUE);
aic7xxx_verbose = old_verbose;
/*
* Wait until after the for loop to set the busy index since
@@ -4484,38 +4137,35 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
* a parity error during msg_out phase to signal that they don't
* like our settings.
*/
- p->needppr &= ~target_mask;
- p->needppr_copy &= ~target_mask;
+ aic_dev->needppr = aic_dev->needppr_copy = 0;
aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
- (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE));
+ (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE), aic_dev);
aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
- p->transinfo[tindex].goal_options = 0;
- p->dtr_pending &= ~target_mask;
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
+ aic_dev);
+ aic_dev->goal.options = aic_dev->dtr_pending = 0;
scb->flags &= ~SCB_MSGOUT_BITS;
if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
printk(INFO_LEAD "Device is rejecting PPR messages, falling "
"back.\n", p->host_no, channel, target, lun);
}
- if ( p->transinfo[tindex].goal_width )
+ if ( aic_dev->goal.width )
{
- p->needwdtr |= target_mask;
- p->needwdtr_copy |= target_mask;
- p->dtr_pending |= target_mask;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
+ aic_dev->dtr_pending = 1;
scb->flags |= SCB_MSGOUT_WDTR;
}
- if ( p->transinfo[tindex].goal_offset )
+ if ( aic_dev->goal.offset )
{
- p->needsdtr |= target_mask;
- p->needsdtr_copy |= target_mask;
- if( !(p->dtr_pending & target_mask) )
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
+ if( !aic_dev->dtr_pending )
{
- p->dtr_pending |= target_mask;
+ aic_dev->dtr_pending = 1;
scb->flags |= SCB_MSGOUT_SDTR;
}
}
- if ( p->dtr_pending & target_mask )
+ if ( aic_dev->dtr_pending )
{
aic_outb(p, HOST_MSG, MSG_OUT);
aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
@@ -4526,30 +4176,29 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
/*
* note 8bit xfers and clear flag
*/
- p->needwdtr &= ~target_mask;
- p->needwdtr_copy &= ~target_mask;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
scb->flags &= ~SCB_MSGOUT_BITS;
aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
- (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR));
+ (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR), aic_dev);
aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
+ aic_dev);
if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
printk(INFO_LEAD "Device is rejecting WDTR messages, using "
"narrow transfers.\n", p->host_no, channel, target, lun);
}
- p->needsdtr |= (p->needsdtr_copy & target_mask);
+ aic_dev->needsdtr = aic_dev->needsdtr_copy;
}
else if (scb->flags & SCB_MSGOUT_SDTR)
{
/*
* note asynch xfers and clear flag
*/
- p->needsdtr &= ~target_mask;
- p->needsdtr_copy &= ~target_mask;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
scb->flags &= ~SCB_MSGOUT_BITS;
aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL));
+ (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL), aic_dev);
if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
printk(INFO_LEAD "Device is rejecting SDTR messages, using "
@@ -4603,6 +4252,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
else
{
cmd = scb->cmd;
+ aic_dev = AIC_DEV(scb->cmd);
hscb->target_status = aic_inb(p, SCB_TARGET_STATUS);
aic7xxx_status(cmd) = hscb->target_status;
@@ -4695,14 +4345,17 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
unsigned char active_hscb, next_hscb, prev_hscb, scb_index;
/*
* We have to look three places for queued commands:
- * 1: QINFIFO
- * 2: p->waiting_scbs queue
+ * 1: p->waiting_scbs queue
+ * 2: QINFIFO
* 3: WAITING_SCBS list on card (for commands that are started
* but haven't yet made it to the device)
+ *
+ * Of special note here is that commands on 2 or 3 above will
+ * have already been marked as active, while commands on 1 will
+ * not. The aic7xxx_done() function will want to unmark them
+ * from active, so any commands we pull off of 1 need to
+ * bump up the active count.
*/
- aic7xxx_search_qinfifo(p, target, channel, lun,
- SCB_LIST_NULL, 0, TRUE,
- &p->delayed_scbs[tindex]);
next_scbp = p->waiting_scbs.head;
while ( next_scbp != NULL )
{
@@ -4712,10 +4365,14 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
SCB_LIST_NULL) )
{
scbq_remove(&p->waiting_scbs, prev_scbp);
- scbq_insert_tail(&p->delayed_scbs[tindex],
- prev_scbp);
+ scb->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL;
+ p->activescbs++;
+ aic_dev->active_cmds++;
}
}
+ aic7xxx_search_qinfifo(p, target, channel, lun,
+ SCB_LIST_NULL, SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL,
+ FALSE, NULL);
next_scbp = NULL;
active_hscb = aic_inb(p, SCBPTR);
prev_hscb = next_hscb = scb_index = SCB_LIST_NULL;
@@ -4730,18 +4387,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
if (aic7xxx_match_scb(p, next_scbp, target, channel, lun,
SCB_LIST_NULL) )
{
- if (next_scbp->flags & SCB_WAITINGQ)
- {
- p->dev_active_cmds[tindex]++;
- p->activescbs--;
- scbq_remove(&p->delayed_scbs[tindex], next_scbp);
- scbq_remove(&p->waiting_scbs, next_scbp);
- }
- scbq_insert_head(&p->delayed_scbs[tindex],
- next_scbp);
- next_scbp->flags |= SCB_WAITINGQ;
- p->dev_active_cmds[tindex]--;
- p->activescbs--;
+ next_scbp->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL;
next_hscb = aic_inb(p, SCB_NEXT);
aic_outb(p, 0, SCB_CONTROL);
aic_outb(p, SCB_LIST_NULL, SCB_TAG);
@@ -4771,39 +4417,8 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
} /* scb_index >= p->scb_data->numscbs */
}
aic_outb(p, active_hscb, SCBPTR);
- if (scb->flags & SCB_WAITINGQ)
- {
- scbq_remove(&p->delayed_scbs[tindex], scb);
- scbq_remove(&p->waiting_scbs, scb);
- p->dev_active_cmds[tindex]++;
- p->activescbs++;
- }
- scbq_insert_head(&p->delayed_scbs[tindex], scb);
- p->dev_active_cmds[tindex]--;
- p->activescbs--;
- scb->flags |= SCB_WAITINGQ | SCB_WAS_BUSY;
+ aic7xxx_run_done_queue(p, FALSE);
- if ( !(p->dev_timer_active & (0x01 << tindex)) )
- {
- p->dev_timer_active |= (0x01 << tindex);
- if ( p->dev_active_cmds[tindex] )
- {
- p->dev_expires[tindex] = jiffies + HZ;
- }
- else
- {
- p->dev_expires[tindex] = jiffies + (HZ / 10);
- }
- if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) )
- {
- p->dev_timer.expires = p->dev_expires[tindex];
- p->dev_timer_active |= (0x01 << MAX_TARGETS);
- add_timer(&p->dev_timer);
- }
- else if ( time_after_eq(p->dev_timer.expires,
- p->dev_expires[tindex]) )
- mod_timer(&p->dev_timer, p->dev_expires[tindex]);
- }
#ifdef AIC7XXX_VERBOSE_DEBUGGING
if( (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ||
(aic7xxx_verbose > 0xffff) )
@@ -4811,13 +4426,12 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
if (queue_flag)
printk(INFO_LEAD "Queue full received; queue depth %d, "
"active %d\n", p->host_no, CTL_OF_SCB(scb),
- p->dev_max_queue_depth[tindex],
- p->dev_active_cmds[tindex]);
+ aic_dev->max_q_depth, aic_dev->active_cmds);
else
printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
-
}
#endif
+#if 0
if (queue_flag)
{
if ( p->dev_last_queue_full[tindex] !=
@@ -4874,11 +4488,12 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
}
else
{
- p->dev_flags[tindex] |= DEVICE_WAS_BUSY;
+ aic_dev->flags[tindex] |= DEVICE_WAS_BUSY;
p->dev_temp_queue_depth[tindex] =
p->dev_active_cmds[tindex];
}
}
+#endif
break;
}
@@ -4903,6 +4518,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
scb_index = aic_inb(p, SCB_TAG);
msg_out = aic_inb(p, MSG_OUT);
scb = p->scb_data->scb_array[scb_index];
+ aic_dev = AIC_DEV(scb->cmd);
p->msg_index = p->msg_len = 0;
/*
* This SCB had a MK_MESSAGE set in its control byte informing
@@ -4948,10 +4564,10 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
{
printk(INFO_LEAD "Sending PPR (%d/%d/%d/%d) message.\n",
p->host_no, CTL_OF_SCB(scb),
- p->transinfo[tindex].goal_period,
- p->transinfo[tindex].goal_offset,
- p->transinfo[tindex].goal_width,
- p->transinfo[tindex].goal_options);
+ aic_dev->goal.period,
+ aic_dev->goal.offset,
+ aic_dev->goal.width,
+ aic_dev->goal.options);
}
aic7xxx_construct_ppr(p, scb);
}
@@ -4962,7 +4578,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
printk(INFO_LEAD "Sending WDTR message.\n", p->host_no,
CTL_OF_SCB(scb));
}
- aic7xxx_construct_wdtr(p, p->transinfo[tindex].goal_width);
+ aic7xxx_construct_wdtr(p, aic_dev->goal.width);
}
else if (scb->flags & SCB_MSGOUT_SDTR)
{
@@ -4992,16 +4608,15 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
{
max_sync = AHC_SYNCRATE_FAST;
}
- period = p->transinfo[tindex].goal_period;
+ period = aic_dev->goal.period;
aic7xxx_find_syncrate(p, &period, max_sync, &options);
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no,
CTL_OF_SCB(scb), period,
- p->transinfo[tindex].goal_offset);
+ aic_dev->goal.offset);
}
- aic7xxx_construct_sdtr(p, period,
- p->transinfo[tindex].goal_offset);
+ aic7xxx_construct_sdtr(p, period, aic_dev->goal.offset);
}
else
{
@@ -5338,12 +4953,14 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
unsigned char trans_options, new_trans_options;
unsigned int period, new_period, offset, new_offset, maxsync;
struct aic7xxx_syncrate *syncrate;
+ struct aic_dev_data *aic_dev;
target = scb->cmd->target;
channel = scb->cmd->channel;
lun = scb->cmd->lun;
reply = reject = done = FALSE;
tindex = TARGET_INDEX(scb->cmd);
+ aic_dev = AIC_DEV(scb->cmd);
target_scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
target_mask = (0x01 << tindex);
@@ -5433,44 +5050,43 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
(SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) )
{
- if (!(p->dev_flags[tindex] & DEVICE_DTR_SCANNED))
+ if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
{
/*
* We shouldn't get here unless this is a narrow drive, wide
* devices should trigger this same section of code in the WDTR
* handler first instead.
*/
- p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
- p->transinfo[tindex].goal_options = 0;
- if(p->transinfo[tindex].user_offset)
+ aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT;
+ aic_dev->goal.options = 0;
+ if(p->user[tindex].offset)
{
- p->needsdtr_copy |= target_mask;
- p->transinfo[tindex].goal_period =
- MAX(10,p->transinfo[tindex].user_period);
+ aic_dev->needsdtr_copy = 1;
+ aic_dev->goal.period = MAX(10,p->user[tindex].period);
if(p->features & AHC_ULTRA2)
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
}
else
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ aic_dev->goal.offset = MAX_OFFSET_8BIT;
}
}
else
{
- p->needsdtr_copy &= ~target_mask;
- p->transinfo[tindex].goal_period = 255;
- p->transinfo[tindex].goal_offset = 0;
+ aic_dev->needsdtr_copy = 0;
+ aic_dev->goal.period = 255;
+ aic_dev->goal.offset = 0;
}
- p->dev_flags[tindex] |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
+ aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
}
- else if ((p->needsdtr_copy & target_mask) == 0)
+ else if (aic_dev->needsdtr_copy == 0)
{
/*
* This is a preemptive message from the target, we've already
* scanned this target and set our options for it, and we
- * don't need a WDTR with this target (for whatever reason),
- * so reject this incoming WDTR
+ * don't need a SDTR with this target (for whatever reason),
+ * so reject this incoming SDTR
*/
reject = TRUE;
break;
@@ -5490,8 +5106,8 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* the device isn't allowed to send values greater than the ones
* we first sent to it.
*/
- new_period = MAX(period, p->transinfo[tindex].goal_period);
- new_offset = MIN(offset, p->transinfo[tindex].goal_offset);
+ new_period = MAX(period, aic_dev->goal.period);
+ new_offset = MIN(offset, aic_dev->goal.offset);
}
/*
@@ -5508,7 +5124,7 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
*/
if ((new_offset == 0) && (new_offset != offset))
{
- p->needsdtr_copy &= ~target_mask;
+ aic_dev->needsdtr_copy = 0;
reply = TRUE;
}
@@ -5525,7 +5141,8 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
*/
aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
new_offset, trans_options,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
+ aic_dev);
scb->flags &= ~SCB_MSGOUT_BITS;
scb->flags |= SCB_MSGOUT_SDTR;
aic_outb(p, HOST_MSG, MSG_OUT);
@@ -5535,8 +5152,8 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
new_offset, trans_options,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
- p->needsdtr &= ~target_mask;
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
+ aic_dev->needsdtr = 0;
}
done = TRUE;
break;
@@ -5566,7 +5183,7 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
reject = TRUE;
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- ((p->dev_flags[tindex] & DEVICE_PRINT_DTR) ||
+ ((aic_dev->flags & DEVICE_PRINT_DTR) ||
(aic7xxx_verbose > 0xffff)) )
{
printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
@@ -5575,8 +5192,8 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
} /* We fall through on purpose */
case MSG_EXT_WDTR_BUS_8_BIT:
{
- p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
- p->needwdtr_copy &= ~target_mask;
+ aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT;
+ aic_dev->needwdtr_copy &= ~target_mask;
break;
}
case MSG_EXT_WDTR_BUS_16_BIT:
@@ -5584,56 +5201,55 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
break;
}
}
- p->needwdtr &= ~target_mask;
+ aic_dev->needwdtr = 0;
aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
}
else
{
- if ( !(p->dev_flags[tindex] & DEVICE_DTR_SCANNED) )
+ if ( !(aic_dev->flags & DEVICE_DTR_SCANNED) )
{
/*
* Well, we now know the WDTR and SYNC caps of this device since
* it contacted us first, mark it as such and copy the user stuff
* over to the goal stuff.
*/
- if( (p->features & AHC_WIDE) && p->transinfo[tindex].user_width )
+ if( (p->features & AHC_WIDE) && p->user[tindex].width )
{
- p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_16_BIT;
- p->needwdtr_copy |= target_mask;
+ aic_dev->goal.width = MSG_EXT_WDTR_BUS_16_BIT;
+ aic_dev->needwdtr_copy = 1;
}
/*
* Devices that support DT transfers don't start WDTR requests
*/
- p->transinfo[tindex].goal_options = 0;
+ aic_dev->goal.options = 0;
- if(p->transinfo[tindex].user_offset)
+ if(p->user[tindex].offset)
{
- p->needsdtr_copy |= target_mask;
- p->transinfo[tindex].goal_period =
- MAX(10,p->transinfo[tindex].user_period);
+ aic_dev->needsdtr_copy = 1;
+ aic_dev->goal.period = MAX(10,p->user[tindex].period);
if(p->features & AHC_ULTRA2)
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
}
- else if( p->transinfo[tindex].goal_width )
+ else if( aic_dev->goal.width )
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ aic_dev->goal.offset = MAX_OFFSET_16BIT;
}
else
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ aic_dev->goal.offset = MAX_OFFSET_8BIT;
}
} else {
- p->needsdtr_copy &= ~target_mask;
- p->transinfo[tindex].goal_period = 255;
- p->transinfo[tindex].goal_offset = 0;
+ aic_dev->needsdtr_copy = 0;
+ aic_dev->goal.period = 255;
+ aic_dev->goal.offset = 0;
}
- p->dev_flags[tindex] |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
+ aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
}
- else if ((p->needwdtr_copy & target_mask) == 0)
+ else if (aic_dev->needwdtr_copy == 0)
{
/*
* This is a preemptive message from the target, we've already
@@ -5658,8 +5274,7 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
case MSG_EXT_WDTR_BUS_16_BIT:
{
if ( (p->features & AHC_WIDE) &&
- (p->transinfo[tindex].goal_width ==
- MSG_EXT_WDTR_BUS_16_BIT) )
+ (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) )
{
new_bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
@@ -5668,21 +5283,21 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
default:
case MSG_EXT_WDTR_BUS_8_BIT:
{
- p->needwdtr_copy &= ~target_mask;
+ aic_dev->needwdtr_copy = 0;
new_bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
}
}
scb->flags &= ~SCB_MSGOUT_BITS;
scb->flags |= SCB_MSGOUT_WDTR;
- p->needwdtr &= ~target_mask;
- if((p->dtr_pending & target_mask) == 0)
+ aic_dev->needwdtr = 0;
+ if(aic_dev->dtr_pending == 0)
{
/* there is no other command with SCB_DTR_SCB already set that will
* trigger the release of the dtr_pending bit. Both set the bit
* and set scb->flags |= SCB_DTR_SCB
*/
- p->dtr_pending |= target_mask;
+ aic_dev->dtr_pending = 1;
scb->flags |= SCB_DTR_SCB;
}
aic_outb(p, HOST_MSG, MSG_OUT);
@@ -5693,7 +5308,8 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* goal settings as its guidelines.
*/
aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
+ aic_dev);
}
/*
@@ -5704,8 +5320,9 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* of needstr.
*/
aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
- p->needsdtr |= (p->needsdtr_copy & target_mask);
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
+ aic_dev);
+ aic_dev->needsdtr = aic_dev->needsdtr_copy;
done = TRUE;
break;
}
@@ -5745,19 +5362,16 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
(SCB_MSGOUT_SENT|SCB_MSGOUT_PPR) )
{
/* Have we scanned the device yet? */
- if (!(p->dev_flags[tindex] & DEVICE_DTR_SCANNED))
+ if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
{
/* The device is electing to use PPR messages, so we will too until
* we know better */
- p->needppr |= target_mask;
- p->needppr_copy |= target_mask;
- p->needsdtr &= ~target_mask;
- p->needsdtr_copy &= ~target_mask;
- p->needwdtr &= ~target_mask;
- p->needwdtr_copy &= ~target_mask;
+ aic_dev->needppr = aic_dev->needppr_copy = 1;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
/* We know the device is SCSI-3 compliant due to PPR */
- p->dev_flags[tindex] |= DEVICE_SCSI_3;
+ aic_dev->flags |= DEVICE_SCSI_3;
/*
* Not only is the device starting this up, but it also hasn't
@@ -5767,38 +5381,35 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* if we didn't find a SEEPROM, we stuffed default values into
* the user settings anyway, so use those in all cases.
*/
- p->transinfo[tindex].goal_width =
- p->transinfo[tindex].user_width;
- if(p->transinfo[tindex].user_offset)
+ aic_dev->goal.width = p->user[tindex].width;
+ if(p->user[tindex].offset)
{
- p->transinfo[tindex].goal_period =
- p->transinfo[tindex].user_period;
- p->transinfo[tindex].goal_options =
- p->transinfo[tindex].user_options;
+ aic_dev->goal.period = p->user[tindex].period;
+ aic_dev->goal.options = p->user[tindex].options;
if(p->features & AHC_ULTRA2)
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
}
- else if( p->transinfo[tindex].goal_width &&
+ else if( aic_dev->goal.width &&
(bus_width == MSG_EXT_WDTR_BUS_16_BIT) &&
p->features & AHC_WIDE )
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ aic_dev->goal.offset = MAX_OFFSET_16BIT;
}
else
{
- p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ aic_dev->goal.offset = MAX_OFFSET_8BIT;
}
}
else
{
- p->transinfo[tindex].goal_period = 255;
- p->transinfo[tindex].goal_offset = 0;
- p->transinfo[tindex].goal_options = 0;
+ aic_dev->goal.period = 255;
+ aic_dev->goal.offset = 0;
+ aic_dev->goal.options = 0;
}
- p->dev_flags[tindex] |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
+ aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
}
- else if ((p->needppr_copy & target_mask) == 0)
+ else if (aic_dev->needppr_copy == 0)
{
/*
* This is a preemptive message from the target, we've already
@@ -5825,8 +5436,8 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
case MSG_EXT_WDTR_BUS_16_BIT:
{
- if ( (p->transinfo[tindex].goal_width ==
- MSG_EXT_WDTR_BUS_16_BIT) && p->features & AHC_WIDE)
+ if ( (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) &&
+ p->features & AHC_WIDE)
{
break;
}
@@ -5834,7 +5445,7 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
default:
{
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- ((p->dev_flags[tindex] & DEVICE_PRINT_DTR) ||
+ ((aic_dev->flags & DEVICE_PRINT_DTR) ||
(aic7xxx_verbose > 0xffff)) )
{
reply = TRUE;
@@ -5862,24 +5473,26 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* goal settings as its guidelines.
*/
aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
+ aic_dev);
syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
&new_trans_options);
aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width);
aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
new_offset, new_trans_options,
- AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
+ aic_dev);
}
else
{
aic7xxx_set_width(p, target, channel, lun, new_bus_width,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
&new_trans_options);
aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width);
aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
new_offset, new_trans_options,
- AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
}
/*
@@ -5891,17 +5504,14 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
*/
if(new_trans_options == 0)
{
- p->needppr &= ~target_mask;
- p->needppr_copy &= ~target_mask;
+ aic_dev->needppr = aic_dev->needppr_copy = 0;
if(new_offset)
{
- p->needsdtr |= target_mask;
- p->needsdtr_copy |= target_mask;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
}
if (new_bus_width)
{
- p->needwdtr |= target_mask;
- p->needwdtr_copy |= target_mask;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
}
}
@@ -5924,7 +5534,7 @@ aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
}
else
{
- p->needppr &= ~target_mask;
+ aic_dev->needppr = 0;
}
done = TRUE;
break;
@@ -6079,6 +5689,7 @@ aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
unsigned char scb_index;
unsigned char status;
struct aic7xxx_scb *scb;
+ struct aic_dev_data *aic_dev;
scb_index = aic_inb(p, SCB_TAG);
status = aic_inb(p, SSTAT1);
@@ -6361,7 +5972,6 @@ aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
unsigned char mesg_out = MSG_NOOP;
unsigned char lastphase = aic_inb(p, LASTPHASE);
unsigned char sstat2 = aic_inb(p, SSTAT2);
- unsigned char tindex = TARGET_INDEX(scb->cmd);
cmd = scb->cmd;
switch (lastphase)
@@ -6434,33 +6044,33 @@ aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
* a parity error during msg_out phase to signal that they don't
* like our settings.
*/
- p->needppr &= ~(1 << tindex);
- p->needppr_copy &= ~(1 << tindex);
+ aic_dev = AIC_DEV(scb->cmd);
+ aic_dev->needppr = aic_dev->needppr_copy = 0;
aic7xxx_set_width(p, scb->cmd->target, scb->cmd->channel, scb->cmd->lun,
MSG_EXT_WDTR_BUS_8_BIT,
- (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE));
+ (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE),
+ aic_dev);
aic7xxx_set_syncrate(p, NULL, scb->cmd->target, scb->cmd->channel, 0, 0,
- 0, AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
- p->transinfo[tindex].goal_options = 0;
+ 0, AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
+ aic_dev);
+ aic_dev->goal.options = 0;
scb->flags &= ~SCB_MSGOUT_BITS;
if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
printk(INFO_LEAD "parity error during PPR message, reverting "
"to WDTR/SDTR\n", p->host_no, CTL_OF_SCB(scb));
}
- if ( p->transinfo[tindex].goal_width )
+ if ( aic_dev->goal.width )
{
- p->needwdtr |= (1 << tindex);
- p->needwdtr_copy |= (1 << tindex);
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
}
- if ( p->transinfo[tindex].goal_offset )
+ if ( aic_dev->goal.offset )
{
- if( p->transinfo[tindex].goal_period <= 9 )
+ if( aic_dev->goal.period <= 9 )
{
- p->transinfo[tindex].goal_period = 10;
+ aic_dev->goal.period = 10;
}
- p->needsdtr |= (1 << tindex);
- p->needsdtr_copy |= (1 << tindex);
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
}
scb = NULL;
}
@@ -6665,6 +6275,7 @@ static void
aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p)
{
struct aic7xxx_scb *scb = NULL;
+ struct aic_dev_data *aic_dev;
Scsi_Cmnd *cmd;
unsigned char scb_index, tindex;
@@ -6706,6 +6317,7 @@ aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p)
continue;
}
tindex = TARGET_INDEX(scb->cmd);
+ aic_dev = AIC_DEV(scb->cmd);
if (scb->flags & SCB_QUEUED_ABORT)
{
pause_sequencer(p);
@@ -6738,40 +6350,18 @@ aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p)
/*
* Signal that we need to re-negotiate things.
*/
- p->needppr |= (p->needppr_copy & (1<<tindex));
- p->needsdtr |= (p->needsdtr_copy & (1<<tindex));
- p->needwdtr |= (p->needwdtr_copy & (1<<tindex));
+ aic_dev->needppr = aic_dev->needppr_copy;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy;
+ aic_dev->needwdtr = aic_dev->needwdtr_copy;
}
}
- switch (status_byte(scb->hscb->target_status))
+ cmd = scb->cmd;
+ if (scb->hscb->residual_SG_segment_count != 0)
{
- case QUEUE_FULL:
- case BUSY:
- scb->hscb->target_status = 0;
- scb->cmd->result = 0;
- scb->hscb->residual_SG_segment_count = 0;
- scb->hscb->residual_data_count[0] = 0;
- scb->hscb->residual_data_count[1] = 0;
- scb->hscb->residual_data_count[2] = 0;
- aic7xxx_error(scb->cmd) = DID_OK;
- aic7xxx_status(scb->cmd) = 0;
- /*
- * The QUEUE_FULL/BUSY handler in aic7xxx_seqint takes care of putting
- * this command on a timer and allowing us to retry it. Here, we
- * just 0 out a few values so that they don't carry through to when
- * the command finally does complete.
- */
- break;
- default:
- cmd = scb->cmd;
- if (scb->hscb->residual_SG_segment_count != 0)
- {
- aic7xxx_calculate_residual(p, scb);
- }
- cmd->result |= (aic7xxx_error(cmd) << 16);
- aic7xxx_done(p, scb);
- break;
- }
+ aic7xxx_calculate_residual(p, scb);
+ }
+ cmd->result |= (aic7xxx_error(cmd) << 16);
+ aic7xxx_done(p, scb);
}
}
@@ -6937,6 +6527,79 @@ do_aic7xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
/*+F*************************************************************************
* Function:
+ * aic7xxx_init_aic_dev
+ *
+ * Description:
+ * Set up the initial aic_dev values from the BIOS settings and data
+ * we got back from INQUIRY commands.
+ *-F*************************************************************************/
+static void
+aic7xxx_init_aic_dev(struct aic7xxx_host *p, struct aic_dev_data *aic_dev)
+{
+ struct scsi_device *sdpnt = aic_dev->SDptr;
+ unsigned char tindex;
+
+ tindex = sdpnt->id | (sdpnt->channel << 3); /* per-target index: channel selects the upper bank of 8 ids */
+
+ scbq_init(&aic_dev->delayed_scbs); /* start with an empty delayed-SCB queue for this device */
+
+ if (!(aic_dev->flags & DEVICE_DTR_SCANNED)) /* run the transfer-negotiation setup only once per device */
+ {
+ aic_dev->flags |= DEVICE_DTR_SCANNED;
+
+ if ( sdpnt->wdtr && (p->features & AHC_WIDE) ) /* device and adapter both wide-capable */
+ {
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; /* schedule a WDTR negotiation */
+ aic_dev->goal.width = p->user[tindex].width; /* goal width comes from BIOS/user settings */
+ }
+ else
+ {
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
+ pause_sequencer(p); /* sequencer must be paused while touching transfer registers */
+ aic7xxx_set_width(p, sdpnt->id, sdpnt->channel, sdpnt->lun,
+ MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
+ AHC_TRANS_GOAL |
+ AHC_TRANS_CUR), aic_dev ); /* force narrow transfers */
+ unpause_sequencer(p, FALSE);
+ }
+ if ( sdpnt->sdtr && p->user[tindex].offset ) /* sync-capable device and user allows sync (nonzero offset) */
+ {
+ aic_dev->goal.period = p->user[tindex].period;
+ aic_dev->goal.options = p->user[tindex].options;
+ if (p->features & AHC_ULTRA2)
+ aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
+ else if (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT)
+ aic_dev->goal.offset = MAX_OFFSET_16BIT;
+ else
+ aic_dev->goal.offset = MAX_OFFSET_8BIT;
+ if ( sdpnt->ppr && p->user[tindex].period <= 9 &&
+ p->user[tindex].options ) /* fast enough (DT clocking) and options set: use PPR */
+ {
+ aic_dev->needppr = aic_dev->needppr_copy = 1;
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; /* PPR supersedes separate SDTR/WDTR */
+ aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
+ aic_dev->flags |= DEVICE_SCSI_3; /* PPR support implies SCSI-3 compliance */
+ }
+ else
+ {
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
+ aic_dev->goal.period = MAX(10, aic_dev->goal.period); /* non-PPR transfers can't go below period 10 */
+ aic_dev->goal.options = 0;
+ }
+ }
+ else
+ {
+ aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; /* async only: slowest period, zero offset */
+ aic_dev->goal.period = 255;
+ aic_dev->goal.offset = 0;
+ aic_dev->goal.options = 0;
+ }
+ aic_dev->flags |= DEVICE_PRINT_DTR; /* allow negotiation results to be printed once */
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
* aic7xxx_device_queue_depth
*
* Description:
@@ -6956,81 +6619,16 @@ static void
aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
{
int default_depth = p->host->hostt->cmd_per_lun;
+ struct aic_dev_data *aic_dev = device->hostdata;
unsigned char tindex;
- unsigned short target_mask;
tindex = device->id | (device->channel << 3);
- target_mask = (1 << tindex);
- if (p->dev_max_queue_depth[tindex] > 1)
- {
- /*
- * We've already scanned some lun on this device and enabled tagged
- * queueing on it. So, as long as this lun also supports tagged
- * queueing, enable it here with the same depth. Call SCSI mid layer
- * to adjust depth on this device, and add enough to the max_queue_depth
- * to cover the commands for this lun.
- *
- * Note: there is a shortcoming here. The aic7xxx driver really assumes
- * that if any lun on a device supports tagged queueing, then they *all*
- * do. Our p->tagenable field is on a per target id basis and doesn't
- * differentiate for different luns. If we end up with one lun that
- * doesn't support tagged queueing, it's going to disable tagged queueing
- * on *all* the luns on that target ID :-(
- */
- if(device->tagged_supported) {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- printk(INFO_LEAD "Enabled tagged queuing on secondary lun, queue depth +%d.\n",
- p->host_no, device->channel, device->id,
- device->lun, p->dev_lun_queue_depth[tindex]);
- }
- p->dev_max_queue_depth[tindex] += p->dev_lun_queue_depth[tindex];
- p->dev_temp_queue_depth[tindex] += p->dev_lun_queue_depth[tindex];
- scsi_adjust_queue_depth(device, 1, p->dev_lun_queue_depth[tindex]);
- }
- else
- {
- int lun;
- /*
- * Uh ohh, this is what I was talking about. All the other devices on
- * this target ID that support tagged queueing are going to end up
- * getting tagged queueing turned off because of this device. Print
- * out a message to this effect for the user, then disable tagged
- * queueing on all the devices on this ID.
- */
- printk(WARN_LEAD "does not support tagged queuing while other luns on\n"
- " the same target ID do!! Tagged queueing will be disabled for\n"
- " all luns on this target ID!!\n", p->host_no,
- device->channel, device->id, device->lun);
-
- p->dev_lun_queue_depth[tindex] = default_depth;
- p->dev_scbs_needed[tindex] = 0;
- p->dev_temp_queue_depth[tindex] = 1;
- p->dev_max_queue_depth[tindex] = 1;
- p->tagenable &= ~target_mask;
-
- for(lun=0; lun < p->host->max_lun; lun++)
- {
- if(p->Scsi_Dev[tindex][lun] != NULL)
- {
- printk(WARN_LEAD "disabling tagged queuing.\n", p->host_no,
- p->Scsi_Dev[tindex][lun]->channel,
- p->Scsi_Dev[tindex][lun]->id,
- p->Scsi_Dev[tindex][lun]->lun);
- scsi_adjust_queue_depth(p->Scsi_Dev[tindex][lun], 0, default_depth);
- p->dev_scbs_needed[tindex] += default_depth;
- }
- }
- }
- return;
- }
+ if (device->simple_tags)
+ return; // We've already enabled this device
- p->dev_lun_queue_depth[tindex] = default_depth;
- p->dev_scbs_needed[tindex] = default_depth;
- p->dev_temp_queue_depth[tindex] = 1;
- p->dev_max_queue_depth[tindex] = 1;
- p->tagenable &= ~target_mask;
+ aic_dev->temp_q_depth = 1;
+ aic_dev->max_q_depth = 1;
if (device->tagged_supported)
{
@@ -7038,7 +6636,7 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
default_depth = AIC7XXX_CMDS_PER_DEVICE;
- if (!(p->discenable & target_mask))
+ if (!(p->discenable & (1 << tindex)))
{
if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
printk(INFO_LEAD "Disconnection disabled, unable to "
@@ -7058,7 +6656,7 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
" the aic7xxx.c source file.\n");
print_warning = FALSE;
}
- p->dev_lun_queue_depth[tindex] = default_depth;
+ aic_dev->max_q_depth = aic_dev->temp_q_depth = default_depth;
}
else
{
@@ -7069,11 +6667,11 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
}
else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0)
{
- p->dev_lun_queue_depth[tindex] = default_depth;
+ aic_dev->max_q_depth = aic_dev->temp_q_depth = default_depth;
}
else
{
- p->dev_lun_queue_depth[tindex] =
+ aic_dev->max_q_depth = aic_dev->temp_q_depth =
aic7xxx_tag_info[p->instance].tag_commands[tindex];
}
}
@@ -7083,14 +6681,9 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
{
printk(INFO_LEAD "Enabled tagged queuing, queue depth %d.\n",
p->host_no, device->channel, device->id,
- device->lun, p->dev_lun_queue_depth[tindex]);
+ device->lun, aic_dev->max_q_depth);
}
- p->dev_max_queue_depth[tindex] = p->dev_lun_queue_depth[tindex];
- p->dev_temp_queue_depth[tindex] = p->dev_lun_queue_depth[tindex];
- p->dev_scbs_needed[tindex] = p->dev_lun_queue_depth[tindex];
- p->tagenable |= target_mask;
- p->orderedtag |= target_mask;
- scsi_adjust_queue_depth(device, 1, p->dev_lun_queue_depth[tindex]);
+ scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, aic_dev->max_q_depth);
}
}
}
@@ -7107,22 +6700,11 @@ aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
void
aic7xxx_slave_detach(Scsi_Device *sdpnt)
{
- struct aic7xxx_host *p = (struct aic7xxx_host *) sdpnt->host->hostdata;
- int lun, tindex;
-
- tindex = sdpnt->id | (sdpnt->channel << 3);
- lun = sdpnt->lun;
- if(p->Scsi_Dev[tindex][lun] == NULL)
- return;
+ struct aic_dev_data *aic_dev = sdpnt->hostdata;
- if(p->tagenable & (1 << tindex))
- {
- p->dev_max_queue_depth[tindex] -= p->dev_lun_queue_depth[tindex];
- if(p->dev_temp_queue_depth[tindex] > p->dev_max_queue_depth[tindex])
- p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
- }
- p->dev_scbs_needed[tindex] -= p->dev_lun_queue_depth[tindex];
- p->Scsi_Dev[tindex][lun] = NULL;
+ list_del(&aic_dev->list);
+ sdpnt->hostdata = NULL;
+ kfree(aic_dev);
return;
}
@@ -7139,16 +6721,32 @@ int
aic7xxx_slave_attach(Scsi_Device *sdpnt)
{
struct aic7xxx_host *p = (struct aic7xxx_host *) sdpnt->host->hostdata;
- int scbnum, tindex, i;
+ struct aic_dev_data *aic_dev;
+ int scbnum;
+ struct list_head *list_ptr, *list_head;
- tindex = sdpnt->id | (sdpnt->channel << 3);
- p->dev_flags[tindex] |= DEVICE_PRESENT;
+ if(!sdpnt->hostdata) {
+ sdpnt->hostdata = kmalloc(sizeof(struct aic_dev_data), GFP_ATOMIC);
+ if(!sdpnt->hostdata)
+ return 1;
+ memset(sdpnt->hostdata, 0, sizeof(struct aic_dev_data));
+ }
+
+ aic_dev = sdpnt->hostdata;
+ aic_dev->SDptr = sdpnt;
+
+ aic7xxx_init_aic_dev(p, aic_dev);
+
+ list_add(&aic_dev->list, &p->aic_devs);
- p->Scsi_Dev[tindex][sdpnt->lun] = sdpnt;
aic7xxx_device_queue_depth(p, sdpnt);
- for(i = 0, scbnum = 0; i < p->host->max_id; i++)
- scbnum += p->dev_scbs_needed[i];
+ scbnum = 0;
+ list_head = &p->aic_devs;
+ list_for_each(list_ptr, list_head) {
+ aic_dev = list_entry(list_ptr, struct aic_dev_data, list);
+ scbnum += aic_dev->max_q_depth;
+ }
while (scbnum > p->scb_data->numscbs)
{
/*
@@ -7160,72 +6758,6 @@ aic7xxx_slave_attach(Scsi_Device *sdpnt)
break;
}
- /*
- * We only need to check INQUIRY data on one lun of multi lun devices
- * since speed negotiations are not lun specific. Once we've check this
- * particular target id once, the DEVICE_PRESENT flag will be set.
- */
- if (!(p->dev_flags[tindex] & DEVICE_DTR_SCANNED))
- {
- p->dev_flags[tindex] |= DEVICE_DTR_SCANNED;
-
- if ( sdpnt->wdtr && (p->features & AHC_WIDE) )
- {
- p->needwdtr |= (1<<tindex);
- p->needwdtr_copy |= (1<<tindex);
- p->transinfo[tindex].goal_width = p->transinfo[tindex].user_width;
- }
- else
- {
- p->needwdtr &= ~(1<<tindex);
- p->needwdtr_copy &= ~(1<<tindex);
- pause_sequencer(p);
- aic7xxx_set_width(p, sdpnt->id, sdpnt->channel, sdpnt->lun,
- MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
- AHC_TRANS_GOAL |
- AHC_TRANS_CUR) );
- unpause_sequencer(p, FALSE);
- }
- if ( sdpnt->sdtr && p->transinfo[tindex].user_offset )
- {
- p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
- p->transinfo[tindex].goal_options = p->transinfo[tindex].user_options;
- if (p->features & AHC_ULTRA2)
- p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
- else if (p->transinfo[tindex].goal_width == MSG_EXT_WDTR_BUS_16_BIT)
- p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
- else
- p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
- if ( sdpnt->ppr && p->transinfo[tindex].user_period <= 9 &&
- p->transinfo[tindex].user_options )
- {
- p->needppr |= (1<<tindex);
- p->needppr_copy |= (1<<tindex);
- p->needsdtr &= ~(1<<tindex);
- p->needsdtr_copy &= ~(1<<tindex);
- p->needwdtr &= ~(1<<tindex);
- p->needwdtr_copy &= ~(1<<tindex);
- p->dev_flags[tindex] |= DEVICE_SCSI_3;
- }
- else
- {
- p->needsdtr |= (1<<tindex);
- p->needsdtr_copy |= (1<<tindex);
- p->transinfo[tindex].goal_period =
- MAX(10, p->transinfo[tindex].goal_period);
- p->transinfo[tindex].goal_options = 0;
- }
- }
- else
- {
- p->needsdtr &= ~(1<<tindex);
- p->needsdtr_copy &= ~(1<<tindex);
- p->transinfo[tindex].goal_period = 255;
- p->transinfo[tindex].goal_offset = 0;
- p->transinfo[tindex].goal_options = 0;
- }
- p->dev_flags[tindex] |= DEVICE_PRINT_DTR;
- }
return(0);
}
@@ -8347,10 +7879,6 @@ aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
p->completeq.tail = NULL;
scbq_init(&p->scb_data->free_scbs);
scbq_init(&p->waiting_scbs);
- init_timer(&p->dev_timer);
- p->dev_timer.data = (unsigned long)p;
- p->dev_timer.function = (void *)aic7xxx_timer;
- p->dev_timer_active = 0;
/*
* We currently have no commands of any type
@@ -8358,19 +7886,6 @@ aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
p->qinfifonext = 0;
p->qoutfifonext = 0;
- for (i = 0; i < MAX_TARGETS; i++)
- {
- p->dev_commands_sent[i] = 0;
- p->dev_flags[i] = 0;
- p->dev_active_cmds[i] = 0;
- p->dev_last_queue_full[i] = 0;
- p->dev_last_queue_full_count[i] = 0;
- p->dev_max_queue_depth[i] = 1;
- p->dev_temp_queue_depth[i] = 1;
- p->dev_expires[i] = 0;
- scbq_init(&p->delayed_scbs[i]);
- }
-
printk(KERN_INFO "(scsi%d) <%s> found at ", p->host_no,
board_names[p->board_name_index]);
switch(p->chip)
@@ -8590,22 +8105,6 @@ aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
{
/*
* If we reset the bus, then clear the transfer settings, else leave
- * them be
- */
- for (i = 0; i < max_targets; i++)
- {
- aic_outb(p, 0, TARG_SCSIRATE + i);
- if (p->features & AHC_ULTRA2)
- {
- aic_outb(p, 0, TARG_OFFSET + i);
- }
- p->transinfo[i].cur_offset = 0;
- p->transinfo[i].cur_period = 0;
- p->transinfo[i].cur_width = MSG_EXT_WDTR_BUS_8_BIT;
- }
-
- /*
- * If we reset the bus, then clear the transfer settings, else leave
* them be.
*/
aic_outb(p, 0, ULTRA_ENB);
@@ -8776,19 +8275,6 @@ aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
aic7xxx_reset_current_bus(p);
- /*
- * Delay for the reset delay by setting the timer, this will delay
- * future commands sent to any devices.
- */
- p->flags |= AHC_RESET_DELAY;
- for(i=0; i<MAX_TARGETS; i++)
- {
- p->dev_expires[i] = jiffies + (4 * HZ);
- p->dev_timer_active |= (0x01 << i);
- }
- p->dev_timer.expires = p->dev_expires[p->scsi_id];
- add_timer(&p->dev_timer);
- p->dev_timer_active |= (0x01 << MAX_TARGETS);
}
else
{
@@ -8905,7 +8391,6 @@ aic7xxx_alloc(Scsi_Host_Template *sht, struct aic7xxx_host *temp)
{
struct aic7xxx_host *p = NULL;
struct Scsi_Host *host;
- int i;
/*
* Allocate a storage area by registering us with the mid-level
@@ -8919,7 +8404,6 @@ aic7xxx_alloc(Scsi_Host_Template *sht, struct aic7xxx_host *temp)
memset(p, 0, sizeof(struct aic7xxx_host));
*p = *temp;
p->host = host;
- host->max_sectors = 512;
p->scb_data = kmalloc(sizeof(scb_data_type), GFP_ATOMIC);
if (p->scb_data != NULL)
@@ -8938,16 +8422,6 @@ aic7xxx_alloc(Scsi_Host_Template *sht, struct aic7xxx_host *temp)
return(NULL);
}
p->host_no = host->host_no;
- p->tagenable = 0;
- p->orderedtag = 0;
- for (i=0; i<MAX_TARGETS; i++)
- {
- p->transinfo[i].goal_period = 255;
- p->transinfo[i].goal_offset = 0;
- p->transinfo[i].goal_options = 0;
- p->transinfo[i].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
- }
- DRIVER_LOCK_INIT
}
scsi_set_pci_device(host, p->pdev);
return (p);
@@ -9163,7 +8637,7 @@ aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1)
}
p->discenable = 0;
-
+
/*
* Limit to 16 targets just in case. The 2842 for one is known to
* blow the max_targets setting, future cards might also.
@@ -9197,11 +8671,11 @@ aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1)
* Just make our sc->device_flags[i] entry match what the card has
* set for this device.
*/
- p->discenable =
- ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) );
+ p->discenable =
+ ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) );
p->ultraenb =
(aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8) );
- sc->device_flags[i] = (p->discenable & mask) ? CFDISC : 0;
+ sc->device_flags[i] = (p->discenable & mask) ? CFDISC : 0;
if (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER)
sc->device_flags[i] |= CFWIDEB;
if (p->features & AHC_ULTRA2)
@@ -9291,110 +8765,69 @@ aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1)
{
sc->device_flags[i] &= ~CFXFER;
p->ultraenb &= ~mask;
- p->transinfo[i].user_offset = 0;
- p->transinfo[i].user_period = 0;
- p->transinfo[i].user_options = 0;
- p->transinfo[i].cur_offset = 0;
- p->transinfo[i].cur_period = 0;
- p->transinfo[i].cur_options = 0;
- p->needsdtr_copy &= ~mask;
+ p->user[i].offset = 0;
+ p->user[i].period = 0;
+ p->user[i].options = 0;
}
else
{
if (p->features & AHC_ULTRA3)
{
- p->transinfo[i].user_offset = MAX_OFFSET_ULTRA2;
- p->transinfo[i].cur_offset = aic_inb(p, TARG_OFFSET + i);
+ p->user[i].offset = MAX_OFFSET_ULTRA2;
if( (sc->device_flags[i] & CFXFER) < 0x03 )
{
scsirate = (sc->device_flags[i] & CFXFER);
- p->transinfo[i].user_options = MSG_EXT_PPR_OPTION_DT_CRC;
- if( (aic_inb(p, TARG_SCSIRATE + i) & CFXFER) < 0x03 )
- {
- p->transinfo[i].cur_options =
- ((aic_inb(p, TARG_SCSIRATE + i) & 0x40) ?
- MSG_EXT_PPR_OPTION_DT_CRC : MSG_EXT_PPR_OPTION_DT_UNITS);
- }
- else
- {
- p->transinfo[i].cur_options = 0;
- }
+ p->user[i].options = MSG_EXT_PPR_OPTION_DT_CRC;
}
else
{
scsirate = (sc->device_flags[i] & CFXFER) |
((p->ultraenb & mask) ? 0x18 : 0x10);
- p->transinfo[i].user_options = 0;
- p->transinfo[i].cur_options = 0;
+ p->user[i].options = 0;
}
- p->transinfo[i].user_period = aic7xxx_find_period(p, scsirate,
- AHC_SYNCRATE_ULTRA3);
- p->transinfo[i].cur_period = aic7xxx_find_period(p,
- aic_inb(p, TARG_SCSIRATE + i),
+ p->user[i].period = aic7xxx_find_period(p, scsirate,
AHC_SYNCRATE_ULTRA3);
}
else if (p->features & AHC_ULTRA2)
{
- p->transinfo[i].user_offset = MAX_OFFSET_ULTRA2;
- p->transinfo[i].cur_offset = aic_inb(p, TARG_OFFSET + i);
+ p->user[i].offset = MAX_OFFSET_ULTRA2;
scsirate = (sc->device_flags[i] & CFXFER) |
((p->ultraenb & mask) ? 0x18 : 0x10);
- p->transinfo[i].user_options = 0;
- p->transinfo[i].cur_options = 0;
- p->transinfo[i].user_period = aic7xxx_find_period(p, scsirate,
- AHC_SYNCRATE_ULTRA2);
- p->transinfo[i].cur_period = aic7xxx_find_period(p,
- aic_inb(p, TARG_SCSIRATE + i),
+ p->user[i].options = 0;
+ p->user[i].period = aic7xxx_find_period(p, scsirate,
AHC_SYNCRATE_ULTRA2);
}
else
{
scsirate = (sc->device_flags[i] & CFXFER) << 4;
- p->transinfo[i].user_options = 0;
- p->transinfo[i].cur_options = 0;
- p->transinfo[i].user_offset = MAX_OFFSET_8BIT;
+ p->user[i].options = 0;
+ p->user[i].offset = MAX_OFFSET_8BIT;
if (p->features & AHC_ULTRA)
{
short ultraenb;
ultraenb = aic_inb(p, ULTRA_ENB) |
(aic_inb(p, ULTRA_ENB + 1) << 8);
- p->transinfo[i].user_period = aic7xxx_find_period(p,
- scsirate,
+ p->user[i].period = aic7xxx_find_period(p, scsirate,
(p->ultraenb & mask) ?
AHC_SYNCRATE_ULTRA :
AHC_SYNCRATE_FAST);
- p->transinfo[i].cur_period = aic7xxx_find_period(p,
- aic_inb(p, TARG_SCSIRATE + i),
- (ultraenb & mask) ?
- AHC_SYNCRATE_ULTRA :
- AHC_SYNCRATE_FAST);
}
else
- p->transinfo[i].user_period = aic7xxx_find_period(p,
- scsirate, AHC_SYNCRATE_FAST);
+ p->user[i].period = aic7xxx_find_period(p, scsirate,
+ AHC_SYNCRATE_FAST);
}
- p->needsdtr_copy |= mask;
}
if ( (sc->device_flags[i] & CFWIDEB) && (p->features & AHC_WIDE) )
{
- p->transinfo[i].user_width = MSG_EXT_WDTR_BUS_16_BIT;
- p->needwdtr_copy |= mask;
+ p->user[i].width = MSG_EXT_WDTR_BUS_16_BIT;
}
else
{
- p->transinfo[i].user_width = MSG_EXT_WDTR_BUS_8_BIT;
- p->needwdtr_copy &= ~mask;
+ p->user[i].width = MSG_EXT_WDTR_BUS_8_BIT;
}
- p->transinfo[i].cur_width =
- (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER) ?
- MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
}
aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB);
aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1);
- p->needppr = p->needppr_copy = 0;
- p->needwdtr = p->needwdtr_copy;
- p->needsdtr = p->needsdtr_copy;
- p->dtr_pending = 0;
/*
* We set the p->ultraenb from the SEEPROM to begin with, but now we make
@@ -9560,17 +8993,11 @@ aic7xxx_detect(Scsi_Host_Template *template)
*/
if(aic7xxx)
aic7xxx_setup(aic7xxx);
- if(dummy_buffer[0] != 'P')
- printk(KERN_WARNING "aic7xxx: Please read the file /usr/src/linux/drivers"
- "/scsi/README.aic7xxx\n"
- "aic7xxx: to see the proper way to specify options to the aic7xxx "
- "module\n"
- "aic7xxx: Specifically, don't use any commas when passing arguments to\n"
- "aic7xxx: insmod or else it might trash certain memory areas.\n");
#endif
template->proc_name = "aic7xxx";
template->sg_tablesize = AIC7XXX_MAX_SG;
+ template->max_sectors = 2048;
#ifdef CONFIG_PCI
@@ -10728,6 +10155,8 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
{
unsigned short mask;
struct aic7xxx_hwscb *hscb;
+ struct aic_dev_data *aic_dev = cmd->device->hostdata;
+ struct scsi_device *sdptr = cmd->device;
unsigned char tindex = TARGET_INDEX(cmd);
mask = (0x01 << tindex);
@@ -10743,50 +10172,40 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
if (p->discenable & mask)
{
hscb->control |= DISCENB;
- if ( (p->tagenable & mask) &&
+ if ( (sdptr->simple_tags) &&
(cmd->cmnd[0] != TEST_UNIT_READY) )
{
- p->dev_commands_sent[tindex]++;
- if (p->dev_commands_sent[tindex] < 200)
+ aic_dev->cmds_sent++;
+ if (aic_dev->cmds_sent >= 254 && sdptr->ordered_tags)
{
- hscb->control |= MSG_SIMPLE_Q_TAG;
- scb->tag_action = MSG_SIMPLE_Q_TAG;
+ aic_dev->cmds_sent = 0;
+ hscb->control |= MSG_ORDERED_Q_TAG;
+ scb->tag_action = MSG_ORDERED_Q_TAG;
}
else
{
- if (p->orderedtag & mask)
- {
- hscb->control |= MSG_ORDERED_Q_TAG;
- scb->tag_action = MSG_ORDERED_Q_TAG;
- }
- else
- {
- hscb->control |= MSG_SIMPLE_Q_TAG;
- scb->tag_action = MSG_SIMPLE_Q_TAG;
- }
- p->dev_commands_sent[tindex] = 0;
+ hscb->control |= MSG_SIMPLE_Q_TAG;
+ scb->tag_action = MSG_SIMPLE_Q_TAG;
}
}
}
- if ( !(p->dtr_pending & mask) &&
- ( (p->needppr & mask) ||
- (p->needwdtr & mask) ||
- (p->needsdtr & mask) ) &&
- (p->dev_flags[tindex] & DEVICE_DTR_SCANNED) )
+ if ( !(aic_dev->dtr_pending) &&
+ (aic_dev->needppr || aic_dev->needwdtr || aic_dev->needsdtr) &&
+ (aic_dev->flags & DEVICE_DTR_SCANNED) )
{
- p->dtr_pending |= mask;
+ aic_dev->dtr_pending = 1;
scb->tag_action = 0;
hscb->control &= DISCENB;
hscb->control |= MK_MESSAGE;
- if(p->needppr & mask)
+ if(aic_dev->needppr)
{
scb->flags |= SCB_MSGOUT_PPR;
}
- else if(p->needwdtr & mask)
+ else if(aic_dev->needwdtr)
{
scb->flags |= SCB_MSGOUT_WDTR;
}
- else if(p->needsdtr & mask)
+ else if(aic_dev->needsdtr)
{
scb->flags |= SCB_MSGOUT_SDTR;
}
@@ -10887,6 +10306,7 @@ aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
{
struct aic7xxx_host *p;
struct aic7xxx_scb *scb;
+ struct aic_dev_data *aic_dev;
#ifdef AIC7XXX_VERBOSE_DEBUGGING
int tindex = TARGET_INDEX(cmd);
#endif
@@ -10929,34 +10349,25 @@ aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
}
#endif
+ if(!cmd->device->hostdata) {
+ aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_ATOMIC);
+ if(!aic_dev)
+ return 1;
+ memset(aic_dev, 0, sizeof(struct aic_dev_data));
+ cmd->device->hostdata = aic_dev;
+ }
+
scb = scbq_remove_head(&p->scb_data->free_scbs);
if (scb == NULL)
{
- DRIVER_LOCK
aic7xxx_allocate_scb(p);
- DRIVER_UNLOCK
scb = scbq_remove_head(&p->scb_data->free_scbs);
if(scb == NULL)
+ {
printk(WARN_LEAD "Couldn't get a free SCB.\n", p->host_no,
CTL_OF_CMD(cmd));
- }
- while (scb == NULL)
- {
- /*
- * Well, all SCBs are currently active on the bus. So, we spin here
- * running the interrupt handler until one completes and becomes free.
- * We can do this safely because we either A) hold the driver lock (in
- * 2.0 kernels) or we have the io_request_lock held (in 2.2 and later
- * kernels) and so either way, we won't take any other interrupts and
- * the queue path will block until we release it. Also, we would worry
- * about running the completion queues, but obviously there are plenty
- * of commands outstanding to trigger a later interrupt that will do
- * that for us, so skip it here.
- */
- DRIVER_LOCK
- aic7xxx_isr(p->irq, p, NULL);
- DRIVER_UNLOCK
- scb = scbq_remove_head(&p->scb_data->free_scbs);
+ return 1;
+ }
}
scb->cmd = cmd;
aic7xxx_position(cmd) = scb->hscb->tag;
@@ -10981,13 +10392,11 @@ aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
scb->flags |= SCB_ACTIVE | SCB_WAITINGQ;
- DRIVER_LOCK
scbq_insert_tail(&p->waiting_scbs, scb);
if ( (p->flags & (AHC_IN_ISR | AHC_IN_ABORT | AHC_IN_RESET)) == 0)
{
aic7xxx_run_waiting_queues(p);
}
- DRIVER_UNLOCK
return (0);
}
@@ -11012,6 +10421,7 @@ aic7xxx_bus_device_reset(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
unsigned char saved_scbptr, lastphase;
unsigned char hscb_index;
int disconnected;
+ struct aic_dev_data *aic_dev = AIC_DEV(cmd);
scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
hscb = scb->hscb;
@@ -11095,9 +10505,8 @@ aic7xxx_bus_device_reset(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
printk(INFO_LEAD "Device reset message in "
"message buffer\n", p->host_no, CTL_OF_SCB(scb));
scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
- aic7xxx_error(scb->cmd) = DID_RESET;
- p->dev_flags[TARGET_INDEX(scb->cmd)] |=
- BUS_DEVICE_RESET_PENDING;
+ aic7xxx_error(cmd) = DID_RESET;
+ aic_dev->flags |= BUS_DEVICE_RESET_PENDING;
/* Send the abort message to the active SCB. */
aic_outb(p, HOST_MSG, MSG_OUT);
aic_outb(p, lastphase | ATNO, SCSISIGO);
@@ -11115,9 +10524,8 @@ aic7xxx_bus_device_reset(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
printk(WARN_LEAD "Device reset, Message buffer "
"in use\n", p->host_no, CTL_OF_SCB(scb));
scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
- aic7xxx_error(scb->cmd) = DID_RESET;
- p->dev_flags[TARGET_INDEX(scb->cmd)] |=
- BUS_DEVICE_RESET_PENDING;
+ aic7xxx_error(cmd) = DID_RESET;
+ aic_dev->flags |= BUS_DEVICE_RESET_PENDING;
return(SCSI_RESET_ERROR);
}
}
@@ -11143,8 +10551,7 @@ aic7xxx_bus_device_reset(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
*/
scb->hscb->control |= MK_MESSAGE;
scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
- p->dev_flags[TARGET_INDEX(scb->cmd)] |=
- BUS_DEVICE_RESET_PENDING;
+ aic_dev->flags |= BUS_DEVICE_RESET_PENDING;
if (hscb_index != SCB_LIST_NULL)
{
unsigned char scb_control;
@@ -11224,11 +10631,9 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
{
struct aic7xxx_scb *scb = NULL;
struct aic7xxx_host *p;
+ struct aic_dev_data *aic_dev = AIC_DEV(cmd);
int result, found=0;
unsigned char tmp_char, saved_hscbptr, next_hscbptr, prev_hscbptr;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags = 0;
-#endif
p = (struct aic7xxx_host *) cmd->host->hostdata;
scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
@@ -11244,8 +10649,6 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
if (aic7xxx_panic_on_abort)
aic7xxx_panic_abort(p, cmd);
- DRIVER_LOCK
-
/*
* Run the isr to grab any command in the QOUTFIFO and any other misc.
* assundry tasks. This should also set up the bh handler if there is
@@ -11267,7 +10670,6 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
printk(INFO_LEAD "Abort called with bogus Scsi_Cmnd "
"pointer.\n", p->host_no, CTL_OF_CMD(cmd));
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
return(SCSI_ABORT_NOT_RUNNING);
}
if (scb->cmd != cmd) /* Hmmm...either this SCB is currently free with a */
@@ -11285,7 +10687,6 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
printk(INFO_LEAD "Abort called for already completed"
" command.\n", p->host_no, CTL_OF_CMD(cmd));
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
return(SCSI_ABORT_NOT_RUNNING);
}
@@ -11320,18 +10721,15 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
printk(INFO_LEAD "SCB aborted once already, "
"escalating.\n", p->host_no, CTL_OF_SCB(scb));
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
return(SCSI_ABORT_SNOOZE);
}
if ( (p->flags & (AHC_RESET_PENDING | AHC_ABORT_PENDING)) ||
- (p->dev_flags[TARGET_INDEX(scb->cmd)] &
- BUS_DEVICE_RESET_PENDING) )
+ (aic_dev->flags & BUS_DEVICE_RESET_PENDING) )
{
if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
printk(INFO_LEAD "Reset/Abort pending for this "
"device, not wasting our time.\n", p->host_no, CTL_OF_SCB(scb));
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
return(SCSI_ABORT_PENDING);
}
@@ -11393,7 +10791,6 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
p->flags &= ~AHC_IN_ABORT;
scb->flags |= SCB_RECOVERY_SCB; /* Note the fact that we've been */
p->flags |= AHC_ABORT_PENDING; /* here so we will know not to */
- DRIVER_UNLOCK /* muck with other SCBs if this */
return(SCSI_ABORT_PENDING); /* one doesn't complete and clear */
break; /* out. */
default:
@@ -11403,14 +10800,13 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
if ((found == 0) && (scb->flags & SCB_WAITINGQ))
{
- int tindex = TARGET_INDEX(cmd);
if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
printk(INFO_LEAD "SCB found on waiting list and "
"aborted.\n", p->host_no, CTL_OF_SCB(scb));
scbq_remove(&p->waiting_scbs, scb);
- scbq_remove(&p->delayed_scbs[tindex], scb);
- p->dev_active_cmds[tindex]++;
+ scbq_remove(&aic_dev->delayed_scbs, scb);
+ aic_dev->active_cmds++;
p->activescbs++;
scb->flags &= ~(SCB_WAITINGQ | SCB_ACTIVE);
scb->flags |= SCB_ABORT | SCB_QUEUED_FOR_DONE;
@@ -11519,7 +10915,6 @@ aic7xxx_abort(Scsi_Cmnd *cmd)
}
p->flags &= ~AHC_IN_ABORT;
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
/*
* On the return value. If we found the command and aborted it, then we know
@@ -11553,11 +10948,9 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
{
struct aic7xxx_scb *scb = NULL;
struct aic7xxx_host *p;
+ struct aic_dev_data *aic_dev;
int tindex;
int result = -1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
- unsigned long cpu_flags = 0;
-#endif
#define DEVICE_RESET 0x01
#define BUS_RESET 0x02
#define HOST_RESET 0x04
@@ -11573,7 +10966,15 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
}
p = (struct aic7xxx_host *) cmd->host->hostdata;
- scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+ aic_dev = AIC_DEV(cmd);
+ if(aic_dev)
+ {
+ scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+ }
+ else
+ {
+ scb = NULL;
+ }
tindex = TARGET_INDEX(cmd);
/*
@@ -11587,8 +10988,6 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
if (aic7xxx_panic_on_abort)
aic7xxx_panic_abort(p, cmd);
- DRIVER_LOCK
-
pause_sequencer(p);
if(flags & SCSI_RESET_SYNCHRONOUS)
@@ -11608,7 +11007,6 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
aic7xxx_done_cmds_complete(p);
aic7xxx_run_waiting_queues(p);
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
return(SCSI_RESET_NOT_RUNNING);
}
else
@@ -11645,12 +11043,11 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
*/
aic7xxx_run_waiting_queues(p);
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
return(SCSI_RESET_SUCCESS);
}
if ( (action & DEVICE_RESET) &&
- (p->dev_flags[tindex] & BUS_DEVICE_RESET_PENDING) )
+ (aic_dev->flags & BUS_DEVICE_RESET_PENDING) )
{
if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
printk(INFO_LEAD "Bus device reset already sent to "
@@ -11681,7 +11078,7 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
{
action = HOST_RESET;
}
- if ( (p->dev_flags[tindex] & DEVICE_RESET_DELAY) &&
+ if ( (aic_dev->flags & DEVICE_RESET_DELAY) &&
!(action & (HOST_RESET | BUS_RESET)))
{
if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
@@ -11710,7 +11107,6 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
case RESET_DELAY:
aic7xxx_run_waiting_queues(p);
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
if(scb == NULL)
return(SCSI_RESET_PUNT);
else
@@ -11725,21 +11121,12 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
aic7xxx_run_waiting_queues(p);
unpause_sequencer(p, FALSE);
p->flags &= ~AHC_IN_RESET;
- DRIVER_UNLOCK
return(result);
break;
case BUS_RESET:
case HOST_RESET:
default:
p->flags |= AHC_IN_RESET | AHC_RESET_DELAY;
- p->dev_expires[p->scsi_id] = jiffies + (1 * HZ);
- p->dev_timer_active |= (0x01 << p->scsi_id);
- if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
- time_after_eq(p->dev_timer.expires, p->dev_expires[p->scsi_id]) )
- {
- mod_timer(&p->dev_timer, p->dev_expires[p->scsi_id]);
- p->dev_timer_active |= (0x01 << MAX_TARGETS);
- }
aic7xxx_reset_channel(p, cmd->channel, TRUE);
if ( (p->features & AHC_TWIN) && (action & HOST_RESET) )
{
@@ -11776,7 +11163,6 @@ aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
*/
aic7xxx_run_waiting_queues(p);
unpause_sequencer(p, FALSE);
- DRIVER_UNLOCK
if(scb == NULL)
return(SCSI_RESET_SUCCESS|SCSI_RESET_HOST_RESET);
else
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.h b/drivers/scsi/aic7xxx_old/aic7xxx.h
index 80a44cd58213..4d9ba148d0d5 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx.h
+++ b/drivers/scsi/aic7xxx_old/aic7xxx.h
@@ -46,7 +46,6 @@
eh_host_reset_handler: NULL, \
abort: aic7xxx_abort, \
reset: aic7xxx_reset, \
- select_queue_depths: NULL, \
slave_attach: aic7xxx_slave_attach, \
slave_detach: aic7xxx_slave_detach, \
bios_param: aic7xxx_biosparam, \
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
index 14dcd389dad1..886b84a21641 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c
@@ -33,7 +33,7 @@
#define BLS (&aic7xxx_buffer[size])
#define HDRB \
-" < 2K 2K+ 4K+ 8K+ 16K+ 32K+ 64K+ 128K+"
+" 0 - 4K 4 - 16K 16 - 64K 64 - 256K 256K - 1M 1M+"
#ifdef PROC_DEBUG
extern int vsprintf(char *, const char *, va_list);
@@ -85,10 +85,12 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
{
struct Scsi_Host *HBAptr;
struct aic7xxx_host *p;
+ struct aic_dev_data *aic_dev;
+ struct scsi_device *sdptr;
int size = 0;
unsigned char i;
- struct aic7xxx_xferstats *sp;
- unsigned char target;
+ unsigned char tindex;
+ struct list_head *list_item;
HBAptr = NULL;
@@ -130,14 +132,9 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
*/
size = 4096;
- for (target = 0; target < MAX_TARGETS; target++)
+ list_for_each(list_item, &p->aic_devs)
{
- if (p->dev_flags[target] & DEVICE_PRESENT)
-#ifdef AIC7XXX_PROC_STATS
- size += 512;
-#else
- size += 256;
-#endif
+ size += 512;
}
if (aic7xxx_buffer_size != size)
{
@@ -167,11 +164,6 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
#else
size += sprintf(BLS, " TCQ Enabled By Default : Disabled\n");
#endif
-#ifdef AIC7XXX_PROC_STATS
- size += sprintf(BLS, " AIC7XXX_PROC_STATS : Enabled\n");
-#else
- size += sprintf(BLS, " AIC7XXX_PROC_STATS : Disabled\n");
-#endif
size += sprintf(BLS, "\n");
size += sprintf(BLS, "Adapter Configuration:\n");
size += sprintf(BLS, " SCSI Adapter: %s\n",
@@ -271,8 +263,6 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
{
size += sprintf(BLS, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
}
- size += sprintf(BLS, " Tag Queue Enable Flags: 0x%04x\n", p->tagenable);
- size += sprintf(BLS, "Ordered Queue Tag Flags: 0x%04x\n", p->orderedtag);
size += sprintf(BLS, "Default Tag Queue Depth: %d\n", AIC7XXX_CMDS_PER_DEVICE);
size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host "
"instance %d:\n", p->instance);
@@ -280,43 +270,27 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
for(i=0; i < (MAX_TARGETS - 1); i++)
size += sprintf(BLS, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
size += sprintf(BLS, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);
- size += sprintf(BLS, " Actual queue depth per device for aic7xxx host "
- "instance %d:\n", p->instance);
- size += sprintf(BLS, " {");
- for(i=0; i < (MAX_TARGETS - 1); i++)
- size += sprintf(BLS, "%d,", p->dev_max_queue_depth[i]);
- size += sprintf(BLS, "%d}\n", p->dev_max_queue_depth[i]);
size += sprintf(BLS, "\n");
size += sprintf(BLS, "Statistics:\n\n");
- for (target = 0; target < MAX_TARGETS; target++)
+ list_for_each(list_item, &p->aic_devs)
{
- sp = &p->stats[target];
- if ((p->dev_flags[target] & DEVICE_PRESENT) == 0)
- {
- continue;
- }
- if (p->features & AHC_TWIN)
- {
- size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
- p->host_no, (target >> 3), (target & 0x7), 0);
- }
- else
- {
- size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
- p->host_no, 0, target, 0);
- }
+ aic_dev = list_entry(list_item, struct aic_dev_data, list);
+ sdptr = aic_dev->SDptr;
+ tindex = sdptr->channel << 3 | sdptr->id;
+ size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
+ p->host_no, sdptr->channel, sdptr->id, sdptr->lun);
size += sprintf(BLS, " Device using %s/%s",
- (p->transinfo[target].cur_width == MSG_EXT_WDTR_BUS_16_BIT) ?
+ (aic_dev->cur.width == MSG_EXT_WDTR_BUS_16_BIT) ?
"Wide" : "Narrow",
- (p->transinfo[target].cur_offset != 0) ?
+ (aic_dev->cur.offset != 0) ?
"Sync transfers at " : "Async transfers.\n" );
- if (p->transinfo[target].cur_offset != 0)
+ if (aic_dev->cur.offset != 0)
{
struct aic7xxx_syncrate *sync_rate;
- unsigned char options = p->transinfo[target].cur_options;
- int period = p->transinfo[target].cur_period;
- int rate = (p->transinfo[target].cur_width ==
+ unsigned char options = aic_dev->cur.options;
+ int period = aic_dev->cur.period;
+ int rate = (aic_dev->cur.width ==
MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0;
sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options);
@@ -324,50 +298,45 @@ aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
{
size += sprintf(BLS, "%s MByte/sec, offset %d\n",
sync_rate->rate[rate],
- p->transinfo[target].cur_offset );
+ aic_dev->cur.offset );
}
else
{
size += sprintf(BLS, "3.3 MByte/sec, offset %d\n",
- p->transinfo[target].cur_offset );
+ aic_dev->cur.offset );
}
}
size += sprintf(BLS, " Transinfo settings: ");
size += sprintf(BLS, "current(%d/%d/%d/%d), ",
- p->transinfo[target].cur_period,
- p->transinfo[target].cur_offset,
- p->transinfo[target].cur_width,
- p->transinfo[target].cur_options);
+ aic_dev->cur.period,
+ aic_dev->cur.offset,
+ aic_dev->cur.width,
+ aic_dev->cur.options);
size += sprintf(BLS, "goal(%d/%d/%d/%d), ",
- p->transinfo[target].goal_period,
- p->transinfo[target].goal_offset,
- p->transinfo[target].goal_width,
- p->transinfo[target].goal_options);
+ aic_dev->goal.period,
+ aic_dev->goal.offset,
+ aic_dev->goal.width,
+ aic_dev->goal.options);
size += sprintf(BLS, "user(%d/%d/%d/%d)\n",
- p->transinfo[target].user_period,
- p->transinfo[target].user_offset,
- p->transinfo[target].user_width,
- p->transinfo[target].user_options);
-#ifdef AIC7XXX_PROC_STATS
+ p->user[tindex].period,
+ p->user[tindex].offset,
+ p->user[tindex].width,
+ p->user[tindex].options);
size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
- sp->r_total + sp->w_total, sp->r_total, sp->w_total);
+ aic_dev->r_total + aic_dev->w_total, aic_dev->r_total, aic_dev->w_total);
size += sprintf(BLS, "%s\n", HDRB);
size += sprintf(BLS, " Reads:");
- for (i = 0; i < NUMBER(sp->r_bins); i++)
+ for (i = 0; i < NUMBER(aic_dev->r_bins); i++)
{
- size += sprintf(BLS, " %7ld", sp->r_bins[i]);
+ size += sprintf(BLS, " %7ld", aic_dev->r_bins[i]);
}
size += sprintf(BLS, "\n");
size += sprintf(BLS, " Writes:");
- for (i = 0; i < NUMBER(sp->w_bins); i++)
+ for (i = 0; i < NUMBER(aic_dev->w_bins); i++)
{
- size += sprintf(BLS, " %7ld", sp->w_bins[i]);
+ size += sprintf(BLS, " %7ld", aic_dev->w_bins[i]);
}
size += sprintf(BLS, "\n");
-#else
- size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
- sp->r_total + sp->w_total, sp->r_total, sp->w_total);
-#endif /* AIC7XXX_PROC_STATS */
size += sprintf(BLS, "\n\n");
}
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index a4a6e021fd7f..3487094d0730 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1362,17 +1362,19 @@ static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len)
for (i = 0; i < 15; i++) {
if (esp->targets_present & (1 << i)) {
Scsi_Device *SDptr = esp->ehost->host_queue;
+ struct esp_device *esp_dev;
while ((SDptr->host != esp->ehost) &&
(SDptr->id != i) &&
(SDptr->next))
SDptr = SDptr->next;
+ esp_dev = SDptr->hostdata;
copy_info(&info, "%d\t\t", i);
copy_info(&info, "%08lx\t", esp->config3[i]);
- copy_info(&info, "[%02lx,%02lx]\t\t\t", SDptr->sync_max_offset,
- SDptr->sync_min_period);
- copy_info(&info, "%s\t\t", SDptr->disconnect ? "yes" : "no");
+ copy_info(&info, "[%02lx,%02lx]\t\t\t", esp_dev->sync_max_offset,
+ esp_dev->sync_min_period);
+ copy_info(&info, "%s\t\t", esp_dev->disconnect ? "yes" : "no");
copy_info(&info, "%s\n",
(esp->config3[i] & ESP_CONFIG3_EWIDE) ? "yes" : "no");
}
@@ -1533,6 +1535,7 @@ static void esp_exec_cmd(struct esp *esp)
{
Scsi_Cmnd *SCptr;
Scsi_Device *SDptr;
+ struct esp_device *esp_dev;
volatile u8 *cmdp = esp->esp_command;
u8 the_esp_command;
int lun, target;
@@ -1552,9 +1555,29 @@ static void esp_exec_cmd(struct esp *esp)
panic("esp: esp_exec_cmd and issue queue is NULL");
SDptr = SCptr->device;
+ esp_dev = SDptr->hostdata;
lun = SCptr->lun;
target = SCptr->target;
+ /*
+ * If esp_dev is NULL we allocate and initialize one here, or bail out on failure.
+ */
+ if (!esp_dev) {
+ esp_dev = kmalloc(sizeof(struct esp_device), GFP_ATOMIC);
+ if (!esp_dev) {
+ /* We're SOL. Print a message and bail */
+ printk(KERN_WARNING "esp: no mem for esp_device %d/%d\n",
+ target, lun);
+ esp->current_SC = NULL;
+ SCptr->result = DID_ERROR << 16;
+ SCptr->done(SCptr);
+ return;
+ }
+ memset(esp_dev, 0, sizeof(struct esp_device));
+ SDptr->hostdata = esp_dev;
+ }
+
+
esp->snip = 0;
esp->msgout_len = 0;
@@ -1590,12 +1613,12 @@ static void esp_exec_cmd(struct esp *esp)
* selections should not confuse SCSI-1 we hope.
*/
- if (SDptr->sync) {
+ if (esp_dev->sync) {
/* this targets sync is known */
#ifndef __sparc_v9__
do_sync_known:
#endif
- if (SDptr->disconnect)
+ if (esp_dev->disconnect)
*cmdp++ = IDENTIFY(1, lun);
else
*cmdp++ = IDENTIFY(0, lun);
@@ -1607,7 +1630,7 @@ do_sync_known:
the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
esp_advance_phase(SCptr, in_slct_norm);
}
- } else if (!(esp->targets_present & (1<<target)) || !(SDptr->disconnect)) {
+ } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
/* After the bootup SCSI code sends both the
* TEST_UNIT_READY and INQUIRY commands we want
* to at least attempt allowing the device to
@@ -1615,8 +1638,8 @@ do_sync_known:
*/
ESPMISC(("esp: Selecting device for first time. target=%d "
"lun=%d\n", target, SCptr->lun));
- if (!SDptr->borken && !SDptr->disconnect)
- SDptr->disconnect = 1;
+ if (!SDptr->borken && !esp_dev->disconnect)
+ esp_dev->disconnect = 1;
*cmdp++ = IDENTIFY(0, lun);
esp->prevmsgout = NOP;
@@ -1624,8 +1647,8 @@ do_sync_known:
the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
/* Take no chances... */
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
} else {
/* Sorry, I have had way too many problems with
* various CDROM devices on ESP. -DaveM
@@ -1644,12 +1667,12 @@ do_sync_known:
*/
if(SDptr->type == TYPE_TAPE ||
(SDptr->type != TYPE_ROM && SDptr->removable))
- SDptr->disconnect = 1;
+ esp_dev->disconnect = 1;
else
- SDptr->disconnect = 0;
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
- SDptr->sync = 1;
+ esp_dev->disconnect = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync = 1;
esp->snip = 0;
goto do_sync_known;
}
@@ -1660,16 +1683,16 @@ do_sync_known:
* need to attempt WIDE first, before
* sync nego, as per SCSI 2 standard.
*/
- if (esp->erev == fashme && !SDptr->wide) {
+ if (esp->erev == fashme && !esp_dev->wide) {
if (!SDptr->borken &&
SDptr->type != TYPE_ROM &&
SDptr->removable == 0) {
build_wide_nego_msg(esp, 16);
- SDptr->wide = 1;
+ esp_dev->wide = 1;
esp->wnip = 1;
goto after_nego_msg_built;
} else {
- SDptr->wide = 1;
+ esp_dev->wide = 1;
/* Fall through and try sync. */
}
}
@@ -1692,7 +1715,7 @@ do_sync_known:
} else {
build_sync_nego_msg(esp, 0, 0);
}
- SDptr->sync = 1;
+ esp_dev->sync = 1;
esp->snip = 1;
after_nego_msg_built:
@@ -1725,7 +1748,7 @@ after_nego_msg_built:
cdrom_hwbug_wkaround || SDptr->borken) {
ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
"lun %d\n", esp->esp_id, SCptr->target, SCptr->lun));
- SDptr->disconnect = 0;
+ esp_dev->disconnect = 0;
*cmdp++ = IDENTIFY(0, lun);
} else {
*cmdp++ = IDENTIFY(1, lun);
@@ -1752,12 +1775,12 @@ after_nego_msg_built:
esp->eregs + ESP_BUSID);
else
sbus_writeb(target & 7, esp->eregs + ESP_BUSID);
- if (esp->prev_soff != SDptr->sync_max_offset ||
- esp->prev_stp != SDptr->sync_min_period ||
+ if (esp->prev_soff != esp_dev->sync_max_offset ||
+ esp->prev_stp != esp_dev->sync_min_period ||
(esp->erev > esp100a &&
esp->prev_cfg3 != esp->config3[target])) {
- esp->prev_soff = SDptr->sync_max_offset;
- esp->prev_stp = SDptr->sync_min_period;
+ esp->prev_soff = esp_dev->sync_max_offset;
+ esp->prev_stp = esp_dev->sync_min_period;
sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
if (esp->erev > esp100a) {
@@ -2462,14 +2485,14 @@ static inline int reconnect_lun(struct esp *esp)
*/
static inline void esp_connect(struct esp *esp, Scsi_Cmnd *sp)
{
- Scsi_Device *dp = sp->device;
+ struct esp_device *esp_dev = sp->device->hostdata;
- if (esp->prev_soff != dp->sync_max_offset ||
- esp->prev_stp != dp->sync_min_period ||
+ if (esp->prev_soff != esp_dev->sync_max_offset ||
+ esp->prev_stp != esp_dev->sync_min_period ||
(esp->erev > esp100a &&
esp->prev_cfg3 != esp->config3[sp->target])) {
- esp->prev_soff = dp->sync_max_offset;
- esp->prev_stp = dp->sync_min_period;
+ esp->prev_soff = esp_dev->sync_max_offset;
+ esp->prev_stp = esp_dev->sync_min_period;
sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
if (esp->erev > esp100a) {
@@ -2601,6 +2624,7 @@ static int esp_do_data(struct esp *esp)
static int esp_do_data_finale(struct esp *esp)
{
Scsi_Cmnd *SCptr = esp->current_SC;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
ESPDATA(("esp_do_data_finale: "));
@@ -2694,14 +2718,14 @@ static int esp_do_data_finale(struct esp *esp)
/* If we were in synchronous mode, check for peculiarities. */
if (esp->erev == fashme) {
- if (SCptr->device->sync_max_offset) {
+ if (esp_dev->sync_max_offset) {
if (SCptr->SCp.phase == in_dataout)
esp_cmd(esp, ESP_CMD_FLUSH);
} else {
esp_cmd(esp, ESP_CMD_FLUSH);
}
} else {
- if (SCptr->device->sync_max_offset)
+ if (esp_dev->sync_max_offset)
bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt);
else
esp_cmd(esp, ESP_CMD_FLUSH);
@@ -2730,7 +2754,7 @@ static int esp_do_data_finale(struct esp *esp)
ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id,
SCptr->target));
SCptr->device->borken = 1;
- SCptr->device->sync = 0;
+ esp_dev->sync = 0;
bytes_sent = 0;
}
@@ -2815,6 +2839,7 @@ static int esp_should_clear_sync(Scsi_Cmnd *sp)
static int esp_do_freebus(struct esp *esp)
{
Scsi_Cmnd *SCptr = esp->current_SC;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
int rval;
rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing);
@@ -2834,8 +2859,8 @@ static int esp_do_freebus(struct esp *esp)
if (SCptr->SCp.Status != GOOD &&
SCptr->SCp.Status != CONDITION_GOOD &&
((1<<SCptr->target) & esp->targets_present) &&
- SCptr->device->sync &&
- SCptr->device->sync_max_offset) {
+ esp_dev->sync &&
+ esp_dev->sync_max_offset) {
/* SCSI standard says that the synchronous capabilities
* should be renegotiated at this point. Most likely
* we are about to request sense from this target
@@ -2853,7 +2878,7 @@ static int esp_do_freebus(struct esp *esp)
* loading up a tape.
*/
if (esp_should_clear_sync(SCptr) != 0)
- SCptr->device->sync = 0;
+ esp_dev->sync = 0;
}
ESPDISC(("F<%02x,%02x>", SCptr->target, SCptr->lun));
esp_done(esp, ((SCptr->SCp.Status & 0xff) |
@@ -3113,7 +3138,7 @@ static int esp_enter_status(struct esp *esp)
static int esp_disconnect_amidst_phases(struct esp *esp)
{
Scsi_Cmnd *sp = esp->current_SC;
- Scsi_Device *dp = sp->device;
+ struct esp_device *esp_dev = sp->device->hostdata;
/* This means real problems if we see this
* here. Unless we were actually trying
@@ -3137,9 +3162,9 @@ static int esp_disconnect_amidst_phases(struct esp *esp)
case BUS_DEVICE_RESET:
ESPLOG(("device reset successful\n"));
- dp->sync_max_offset = 0;
- dp->sync_min_period = 0;
- dp->sync = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync = 0;
esp_advance_phase(sp, in_resetdev);
esp_done(esp, (DID_RESET << 16));
break;
@@ -3206,7 +3231,7 @@ static int esp_do_phase_determine(struct esp *esp)
static int esp_select_complete(struct esp *esp)
{
Scsi_Cmnd *SCptr = esp->current_SC;
- Scsi_Device *SDptr = SCptr->device;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
int cmd_bytes_sent, fcnt;
if (esp->erev != fashme)
@@ -3241,7 +3266,7 @@ static int esp_select_complete(struct esp *esp)
/* What if the target ignores the sdtr? */
if (esp->snip)
- SDptr->sync = 1;
+ esp_dev->sync = 1;
/* See how far, if at all, we got in getting
* the information out to the target.
@@ -3333,7 +3358,7 @@ static int esp_select_complete(struct esp *esp)
if ((esp->erev != fashme) && /* not a Happy Meal and... */
!fcnt && /* Fifo is empty and... */
/* either we are not doing synchronous transfers or... */
- (!SDptr->sync_max_offset ||
+ (!esp_dev->sync_max_offset ||
/* We are not going into data in phase. */
((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */
@@ -3395,9 +3420,9 @@ static int esp_select_complete(struct esp *esp)
esp->snip = 0;
ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
"lun %d\n", esp->esp_id, SCptr->target, SCptr->lun));
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
- SDptr->sync = 1; /* so we don't negotiate again */
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync = 1; /* so we don't negotiate again */
/* Run the command again, this time though we
* won't try to negotiate for synchronous transfers.
@@ -3537,16 +3562,16 @@ static int check_singlebyte_msg(struct esp *esp)
case MESSAGE_REJECT:
ESPMISC(("msg reject, "));
if (esp->prevmsgout == EXTENDED_MESSAGE) {
- Scsi_Device *SDptr = esp->current_SC->device;
+ struct esp_device *esp_dev = esp->current_SC->device->hostdata;
/* Doesn't look like this target can
* do synchronous or WIDE transfers.
*/
ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
- SDptr->sync = 1;
- SDptr->wide = 1;
- SDptr->sync_min_period = 0;
- SDptr->sync_max_offset = 0;
+ esp_dev->sync = 1;
+ esp_dev->wide = 1;
+ esp_dev->sync_min_period = 0;
+ esp_dev->sync_max_offset = 0;
return 0;
} else {
ESPMISC(("not sync nego, sending ABORT\n"));
@@ -3562,13 +3587,13 @@ static int check_singlebyte_msg(struct esp *esp)
*/
static int target_with_ants_in_pants(struct esp *esp,
Scsi_Cmnd *SCptr,
- Scsi_Device *SDptr)
+ struct esp_device *esp_dev)
{
- if (SDptr->sync || SDptr->borken) {
+ if (esp_dev->sync || SCptr->device->borken) {
/* sorry, no can do */
ESPSDTR(("forcing to async, "));
build_sync_nego_msg(esp, 0, 0);
- SDptr->sync = 1;
+ esp_dev->sync = 1;
esp->snip = 1;
ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
esp_advance_phase(SCptr, in_the_dark);
@@ -3621,7 +3646,7 @@ static void sync_report(struct esp *esp)
static int check_multibyte_msg(struct esp *esp)
{
Scsi_Cmnd *SCptr = esp->current_SC;
- Scsi_Device *SDptr = SCptr->device;
+ struct esp_device *esp_dev = SCptr->device->hostdata;
u8 regval = 0;
int message_out = 0;
@@ -3637,7 +3662,7 @@ static int check_multibyte_msg(struct esp *esp)
/* Target negotiates first! */
ESPSDTR(("target jumps the gun, "));
message_out = EXTENDED_MESSAGE; /* we must respond */
- rval = target_with_ants_in_pants(esp, SCptr, SDptr);
+ rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
if (rval)
return rval;
}
@@ -3684,8 +3709,8 @@ static int check_multibyte_msg(struct esp *esp)
if (offset) {
u8 bit;
- SDptr->sync_min_period = (regval & 0x1f);
- SDptr->sync_max_offset = (offset | esp->radelay);
+ esp_dev->sync_min_period = (regval & 0x1f);
+ esp_dev->sync_max_offset = (offset | esp->radelay);
if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) {
if ((esp->erev == fas100a) || (esp->erev == fashme))
bit = ESP_CONFIG3_FAST;
@@ -3697,7 +3722,7 @@ static int check_multibyte_msg(struct esp *esp)
* control bits are clear.
*/
if (esp->erev == fashme)
- SDptr->sync_max_offset &= ~esp->radelay;
+ esp_dev->sync_max_offset &= ~esp->radelay;
esp->config3[SCptr->target] |= bit;
} else {
esp->config3[SCptr->target] &= ~bit;
@@ -3705,23 +3730,23 @@ static int check_multibyte_msg(struct esp *esp)
esp->prev_cfg3 = esp->config3[SCptr->target];
sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3);
}
- esp->prev_soff = SDptr->sync_max_offset;
- esp->prev_stp = SDptr->sync_min_period;
+ esp->prev_soff = esp_dev->sync_max_offset;
+ esp->prev_stp = esp_dev->sync_min_period;
sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP);
ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
- SDptr->sync_max_offset,
- SDptr->sync_min_period,
+ esp_dev->sync_max_offset,
+ esp_dev->sync_min_period,
esp->config3[SCptr->target]));
esp->snip = 0;
- } else if (SDptr->sync_max_offset) {
+ } else if (esp_dev->sync_max_offset) {
u8 bit;
/* back to async mode */
ESPSDTR(("unaccaptable sync nego, forcing async\n"));
- SDptr->sync_max_offset = 0;
- SDptr->sync_min_period = 0;
+ esp_dev->sync_max_offset = 0;
+ esp_dev->sync_min_period = 0;
esp->prev_soff = 0;
esp->prev_stp = 0;
sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF);
@@ -3740,7 +3765,7 @@ static int check_multibyte_msg(struct esp *esp)
sync_report(esp);
ESPSDTR(("chk multibyte msg: sync is known, "));
- SDptr->sync = 1;
+ esp_dev->sync = 1;
if (message_out) {
ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
@@ -3786,7 +3811,7 @@ static int check_multibyte_msg(struct esp *esp)
/* Regardless, next try for sync transfers. */
build_sync_nego_msg(esp, esp->sync_defp, 15);
- SDptr->sync = 1;
+ esp_dev->sync = 1;
esp->snip = 1;
message_out = EXTENDED_MESSAGE;
}
@@ -4051,7 +4076,7 @@ static int esp_do_msgoutdone(struct esp *esp)
/* Happy Meal fifo is touchy... */
if ((esp->erev != fashme) &&
!fcount(esp) &&
- !(esp->current_SC->device->sync_max_offset))
+ !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
esp_cmd(esp, ESP_CMD_FLUSH);
break;
@@ -4330,11 +4355,13 @@ static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
spin_unlock_irqrestore(esp->ehost->host_lock, flags);
}
-int esp_revoke(Scsi_Device* SDptr)
+void esp_slave_detach(Scsi_Device* SDptr)
{
struct esp *esp = (struct esp *) SDptr->host->hostdata;
esp->targets_present &= ~(1 << SDptr->id);
- return 0;
+ if (SDptr->hostdata)
+ kfree(SDptr->hostdata);
+ SDptr->hostdata = NULL;
}
static Scsi_Host_Template driver_template = SCSI_SPARC_ESP;
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h
index 9949d0ab9efe..864a48769c38 100644
--- a/drivers/scsi/esp.h
+++ b/drivers/scsi/esp.h
@@ -64,6 +64,17 @@ enum esp_rev {
espunknown = 0x07
};
+/* We allocate one of these for each scsi device and attach it to
+ * SDptr->hostdata for use in the driver
+ */
+struct esp_device {
+ unsigned char sync_min_period;
+ unsigned char sync_max_offset;
+ unsigned sync:1;
+ unsigned wide:1;
+ unsigned disconnect:1;
+};
+
/* We get one of these for each ESP probed. */
struct esp {
unsigned long eregs; /* ESP controller registers */
@@ -399,7 +410,7 @@ extern int esp_abort(Scsi_Cmnd *);
extern int esp_reset(Scsi_Cmnd *, unsigned int);
extern int esp_proc_info(char *buffer, char **start, off_t offset, int length,
int hostno, int inout);
-extern int esp_revoke(Scsi_Device* SDptr);
+extern void esp_slave_detach(Scsi_Device* SDptr);
#ifdef CONFIG_SPARC64
#define SCSI_SPARC_ESP { \
@@ -407,7 +418,7 @@ extern int esp_revoke(Scsi_Device* SDptr);
proc_info: &esp_proc_info, \
name: "Sun ESP 100/100a/200", \
detect: esp_detect, \
- revoke: esp_revoke, \
+ slave_detach: esp_slave_detach, \
info: esp_info, \
command: esp_command, \
queuecommand: esp_queue, \
@@ -427,7 +438,7 @@ extern int esp_revoke(Scsi_Device* SDptr);
proc_info: &esp_proc_info, \
name: "Sun ESP 100/100a/200", \
detect: esp_detect, \
- revoke: esp_revoke, \
+ slave_detach: esp_slave_detach, \
info: esp_info, \
command: esp_command, \
queuecommand: esp_queue, \
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8f325660d45e..6963127669ce 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -433,6 +433,7 @@ int ips_eh_abort(Scsi_Cmnd *);
int ips_eh_reset(Scsi_Cmnd *);
int ips_queue(Scsi_Cmnd *, void (*) (Scsi_Cmnd *));
int ips_biosparam(Disk *, struct block_device *, int *);
+int ips_slave_attach(Scsi_Device *);
const char * ips_info(struct Scsi_Host *);
void do_ipsintr(int, void *, struct pt_regs *);
static int ips_hainit(ips_ha_t *);
@@ -481,7 +482,7 @@ static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
static void ips_free_flash_copperhead(ips_ha_t *ha);
static void ips_get_bios_version(ips_ha_t *, int);
static void ips_identify_controller(ips_ha_t *);
-static void ips_select_queue_depth(struct Scsi_Host *, Scsi_Device *);
+//static void ips_select_queue_depth(struct Scsi_Host *, Scsi_Device *);
static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
static void ips_enable_int_copperhead(ips_ha_t *);
static void ips_enable_int_copperhead_memio(ips_ha_t *);
@@ -1087,7 +1088,7 @@ ips_detect(Scsi_Host_Template *SHT) {
sh->n_io_port = io_addr ? 255 : 0;
sh->unique_id = (io_addr) ? io_addr : mem_addr;
sh->irq = irq;
- sh->select_queue_depths = ips_select_queue_depth;
+ //sh->select_queue_depths = ips_select_queue_depth;
sh->sg_tablesize = sh->hostt->sg_tablesize;
sh->can_queue = sh->hostt->can_queue;
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
@@ -1827,7 +1828,7 @@ ips_biosparam(Disk *disk, struct block_device *dev, int geom[]) {
/* Select queue depths for the devices on the contoller */
/* */
/****************************************************************************/
-static void
+/*static void
ips_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs) {
Scsi_Device *device;
ips_ha_t *ha;
@@ -1860,6 +1861,30 @@ ips_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs) {
}
}
}
+*/
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_slave_attach */
+/* */
+/* Routine Description: */
+/* */
+/* Set queue depths on devices once scan is complete */
+/* */
+/****************************************************************************/
+int
+ips_slave_attach(Scsi_Device *SDptr)
+{
+ ips_ha_t *ha;
+ int min;
+
+ ha = IPS_HA(SDptr->host);
+ min = ha->max_cmds / 4;
+ if (min < 8)
+ min = ha->max_cmds - 1;
+ scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min);
+ return 0;
+}
/****************************************************************************/
/* */
@@ -7407,7 +7432,7 @@ static int ips_init_phase1( struct pci_dev *pci_dev, int *indexPtr )
sh->n_io_port = io_addr ? 255 : 0;
sh->unique_id = (io_addr) ? io_addr : mem_addr;
sh->irq = irq;
- sh->select_queue_depths = ips_select_queue_depth;
+ //sh->select_queue_depths = ips_select_queue_depth;
sh->sg_tablesize = sh->hostt->sg_tablesize;
sh->can_queue = sh->hostt->can_queue;
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 161e66b4275c..e13b6bc9d4cc 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -60,6 +60,7 @@
extern int ips_eh_reset(Scsi_Cmnd *);
extern int ips_queue(Scsi_Cmnd *, void (*) (Scsi_Cmnd *));
extern int ips_biosparam(Disk *, struct block_device *, int *);
+ extern int ips_slave_attach(Scsi_Device *);
extern const char * ips_info(struct Scsi_Host *);
extern void do_ips(int, void *, struct pt_regs *);
@@ -481,7 +482,8 @@
eh_host_reset_handler : ips_eh_reset, \
abort : NULL, \
reset : NULL, \
- slave_attach : NULL, \
+ slave_attach : ips_slave_attach, \
+ slave_detach : NULL, \
bios_param : ips_biosparam, \
can_queue : 0, \
this_id: -1, \
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 524fbbc3ad0a..5e6a2db44da5 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1624,11 +1624,40 @@ void scsi_adjust_queue_depth(Scsi_Device *SDpnt, int tagged, int tags)
/*
* refuse to set tagged depth to an unworkable size
*/
- if(tags == 0)
+ if(tags <= 0)
return;
+ /*
+ * Limit max queue depth on a single lun to 256 for now. Remember,
+ * we allocate a struct scsi_command for each of these and keep it
+ * around forever. Too deep of a depth just wastes memory.
+ */
+ if(tags > 256)
+ return;
+
spin_lock_irqsave(&device_request_lock, flags);
SDpnt->new_queue_depth = tags;
- SDpnt->tagged_queue = tagged;
+ switch(tagged) {
+ case MSG_ORDERED_TAG:
+ SDpnt->ordered_tags = 1;
+ SDpnt->simple_tags = 1;
+ break;
+ case MSG_SIMPLE_TAG:
+ SDpnt->ordered_tags = 0;
+ SDpnt->simple_tags = 1;
+ break;
+ default:
+ printk(KERN_WARNING "(scsi%d:%d:%d:%d) "
+ "scsi_adjust_queue_depth, bad queue type, "
+ "disabled\n", SDpnt->host->host_no,
+ SDpnt->channel, SDpnt->id, SDpnt->lun);
+ case 0:
+ SDpnt->ordered_tags = SDpnt->simple_tags = 0;
+ if(SDpnt->host->cmd_per_lun)
+ SDpnt->new_queue_depth = SDpnt->host->cmd_per_lun;
+ else
+ SDpnt->new_queue_depth = 1;
+ break;
+ }
spin_unlock_irqrestore(&device_request_lock, flags);
if(SDpnt->queue_depth == 0)
{
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 113418b2662f..128b1e5e7335 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -587,8 +587,8 @@ struct scsi_device {
char * model; /* ... after scan; point to static string */
char * rev; /* ... "nullnullnullnull" before scan */
unsigned char current_tag; /* current tag */
- unsigned char sync_min_period; /* Not less than this period */
- unsigned char sync_max_offset; /* Not greater than this offset */
+// unsigned char sync_min_period; /* Not less than this period */
+// unsigned char sync_max_offset; /* Not greater than this offset */
unsigned online:1;
unsigned writeable:1;
@@ -599,15 +599,16 @@ struct scsi_device {
unsigned lockable:1; /* Able to prevent media removal */
unsigned borken:1; /* Tell the Seagate driver to be
* painfully slow on this device */
- unsigned disconnect:1; /* can disconnect */
+// unsigned disconnect:1; /* can disconnect */
unsigned soft_reset:1; /* Uses soft reset option */
unsigned sdtr:1; /* Device supports SDTR messages */
unsigned wdtr:1; /* Device supports WDTR messages */
unsigned ppr:1; /* Device supports PPR messages */
unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
- unsigned tagged_queue:1; /* SCSI-II tagged queuing enabled */
- unsigned simple_tags:1; /* Device supports simple queue tag messages */
- unsigned ordered_tags:1;/* Device supports ordered queue tag messages */
+ unsigned tagged_queue:1;/* This is going away!!!! Look at simple_tags
+ instead!!! Please fix your driver now!! */
+ unsigned simple_tags:1; /* simple queue tag messages are enabled */
+ unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
unsigned single_lun:1; /* Indicates we should only allow I/O to
* one of the luns for the device at a
* time. */
@@ -619,8 +620,8 @@ struct scsi_device {
unsigned remap:1; /* support remapping */
unsigned starved:1; /* unable to process commands because
host busy */
- unsigned sync:1; /* Sync transfer state, managed by host */
- unsigned wide:1; /* WIDE transfer state, managed by host */
+// unsigned sync:1; /* Sync transfer state, managed by host */
+// unsigned wide:1; /* WIDE transfer state, managed by host */
unsigned int device_blocked; /* Device returned QUEUE_FULL. */
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 40ba88d6f0e0..80f23e46a9e4 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -424,6 +424,14 @@ int scsi_ioctl(Scsi_Device * dev, int cmd, void *arg)
return 0;
case SCSI_IOCTL_GET_BUS_NUMBER:
return put_user(dev->host->host_no, (int *) arg);
+ /*
+ * The next two ioctls either need to go or need to be changed to
+ * pass tagged queueing changes through the low level drivers.
+ * Simply enabling or disabling tagged queueing without the knowledge
+ * of the low level driver is a *BAD* thing.
+ *
+ * Oct. 10, 2002 - Doug Ledford <dledford@redhat.com>
+ */
case SCSI_IOCTL_TAGGED_ENABLE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f01e8029adf..0f52ed55b210 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1477,11 +1477,14 @@ static int scsi_add_lun(Scsi_Device *sdevscan, Scsi_Device **sdevnew,
if (sdt->detect)
sdev->attached += (*sdt->detect) (sdev);
- if (sdev->host->hostt->slave_attach != NULL)
+ if (sdev->host->hostt->slave_attach != NULL) {
if (sdev->host->hostt->slave_attach(sdev) != 0) {
- printk(KERN_INFO "scsi_add_lun: failed low level driver attach, setting device offline");
+ printk(KERN_INFO "%s: scsi_add_lun: failed low level driver attach, setting device offline", devname);
sdev->online = FALSE;
}
+ } else if(sdev->host->cmd_per_lun) {
+ scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+ }
if (sdevnew != NULL)
*sdevnew = sdev;
@@ -2017,21 +2020,6 @@ static void scsi_scan_selected_lun(struct Scsi_Host *shost, uint channel,
scsi_free_sdev(sdevscan);
if (res == SCSI_SCAN_LUN_PRESENT) {
BUG_ON(sdev == NULL);
- /*
- * FIXME calling select_queue_depths is wrong for adapters
- * that modify queue depths of all scsi devices - the
- * adapter might change a queue depth (not for this sdev),
- * but the mid-layer will not change the queue depth. This
- * does not cause an oops, but queue_depth will not match
- * the actual queue depth used.
- *
- * Perhaps use a default queue depth, and allow them to be
- * modified at boot/insmod time, and/or via sysctl/ioctl/proc;
- * plus have dynamic queue depth adjustment like the
- * aic7xxx driver.
- */
- if (shost->select_queue_depths != NULL)
- (shost->select_queue_depths) (shost, shost->host_queue);
for (sdt = scsi_devicelist; sdt; sdt = sdt->next)
if (sdt->init && sdt->dev_noticed)
diff --git a/drivers/usb/input/hid-input.c b/drivers/usb/input/hid-input.c
index 9090b577643f..13e3d2f2b0b4 100644
--- a/drivers/usb/input/hid-input.c
+++ b/drivers/usb/input/hid-input.c
@@ -348,7 +348,7 @@ static void hidinput_configure_usage(struct hid_device *device, struct hid_field
set_bit(usage->type, input->evbit);
while (usage->code <= max && test_and_set_bit(usage->code, bit)) {
- usage->code = find_next_zero_bit(bit, max + 1, usage->code);
+ usage->code = find_next_zero_bit(bit, max + 1, usage->code);
}
if (usage->code > max) return;
diff --git a/fs/nls/Config.in b/fs/nls/Config.in
index e311a4821f90..8274f5e521f4 100644
--- a/fs/nls/Config.in
+++ b/fs/nls/Config.in
@@ -12,7 +12,7 @@ fi
# msdos and Joliet want NLS
if [ "$CONFIG_JOLIET" = "y" -o "$CONFIG_FAT_FS" != "n" \
-o "$CONFIG_NTFS_FS" != "n" -o "$CONFIG_NCPFS_NLS" = "y" \
- -o "$CONFIG_SMB_NLS" = "y" -o "$CONFIG_JFS_FS" != "n" -o "$CONFIG_CIFS" != "n"]; then
+ -o "$CONFIG_SMB_NLS" = "y" -o "$CONFIG_JFS_FS" != "n" -o "$CONFIG_CIFS" != "n" ]; then
define_bool CONFIG_NLS y
else
define_bool CONFIG_NLS n
diff --git a/include/linux/kd.h b/include/linux/kd.h
index 986ef61c9df9..9eacfd158976 100644
--- a/include/linux/kd.h
+++ b/include/linux/kd.h
@@ -134,7 +134,8 @@ struct kbkeycode {
struct kbd_repeat {
int delay; /* in msec; <= 0: don't change */
- int rate; /* in msec; <= 0: don't change */
+ int period; /* in msec; <= 0: don't change */
+ /* earlier this field was misnamed "rate" */
};
#define KDKBDREP 0x4B52 /* set keyboard delay/repeat rate;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 9fd7d5c05605..0fb31130009d 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -301,7 +301,8 @@ enum
NET_IPV4_NONLOCAL_BIND=88,
NET_IPV4_ICMP_RATELIMIT=89,
NET_IPV4_ICMP_RATEMASK=90,
- NET_TCP_TW_REUSE=91
+ NET_TCP_TW_REUSE=91,
+ NET_TCP_FRTO=92
};
enum {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ce34341d6a2c..91b66d7ab004 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -366,6 +366,9 @@ struct tcp_opt {
unsigned int keepalive_intvl; /* time interval between keep alive probes */
int linger2;
+ int frto_counter; /* Number of new acks after RTO */
+ __u32 frto_highmark; /* snd_nxt when RTO occurred */
+
unsigned long last_synq_overflow;
};
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index db554ad4cdcf..00bf0d29e360 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -29,9 +29,9 @@
#define UINPUT_MINOR 223
#define UINPUT_NAME "uinput"
#define UINPUT_BUFFER_SIZE 16
-#define U_MAX_NAME_SIZE 50
-#define UIST_CREATED 1
+/* state flags => bit index for {set|clear|test}_bit ops */
+#define UIST_CREATED 0
struct uinput_device {
struct input_dev *dev;
diff --git a/include/net/flow.h b/include/net/flow.h
index e1ce1b2aea31..58fbf0e8314a 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -1,6 +1,6 @@
/*
*
- * Flow based forwarding rules (usage: firewalling, etc)
+ * Generic internet FLOW.
*
*/
@@ -8,12 +8,16 @@
#define _NET_FLOW_H
struct flowi {
- int proto; /* {TCP, UDP, ICMP} */
+ int oif;
+ int iif;
union {
struct {
__u32 daddr;
__u32 saddr;
+ __u32 fwmark;
+ __u8 tos;
+ __u8 scope;
} ip4_u;
struct {
@@ -27,9 +31,12 @@ struct flowi {
#define fl6_flowlabel nl_u.ip6_u.flowlabel
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr
+#define fl4_fwmark nl_u.ip4_u.fwmark
+#define fl4_tos nl_u.ip4_u.tos
+#define fl4_scope nl_u.ip4_u.scope
- int oif;
-
+ __u8 proto;
+ __u8 flags;
union {
struct {
__u16 sport;
@@ -41,61 +48,8 @@ struct flowi {
__u8 code;
} icmpt;
- unsigned long data;
+ __u32 spi;
} uli_u;
};
-#define FLOWR_NODECISION 0 /* rule not appliable to flow */
-#define FLOWR_SELECT 1 /* flow must follow this rule */
-#define FLOWR_CLEAR 2 /* priority level clears flow */
-#define FLOWR_ERROR 3
-
-struct fl_acc_args {
- int type;
-
-
-#define FL_ARG_FORWARD 1
-#define FL_ARG_ORIGIN 2
-
- union {
- struct sk_buff *skb;
- struct {
- struct sock *sk;
- struct flowi *flow;
- } fl_o;
- } fl_u;
-};
-
-
-struct pkt_filter {
- atomic_t refcnt;
- unsigned int offset;
- __u32 value;
- __u32 mask;
- struct pkt_filter *next;
-};
-
-#define FLR_INPUT 1
-#define FLR_OUTPUT 2
-
-struct flow_filter {
- int type;
- union {
- struct pkt_filter *filter;
- struct sock *sk;
- } u;
-};
-
-struct flow_rule {
- struct flow_rule_ops *ops;
- unsigned char private[0];
-};
-
-struct flow_rule_ops {
- int (*accept)(struct rt6_info *rt,
- struct rt6_info *rule,
- struct fl_acc_args *args,
- struct rt6_info **nrt);
-};
-
#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index f8d382f4e7d8..4fb406133dbb 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -70,14 +70,6 @@ struct rt6_info
u8 rt6i_hoplimit;
atomic_t rt6i_ref;
- union {
- struct flow_rule *rt6iu_flowr;
- struct flow_filter *rt6iu_filter;
- } flow_u;
-
-#define rt6i_flowr flow_u.rt6iu_flowr
-#define rt6i_filter flow_u.rt6iu_filter
-
struct rt6key rt6i_dst;
struct rt6key rt6i_src;
};
diff --git a/include/net/ip6_fw.h b/include/net/ip6_fw.h
deleted file mode 100644
index 7866273d3d56..000000000000
--- a/include/net/ip6_fw.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef __NET_IP6_FW_H
-#define __NET_IP6_FW_H
-
-#define IP6_FW_LISTHEAD 0x1000
-#define IP6_FW_ACCEPT 0x0001
-#define IP6_FW_REJECT 0x0002
-
-#define IP6_FW_DEBUG 2
-
-#define IP6_FW_MSG_ADD 1
-#define IP6_FW_MSG_DEL 2
-#define IP6_FW_MSG_REPORT 3
-
-
-/*
- * Fast "hack" user interface
- */
-struct ip6_fw_msg {
- struct in6_addr dst;
- struct in6_addr src;
- int dst_len;
- int src_len;
- int action;
- int policy;
- int proto;
- union {
- struct {
- __u16 sport;
- __u16 dport;
- } transp;
-
- unsigned long data;
-
- int icmp_type;
- } u;
-
- int msg_len;
-};
-
-#ifdef __KERNEL__
-
-#include <net/flow.h>
-
-struct ip6_fw_rule {
- struct flow_rule flowr;
- struct ip6_fw_rule *next;
- struct ip6_fw_rule *prev;
- struct flowi info;
- unsigned long policy;
-};
-
-#endif
-
-#endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 3b84c5bff809..236641e5bc51 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -17,6 +17,7 @@
#define _NET_IP_FIB_H
#include <linux/config.h>
+#include <net/flow.h>
struct kern_rta
{
@@ -117,7 +118,7 @@ struct fib_table
{
unsigned char tb_id;
unsigned tb_stamp;
- int (*tb_lookup)(struct fib_table *tb, const struct rt_key *key, struct fib_result *res);
+ int (*tb_lookup)(struct fib_table *tb, const struct flowi *flp, struct fib_result *res);
int (*tb_insert)(struct fib_table *table, struct rtmsg *r,
struct kern_rta *rta, struct nlmsghdr *n,
struct netlink_skb_parms *req);
@@ -130,7 +131,7 @@ struct fib_table
int (*tb_get_info)(struct fib_table *table, char *buf,
int first, int count);
void (*tb_select_default)(struct fib_table *table,
- const struct rt_key *key, struct fib_result *res);
+ const struct flowi *flp, struct fib_result *res);
unsigned char tb_data[0];
};
@@ -152,18 +153,18 @@ static inline struct fib_table *fib_new_table(int id)
return fib_get_table(id);
}
-static inline int fib_lookup(const struct rt_key *key, struct fib_result *res)
+static inline int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
- if (local_table->tb_lookup(local_table, key, res) &&
- main_table->tb_lookup(main_table, key, res))
+ if (local_table->tb_lookup(local_table, flp, res) &&
+ main_table->tb_lookup(main_table, flp, res))
return -ENETUNREACH;
return 0;
}
-static inline void fib_select_default(const struct rt_key *key, struct fib_result *res)
+static inline void fib_select_default(const struct flowi *flp, struct fib_result *res)
{
if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- main_table->tb_select_default(main_table, key, res);
+ main_table->tb_select_default(main_table, flp, res);
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
@@ -171,7 +172,7 @@ static inline void fib_select_default(const struct rt_key *key, struct fib_resul
#define main_table (fib_tables[RT_TABLE_MAIN])
extern struct fib_table * fib_tables[RT_TABLE_MAX+1];
-extern int fib_lookup(const struct rt_key *key, struct fib_result *res);
+extern int fib_lookup(const struct flowi *flp, struct fib_result *res);
extern struct fib_table *__fib_new_table(int id);
extern void fib_rule_put(struct fib_rule *r);
@@ -191,7 +192,7 @@ static inline struct fib_table *fib_new_table(int id)
return fib_tables[id] ? : __fib_new_table(id);
}
-extern void fib_select_default(const struct rt_key *key, struct fib_result *res);
+extern void fib_select_default(const struct flowi *flp, struct fib_result *res);
#endif /* CONFIG_IP_MULTIPLE_TABLES */
@@ -204,13 +205,13 @@ extern int inet_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *ar
extern int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb);
extern int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
struct net_device *dev, u32 *spec_dst, u32 *itag);
-extern void fib_select_multipath(const struct rt_key *key, struct fib_result *res);
+extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
/* Exported by fib_semantics.c */
extern int ip_fib_check_default(u32 gw, struct net_device *dev);
extern void fib_release_info(struct fib_info *);
extern int fib_semantic_match(int type, struct fib_info *,
- const struct rt_key *, struct fib_result*);
+ const struct flowi *, struct fib_result*);
extern struct fib_info *fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
const struct nlmsghdr *, int *err);
extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *, struct kern_rta *rta, struct fib_info *fi);
diff --git a/include/net/irda/crc.h b/include/net/irda/crc.h
index 61a5a648864d..a419a992f15f 100644
--- a/include/net/irda/crc.h
+++ b/include/net/irda/crc.h
@@ -28,6 +28,6 @@ static inline __u16 irda_fcs(__u16 fcs, __u8 c)
}
/* Recompute the FCS with len bytes appended. */
-unsigned short crc_calc( __u16 fcs, __u8 const *buf, size_t len);
+unsigned short irda_calc_crc16( __u16 fcs, __u8 const *buf, size_t len);
#endif
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
index 7f9659cc4239..7ff56638b9d6 100644
--- a/include/net/irda/ircomm_tty.h
+++ b/include/net/irda/ircomm_tty.h
@@ -48,7 +48,9 @@
/* This is used as an initial value to max_header_size before the proper
* value is filled in (5 for ttp, 4 for lmp). This allow us to detect
* the state of the underlying connection. - Jean II */
-#define IRCOMM_TTY_HDR_UNITIALISED 32
+#define IRCOMM_TTY_HDR_UNINITIALISED 16
+/* Same for payload size. See qos.c for the smallest max data size */
+#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
/*
* IrCOMM TTY driver state
@@ -83,6 +85,7 @@ struct ircomm_tty_cb {
__u32 max_data_size; /* Max data we can transmit in one packet */
__u32 max_header_size; /* The amount of header space we must reserve */
+ __u32 tx_data_size; /* Max data size of current tx_skb */
struct iriap_cb *iriap; /* Instance used for querying remote IAS */
struct ias_object* obj;
diff --git a/include/net/irda/vlsi_ir.h b/include/net/irda/vlsi_ir.h
index fab3ebb726c3..32d30cbc0920 100644
--- a/include/net/irda/vlsi_ir.h
+++ b/include/net/irda/vlsi_ir.h
@@ -3,9 +3,9 @@
*
* vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
*
- * Version: 0.3, Sep 30, 2001
+ * Version: 0.4
*
- * Copyright (c) 2001 Martin Diehl
+ * Copyright (c) 2001-2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -27,6 +27,26 @@
#ifndef IRDA_VLSI_FIR_H
#define IRDA_VLSI_FIR_H
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,4)
+#ifdef CONFIG_PROC_FS
+/* PDE() introduced in 2.5.4 */
+#define PDE(inode) ((inode)->u.generic_ip)
+#endif
+#endif
+
+/*
+ * #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,xx)
+ *
+ * missing pci-dma api call to give streaming dma buffer back to hw
+ * patch floating on lkml - probably present in 2.5.26 or later
+ * otherwise defining it as noop is ok, since the vlsi-ir is only
+ * used on two oldish x86-based notebooks which are cache-coherent
+ */
+#define pci_dma_prep_single(dev, addr, size, direction) /* nothing */
+/*
+ * #endif
+ */
+
/* ================================================================ */
/* non-standard PCI registers */
@@ -58,20 +78,20 @@ enum vlsi_pci_clkctl {
/* PLL control */
- CLKCTL_NO_PD = 0x04, /* PD# (inverted power down) signal,
- * i.e. PLL is powered, if NO_PD set */
+ CLKCTL_PD_INV = 0x04, /* PD#: inverted power down signal,
+ * i.e. PLL is powered, if PD_INV set */
CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
/* clock source selection */
- CLKCTL_EXTCLK = 0x20, /* set to select external clock input */
- CLKCTL_XCKSEL = 0x10, /* set to indicate 40MHz EXTCLK, not 48MHz */
+ CLKCTL_EXTCLK = 0x20, /* set to select external clock input, not PLL */
+ CLKCTL_XCKSEL = 0x10, /* set to indicate EXTCLK is 40MHz, not 48MHz */
/* IrDA block control */
CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
- * is detected, NO_PD gets set and CLKSTP cleared */
+ * is detected, PD_INV gets set(?) and CLKSTP cleared */
};
/* ------------------------------------------ */
@@ -82,10 +102,9 @@ enum vlsi_pci_clkctl {
#define DMA_MASK_MSTRPAGE 0x00ffffff
#define MSTRPAGE_VALUE (DMA_MASK_MSTRPAGE >> 24)
-
/* PCI busmastering is somewhat special for this guy - in short:
*
- * We select to operate using MSTRPAGE=0 fixed, use ISA DMA
+ * We select to operate using fixed MSTRPAGE=0, use ISA DMA
* address restrictions to make the PCI BM api aware of this,
* but ensure the hardware is dealing with real 32bit access.
*
@@ -151,7 +170,6 @@ enum vlsi_pci_irmisc {
IRMISC_UARTSEL_2e8 = 0x03
};
-
/* ================================================================ */
/* registers mapped to 32 byte PCI IO space */
@@ -350,22 +368,17 @@ enum vlsi_pio_irenable {
#define IRENABLE_MASK 0xff00 /* Read mask */
-
/* ------------------------------------------ */
/* VLSI_PIO_PHYCTL: IR Physical Layer Current Control Register (u16, ro) */
-
/* read-back of the currently applied physical layer status.
* applied from VLSI_PIO_NPHYCTL at rising edge of IRENABLE_IREN
* contents identical to VLSI_PIO_NPHYCTL (see below)
*/
-
-
/* ------------------------------------------ */
-
/* VLSI_PIO_NPHYCTL: IR Physical Layer Next Control Register (u16, rw) */
/* latched during IRENABLE_IREN=0 and applied at 0-1 transition
@@ -382,10 +395,10 @@ enum vlsi_pio_irenable {
* fixed for all SIR speeds at 40MHz input clock (PLSWID=24 at 48MHz).
* IrPHY also allows shorter pulses down to the nominal pulse duration
* at 115.2kbaud (minus some tolerance) which is 1.41 usec.
- * Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by to for 48MHz)
+ * Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by two for 48MHz)
* we get the minimum acceptable PLSWID values according to the VLSI
* specification, which provides 1.5 usec pulse width for all speeds (except
- * for 2.4kbaud getting 6usec). This is well inside IrPHY v1.3 specs and
+ * for 2.4kbaud getting 6usec). This is fine with IrPHY v1.3 specs and
* reduces the transceiver power which drains the battery. At 9.6kbaud for
* example this amounts to more than 90% battery power saving!
*
@@ -399,7 +412,21 @@ enum vlsi_pio_irenable {
* PREAMB = 15
*/
-#define BWP_TO_PHYCTL(B,W,P) ((((B)&0x3f)<<10) | (((W)&0x1f)<<5) | (((P)&0x1f)<<0))
+#define PHYCTL_BAUD_SHIFT 10
+#define PHYCTL_BAUD_MASK 0xfc00
+#define PHYCTL_PLSWID_SHIFT 5
+#define PHYCTL_PLSWID_MASK 0x03e0
+#define PHYCTL_PREAMB_SHIFT 0
+#define PHYCTL_PREAMB_MASK 0x001f
+
+#define PHYCTL_TO_BAUD(bwp) (((bwp)&PHYCTL_BAUD_MASK)>>PHYCTL_BAUD_SHIFT)
+#define PHYCTL_TO_PLSWID(bwp) (((bwp)&PHYCTL_PLSWID_MASK)>>PHYCTL_PLSWID_SHIFT)
+#define PHYCTL_TO_PREAMB(bwp) (((bwp)&PHYCTL_PREAMB_MASK)>>PHYCTL_PREAMB_SHIFT)
+
+#define BWP_TO_PHYCTL(b,w,p) ((((b)<<PHYCTL_BAUD_SHIFT)&PHYCTL_BAUD_MASK) \
+ | (((w)<<PHYCTL_PLSWID_SHIFT)&PHYCTL_PLSWID_MASK) \
+ | (((p)<<PHYCTL_PREAMB_SHIFT)&PHYCTL_PREAMB_MASK))
+
#define BAUD_BITS(br) ((115200/(br))-1)
static inline unsigned
@@ -417,7 +444,6 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
return (tmp>0) ? (tmp-1) : 0;
}
-
#define PHYCTL_SIR(br,ws,cs) BWP_TO_PHYCTL(BAUD_BITS(br),calc_width_bits((br),(ws),(cs)),0)
#define PHYCTL_MIR(cs) BWP_TO_PHYCTL(0,((cs)?9:10),1)
#define PHYCTL_FIR BWP_TO_PHYCTL(0,0,15)
@@ -445,42 +471,61 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
-/* specifies the maximum legth (up to 4k - or (4k-1)? - bytes), which a
- * received frame may have - i.e. the size of the corresponding
- * receive buffers. For simplicity we use the same length for
- * receive and submit buffers and increase transfer buffer size
- * byond IrDA-MTU = 2048 so we have sufficient space left when
- * packet size increases during wrapping due to XBOFs and CE's.
- * Even for receiving unwrapped frames we need >MAX_PACKET_LEN
- * space since the controller appends FCS/CRC (2 or 4 bytes)
- * so we use 2*IrDA-MTU for both directions and cover even the
- * worst case, where all data bytes have to be escaped when wrapping.
- * well, this wastes some memory - anyway, later we will
- * either map skb's directly or use pci_pool allocator...
+/* maximum acceptable length for received packets */
+
+/* hw imposed limitation - register uses only [11:0] */
+#define MAX_PACKET_LENGTH 0x0fff
+
+/* IrLAP I-field (apparently not defined elsewhere) */
+#define IRDA_MTU 2048
+
+/* complete packet consists of A(1)+C(1)+I(<=IRDA_MTU) */
+#define IRLAP_SKB_ALLOCSIZE (1+1+IRDA_MTU)
+
+/* the buffers we use to exchange frames with the hardware need to be
+ * larger than IRLAP_SKB_ALLOCSIZE because we may have up to 4 bytes FCS
+ * appended and, in SIR mode, a lot of frame wrapping bytes. The worst
+ * case appears to be a SIR packet with I-size==IRDA_MTU and all bytes
+ * requiring to be escaped to provide transparency. Furthermore, the peer
+ * might ask for quite a number of additional XBOFs:
+ * up to 115+48 XBOFS 163
+ * regular BOF 1
+ * A-field 1
+ * C-field 1
+ * I-field, IRDA_MTU, all escaped 4096
+ * FCS (16 bit at SIR, escaped) 4
+ * EOF 1
+ * AFAICS nothing in IrLAP guarantees A/C field not to need escaping
+ * (f.e. 0xc0/0xc1 - i.e. BOF/EOF - are legal values there) so in the
+ * worst case we have 4269 bytes total frame size.
+ * However, the VLSI uses 12 bits only for all buffer length values,
+ * which limits the maximum useable buffer size <= 4095.
+ * Note this is not a limitation in the receive case because we use
+ * the SIR filtering mode where the hw unwraps the frame and only the
+ * bare packet+fcs is stored into the buffer - in contrast to the SIR
+ * tx case where we have to pass frame-wrapped packets to the hw.
+ * If this would ever become an issue in real life, the only workaround
+ * I see would be using the legacy UART emulation in SIR mode.
*/
-
-#define IRDA_MTU 2048 /* seems to be undefined elsewhere */
-
-#define XFER_BUF_SIZE (2*IRDA_MTU)
-
-#define MAX_PACKET_LENGTH (XFER_BUF_SIZE-1) /* register uses only [11:0] */
+#define XFER_BUF_SIZE MAX_PACKET_LENGTH
/* ------------------------------------------ */
-
/* VLSI_PIO_RCVBCNT: Receive Byte Count Register (u16, ro) */
-/* recive packet counter gets incremented on every non-filtered
+/* receive packet counter gets incremented on every non-filtered
* byte which was put in the receive fifo and reset for each
* new packet. Used to decide whether we are just in the middle
* of receiving
*/
+/* better apply the [11:0] mask when reading, as some docs say the
+ * reserved [15:12] would return 1 when reading - which is wrong AFAICS
+ */
#define RCVBCNT_MASK 0x0fff
-/* ================================================================ */
-
+/******************************************************************/
/* descriptors for rx/tx ring
*
@@ -494,10 +539,10 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
*
* Attention: Writing addr overwrites status!
*
- * ### FIXME: we depend on endianess here
+ * ### FIXME: depends on endianess (but there ain't no non-i586 ob800 ;-)
*/
-struct ring_descr {
+struct ring_descr_hw {
volatile u16 rd_count; /* tx/rx count [11:0] */
u16 reserved;
union {
@@ -505,60 +550,168 @@ struct ring_descr {
struct {
u8 addr_res[3];
volatile u8 status; /* descriptor status */
- } rd_s;
- } rd_u;
-};
+ } rd_s __attribute__((packed));
+ } rd_u __attribute((packed));
+} __attribute__ ((packed));
#define rd_addr rd_u.addr
#define rd_status rd_u.rd_s.status
-
/* ring descriptor status bits */
-#define RD_STAT_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
+#define RD_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
/* TX ring descriptor status */
-#define TX_STAT_DISCRC 0x40 /* do not send CRC (for SIR) */
-#define TX_STAT_BADCRC 0x20 /* force a bad CRC */
-#define TX_STAT_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
-#define TX_STAT_FRCEUND 0x08 /* force underrun */
-#define TX_STAT_CLRENTX 0x04 /* clear ENTX after this frame */
-#define TX_STAT_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
+#define RD_TX_DISCRC 0x40 /* do not send CRC (for SIR) */
+#define RD_TX_BADCRC 0x20 /* force a bad CRC */
+#define RD_TX_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
+#define RD_TX_FRCEUND 0x08 /* force underrun */
+#define RD_TX_CLRENTX 0x04 /* clear ENTX after this frame */
+#define RD_TX_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
/* RX ring descriptor status */
-#define RX_STAT_PHYERR 0x40 /* physical encoding error */
-#define RX_STAT_CRCERR 0x20 /* CRC error (MIR/FIR) */
-#define RX_STAT_LENGTH 0x10 /* frame exceeds buffer length */
-#define RX_STAT_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
-#define RX_STAT_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
-
+#define RD_RX_PHYERR 0x40 /* physical encoding error */
+#define RD_RX_CRCERR 0x20 /* CRC error (MIR/FIR) */
+#define RD_RX_LENGTH 0x10 /* frame exceeds buffer length */
+#define RD_RX_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
+#define RD_RX_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
-#define RX_STAT_ERROR 0x7c /* any error in frame */
+#define RD_RX_ERROR 0x7c /* any error in received frame */
+/* the memory required to hold the 2 descriptor rings */
+#define HW_RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr_hw))
-/* ------------------------------------------ */
+/******************************************************************/
-/* contains the objects we've put into the ring descriptors
- * static buffers for now - probably skb's later
+/* sw-ring descriptors consists of a bus-mapped transfer buffer with
+ * associated skb and a pointer to the hw entry descriptor
*/
-struct ring_entry {
- struct sk_buff *skb;
- void *data;
+struct ring_descr {
+ struct ring_descr_hw *hw;
+ struct sk_buff *skb;
+ void *buf;
};
+/* wrappers for operations on hw-exposed ring descriptors
+ * access to the hw-part of the descriptors must use these.
+ */
+
+static inline int rd_is_active(struct ring_descr *rd)
+{
+ return ((rd->hw->rd_status & RD_ACTIVE) != 0);
+}
+
+static inline void rd_activate(struct ring_descr *rd)
+{
+ rd->hw->rd_status |= RD_ACTIVE;
+}
+
+static inline void rd_set_status(struct ring_descr *rd, u8 s)
+{
+ rd->hw->rd_status = s; /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
+{
+ /* order is important for two reasons:
+ * - overlayed: writing addr overwrites status
+ * - we want to write status last so we have valid address in
+ * case status has RD_ACTIVE set
+ */
+
+ if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
+ BUG();
+ return;
+ }
+
+ a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
+ * to status - just in case MSTRPAGE_VALUE!=0
+ */
+ rd->hw->rd_addr = a;
+ wmb();
+ rd_set_status(rd, s); /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_count(struct ring_descr *rd, u16 c)
+{
+ rd->hw->rd_count = c;
+}
+
+static inline u8 rd_get_status(struct ring_descr *rd)
+{
+ return rd->hw->rd_status;
+}
+
+static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
+{
+ dma_addr_t a;
+
+ a = (rd->hw->rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
+ return a;
+}
+
+static inline u16 rd_get_count(struct ring_descr *rd)
+{
+ return rd->hw->rd_count;
+}
+
+/******************************************************************/
+
+/* sw descriptor rings for rx, tx:
+ *
+ * operations follow producer-consumer paradigm, with the hw
+ * in the middle doing the processing.
+ * ring size must be power of two.
+ *
+ * producer advances r->tail after inserting for processing
+ * consumer advances r->head after removing processed rd
+ * ring is empty if head==tail / full if (tail+1)==head
+ */
struct vlsi_ring {
+ struct pci_dev *pdev;
+ int dir;
+ unsigned len;
unsigned size;
unsigned mask;
- unsigned head, tail;
- struct ring_descr *hw;
- struct ring_entry buf[MAX_RING_DESCR];
+ atomic_t head, tail;
+ struct ring_descr *rd;
};
-/* ------------------------------------------ */
+/* ring processing helpers */
+
+static inline struct ring_descr *ring_last(struct vlsi_ring *r)
+{
+ int t;
+
+ t = atomic_read(&r->tail) & r->mask;
+ return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
+}
+
+static inline struct ring_descr *ring_put(struct vlsi_ring *r)
+{
+ atomic_inc(&r->tail);
+ return ring_last(r);
+}
+
+static inline struct ring_descr *ring_first(struct vlsi_ring *r)
+{
+ int h;
+
+ h = atomic_read(&r->head) & r->mask;
+ return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
+}
+
+static inline struct ring_descr *ring_get(struct vlsi_ring *r)
+{
+ atomic_inc(&r->head);
+ return ring_first(r);
+}
+
+/******************************************************************/
/* our private compound VLSI-PCI-IRDA device information */
@@ -575,15 +728,40 @@ typedef struct vlsi_irda_dev {
dma_addr_t busaddr;
void *virtaddr;
- struct vlsi_ring tx_ring, rx_ring;
+ struct vlsi_ring *tx_ring, *rx_ring;
struct timeval last_rx;
spinlock_t lock;
-
+ struct semaphore sem;
+
+ u32 cfg_space[64/sizeof(u32)];
+ u8 resume_ok;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+#endif
+
} vlsi_irda_dev_t;
/********************************************************/
+/* the remapped error flags we use for returning from frame
+ * post-processing in vlsi_process_tx/rx() after it was completed
+ * by the hardware. These functions either return the >=0 number
+ * of transfered bytes in case of success or the negative (-)
+ * of the or'ed error flags.
+ */
+
+#define VLSI_TX_DROP 0x0001
+#define VLSI_TX_FIFO 0x0002
+
+#define VLSI_RX_DROP 0x0100
+#define VLSI_RX_OVER 0x0200
+#define VLSI_RX_LENGTH 0x0400
+#define VLSI_RX_FRAME 0x0800
+#define VLSI_RX_CRC 0x1000
+
+/********************************************************/
+
#endif /* IRDA_VLSI_FIR_H */
diff --git a/include/net/route.h b/include/net/route.h
index 7ddc79e4d07e..621b0c44b250 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -27,6 +27,7 @@
#include <linux/config.h>
#include <net/dst.h>
#include <net/inetpeer.h>
+#include <net/flow.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/route.h>
@@ -45,19 +46,6 @@
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sk->localroute)
-struct rt_key
-{
- __u32 dst;
- __u32 src;
- int iif;
- int oif;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- __u32 fwmark;
-#endif
- __u8 tos;
- __u8 scope;
-};
-
struct inet_peer;
struct rtable
{
@@ -78,7 +66,7 @@ struct rtable
__u32 rt_gateway;
/* Cache lookup keys */
- struct rt_key key;
+ struct flowi fl;
/* Miscellaneous cached information */
__u32 rt_spec_dst; /* RFC1122 specific destination */
@@ -124,7 +112,7 @@ extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw,
u32 src, u8 tos, struct net_device *dev);
extern void ip_rt_advice(struct rtable **rp, int advice);
extern void rt_cache_flush(int how);
-extern int ip_route_output_key(struct rtable **, const struct rt_key *key);
+extern int ip_route_output_key(struct rtable **, const struct flowi *flp);
extern int ip_route_input(struct sk_buff*, u32 dst, u32 src, u8 tos, struct net_device *devin);
extern unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu);
extern void ip_rt_update_pmtu(struct dst_entry *dst, unsigned mtu);
@@ -136,16 +124,6 @@ extern int ip_rt_ioctl(unsigned int cmd, void *arg);
extern void ip_rt_get_source(u8 *src, struct rtable *rt);
extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
-/* Deprecated: use ip_route_output_key directly */
-static inline int ip_route_output(struct rtable **rp,
- u32 daddr, u32 saddr, u32 tos, int oif)
-{
- struct rt_key key = { dst:daddr, src:saddr, oif:oif, tos:tos };
-
- return ip_route_output_key(rp, &key);
-}
-
-
static inline void ip_rt_put(struct rtable * rt)
{
if (rt)
@@ -163,15 +141,20 @@ static inline char rt_tos2priority(u8 tos)
static inline int ip_route_connect(struct rtable **rp, u32 dst, u32 src, u32 tos, int oif)
{
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
+ .saddr = src,
+ .tos = tos } },
+ .oif = oif };
+
int err;
- err = ip_route_output(rp, dst, src, tos, oif);
+ err = ip_route_output_key(rp, &fl);
if (err || (dst && src))
return err;
- dst = (*rp)->rt_dst;
- src = (*rp)->rt_src;
+ fl.fl4_dst = (*rp)->rt_dst;
+ fl.fl4_src = (*rp)->rt_src;
ip_rt_put(*rp);
*rp = NULL;
- return ip_route_output(rp, dst, src, tos, oif);
+ return ip_route_output_key(rp, &fl);
}
extern void rt_bind_peer(struct rtable *rt, int create);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7183f85e9999..de3c44d714b0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -472,6 +472,7 @@ extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
+extern int sysctl_tcp_frto;
extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
@@ -1856,4 +1857,17 @@ static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
#define TCP_CHECK_TIMER(sk) do { } while (0)
+static inline int tcp_use_frto(const struct sock *sk)
+{
+ const struct tcp_opt *tp = tcp_sk(sk);
+
+ /* F-RTO must be activated in sysctl and there must be some
+ * unsent new data, and the advertised window should allow
+ * sending it.
+ */
+ return (sysctl_tcp_frto && tp->send_head &&
+ !after(TCP_SKB_CB(tp->send_head)->end_seq,
+ tp->snd_una + tp->snd_wnd));
+}
+
#endif /* _TCP_H */
diff --git a/kernel/sched.c b/kernel/sched.c
index 62e81a6f285d..0464ac0649b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1953,7 +1953,6 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
BUG();
#endif
- preempt_disable();
rq = task_rq_lock(p, &flags);
p->cpus_allowed = new_mask;
/*
@@ -1962,7 +1961,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
*/
if (new_mask & (1UL << task_cpu(p))) {
task_rq_unlock(rq, &flags);
- goto out;
+ return;
}
/*
* If the task is not on a runqueue (and not running), then
@@ -1971,17 +1970,16 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
if (!p->array && !task_running(rq, p)) {
set_task_cpu(p, __ffs(p->cpus_allowed));
task_rq_unlock(rq, &flags);
- goto out;
+ return;
}
init_completion(&req.done);
req.task = p;
list_add(&req.list, &rq->migration_queue);
task_rq_unlock(rq, &flags);
+
wake_up_process(rq->migration_thread);
wait_for_completion(&req.done);
-out:
- preempt_enable();
}
/*
@@ -1999,16 +1997,12 @@ static int migration_thread(void * data)
sigfillset(&current->blocked);
set_fs(KERNEL_DS);
- set_cpus_allowed(current, 1UL << cpu);
-
/*
- * Migration can happen without a migration thread on the
- * target CPU because here we remove the thread from the
- * runqueue and the helper thread then moves this thread
- * to the target CPU - we'll wake up there.
+ * Either we are running on the right CPU, or there's a
+ * a migration thread on the target CPU, guaranteed.
*/
- if (smp_processor_id() != cpu)
- printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
+ set_cpus_allowed(current, 1UL << cpu);
+
ret = setscheduler(0, SCHED_FIFO, &param);
rq = this_rq();
@@ -2055,6 +2049,8 @@ repeat:
if (p->array) {
deactivate_task(p, rq_src);
activate_task(p, rq_dest);
+ if (p->prio < rq_dest->curr->prio)
+ resched_task(rq_dest->curr);
}
}
double_rq_unlock(rq_src, rq_dest);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 96013cb4c0e5..87bf9d4b28ac 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -509,6 +509,7 @@ int clip_setentry(struct atm_vcc *vcc,u32 ip)
struct atmarp_entry *entry;
int error;
struct clip_vcc *clip_vcc;
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1 } } };
struct rtable *rt;
if (vcc->push != clip_push) {
@@ -525,7 +526,7 @@ int clip_setentry(struct atm_vcc *vcc,u32 ip)
unlink_clip_vcc(clip_vcc);
return 0;
}
- error = ip_route_output(&rt,ip,0,1,0);
+ error = ip_route_output_key(&rt,&fl);
if (error) return error;
neigh = __neigh_lookup(&clip_tbl,&ip,rt->u.dst.dev,1);
ip_rt_put(rt);
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index 816c063896d9..4af99cda49bf 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -563,13 +563,15 @@ int ip_route_me_harder(struct sk_buff **pskb)
{
struct iphdr *iph = (*pskb)->nh.iph;
struct rtable *rt;
- struct rt_key key = { dst:iph->daddr,
- src:iph->saddr,
- oif:(*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
- tos:RT_TOS(iph->tos)|RTO_CONN,
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos)|RTO_CONN,
#ifdef CONFIG_IP_ROUTE_FWMARK
- fwmark:(*pskb)->nfmark
+ .fwmark = (*pskb)->nfmark
#endif
+ } },
+ .oif = (*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
};
struct net_device *dev_src = NULL;
int err;
@@ -578,10 +580,10 @@ int ip_route_me_harder(struct sk_buff **pskb)
0 or a local address; however some non-standard hacks like
ipt_REJECT.c:send_reset() can cause packets with foreign
saddr to be appear on the NF_IP_LOCAL_OUT hook -MB */
- if(key.src && !(dev_src = ip_dev_find(key.src)))
- key.src = 0;
+ if(fl.fl4_src && !(dev_src = ip_dev_find(fl.fl4_src)))
+ fl.fl4_src = 0;
- if ((err=ip_route_output_key(&rt, &key)) != 0) {
+ if ((err=ip_route_output_key(&rt, &fl)) != 0) {
printk("route_me_harder: ip_route_output_key(dst=%u.%u.%u.%u, src=%u.%u.%u.%u, oif=%d, tos=0x%x, fwmark=0x%lx) error %d\n",
NIPQUAD(iph->daddr), NIPQUAD(iph->saddr),
(*pskb)->sk ? (*pskb)->sk->bound_dev_if : 0,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f067c5c9848d..885dc70b4176 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -93,6 +93,7 @@
#include <linux/smp_lock.h>
#include <linux/inet.h>
+#include <linux/igmp.h>
#include <linux/netdevice.h>
#include <linux/brlock.h>
#include <net/ip.h>
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a24265d561e5..083b0904c68c 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -347,11 +347,13 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
static int arp_filter(__u32 sip, __u32 tip, struct net_device *dev)
{
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
+ .saddr = tip } } };
struct rtable *rt;
int flag = 0;
/*unsigned long now; */
- if (ip_route_output(&rt, sip, tip, 0, 0) < 0)
+ if (ip_route_output_key(&rt, &fl) < 0)
return 1;
if (rt->u.dst.dev != dev) {
NET_INC_STATS_BH(ArpFilter);
@@ -890,8 +892,10 @@ int arp_req_set(struct arpreq *r, struct net_device * dev)
if (r->arp_flags & ATF_PERM)
r->arp_flags |= ATF_COM;
if (dev == NULL) {
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip,
+ .tos = RTO_ONLINK } } };
struct rtable * rt;
- if ((err = ip_route_output(&rt, ip, 0, RTO_ONLINK, 0)) != 0)
+ if ((err = ip_route_output_key(&rt, &fl)) != 0)
return err;
dev = rt->u.dst.dev;
ip_rt_put(rt);
@@ -974,8 +978,10 @@ int arp_req_delete(struct arpreq *r, struct net_device * dev)
}
if (dev == NULL) {
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip,
+ .tos = RTO_ONLINK } } };
struct rtable * rt;
- if ((err = ip_route_output(&rt, ip, 0, RTO_ONLINK, 0)) != 0)
+ if ((err = ip_route_output_key(&rt, &fl)) != 0)
return err;
dev = rt->u.dst.dev;
ip_rt_put(rt);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c101889f0226..74905d6696f2 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -144,17 +144,15 @@ fib_get_procinfo(char *buffer, char **start, off_t offset, int length)
struct net_device * ip_dev_find(u32 addr)
{
- struct rt_key key;
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
struct fib_result res;
struct net_device *dev = NULL;
- memset(&key, 0, sizeof(key));
- key.dst = addr;
#ifdef CONFIG_IP_MULTIPLE_TABLES
res.r = NULL;
#endif
- if (!local_table || local_table->tb_lookup(local_table, &key, &res)) {
+ if (!local_table || local_table->tb_lookup(local_table, &fl, &res)) {
return NULL;
}
if (res.type != RTN_LOCAL)
@@ -170,7 +168,7 @@ out:
unsigned inet_addr_type(u32 addr)
{
- struct rt_key key;
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
struct fib_result res;
unsigned ret = RTN_BROADCAST;
@@ -179,15 +177,13 @@ unsigned inet_addr_type(u32 addr)
if (MULTICAST(addr))
return RTN_MULTICAST;
- memset(&key, 0, sizeof(key));
- key.dst = addr;
#ifdef CONFIG_IP_MULTIPLE_TABLES
res.r = NULL;
#endif
if (local_table) {
ret = RTN_UNICAST;
- if (local_table->tb_lookup(local_table, &key, &res) == 0) {
+ if (local_table->tb_lookup(local_table, &fl, &res) == 0) {
ret = res.type;
fib_res_put(&res);
}
@@ -207,18 +203,15 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
struct net_device *dev, u32 *spec_dst, u32 *itag)
{
struct in_device *in_dev;
- struct rt_key key;
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = src,
+ .saddr = dst,
+ .tos = tos } },
+ .iif = oif };
struct fib_result res;
int no_addr, rpf;
int ret;
- key.dst = src;
- key.src = dst;
- key.tos = tos;
- key.oif = 0;
- key.iif = oif;
- key.scope = RT_SCOPE_UNIVERSE;
-
no_addr = rpf = 0;
read_lock(&inetdev_lock);
in_dev = __in_dev_get(dev);
@@ -231,7 +224,7 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
if (in_dev == NULL)
goto e_inval;
- if (fib_lookup(&key, &res))
+ if (fib_lookup(&fl, &res))
goto last_resort;
if (res.type != RTN_UNICAST)
goto e_inval_res;
@@ -252,10 +245,10 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
goto last_resort;
if (rpf)
goto e_inval;
- key.oif = dev->ifindex;
+ fl.oif = dev->ifindex;
ret = 0;
- if (fib_lookup(&key, &res) == 0) {
+ if (fib_lookup(&fl, &res) == 0) {
if (res.type == RTN_UNICAST) {
*spec_dst = FIB_RES_PREFSRC(res);
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index b16d479e817e..8b4ed3701998 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -266,7 +266,7 @@ fn_new_zone(struct fn_hash *table, int z)
}
static int
-fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result *res)
+fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
int err;
struct fn_zone *fz;
@@ -275,7 +275,7 @@ fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result
read_lock(&fib_hash_lock);
for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
struct fib_node *f;
- fn_key_t k = fz_key(key->dst, fz);
+ fn_key_t k = fz_key(flp->fl4_dst, fz);
for (f = fz_chain(k, fz); f; f = f->fn_next) {
if (!fn_key_eq(k, f->fn_key)) {
@@ -285,17 +285,17 @@ fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result
continue;
}
#ifdef CONFIG_IP_ROUTE_TOS
- if (f->fn_tos && f->fn_tos != key->tos)
+ if (f->fn_tos && f->fn_tos != flp->fl4_tos)
continue;
#endif
f->fn_state |= FN_S_ACCESSED;
if (f->fn_state&FN_S_ZOMBIE)
continue;
- if (f->fn_scope < key->scope)
+ if (f->fn_scope < flp->fl4_scope)
continue;
- err = fib_semantic_match(f->fn_type, FIB_INFO(f), key, res);
+ err = fib_semantic_match(f->fn_type, FIB_INFO(f), flp, res);
if (err == 0) {
res->type = f->fn_type;
res->scope = f->fn_scope;
@@ -338,7 +338,7 @@ static int fib_detect_death(struct fib_info *fi, int order,
}
static void
-fn_hash_select_default(struct fib_table *tb, const struct rt_key *key, struct fib_result *res)
+fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
int order, last_idx;
struct fib_node *f;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 8ae6cbeee71e..c271e5965779 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -307,28 +307,28 @@ static void fib_rules_attach(struct net_device *dev)
}
}
-int fib_lookup(const struct rt_key *key, struct fib_result *res)
+int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
int err;
struct fib_rule *r, *policy;
struct fib_table *tb;
- u32 daddr = key->dst;
- u32 saddr = key->src;
+ u32 daddr = flp->fl4_dst;
+ u32 saddr = flp->fl4_src;
FRprintk("Lookup: %u.%u.%u.%u <- %u.%u.%u.%u ",
- NIPQUAD(key->dst), NIPQUAD(key->src));
+ NIPQUAD(flp->fl4_dst), NIPQUAD(flp->fl4_src));
read_lock(&fib_rules_lock);
for (r = fib_rules; r; r=r->r_next) {
if (((saddr^r->r_src) & r->r_srcmask) ||
((daddr^r->r_dst) & r->r_dstmask) ||
#ifdef CONFIG_IP_ROUTE_TOS
- (r->r_tos && r->r_tos != key->tos) ||
+ (r->r_tos && r->r_tos != flp->fl4_tos) ||
#endif
#ifdef CONFIG_IP_ROUTE_FWMARK
- (r->r_fwmark && r->r_fwmark != key->fwmark) ||
+ (r->r_fwmark && r->r_fwmark != flp->fl4_fwmark) ||
#endif
- (r->r_ifindex && r->r_ifindex != key->iif))
+ (r->r_ifindex && r->r_ifindex != flp->iif))
continue;
FRprintk("tb %d r %d ", r->r_table, r->r_action);
@@ -351,7 +351,7 @@ FRprintk("tb %d r %d ", r->r_table, r->r_action);
if ((tb = fib_get_table(r->r_table)) == NULL)
continue;
- err = tb->tb_lookup(tb, key, res);
+ err = tb->tb_lookup(tb, flp, res);
if (err == 0) {
res->r = policy;
if (policy)
@@ -369,13 +369,13 @@ FRprintk("FAILURE\n");
return -ENETUNREACH;
}
-void fib_select_default(const struct rt_key *key, struct fib_result *res)
+void fib_select_default(const struct flowi *flp, struct fib_result *res)
{
if (res->r && res->r->r_action == RTN_UNICAST &&
FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
struct fib_table *tb;
if ((tb = fib_get_table(res->r->r_table)) != NULL)
- tb->tb_select_default(tb, key, res);
+ tb->tb_select_default(tb, flp, res);
}
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 071b4af2b05b..4e2674a5ad79 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -349,7 +349,6 @@ static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_n
int err;
if (nh->nh_gw) {
- struct rt_key key;
struct fib_result res;
#ifdef CONFIG_IP_ROUTE_PERVASIVE
@@ -372,16 +371,18 @@ static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_n
nh->nh_scope = RT_SCOPE_LINK;
return 0;
}
- memset(&key, 0, sizeof(key));
- key.dst = nh->nh_gw;
- key.oif = nh->nh_oif;
- key.scope = r->rtm_scope + 1;
-
- /* It is not necessary, but requires a bit of thinking */
- if (key.scope < RT_SCOPE_LINK)
- key.scope = RT_SCOPE_LINK;
- if ((err = fib_lookup(&key, &res)) != 0)
- return err;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = nh->nh_gw,
+ .scope = r->rtm_scope + 1 } },
+ .oif = nh->nh_oif };
+
+ /* It is not necessary, but requires a bit of thinking */
+ if (fl.fl4_scope < RT_SCOPE_LINK)
+ fl.fl4_scope = RT_SCOPE_LINK;
+ if ((err = fib_lookup(&fl, &res)) != 0)
+ return err;
+ }
err = -EINVAL;
if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
goto out;
@@ -578,7 +579,7 @@ failure:
}
int
-fib_semantic_match(int type, struct fib_info *fi, const struct rt_key *key, struct fib_result *res)
+fib_semantic_match(int type, struct fib_info *fi, const struct flowi *flp, struct fib_result *res)
{
int err = fib_props[type].error;
@@ -603,7 +604,7 @@ fib_semantic_match(int type, struct fib_info *fi, const struct rt_key *key, stru
for_nexthops(fi) {
if (nh->nh_flags&RTNH_F_DEAD)
continue;
- if (!key->oif || key->oif == nh->nh_oif)
+ if (!flp->oif || flp->oif == nh->nh_oif)
break;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -949,7 +950,7 @@ int fib_sync_up(struct net_device *dev)
fair weighted route distribution.
*/
-void fib_select_multipath(const struct rt_key *key, struct fib_result *res)
+void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
{
struct fib_info *fi = res->fi;
int w;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ebbb05b5243d..0407a4babb3a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -418,9 +418,14 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
if (ipc.opt->srr)
daddr = icmp_param->replyopts.faddr;
}
- if (ip_route_output(&rt, daddr, rt->rt_spec_dst,
- RT_TOS(skb->nh.iph->tos), 0))
- goto out_unlock;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = rt->rt_spec_dst,
+ .tos = RT_TOS(skb->nh.iph->tos) } } };
+ if (ip_route_output_key(&rt, &fl))
+ goto out_unlock;
+ }
if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type,
icmp_param->data.icmph.code)) {
ip_build_xmit(sk, icmp_glue_bits, icmp_param,
@@ -526,8 +531,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
* Restore original addresses if packet has been translated.
*/
if (rt->rt_flags & RTCF_NAT && IPCB(skb_in)->flags & IPSKB_TRANSLATED) {
- iph->daddr = rt->key.dst;
- iph->saddr = rt->key.src;
+ iph->daddr = rt->fl.fl4_dst;
+ iph->saddr = rt->fl.fl4_src;
}
#endif
@@ -539,9 +544,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
IPTOS_PREC_INTERNETCONTROL) :
iph->tos;
- if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
- goto out_unlock;
-
+ {
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = iph->saddr,
+ .saddr = saddr,
+ .tos = RT_TOS(tos) } } };
+ if (ip_route_output_key(&rt, &fl))
+ goto out_unlock;
+ }
if (ip_options_echo(&icmp_param.replyopts, skb_in))
goto ende;
@@ -563,9 +572,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts;
if (icmp_param.replyopts.srr) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = icmp_param.replyopts.faddr,
+ .saddr = saddr,
+ .tos = RT_TOS(tos) } } };
ip_rt_put(rt);
- if (ip_route_output(&rt, icmp_param.replyopts.faddr,
- saddr, RT_TOS(tos), 0))
+ if (ip_route_output_key(&rt, &fl))
goto out_unlock;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c887839e79ea..661e65974e39 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -207,8 +207,12 @@ static int igmp_send_report(struct net_device *dev, u32 group, int type)
if (type == IGMP_HOST_LEAVE_MESSAGE)
dst = IGMP_ALL_ROUTER;
- if (ip_route_output(&rt, dst, 0, 0, dev->ifindex))
- return -1;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst } },
+ .oif = dev->ifindex };
+ if (ip_route_output_key(&rt, &fl))
+ return -1;
+ }
if (rt->rt_src == 0) {
ip_rt_put(rt);
return -1;
@@ -374,7 +378,7 @@ int igmp_rcv(struct sk_buff *skb)
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMP_HOST_NEW_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
- if (((struct rtable*)skb->dst)->key.iif == 0)
+ if (((struct rtable*)skb->dst)->fl.iif == 0)
break;
igmp_heard_report(in_dev, ih->group);
break;
@@ -608,6 +612,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
{
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = imr->imr_address.s_addr } } };
struct rtable *rt;
struct net_device *dev = NULL;
struct in_device *idev = NULL;
@@ -619,7 +625,7 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
__dev_put(dev);
}
- if (!dev && !ip_route_output(&rt, imr->imr_multiaddr.s_addr, 0, 0, 0)) {
+ if (!dev && !ip_route_output_key(&rt, &fl)) {
dev = rt->u.dst.dev;
ip_rt_put(rt);
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ed0525d342bd..f120e87b4cc3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -412,6 +412,7 @@ out:
u16 flags;
int grehlen = (iph->ihl<<2) + 4;
struct sk_buff *skb2;
+ struct flowi fl;
struct rtable *rt;
if (p[1] != htons(ETH_P_IP))
@@ -488,7 +489,10 @@ out:
skb2->nh.raw = skb2->data;
/* Try to guess incoming interface */
- if (ip_route_output(&rt, eiph->saddr, 0, RT_TOS(eiph->tos), 0)) {
+ memset(&fl, 0, sizeof(fl));
+ fl.fl4_dst = eiph->saddr;
+ fl.fl4_tos = RT_TOS(eiph->tos);
+ if (ip_route_output_key(&rt, &fl)) {
kfree_skb(skb2);
return;
}
@@ -498,7 +502,10 @@ out:
if (rt->rt_flags&RTCF_LOCAL) {
ip_rt_put(rt);
rt = NULL;
- if (ip_route_output(&rt, eiph->daddr, eiph->saddr, eiph->tos, 0) ||
+ fl.fl4_dst = eiph->daddr;
+ fl.fl4_src = eiph->saddr;
+ fl.fl4_tos = eiph->tos;
+ if (ip_route_output_key(&rt, &fl) ||
rt->u.dst.dev->type != ARPHRD_IPGRE) {
ip_rt_put(rt);
kfree_skb(skb2);
@@ -619,7 +626,7 @@ int ipgre_rcv(struct sk_buff *skb)
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (MULTICAST(iph->daddr)) {
/* Looped back packet, drop it! */
- if (((struct rtable*)skb->dst)->key.iif == 0)
+ if (((struct rtable*)skb->dst)->fl.iif == 0)
goto drop;
tunnel->stat.multicast++;
skb->pkt_type = PACKET_BROADCAST;
@@ -749,9 +756,16 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tos &= ~1;
}
- if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
- tunnel->stat.tx_carrier_errors++;
- goto tx_error;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos) } },
+ .oif = tunnel->parms.link };
+ if (ip_route_output_key(&rt, &fl)) {
+ tunnel->stat.tx_carrier_errors++;
+ goto tx_error;
+ }
}
tdev = rt->u.dst.dev;
@@ -1104,10 +1118,13 @@ static int ipgre_open(struct net_device *dev)
MOD_INC_USE_COUNT;
if (MULTICAST(t->parms.iph.daddr)) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = t->parms.iph.daddr,
+ .saddr = t->parms.iph.saddr,
+ .tos = RT_TOS(t->parms.iph.tos) } },
+ .oif = t->parms.link };
struct rtable *rt;
- if (ip_route_output(&rt, t->parms.iph.daddr,
- t->parms.iph.saddr, RT_TOS(t->parms.iph.tos),
- t->parms.link)) {
+ if (ip_route_output_key(&rt, &fl)) {
MOD_DEC_USE_COUNT;
return -EADDRNOTAVAIL;
}
@@ -1177,8 +1194,13 @@ static int ipgre_tunnel_init(struct net_device *dev)
/* Guess output device to choose reasonable mtu and hard_header_len */
if (iph->daddr) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos) } },
+ .oif = tunnel->parms.link };
struct rtable *rt;
- if (!ip_route_output(&rt, iph->daddr, iph->saddr, RT_TOS(iph->tos), tunnel->parms.link)) {
+ if (!ip_route_output_key(&rt, &fl)) {
tdev = rt->u.dst.dev;
ip_rt_put(rt);
}
diff --git a/net/ipv4/ip_nat_dumb.c b/net/ipv4/ip_nat_dumb.c
index 449c4f44102e..58d1d3f4b8c9 100644
--- a/net/ipv4/ip_nat_dumb.c
+++ b/net/ipv4/ip_nat_dumb.c
@@ -117,23 +117,21 @@ ip_do_nat(struct sk_buff *skb)
if (rt->rt_flags&RTCF_SNAT) {
if (ciph->daddr != osaddr) {
struct fib_result res;
- struct rt_key key;
unsigned flags = 0;
-
- key.src = ciph->daddr;
- key.dst = ciph->saddr;
- key.iif = skb->dev->ifindex;
- key.oif = 0;
+ struct flowi fl = { .nl_u =
+ { .ip4_u =
+ { .daddr = ciph->saddr,
+ .saddr = ciph->daddr,
#ifdef CONFIG_IP_ROUTE_TOS
- key.tos = RT_TOS(ciph->tos);
-#endif
-#ifdef CONFIG_IP_ROUTE_FWMARK
- key.fwmark = 0;
+ .tos = RT_TOS(ciph->tos)
#endif
+ } },
+ .iif = skb->dev->ifindex };
+
/* Use fib_lookup() until we get our own
* hash table of NATed hosts -- Rani
*/
- if (fib_lookup(&key, &res) == 0) {
+ if (fib_lookup(&fl, &res) == 0) {
if (res.r) {
ciph->daddr = fib_rules_policy(ciph->daddr, &res, &flags);
if (ciph->daddr != idaddr)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 082c29da4047..efbe98cf67b0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -372,14 +372,20 @@ int ip_queue_xmit(struct sk_buff *skb)
if(opt && opt->srr)
daddr = opt->faddr;
- /* If this fails, retransmit mechanism of transport layer will
- * keep trying until route appears or the connection times itself
- * out.
- */
- if (ip_route_output(&rt, daddr, inet->saddr,
- RT_CONN_FLAGS(sk),
- sk->bound_dev_if))
- goto no_route;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = inet->saddr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .oif = sk->bound_dev_if };
+
+ /* If this fails, retransmit mechanism of transport layer will
+ * keep trying until route appears or the connection times itself
+ * out.
+ */
+ if (ip_route_output_key(&rt, &fl))
+ goto no_route;
+ }
__sk_dst_set(sk, &rt->u.dst);
tcp_v4_setup_caps(sk, &rt->u.dst);
}
@@ -991,8 +997,14 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
daddr = replyopts.opt.faddr;
}
- if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
- return;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = rt->rt_spec_dst,
+ .tos = RT_TOS(skb->nh.iph->tos) } } };
+ if (ip_route_output_key(&rt, &fl))
+ return;
+ }
/* And let IP do all the hard work.
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 83c96cb9db73..1e6ed55c935a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -355,6 +355,7 @@ out:
int rel_code = 0;
int rel_info = 0;
struct sk_buff *skb2;
+ struct flowi fl;
struct rtable *rt;
if (len < hlen + sizeof(struct iphdr))
@@ -417,7 +418,10 @@ out:
skb2->nh.raw = skb2->data;
/* Try to guess incoming interface */
- if (ip_route_output(&rt, eiph->saddr, 0, RT_TOS(eiph->tos), 0)) {
+ memset(&fl, 0, sizeof(fl));
+ fl.fl4_dst = eiph->saddr;
+ fl.fl4_tos = RT_TOS(eiph->tos);
+ if (ip_route_output_key(&rt, &fl)) {
kfree_skb(skb2);
return;
}
@@ -427,7 +431,10 @@ out:
if (rt->rt_flags&RTCF_LOCAL) {
ip_rt_put(rt);
rt = NULL;
- if (ip_route_output(&rt, eiph->daddr, eiph->saddr, eiph->tos, 0) ||
+ fl.fl4_dst = eiph->daddr;
+ fl.fl4_src = eiph->saddr;
+ fl.fl4_tos = eiph->tos;
+ if (ip_route_output_key(&rt, &fl) ||
rt->u.dst.dev->type != ARPHRD_IPGRE) {
ip_rt_put(rt);
kfree_skb(skb2);
@@ -560,9 +567,16 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error_icmp;
}
- if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
- tunnel->stat.tx_carrier_errors++;
- goto tx_error_icmp;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos) } },
+ .oif = tunnel->parms.link };
+ if (ip_route_output_key(&rt, &fl)) {
+ tunnel->stat.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
}
tdev = rt->u.dst.dev;
@@ -822,8 +836,13 @@ static int ipip_tunnel_init(struct net_device *dev)
ipip_tunnel_init_gen(dev);
if (iph->daddr) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos) } },
+ .oif = tunnel->parms.link };
struct rtable *rt;
- if (!ip_route_output(&rt, iph->daddr, iph->saddr, RT_TOS(iph->tos), tunnel->parms.link)) {
+ if (!ip_route_output_key(&rt, &fl)) {
tdev = rt->u.dst.dev;
ip_rt_put(rt);
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c3db6285f1d4..a5450b23ef8a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1146,11 +1146,20 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
#endif
if (vif->flags&VIFF_TUNNEL) {
- if (ip_route_output(&rt, vif->remote, vif->local, RT_TOS(iph->tos), vif->link))
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = vif->remote,
+ .saddr = vif->local,
+ .tos = RT_TOS(iph->tos) } },
+ .oif = vif->link };
+ if (ip_route_output_key(&rt, &fl))
return;
encap = sizeof(struct iphdr);
} else {
- if (ip_route_output(&rt, iph->daddr, 0, RT_TOS(iph->tos), vif->link))
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = iph->daddr,
+ .tos = RT_TOS(iph->tos) } },
+ .oif = vif->link };
+ if (ip_route_output_key(&rt, &fl))
return;
}
@@ -1244,7 +1253,7 @@ int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
if (vif_table[vif].dev != skb->dev) {
int true_vifi;
- if (((struct rtable*)skb->dst)->key.iif == 0) {
+ if (((struct rtable*)skb->dst)->fl.iif == 0) {
/* It is our own packet, looped back.
Very complicated situation...
diff --git a/net/ipv4/netfilter/ip_fw_compat_masq.c b/net/ipv4/netfilter/ip_fw_compat_masq.c
index 708dff11745b..f522eab4038b 100644
--- a/net/ipv4/netfilter/ip_fw_compat_masq.c
+++ b/net/ipv4/netfilter/ip_fw_compat_masq.c
@@ -68,12 +68,13 @@ do_masquerade(struct sk_buff **pskb, const struct net_device *dev)
/* Setup the masquerade, if not already */
if (!info->initialized) {
u_int32_t newsrc;
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = iph->daddr } } };
struct rtable *rt;
struct ip_nat_multi_range range;
/* Pass 0 instead of saddr, since it's going to be changed
anyway. */
- if (ip_route_output(&rt, iph->daddr, 0, 0, 0) != 0) {
+ if (ip_route_output_key(&rt, &fl) != 0) {
DEBUGP("ipnat_rule_masquerade: Can't reroute.\n");
return NF_DROP;
}
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 5c72c54e02b2..38a8e37ffd9a 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -209,10 +209,11 @@ find_appropriate_src(const struct ip_conntrack_tuple *tuple,
static int
do_extra_mangle(u_int32_t var_ip, u_int32_t *other_ipp)
{
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = var_ip } } };
struct rtable *rt;
/* FIXME: IPTOS_TOS(iph->tos) --RR */
- if (ip_route_output(&rt, var_ip, 0, 0, 0) != 0) {
+ if (ip_route_output_key(&rt, &fl) != 0) {
DEBUGP("do_extra_mangle: Can't get route to %u.%u.%u.%u\n",
NIPQUAD(var_ip));
return 0;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 82515c49a8f2..fcbc2341447e 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -69,7 +69,6 @@ masquerade_target(struct sk_buff **pskb,
struct ip_nat_multi_range newrange;
u_int32_t newsrc;
struct rtable *rt;
- struct rt_key key;
IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
@@ -84,17 +83,21 @@ masquerade_target(struct sk_buff **pskb,
mr = targinfo;
- key.dst = (*pskb)->nh.iph->daddr;
- key.src = 0; /* Unknown: that's what we're trying to establish */
- key.tos = RT_TOS((*pskb)->nh.iph->tos)|RTO_CONN;
- key.oif = out->ifindex;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = (*pskb)->nh.iph->daddr,
+ .tos = (RT_TOS((*pskb)->nh.iph->tos) |
+ RTO_CONN),
#ifdef CONFIG_IP_ROUTE_FWMARK
- key.fwmark = (*pskb)->nfmark;
+ .fwmark = (*pskb)->nfmark
#endif
- if (ip_route_output_key(&rt, &key) != 0) {
- /* Shouldn't happen */
- printk("MASQUERADE: No route: Rusty's brain broke!\n");
- return NF_DROP;
+ } },
+ .oif = out->ifindex };
+ if (ip_route_output_key(&rt, &fl) != 0) {
+ /* Shouldn't happen */
+ printk("MASQUERADE: No route: Rusty's brain broke!\n");
+ return NF_DROP;
+ }
}
newsrc = rt->rt_src;
diff --git a/net/ipv4/netfilter/ipt_MIRROR.c b/net/ipv4/netfilter/ipt_MIRROR.c
index 2fd35b69924f..ba003e5a0dd1 100644
--- a/net/ipv4/netfilter/ipt_MIRROR.c
+++ b/net/ipv4/netfilter/ipt_MIRROR.c
@@ -44,12 +44,13 @@ struct in_device;
static int route_mirror(struct sk_buff *skb)
{
struct iphdr *iph = skb->nh.iph;
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = iph->saddr,
+ .saddr = iph->daddr,
+ .tos = RT_TOS(iph->tos) | RTO_CONN } } };
struct rtable *rt;
/* Backwards */
- if (ip_route_output(&rt, iph->saddr, iph->daddr,
- RT_TOS(iph->tos) | RTO_CONN,
- 0)) {
+ if (ip_route_output_key(&rt, &fl)) {
return 0;
}
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 31adecb393ac..8a444327ae9b 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -130,12 +130,19 @@ static void send_reset(struct sk_buff *oldskb, int local)
nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
nskb->nh.iph->ihl);
- /* Routing: if not headed for us, route won't like source */
- if (ip_route_output(&rt, nskb->nh.iph->daddr,
- local ? nskb->nh.iph->saddr : 0,
- RT_TOS(nskb->nh.iph->tos) | RTO_CONN,
- 0) != 0)
- goto free_nskb;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = nskb->nh.iph->daddr,
+ .saddr = (local ?
+ nskb->nh.iph->saddr :
+ 0),
+ .tos = (RT_TOS(nskb->nh.iph->tos) |
+ RTO_CONN) } } };
+
+ /* Routing: if not headed for us, route won't like source */
+ if (ip_route_output_key(&rt, &fl))
+ goto free_nskb;
+ }
dst_release(nskb->dst);
nskb->dst = &rt->u.dst;
@@ -207,9 +214,14 @@ static void send_unreach(struct sk_buff *skb_in, int code)
tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL;
- if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
- return;
-
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = iph->saddr,
+ .saddr = saddr,
+ .tos = RT_TOS(tos) } } };
+ if (ip_route_output_key(&rt, &fl))
+ return;
+ }
/* RFC says return as much as we can without exceeding 576 bytes. */
length = skb_in->len + sizeof(struct iphdr) + sizeof(struct icmphdr);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 833d382a79cf..7322f6ff0fc4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -403,8 +403,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
rfh.saddr = inet->mc_addr;
}
- err = ip_route_output(&rt, daddr, rfh.saddr, tos, ipc.oif);
-
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = rfh.saddr,
+ .tos = tos } },
+ .oif = ipc.oif };
+ err = ip_route_output_key(&rt, &fl);
+ }
if (err)
goto done;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 692053a355e2..4b3b1aba12e5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -251,7 +251,7 @@ static int rt_cache_get_info(char *buffer, char **start, off_t offset,
(int)r->u.dst.advmss + 40,
r->u.dst.window,
(int)((r->u.dst.rtt >> 3) + r->u.dst.rttvar),
- r->key.tos,
+ r->fl.fl4_tos,
r->u.dst.hh ?
atomic_read(&r->u.dst.hh->hh_refcnt) :
-1,
@@ -332,7 +332,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
/* Kill broadcast/multicast entries very aggresively, if they
collide in hash table with more useful entries */
return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rth->key.iif && rth->u.rt_next;
+ rth->fl.iif && rth->u.rt_next;
}
static __inline__ int rt_valuable(struct rtable *rth)
@@ -621,7 +621,7 @@ restart:
write_lock_bh(&rt_hash_table[hash].lock);
while ((rth = *rthp) != NULL) {
- if (memcmp(&rth->key, &rt->key, sizeof(rt->key)) == 0) {
+ if (memcmp(&rth->fl, &rt->fl, sizeof(rt->fl)) == 0) {
/* Put it first */
*rthp = rth->u.rt_next;
rth->u.rt_next = rt_hash_table[hash].chain;
@@ -643,7 +643,7 @@ restart:
/* Try to bind route to arp only if it is output
route or unicast forwarding path.
*/
- if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0) {
+ if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
int err = arp_bind_neighbour(&rt->u.dst);
if (err) {
write_unlock_bh(&rt_hash_table[hash].lock);
@@ -806,11 +806,11 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
while ((rth = *rthp) != NULL) {
struct rtable *rt;
- if (rth->key.dst != daddr ||
- rth->key.src != skeys[i] ||
- rth->key.tos != tos ||
- rth->key.oif != ikeys[k] ||
- rth->key.iif != 0) {
+ if (rth->fl.fl4_dst != daddr ||
+ rth->fl.fl4_src != skeys[i] ||
+ rth->fl.fl4_tos != tos ||
+ rth->fl.oif != ikeys[k] ||
+ rth->fl.iif != 0) {
rthp = &rth->u.rt_next;
continue;
}
@@ -901,14 +901,14 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
ret = NULL;
} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
rt->u.dst.expires) {
- unsigned hash = rt_hash_code(rt->key.dst,
- rt->key.src ^
- (rt->key.oif << 5),
- rt->key.tos);
+ unsigned hash = rt_hash_code(rt->fl.fl4_dst,
+ rt->fl.fl4_src ^
+ (rt->fl.oif << 5),
+ rt->fl.fl4_tos);
#if RT_CACHE_DEBUG >= 1
printk(KERN_DEBUG "ip_rt_advice: redirect to "
"%u.%u.%u.%u/%02x dropped\n",
- NIPQUAD(rt->rt_dst), rt->key.tos);
+ NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
#endif
rt_del(hash, rt);
ret = NULL;
@@ -1052,12 +1052,12 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
read_lock(&rt_hash_table[hash].lock);
for (rth = rt_hash_table[hash].chain; rth;
rth = rth->u.rt_next) {
- if (rth->key.dst == daddr &&
- rth->key.src == skeys[i] &&
+ if (rth->fl.fl4_dst == daddr &&
+ rth->fl.fl4_src == skeys[i] &&
rth->rt_dst == daddr &&
rth->rt_src == iph->saddr &&
- rth->key.tos == tos &&
- rth->key.iif == 0 &&
+ rth->fl.fl4_tos == tos &&
+ rth->fl.iif == 0 &&
!(rth->u.dst.mxlock & (1 << RTAX_MTU))) {
unsigned short mtu = new_mtu;
@@ -1162,9 +1162,9 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
u32 src;
struct fib_result res;
- if (rt->key.iif == 0)
+ if (rt->fl.iif == 0)
src = rt->rt_src;
- else if (fib_lookup(&rt->key, &res) == 0) {
+ else if (fib_lookup(&rt->fl, &res) == 0) {
#ifdef CONFIG_IP_ROUTE_NAT
if (res.type == RTN_NAT)
src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
@@ -1263,13 +1263,13 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
- rth->key.dst = daddr;
+ rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
- rth->key.tos = tos;
+ rth->fl.fl4_tos = tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->key.fwmark = skb->nfmark;
+ rth->fl.fl4_fwmark= skb->nfmark;
#endif
- rth->key.src = saddr;
+ rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_IP_ROUTE_NAT
rth->rt_dst_map = daddr;
@@ -1279,10 +1279,10 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->u.dst.tclassid = itag;
#endif
rth->rt_iif =
- rth->key.iif = dev->ifindex;
+ rth->fl.iif = dev->ifindex;
rth->u.dst.dev = &loopback_dev;
dev_hold(rth->u.dst.dev);
- rth->key.oif = 0;
+ rth->fl.oif = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->rt_type = RTN_MULTICAST;
@@ -1324,10 +1324,19 @@ e_inval:
int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
u8 tos, struct net_device *dev)
{
- struct rt_key key;
struct fib_result res;
struct in_device *in_dev = in_dev_get(dev);
struct in_device *out_dev = NULL;
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = saddr,
+ .tos = tos,
+ .scope = RT_SCOPE_UNIVERSE,
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ .fwmark = skb->nfmark
+#endif
+ } },
+ .iif = dev->ifindex };
unsigned flags = 0;
u32 itag = 0;
struct rtable * rth;
@@ -1341,17 +1350,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
if (!in_dev)
goto out;
- key.dst = daddr;
- key.src = saddr;
- key.tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- key.fwmark = skb->nfmark;
-#endif
- key.iif = dev->ifindex;
- key.oif = 0;
- key.scope = RT_SCOPE_UNIVERSE;
-
- hash = rt_hash_code(daddr, saddr ^ (key.iif << 5), tos);
+ hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5), tos);
/* Check for the most weird martians, which can be not detected
by fib_lookup.
@@ -1375,7 +1374,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
/*
* Now we are ready to route packet.
*/
- if ((err = fib_lookup(&key, &res)) != 0) {
+ if ((err = fib_lookup(&fl, &res)) != 0) {
if (!IN_DEV_FORWARD(in_dev))
goto e_inval;
goto no_route;
@@ -1395,17 +1394,17 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
src_map = fib_rules_policy(saddr, &res, &flags);
if (res.type == RTN_NAT) {
- key.dst = fib_rules_map_destination(daddr, &res);
+ fl.fl4_dst = fib_rules_map_destination(daddr, &res);
fib_res_put(&res);
free_res = 0;
- if (fib_lookup(&key, &res))
+ if (fib_lookup(&fl, &res))
goto e_inval;
free_res = 1;
if (res.type != RTN_UNICAST)
goto e_inval;
flags |= RTCF_DNAT;
}
- key.src = src_map;
+ fl.fl4_src = src_map;
}
#endif
@@ -1431,8 +1430,8 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
goto martian_destination;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res.fi->fib_nhs > 1 && key.oif == 0)
- fib_select_multipath(&key, &res);
+ if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ fib_select_multipath(&fl, &res);
#endif
out_dev = in_dev_get(FIB_RES_DEV(res));
if (out_dev == NULL) {
@@ -1469,26 +1468,26 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
- rth->key.dst = daddr;
+ rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
- rth->key.tos = tos;
+ rth->fl.fl4_tos = tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->key.fwmark = skb->nfmark;
+ rth->fl.fl4_fwmark= skb->nfmark;
#endif
- rth->key.src = saddr;
+ rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
#ifdef CONFIG_IP_ROUTE_NAT
- rth->rt_src_map = key.src;
- rth->rt_dst_map = key.dst;
+ rth->rt_src_map = fl.fl4_src;
+ rth->rt_dst_map = fl.fl4_dst;
if (flags&RTCF_DNAT)
- rth->rt_gateway = key.dst;
+ rth->rt_gateway = fl.fl4_dst;
#endif
rth->rt_iif =
- rth->key.iif = dev->ifindex;
+ rth->fl.iif = dev->ifindex;
rth->u.dst.dev = out_dev->dev;
dev_hold(rth->u.dst.dev);
- rth->key.oif = 0;
+ rth->fl.oif = 0;
rth->rt_spec_dst= spec_dst;
rth->u.dst.input = ip_forward;
@@ -1546,26 +1545,25 @@ local_input:
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
- rth->key.dst = daddr;
+ rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
- rth->key.tos = tos;
+ rth->fl.fl4_tos = tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->key.fwmark = skb->nfmark;
+ rth->fl.fl4_fwmark= skb->nfmark;
#endif
- rth->key.src = saddr;
+ rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_IP_ROUTE_NAT
- rth->rt_dst_map = key.dst;
- rth->rt_src_map = key.src;
+ rth->rt_dst_map = fl.fl4_dst;
+ rth->rt_src_map = fl.fl4_src;
#endif
#ifdef CONFIG_NET_CLS_ROUTE
rth->u.dst.tclassid = itag;
#endif
rth->rt_iif =
- rth->key.iif = dev->ifindex;
+ rth->fl.iif = dev->ifindex;
rth->u.dst.dev = &loopback_dev;
dev_hold(rth->u.dst.dev);
- rth->key.oif = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->u.dst.input= ip_local_deliver;
@@ -1643,14 +1641,14 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
read_lock(&rt_hash_table[hash].lock);
for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.rt_next) {
- if (rth->key.dst == daddr &&
- rth->key.src == saddr &&
- rth->key.iif == iif &&
- rth->key.oif == 0 &&
+ if (rth->fl.fl4_dst == daddr &&
+ rth->fl.fl4_src == saddr &&
+ rth->fl.iif == iif &&
+ rth->fl.oif == 0 &&
#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->key.fwmark == skb->nfmark &&
+ rth->fl.fl4_fwmark == skb->nfmark &&
#endif
- rth->key.tos == tos) {
+ rth->fl.fl4_tos == tos) {
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
@@ -1699,9 +1697,22 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
* Major route resolver routine.
*/
-int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
+int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
{
- struct rt_key key;
+ u32 tos = oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK);
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = oldflp->fl4_dst,
+ .saddr = oldflp->fl4_src,
+ .tos = tos & IPTOS_RT_MASK,
+ .scope = ((tos & RTO_ONLINK) ?
+ RT_SCOPE_LINK :
+ RT_SCOPE_UNIVERSE),
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ .fwmark = oldflp->fl4_fwmark
+#endif
+ } },
+ .iif = loopback_dev.ifindex,
+ .oif = oldflp->oif };
struct fib_result res;
unsigned flags = 0;
struct rtable *rth;
@@ -1709,33 +1720,21 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
unsigned hash;
int free_res = 0;
int err;
- u32 tos;
-
- tos = oldkey->tos & (IPTOS_RT_MASK | RTO_ONLINK);
- key.dst = oldkey->dst;
- key.src = oldkey->src;
- key.tos = tos & IPTOS_RT_MASK;
- key.iif = loopback_dev.ifindex;
- key.oif = oldkey->oif;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- key.fwmark = oldkey->fwmark;
-#endif
- key.scope = (tos & RTO_ONLINK) ? RT_SCOPE_LINK :
- RT_SCOPE_UNIVERSE;
+
res.fi = NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
res.r = NULL;
#endif
- if (oldkey->src) {
+ if (oldflp->fl4_src) {
err = -EINVAL;
- if (MULTICAST(oldkey->src) ||
- BADCLASS(oldkey->src) ||
- ZERONET(oldkey->src))
+ if (MULTICAST(oldflp->fl4_src) ||
+ BADCLASS(oldflp->fl4_src) ||
+ ZERONET(oldflp->fl4_src))
goto out;
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(oldkey->src);
+ dev_out = ip_dev_find(oldflp->fl4_src);
if (dev_out == NULL)
goto out;
@@ -1747,8 +1746,8 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
of another iface. --ANK
*/
- if (oldkey->oif == 0
- && (MULTICAST(oldkey->dst) || oldkey->dst == 0xFFFFFFFF)) {
+ if (oldflp->oif == 0
+ && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) {
/* Special hack: user can direct multicasts
and limited broadcast via necessary interface
without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
@@ -1764,15 +1763,15 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
Luckily, this hack is good workaround.
*/
- key.oif = dev_out->ifindex;
+ fl.oif = dev_out->ifindex;
goto make_route;
}
if (dev_out)
dev_put(dev_out);
dev_out = NULL;
}
- if (oldkey->oif) {
- dev_out = dev_get_by_index(oldkey->oif);
+ if (oldflp->oif) {
+ dev_out = dev_get_by_index(oldflp->oif);
err = -ENODEV;
if (dev_out == NULL)
goto out;
@@ -1781,39 +1780,39 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
goto out; /* Wrong error code */
}
- if (LOCAL_MCAST(oldkey->dst) || oldkey->dst == 0xFFFFFFFF) {
- if (!key.src)
- key.src = inet_select_addr(dev_out, 0,
- RT_SCOPE_LINK);
+ if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) {
+ if (!fl.fl4_src)
+ fl.fl4_src = inet_select_addr(dev_out, 0,
+ RT_SCOPE_LINK);
goto make_route;
}
- if (!key.src) {
- if (MULTICAST(oldkey->dst))
- key.src = inet_select_addr(dev_out, 0,
- key.scope);
- else if (!oldkey->dst)
- key.src = inet_select_addr(dev_out, 0,
- RT_SCOPE_HOST);
+ if (!fl.fl4_src) {
+ if (MULTICAST(oldflp->fl4_dst))
+ fl.fl4_src = inet_select_addr(dev_out, 0,
+ fl.fl4_scope);
+ else if (!oldflp->fl4_dst)
+ fl.fl4_src = inet_select_addr(dev_out, 0,
+ RT_SCOPE_HOST);
}
}
- if (!key.dst) {
- key.dst = key.src;
- if (!key.dst)
- key.dst = key.src = htonl(INADDR_LOOPBACK);
+ if (!fl.fl4_dst) {
+ fl.fl4_dst = fl.fl4_src;
+ if (!fl.fl4_dst)
+ fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
if (dev_out)
dev_put(dev_out);
dev_out = &loopback_dev;
dev_hold(dev_out);
- key.oif = loopback_dev.ifindex;
+ fl.oif = loopback_dev.ifindex;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
- if (fib_lookup(&key, &res)) {
+ if (fib_lookup(&fl, &res)) {
res.fi = NULL;
- if (oldkey->oif) {
+ if (oldflp->oif) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
@@ -1832,9 +1831,9 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
likely IPv6, but we do not.
*/
- if (key.src == 0)
- key.src = inet_select_addr(dev_out, 0,
- RT_SCOPE_LINK);
+ if (fl.fl4_src == 0)
+ fl.fl4_src = inet_select_addr(dev_out, 0,
+ RT_SCOPE_LINK);
res.type = RTN_UNICAST;
goto make_route;
}
@@ -1849,13 +1848,13 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
goto e_inval;
if (res.type == RTN_LOCAL) {
- if (!key.src)
- key.src = key.dst;
+ if (!fl.fl4_src)
+ fl.fl4_src = fl.fl4_dst;
if (dev_out)
dev_put(dev_out);
dev_out = &loopback_dev;
dev_hold(dev_out);
- key.oif = dev_out->ifindex;
+ fl.oif = dev_out->ifindex;
if (res.fi)
fib_info_put(res.fi);
res.fi = NULL;
@@ -1864,31 +1863,31 @@ int ip_route_output_slow(struct rtable **rp, const struct rt_key *oldkey)
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res.fi->fib_nhs > 1 && key.oif == 0)
- fib_select_multipath(&key, &res);
+ if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ fib_select_multipath(&fl, &res);
else
#endif
- if (!res.prefixlen && res.type == RTN_UNICAST && !key.oif)
- fib_select_default(&key, &res);
+ if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
+ fib_select_default(&fl, &res);
- if (!key.src)
- key.src = FIB_RES_PREFSRC(res);
+ if (!fl.fl4_src)
+ fl.fl4_src = FIB_RES_PREFSRC(res);
if (dev_out)
dev_put(dev_out);
dev_out = FIB_RES_DEV(res);
dev_hold(dev_out);
- key.oif = dev_out->ifindex;
+ fl.oif = dev_out->ifindex;
make_route:
- if (LOOPBACK(key.src) && !(dev_out->flags&IFF_LOOPBACK))
+ if (LOOPBACK(fl.fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
goto e_inval;
- if (key.dst == 0xFFFFFFFF)
+ if (fl.fl4_dst == 0xFFFFFFFF)
res.type = RTN_BROADCAST;
- else if (MULTICAST(key.dst))
+ else if (MULTICAST(fl.fl4_dst))
res.type = RTN_MULTICAST;
- else if (BADCLASS(key.dst) || ZERONET(key.dst))
+ else if (BADCLASS(fl.fl4_dst) || ZERONET(fl.fl4_dst))
goto e_inval;
if (dev_out->flags & IFF_LOOPBACK)
@@ -1904,7 +1903,7 @@ make_route:
flags |= RTCF_MULTICAST|RTCF_LOCAL;
read_lock(&inetdev_lock);
if (!__in_dev_get(dev_out) ||
- !ip_check_mc(__in_dev_get(dev_out), oldkey->dst))
+ !ip_check_mc(__in_dev_get(dev_out), oldflp->fl4_dst))
flags &= ~RTCF_LOCAL;
read_unlock(&inetdev_lock);
/* If multicast route do not exist use
@@ -1923,25 +1922,24 @@ make_route:
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
- rth->key.dst = oldkey->dst;
- rth->key.tos = tos;
- rth->key.src = oldkey->src;
- rth->key.iif = 0;
- rth->key.oif = oldkey->oif;
+ rth->fl.fl4_dst = oldflp->fl4_dst;
+ rth->fl.fl4_tos = tos;
+ rth->fl.fl4_src = oldflp->fl4_src;
+ rth->fl.oif = oldflp->oif;
#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->key.fwmark = oldkey->fwmark;
+ rth->fl.fl4_fwmark= oldflp->fl4_fwmark;
#endif
- rth->rt_dst = key.dst;
- rth->rt_src = key.src;
+ rth->rt_dst = fl.fl4_dst;
+ rth->rt_src = fl.fl4_src;
#ifdef CONFIG_IP_ROUTE_NAT
- rth->rt_dst_map = key.dst;
- rth->rt_src_map = key.src;
+ rth->rt_dst_map = fl.fl4_dst;
+ rth->rt_src_map = fl.fl4_src;
#endif
- rth->rt_iif = oldkey->oif ? : dev_out->ifindex;
+ rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
rth->u.dst.dev = dev_out;
dev_hold(dev_out);
- rth->rt_gateway = key.dst;
- rth->rt_spec_dst= key.src;
+ rth->rt_gateway = fl.fl4_dst;
+ rth->rt_spec_dst= fl.fl4_src;
rth->u.dst.output=ip_output;
@@ -1949,10 +1947,10 @@ make_route:
if (flags & RTCF_LOCAL) {
rth->u.dst.input = ip_local_deliver;
- rth->rt_spec_dst = key.dst;
+ rth->rt_spec_dst = fl.fl4_dst;
}
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
- rth->rt_spec_dst = key.src;
+ rth->rt_spec_dst = fl.fl4_src;
if (flags & RTCF_LOCAL && !(dev_out->flags & IFF_LOOPBACK)) {
rth->u.dst.output = ip_mc_output;
rt_cache_stat[smp_processor_id()].out_slow_mc++;
@@ -1962,7 +1960,7 @@ make_route:
struct in_device *in_dev = in_dev_get(dev_out);
if (in_dev) {
if (IN_DEV_MFORWARD(in_dev) &&
- !LOCAL_MCAST(oldkey->dst)) {
+ !LOCAL_MCAST(oldflp->fl4_dst)) {
rth->u.dst.input = ip_mr_input;
rth->u.dst.output = ip_mc_output;
}
@@ -1976,7 +1974,7 @@ make_route:
rth->rt_flags = flags;
- hash = rt_hash_code(oldkey->dst, oldkey->src ^ (oldkey->oif << 5), tos);
+ hash = rt_hash_code(oldflp->fl4_dst, oldflp->fl4_src ^ (oldflp->oif << 5), tos);
err = rt_intern_hash(hash, rth, rp);
done:
if (free_res)
@@ -1993,23 +1991,23 @@ e_nobufs:
goto done;
}
-int ip_route_output_key(struct rtable **rp, const struct rt_key *key)
+int ip_route_output_key(struct rtable **rp, const struct flowi *flp)
{
unsigned hash;
struct rtable *rth;
- hash = rt_hash_code(key->dst, key->src ^ (key->oif << 5), key->tos);
+ hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5), flp->fl4_tos);
read_lock_bh(&rt_hash_table[hash].lock);
for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.rt_next) {
- if (rth->key.dst == key->dst &&
- rth->key.src == key->src &&
- rth->key.iif == 0 &&
- rth->key.oif == key->oif &&
+ if (rth->fl.fl4_dst == flp->fl4_dst &&
+ rth->fl.fl4_src == flp->fl4_src &&
+ rth->fl.iif == 0 &&
+ rth->fl.oif == flp->oif &&
#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->key.fwmark == key->fwmark &&
+ rth->fl.fl4_fwmark == flp->fl4_fwmark &&
#endif
- !((rth->key.tos ^ key->tos) &
+ !((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK))) {
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
@@ -2022,7 +2020,7 @@ int ip_route_output_key(struct rtable **rp, const struct rt_key *key)
}
read_unlock_bh(&rt_hash_table[hash].lock);
- return ip_route_output_slow(rp, key);
+ return ip_route_output_slow(rp, flp);
}
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
@@ -2042,7 +2040,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
- r->rtm_tos = rt->key.tos;
+ r->rtm_tos = rt->fl.fl4_tos;
r->rtm_table = RT_TABLE_MAIN;
r->rtm_type = rt->rt_type;
r->rtm_scope = RT_SCOPE_UNIVERSE;
@@ -2051,9 +2049,9 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
- if (rt->key.src) {
+ if (rt->fl.fl4_src) {
r->rtm_src_len = 32;
- RTA_PUT(skb, RTA_SRC, 4, &rt->key.src);
+ RTA_PUT(skb, RTA_SRC, 4, &rt->fl.fl4_src);
}
if (rt->u.dst.dev)
RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
@@ -2061,9 +2059,9 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (rt->u.dst.tclassid)
RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
#endif
- if (rt->key.iif)
+ if (rt->fl.iif)
RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
- else if (rt->rt_src != rt->key.src)
+ else if (rt->rt_src != rt->fl.fl4_src)
RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_src);
if (rt->rt_dst != rt->rt_gateway)
RTA_PUT(skb, RTA_GATEWAY, 4, &rt->rt_gateway);
@@ -2089,7 +2087,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
eptr = (struct rtattr*)skb->tail;
#endif
RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
- if (rt->key.iif) {
+ if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
u32 dst = rt->rt_dst;
@@ -2109,7 +2107,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
}
} else
#endif
- RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->key.iif);
+ RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
}
nlh->nlmsg_len = skb->tail - b;
@@ -2163,10 +2161,14 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
if (!err && rt->u.dst.error)
err = -rt->u.dst.error;
} else {
+ struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
+ .saddr = src,
+ .tos = rtm->rtm_tos } } };
int oif = 0;
if (rta[RTA_OIF - 1])
memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
- err = ip_route_output(&rt, dst, src, rtm->rtm_tos, oif);
+ fl.oif = oif;
+ err = ip_route_output_key(&rt, &fl);
}
if (err) {
kfree_skb(skb);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 4061c648dd19..b6de66f262af 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -171,14 +171,17 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
* hasn't changed since we received the original syn, but I see
* no easy way to do this.
*/
- if (ip_route_output(&rt,
- opt &&
- opt->srr ? opt->faddr : req->af.v4_req.rmt_addr,
- req->af.v4_req.loc_addr,
- RT_CONN_FLAGS(sk),
- 0)) {
- tcp_openreq_free(req);
- goto out;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = ((opt && opt->srr) ?
+ opt->faddr :
+ req->af.v4_req.rmt_addr),
+ .saddr = req->af.v4_req.loc_addr,
+ .tos = RT_CONN_FLAGS(sk) } } };
+ if (ip_route_output_key(&rt, &fl)) {
+ tcp_openreq_free(req);
+ goto out;
+ }
}
/* Try to redo what tcp_v4_send_synack did. */
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b3b60b30c182..03ed6d44d5cf 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -221,6 +221,8 @@ ctl_table ipv4_table[] = {
&sysctl_icmp_ratemask, sizeof(int), 0644, NULL, &proc_dointvec},
{NET_TCP_TW_REUSE, "tcp_tw_reuse",
&sysctl_tcp_tw_reuse, sizeof(int), 0644, NULL, &proc_dointvec},
+ {NET_TCP_FRTO, "tcp_frto",
+ &sysctl_tcp_frto, sizeof(int), 0644, NULL, &proc_dointvec},
{0}
};
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 56f0e451909b..61844116b184 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -60,6 +60,7 @@
* Pasi Sarolahti,
* Panu Kuhlberg: Experimental audit of TCP (re)transmission
* engine. Lots of bugs are found.
+ * Pasi Sarolahti: F-RTO for dealing with spurious RTOs
*/
#include <linux/config.h>
@@ -86,6 +87,7 @@ int sysctl_tcp_adv_win_scale = 2;
int sysctl_tcp_stdurg = 0;
int sysctl_tcp_rfc1337 = 0;
int sysctl_tcp_max_orphans = NR_FILE;
+int sysctl_tcp_frto = 0;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -968,6 +970,89 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
return flag;
}
+/* RTO occurred, but do not yet enter loss state. Instead, transmit two new
+ * segments to see from the next ACKs whether any data was really missing.
+ * If the RTO was spurious, new ACKs should arrive.
+ */
+void tcp_enter_frto(struct sock *sk)
+{
+ struct tcp_opt *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+
+ tp->frto_counter = 1;
+
+ if (tp->ca_state <= TCP_CA_Disorder ||
+ tp->snd_una == tp->high_seq ||
+ (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
+ tp->prior_ssthresh = tcp_current_ssthresh(tp);
+ tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
+ }
+
+ /* Have to clear retransmission markers here to keep the bookkeeping
+ * in shape, even though we are not yet in Loss state.
+ * If something was really lost, it is eventually caught up
+ * in tcp_enter_frto_loss.
+ */
+ tp->retrans_out = 0;
+ tp->undo_marker = tp->snd_una;
+ tp->undo_retrans = 0;
+
+ for_retrans_queue(skb, sk, tp) {
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
+ }
+ tcp_sync_left_out(tp);
+
+ tp->ca_state = TCP_CA_Open;
+ tp->frto_highmark = tp->snd_nxt;
+}
+
+/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
+ * which indicates that we should follow the traditional RTO recovery,
+ * i.e. mark everything lost and do go-back-N retransmission.
+ */
+void tcp_enter_frto_loss(struct sock *sk)
+{
+ struct tcp_opt *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ int cnt = 0;
+
+ tp->sacked_out = 0;
+ tp->lost_out = 0;
+ tp->fackets_out = 0;
+
+ for_retrans_queue(skb, sk, tp) {
+ cnt++;
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+ if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
+
+ /* Do not mark those segments lost that were
+ * forward transmitted after RTO
+ */
+ if(!after(TCP_SKB_CB(skb)->end_seq,
+ tp->frto_highmark)) {
+ TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
+ tp->lost_out++;
+ }
+ } else {
+ tp->sacked_out++;
+ tp->fackets_out = cnt;
+ }
+ }
+ tcp_sync_left_out(tp);
+
+ tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1;
+ tp->snd_cwnd_cnt = 0;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->undo_marker = 0;
+ tp->frto_counter = 0;
+
+ tp->reordering = min_t(unsigned int, tp->reordering,
+ sysctl_tcp_reordering);
+ tp->ca_state = TCP_CA_Loss;
+ tp->high_seq = tp->frto_highmark;
+ TCP_ECN_queue_cwr(tp);
+}
+
void tcp_clear_retrans(struct tcp_opt *tp)
{
tp->left_out = 0;
@@ -1539,7 +1624,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
/* E. Check state exit conditions. State can be terminated
* when high_seq is ACKed. */
if (tp->ca_state == TCP_CA_Open) {
- BUG_TRAP(tp->retrans_out == 0);
+ if (!sysctl_tcp_frto)
+ BUG_TRAP(tp->retrans_out == 0);
tp->retrans_stamp = 0;
} else if (!before(tp->snd_una, tp->high_seq)) {
switch (tp->ca_state) {
@@ -1910,6 +1996,41 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_opt *tp,
return flag;
}
+static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
+{
+ struct tcp_opt *tp = tcp_sk(sk);
+
+ tcp_sync_left_out(tp);
+
+ if (tp->snd_una == prior_snd_una ||
+ !before(tp->snd_una, tp->frto_highmark)) {
+ /* RTO was caused by loss, start retransmitting in
+ * go-back-N slow start
+ */
+ tcp_enter_frto_loss(sk);
+ return;
+ }
+
+ if (tp->frto_counter == 1) {
+ /* First ACK after RTO advances the window: allow two new
+ * segments out.
+ */
+ tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
+ } else {
+ /* Also the second ACK after RTO advances the window.
+ * The RTO was likely spurious. Reduce cwnd and continue
+ * in congestion avoidance
+ */
+ tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+ tcp_moderate_cwnd(tp);
+ }
+
+ /* F-RTO affects on two new ACKs following RTO.
+ * At latest on third ACK the TCP behavor is back to normal.
+ */
+ tp->frto_counter = (tp->frto_counter + 1) % 3;
+}
+
/* This routine deals with incoming acks, but not outgoing ones. */
static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
{
@@ -1968,6 +2089,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk);
+ if (tp->frto_counter)
+ tcp_process_frto(sk, prior_snd_una);
+
if (tcp_ack_is_dubious(tp, flag)) {
/* Advanve CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) &&
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 754d615c1aa5..1caae171c1a6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1266,11 +1266,15 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
{
struct rtable *rt;
struct ip_options *opt = req->af.v4_req.opt;
-
- if (ip_route_output(&rt, ((opt && opt->srr) ? opt->faddr :
- req->af.v4_req.rmt_addr),
- req->af.v4_req.loc_addr,
- RT_CONN_FLAGS(sk), sk->bound_dev_if)) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = ((opt && opt->srr) ?
+ opt->faddr :
+ req->af.v4_req.rmt_addr),
+ .saddr = req->af.v4_req.loc_addr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .oif = sk->bound_dev_if };
+
+ if (ip_route_output_key(&rt, &fl)) {
IP_INC_STATS_BH(IpOutNoRoutes);
return NULL;
}
@@ -1909,8 +1913,15 @@ int tcp_v4_rebuild_header(struct sock *sk)
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
- err = ip_route_output(&rt, daddr, inet->saddr,
- RT_CONN_FLAGS(sk), sk->bound_dev_if);
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = inet->saddr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .oif = sk->bound_dev_if };
+
+ err = ip_route_output_key(&rt, &fl);
+ }
if (!err) {
__sk_dst_set(sk, &rt->u.dst);
tcp_v4_setup_caps(sk, &rt->u.dst);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 5c6e42952d46..87fc4e8fc332 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -718,6 +718,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newtp->snd_cwnd = 2;
newtp->snd_cwnd_cnt = 0;
+ newtp->frto_counter = 0;
+ newtp->frto_highmark = 0;
+
newtp->ca_state = TCP_CA_Open;
tcp_init_xmit_timers(newsk);
skb_queue_head_init(&newtp->out_of_order_queue);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d9a4e91003ca..619e89ea2c49 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -374,7 +374,11 @@ static void tcp_retransmit_timer(struct sock *sk)
}
}
- tcp_enter_loss(sk, 0);
+ if (tcp_use_frto(sk)) {
+ tcp_enter_frto(sk);
+ } else {
+ tcp_enter_loss(sk, 0);
+ }
if (tcp_retransmit_skb(sk, skb_peek(&sk->write_queue)) > 0) {
/* Retransmission failed because of local congestion,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 44b83123c4e8..9152416eef20 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -529,7 +529,12 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
rt = (struct rtable*)sk_dst_check(sk, 0);
if (rt == NULL) {
- err = ip_route_output(&rt, daddr, ufh.saddr, tos, ipc.oif);
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = daddr,
+ .saddr = ufh.saddr,
+ .tos = tos } },
+ .oif = ipc.oif };
+ err = ip_route_output_key(&rt, &fl);
if (err)
goto out;
diff --git a/net/ipv6/Config.in b/net/ipv6/Config.in
index da940bb54629..1a58a98faeeb 100644
--- a/net/ipv6/Config.in
+++ b/net/ipv6/Config.in
@@ -2,9 +2,6 @@
# IPv6 configuration
#
-#bool ' IPv6: flow policy support' CONFIG_RT6_POLICY
-#bool ' IPv6: firewall support' CONFIG_IPV6_FIREWALL
-
if [ "$CONFIG_NETFILTER" != "n" ]; then
source net/ipv6/netfilter/Config.in
fi
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 31f8950011b8..bb23f068cd4a 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -12,7 +12,6 @@ ipv6-objs := af_inet6.o ip6_output.o ip6_input.o addrconf.o sit.o \
exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
ip6_flowlabel.o ipv6_syms.o
-#obj-$(CONFIG_IPV6_FIREWALL) += ip6_fw.o
obj-$(CONFIG_NETFILTER) += netfilter/
include $(TOPDIR)/Rules.make
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5c9d8b0e6f04..3781e05a9cae 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -452,7 +452,6 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt)
*/
if ((iter->rt6i_dev == rt->rt6i_dev) &&
- (iter->rt6i_flowr == rt->rt6i_flowr) &&
(ipv6_addr_cmp(&iter->rt6i_gateway,
&rt->rt6i_gateway) == 0)) {
if (!(iter->rt6i_flags&RTF_EXPIRES))
diff --git a/net/ipv6/ip6_fw.c b/net/ipv6/ip6_fw.c
deleted file mode 100644
index aa1234efcb31..000000000000
--- a/net/ipv6/ip6_fw.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * IPv6 Firewall
- * Linux INET6 implementation
- *
- * Authors:
- * Pedro Roque <roque@di.fc.ul.pt>
- *
- * $Id: ip6_fw.c,v 1.16 2001/10/31 08:17:58 davem Exp $
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/route.h>
-#include <linux/netdevice.h>
-#include <linux/in6.h>
-#include <linux/udp.h>
-#include <linux/init.h>
-
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/ip6_fw.h>
-#include <net/netlink.h>
-
-static unsigned long ip6_fw_rule_cnt;
-static struct ip6_fw_rule ip6_fw_rule_list = {
- {0},
- NULL, NULL,
- {0},
- IP6_FW_REJECT
-};
-
-static int ip6_fw_accept(struct dst_entry *dst, struct fl_acc_args *args);
-
-struct flow_rule_ops ip6_fw_ops = {
- ip6_fw_accept
-};
-
-
-static struct rt6_info ip6_fw_null_entry = {
- {{NULL, 0, 0, NULL,
- 0, 0, 0, 0, 0, 0, 0, 0, -ENETUNREACH, NULL, NULL,
- ip6_pkt_discard, ip6_pkt_discard, NULL}},
- NULL, {{{0}}}, 256, RTF_REJECT|RTF_NONEXTHOP, ~0UL,
- 0, &ip6_fw_rule_list, {{{{0}}}, 128}, {{{{0}}}, 128}
-};
-
-static struct fib6_node ip6_fw_fib = {
- NULL, NULL, NULL, NULL,
- &ip6_fw_null_entry,
- 0, RTN_ROOT|RTN_TL_ROOT, 0
-};
-
-rwlock_t ip6_fw_lock = RW_LOCK_UNLOCKED;
-
-
-static void ip6_rule_add(struct ip6_fw_rule *rl)
-{
- struct ip6_fw_rule *next;
-
- write_lock_bh(&ip6_fw_lock);
- ip6_fw_rule_cnt++;
- next = &ip6_fw_rule_list;
- rl->next = next;
- rl->prev = next->prev;
- rl->prev->next = rl;
- next->prev = rl;
- write_unlock_bh(&ip6_fw_lock);
-}
-
-static void ip6_rule_del(struct ip6_fw_rule *rl)
-{
- struct ip6_fw_rule *next, *prev;
-
- write_lock_bh(&ip6_fw_lock);
- ip6_fw_rule_cnt--;
- next = rl->next;
- prev = rl->prev;
- next->prev = prev;
- prev->next = next;
- write_unlock_bh(&ip6_fw_lock);
-}
-
-static __inline__ struct ip6_fw_rule * ip6_fwrule_alloc(void)
-{
- struct ip6_fw_rule *rl;
-
- rl = kmalloc(sizeof(struct ip6_fw_rule), GFP_ATOMIC);
- if (rl)
- {
- memset(rl, 0, sizeof(struct ip6_fw_rule));
- rl->flowr.ops = &ip6_fw_ops;
- }
- return rl;
-}
-
-static __inline__ void ip6_fwrule_free(struct ip6_fw_rule * rl)
-{
- kfree(rl);
-}
-
-static __inline__ int port_match(int rl_port, int fl_port)
-{
- int res = 0;
- if (rl_port == 0 || (rl_port == fl_port))
- res = 1;
- return res;
-}
-
-static int ip6_fw_accept_trans(struct ip6_fw_rule *rl,
- struct fl_acc_args *args)
-{
- int res = FLOWR_NODECISION;
- int proto = 0;
- int sport = 0;
- int dport = 0;
-
- switch (args->type) {
- case FL_ARG_FORWARD:
- {
- struct sk_buff *skb = args->fl_u.skb;
- struct ipv6hdr *hdr = skb->nh.ipv6h;
- int len;
-
- len = skb->len - sizeof(struct ipv6hdr);
-
- proto = hdr->nexthdr;
-
- switch (proto) {
- case IPPROTO_TCP:
- {
- struct tcphdr *th;
-
- if (len < sizeof(struct tcphdr)) {
- res = FLOWR_ERROR;
- goto out;
- }
- th = (struct tcphdr *)(hdr + 1);
- sport = th->source;
- dport = th->dest;
- break;
- }
- case IPPROTO_UDP:
- {
- struct udphdr *uh;
-
- if (len < sizeof(struct udphdr)) {
- res = FLOWR_ERROR;
- goto out;
- }
- uh = (struct udphdr *)(hdr + 1);
- sport = uh->source;
- dport = uh->dest;
- break;
- }
- default:
- goto out;
- };
- break;
- }
-
- case FL_ARG_ORIGIN:
- {
- proto = args->fl_u.fl_o.flow->proto;
-
- if (proto == IPPROTO_ICMPV6) {
- goto out;
- } else {
- sport = args->fl_u.fl_o.flow->uli_u.ports.sport;
- dport = args->fl_u.fl_o.flow->uli_u.ports.dport;
- }
- break;
- }
-
- if (proto == rl->info.proto &&
- port_match(args->fl_u.fl_o.flow->uli_u.ports.sport, sport) &&
- port_match(args->fl_u.fl_o.flow->uli_u.ports.dport, dport)) {
- if (rl->policy & IP6_FW_REJECT)
- res = FLOWR_SELECT;
- else
- res = FLOWR_CLEAR;
- }
-
- default:
-#if IP6_FW_DEBUG >= 1
- printk(KERN_DEBUG "ip6_fw_accept: unknown arg type\n");
-#endif
- goto out;
- };
-
-out:
- return res;
-}
-
-static int ip6_fw_accept(struct dst_entry *dst, struct fl_acc_args *args)
-{
- struct rt6_info *rt;
- struct ip6_fw_rule *rl;
- int proto;
- int res = FLOWR_NODECISION;
-
- rt = (struct rt6_info *) dst;
- rl = (struct ip6_fw_rule *) rt->rt6i_flowr;
-
- proto = rl->info.proto;
-
- switch (proto) {
- case 0:
- if (rl->policy & IP6_FW_REJECT)
- res = FLOWR_SELECT;
- else
- res = FLOWR_CLEAR;
- break;
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- res = ip6_fw_accept_trans(rl, args);
- break;
- case IPPROTO_ICMPV6:
- };
-
- return res;
-}
-
-static struct dst_entry * ip6_fw_dup(struct dst_entry *frule,
- struct dst_entry *rt,
- struct fl_acc_args *args)
-{
- struct ip6_fw_rule *rl;
- struct rt6_info *nrt;
- struct rt6_info *frt;
-
- frt = (struct rt6_info *) frule;
-
- rl = (struct ip6_fw_rule *) frt->rt6i_flowr;
-
- nrt = ip6_rt_copy((struct rt6_info *) rt);
-
- if (nrt) {
- nrt->u.dst.input = frule->input;
- nrt->u.dst.output = frule->output;
-
- nrt->rt6i_flowr = flow_clone(frt->rt6i_flowr);
-
- nrt->rt6i_flags |= RTF_CACHE;
- nrt->rt6i_tstamp = jiffies;
- }
-
- return (struct dst_entry *) nrt;
-}
-
-int ip6_fw_reject(struct sk_buff *skb)
-{
-#if IP6_FW_DEBUG >= 1
- printk(KERN_DEBUG "packet rejected: \n");
-#endif
-
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADM_PROHIBITED, 0,
- skb->dev);
- /*
- * send it via netlink, as (rule, skb)
- */
-
- kfree_skb(skb);
- return 0;
-}
-
-int ip6_fw_discard(struct sk_buff *skb)
-{
- printk(KERN_DEBUG "ip6_fw: BUG fw_reject called\n");
- kfree_skb(skb);
- return 0;
-}
-
-int ip6_fw_msg_add(struct ip6_fw_msg *msg)
-{
- struct in6_rtmsg rtmsg;
- struct ip6_fw_rule *rl;
- struct rt6_info *rt;
- int err;
-
- ipv6_addr_copy(&rtmsg.rtmsg_dst, &msg->dst);
- ipv6_addr_copy(&rtmsg.rtmsg_src, &msg->src);
- rtmsg.rtmsg_dst_len = msg->dst_len;
- rtmsg.rtmsg_src_len = msg->src_len;
- rtmsg.rtmsg_metric = IP6_RT_PRIO_FW;
-
- rl = ip6_fwrule_alloc();
-
- if (rl == NULL)
- return -ENOMEM;
-
- rl->policy = msg->policy;
- rl->info.proto = msg->proto;
- rl->info.uli_u.data = msg->u.data;
-
- rtmsg.rtmsg_flags = RTF_NONEXTHOP|RTF_POLICY;
- err = ip6_route_add(&rtmsg);
-
- if (err) {
- ip6_fwrule_free(rl);
- return err;
- }
-
- /* The rest will not work for now. --ABK (989725) */
-
-#ifndef notdef
- ip6_fwrule_free(rl);
- return -EPERM;
-#else
- rt->u.dst.error = -EPERM;
-
- if (msg->policy == IP6_FW_ACCEPT) {
- /*
- * Accept rules are never selected
- * (i.e. packets use normal forwarding)
- */
- rt->u.dst.input = ip6_fw_discard;
- rt->u.dst.output = ip6_fw_discard;
- } else {
- rt->u.dst.input = ip6_fw_reject;
- rt->u.dst.output = ip6_fw_reject;
- }
-
- ip6_rule_add(rl);
-
- rt->rt6i_flowr = flow_clone((struct flow_rule *)rl);
-
- return 0;
-#endif
-}
-
-static int ip6_fw_msgrcv(int unit, struct sk_buff *skb)
-{
- int count = 0;
-
- while (skb->len) {
- struct ip6_fw_msg *msg;
-
- if (skb->len < sizeof(struct ip6_fw_msg)) {
- count = -EINVAL;
- break;
- }
-
- msg = (struct ip6_fw_msg *) skb->data;
- skb_pull(skb, sizeof(struct ip6_fw_msg));
- count += sizeof(struct ip6_fw_msg);
-
- switch (msg->action) {
- case IP6_FW_MSG_ADD:
- ip6_fw_msg_add(msg);
- break;
- case IP6_FW_MSG_DEL:
- break;
- default:
- return -EINVAL;
- };
- }
-
- return count;
-}
-
-static void ip6_fw_destroy(struct flow_rule *rl)
-{
- ip6_fwrule_free((struct ip6_fw_rule *)rl);
-}
-
-#ifdef MODULE
-#define ip6_fw_init module_init
-#endif
-
-void __init ip6_fw_init(void)
-{
- netlink_attach(NETLINK_IP6_FW, ip6_fw_msgrcv);
-}
-
-#ifdef MODULE
-void cleanup_module(void)
-{
- netlink_detach(NETLINK_IP6_FW);
-}
-#endif
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 3f5f1344e249..22db9205b8a2 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -157,7 +157,7 @@ ip6t_local_hook(unsigned int hook,
hop_limit = (*pskb)->nh.ipv6h->hop_limit;
/* flowlabel and prio (includes version, which shouldn't change either */
- flowlabel = (u_int32_t) (*pskb)->nh.ipv6h;
+ flowlabel = *((u_int32_t *) (*pskb)->nh.ipv6h);
ret = ip6t_do_table(pskb, hook, in, out, &packet_mangler, NULL);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 65059ed0784f..68136d37eb51 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -56,8 +56,6 @@
#include <linux/sysctl.h>
#endif
-#undef CONFIG_RT6_POLICY
-
/* Set to 3 to get tracing. */
#define RT6_DEBUG 2
@@ -103,16 +101,22 @@ static struct dst_ops ip6_dst_ops = {
};
struct rt6_info ip6_null_entry = {
- {{NULL, ATOMIC_INIT(1), 1, &loopback_dev,
- -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- -ENETUNREACH, NULL, NULL,
- ip6_pkt_discard, ip6_pkt_discard,
-#ifdef CONFIG_NET_CLS_ROUTE
- 0,
-#endif
- &ip6_dst_ops}},
- NULL, {{{0}}}, RTF_REJECT|RTF_NONEXTHOP, ~0U,
- 255, ATOMIC_INIT(1), {NULL}, {{{{0}}}, 0}, {{{{0}}}, 0}
+ .u = {
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+ .__use = 1,
+ .dev = &loopback_dev,
+ .obsolete = -1,
+ .error = -ENETUNREACH,
+ .input = ip6_pkt_discard,
+ .output = ip6_pkt_discard,
+ .ops = &ip6_dst_ops
+ }
+ },
+ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
+ .rt6i_metric = ~(u32) 0,
+ .rt6i_hoplimit = 255,
+ .rt6i_ref = ATOMIC_INIT(1),
};
struct fib6_node ip6_routing_table = {
@@ -121,24 +125,6 @@ struct fib6_node ip6_routing_table = {
0, RTN_ROOT|RTN_TL_ROOT|RTN_RTINFO, 0
};
-#ifdef CONFIG_RT6_POLICY
-int ip6_rt_policy = 0;
-
-struct pol_chain *rt6_pol_list = NULL;
-
-
-static int rt6_flow_match_in(struct rt6_info *rt, struct sk_buff *skb);
-static int rt6_flow_match_out(struct rt6_info *rt, struct sock *sk);
-
-static struct rt6_info *rt6_flow_lookup(struct rt6_info *rt,
- struct in6_addr *daddr,
- struct in6_addr *saddr,
- struct fl_acc_args *args);
-
-#else
-#define ip6_rt_policy (0)
-#endif
-
/* Protects all the ip6 fib */
rwlock_t rt6_lock = RW_LOCK_UNLOCKED;
@@ -386,38 +372,6 @@ static struct rt6_info *rt6_cow(struct rt6_info *ort, struct in6_addr *daddr,
return &ip6_null_entry;
}
-#ifdef CONFIG_RT6_POLICY
-static __inline__ struct rt6_info *rt6_flow_lookup_in(struct rt6_info *rt,
- struct sk_buff *skb)
-{
- struct in6_addr *daddr, *saddr;
- struct fl_acc_args arg;
-
- arg.type = FL_ARG_FORWARD;
- arg.fl_u.skb = skb;
-
- saddr = &skb->nh.ipv6h->saddr;
- daddr = &skb->nh.ipv6h->daddr;
-
- return rt6_flow_lookup(rt, daddr, saddr, &arg);
-}
-
-static __inline__ struct rt6_info *rt6_flow_lookup_out(struct rt6_info *rt,
- struct sock *sk,
- struct flowi *fl)
-{
- struct fl_acc_args arg;
-
- arg.type = FL_ARG_ORIGIN;
- arg.fl_u.fl_o.sk = sk;
- arg.fl_u.fl_o.flow = fl;
-
- return rt6_flow_lookup(rt, fl->nl_u.ip6_u.daddr, fl->nl_u.ip6_u.saddr,
- &arg);
-}
-
-#endif
-
#define BACKTRACK() \
if (rt == &ip6_null_entry && strict) { \
while ((fn = fn->parent) != NULL) { \
@@ -450,53 +404,29 @@ restart:
rt = fn->leaf;
if ((rt->rt6i_flags & RTF_CACHE)) {
- if (ip6_rt_policy == 0) {
- rt = rt6_device_match(rt, skb->dev->ifindex, strict);
- BACKTRACK();
- dst_clone(&rt->u.dst);
- goto out;
- }
-
-#ifdef CONFIG_RT6_POLICY
- if ((rt->rt6i_flags & RTF_FLOW)) {
- struct rt6_info *sprt;
-
- for (sprt = rt; sprt; sprt = sprt->u.next) {
- if (rt6_flow_match_in(sprt, skb)) {
- rt = sprt;
- dst_clone(&rt->u.dst);
- goto out;
- }
- }
- }
-#endif
+ rt = rt6_device_match(rt, skb->dev->ifindex, strict);
+ BACKTRACK();
+ dst_clone(&rt->u.dst);
+ goto out;
}
rt = rt6_device_match(rt, skb->dev->ifindex, 0);
BACKTRACK();
- if (ip6_rt_policy == 0) {
- if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
- read_unlock_bh(&rt6_lock);
+ if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
+ read_unlock_bh(&rt6_lock);
- rt = rt6_cow(rt, &skb->nh.ipv6h->daddr,
- &skb->nh.ipv6h->saddr);
+ rt = rt6_cow(rt, &skb->nh.ipv6h->daddr,
+ &skb->nh.ipv6h->saddr);
- if (rt->u.dst.error != -EEXIST || --attempts <= 0)
- goto out2;
- /* Race condition! In the gap, when rt6_lock was
- released someone could insert this route. Relookup.
- */
- goto relookup;
- }
- dst_clone(&rt->u.dst);
- } else {
-#ifdef CONFIG_RT6_POLICY
- rt = rt6_flow_lookup_in(rt, skb);
-#else
- /* NEVER REACHED */
-#endif
+ if (rt->u.dst.error != -EEXIST || --attempts <= 0)
+ goto out2;
+ /* Race condition! In the gap, when rt6_lock was
+ released someone could insert this route. Relookup.
+ */
+ goto relookup;
}
+ dst_clone(&rt->u.dst);
out:
read_unlock_bh(&rt6_lock);
@@ -525,26 +455,10 @@ restart:
rt = fn->leaf;
if ((rt->rt6i_flags & RTF_CACHE)) {
- if (ip6_rt_policy == 0) {
- rt = rt6_device_match(rt, fl->oif, strict);
- BACKTRACK();
- dst_clone(&rt->u.dst);
- goto out;
- }
-
-#ifdef CONFIG_RT6_POLICY
- if ((rt->rt6i_flags & RTF_FLOW)) {
- struct rt6_info *sprt;
-
- for (sprt = rt; sprt; sprt = sprt->u.next) {
- if (rt6_flow_match_out(sprt, sk)) {
- rt = sprt;
- dst_clone(&rt->u.dst);
- goto out;
- }
- }
- }
-#endif
+ rt = rt6_device_match(rt, fl->oif, strict);
+ BACKTRACK();
+ dst_clone(&rt->u.dst);
+ goto out;
}
if (rt->rt6i_flags & RTF_DEFAULT) {
if (rt->rt6i_metric >= IP6_RT_PRIO_ADDRCONF)
@@ -554,29 +468,21 @@ restart:
BACKTRACK();
}
- if (ip6_rt_policy == 0) {
- if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
- read_unlock_bh(&rt6_lock);
+ if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
+ read_unlock_bh(&rt6_lock);
- rt = rt6_cow(rt, fl->nl_u.ip6_u.daddr,
- fl->nl_u.ip6_u.saddr);
+ rt = rt6_cow(rt, fl->nl_u.ip6_u.daddr,
+ fl->nl_u.ip6_u.saddr);
- if (rt->u.dst.error != -EEXIST || --attempts <= 0)
- goto out2;
+ if (rt->u.dst.error != -EEXIST || --attempts <= 0)
+ goto out2;
- /* Race condition! In the gap, when rt6_lock was
- released someone could insert this route. Relookup.
- */
- goto relookup;
- }
- dst_clone(&rt->u.dst);
- } else {
-#ifdef CONFIG_RT6_POLICY
- rt = rt6_flow_lookup_out(rt, sk, fl);
-#else
- /* NEVER REACHED */
-#endif
+ /* Race condition! In the gap, when rt6_lock was
+ released someone could insert this route. Relookup.
+ */
+ goto relookup;
}
+ dst_clone(&rt->u.dst);
out:
read_unlock_bh(&rt6_lock);
@@ -1304,121 +1210,6 @@ int ip6_rt_addr_del(struct in6_addr *addr, struct net_device *dev)
return err;
}
-#ifdef CONFIG_RT6_POLICY
-
-static int rt6_flow_match_in(struct rt6_info *rt, struct sk_buff *skb)
-{
- struct flow_filter *frule;
- struct pkt_filter *filter;
- int res = 1;
-
- if ((frule = rt->rt6i_filter) == NULL)
- goto out;
-
- if (frule->type != FLR_INPUT) {
- res = 0;
- goto out;
- }
-
- for (filter = frule->u.filter; filter; filter = filter->next) {
- __u32 *word;
-
- word = (__u32 *) skb->h.raw;
- word += filter->offset;
-
- if ((*word ^ filter->value) & filter->mask) {
- res = 0;
- break;
- }
- }
-
-out:
- return res;
-}
-
-static int rt6_flow_match_out(struct rt6_info *rt, struct sock *sk)
-{
- struct flow_filter *frule;
- int res = 1;
-
- if ((frule = rt->rt6i_filter) == NULL)
- goto out;
-
- if (frule->type != FLR_INPUT) {
- res = 0;
- goto out;
- }
-
- if (frule->u.sk != sk)
- res = 0;
-out:
- return res;
-}
-
-static struct rt6_info *rt6_flow_lookup(struct rt6_info *rt,
- struct in6_addr *daddr,
- struct in6_addr *saddr,
- struct fl_acc_args *args)
-{
- struct flow_rule *frule;
- struct rt6_info *nrt = NULL;
- struct pol_chain *pol;
-
- for (pol = rt6_pol_list; pol; pol = pol->next) {
- struct fib6_node *fn;
- struct rt6_info *sprt;
-
- fn = fib6_lookup(pol->rules, daddr, saddr);
-
- do {
- for (sprt = fn->leaf; sprt; sprt=sprt->u.next) {
- int res;
-
- frule = sprt->rt6i_flowr;
-#if RT6_DEBUG >= 2
- if (frule == NULL) {
- printk(KERN_DEBUG "NULL flowr\n");
- goto error;
- }
-#endif
- res = frule->ops->accept(rt, sprt, args, &nrt);
-
- switch (res) {
- case FLOWR_SELECT:
- goto found;
- case FLOWR_CLEAR:
- goto next_policy;
- case FLOWR_NODECISION:
- break;
- default:
- goto error;
- };
- }
-
- fn = fn->parent;
-
- } while ((fn->fn_flags & RTN_TL_ROOT) == 0);
-
- next_policy:
- }
-
-error:
- dst_clone(&ip6_null_entry.u.dst);
- return &ip6_null_entry;
-
-found:
- if (nrt == NULL)
- goto error;
-
- nrt->rt6i_flags |= RTF_CACHE;
- dst_clone(&nrt->u.dst);
- err = rt6_ins(nrt);
- if (err)
- nrt->u.dst.error = err;
- return nrt;
-}
-#endif
-
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
if (((void*)rt->rt6i_dev == arg || arg == NULL) &&
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 29d9d5bcc3df..ccfd558c1031 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -502,9 +502,16 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
dst = addr6->s6_addr32[3];
}
- if (ip_route_output(&rt, dst, tiph->saddr, RT_TOS(tos), tunnel->parms.link)) {
- tunnel->stat.tx_carrier_errors++;
- goto tx_error_icmp;
+ {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos) } },
+ .oif = tunnel->parms.link };
+ if (ip_route_output_key(&rt, &fl)) {
+ tunnel->stat.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
}
if (rt->rt_type != RTN_UNICAST) {
tunnel->stat.tx_carrier_errors++;
@@ -777,8 +784,13 @@ static int ipip6_tunnel_init(struct net_device *dev)
ipip6_tunnel_init_gen(dev);
if (iph->daddr) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos) } },
+ .oif = tunnel->parms.link };
struct rtable *rt;
- if (!ip_route_output(&rt, iph->daddr, iph->saddr, RT_TOS(iph->tos), tunnel->parms.link)) {
+ if (!ip_route_output_key(&rt, &fl)) {
tdev = rt->u.dst.dev;
ip_rt_put(rt);
}
diff --git a/net/irda/crc.c b/net/irda/crc.c
index b3019d5c2095..b9a46c9e955b 100644
--- a/net/irda/crc.c
+++ b/net/irda/crc.c
@@ -57,7 +57,7 @@ __u16 const irda_crc16_table[256] =
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
-unsigned short crc_calc( __u16 fcs, __u8 const *buf, size_t len)
+unsigned short irda_calc_crc16( __u16 fcs, __u8 const *buf, size_t len)
{
while (len--)
fcs = irda_fcs(fcs, *buf++);
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 750f125394aa..1e8fabd55142 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -220,9 +220,16 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
/*
* Now the line is ready for some communication. Check if we are a
- * server, and send over some initial parameters
+ * server, and send over some initial parameters.
+ * Client do it in ircomm_tty_state_setup().
+ * Note : we may get called from ircomm_tty_getvalue_confirm(),
+ * therefore before we even have open any socket. And self->client
+ * is initialised to TRUE only later. So, we check if the link is
+ * really initialised. - Jean II
*/
- if (!self->client && (self->settings.service_type != IRCOMM_3_WIRE_RAW))
+ if ((self->max_header_size != IRCOMM_TTY_HDR_UNINITIALISED) &&
+ (!self->client) &&
+ (self->settings.service_type != IRCOMM_3_WIRE_RAW))
{
/* Init connection */
ircomm_tty_send_initial_parameters(self);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d8139a21cb51..9439b5a4b43a 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -421,8 +421,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
self->line = line;
INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
- self->max_header_size = IRCOMM_TTY_HDR_UNITIALISED;
- self->max_data_size = 64-self->max_header_size;
+ self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
+ self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
self->close_delay = 5*HZ/10;
self->closing_wait = 30*HZ;
@@ -719,16 +719,26 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
/* We may receive packets from the TTY even before we have finished
* our setup. Not cool.
- * The problem is that we would allocate a skb with bogus header and
- * data size, and when adding data to it later we would get
- * confused.
- * Better to not accept data until we are properly setup. Use bogus
- * header size to check that (safest way to detect it).
+ * The problem is that we don't know the final header and data size
+ * to create the proper skb, so any skb we would create would have
+ * bogus header and data size, so care is needed.
+ * We use a bogus header size to safely detect this condition.
+ * Another problem is that hw_stopped was set to 0 way before it
+ * should be, so we would drop this skb. It should now be fixed.
+ * One option is to not accept data until we are properly setup.
+ * But, I suspect that when it happens, the ppp line discipline
+ * just "drops" the data, which might screw up connect scripts.
+ * The second option is to create a "safe skb", with large header
+ * and small size (see ircomm_tty_open() for values).
+ * We just need to make sure that when the real values get filled,
+ * we don't mess up the original "safe skb" (see tx_data_size).
* Jean II */
- if (self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED) {
- /* TTY will retry */
- IRDA_DEBUG(2, "%s() : not initialised\n", __FUNCTION__ );
- return len;
+ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) {
+ IRDA_DEBUG(1, "%s() : not initialised\n", __FUNCTION__);
+#ifdef IRCOMM_NO_TX_BEFORE_INIT
+ /* We didn't consume anything, TTY will retry */
+ return 0;
+#endif
}
spin_lock_irqsave(&self->spinlock, flags);
@@ -761,8 +771,11 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
* transmit buffer? Cannot use skb_tailroom, since
* dev_alloc_skb gives us a larger skb than we
* requested
+ * Note : use tx_data_size, because max_data_size
+ * may have changed and we don't want to overwrite
+ * the skb. - Jean II
*/
- if ((tailroom = (self->max_data_size-skb->len)) > 0) {
+ if ((tailroom = (self->tx_data_size - skb->len)) > 0) {
/* Adjust data to tailroom */
if (size > tailroom)
size = tailroom;
@@ -783,6 +796,9 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
}
skb_reserve(skb, self->max_header_size);
self->tx_skb = skb;
+ /* Remember skb size because max_data_size may
+ * change later on - Jean II */
+ self->tx_data_size = self->max_data_size;
}
/* Copy data */
@@ -825,17 +841,22 @@ static int ircomm_tty_write_room(struct tty_struct *tty)
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
+#ifdef IRCOMM_NO_TX_BEFORE_INIT
+ /* max_header_size tells us if the channel is initialised or not. */
+ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED)
+ /* Don't bother us yet */
+ return 0;
+#endif
+
/* Check if we are allowed to transmit any data.
* hw_stopped is the regular flow control.
- * max_header_size tells us if the channel is initialised or not.
* Jean II */
- if ((tty->hw_stopped) ||
- (self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED))
+ if (tty->hw_stopped)
ret = 0;
else {
spin_lock_irqsave(&self->spinlock, flags);
if (self->tx_skb)
- ret = self->max_data_size - self->tx_skb->len;
+ ret = self->tx_data_size - self->tx_skb->len;
else
ret = self->max_data_size;
spin_unlock_irqrestore(&self->spinlock, flags);
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index 49d0d9f73f81..6ff343dd8de5 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -517,6 +517,23 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
del_timer(&self->watchdog_timer);
+ /* Remove LM-IAS object now so it is not reused.
+ * IrCOMM deals very poorly with multiple incoming connections.
+ * It should look a lot more like IrNET, and "dup" a server TSAP
+ * to the application TSAP (based on various rules).
+ * This is a cheap workaround allowing multiple clients to
+ * connect to us. It will not always work.
+ * Each IrCOMM socket has an IAS entry. Incoming connections will
+ * pick the first one found. So, when we are fully connected,
+ * we remove our IAS entries so that the next IAS entry is used.
+ * We do that for *both* client and server, because a server
+ * can also create client instances.
+ * Jean II */
+ if (self->obj) {
+ irias_delete_object(self->obj);
+ self->obj = NULL;
+ }
+
/*
* IrCOMM link is now up, and if we are not using hardware
* flow-control, then declare the hardware as running. Otherwise we
@@ -527,7 +544,7 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __FUNCTION__ );
return;
} else {
- IRDA_DEBUG(2, "%s(), starting hardware!\n", __FUNCTION__ );
+ IRDA_DEBUG(1, "%s(), starting hardware!\n", __FUNCTION__ );
self->tty->hw_stopped = 0;
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 4c386ac81dca..75320713724a 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -514,10 +514,10 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_SETUP_PEND);
- irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
-
/* Start watchdog timer (5 secs for now) */
irlmp_start_watchdog_timer(self, 5*HZ);
+
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
break;
case LM_CONNECT_INDICATION:
if (self->conn_skb) {
@@ -529,8 +529,6 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_CONNECT_PEND);
- irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
-
/* Start watchdog timer
* This is not mentionned in the spec, but there is a rare
* race condition that can get the socket stuck.
@@ -543,10 +541,12 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
* a backup plan. 1 second is plenty (should be immediate).
* Jean II */
irlmp_start_watchdog_timer(self, 1*HZ);
+
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %s\n",
- __FUNCTION__, irlmp_event[event]);
+ IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n",
+ __FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
@@ -604,8 +604,8 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __FUNCTION__, irlmp_event[event]);
+ IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
+ __FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
@@ -666,8 +666,8 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __FUNCTION__, irlmp_event[event]);
+ IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
+ __FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
@@ -757,8 +757,8 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, skb);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __FUNCTION__, irlmp_event[event]);
+ IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
+ __FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
@@ -830,8 +830,8 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __FUNCTION__, irlmp_event[event]);
+ IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
+ __FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
@@ -889,8 +889,8 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, NULL);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __FUNCTION__, irlmp_event[event]);
+ IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
+ __FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
diff --git a/net/irda/irsyms.c b/net/irda/irsyms.c
index 31a35dce98e1..0f1e6564310b 100644
--- a/net/irda/irsyms.c
+++ b/net/irda/irsyms.c
@@ -42,6 +42,7 @@
#include <net/irda/wrapper.h>
#include <net/irda/timer.h>
#include <net/irda/parameters.h>
+#include <net/irda/crc.h>
extern struct proc_dir_entry *proc_irda;
@@ -163,6 +164,7 @@ EXPORT_SYMBOL(irda_task_delete);
EXPORT_SYMBOL(async_wrap_skb);
EXPORT_SYMBOL(async_unwrap_char);
+EXPORT_SYMBOL(irda_calc_crc16);
EXPORT_SYMBOL(irda_start_timer);
EXPORT_SYMBOL(setup_dma);
EXPORT_SYMBOL(infrared_mode);
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 0b7395b5a186..6267cd7c6f8d 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -35,7 +35,7 @@
#define NET_IRDA 412 /* Random number */
enum { DISCOVERY=1, DEVNAME, DEBUG, FAST_POLL, DISCOVERY_SLOTS,
DISCOVERY_TIMEOUT, SLOT_TIMEOUT, MAX_BAUD_RATE, MIN_TX_TURN_TIME,
- MAX_TX_DATA_SIZE, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME,
+ MAX_TX_DATA_SIZE, MAX_TX_WINDOW, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME,
LAP_KEEPALIVE_TIME };
extern int sysctl_discovery;
@@ -48,6 +48,7 @@ extern char sysctl_devname[];
extern int sysctl_max_baud_rate;
extern int sysctl_min_tx_turn_time;
extern int sysctl_max_tx_data_size;
+extern int sysctl_max_tx_window;
extern int sysctl_max_noreply_time;
extern int sysctl_warn_noreply_time;
extern int sysctl_lap_keepalive_time;
@@ -69,6 +70,8 @@ static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */
static int min_min_tx_turn_time = 0;
static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */
static int min_max_tx_data_size = 64;
+static int max_max_tx_window = 7; /* See qos.c - IrLAP spec */
+static int min_max_tx_window = 1;
static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */
static int min_max_noreply_time = 3;
static int max_warn_noreply_time = 3; /* 3s == standard */
@@ -125,6 +128,9 @@ static ctl_table irda_table[] = {
{ MAX_TX_DATA_SIZE, "max_tx_data_size", &sysctl_max_tx_data_size,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_tx_data_size, &max_max_tx_data_size },
+ { MAX_TX_WINDOW, "max_tx_window", &sysctl_max_tx_window,
+ sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
+ NULL, &min_max_tx_window, &max_max_tx_window },
{ MAX_NOREPLY_TIME, "max_noreply_time", &sysctl_max_noreply_time,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_noreply_time, &max_max_noreply_time },
diff --git a/net/irda/parameters.c b/net/irda/parameters.c
index d13d16a37021..ae79795b9133 100644
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -200,11 +200,13 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
{
irda_param_t p;
int n = 0;
+ int extract_len; /* Real length we extract */
int err;
p.pi = pi; /* In case handler needs to know */
p.pl = buf[1]; /* Extract lenght of value */
p.pv.i = 0; /* Clear value */
+ extract_len = p.pl; /* Default : extract all */
/* Check if buffer is long enough for parsing */
if (len < (2+p.pl)) {
@@ -217,18 +219,30 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
/*
* Check that the integer length is what we expect it to be. If the
* handler want a 16 bits integer then a 32 bits is not good enough
+ * PV_INTEGER means that the handler is flexible.
*/
if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) {
ERROR("%s: invalid parameter length! "
"Expected %d bytes, but value had %d bytes!\n",
__FUNCTION__, type & PV_MASK, p.pl);
- /* Skip parameter */
- return p.pl+2;
+ /* Most parameters are bit/byte fields or little endian,
+ * so it's ok to only extract a subset of it (the subset
+ * that the handler expects). This is necessary, as some
+ * broken implementations seem to add extra undefined bits.
+ * If the parameter is shorter than we expect or is big
+ * endian, we can't play those tricks. Jean II */
+ if((p.pl < (type & PV_MASK)) || (type & PV_BIG_ENDIAN)) {
+ /* Skip parameter */
+ return p.pl+2;
+ } else {
+ /* Extract subset of it, fallthrough */
+ extract_len = type & PV_MASK;
+ }
}
- switch (p.pl) {
+ switch (extract_len) {
case 1:
n += irda_param_unpack(buf+2, "b", &p.pv.i);
break;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index f22862095c78..cf7da45b97a1 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -70,13 +70,18 @@ unsigned sysctl_min_tx_turn_time = 10;
* 1.2, chapt 5.3.2.1, p41). But, this number includes the LAP header
* (2 bytes), and CRC (32 bits at 4 Mb/s). So, for the I field (LAP
* payload), that's only 2042 bytes. Oups !
- * I've had trouble trouble transmitting 2048 bytes frames with USB
- * dongles and nsc-ircc at 4 Mb/s, so adjust to 2042... I don't know
- * if this bug applies only for 2048 bytes frames or all negociated
- * frame sizes, but all hardware seem to support "2048 bytes" frames.
- * You can use the sysctl to play with this value anyway.
+ * My nsc-ircc hardware has troubles receiving 2048 bytes frames at 4 Mb/s,
+ * so adjust to 2042... I don't know if this bug applies only for 2048
+ * bytes frames or all negotiated frame sizes, but you can use the sysctl
+ * to play with this value anyway.
* Jean II */
unsigned sysctl_max_tx_data_size = 2042;
+/*
+ * Maximum transmit window, i.e. number of LAP frames between turn-around.
+ * This allows us to override what the peer told us. Some peers are buggy and
+ * don't always support what they tell us.
+ * Jean II */
+unsigned sysctl_max_tx_window = 7;
static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
@@ -184,7 +189,19 @@ int msb_index (__u16 word)
{
__u16 msb = 0x8000;
int index = 15; /* Current MSB */
-
+
+ /* Check for buggy peers.
+ * Note : there is a small probability that it could be us, but I
+ * would expect driver authors to catch that pretty early and be
+ * able to check precisely what's going on. If an end user sees this,
+ * it's very likely the peer. - Jean II */
+ if (word == 0) {
+ WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
+ __FUNCTION__);
+ /* The only safe choice (we don't know the array size) */
+ word = 0x1;
+ }
+
while (msb) {
if (word & msb)
break; /* Found it! */
@@ -335,10 +352,14 @@ void irlap_adjust_qos_settings(struct qos_info *qos)
/*
* Make sure the mintt is sensible.
+ * Main culprit : Ericsson T39. - Jean II
*/
if (sysctl_min_tx_turn_time > qos->min_turn_time.value) {
int i;
+ WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n",
+ __FUNCTION__, sysctl_min_tx_turn_time);
+
/* We don't really need bits, but easier this way */
i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times,
8, &qos->min_turn_time.bits);
@@ -398,6 +419,11 @@ void irlap_adjust_qos_settings(struct qos_info *qos)
if (qos->data_size.value > sysctl_max_tx_data_size)
/* Allow non discrete adjustement to avoid loosing capacity */
qos->data_size.value = sysctl_max_tx_data_size;
+ /*
+ * Override Tx window if user request it. - Jean II
+ */
+ if (qos->window_size.value > sysctl_max_tx_window)
+ qos->window_size.value = sysctl_max_tx_window;
}
/*
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index f25e4be5a8fc..6227b8124c30 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -154,7 +154,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
if (head == NULL)
goto old_method;
- iif = ((struct rtable*)dst)->key.iif;
+ iif = ((struct rtable*)dst)->fl.iif;
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 13f0752d00ad..12ff99ea0f89 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -260,16 +260,10 @@ int sctp_v4_get_dst_mtu(const sockaddr_storage_t *address)
{
int dst_mtu = SCTP_DEFAULT_MAXSEGMENT;
struct rtable *rt;
- struct rt_key key = {
- .dst = address->v4.sin_addr.s_addr,
- .src = 0,
- .iif = 0,
- .oif = 0,
- .tos = 0,
- .scope = 0
- };
-
- if (ip_route_output_key(&rt, &key)) {
+ struct flowi fl = { .nl_u = { .ip4_u =
+ { .daddr = address->v4.sin_addr.s_addr } } };
+
+ if (ip_route_output_key(&rt, &fl)) {
SCTP_DEBUG_PRINTK("sctp_v4_get_dst_mtu:ip_route_output_key"
" failed, returning %d as dst_mtu\n",
dst_mtu);