diff options
Diffstat (limited to 'drivers')
 drivers/atm/ambassador.c                |  75 ++++++---------
 drivers/atm/he.c                        |  37 +------
 drivers/atm/horizon.c                   |  19 ++--
 drivers/atm/idt77252.c                  |   9 +-
 drivers/infiniband/ulp/ipoib/ipoib.h    |   9 +-
 drivers/infiniband/ulp/ipoib/ipoib_ib.c |   4 +-
 drivers/infiniband/ulp/ipoib/ipoib_main.c | 26 +----
 drivers/net/e1000/e1000.h               |   1 -
 drivers/net/e1000/e1000_main.c          |  33 ++-----
 drivers/net/fc/iph5526_ip.h             |   1 -
 drivers/net/sungem.c                    | 122 ++++++++++-------------
 drivers/net/sungem.h                    |   1 -
 drivers/net/tg3.c                       | 126 ++++++++----------------
 drivers/net/tg3.h                       |   9 +-
 drivers/s390/net/qeth_main.c            |  26 +----
 15 files changed, 174 insertions(+), 324 deletions(-)
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 924abd2654a0..3870e3787b7b 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -574,7 +574,6 @@ static int command_do (amb_dev * dev, command * cmd) { amb_cq * cq = &dev->cq; volatile amb_cq_ptrs * ptrs = &cq->ptrs; command * my_slot; - unsigned long timeout; PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev); @@ -599,20 +598,14 @@ static int command_do (amb_dev * dev, command * cmd) { // mail the command wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in)); - // prepare to wait for cq->pending milliseconds - // effectively one centisecond on i386 - timeout = (cq->pending*HZ+999)/1000; - if (cq->pending > cq->high) cq->high = cq->pending; spin_unlock (&cq->lock); - while (timeout) { - // go to sleep - // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout); - set_current_state(TASK_UNINTERRUPTIBLE); - timeout = schedule_timeout (timeout); - } + // these comments were in a while-loop before, msleep removes the loop + // go to sleep + // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout); + msleep(cq->pending); // wait for my slot to be reached (all waiters are here or above, until...) while (ptrs->out != my_slot) { @@ -1799,12 +1792,11 @@ static int __init do_loader_command (volatile loader_block * lb, // dump_loader_block (lb); wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask); - timeout = command_timeouts[cmd] * HZ/100; + timeout = command_timeouts[cmd] * 10; while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS)) if (timeout) { - set_current_state(TASK_UNINTERRUPTIBLE); - timeout = schedule_timeout (timeout); + timeout = msleep_interruptible(timeout); } else { PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd); dump_registers (dev); @@ -1814,10 +1806,10 @@ static int __init do_loader_command (volatile loader_block * lb, if (cmd == adapter_start) { // wait for start command to acknowledge... 
- timeout = HZ/10; + timeout = 100; while (rd_plain (dev, offsetof(amb_mem, doorbell))) if (timeout) { - timeout = schedule_timeout (timeout); + timeout = msleep_interruptible(timeout); } else { PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x", be32_to_cpu (lb->result)); @@ -1932,17 +1924,12 @@ static int amb_reset (amb_dev * dev, int diags) { if (diags) { unsigned long timeout; // 4.2 second wait - timeout = HZ*42/10; - while (timeout) { - set_current_state(TASK_UNINTERRUPTIBLE); - timeout = schedule_timeout (timeout); - } + msleep(4200); // half second time-out - timeout = HZ/2; + timeout = 500; while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready))) if (timeout) { - set_current_state(TASK_UNINTERRUPTIBLE); - timeout = schedule_timeout (timeout); + timeout = msleep_interruptible(timeout); } else { PRINTD (DBG_LOAD|DBG_ERR, "reset timed out"); return -ETIMEDOUT; @@ -2056,14 +2043,12 @@ static int __init amb_talk (amb_dev * dev) { wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a)); // 2.2 second wait (must not touch doorbell during 2 second DMA test) - timeout = HZ*22/10; - while (timeout) - timeout = schedule_timeout (timeout); + msleep(2200); // give the adapter another half second? 
- timeout = HZ/2; + timeout = 500; while (rd_plain (dev, offsetof(amb_mem, doorbell))) if (timeout) { - timeout = schedule_timeout (timeout); + timeout = msleep_interruptible(timeout); } else { PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out"); return -ETIMEDOUT; @@ -2228,17 +2213,12 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev) spin_lock_init (&dev->rxq[pool].lock); } -static int setup_pci_dev(struct pci_dev *pci_dev) +static void setup_pci_dev(struct pci_dev *pci_dev) { unsigned char lat; - int ret; // enable bus master accesses pci_set_master(pci_dev); - - ret = pci_enable_device(pci_dev); - if (ret < 0) - goto out; // frobnicate latency (upwards, usually) pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat); @@ -2251,22 +2231,27 @@ static int setup_pci_dev(struct pci_dev *pci_dev) lat, pci_lat); pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat); } -out: - return ret; } static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) { amb_dev * dev; int err; + unsigned int irq; + + err = pci_enable_device(pci_dev); + if (err < 0) { + PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); + goto out; + } // read resources from PCI configuration space - unsigned int irq = pci_dev->irq; + irq = pci_dev->irq; if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) { PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); err = -EINVAL; - goto out; + goto out_disable; } PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at" @@ -2277,7 +2262,7 @@ static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_ err = pci_request_region(pci_dev, 1, DEV_LABEL); if (err < 0) { PRINTK (KERN_ERR, "IO range already in use!"); - goto out; + goto out_disable; } dev = kmalloc (sizeof(amb_dev), GFP_KERNEL); @@ -2295,15 +2280,13 @@ static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_ goto out_free; } - err = setup_pci_dev(pci_dev); - if (err < 0) - goto out_reset; + 
setup_pci_dev(pci_dev); // grab (but share) IRQ and install handler err = request_irq(irq, interrupt_handler, SA_SHIRQ, DEV_LABEL, dev); if (err < 0) { PRINTK (KERN_ERR, "request IRQ failed!"); - goto out_disable; + goto out_reset; } dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL); @@ -2337,14 +2320,14 @@ out: out_free_irq: free_irq(irq, dev); -out_disable: - pci_disable_device(pci_dev); out_reset: amb_reset(dev, 0); out_free: kfree(dev); out_release: pci_release_region(pci_dev, 1); +out_disable: + pci_disable_device(pci_dev); goto out; } diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 7221439b4937..e64d422470ff 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -86,44 +86,19 @@ #undef USE_RBPL_POOL /* if memory is tight try this */ #define USE_TPD_POOL /* #undef CONFIG_ATM_HE_USE_SUNI */ - -/* compatibility */ - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69) -typedef void irqreturn_t; -#define IRQ_NONE -#define IRQ_HANDLED -#define IRQ_RETVAL(x) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,9) -#define __devexit_p(func) func -#endif - -#ifndef MODULE_LICENSE -#define MODULE_LICENSE(x) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) -#define pci_set_drvdata(pci_dev, data) (pci_dev)->driver_data = (data) -#define pci_get_drvdata(pci_dev) (pci_dev)->driver_data -#endif +/* #undef HE_DEBUG */ #include "he.h" - #include "suni.h" - #include <linux/atm_he.h> #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args) -#undef DEBUG -#ifdef DEBUG +#ifdef HE_DEBUG #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args) -#else +#else /* !HE_DEBUG */ #define HPRINTK(fmt,args...) 
do { } while (0) -#endif /* DEBUG */ - +#endif /* HE_DEBUG */ /* version definition */ @@ -147,8 +122,8 @@ static u8 read_prom_byte(struct he_dev *he_dev, int addr); /* globals */ -static struct he_dev *he_devs = NULL; -static int disable64 = 0; +static struct he_dev *he_devs; +static int disable64; static short nvpibits = -1; static short nvcibits = -1; static short rx_skb_reserve = 16; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 088440b8056a..1c80cc922e4a 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2706,18 +2706,18 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_ // adapter slot free, read resources from PCI configuration space u32 iobase = pci_resource_start (pci_dev, 0); u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1)); - u8 irq = pci_dev->irq; + unsigned int irq; unsigned char lat; PRINTD (DBG_FLOW, "hrz_probe"); - /* XXX DEV_LABEL is a guess */ - if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) + if (pci_enable_device(pci_dev)) return -EINVAL; - if (pci_enable_device(pci_dev)) { - err = -EINVAL; - goto out_release; + /* XXX DEV_LABEL is a guess */ + if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) { + return -EINVAL; + goto out_disable; } dev = kmalloc(sizeof(hrz_dev), GFP_KERNEL); @@ -2725,7 +2725,7 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_ // perhaps we should be nice: deregister all adapters and abort? 
PRINTD(DBG_ERR, "out of memory"); err = -ENOMEM; - goto out_disable; + goto out_release; } memset(dev, 0, sizeof(hrz_dev)); @@ -2733,6 +2733,7 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_ pci_set_drvdata(pci_dev, dev); // grab IRQ and install handler - move this someplace more sensible + irq = pci_dev->irq; if (request_irq(irq, interrupt_handler, SA_SHIRQ, /* irqflags guess */ @@ -2846,10 +2847,10 @@ out_free_irq: free_irq(dev->irq, dev); out_free: kfree(dev); -out_disable: - pci_disable_device(pci_dev); out_release: release_region(iobase, HRZ_IO_EXTENT); +out_disable: + pci_disable_device(pci_dev); goto out; } diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 80e304cf3169..3d8764ab2825 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3136,14 +3136,11 @@ deinit_card(struct idt77252_dev *card) } } - if (card->soft_tst) - vfree(card->soft_tst); + vfree(card->soft_tst); - if (card->scd2vc) - vfree(card->scd2vc); + vfree(card->scd2vc); - if (card->vcs) - vfree(card->vcs); + vfree(card->vcs); if (card->raw_cell_hnd) { pci_free_consistent(card->pcidev, 2 * sizeof(u32), diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 074394d4f8a9..b097522c55e8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -104,10 +104,10 @@ struct ipoib_buf { }; /* - * Device private locking: tx_lock protects members used in TX fast - * path (and we use LLTX so upper layers don't do extra locking). - * lock protects everything else. lock nests inside of tx_lock (ie - * tx_lock must be acquired first if needed). + * Device private locking: netdev->xmit_lock protects members used + * in TX fast path. + * lock protects everything else. lock nests inside of xmit_lock (ie + * xmit_lock must be acquired first if needed). 
*/ struct ipoib_dev_priv { spinlock_t lock; @@ -150,7 +150,6 @@ struct ipoib_dev_priv { struct ipoib_buf *rx_ring; - spinlock_t tx_lock; struct ipoib_buf *tx_ring; unsigned tx_head; unsigned tx_tail; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index ac550991227e..d70f9f53d9d0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -247,12 +247,12 @@ static void ipoib_ib_handle_wc(struct net_device *dev, dev_kfree_skb_any(tx_req->skb); - spin_lock_irqsave(&priv->tx_lock, flags); + spin_lock_irqsave(&dev->xmit_lock, flags); ++priv->tx_tail; if (netif_queue_stopped(dev) && priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2) netif_wake_queue(dev); - spin_unlock_irqrestore(&priv->tx_lock, flags); + spin_unlock_irqrestore(&dev->xmit_lock, flags); if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 63c8168d8af8..90c73a7cea72 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -411,7 +411,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) /* * We can only be called from ipoib_start_xmit, so we're - * inside tx_lock -- no need to save/restore flags. + * inside dev->xmit_lock -- no need to save/restore flags. */ spin_lock(&priv->lock); @@ -483,7 +483,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, /* * We can only be called from ipoib_start_xmit, so we're - * inside tx_lock -- no need to save/restore flags. + * inside dev->xmit_lock -- no need to save/restore flags. */ spin_lock(&priv->lock); @@ -526,27 +526,11 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, spin_unlock(&priv->lock); } +/* Called with dev->xmit_lock held and IRQs disabled. 
*/ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; - unsigned long flags; - - local_irq_save(flags); - if (!spin_trylock(&priv->tx_lock)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } - - /* - * Check if our queue is stopped. Since we have the LLTX bit - * set, we can't rely on netif_stop_queue() preventing our - * xmit function from being called with a full queue. - */ - if (unlikely(netif_queue_stopped(dev))) { - spin_unlock_irqrestore(&priv->tx_lock, flags); - return NETDEV_TX_BUSY; - } if (skb->dst && skb->dst->neighbour) { if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { @@ -601,7 +585,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) } out: - spin_unlock_irqrestore(&priv->tx_lock, flags); return NETDEV_TX_OK; } @@ -797,7 +780,7 @@ static void ipoib_setup(struct net_device *dev) dev->addr_len = INFINIBAND_ALEN; dev->type = ARPHRD_INFINIBAND; dev->tx_queue_len = IPOIB_TX_RING_SIZE * 2; - dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX; + dev->features = NETIF_F_VLAN_CHALLENGED; /* MTU will be reset when mcast join happens */ dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; @@ -812,7 +795,6 @@ static void ipoib_setup(struct net_device *dev) priv->dev = dev; spin_lock_init(&priv->lock); - spin_lock_init(&priv->tx_lock); init_MUTEX(&priv->mcast_mutex); init_MUTEX(&priv->vlan_mutex); diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 77db78960430..0843a7c9c624 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -209,7 +209,6 @@ struct e1000_adapter { /* TX */ struct e1000_desc_ring tx_ring; - spinlock_t tx_lock; uint32_t txd_cmd; uint32_t tx_int_delay; uint32_t tx_abs_int_delay; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index aa5ad41acf24..3966e55dcd9a 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c 
@@ -291,7 +291,9 @@ e1000_up(struct e1000_adapter *adapter) e1000_phy_reset(&adapter->hw); } + spin_lock_irq(&netdev->xmit_lock); e1000_set_multi(netdev); + spin_unlock_irq(&netdev->xmit_lock); e1000_restore_vlan(adapter); @@ -520,9 +522,6 @@ e1000_probe(struct pci_dev *pdev, if(pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - /* hard_start_xmit is safe against parallel locking */ - netdev->features |= NETIF_F_LLTX; - /* before reading the EEPROM, reset the controller to * put the device in a known good starting state */ @@ -732,7 +731,6 @@ e1000_sw_init(struct e1000_adapter *adapter) atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->stats_lock); - spin_lock_init(&adapter->tx_lock); return 0; } @@ -1293,6 +1291,8 @@ e1000_set_mac(struct net_device *netdev, void *p) * list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. + * + * Called with netdev->xmit_lock held and IRQs disabled. **/ static void @@ -1304,12 +1304,9 @@ e1000_set_multi(struct net_device *netdev) uint32_t rctl; uint32_t hash_value; int i; - unsigned long flags; /* Check for Promiscuous and All Multicast modes */ - spin_lock_irqsave(&adapter->tx_lock, flags); - rctl = E1000_READ_REG(hw, RCTL); if(netdev->flags & IFF_PROMISC) { @@ -1358,8 +1355,6 @@ e1000_set_multi(struct net_device *netdev) if(hw->mac_type == e1000_82542_rev2_0) e1000_leave_82542_rst(adapter); - - spin_unlock_irqrestore(&adapter->tx_lock, flags); } /* Need to wait a few seconds after link up to get diagnostic information from @@ -1786,6 +1781,8 @@ no_fifo_stall_required: } #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) + +/* Called with dev->xmit_lock held and interrupts disabled. 
*/ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -1794,7 +1791,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb->len; - unsigned long flags; unsigned int nr_frags = 0; unsigned int mss = 0; int count = 0; @@ -1838,18 +1834,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if(adapter->pcix_82544) count += nr_frags; - local_irq_save(flags); - if (!spin_trylock(&adapter->tx_lock)) { - /* Collision - tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } - /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -1857,7 +1845,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { netif_stop_queue(netdev); mod_timer(&adapter->tx_fifo_stall_timer, jiffies); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } } @@ -1884,7 +1871,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } @@ -2234,13 +2220,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) tx_ring->next_to_clean = i; - spin_lock(&adapter->tx_lock); + spin_lock(&netdev->xmit_lock); if(unlikely(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))) netif_wake_queue(netdev); - spin_unlock(&adapter->tx_lock); + spin_unlock(&netdev->xmit_lock); return cleaned; } @@ -2819,7 +2805,10 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state) if(wufc) { e1000_setup_rctl(adapter); + + spin_lock_irq(&netdev->xmit_lock); 
e1000_set_multi(netdev); + spin_unlock_irq(&netdev->xmit_lock); /* turn on all-multi mode if wake on multicast is enabled */ if(adapter->wol & E1000_WUFC_MC) { diff --git a/drivers/net/fc/iph5526_ip.h b/drivers/net/fc/iph5526_ip.h index b54f727e140a..9fae3b002fec 100644 --- a/drivers/net/fc/iph5526_ip.h +++ b/drivers/net/fc/iph5526_ip.h @@ -18,7 +18,6 @@ static int iph5526_change_mtu(struct net_device *dev, int mtu); static void rx_net_packet(struct fc_info *fi, u_char *buff_addr, int payload_size); static void rx_net_mfs_packet(struct fc_info *fi, struct sk_buff *skb); -unsigned short fc_type_trans(struct sk_buff *skb, struct net_device *dev); static int tx_ip_packet(struct sk_buff *skb, unsigned long len, struct fc_info *fi); static int tx_arp_packet(char *data, unsigned long len, struct fc_info *fi); #endif diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 38e94961e1a4..c5cbe1bda9cb 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -835,9 +835,9 @@ static int gem_poll(struct net_device *dev, int *budget) } /* Run TX completion thread */ - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); gem_tx(dev, gp, gp->status); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irqrestore(&gp->lock, flags); @@ -932,12 +932,12 @@ static void gem_tx_timeout(struct net_device *dev) readl(gp->regs + MAC_RXCFG)); spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); gp->reset_task_pending = 2; schedule_work(&gp->reset_task); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); } @@ -955,7 +955,6 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) struct gem *gp = dev->priv; int entry; u64 ctrl; - unsigned long flags; ctrl = 0; if (skb->ip_summed == CHECKSUM_HW) { @@ -969,17 +968,9 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) (csum_stuff_off << 21)); } - local_irq_save(flags); - if (!spin_trylock(&gp->tx_lock)) { 
- /* Tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } - /* This is a hard error, log it. */ if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { netif_stop_queue(dev); - spin_unlock_irqrestore(&gp->tx_lock, flags); printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", dev->name); return NETDEV_TX_BUSY; @@ -1066,7 +1057,6 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->name, entry, skb->len); mb(); writel(gp->tx_new, gp->regs + TXDMA_KICK); - spin_unlock_irqrestore(&gp->tx_lock, flags); dev->trans_start = jiffies; @@ -1097,11 +1087,11 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) } spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); dev->mtu = new_mtu; gp->reset_task_pending = 1; schedule_work(&gp->reset_task); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); flush_scheduled_work(); @@ -1111,7 +1101,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) #define STOP_TRIES 32 -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_stop(struct gem *gp) { int limit; @@ -1137,7 +1127,7 @@ static void gem_stop(struct gem *gp) printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_start_dma(struct gem *gp) { unsigned long val; @@ -1162,7 +1152,7 @@ static void gem_start_dma(struct gem *gp) } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ // XXX dbl check what that function should do when called on PCS PHY static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) { @@ -1249,7 +1239,7 @@ non_mii: /* A link-up condition has occurred, initialize and enable the * rest of the chip. 
* - * Must be invoked under gp->lock and gp->tx_lock. + * Must be invoked under gp->lock and dev->xmit_lock. */ static int gem_set_link_modes(struct gem *gp) { @@ -1356,7 +1346,7 @@ static int gem_set_link_modes(struct gem *gp) return 0; } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static int gem_mdio_link_not_up(struct gem *gp) { switch (gp->lstate) { @@ -1414,7 +1404,7 @@ static void gem_reset_task(void *data) netif_poll_disable(gp->dev); spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); if (gp->hw_running && gp->opened) { netif_stop_queue(gp->dev); @@ -1430,7 +1420,7 @@ static void gem_reset_task(void *data) } gp->reset_task_pending = 0; - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); netif_poll_enable(gp->dev); } @@ -1444,7 +1434,7 @@ static void gem_link_timer(unsigned long data) return; spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); /* If the link of task is still pending, we just * reschedule the link timer @@ -1514,11 +1504,11 @@ static void gem_link_timer(unsigned long data) restart: mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); out_unlock: - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_clean_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; @@ -1569,7 +1559,7 @@ static void gem_clean_rings(struct gem *gp) } } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_init_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; @@ -1619,7 +1609,7 @@ static void gem_init_rings(struct gem *gp) wmb(); } -/* Must be invoked under gp->lock and gp->tx_lock. 
*/ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_init_phy(struct gem *gp) { u32 mifcfg; @@ -1757,7 +1747,7 @@ static void gem_init_phy(struct gem *gp) } } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_init_dma(struct gem *gp) { u64 desc_dma = (u64) gp->gblock_dvma; @@ -1795,7 +1785,7 @@ static void gem_init_dma(struct gem *gp) gp->regs + RXDMA_BLANK); } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under dev->xmit_lock. */ static u32 gem_setup_multicast(struct gem *gp) { @@ -1838,7 +1828,7 @@ gem_setup_multicast(struct gem *gp) return rxcfg; } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_init_mac(struct gem *gp) { unsigned char *e = &gp->dev->dev_addr[0]; @@ -1916,7 +1906,7 @@ static void gem_init_mac(struct gem *gp) writel(0xffffffff, gp->regs + MAC_MCMASK); } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. */ static void gem_init_pause_thresholds(struct gem *gp) { u32 cfg; @@ -2052,7 +2042,7 @@ static int gem_check_invariants(struct gem *gp) return 0; } -/* Must be invoked under gp->lock and gp->tx_lock. */ +/* Must be invoked under gp->lock and dev->xmit_lock. 
*/ static void gem_init_hw(struct gem *gp, int restart_link) { /* On Apple's gmac, I initialize the PHY only after @@ -2150,11 +2140,11 @@ static void gem_stop_phy(struct gem *gp) if (!gp->wake_on_lan) { spin_lock_irqsave(&gp->lock, flags); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); gem_stop(gp); writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irqrestore(&gp->lock, flags); } @@ -2202,9 +2192,9 @@ static void gem_shutdown(struct gem *gp) unsigned long flags; spin_lock_irqsave(&gp->lock, flags); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); gem_stop(gp); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irqrestore(&gp->lock, flags); } } @@ -2265,9 +2255,9 @@ static int gem_open(struct net_device *dev) /* Reset the chip */ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); gem_stop(gp); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); gp->hw_running = 1; @@ -2281,7 +2271,7 @@ static int gem_open(struct net_device *dev) printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); #ifdef CONFIG_PPC_PMAC if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE) gem_apple_powerdown(gp); @@ -2290,14 +2280,14 @@ static int gem_open(struct net_device *dev) gp->pm_timer.expires = jiffies + 10*HZ; add_timer(&gp->pm_timer); up(&gp->pm_sem); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); return -EAGAIN; } spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); /* Allocate & setup ring buffers */ gem_init_rings(gp); @@ -2307,7 +2297,7 @@ static int gem_open(struct net_device *dev) gp->opened = 1; - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); 
spin_unlock_irq(&gp->lock); up(&gp->pm_sem); @@ -2328,7 +2318,7 @@ static int gem_close(struct net_device *dev) /* Stop traffic, mark us closed */ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); gp->opened = 0; @@ -2343,7 +2333,7 @@ static int gem_close(struct net_device *dev) /* Bye, the pm timer will finish the job */ free_irq(gp->pdev->irq, (void *) dev); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); /* Fire the PM timer that will shut us down in about 10 seconds */ @@ -2374,7 +2364,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state) /* If the driver is opened, we stop the DMA */ if (gp->opened) { spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); /* Stop traffic, mark us closed */ netif_device_detach(dev); @@ -2385,7 +2375,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state) /* Get rid of ring buffers */ gem_clean_rings(gp); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) @@ -2419,14 +2409,14 @@ static int gem_resume(struct pci_dev *pdev) } #endif /* CONFIG_PPC_PMAC */ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&gp->dev->xmit_lock); gem_stop(gp); gp->hw_running = 1; gem_init_rings(gp); gem_init_hw(gp, 1); - spin_unlock(&gp->tx_lock); + spin_unlock(&gp->dev->xmit_lock); spin_unlock_irq(&gp->lock); netif_device_attach(dev); @@ -2447,7 +2437,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) struct net_device_stats *stats = &gp->net_stats; spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); if (gp->hw_running) { stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); @@ -2467,12 +2457,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) writel(0, gp->regs + MAC_LCOLL); } - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); 
spin_unlock_irq(&gp->lock); return &gp->net_stats; } +/* Called with dev->xmit_lock held and IRQs disabled. */ static void gem_set_multicast(struct net_device *dev) { struct gem *gp = dev->priv; @@ -2482,9 +2473,6 @@ static void gem_set_multicast(struct net_device *dev) if (!gp->hw_running) return; - spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); - netif_stop_queue(dev); rxcfg = readl(gp->regs + MAC_RXCFG); @@ -2507,9 +2495,6 @@ static void gem_set_multicast(struct net_device *dev) writel(rxcfg, gp->regs + MAC_RXCFG); netif_wake_queue(dev); - - spin_unlock(&gp->tx_lock); - spin_unlock_irq(&gp->lock); } static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) @@ -2540,7 +2525,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) /* Return current PHY settings */ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); cmd->autoneg = gp->want_autoneg; cmd->speed = gp->phy_mii.speed; cmd->duplex = gp->phy_mii.duplex; @@ -2552,7 +2537,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) */ if (cmd->advertising == 0) cmd->advertising = cmd->supported; - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); } else { // XXX PCS ? cmd->supported = @@ -2592,9 +2577,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) /* Apply settings and restart link process. */ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); gem_begin_auto_negotiation(gp, cmd); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); return 0; @@ -2609,9 +2594,9 @@ static int gem_nway_reset(struct net_device *dev) /* Restart link process. 
*/ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); gem_begin_auto_negotiation(gp, NULL); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); return 0; @@ -2863,7 +2848,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, gp->msg_enable = DEFAULT_MSG; spin_lock_init(&gp->lock); - spin_lock_init(&gp->tx_lock); init_MUTEX(&gp->pm_sem); init_timer(&gp->link_timer); @@ -2899,9 +2883,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev, gem_apple_powerup(gp); #endif spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); gem_stop(gp); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); /* Fill up the mii_phy structure (even if we won't use it) */ @@ -2967,11 +2951,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev, /* Detect & init PHY, start autoneg */ spin_lock_irq(&gp->lock); - spin_lock(&gp->tx_lock); + spin_lock(&dev->xmit_lock); gp->hw_running = 1; gem_init_phy(gp); gem_begin_auto_negotiation(gp, NULL); - spin_unlock(&gp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&gp->lock); if (gp->phy_type == phy_mii_mdio0 || @@ -2982,7 +2966,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); /* GEM can do it all... 
*/ - dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; + dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 00343226fb71..8bbc104d848f 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -953,7 +953,6 @@ enum link_state { struct gem { spinlock_t lock; - spinlock_t tx_lock; void __iomem *regs; int rx_new, rx_old; int tx_new, tx_old; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 8a165aca7542..2088143716af 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -60,8 +60,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.15" -#define DRV_MODULE_RELDATE "January 6, 2005" +#define DRV_MODULE_VERSION "3.16" +#define DRV_MODULE_RELDATE "January 17, 2005" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -2706,7 +2706,11 @@ static int tg3_rx(struct tg3 *tp, int budget) len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ - if (len > RX_COPY_THRESHOLD) { + if (len > RX_COPY_THRESHOLD + && tp->rx_offset == 2 + /* rx_offset != 2 iff this is a 5701 card running + * in PCI-X mode [see tg3_get_invariants()] */ + ) { int skb_size; skb_size = tg3_alloc_rx_skb(tp, opaque_key, @@ -2812,9 +2816,9 @@ static int tg3_poll(struct net_device *netdev, int *budget) /* run TX completion thread */ if (sblk->idx[0].tx_consumer != tp->tx_cons) { - spin_lock(&tp->tx_lock); + spin_lock(&netdev->xmit_lock); tg3_tx(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&netdev->xmit_lock); } spin_unlock_irqrestore(&tp->lock, flags); @@ -2935,7 +2939,7 @@ static void tg3_reset_task(void *_data) tg3_netif_stop(tp); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&tp->dev->xmit_lock); restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; @@ -2945,7 +2949,7 @@ static void tg3_reset_task(void *_data) tg3_netif_start(tp); - 
spin_unlock(&tp->tx_lock); + spin_unlock(&tp->dev->xmit_lock); spin_unlock_irq(&tp->lock); if (restart_timer) @@ -3044,6 +3048,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) (base + len + 8 < base)); } +/* dev->xmit_lock is held and IRQs are disabled. */ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); @@ -3051,39 +3056,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int i; u32 len, entry, base_flags, mss; int would_hit_hwbug; - unsigned long flags; len = skb_headlen(skb); - /* No BH disabling for tx_lock here. We are running in BH disabled - * context and TX reclaim runs via tp->poll inside of a software - * interrupt. Rejoice! - * - * Actually, things are not so simple. If we are to take a hw - * IRQ here, we can deadlock, consider: - * - * CPU1 CPU2 - * tg3_start_xmit - * take tp->tx_lock - * tg3_timer - * take tp->lock - * tg3_interrupt - * spin on tp->lock - * spin on tp->tx_lock - * - * So we really do need to disable interrupts when taking - * tx_lock here. - */ - local_irq_save(flags); - if (!spin_trylock(&tp->tx_lock)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } - /* This is a hard error, log it. */ if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { netif_stop_queue(dev); - spin_unlock_irqrestore(&tp->tx_lock, flags); printk(KERN_ERR PFX "%s: BUG! 
Tx Ring full when queue awake!\n", dev->name); return NETDEV_TX_BUSY; @@ -3220,7 +3198,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) entry, len, last_plus_one, &start, mss)) - goto out_unlock; + goto out; entry = start; } @@ -3232,9 +3210,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); -out_unlock: +out: mmiowb(); - spin_unlock_irqrestore(&tp->tx_lock, flags); dev->trans_start = jiffies; @@ -3269,7 +3246,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_netif_stop(tp); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tg3_halt(tp); @@ -3279,7 +3256,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); return 0; @@ -5570,7 +5547,7 @@ static void tg3_timer(unsigned long __opaque) unsigned long flags; spin_lock_irqsave(&tp->lock, flags); - spin_lock(&tp->tx_lock); + spin_lock(&tp->dev->xmit_lock); /* All of this garbage is because when using non-tagged * IRQ status the mailbox/status_block protocol the chip @@ -5586,7 +5563,7 @@ static void tg3_timer(unsigned long __opaque) if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; - spin_unlock(&tp->tx_lock); + spin_unlock(&tp->dev->xmit_lock); spin_unlock_irqrestore(&tp->lock, flags); schedule_work(&tp->reset_task); return; @@ -5655,7 +5632,7 @@ static void tg3_timer(unsigned long __opaque) tp->asf_counter = tp->asf_multiplier; } - spin_unlock(&tp->tx_lock); + spin_unlock(&tp->dev->xmit_lock); spin_unlock_irqrestore(&tp->lock, flags); tp->timer.expires = jiffies + tp->timer_offset; @@ -5668,12 +5645,12 @@ static int tg3_open(struct net_device *dev) int err; spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tg3_disable_ints(tp); tp->tg3_flags &= 
~TG3_FLAG_INIT_COMPLETE; - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); /* The placement of this call is tied @@ -5692,7 +5669,7 @@ static int tg3_open(struct net_device *dev) } spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); err = tg3_init_hw(tp); if (err) { @@ -5712,7 +5689,7 @@ static int tg3_open(struct net_device *dev) tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; } - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); if (err) { @@ -5722,11 +5699,11 @@ static int tg3_open(struct net_device *dev) } spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tg3_enable_ints(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); netif_start_queue(dev); @@ -5974,7 +5951,7 @@ static int tg3_close(struct net_device *dev) del_timer_sync(&tp->timer); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); #if 0 tg3_dump_state(tp); #endif @@ -5988,7 +5965,7 @@ static int tg3_close(struct net_device *dev) TG3_FLAG_GOT_SERDES_FLOWCTL); netif_carrier_off(tp->dev); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); free_irq(dev->irq, dev); @@ -6287,15 +6264,10 @@ static void __tg3_set_rx_mode(struct net_device *dev) } } +/* Called with dev->xmit_lock held and IRQs disabled. 
*/ static void tg3_set_rx_mode(struct net_device *dev) { - struct tg3 *tp = netdev_priv(dev); - - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); __tg3_set_rx_mode(dev); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); } #define TG3_REGDUMP_LEN (32 * 1024) @@ -6318,7 +6290,7 @@ static void tg3_get_regs(struct net_device *dev, memset(p, 0, TG3_REGDUMP_LEN); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); #define __GET_REG32(reg) (*(p)++ = tr32(reg)) #define GET_REG32_LOOP(base,len) \ @@ -6368,7 +6340,7 @@ do { p = (u32 *)(orig_p + (reg)); \ #undef GET_REG32_LOOP #undef GET_REG32_1 - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); } @@ -6492,7 +6464,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tp->link_config.autoneg = cmd->autoneg; if (cmd->autoneg == AUTONEG_ENABLE) { @@ -6506,7 +6478,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } tg3_setup_phy(tp, 1); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); return 0; @@ -6623,7 +6595,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tg3_netif_stop(tp); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tp->rx_pending = ering->rx_pending; @@ -6636,7 +6608,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tg3_halt(tp); tg3_init_hw(tp); tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); return 0; @@ -6657,7 +6629,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_netif_stop(tp); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); if (epause->autoneg) tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; else @@ -6673,7 +6645,7 
@@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_halt(tp); tg3_init_hw(tp); tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); return 0; @@ -6799,14 +6771,14 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) struct tg3 *tp = netdev_priv(dev); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tp->vlgrp = grp; /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ __tg3_set_rx_mode(dev); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); } @@ -6815,10 +6787,10 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) struct tg3 *tp = netdev_priv(dev); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); if (tp->vlgrp) tp->vlgrp->vlan_devices[vid] = NULL; - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); } #endif @@ -8237,7 +8209,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; - dev->features |= NETIF_F_LLTX; #if TG3_VLAN_TAG_USED dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->vlan_rx_register = tg3_vlan_rx_register; @@ -8279,7 +8250,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; #endif spin_lock_init(&tp->lock); - spin_lock_init(&tp->tx_lock); spin_lock_init(&tp->indirect_lock); INIT_WORK(&tp->reset_task, tg3_reset_task, tp); @@ -8492,23 +8462,23 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state) del_timer_sync(&tp->timer); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tg3_disable_ints(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); netif_device_detach(dev); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); 
tg3_halt(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); err = tg3_set_power_state(tp, state); if (err) { spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tg3_init_hw(tp); @@ -8518,7 +8488,7 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state) netif_device_attach(dev); tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); } @@ -8543,7 +8513,7 @@ static int tg3_resume(struct pci_dev *pdev) netif_device_attach(dev); spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + spin_lock(&dev->xmit_lock); tg3_init_hw(tp); @@ -8554,7 +8524,7 @@ static int tg3_resume(struct pci_dev *pdev) tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); + spin_unlock(&dev->xmit_lock); spin_unlock_irq(&tp->lock); return 0; diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 3b22f53d2579..68b7520784ea 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -1980,12 +1980,11 @@ struct tg3 { * lock: Held during all operations except TX packet * processing. * - * tx_lock: Held during tg3_start_xmit{,_4gbug} and tg3_tx + * dev->xmit_lock: Held during tg3_start_xmit and tg3_tx * * If you want to shut up all asynchronous processing you must - * acquire both locks, 'lock' taken before 'tx_lock'. IRQs must - * be disabled to take 'lock' but only softirq disabling is - * necessary for acquisition of 'tx_lock'. + * acquire both locks, 'lock' taken before 'xmit_lock'. IRQs must + * be disabled to take either lock. 
*/ spinlock_t lock; spinlock_t indirect_lock; @@ -2004,8 +2003,6 @@ struct tg3 { u32 tx_cons; u32 tx_pending; - spinlock_t tx_lock; - struct tg3_tx_buffer_desc *tx_ring; struct tx_ring_info *tx_buffers; dma_addr_t tx_desc_mapping; diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 6a10cc610ad4..a84ff2f17599 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -5033,27 +5033,6 @@ qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np) return 0; } -#ifdef CONFIG_QETH_IPV6 -int -qeth_ipv6_generate_eui64(u8 * eui, struct net_device *dev) -{ - switch (dev->type) { - case ARPHRD_ETHER: - case ARPHRD_FDDI: - case ARPHRD_IEEE802_TR: - if (dev->addr_len != ETH_ALEN) - return -1; - memcpy(eui, dev->dev_addr, 3); - memcpy(eui + 5, dev->dev_addr + 3, 3); - eui[3] = (dev->dev_id >> 8) & 0xff; - eui[4] = dev->dev_id & 0xff; - return 0; - } - return -1; - -} -#endif - static void qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev) { @@ -5587,11 +5566,8 @@ qeth_netdev_init(struct net_device *dev) } #ifdef CONFIG_QETH_IPV6 /*IPv6 address autoconfiguration stuff*/ - card->dev->dev_id = card->info.unique_id & 0xffff; if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) - card->dev->generate_eui64 = qeth_ipv6_generate_eui64; - - + card->dev->dev_id = card->info.unique_id & 0xffff; #endif dev->hard_header_parse = NULL; dev->set_mac_address = qeth_layer2_set_mac_address; |
