patch-2.4.22 linux-2.4.22/drivers/net/e1000/e1000_main.c
- Lines: 913
- Date: 2003-08-25 04:44:42.000000000 -0700
- Orig file: linux-2.4.21/drivers/net/e1000/e1000_main.c
- Orig date: 2003-06-13 07:51:34.000000000 -0700
diff -urN linux-2.4.21/drivers/net/e1000/e1000_main.c linux-2.4.22/drivers/net/e1000/e1000_main.c
@@ -30,67 +30,38 @@
/* Change Log
*
- * 5.0.43 3/5/03
- * o Feature: Added support for 82541 and 82547 hardware.
- * o Feature: Added support for Intel Gigabit PHY (IGP) and a variety of
- * eeproms.
- * o Feature: Added support for TCP Segmentation Offload (TSO).
- * o Feature: Added MII ioctl.
- * o Feature: Added support for statistics reporting through ethtool.
- * o Cleanup: Removed proprietary hooks for ANS.
- * o Cleanup: Miscellaneous code changes to improve CPU utilization.
- * - Replaced "%" with conditionals and "+-" operators.
- * - Implemented dynamic Interrupt Throttle Rate (ITR).
- * - Reduced expensive PCI reads of ICR in interrupt.
- * o Bug fix: Request IRQ after descriptor ring setup to avoid panic in
- * shared interrupt instances.
- *
- * 4.4.18 11/27/02
- * o Feature: Added user-settable knob for interrupt throttle rate (ITR).
- * o Cleanup: removed large static array allocations.
- * o Cleanup: C99 struct initializer format.
- * o Bug fix: restore VLAN settings when interface is brought up.
- * o Bug fix: return cleanly in probe if error in detecting MAC type.
- * o Bug fix: Wake up on magic packet by default only if enabled in eeprom.
- * o Bug fix: Validate MAC address in set_mac.
- * o Bug fix: Throw away zero-length Tx skbs.
- * o Bug fix: Make ethtool EEPROM acceses work on older versions of ethtool.
- *
- * 4.4.12 10/15/02
+ * 5.1.13 5/28/03
+ * o Bug fix: request_irq() failure resulted in freeing resources twice!
+ * [Don Fry (brazilnut@us.ibm.com)]
+ * o Bug fix: fix VLAN support on ppc64 [Mark Rakes (mrakes@vivato.net)]
+ * o Bug fix: missing Tx cleanup opportunities during interrupt handling.
+ * o Bug fix: alloc_etherdev failure didn't cleanup regions in probe.
+ * o Cleanup: s/int/unsigned int/ for descriptor ring indexes.
+ *
+ * 5.1.11 5/6/03
+ * o Feature: Added support for 82546EB (Quad-port) hardware.
+ * o Feature: Added support for Diagnostics through Ethtool.
+ * o Cleanup: Removed /proc support.
+ * o Cleanup: Removed proprietary IDIAG interface.
+ * o Bug fix: TSO bug fixes.
+ *
+ * 5.0.42 3/5/03
*/
char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-char e1000_driver_version[] = "5.0.43-k1";
+char e1000_driver_version[] = "5.1.13-k1";
char e1000_copyright[] = "Copyright (c) 1999-2003 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
*
- * Private driver_data field (last one) stores an index into e1000_strings
* Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, String Index }
+ * Class, Class Mask, private data (not used) }
*/
static struct pci_device_id e1000_pci_tbl[] __devinitdata = {
- /* Intel(R) PRO/1000 Network Connection */
- {0x8086, 0x1000, 0x8086, 0x1000, 0, 0, 0},
- {0x8086, 0x1001, 0x8086, 0x1003, 0, 0, 0},
- {0x8086, 0x1004, 0x8086, 0x1004, 0, 0, 0},
- {0x8086, 0x1008, 0x8086, 0x1107, 0, 0, 0},
- {0x8086, 0x1009, 0x8086, 0x1109, 0, 0, 0},
- {0x8086, 0x100C, 0x8086, 0x1112, 0, 0, 0},
- {0x8086, 0x100E, 0x8086, 0x001E, 0, 0, 0},
- /* Compaq Gigabit Ethernet Server Adapter */
- {0x8086, 0x1000, 0x0E11, PCI_ANY_ID, 0, 0, 1},
- {0x8086, 0x1001, 0x0E11, PCI_ANY_ID, 0, 0, 1},
- {0x8086, 0x1004, 0x0E11, PCI_ANY_ID, 0, 0, 1},
- /* IBM Mobile, Desktop & Server Adapters */
- {0x8086, 0x1000, 0x1014, PCI_ANY_ID, 0, 0, 2},
- {0x8086, 0x1001, 0x1014, PCI_ANY_ID, 0, 0, 2},
- {0x8086, 0x1004, 0x1014, PCI_ANY_ID, 0, 0, 2},
- /* Generic */
{0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -106,6 +77,7 @@
{0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
/* required last entry */
@@ -114,12 +86,6 @@
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-static char *e1000_strings[] = {
- "Intel(R) PRO/1000 Network Connection",
- "HP Gigabit Ethernet Server Adapter",
- "IBM Mobile, Desktop & Server Adapters"
-};
-
/* Local Function Prototypes */
int e1000_up(struct e1000_adapter *adapter);
@@ -130,7 +96,7 @@
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void e1000_remove(struct pci_dev *pdev);
+static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
@@ -156,7 +122,13 @@
static inline void e1000_irq_enable(struct e1000_adapter *adapter);
static void e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_NAPI
+static int e1000_clean(struct net_device *netdev, int *budget);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do);
+#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
+#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -189,7 +161,6 @@
.priority = 0
};
-
/* Exported from other modules */
extern void e1000_check_options(struct e1000_adapter *adapter);
@@ -228,8 +199,9 @@
printk(KERN_INFO "%s\n", e1000_copyright);
ret = pci_module_init(&e1000_driver);
- if(ret >= 0)
+ if(ret >= 0) {
register_reboot_notifier(&e1000_notifier_reboot);
+ }
return ret;
}
@@ -269,12 +241,8 @@
e1000_alloc_rx_buffers(adapter);
if(request_irq(netdev->irq, &e1000_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
- netdev->name, netdev)) {
- e1000_reset_hw(&adapter->hw);
- e1000_free_tx_resources(adapter);
- e1000_free_rx_resources(adapter);
+ netdev->name, netdev))
return -1;
- }
mod_timer(&adapter->watchdog_timer, jiffies);
e1000_irq_enable(adapter);
@@ -418,6 +386,10 @@
netdev->do_ioctl = &e1000_ioctl;
netdev->tx_timeout = &e1000_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
+#ifdef CONFIG_E1000_NAPI
+ netdev->poll = &e1000_clean;
+ netdev->weight = 64;
+#endif
netdev->vlan_rx_register = e1000_vlan_rx_register;
netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
@@ -428,7 +400,6 @@
netdev->base_addr = adapter->hw.io_base;
adapter->bd_number = cards_found;
- adapter->id_string = e1000_strings[ent->driver_data];
/* setup the private structure */
@@ -445,6 +416,12 @@
netdev->features = NETIF_F_SG;
}
+#ifdef NETIF_F_TSO
+ if((adapter->hw.mac_type >= e1000_82544) &&
+ (adapter->hw.mac_type != e1000_82547))
+ netdev->features |= NETIF_F_TSO;
+#endif
+
if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -483,15 +460,14 @@
(void (*)(void *))e1000_tx_timeout_task, netdev);
register_netdev(netdev);
- memcpy(adapter->ifname, netdev->name, IFNAMSIZ);
- adapter->ifname[IFNAMSIZ-1] = 0;
/* we're going to reset, so assume we have no link for now */
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- printk(KERN_INFO "%s: %s\n", netdev->name, adapter->id_string);
+ printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Connection\n",
+ netdev->name);
e1000_check_options(adapter);
/* Initial Wake on LAN setting
@@ -516,9 +492,9 @@
err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
- pci_release_regions(pdev);
kfree(netdev);
err_alloc_etherdev:
+ pci_release_regions(pdev);
return -ENOMEM;
}
@@ -551,7 +527,6 @@
e1000_phy_hw_reset(&adapter->hw);
-
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
@@ -814,8 +789,9 @@
e1000_config_collision_dist(&adapter->hw);
- /* Setup Transmit Descriptor Settings for this adapter */
- adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_IDE;
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS;
if(adapter->hw.report_tx_early == 1)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
@@ -993,35 +969,38 @@
static void
e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
+ struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ struct e1000_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
- int i;
+ unsigned int i;
/* Free all the Tx ring sk_buffs */
- for(i = 0; i < adapter->tx_ring.count; i++) {
- if(adapter->tx_ring.buffer_info[i].skb) {
+ for(i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ if(buffer_info->skb) {
pci_unmap_page(pdev,
- adapter->tx_ring.buffer_info[i].dma,
- adapter->tx_ring.buffer_info[i].length,
+ buffer_info->dma,
+ buffer_info->length,
PCI_DMA_TODEVICE);
- dev_kfree_skb(adapter->tx_ring.buffer_info[i].skb);
+ dev_kfree_skb(buffer_info->skb);
- adapter->tx_ring.buffer_info[i].skb = NULL;
+ buffer_info->skb = NULL;
}
}
- size = sizeof(struct e1000_buffer) * adapter->tx_ring.count;
- memset(adapter->tx_ring.buffer_info, 0, size);
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
- memset(adapter->tx_ring.desc, 0, adapter->tx_ring.size);
+ memset(tx_ring->desc, 0, tx_ring->size);
- adapter->tx_ring.next_to_use = 0;
- adapter->tx_ring.next_to_clean = 0;
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
E1000_WRITE_REG(&adapter->hw, TDH, 0);
E1000_WRITE_REG(&adapter->hw, TDT, 0);
@@ -1037,17 +1016,17 @@
static void
e1000_free_rx_resources(struct e1000_adapter *adapter)
{
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct pci_dev *pdev = adapter->pdev;
e1000_clean_rx_ring(adapter);
- kfree(adapter->rx_ring.buffer_info);
- adapter->rx_ring.buffer_info = NULL;
+ kfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, adapter->rx_ring.size,
- adapter->rx_ring.desc, adapter->rx_ring.dma);
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
- adapter->rx_ring.desc = NULL;
+ rx_ring->desc = NULL;
}
/**
@@ -1058,35 +1037,38 @@
static void
e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
+ struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
- int i;
+ unsigned int i;
/* Free all the Rx ring sk_buffs */
- for(i = 0; i < adapter->rx_ring.count; i++) {
- if(adapter->rx_ring.buffer_info[i].skb) {
+ for(i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if(buffer_info->skb) {
pci_unmap_single(pdev,
- adapter->rx_ring.buffer_info[i].dma,
- adapter->rx_ring.buffer_info[i].length,
+ buffer_info->dma,
+ buffer_info->length,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb(adapter->rx_ring.buffer_info[i].skb);
+ dev_kfree_skb(buffer_info->skb);
- adapter->rx_ring.buffer_info[i].skb = NULL;
+ buffer_info->skb = NULL;
}
}
- size = sizeof(struct e1000_buffer) * adapter->rx_ring.count;
- memset(adapter->rx_ring.buffer_info, 0, size);
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
- memset(adapter->rx_ring.desc, 0, adapter->rx_ring.size);
+ memset(rx_ring->desc, 0, rx_ring->size);
- adapter->rx_ring.next_to_clean = 0;
- adapter->rx_ring.next_to_use = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
E1000_WRITE_REG(&adapter->hw, RDH, 0);
E1000_WRITE_REG(&adapter->hw, RDT, 0);
@@ -1336,7 +1318,7 @@
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
struct net_device *netdev = adapter->netdev;
struct e1000_desc_ring *txdr = &adapter->tx_ring;
- int i;
+ unsigned int i;
e1000_check_for_link(&adapter->hw);
@@ -1413,14 +1395,67 @@
#define E1000_TX_FLAGS_CSUM 0x00000001
#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
static inline boolean_t
+e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+#ifdef NETIF_F_TSO
+ struct e1000_context_desc *context_desc;
+ unsigned int i;
+ uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
+ uint16_t ipcse, tucse, mss;
+
+ if(skb_shinfo(skb)->tso_size) {
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+ mss = skb_shinfo(skb)->tso_size;
+ skb->nh.iph->tot_len = 0;
+ skb->nh.iph->check = 0;
+ skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
+ skb->nh.iph->daddr,
+ 0,
+ IPPROTO_TCP,
+ 0);
+ ipcss = skb->nh.raw - skb->data;
+ ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
+ ipcse = skb->h.raw - skb->data - 1;
+ tucss = skb->h.raw - skb->data;
+ tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+ tucse = 0;
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(
+ E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
+ (skb->len - (hdr_len)));
+
+ if(++i == adapter->tx_ring.count) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return TRUE;
+ }
+#endif
+
+ return FALSE;
+}
+
+static inline boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
- int i;
+ unsigned int i;
uint8_t css, cso;
if(skb->ip_summed == CHECKSUM_HW) {
@@ -1434,8 +1469,7 @@
context_desc->upper_setup.tcp_fields.tucso = cso;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
- context_desc->cmd_and_length =
- cpu_to_le32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
+ context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
@@ -1450,25 +1484,39 @@
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
static inline int
-e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
+ unsigned int first)
{
struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
- int len = skb->len, offset = 0, size, count = 0, i;
+ struct e1000_buffer *buffer_info;
+ int len = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
- int f;
+#ifdef NETIF_F_TSO
+ unsigned int tso = skb_shinfo(skb)->tso_size;
+#endif
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
len -= skb->data_len;
i = tx_ring->next_to_use;
while(len) {
+ buffer_info = &tx_ring->buffer_info[i];
size = min(len, E1000_MAX_DATA_PER_TXD);
- tx_ring->buffer_info[i].length = size;
- tx_ring->buffer_info[i].dma =
+#ifdef NETIF_F_TSO
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if(tso && !nr_frags && size == len && size > 4)
+ size -= 4;
+#endif
+ buffer_info->length = size;
+ buffer_info->dma =
pci_map_single(adapter->pdev,
skb->data + offset,
size,
PCI_DMA_TODEVICE);
- tx_ring->buffer_info[i].time_stamp = jiffies;
+ buffer_info->time_stamp = jiffies;
len -= size;
offset += size;
@@ -1476,7 +1524,7 @@
if(++i == tx_ring->count) i = 0;
}
- for(f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ for(f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
@@ -1484,15 +1532,22 @@
offset = 0;
while(len) {
+ buffer_info = &tx_ring->buffer_info[i];
size = min(len, E1000_MAX_DATA_PER_TXD);
- tx_ring->buffer_info[i].length = size;
- tx_ring->buffer_info[i].dma =
+#ifdef NETIF_F_TSO
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if(tso && f == (nr_frags-1) && size == len && size > 4)
+ size -= 4;
+#endif
+ buffer_info->length = size;
+ buffer_info->dma =
pci_map_page(adapter->pdev,
frag->page,
frag->page_offset + offset,
size,
PCI_DMA_TODEVICE);
- tx_ring->buffer_info[i].time_stamp = jiffies;
+ buffer_info->time_stamp = jiffies;
len -= size;
offset += size;
@@ -1500,8 +1555,9 @@
if(++i == tx_ring->count) i = 0;
}
}
- if(--i < 0) i = tx_ring->count - 1;
+ i = (i == 0) ? tx_ring->count - 1 : i - 1;
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
return count;
}
@@ -1511,11 +1567,15 @@
{
struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct e1000_tx_desc *tx_desc = NULL;
- uint32_t txd_upper, txd_lower;
- int i;
-
- txd_upper = 0;
- txd_lower = adapter->txd_cmd;
+ struct e1000_buffer *buffer_info;
+ uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if(tx_flags & E1000_TX_FLAGS_TSO) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
+ }
if(tx_flags & E1000_TX_FLAGS_CSUM) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
@@ -1530,15 +1590,16 @@
i = tx_ring->next_to_use;
while(count--) {
+ buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
- tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
tx_desc->lower.data =
- cpu_to_le32(txd_lower | tx_ring->buffer_info[i].length);
+ cpu_to_le32(txd_lower | buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
if(++i == tx_ring->count) i = 0;
}
- tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP);
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -1598,7 +1659,8 @@
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev->priv;
- int tx_flags = 0;
+ unsigned int first;
+ unsigned int tx_flags = 0;
if(skb->len <= 0) {
dev_kfree_skb_any(skb);
@@ -1623,10 +1685,14 @@
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
- if(e1000_tx_csum(adapter, skb))
+ first = adapter->tx_ring.next_to_use;
+
+ if(e1000_tso(adapter, skb))
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ else if(e1000_tx_csum(adapter, skb))
tx_flags |= E1000_TX_FLAGS_CSUM;
- e1000_tx_queue(adapter, e1000_tx_map(adapter, skb), tx_flags);
+ e1000_tx_queue(adapter, e1000_tx_map(adapter, skb, first), tx_flags);
netdev->trans_start = jiffies;
@@ -1858,6 +1924,7 @@
}
if((hw->mac_type <= e1000_82546) &&
+ (hw->phy_type == e1000_phy_m88) &&
!e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
adapter->phy_stats.receive_errors += phy_tmp;
}
@@ -1904,7 +1971,9 @@
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev->priv;
uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
- int i;
+#ifndef CONFIG_E1000_NAPI
+ unsigned int i;
+#endif
if(!icr)
return; /* Not our interrupt */
@@ -1914,12 +1983,52 @@
mod_timer(&adapter->watchdog_timer, jiffies);
}
+#ifdef CONFIG_E1000_NAPI
+ if(netif_rx_schedule_prep(netdev)) {
+
+ /* Disable interrupts and register for poll. The flush
+ of the posted write is intentionally left out.
+ */
+
+ atomic_inc(&adapter->irq_sem);
+ E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+ __netif_rx_schedule(netdev);
+ }
+#else
for(i = 0; i < E1000_MAX_INTR; i++)
- if(!e1000_clean_rx_irq(adapter) &&
+ if(!e1000_clean_rx_irq(adapter) &
!e1000_clean_tx_irq(adapter))
break;
+#endif
+}
+#ifdef CONFIG_E1000_NAPI
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ **/
+
+static int
+e1000_clean(struct net_device *netdev, int *budget)
+{
+ struct e1000_adapter *adapter = netdev->priv;
+ int work_to_do = min(*budget, netdev->quota);
+ int work_done = 0;
+
+ e1000_clean_tx_irq(adapter);
+ e1000_clean_rx_irq(adapter, &work_done, work_to_do);
+
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ if(work_done < work_to_do) {
+ netif_rx_complete(netdev);
+ e1000_irq_enable(adapter);
+ }
+
+ return (work_done >= work_to_do);
}
+#endif
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
@@ -1932,39 +2041,48 @@
struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_tx_desc *tx_desc;
- int i, cleaned = FALSE;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ boolean_t cleaned = FALSE;
i = tx_ring->next_to_clean;
- tx_desc = E1000_TX_DESC(*tx_ring, i);
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
- while(tx_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+ while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
- cleaned = TRUE;
+ for(cleaned = FALSE; !cleaned; ) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
- if(tx_ring->buffer_info[i].dma) {
+ if(buffer_info->dma) {
- pci_unmap_page(pdev,
- tx_ring->buffer_info[i].dma,
- tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ pci_unmap_page(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
- tx_ring->buffer_info[i].dma = 0;
- }
+ buffer_info->dma = 0;
+ }
- if(tx_ring->buffer_info[i].skb) {
+ if(buffer_info->skb) {
- dev_kfree_skb_any(tx_ring->buffer_info[i].skb);
+ dev_kfree_skb_any(buffer_info->skb);
- tx_ring->buffer_info[i].skb = NULL;
- }
+ buffer_info->skb = NULL;
+ }
- tx_desc->buffer_addr = 0;
- tx_desc->lower.data = 0;
- tx_desc->upper.data = 0;
+ tx_desc->buffer_addr = 0;
+ tx_desc->lower.data = 0;
+ tx_desc->upper.data = 0;
- if(++i == tx_ring->count) i = 0;
- tx_desc = E1000_TX_DESC(*tx_ring, i);
+ cleaned = (i == eop);
+ if(++i == tx_ring->count) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
tx_ring->next_to_clean = i;
@@ -1981,31 +2099,46 @@
**/
static boolean_t
+#ifdef CONFIG_E1000_NAPI
+e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
+ int work_to_do)
+#else
e1000_clean_rx_irq(struct e1000_adapter *adapter)
+#endif
{
struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned long flags;
uint32_t length;
uint8_t last_byte;
- int i, cleaned = FALSE;
+ unsigned int i;
+ boolean_t cleaned = FALSE;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
while(rx_desc->status & E1000_RXD_STAT_DD) {
+ buffer_info = &rx_ring->buffer_info[i];
+
+#ifdef CONFIG_E1000_NAPI
+ if(*work_done >= work_to_do)
+ break;
+
+ (*work_done)++;
+#endif
cleaned = TRUE;
pci_unmap_single(pdev,
- rx_ring->buffer_info[i].dma,
- rx_ring->buffer_info[i].length,
+ buffer_info->dma,
+ buffer_info->length,
PCI_DMA_FROMDEVICE);
- skb = rx_ring->buffer_info[i].skb;
+ skb = buffer_info->skb;
length = le16_to_cpu(rx_desc->length);
if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
@@ -2016,7 +2149,7 @@
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
- rx_ring->buffer_info[i].skb = NULL;
+ buffer_info->skb = NULL;
if(++i == rx_ring->count) i = 0;
@@ -2044,7 +2177,7 @@
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
- rx_ring->buffer_info[i].skb = NULL;
+ buffer_info->skb = NULL;
if(++i == rx_ring->count) i = 0;
@@ -2060,16 +2193,27 @@
e1000_rx_checksum(adapter, rx_desc, skb);
skb->protocol = eth_type_trans(skb, netdev);
+#ifdef CONFIG_E1000_NAPI
+ if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special &
+ E1000_RXD_SPC_VLAN_MASK));
+ } else {
+ netif_receive_skb(skb);
+ }
+#else /* CONFIG_E1000_NAPI */
if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
- (rx_desc->special & E1000_RXD_SPC_VLAN_MASK));
+ le16_to_cpu(rx_desc->special &
+ E1000_RXD_SPC_VLAN_MASK));
} else {
netif_rx(skb);
}
+#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
rx_desc->status = 0;
- rx_ring->buffer_info[i].skb = NULL;
+ buffer_info->skb = NULL;
if(++i == rx_ring->count) i = 0;
@@ -2095,13 +2239,15 @@
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
struct sk_buff *skb;
int reserve_len = 2;
- int i;
+ unsigned int i;
i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
- while(!rx_ring->buffer_info[i].skb) {
+ while(!buffer_info->skb) {
rx_desc = E1000_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len);
@@ -2119,15 +2265,15 @@
skb->dev = netdev;
- rx_ring->buffer_info[i].skb = skb;
- rx_ring->buffer_info[i].length = adapter->rx_buffer_len;
- rx_ring->buffer_info[i].dma =
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+ buffer_info->dma =
pci_map_single(pdev,
skb->data,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
- rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
/* Force memory writes to complete before letting h/w
@@ -2140,6 +2286,7 @@
}
if(++i == rx_ring->count) i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
}
rx_ring->next_to_use = i;
@@ -2518,7 +2665,6 @@
return NOTIFY_DONE;
}
-
static int
e1000_suspend(struct pci_dev *pdev, uint32_t state)
{